| | | |
|---|---|---|
| author | Andi Albrecht <albrecht.andi@gmail.com> | 2010-07-04 20:32:48 +0200 |
| committer | Andi Albrecht <albrecht.andi@gmail.com> | 2010-07-04 20:32:48 +0200 |
| commit | 2a090575f6d0571208a6d3a6ae57c31c4adbc653 | |
| tree | 4e77d6431ee87422ff7285fd58edcc20abd233d4 /sqlparse | |
| parent | b11dd976c8465b95896af26f270334797523428d | |
| download | sqlparse-2a090575f6d0571208a6d3a6ae57c31c4adbc653.tar.gz | |
Clean up imports.
Removed wildcard imports, mainly to keep pyflakes quiet. Doing so also
surfaced a few incorrect imports and class usages, which this change fixes as well.
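The pattern applied throughout the diff is the same: instead of pulling every name into the local namespace with `from ... import *`, each module now imports its sibling module once and qualifies names at the call site, so pyflakes (and readers) can see where each class and token type comes from. A minimal illustration of the resulting style, using only the public API (assuming sqlparse is installed; this is not the changed code itself):

```python
# Old style, removed by this commit: every name from sqlparse.sql lands in
# the local namespace, so pyflakes cannot tell used names from unused ones.
#   from sqlparse.sql import *
#
# New style: import the module once and qualify names where they are used.
import sqlparse
from sqlparse import sql
from sqlparse import tokens as T

stmt = sqlparse.parse("SELECT name FROM users;")[0]
print(isinstance(stmt, sql.Statement))        # True
print(stmt.tokens[0].ttype is T.Keyword.DML)  # True: the SELECT keyword
```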
Diffstat (limited to 'sqlparse')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | sqlparse/engine/filter.py | 2 |
| -rw-r--r-- | sqlparse/engine/grouping.py | 63 |
| -rw-r--r-- | sqlparse/filters.py | 93 |
| -rw-r--r-- | sqlparse/keywords.py | 1113 |
| -rw-r--r-- | sqlparse/lexer.py | 70 |
5 files changed, 668 insertions, 673 deletions
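Most of the churn below is in keywords.py and is mechanical: the KEYWORDS and KEYWORDS_COMMON dictionaries map uppercase keyword strings to token types, and every value is now written as `tokens.Keyword`, `tokens.Name.Builtin`, and so on instead of relying on names pulled in by a wildcard import. The lexer consumes those tables through the `is_keyword` callback that appears in the lexer.py hunk; lifted out as a standalone snippet (assuming sqlparse is importable), the lookup behaves like this:

```python
from sqlparse import tokens
from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON

def is_keyword(value):
    # Common SQL keywords win over the long keyword list; anything unknown
    # falls back to a plain Name token.
    test = value.upper()
    return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, tokens.Name)), value

print(is_keyword('select'))   # (Token.Keyword.DML, 'select')
print(is_keyword('varchar'))  # (Token.Name.Builtin, 'varchar')
print(is_keyword('user_id'))  # (Token.Name, 'user_id')
```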
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py index 0f6eec0..64f4079 100644 --- a/sqlparse/engine/filter.py +++ b/sqlparse/engine/filter.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- +from sqlparse.sql import Statement, Token from sqlparse import tokens as T -from sqlparse.engine.grouping import Statement, Token class TokenFilter(object): diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py index a03c0b4..010a282 100644 --- a/sqlparse/engine/grouping.py +++ b/sqlparse/engine/grouping.py @@ -2,8 +2,8 @@ import itertools +from sqlparse import sql from sqlparse import tokens as T -from sqlparse.sql import * def _group_left_right(tlist, ttype, value, cls, @@ -83,34 +83,35 @@ def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value, token = tlist.token_next_match(idx, start_ttype, start_value) def group_if(tlist): - _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', If, True) + _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', sql.If, True) def group_for(tlist): - _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP', For, True) + _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP', + sql.For, True) def group_as(tlist): def _right_valid(token): # Currently limited to DML/DDL. Maybe additional more non SQL reserved # keywords should appear here (see issue8). return not token.ttype in (T.DML, T.DDL) - _group_left_right(tlist, T.Keyword, 'AS', Identifier, + _group_left_right(tlist, T.Keyword, 'AS', sql.Identifier, check_right=_right_valid) def group_assignment(tlist): - _group_left_right(tlist, T.Assignment, ':=', Assignment, + _group_left_right(tlist, T.Assignment, ':=', sql.Assignment, include_semicolon=True) def group_comparsion(tlist): def _parts_valid(token): return (token.ttype in (T.String.Symbol, T.Name, T.Number, T.Number.Integer, T.Literal) - or isinstance(token, (Identifier,))) - _group_left_right(tlist, T.Operator.Comparsion, None, Comparsion, + or isinstance(token, (sql.Identifier,))) + _group_left_right(tlist, T.Operator.Comparsion, None, sql.Comparsion, check_left=_parts_valid, check_right=_parts_valid) def group_case(tlist): - _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', Case, + _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', sql.Case, include_semicolon=True, recurse=True) @@ -131,11 +132,11 @@ def group_identifier(tlist): # bottom up approach: group subgroups first [group_identifier(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Identifier)] + if not isinstance(sgroup, sql.Identifier)] # real processing idx = 0 - token = tlist.token_next_by_instance(idx, Function) + token = tlist.token_next_by_instance(idx, sql.Function) if token is None: token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name)) while token: @@ -143,28 +144,28 @@ def group_identifier(tlist): _consume_cycle(tlist, tlist.token_index(token)+1)) if not (len(identifier_tokens) == 1 - and isinstance(identifier_tokens[0], Function)): - group = tlist.group_tokens(Identifier, identifier_tokens) + and isinstance(identifier_tokens[0], sql.Function)): + group = tlist.group_tokens(sql.Identifier, identifier_tokens) idx = tlist.token_index(group)+1 else: idx += 1 - token = tlist.token_next_by_instance(idx, Function) + token = tlist.token_next_by_instance(idx, sql.Function) if token is None: token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name)) def group_identifier_list(tlist): [group_identifier_list(sgroup) for sgroup in tlist.get_sublists() - if not 
isinstance(sgroup, (Identifier, IdentifierList))] + if not isinstance(sgroup, (sql.Identifier, sql.IdentifierList))] idx = 0 # Allowed list items - fend1_funcs = [lambda t: isinstance(t, Identifier), + fend1_funcs = [lambda t: isinstance(t, sql.Identifier), lambda t: t.is_whitespace(), lambda t: t.ttype == T.Wildcard, lambda t: t.match(T.Keyword, 'null'), lambda t: t.ttype == T.Number.Integer, lambda t: t.ttype == T.String.Single, - lambda t: isinstance(t, Comparsion), + lambda t: isinstance(t, sql.Comparsion), ] tcomma = tlist.token_next_match(idx, T.Punctuation, ',') start = None @@ -190,7 +191,7 @@ def group_identifier_list(tlist): if next_ is None or not next_.match(T.Punctuation, ','): # Reached the end of the list tokens = tlist.tokens_between(start, after) - group = tlist.group_tokens(IdentifierList, tokens) + group = tlist.group_tokens(sql.IdentifierList, tokens) start = None tcomma = tlist.token_next_match(tlist.token_index(group)+1, T.Punctuation, ',') @@ -199,11 +200,12 @@ def group_identifier_list(tlist): def group_parenthesis(tlist): - _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')', Parenthesis) + _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')', + sql.Parenthesis) def group_comments(tlist): [group_comments(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Comment)] + if not isinstance(sgroup, sql.Comment)] idx = 0 token = tlist.token_next_by_type(idx, T.Comment) while token: @@ -217,13 +219,13 @@ def group_comments(tlist): eidx = tlist.token_index(end) grp_tokens = tlist.tokens_between(token, tlist.token_prev(eidx, False)) - group = tlist.group_tokens(Comment, grp_tokens) + group = tlist.group_tokens(sql.Comment, grp_tokens) idx = tlist.token_index(group) token = tlist.token_next_by_type(idx, T.Comment) def group_where(tlist): [group_where(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Where)] + if not isinstance(sgroup, sql.Where)] idx = 0 token = tlist.token_next_match(idx, T.Keyword, 'WHERE') stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION') @@ -234,41 +236,42 @@ def group_where(tlist): end = tlist._groupable_tokens[-1] else: end = tlist.tokens[tlist.token_index(end)-1] - group = tlist.group_tokens(Where, tlist.tokens_between(token, end)) + group = tlist.group_tokens(sql.Where, + tlist.tokens_between(token, end)) idx = tlist.token_index(group) token = tlist.token_next_match(idx, T.Keyword, 'WHERE') def group_aliased(tlist): [group_aliased(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Identifier)] + if not isinstance(sgroup, sql.Identifier)] idx = 0 - token = tlist.token_next_by_instance(idx, Identifier) + token = tlist.token_next_by_instance(idx, sql.Identifier) while token: next_ = tlist.token_next(tlist.token_index(token)) - if next_ is not None and isinstance(next_, Identifier): + if next_ is not None and isinstance(next_, sql.Identifier): grp = tlist.tokens_between(token, next_)[1:] token.tokens.extend(grp) for t in grp: tlist.tokens.remove(t) idx = tlist.token_index(token)+1 - token = tlist.token_next_by_instance(idx, Identifier) + token = tlist.token_next_by_instance(idx, sql.Identifier) def group_typecasts(tlist): - _group_left_right(tlist, T.Punctuation, '::', Identifier) + _group_left_right(tlist, T.Punctuation, '::', sql.Identifier) def group_functions(tlist): [group_functions(sgroup) for sgroup in tlist.get_sublists() - if not isinstance(sgroup, Function)] + if not isinstance(sgroup, sql.Function)] idx = 0 token = tlist.token_next_by_type(idx, T.Name) while token: 
next_ = tlist.token_next(token) - if not isinstance(next_, Parenthesis): + if not isinstance(next_, sql.Parenthesis): idx = tlist.token_index(token)+1 else: - func = tlist.group_tokens(Function, + func = tlist.group_tokens(sql.Function, tlist.tokens_between(token, next_)) idx = tlist.token_index(func)+1 token = tlist.token_next_by_type(idx, T.Name) diff --git a/sqlparse/filters.py b/sqlparse/filters.py index d2bfebc..a31e0b4 100644 --- a/sqlparse/filters.py +++ b/sqlparse/filters.py @@ -2,7 +2,6 @@ import re -from sqlparse.engine import grouping from sqlparse import tokens as T from sqlparse import sql @@ -53,10 +52,9 @@ class IdentifierCaseFilter(_CaseFilter): class StripCommentsFilter(Filter): def _process(self, tlist): - idx = 0 clss = set([x.__class__ for x in tlist.tokens]) - while grouping.Comment in clss: - token = tlist.token_next_by_instance(0, grouping.Comment) + while sql.Comment in clss: + token = tlist.token_next_by_instance(0, sql.Comment) tidx = tlist.token_index(token) prev = tlist.token_prev(tidx, False) next_ = tlist.token_next(tidx, False) @@ -66,7 +64,7 @@ class StripCommentsFilter(Filter): and not prev.is_whitespace() and not next_.is_whitespace() and not (prev.match(T.Punctuation, '(') or next_.match(T.Punctuation, ')'))): - tlist.tokens[tidx] = grouping.Token(T.Whitespace, ' ') + tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ') else: tlist.tokens.pop(tidx) clss = set([x.__class__ for x in tlist.tokens]) @@ -130,7 +128,7 @@ class ReindentFilter(Filter): def nl(self): # TODO: newline character should be configurable ws = '\n'+(self.char*((self.indent*self.width)+self.offset)) - return grouping.Token(T.Whitespace, ws) + return sql.Token(T.Whitespace, ws) def _split_kwds(self, tlist): split_words = ('FROM', 'JOIN$', 'AND', 'OR', @@ -209,7 +207,6 @@ class ReindentFilter(Filter): self._process_default(tlist) def _process_case(self, tlist): - cases = tlist.get_cases() is_first = True num_offset = None case = tlist.tokens[0] @@ -245,17 +242,17 @@ class ReindentFilter(Filter): [self._process(sgroup) for sgroup in tlist.get_sublists()] def process(self, stack, stmt): - if isinstance(stmt, grouping.Statement): + if isinstance(stmt, sql.Statement): self._curr_stmt = stmt self._process(stmt) - if isinstance(stmt, grouping.Statement): + if isinstance(stmt, sql.Statement): if self._last_stmt is not None: if self._last_stmt.to_unicode().endswith('\n'): nl = '\n' else: nl = '\n\n' stmt.tokens.insert(0, - grouping.Token(T.Whitespace, nl)) + sql.Token(T.Whitespace, nl)) if self._last_stmt != stmt: self._last_stmt = stmt @@ -264,7 +261,7 @@ class ReindentFilter(Filter): class RightMarginFilter(Filter): keep_together = ( -# grouping.TypeCast, grouping.Identifier, grouping.Alias, +# sql.TypeCast, sql.Identifier, sql.Alias, ) def __init__(self, width=79): @@ -289,7 +286,7 @@ class RightMarginFilter(Filter): indent = match.group() else: indent = '' - yield grouping.Token(T.Whitespace, '\n%s' % indent) + yield sql.Token(T.Whitespace, '\n%s' % indent) self.line = indent self.line += val yield token @@ -321,14 +318,14 @@ class OutputPythonFilter(Filter): def _process(self, stream, varname, count, has_nl): if count > 1: - yield grouping.Token(T.Whitespace, '\n') - yield grouping.Token(T.Name, varname) - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Operator, '=') - yield grouping.Token(T.Whitespace, ' ') + yield sql.Token(T.Whitespace, '\n') + yield sql.Token(T.Name, varname) + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Operator, '=') + yield 
sql.Token(T.Whitespace, ' ') if has_nl: - yield grouping.Token(T.Operator, '(') - yield grouping.Token(T.Text, "'") + yield sql.Token(T.Operator, '(') + yield sql.Token(T.Text, "'") cnt = 0 for token in stream: cnt += 1 @@ -336,20 +333,20 @@ class OutputPythonFilter(Filter): if cnt == 1: continue after_lb = token.value.split('\n', 1)[1] - yield grouping.Token(T.Text, " '") - yield grouping.Token(T.Whitespace, '\n') + yield sql.Token(T.Text, " '") + yield sql.Token(T.Whitespace, '\n') for i in range(len(varname)+4): - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Text, "'") + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Text, "'") if after_lb: # it's the indendation - yield grouping.Token(T.Whitespace, after_lb) + yield sql.Token(T.Whitespace, after_lb) continue elif token.value and "'" in token.value: token.value = token.value.replace("'", "\\'") - yield grouping.Token(T.Text, token.value or '') - yield grouping.Token(T.Text, "'") + yield sql.Token(T.Text, token.value or '') + yield sql.Token(T.Text, "'") if has_nl: - yield grouping.Token(T.Operator, ')') + yield sql.Token(T.Operator, ')') def process(self, stack, stmt): self.cnt += 1 @@ -370,36 +367,32 @@ class OutputPHPFilter(Filter): def _process(self, stream, varname): if self.count > 1: - yield grouping.Token(T.Whitespace, '\n') - yield grouping.Token(T.Name, varname) - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Operator, '=') - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Text, '"') - cnt = 0 + yield sql.Token(T.Whitespace, '\n') + yield sql.Token(T.Name, varname) + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Operator, '=') + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Text, '"') for token in stream: if token.is_whitespace() and '\n' in token.value: -# cnt += 1 -# if cnt == 1: -# continue after_lb = token.value.split('\n', 1)[1] - yield grouping.Token(T.Text, ' "') - yield grouping.Token(T.Operator, ';') - yield grouping.Token(T.Whitespace, '\n') - yield grouping.Token(T.Name, varname) - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Punctuation, '.') - yield grouping.Token(T.Operator, '=') - yield grouping.Token(T.Whitespace, ' ') - yield grouping.Token(T.Text, '"') + yield sql.Token(T.Text, ' "') + yield sql.Token(T.Operator, ';') + yield sql.Token(T.Whitespace, '\n') + yield sql.Token(T.Name, varname) + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Punctuation, '.') + yield sql.Token(T.Operator, '=') + yield sql.Token(T.Whitespace, ' ') + yield sql.Token(T.Text, '"') if after_lb: - yield grouping.Token(T.Text, after_lb) + yield sql.Token(T.Text, after_lb) continue elif '"' in token.value: token.value = token.value.replace('"', '\\"') - yield grouping.Token(T.Text, token.value) - yield grouping.Token(T.Text, '"') - yield grouping.Token(T.Punctuation, ';') + yield sql.Token(T.Text, token.value) + yield sql.Token(T.Text, '"') + yield sql.Token(T.Punctuation, ';') def process(self, stack, stmt): self.count += 1 diff --git a/sqlparse/keywords.py b/sqlparse/keywords.py index 9218ee7..0eb5f8a 100644 --- a/sqlparse/keywords.py +++ b/sqlparse/keywords.py @@ -1,564 +1,563 @@ -from sqlparse.tokens import * +from sqlparse import tokens KEYWORDS = { - 'ABORT': Keyword, - 'ABS': Keyword, - 'ABSOLUTE': Keyword, - 'ACCESS': Keyword, - 'ADA': Keyword, - 'ADD': Keyword, - 'ADMIN': Keyword, - 'AFTER': Keyword, - 'AGGREGATE': Keyword, - 'ALIAS': Keyword, - 'ALL': Keyword, - 'ALLOCATE': Keyword, - 'ANALYSE': Keyword, - 'ANALYZE': 
Keyword, - 'ANY': Keyword, - 'ARE': Keyword, - 'ASC': Keyword, - 'ASENSITIVE': Keyword, - 'ASSERTION': Keyword, - 'ASSIGNMENT': Keyword, - 'ASYMMETRIC': Keyword, - 'AT': Keyword, - 'ATOMIC': Keyword, - 'AUTHORIZATION': Keyword, - 'AVG': Keyword, - - 'BACKWARD': Keyword, - 'BEFORE': Keyword, - 'BEGIN': Keyword, - 'BETWEEN': Keyword, - 'BITVAR': Keyword, - 'BIT_LENGTH': Keyword, - 'BOTH': Keyword, - 'BREADTH': Keyword, - -# 'C': Keyword, # most likely this is an alias - 'CACHE': Keyword, - 'CALL': Keyword, - 'CALLED': Keyword, - 'CARDINALITY': Keyword, - 'CASCADE': Keyword, - 'CASCADED': Keyword, - 'CAST': Keyword, - 'CATALOG': Keyword, - 'CATALOG_NAME': Keyword, - 'CHAIN': Keyword, - 'CHARACTERISTICS': Keyword, - 'CHARACTER_LENGTH': Keyword, - 'CHARACTER_SET_CATALOG': Keyword, - 'CHARACTER_SET_NAME': Keyword, - 'CHARACTER_SET_SCHEMA': Keyword, - 'CHAR_LENGTH': Keyword, - 'CHECK': Keyword, - 'CHECKED': Keyword, - 'CHECKPOINT': Keyword, - 'CLASS': Keyword, - 'CLASS_ORIGIN': Keyword, - 'CLOB': Keyword, - 'CLOSE': Keyword, - 'CLUSTER': Keyword, - 'COALSECE': Keyword, - 'COBOL': Keyword, - 'COLLATE': Keyword, - 'COLLATION': Keyword, - 'COLLATION_CATALOG': Keyword, - 'COLLATION_NAME': Keyword, - 'COLLATION_SCHEMA': Keyword, - 'COLUMN': Keyword, - 'COLUMN_NAME': Keyword, - 'COMMAND_FUNCTION': Keyword, - 'COMMAND_FUNCTION_CODE': Keyword, - 'COMMENT': Keyword, - 'COMMIT': Keyword, - 'COMMITTED': Keyword, - 'COMPLETION': Keyword, - 'CONDITION_NUMBER': Keyword, - 'CONNECT': Keyword, - 'CONNECTION': Keyword, - 'CONNECTION_NAME': Keyword, - 'CONSTRAINT': Keyword, - 'CONSTRAINTS': Keyword, - 'CONSTRAINT_CATALOG': Keyword, - 'CONSTRAINT_NAME': Keyword, - 'CONSTRAINT_SCHEMA': Keyword, - 'CONSTRUCTOR': Keyword, - 'CONTAINS': Keyword, - 'CONTINUE': Keyword, - 'CONVERSION': Keyword, - 'CONVERT': Keyword, - 'COPY': Keyword, - 'CORRESPONTING': Keyword, - 'COUNT': Keyword, - 'CREATEDB': Keyword, - 'CREATEUSER': Keyword, - 'CROSS': Keyword, - 'CUBE': Keyword, - 'CURRENT': Keyword, - 'CURRENT_DATE': Keyword, - 'CURRENT_PATH': Keyword, - 'CURRENT_ROLE': Keyword, - 'CURRENT_TIME': Keyword, - 'CURRENT_TIMESTAMP': Keyword, - 'CURRENT_USER': Keyword, - 'CURSOR': Keyword, - 'CURSOR_NAME': Keyword, - 'CYCLE': Keyword, - - 'DATA': Keyword, - 'DATABASE': Keyword, - 'DATETIME_INTERVAL_CODE': Keyword, - 'DATETIME_INTERVAL_PRECISION': Keyword, - 'DAY': Keyword, - 'DEALLOCATE': Keyword, - 'DECLARE': Keyword, - 'DEFAULT': Keyword, - 'DEFAULTS': Keyword, - 'DEFERRABLE': Keyword, - 'DEFERRED': Keyword, - 'DEFINED': Keyword, - 'DEFINER': Keyword, - 'DELIMITER': Keyword, - 'DELIMITERS': Keyword, - 'DEREF': Keyword, - 'DESC': Keyword, - 'DESCRIBE': Keyword, - 'DESCRIPTOR': Keyword, - 'DESTROY': Keyword, - 'DESTRUCTOR': Keyword, - 'DETERMINISTIC': Keyword, - 'DIAGNOSTICS': Keyword, - 'DICTIONARY': Keyword, - 'DISCONNECT': Keyword, - 'DISPATCH': Keyword, - 'DO': Keyword, - 'DOMAIN': Keyword, - 'DYNAMIC': Keyword, - 'DYNAMIC_FUNCTION': Keyword, - 'DYNAMIC_FUNCTION_CODE': Keyword, - - 'EACH': Keyword, - 'ENCODING': Keyword, - 'ENCRYPTED': Keyword, - 'END-EXEC': Keyword, - 'EQUALS': Keyword, - 'ESCAPE': Keyword, - 'EVERY': Keyword, - 'EXCEPT': Keyword, - 'ESCEPTION': Keyword, - 'EXCLUDING': Keyword, - 'EXCLUSIVE': Keyword, - 'EXEC': Keyword, - 'EXECUTE': Keyword, - 'EXISTING': Keyword, - 'EXISTS': Keyword, - 'EXTERNAL': Keyword, - 'EXTRACT': Keyword, - - 'FALSE': Keyword, - 'FETCH': Keyword, - 'FINAL': Keyword, - 'FIRST': Keyword, - 'FORCE': Keyword, - 'FOREIGN': Keyword, - 'FORTRAN': Keyword, - 'FORWARD': Keyword, - 'FOUND': Keyword, - 
'FREE': Keyword, - 'FREEZE': Keyword, - 'FULL': Keyword, - 'FUNCTION': Keyword, - - 'G': Keyword, - 'GENERAL': Keyword, - 'GENERATED': Keyword, - 'GET': Keyword, - 'GLOBAL': Keyword, - 'GO': Keyword, - 'GOTO': Keyword, - 'GRANT': Keyword, - 'GRANTED': Keyword, - 'GROUPING': Keyword, - - 'HANDLER': Keyword, - 'HAVING': Keyword, - 'HIERARCHY': Keyword, - 'HOLD': Keyword, - 'HOST': Keyword, - - 'IDENTITY': Keyword, - 'IGNORE': Keyword, - 'ILIKE': Keyword, - 'IMMEDIATE': Keyword, - 'IMMUTABLE': Keyword, - - 'IMPLEMENTATION': Keyword, - 'IMPLICIT': Keyword, - 'INCLUDING': Keyword, - 'INCREMENT': Keyword, - 'INDEX': Keyword, - - 'INDITCATOR': Keyword, - 'INFIX': Keyword, - 'INHERITS': Keyword, - 'INITIALIZE': Keyword, - 'INITIALLY': Keyword, - 'INOUT': Keyword, - 'INPUT': Keyword, - 'INSENSITIVE': Keyword, - 'INSTANTIABLE': Keyword, - 'INSTEAD': Keyword, - 'INTERSECT': Keyword, - 'INTO': Keyword, - 'INVOKER': Keyword, - 'IS': Keyword, - 'ISNULL': Keyword, - 'ISOLATION': Keyword, - 'ITERATE': Keyword, - - 'K': Keyword, - 'KEY': Keyword, - 'KEY_MEMBER': Keyword, - 'KEY_TYPE': Keyword, - - 'LANCOMPILER': Keyword, - 'LANGUAGE': Keyword, - 'LARGE': Keyword, - 'LAST': Keyword, - 'LATERAL': Keyword, - 'LEADING': Keyword, - 'LENGTH': Keyword, - 'LESS': Keyword, - 'LEVEL': Keyword, - 'LIMIT': Keyword, - 'LISTEN': Keyword, - 'LOAD': Keyword, - 'LOCAL': Keyword, - 'LOCALTIME': Keyword, - 'LOCALTIMESTAMP': Keyword, - 'LOCATION': Keyword, - 'LOCATOR': Keyword, - 'LOCK': Keyword, - 'LOWER': Keyword, - - 'M': Keyword, - 'MAP': Keyword, - 'MATCH': Keyword, - 'MAXVALUE': Keyword, - 'MESSAGE_LENGTH': Keyword, - 'MESSAGE_OCTET_LENGTH': Keyword, - 'MESSAGE_TEXT': Keyword, - 'METHOD': Keyword, - 'MINUTE': Keyword, - 'MINVALUE': Keyword, - 'MOD': Keyword, - 'MODE': Keyword, - 'MODIFIES': Keyword, - 'MODIFY': Keyword, - 'MONTH': Keyword, - 'MORE': Keyword, - 'MOVE': Keyword, - 'MUMPS': Keyword, - - 'NAMES': Keyword, - 'NATIONAL': Keyword, - 'NATURAL': Keyword, - 'NCHAR': Keyword, - 'NCLOB': Keyword, - 'NEW': Keyword, - 'NEXT': Keyword, - 'NO': Keyword, - 'NOCREATEDB': Keyword, - 'NOCREATEUSER': Keyword, - 'NONE': Keyword, - 'NOT': Keyword, - 'NOTHING': Keyword, - 'NOTIFY': Keyword, - 'NOTNULL': Keyword, - 'NULL': Keyword, - 'NULLABLE': Keyword, - 'NULLIF': Keyword, - - 'OBJECT': Keyword, - 'OCTET_LENGTH': Keyword, - 'OF': Keyword, - 'OFF': Keyword, - 'OFFSET': Keyword, - 'OIDS': Keyword, - 'OLD': Keyword, - 'ONLY': Keyword, - 'OPEN': Keyword, - 'OPERATION': Keyword, - 'OPERATOR': Keyword, - 'OPTION': Keyword, - 'OPTIONS': Keyword, - 'ORDINALITY': Keyword, - 'OUT': Keyword, - 'OUTPUT': Keyword, - 'OVERLAPS': Keyword, - 'OVERLAY': Keyword, - 'OVERRIDING': Keyword, - 'OWNER': Keyword, - - 'PAD': Keyword, - 'PARAMETER': Keyword, - 'PARAMETERS': Keyword, - 'PARAMETER_MODE': Keyword, - 'PARAMATER_NAME': Keyword, - 'PARAMATER_ORDINAL_POSITION': Keyword, - 'PARAMETER_SPECIFIC_CATALOG': Keyword, - 'PARAMETER_SPECIFIC_NAME': Keyword, - 'PARAMATER_SPECIFIC_SCHEMA': Keyword, - 'PARTIAL': Keyword, - 'PASCAL': Keyword, - 'PENDANT': Keyword, - 'PLACING': Keyword, - 'PLI': Keyword, - 'POSITION': Keyword, - 'POSTFIX': Keyword, - 'PRECISION': Keyword, - 'PREFIX': Keyword, - 'PREORDER': Keyword, - 'PREPARE': Keyword, - 'PRESERVE': Keyword, - 'PRIMARY': Keyword, - 'PRIOR': Keyword, - 'PRIVILEGES': Keyword, - 'PROCEDURAL': Keyword, - 'PROCEDURE': Keyword, - 'PUBLIC': Keyword, - - 'RAISE': Keyword, - 'READ': Keyword, - 'READS': Keyword, - 'RECHECK': Keyword, - 'RECURSIVE': Keyword, - 'REF': Keyword, - 'REFERENCES': Keyword, - 
'REFERENCING': Keyword, - 'REINDEX': Keyword, - 'RELATIVE': Keyword, - 'RENAME': Keyword, - 'REPEATABLE': Keyword, - 'REPLACE': Keyword, - 'RESET': Keyword, - 'RESTART': Keyword, - 'RESTRICT': Keyword, - 'RESULT': Keyword, - 'RETURN': Keyword, - 'RETURNED_LENGTH': Keyword, - 'RETURNED_OCTET_LENGTH': Keyword, - 'RETURNED_SQLSTATE': Keyword, - 'RETURNS': Keyword, - 'REVOKE': Keyword, - 'RIGHT': Keyword, - 'ROLE': Keyword, - 'ROLLBACK': Keyword, - 'ROLLUP': Keyword, - 'ROUTINE': Keyword, - 'ROUTINE_CATALOG': Keyword, - 'ROUTINE_NAME': Keyword, - 'ROUTINE_SCHEMA': Keyword, - 'ROW': Keyword, - 'ROWS': Keyword, - 'ROW_COUNT': Keyword, - 'RULE': Keyword, - - 'SAVE_POINT': Keyword, - 'SCALE': Keyword, - 'SCHEMA': Keyword, - 'SCHEMA_NAME': Keyword, - 'SCOPE': Keyword, - 'SCROLL': Keyword, - 'SEARCH': Keyword, - 'SECOND': Keyword, - 'SECURITY': Keyword, - 'SELF': Keyword, - 'SENSITIVE': Keyword, - 'SERIALIZABLE': Keyword, - 'SERVER_NAME': Keyword, - 'SESSION': Keyword, - 'SESSION_USER': Keyword, - 'SETOF': Keyword, - 'SETS': Keyword, - 'SHARE': Keyword, - 'SHOW': Keyword, - 'SIMILAR': Keyword, - 'SIMPLE': Keyword, - 'SIZE': Keyword, - 'SOME': Keyword, - 'SOURCE': Keyword, - 'SPACE': Keyword, - 'SPECIFIC': Keyword, - 'SPECIFICTYPE': Keyword, - 'SPECIFIC_NAME': Keyword, - 'SQL': Keyword, - 'SQLCODE': Keyword, - 'SQLERROR': Keyword, - 'SQLEXCEPTION': Keyword, - 'SQLSTATE': Keyword, - 'SQLWARNINIG': Keyword, - 'STABLE': Keyword, - 'START': Keyword, - 'STATE': Keyword, - 'STATEMENT': Keyword, - 'STATIC': Keyword, - 'STATISTICS': Keyword, - 'STDIN': Keyword, - 'STDOUT': Keyword, - 'STORAGE': Keyword, - 'STRICT': Keyword, - 'STRUCTURE': Keyword, - 'STYPE': Keyword, - 'SUBCLASS_ORIGIN': Keyword, - 'SUBLIST': Keyword, - 'SUBSTRING': Keyword, - 'SUM': Keyword, - 'SYMMETRIC': Keyword, - 'SYSID': Keyword, - 'SYSTEM': Keyword, - 'SYSTEM_USER': Keyword, - - 'TABLE': Keyword, - 'TABLE_NAME': Keyword, - ' TEMP': Keyword, - 'TEMPLATE': Keyword, - 'TEMPORARY': Keyword, - 'TERMINATE': Keyword, - 'THAN': Keyword, - 'TIMESTAMP': Keyword, - 'TIMEZONE_HOUR': Keyword, - 'TIMEZONE_MINUTE': Keyword, - 'TO': Keyword, - 'TOAST': Keyword, - 'TRAILING': Keyword, - 'TRANSATION': Keyword, - 'TRANSACTIONS_COMMITTED': Keyword, - 'TRANSACTIONS_ROLLED_BACK': Keyword, - 'TRANSATION_ACTIVE': Keyword, - 'TRANSFORM': Keyword, - 'TRANSFORMS': Keyword, - 'TRANSLATE': Keyword, - 'TRANSLATION': Keyword, - 'TREAT': Keyword, - 'TRIGGER': Keyword, - 'TRIGGER_CATALOG': Keyword, - 'TRIGGER_NAME': Keyword, - 'TRIGGER_SCHEMA': Keyword, - 'TRIM': Keyword, - 'TRUE': Keyword, - 'TRUNCATE': Keyword, - 'TRUSTED': Keyword, - 'TYPE': Keyword, - - 'UNCOMMITTED': Keyword, - 'UNDER': Keyword, - 'UNENCRYPTED': Keyword, - 'UNION': Keyword, - 'UNIQUE': Keyword, - 'UNKNOWN': Keyword, - 'UNLISTEN': Keyword, - 'UNNAMED': Keyword, - 'UNNEST': Keyword, - 'UNTIL': Keyword, - 'UPPER': Keyword, - 'USAGE': Keyword, - 'USER': Keyword, - 'USER_DEFINED_TYPE_CATALOG': Keyword, - 'USER_DEFINED_TYPE_NAME': Keyword, - 'USER_DEFINED_TYPE_SCHEMA': Keyword, - 'USING': Keyword, - - 'VACUUM': Keyword, - 'VALID': Keyword, - 'VALIDATOR': Keyword, - 'VALUES': Keyword, - 'VARIABLE': Keyword, - 'VERBOSE': Keyword, - 'VERSION': Keyword, - 'VIEW': Keyword, - 'VOLATILE': Keyword, - - 'WHENEVER': Keyword, - 'WITH': Keyword, - 'WITHOUT': Keyword, - 'WORK': Keyword, - 'WRITE': Keyword, - - 'YEAR': Keyword, - - 'ZONE': Keyword, - - - 'ARRAY': Name.Builtin, - 'BIGINT': Name.Builtin, - 'BINARY': Name.Builtin, - 'BIT': Name.Builtin, - 'BLOB': Name.Builtin, - 'BOOLEAN': Name.Builtin, - 'CHAR': 
Name.Builtin, - 'CHARACTER': Name.Builtin, - 'DATE': Name.Builtin, - 'DEC': Name.Builtin, - 'DECIMAL': Name.Builtin, - 'FLOAT': Name.Builtin, - 'INT': Name.Builtin, - 'INTEGER': Name.Builtin, - 'INTERVAL': Name.Builtin, - 'NUMBER': Name.Builtin, - 'NUMERIC': Name.Builtin, - 'REAL': Name.Builtin, - 'SERIAL': Name.Builtin, - 'SMALLINT': Name.Builtin, - 'VARCHAR': Name.Builtin, - 'VARYING': Name.Builtin, - 'INT8': Name.Builtin, - 'SERIAL8': Name.Builtin, - 'TEXT': Name.Builtin, + 'ABORT': tokens.Keyword, + 'ABS': tokens.Keyword, + 'ABSOLUTE': tokens.Keyword, + 'ACCESS': tokens.Keyword, + 'ADA': tokens.Keyword, + 'ADD': tokens.Keyword, + 'ADMIN': tokens.Keyword, + 'AFTER': tokens.Keyword, + 'AGGREGATE': tokens.Keyword, + 'ALIAS': tokens.Keyword, + 'ALL': tokens.Keyword, + 'ALLOCATE': tokens.Keyword, + 'ANALYSE': tokens.Keyword, + 'ANALYZE': tokens.Keyword, + 'ANY': tokens.Keyword, + 'ARE': tokens.Keyword, + 'ASC': tokens.Keyword, + 'ASENSITIVE': tokens.Keyword, + 'ASSERTION': tokens.Keyword, + 'ASSIGNMENT': tokens.Keyword, + 'ASYMMETRIC': tokens.Keyword, + 'AT': tokens.Keyword, + 'ATOMIC': tokens.Keyword, + 'AUTHORIZATION': tokens.Keyword, + 'AVG': tokens.Keyword, + + 'BACKWARD': tokens.Keyword, + 'BEFORE': tokens.Keyword, + 'BEGIN': tokens.Keyword, + 'BETWEEN': tokens.Keyword, + 'BITVAR': tokens.Keyword, + 'BIT_LENGTH': tokens.Keyword, + 'BOTH': tokens.Keyword, + 'BREADTH': tokens.Keyword, + +# 'C': tokens.Keyword, # most likely this is an alias + 'CACHE': tokens.Keyword, + 'CALL': tokens.Keyword, + 'CALLED': tokens.Keyword, + 'CARDINALITY': tokens.Keyword, + 'CASCADE': tokens.Keyword, + 'CASCADED': tokens.Keyword, + 'CAST': tokens.Keyword, + 'CATALOG': tokens.Keyword, + 'CATALOG_NAME': tokens.Keyword, + 'CHAIN': tokens.Keyword, + 'CHARACTERISTICS': tokens.Keyword, + 'CHARACTER_LENGTH': tokens.Keyword, + 'CHARACTER_SET_CATALOG': tokens.Keyword, + 'CHARACTER_SET_NAME': tokens.Keyword, + 'CHARACTER_SET_SCHEMA': tokens.Keyword, + 'CHAR_LENGTH': tokens.Keyword, + 'CHECK': tokens.Keyword, + 'CHECKED': tokens.Keyword, + 'CHECKPOINT': tokens.Keyword, + 'CLASS': tokens.Keyword, + 'CLASS_ORIGIN': tokens.Keyword, + 'CLOB': tokens.Keyword, + 'CLOSE': tokens.Keyword, + 'CLUSTER': tokens.Keyword, + 'COALSECE': tokens.Keyword, + 'COBOL': tokens.Keyword, + 'COLLATE': tokens.Keyword, + 'COLLATION': tokens.Keyword, + 'COLLATION_CATALOG': tokens.Keyword, + 'COLLATION_NAME': tokens.Keyword, + 'COLLATION_SCHEMA': tokens.Keyword, + 'COLUMN': tokens.Keyword, + 'COLUMN_NAME': tokens.Keyword, + 'COMMAND_FUNCTION': tokens.Keyword, + 'COMMAND_FUNCTION_CODE': tokens.Keyword, + 'COMMENT': tokens.Keyword, + 'COMMIT': tokens.Keyword, + 'COMMITTED': tokens.Keyword, + 'COMPLETION': tokens.Keyword, + 'CONDITION_NUMBER': tokens.Keyword, + 'CONNECT': tokens.Keyword, + 'CONNECTION': tokens.Keyword, + 'CONNECTION_NAME': tokens.Keyword, + 'CONSTRAINT': tokens.Keyword, + 'CONSTRAINTS': tokens.Keyword, + 'CONSTRAINT_CATALOG': tokens.Keyword, + 'CONSTRAINT_NAME': tokens.Keyword, + 'CONSTRAINT_SCHEMA': tokens.Keyword, + 'CONSTRUCTOR': tokens.Keyword, + 'CONTAINS': tokens.Keyword, + 'CONTINUE': tokens.Keyword, + 'CONVERSION': tokens.Keyword, + 'CONVERT': tokens.Keyword, + 'COPY': tokens.Keyword, + 'CORRESPONTING': tokens.Keyword, + 'COUNT': tokens.Keyword, + 'CREATEDB': tokens.Keyword, + 'CREATEUSER': tokens.Keyword, + 'CROSS': tokens.Keyword, + 'CUBE': tokens.Keyword, + 'CURRENT': tokens.Keyword, + 'CURRENT_DATE': tokens.Keyword, + 'CURRENT_PATH': tokens.Keyword, + 'CURRENT_ROLE': tokens.Keyword, + 'CURRENT_TIME': tokens.Keyword, + 
'CURRENT_TIMESTAMP': tokens.Keyword, + 'CURRENT_USER': tokens.Keyword, + 'CURSOR': tokens.Keyword, + 'CURSOR_NAME': tokens.Keyword, + 'CYCLE': tokens.Keyword, + + 'DATA': tokens.Keyword, + 'DATABASE': tokens.Keyword, + 'DATETIME_INTERVAL_CODE': tokens.Keyword, + 'DATETIME_INTERVAL_PRECISION': tokens.Keyword, + 'DAY': tokens.Keyword, + 'DEALLOCATE': tokens.Keyword, + 'DECLARE': tokens.Keyword, + 'DEFAULT': tokens.Keyword, + 'DEFAULTS': tokens.Keyword, + 'DEFERRABLE': tokens.Keyword, + 'DEFERRED': tokens.Keyword, + 'DEFINED': tokens.Keyword, + 'DEFINER': tokens.Keyword, + 'DELIMITER': tokens.Keyword, + 'DELIMITERS': tokens.Keyword, + 'DEREF': tokens.Keyword, + 'DESC': tokens.Keyword, + 'DESCRIBE': tokens.Keyword, + 'DESCRIPTOR': tokens.Keyword, + 'DESTROY': tokens.Keyword, + 'DESTRUCTOR': tokens.Keyword, + 'DETERMINISTIC': tokens.Keyword, + 'DIAGNOSTICS': tokens.Keyword, + 'DICTIONARY': tokens.Keyword, + 'DISCONNECT': tokens.Keyword, + 'DISPATCH': tokens.Keyword, + 'DO': tokens.Keyword, + 'DOMAIN': tokens.Keyword, + 'DYNAMIC': tokens.Keyword, + 'DYNAMIC_FUNCTION': tokens.Keyword, + 'DYNAMIC_FUNCTION_CODE': tokens.Keyword, + + 'EACH': tokens.Keyword, + 'ENCODING': tokens.Keyword, + 'ENCRYPTED': tokens.Keyword, + 'END-EXEC': tokens.Keyword, + 'EQUALS': tokens.Keyword, + 'ESCAPE': tokens.Keyword, + 'EVERY': tokens.Keyword, + 'EXCEPT': tokens.Keyword, + 'ESCEPTION': tokens.Keyword, + 'EXCLUDING': tokens.Keyword, + 'EXCLUSIVE': tokens.Keyword, + 'EXEC': tokens.Keyword, + 'EXECUTE': tokens.Keyword, + 'EXISTING': tokens.Keyword, + 'EXISTS': tokens.Keyword, + 'EXTERNAL': tokens.Keyword, + 'EXTRACT': tokens.Keyword, + + 'FALSE': tokens.Keyword, + 'FETCH': tokens.Keyword, + 'FINAL': tokens.Keyword, + 'FIRST': tokens.Keyword, + 'FORCE': tokens.Keyword, + 'FOREIGN': tokens.Keyword, + 'FORTRAN': tokens.Keyword, + 'FORWARD': tokens.Keyword, + 'FOUND': tokens.Keyword, + 'FREE': tokens.Keyword, + 'FREEZE': tokens.Keyword, + 'FULL': tokens.Keyword, + 'FUNCTION': tokens.Keyword, + + 'G': tokens.Keyword, + 'GENERAL': tokens.Keyword, + 'GENERATED': tokens.Keyword, + 'GET': tokens.Keyword, + 'GLOBAL': tokens.Keyword, + 'GO': tokens.Keyword, + 'GOTO': tokens.Keyword, + 'GRANT': tokens.Keyword, + 'GRANTED': tokens.Keyword, + 'GROUPING': tokens.Keyword, + + 'HANDLER': tokens.Keyword, + 'HAVING': tokens.Keyword, + 'HIERARCHY': tokens.Keyword, + 'HOLD': tokens.Keyword, + 'HOST': tokens.Keyword, + + 'IDENTITY': tokens.Keyword, + 'IGNORE': tokens.Keyword, + 'ILIKE': tokens.Keyword, + 'IMMEDIATE': tokens.Keyword, + 'IMMUTABLE': tokens.Keyword, + + 'IMPLEMENTATION': tokens.Keyword, + 'IMPLICIT': tokens.Keyword, + 'INCLUDING': tokens.Keyword, + 'INCREMENT': tokens.Keyword, + 'INDEX': tokens.Keyword, + + 'INDITCATOR': tokens.Keyword, + 'INFIX': tokens.Keyword, + 'INHERITS': tokens.Keyword, + 'INITIALIZE': tokens.Keyword, + 'INITIALLY': tokens.Keyword, + 'INOUT': tokens.Keyword, + 'INPUT': tokens.Keyword, + 'INSENSITIVE': tokens.Keyword, + 'INSTANTIABLE': tokens.Keyword, + 'INSTEAD': tokens.Keyword, + 'INTERSECT': tokens.Keyword, + 'INTO': tokens.Keyword, + 'INVOKER': tokens.Keyword, + 'IS': tokens.Keyword, + 'ISNULL': tokens.Keyword, + 'ISOLATION': tokens.Keyword, + 'ITERATE': tokens.Keyword, + + 'K': tokens.Keyword, + 'KEY': tokens.Keyword, + 'KEY_MEMBER': tokens.Keyword, + 'KEY_TYPE': tokens.Keyword, + + 'LANCOMPILER': tokens.Keyword, + 'LANGUAGE': tokens.Keyword, + 'LARGE': tokens.Keyword, + 'LAST': tokens.Keyword, + 'LATERAL': tokens.Keyword, + 'LEADING': tokens.Keyword, + 'LENGTH': tokens.Keyword, + 'LESS': 
tokens.Keyword, + 'LEVEL': tokens.Keyword, + 'LIMIT': tokens.Keyword, + 'LISTEN': tokens.Keyword, + 'LOAD': tokens.Keyword, + 'LOCAL': tokens.Keyword, + 'LOCALTIME': tokens.Keyword, + 'LOCALTIMESTAMP': tokens.Keyword, + 'LOCATION': tokens.Keyword, + 'LOCATOR': tokens.Keyword, + 'LOCK': tokens.Keyword, + 'LOWER': tokens.Keyword, + + 'M': tokens.Keyword, + 'MAP': tokens.Keyword, + 'MATCH': tokens.Keyword, + 'MAXVALUE': tokens.Keyword, + 'MESSAGE_LENGTH': tokens.Keyword, + 'MESSAGE_OCTET_LENGTH': tokens.Keyword, + 'MESSAGE_TEXT': tokens.Keyword, + 'METHOD': tokens.Keyword, + 'MINUTE': tokens.Keyword, + 'MINVALUE': tokens.Keyword, + 'MOD': tokens.Keyword, + 'MODE': tokens.Keyword, + 'MODIFIES': tokens.Keyword, + 'MODIFY': tokens.Keyword, + 'MONTH': tokens.Keyword, + 'MORE': tokens.Keyword, + 'MOVE': tokens.Keyword, + 'MUMPS': tokens.Keyword, + + 'NAMES': tokens.Keyword, + 'NATIONAL': tokens.Keyword, + 'NATURAL': tokens.Keyword, + 'NCHAR': tokens.Keyword, + 'NCLOB': tokens.Keyword, + 'NEW': tokens.Keyword, + 'NEXT': tokens.Keyword, + 'NO': tokens.Keyword, + 'NOCREATEDB': tokens.Keyword, + 'NOCREATEUSER': tokens.Keyword, + 'NONE': tokens.Keyword, + 'NOT': tokens.Keyword, + 'NOTHING': tokens.Keyword, + 'NOTIFY': tokens.Keyword, + 'NOTNULL': tokens.Keyword, + 'NULL': tokens.Keyword, + 'NULLABLE': tokens.Keyword, + 'NULLIF': tokens.Keyword, + + 'OBJECT': tokens.Keyword, + 'OCTET_LENGTH': tokens.Keyword, + 'OF': tokens.Keyword, + 'OFF': tokens.Keyword, + 'OFFSET': tokens.Keyword, + 'OIDS': tokens.Keyword, + 'OLD': tokens.Keyword, + 'ONLY': tokens.Keyword, + 'OPEN': tokens.Keyword, + 'OPERATION': tokens.Keyword, + 'OPERATOR': tokens.Keyword, + 'OPTION': tokens.Keyword, + 'OPTIONS': tokens.Keyword, + 'ORDINALITY': tokens.Keyword, + 'OUT': tokens.Keyword, + 'OUTPUT': tokens.Keyword, + 'OVERLAPS': tokens.Keyword, + 'OVERLAY': tokens.Keyword, + 'OVERRIDING': tokens.Keyword, + 'OWNER': tokens.Keyword, + + 'PAD': tokens.Keyword, + 'PARAMETER': tokens.Keyword, + 'PARAMETERS': tokens.Keyword, + 'PARAMETER_MODE': tokens.Keyword, + 'PARAMATER_NAME': tokens.Keyword, + 'PARAMATER_ORDINAL_POSITION': tokens.Keyword, + 'PARAMETER_SPECIFIC_CATALOG': tokens.Keyword, + 'PARAMETER_SPECIFIC_NAME': tokens.Keyword, + 'PARAMATER_SPECIFIC_SCHEMA': tokens.Keyword, + 'PARTIAL': tokens.Keyword, + 'PASCAL': tokens.Keyword, + 'PENDANT': tokens.Keyword, + 'PLACING': tokens.Keyword, + 'PLI': tokens.Keyword, + 'POSITION': tokens.Keyword, + 'POSTFIX': tokens.Keyword, + 'PRECISION': tokens.Keyword, + 'PREFIX': tokens.Keyword, + 'PREORDER': tokens.Keyword, + 'PREPARE': tokens.Keyword, + 'PRESERVE': tokens.Keyword, + 'PRIMARY': tokens.Keyword, + 'PRIOR': tokens.Keyword, + 'PRIVILEGES': tokens.Keyword, + 'PROCEDURAL': tokens.Keyword, + 'PROCEDURE': tokens.Keyword, + 'PUBLIC': tokens.Keyword, + + 'RAISE': tokens.Keyword, + 'READ': tokens.Keyword, + 'READS': tokens.Keyword, + 'RECHECK': tokens.Keyword, + 'RECURSIVE': tokens.Keyword, + 'REF': tokens.Keyword, + 'REFERENCES': tokens.Keyword, + 'REFERENCING': tokens.Keyword, + 'REINDEX': tokens.Keyword, + 'RELATIVE': tokens.Keyword, + 'RENAME': tokens.Keyword, + 'REPEATABLE': tokens.Keyword, + 'REPLACE': tokens.Keyword, + 'RESET': tokens.Keyword, + 'RESTART': tokens.Keyword, + 'RESTRICT': tokens.Keyword, + 'RESULT': tokens.Keyword, + 'RETURN': tokens.Keyword, + 'RETURNED_LENGTH': tokens.Keyword, + 'RETURNED_OCTET_LENGTH': tokens.Keyword, + 'RETURNED_SQLSTATE': tokens.Keyword, + 'RETURNS': tokens.Keyword, + 'REVOKE': tokens.Keyword, + 'RIGHT': tokens.Keyword, + 'ROLE': tokens.Keyword, + 
'ROLLBACK': tokens.Keyword, + 'ROLLUP': tokens.Keyword, + 'ROUTINE': tokens.Keyword, + 'ROUTINE_CATALOG': tokens.Keyword, + 'ROUTINE_NAME': tokens.Keyword, + 'ROUTINE_SCHEMA': tokens.Keyword, + 'ROW': tokens.Keyword, + 'ROWS': tokens.Keyword, + 'ROW_COUNT': tokens.Keyword, + 'RULE': tokens.Keyword, + + 'SAVE_POINT': tokens.Keyword, + 'SCALE': tokens.Keyword, + 'SCHEMA': tokens.Keyword, + 'SCHEMA_NAME': tokens.Keyword, + 'SCOPE': tokens.Keyword, + 'SCROLL': tokens.Keyword, + 'SEARCH': tokens.Keyword, + 'SECOND': tokens.Keyword, + 'SECURITY': tokens.Keyword, + 'SELF': tokens.Keyword, + 'SENSITIVE': tokens.Keyword, + 'SERIALIZABLE': tokens.Keyword, + 'SERVER_NAME': tokens.Keyword, + 'SESSION': tokens.Keyword, + 'SESSION_USER': tokens.Keyword, + 'SETOF': tokens.Keyword, + 'SETS': tokens.Keyword, + 'SHARE': tokens.Keyword, + 'SHOW': tokens.Keyword, + 'SIMILAR': tokens.Keyword, + 'SIMPLE': tokens.Keyword, + 'SIZE': tokens.Keyword, + 'SOME': tokens.Keyword, + 'SOURCE': tokens.Keyword, + 'SPACE': tokens.Keyword, + 'SPECIFIC': tokens.Keyword, + 'SPECIFICTYPE': tokens.Keyword, + 'SPECIFIC_NAME': tokens.Keyword, + 'SQL': tokens.Keyword, + 'SQLCODE': tokens.Keyword, + 'SQLERROR': tokens.Keyword, + 'SQLEXCEPTION': tokens.Keyword, + 'SQLSTATE': tokens.Keyword, + 'SQLWARNINIG': tokens.Keyword, + 'STABLE': tokens.Keyword, + 'START': tokens.Keyword, + 'STATE': tokens.Keyword, + 'STATEMENT': tokens.Keyword, + 'STATIC': tokens.Keyword, + 'STATISTICS': tokens.Keyword, + 'STDIN': tokens.Keyword, + 'STDOUT': tokens.Keyword, + 'STORAGE': tokens.Keyword, + 'STRICT': tokens.Keyword, + 'STRUCTURE': tokens.Keyword, + 'STYPE': tokens.Keyword, + 'SUBCLASS_ORIGIN': tokens.Keyword, + 'SUBLIST': tokens.Keyword, + 'SUBSTRING': tokens.Keyword, + 'SUM': tokens.Keyword, + 'SYMMETRIC': tokens.Keyword, + 'SYSID': tokens.Keyword, + 'SYSTEM': tokens.Keyword, + 'SYSTEM_USER': tokens.Keyword, + + 'TABLE': tokens.Keyword, + 'TABLE_NAME': tokens.Keyword, + ' TEMP': tokens.Keyword, + 'TEMPLATE': tokens.Keyword, + 'TEMPORARY': tokens.Keyword, + 'TERMINATE': tokens.Keyword, + 'THAN': tokens.Keyword, + 'TIMESTAMP': tokens.Keyword, + 'TIMEZONE_HOUR': tokens.Keyword, + 'TIMEZONE_MINUTE': tokens.Keyword, + 'TO': tokens.Keyword, + 'TOAST': tokens.Keyword, + 'TRAILING': tokens.Keyword, + 'TRANSATION': tokens.Keyword, + 'TRANSACTIONS_COMMITTED': tokens.Keyword, + 'TRANSACTIONS_ROLLED_BACK': tokens.Keyword, + 'TRANSATION_ACTIVE': tokens.Keyword, + 'TRANSFORM': tokens.Keyword, + 'TRANSFORMS': tokens.Keyword, + 'TRANSLATE': tokens.Keyword, + 'TRANSLATION': tokens.Keyword, + 'TREAT': tokens.Keyword, + 'TRIGGER': tokens.Keyword, + 'TRIGGER_CATALOG': tokens.Keyword, + 'TRIGGER_NAME': tokens.Keyword, + 'TRIGGER_SCHEMA': tokens.Keyword, + 'TRIM': tokens.Keyword, + 'TRUE': tokens.Keyword, + 'TRUNCATE': tokens.Keyword, + 'TRUSTED': tokens.Keyword, + 'TYPE': tokens.Keyword, + + 'UNCOMMITTED': tokens.Keyword, + 'UNDER': tokens.Keyword, + 'UNENCRYPTED': tokens.Keyword, + 'UNION': tokens.Keyword, + 'UNIQUE': tokens.Keyword, + 'UNKNOWN': tokens.Keyword, + 'UNLISTEN': tokens.Keyword, + 'UNNAMED': tokens.Keyword, + 'UNNEST': tokens.Keyword, + 'UNTIL': tokens.Keyword, + 'UPPER': tokens.Keyword, + 'USAGE': tokens.Keyword, + 'USER': tokens.Keyword, + 'USER_DEFINED_TYPE_CATALOG': tokens.Keyword, + 'USER_DEFINED_TYPE_NAME': tokens.Keyword, + 'USER_DEFINED_TYPE_SCHEMA': tokens.Keyword, + 'USING': tokens.Keyword, + + 'VACUUM': tokens.Keyword, + 'VALID': tokens.Keyword, + 'VALIDATOR': tokens.Keyword, + 'VALUES': tokens.Keyword, + 'VARIABLE': tokens.Keyword, + 
'VERBOSE': tokens.Keyword, + 'VERSION': tokens.Keyword, + 'VIEW': tokens.Keyword, + 'VOLATILE': tokens.Keyword, + + 'WHENEVER': tokens.Keyword, + 'WITH': tokens.Keyword, + 'WITHOUT': tokens.Keyword, + 'WORK': tokens.Keyword, + 'WRITE': tokens.Keyword, + + 'YEAR': tokens.Keyword, + + 'ZONE': tokens.Keyword, + + + 'ARRAY': tokens.Name.Builtin, + 'BIGINT': tokens.Name.Builtin, + 'BINARY': tokens.Name.Builtin, + 'BIT': tokens.Name.Builtin, + 'BLOB': tokens.Name.Builtin, + 'BOOLEAN': tokens.Name.Builtin, + 'CHAR': tokens.Name.Builtin, + 'CHARACTER': tokens.Name.Builtin, + 'DATE': tokens.Name.Builtin, + 'DEC': tokens.Name.Builtin, + 'DECIMAL': tokens.Name.Builtin, + 'FLOAT': tokens.Name.Builtin, + 'INT': tokens.Name.Builtin, + 'INTEGER': tokens.Name.Builtin, + 'INTERVAL': tokens.Name.Builtin, + 'NUMBER': tokens.Name.Builtin, + 'NUMERIC': tokens.Name.Builtin, + 'REAL': tokens.Name.Builtin, + 'SERIAL': tokens.Name.Builtin, + 'SMALLINT': tokens.Name.Builtin, + 'VARCHAR': tokens.Name.Builtin, + 'VARYING': tokens.Name.Builtin, + 'INT8': tokens.Name.Builtin, + 'SERIAL8': tokens.Name.Builtin, + 'TEXT': tokens.Name.Builtin, } KEYWORDS_COMMON = { - 'SELECT': Keyword.DML, - 'INSERT': Keyword.DML, - 'DELETE': Keyword.DML, - 'UPDATE': Keyword.DML, - 'DROP': Keyword.DDL, - 'CREATE': Keyword.DDL, - 'ALTER': Keyword.DDL, - - 'WHERE': Keyword, - 'FROM': Keyword, - 'INNER': Keyword, - 'JOIN': Keyword, - 'AND': Keyword, - 'OR': Keyword, - 'LIKE': Keyword, - 'ON': Keyword, - 'IN': Keyword, - 'SET': Keyword, - - 'BY': Keyword, - 'GROUP': Keyword, - 'ORDER': Keyword, - 'LEFT': Keyword, - 'OUTER': Keyword, - - 'IF': Keyword, - 'END': Keyword, - 'THEN': Keyword, - 'LOOP': Keyword, - 'AS': Keyword, - 'ELSE': Keyword, - 'FOR': Keyword, - - 'CASE': Keyword, - 'WHEN': Keyword, - 'MIN': Keyword, - 'MAX': Keyword, - 'DISTINCT': Keyword, - + 'SELECT': tokens.Keyword.DML, + 'INSERT': tokens.Keyword.DML, + 'DELETE': tokens.Keyword.DML, + 'UPDATE': tokens.Keyword.DML, + 'DROP': tokens.Keyword.DDL, + 'CREATE': tokens.Keyword.DDL, + 'ALTER': tokens.Keyword.DDL, + + 'WHERE': tokens.Keyword, + 'FROM': tokens.Keyword, + 'INNER': tokens.Keyword, + 'JOIN': tokens.Keyword, + 'AND': tokens.Keyword, + 'OR': tokens.Keyword, + 'LIKE': tokens.Keyword, + 'ON': tokens.Keyword, + 'IN': tokens.Keyword, + 'SET': tokens.Keyword, + + 'BY': tokens.Keyword, + 'GROUP': tokens.Keyword, + 'ORDER': tokens.Keyword, + 'LEFT': tokens.Keyword, + 'OUTER': tokens.Keyword, + + 'IF': tokens.Keyword, + 'END': tokens.Keyword, + 'THEN': tokens.Keyword, + 'LOOP': tokens.Keyword, + 'AS': tokens.Keyword, + 'ELSE': tokens.Keyword, + 'FOR': tokens.Keyword, + + 'CASE': tokens.Keyword, + 'WHEN': tokens.Keyword, + 'MIN': tokens.Keyword, + 'MAX': tokens.Keyword, + 'DISTINCT': tokens.Keyword, } diff --git a/sqlparse/lexer.py b/sqlparse/lexer.py index b7b16af..0e00687 100644 --- a/sqlparse/lexer.py +++ b/sqlparse/lexer.py @@ -14,14 +14,14 @@ import re +from sqlparse import tokens from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON -from sqlparse.tokens import * -from sqlparse.tokens import _TokenType class include(str): pass + class combined(tuple): """Indicates a state combined from multiple states.""" @@ -34,7 +34,7 @@ class combined(tuple): def is_keyword(value): test = value.upper() - return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, Name)), value + return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, tokens.Name)), value def apply_filters(stream, filters, lexer=None): @@ -64,13 +64,13 @@ class LexerMeta(type): assert state[0] != '#', "invalid state name %r" % state 
if state in processed: return processed[state] - tokens = processed[state] = [] + tokenlist = processed[state] = [] rflags = cls.flags for tdef in unprocessed[state]: if isinstance(tdef, include): # it's a state reference assert tdef != state, "circular state reference %r" % state - tokens.extend(cls._process_state( + tokenlist.extend(cls._process_state( unprocessed, processed, str(tdef))) continue @@ -83,7 +83,7 @@ class LexerMeta(type): " %r of %r: %s" % (tdef[0], state, cls, err))) - assert type(tdef[1]) is _TokenType or callable(tdef[1]), \ + assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \ 'token type must be simple type or callable, not %r' % (tdef[1],) if len(tdef) == 2: @@ -122,8 +122,8 @@ class LexerMeta(type): new_state = tdef2 else: assert False, 'unknown new state def %r' % tdef2 - tokens.append((rex, tdef[1], new_state)) - return tokens + tokenlist.append((rex, tdef[1], new_state)) + return tokenlist def process_tokendef(cls): cls._all_tokens = {} @@ -161,34 +161,34 @@ class Lexer: tokens = { 'root': [ - (r'--.*?(\r|\n|\r\n)', Comment.Single), - (r'(\r|\n|\r\n)', Newline), - (r'\s+', Whitespace), - (r'/\*', Comment.Multiline, 'multiline-comments'), - (r':=', Assignment), - (r'::', Punctuation), - (r'[*]', Wildcard), - (r"`(``|[^`])*`", Name), - (r"´(´´|[^´])*´", Name), - (r'@[a-zA-Z_][a-zA-Z0-9_]+', Name), - (r'[<>=~!]+', Operator.Comparsion), - (r'[+/@#%^&|`?^-]+', Operator), - (r'[0-9]+', Number.Integer), + (r'--.*?(\r|\n|\r\n)', tokens.Comment.Single), + (r'(\r|\n|\r\n)', tokens.Newline), + (r'\s+', tokens.Whitespace), + (r'/\*', tokens.Comment.Multiline, 'multiline-comments'), + (r':=', tokens.Assignment), + (r'::', tokens.Punctuation), + (r'[*]', tokens.Wildcard), + (r"`(``|[^`])*`", tokens.Name), + (r"´(´´|[^´])*´", tokens.Name), + (r'@[a-zA-Z_][a-zA-Z0-9_]+', tokens.Name), + (r'[<>=~!]+', tokens.Operator.Comparsion), + (r'[+/@#%^&|`?^-]+', tokens.Operator), + (r'[0-9]+', tokens.Number.Integer), # TODO: Backslash escapes? 
- (r"(''|'.*?[^\\]')", String.Single), - (r'(""|".*?[^\\]")', String.Symbol), # not a real string literal in ANSI SQL - (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', Keyword), - (r'END( IF| LOOP)?\b', Keyword), - (r'CREATE( OR REPLACE)?\b', Keyword.DDL), + (r"(''|'.*?[^\\]')", tokens.String.Single), + (r'(""|".*?[^\\]")', tokens.String.Symbol), # not a real string literal in ANSI SQL + (r'(LEFT |RIGHT )?(INNER |OUTER )?JOIN\b', tokens.Keyword), + (r'END( IF| LOOP)?\b', tokens.Keyword), + (r'CREATE( OR REPLACE)?\b', tokens.Keyword.DDL), (r'[a-zA-Z_][a-zA-Z0-9_]*', is_keyword), - (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', Name.Builtin), - (r'[;:()\[\],\.]', Punctuation), + (r'\$([a-zA-Z_][a-zA-Z0-9_]*)?\$', tokens.Name.Builtin), + (r'[;:()\[\],\.]', tokens.Punctuation), ], 'multiline-comments': [ - (r'/\*', Comment.Multiline, 'multiline-comments'), - (r'\*/', Comment.Multiline, '#pop'), - (r'[^/\*]+', Comment.Multiline), - (r'[/*]', Comment.Multiline) + (r'/\*', tokens.Comment.Multiline, 'multiline-comments'), + (r'\*/', tokens.Comment.Multiline, '#pop'), + (r'[^/\*]+', tokens.Comment.Multiline), + (r'[/*]', tokens.Comment.Multiline) ] } @@ -266,7 +266,7 @@ class Lexer: value = m.group() if value in known_names: yield pos, known_names[value], value - elif type(action) is _TokenType: + elif type(action) is tokens._TokenType: yield pos, action, value elif hasattr(action, '__call__'): ttype, value = action(value) @@ -302,9 +302,9 @@ class Lexer: pos += 1 statestack = ['root'] statetokens = tokendefs['root'] - yield pos, Text, u'\n' + yield pos, tokens.Text, u'\n' continue - yield pos, Error, text[pos] + yield pos, tokens.Error, text[pos] pos += 1 except IndexError: break |
