author | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-02 14:09:21 -0700 |
---|---|---|
committer | Victor Uriarte <victor.m.uriarte@intel.com> | 2016-06-04 15:06:04 -0700 |
commit | 62423c0d5e2e570341d5d0db74982712ff2348c7 (patch) | |
tree | 50a426c798502a5b72a1bad28281a7cc5f85b0b7 | |
parent | c6a5e7ac2a5ecc993f4e5292ab16e6df6b84f26c (diff) | |
download | sqlparse-62423c0d5e2e570341d5d0db74982712ff2348c7.tar.gz | |
Remove undocumented features
These features/functions/classes were added for AntiORM.
A quick look-up didn't show any usage outside of AntiORM.
Closes #246
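For callers that depended on the removed helpers (`getcolumns()`, `getlimit()`, `Tokens2Unicode()`, `Pipeline`), roughly equivalent behaviour can be rebuilt on top of the documented `sqlparse.parse()` API. The sketch below is illustrative only: the function names are invented here, it assumes a sqlparse release from around this commit or later, and it only approximates the removed filters (plain SELECT lists, no nested subqueries).

```python
# Illustrative sketch only -- these helpers are NOT part of sqlparse; they
# approximate the removed getcolumns()/getlimit() using the documented API.
import sqlparse
from sqlparse.sql import Identifier, IdentifierList
from sqlparse.tokens import DML, Keyword, Whitespace


def get_select_columns(statement):
    """Return column names/aliases of a simple SELECT statement."""
    stmt = sqlparse.parse(statement)[0]
    columns = []
    in_select = False
    for token in stmt.tokens:
        if token.ttype is DML and token.value.upper() == 'SELECT':
            in_select = True
        elif in_select and token.ttype is Keyword and token.value.upper() == 'FROM':
            break
        elif in_select and isinstance(token, IdentifierList):
            for ident in token.get_identifiers():
                # get_name() returns the alias when one is present
                columns.append(ident.get_name() if hasattr(ident, 'get_name')
                               else ident.value)
        elif in_select and isinstance(token, Identifier):
            columns.append(token.get_name())
    return columns


def get_limit(statement, default=-1):
    """Return the LIMIT value of a statement, or `default` if absent."""
    stmt = sqlparse.parse(statement)[0]
    tokens = list(stmt.flatten())
    for i, token in enumerate(tokens):
        if token.ttype is Keyword and token.value.upper() == 'LIMIT':
            # The first non-whitespace token after LIMIT is the limit value
            for nxt in tokens[i + 1:]:
                if nxt.ttype in Whitespace:
                    continue
                try:
                    return int(nxt.value)
                except ValueError:
                    return nxt.value
    return default
```

Under those assumptions, `get_select_columns("SELECT child_entry, asdf AS inode, creation FROM links")` should yield `['child_entry', 'inode', 'creation']`, and `get_limit("SELECT * FROM links LIMIT 1")` should yield `1`, which is what the deleted tests expected from `getcolumns()` and `getlimit()`.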
-rw-r--r-- | sqlparse/__init__.py    |   6
-rw-r--r-- | sqlparse/filters.py     | 222
-rw-r--r-- | sqlparse/functions.py   |  44
-rw-r--r-- | sqlparse/pipeline.py    |  31
-rw-r--r-- | sqlparse/utils.py       |  71
-rw-r--r-- | tests/test_filters.py   |  77
-rw-r--r-- | tests/test_functions.py | 161
-rw-r--r-- | tests/test_pipeline.py  |  70

8 files changed, 1 insertion(+), 681 deletions(-)
diff --git a/sqlparse/__init__.py b/sqlparse/__init__.py
index 2943997..d69a3d9 100644
--- a/sqlparse/__init__.py
+++ b/sqlparse/__init__.py
@@ -68,9 +68,3 @@ def split(sql, encoding=None):
     stack = engine.FilterStack()
     stack.split_statements = True
     return [u(stmt).strip() for stmt in stack.run(sql, encoding)]
-
-
-def split2(stream):
-    from sqlparse.engine.filter import StatementFilter
-    splitter = StatementFilter()
-    return list(splitter.process(None, stream))
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index 1cb2f16..ccf8735 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -7,15 +7,8 @@
 import re
 
-from os.path import abspath, join
-
 from sqlparse import sql, tokens as T
 from sqlparse.compat import u, text_type
-from sqlparse.engine import FilterStack
-from sqlparse.pipeline import Pipeline
-from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
-                             String, Whitespace)
-from sqlparse.utils import memoize_generator
 from sqlparse.utils import split_unquoted_newlines
@@ -74,130 +67,6 @@ class TruncateStringFilter(object):
             yield ttype, value
 
 
-class GetComments(object):
-    """Get the comments from a stack"""
-    def process(self, stack, stream):
-        for token_type, value in stream:
-            if token_type in Comment:
-                yield token_type, value
-
-
-class StripComments(object):
-    """Strip the comments from a stack"""
-    def process(self, stack, stream):
-        for token_type, value in stream:
-            if token_type not in Comment:
-                yield token_type, value
-
-
-def StripWhitespace(stream):
-    "Strip the useless whitespaces from a stream leaving only the minimal ones"
-    last_type = None
-    has_space = False
-    ignore_group = frozenset((Comparison, Punctuation))
-
-    for token_type, value in stream:
-        # We got a previous token (not empty first ones)
-        if last_type:
-            if token_type in Whitespace:
-                has_space = True
-                continue
-
-        # Ignore first empty spaces and dot-commas
-        elif token_type in (Whitespace, Whitespace.Newline, ignore_group):
-            continue
-
-        # Yield a whitespace if it can't be ignored
-        if has_space:
-            if not ignore_group.intersection((last_type, token_type)):
-                yield Whitespace, ' '
-            has_space = False
-
-        # Yield the token and set its type for checking with the next one
-        yield token_type, value
-        last_type = token_type
-
-
-class IncludeStatement(object):
-    """Filter that enable a INCLUDE statement"""
-
-    def __init__(self, dirpath=".", maxrecursive=10, raiseexceptions=False):
-        if maxrecursive <= 0:
-            raise ValueError('Max recursion limit reached')
-
-        self.dirpath = abspath(dirpath)
-        self.maxRecursive = maxrecursive
-        self.raiseexceptions = raiseexceptions
-
-        self.detected = False
-
-    @memoize_generator
-    def process(self, stack, stream):
-        # Run over all tokens in the stream
-        for token_type, value in stream:
-            # INCLUDE statement found, set detected mode
-            if token_type in Name and value.upper() == 'INCLUDE':
-                self.detected = True
-                continue
-
-            # INCLUDE statement was found, parse it
-            elif self.detected:
-                # Omit whitespaces
-                if token_type in Whitespace:
-                    continue
-
-                # Found file path to include
-                if token_type in String.Symbol:
-                    # Get path of file to include
-                    path = join(self.dirpath, value[1:-1])
-
-                    try:
-                        f = open(path)
-                        raw_sql = f.read()
-                        f.close()
-
-                    # There was a problem loading the include file
-                    except IOError as err:
-                        # Raise the exception to the interpreter
-                        if self.raiseexceptions:
-                            raise
-
-                        # Put the exception as a comment on the SQL code
-                        yield Comment, u'-- IOError: %s\n' % err
-
-                    else:
-                        # Create new FilterStack to parse readed file
-                        # and add all its tokens to the main stack recursively
-                        try:
-                            filtr = IncludeStatement(self.dirpath,
-                                                     self.maxRecursive - 1,
-                                                     self.raiseexceptions)
-
-                        # Max recursion limit reached
-                        except ValueError as err:
-                            # Raise the exception to the interpreter
-                            if self.raiseexceptions:
-                                raise
-
-                            # Put the exception as a comment on the SQL code
-                            yield Comment, u'-- ValueError: %s\n' % err
-
-                        stack = FilterStack()
-                        stack.preprocess.append(filtr)
-
-                        for tv in stack.run(raw_sql):
-                            yield tv
-
-                    # Set normal mode
-                    self.detected = False
-
-                # Don't include any token while in detected mode
-                continue
-
-            # Normal token
-            yield token_type, value
-
-
 # ----------------------
 # statement process
@@ -520,57 +389,6 @@ class RightMarginFilter(object):
         group.tokens = self._process(stack, group, group.tokens)
 
 
-class ColumnsSelect(object):
-    """Get the columns names of a SELECT query"""
-    def process(self, stack, stream):
-        mode = 0
-        oldValue = ""
-        parenthesis = 0
-
-        for token_type, value in stream:
-            # Ignore comments
-            if token_type in Comment:
-                continue
-
-            # We have not detected a SELECT statement
-            if mode == 0:
-                if token_type in Keyword and value == 'SELECT':
-                    mode = 1
-
-            # We have detected a SELECT statement
-            elif mode == 1:
-                if value == 'FROM':
-                    if oldValue:
-                        yield oldValue
-
-                    mode = 3    # Columns have been checked
-
-                elif value == 'AS':
-                    oldValue = ""
-                    mode = 2
-
-                elif (token_type == Punctuation
-                      and value == ',' and not parenthesis):
-                    if oldValue:
-                        yield oldValue
-                    oldValue = ""
-
-                elif token_type not in Whitespace:
-                    if value == '(':
-                        parenthesis += 1
-                    elif value == ')':
-                        parenthesis -= 1
-
-                    oldValue += value
-
-            # We are processing an AS keyword
-            elif mode == 2:
-                # We check also for Keywords because a bug in SQLParse
-                if token_type == Name or token_type == Keyword:
-                    yield value
-                    mode = 1
-
-
 # ---------------------------
 # postprocess
@@ -583,15 +401,6 @@ class SerializerUnicode(object):
         return res
 
 
-def Tokens2Unicode(stream):
-    result = ""
-
-    for _, value in stream:
-        result += u(value)
-
-    return result
-
-
 class OutputFilter(object):
     varname_prefix = ''
@@ -704,34 +513,3 @@ class OutputPHPFilter(OutputFilter):
         # Close quote
         yield sql.Token(T.Text, '"')
         yield sql.Token(T.Punctuation, ';')
-
-
-class Limit(object):
-    """Get the LIMIT of a query.
-
-    If not defined, return -1 (SQL specification for no LIMIT query)
-    """
-    def process(self, stack, stream):
-        index = 7
-        stream = list(stream)
-        stream.reverse()
-
-        # Run over all tokens in the stream from the end
-        for token_type, value in stream:
-            index -= 1
-
-#            if index and token_type in Keyword:
-            if index and token_type in Keyword and value == 'LIMIT':
-                return stream[4 - index][1]
-
-        return -1
-
-
-def compact(stream):
-    """Function that return a compacted version of the stream"""
-    pipe = Pipeline()
-
-    pipe.append(StripComments())
-    pipe.append(StripWhitespace)
-
-    return pipe(stream)
diff --git a/sqlparse/functions.py b/sqlparse/functions.py
deleted file mode 100644
index e54457e..0000000
--- a/sqlparse/functions.py
+++ /dev/null
@@ -1,44 +0,0 @@
-'''
-Created on 17/05/2012
-
-@author: piranna
-
-Several utility functions to extract info from the SQL sentences
-'''
-
-from sqlparse.filters import ColumnsSelect, Limit
-from sqlparse.pipeline import Pipeline
-from sqlparse.tokens import Keyword, Whitespace
-
-
-def getlimit(stream):
-    """Function that return the LIMIT of a input SQL """
-    pipe = Pipeline()
-
-    pipe.append(Limit())
-
-    result = pipe(stream)
-    try:
-        return int(result)
-    except ValueError:
-        return result
-
-
-def getcolumns(stream):
-    """Function that return the colums of a SELECT query"""
-    pipe = Pipeline()
-
-    pipe.append(ColumnsSelect())
-
-    return pipe(stream)
-
-
-class IsType(object):
-    """Functor that return is the statement is of a specific type"""
-    def __init__(self, type):
-        self.type = type
-
-    def __call__(self, stream):
-        for token_type, value in stream:
-            if token_type not in Whitespace:
-                return token_type in Keyword and value == self.type
diff --git a/sqlparse/pipeline.py b/sqlparse/pipeline.py
deleted file mode 100644
index 34dad19..0000000
--- a/sqlparse/pipeline.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) 2011 Jesus Leganes "piranna", piranna@gmail.com
-#
-# This module is part of python-sqlparse and is released under
-# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
-
-from types import GeneratorType
-
-
-class Pipeline(list):
-    """Pipeline to process filters sequentially"""
-
-    def __call__(self, stream):
-        """Run the pipeline
-
-        Return a static (non generator) version of the result
-        """
-
-        # Run the stream over all the filters on the pipeline
-        for filter in self:
-            # Functions and callable objects (objects with '__call__' method)
-            if callable(filter):
-                stream = filter(stream)
-
-            # Normal filters (objects with 'process' method)
-            else:
-                stream = filter.process(None, stream)
-
-        # If last filter return a generator, staticalize it inside a list
-        if isinstance(stream, GeneratorType):
-            return list(stream)
-        return stream
diff --git a/sqlparse/utils.py b/sqlparse/utils.py
index 2513c26..4da44c6 100644
--- a/sqlparse/utils.py
+++ b/sqlparse/utils.py
@@ -7,78 +7,9 @@
 import itertools
 import re
 
-from collections import OrderedDict, deque
+from collections import deque
 from contextlib import contextmanager
 
-
-class Cache(OrderedDict):
-    """Cache with LRU algorithm using an OrderedDict as basis
-    """
-
-    def __init__(self, maxsize=100):
-        OrderedDict.__init__(self)
-
-        self._maxsize = maxsize
-
-    def __getitem__(self, key, *args, **kwargs):
-        # Get the key and remove it from the cache, or raise KeyError
-        value = OrderedDict.__getitem__(self, key)
-        del self[key]
-
-        # Insert the (key, value) pair on the front of the cache
-        OrderedDict.__setitem__(self, key, value)
-
-        # Return the value from the cache
-        return value
-
-    def __setitem__(self, key, value, *args, **kwargs):
-        # Key was inserted before, remove it so we put it at front later
-        if key in self:
-            del self[key]
-
-        # Too much items on the cache, remove the least recent used
-        elif len(self) >= self._maxsize:
-            self.popitem(False)
-
-        # Insert the (key, value) pair on the front of the cache
-        OrderedDict.__setitem__(self, key, value, *args, **kwargs)
-
-
-def memoize_generator(func):
-    """Memoize decorator for generators
-
-    Store `func` results in a cache according to their arguments as 'memoize'
-    does but instead this works on decorators instead of regular functions.
-    Obviusly, this is only useful if the generator will always return the same
-    values for each specific parameters...
-    """
-    cache = Cache()
-
-    def wrapped_func(*args, **kwargs):
-        params = (args, tuple(sorted(kwargs.items())))
-
-        # Look if cached
-        try:
-            cached = cache[params]
-
-        # Not cached, exec and store it
-        except KeyError:
-            cached = []
-
-            for item in func(*args, **kwargs):
-                cached.append(item)
-                yield item
-
-            cache[params] = cached
-
-        # Cached, yield its items
-        else:
-            for item in cached:
-                yield item
-
-    return wrapped_func
-
-
 # This regular expression replaces the home-cooked parser that was here before.
 # It is much faster, but requires an extra post-processing step to get the
 # desired results (that are compatible with what you would expect from the
diff --git a/tests/test_filters.py b/tests/test_filters.py
deleted file mode 100644
index 925b0b6..0000000
--- a/tests/test_filters.py
+++ /dev/null
@@ -1,77 +0,0 @@
-'''
-Created on 24/03/2012
-
-@author: piranna
-'''
-import unittest
-
-from sqlparse.filters import StripWhitespace, Tokens2Unicode
-from sqlparse.lexer import tokenize
-
-
-class Test__StripWhitespace(unittest.TestCase):
-    sql = """INSERT INTO dir_entries(type)VALUES(:type);
-
-        INSERT INTO directories(inode)
-        VALUES(:inode)
-        LIMIT 1"""
-
-    sql2 = """SELECT child_entry,asdf AS inode, creation
-        FROM links
-        WHERE parent_dir == :parent_dir AND name == :name
-        LIMIT 1"""
-
-    sql3 = """SELECT
-    0 AS st_dev,
-    0 AS st_uid,
-    0 AS st_gid,
-
-    dir_entries.type AS st_mode,
-    dir_entries.inode AS st_ino,
-    COUNT(links.child_entry) AS st_nlink,
-
-    :creation AS st_ctime,
-    dir_entries.access AS st_atime,
-    dir_entries.modification AS st_mtime,
-
-    COALESCE(files.size,0) AS st_size,
-    COALESCE(files.size,0) AS size
-
-FROM dir_entries
-    LEFT JOIN files
-        ON dir_entries.inode == files.inode
-    LEFT JOIN links
-        ON dir_entries.inode == links.child_entry
-
-WHERE dir_entries.inode == :inode
-
-GROUP BY dir_entries.inode
-LIMIT 1"""
-
-    def test_StripWhitespace1(self):
-        self.assertEqual(
-            Tokens2Unicode(StripWhitespace(tokenize(self.sql))),
-            'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
-            'directories(inode)VALUES(:inode)LIMIT 1')
-
-    def test_StripWhitespace2(self):
-        self.assertEqual(
-            Tokens2Unicode(StripWhitespace(tokenize(self.sql2))),
-            'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
-            'parent_dir==:parent_dir AND name==:name LIMIT 1')
-
-    def test_StripWhitespace3(self):
-        self.assertEqual(
-            Tokens2Unicode(StripWhitespace(tokenize(self.sql3))),
-            'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
-            'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
-            'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
-            'dir_entries.modification AS st_mtime,COALESCE(files.size,0)AS '
-            'st_size,COALESCE(files.size,0)AS size FROM dir_entries LEFT JOIN'
-            ' files ON dir_entries.inode==files.inode LEFT JOIN links ON '
-            'dir_entries.inode==links.child_entry WHERE dir_entries.inode=='
-            ':inode GROUP BY dir_entries.inode LIMIT 1')
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/tests/test_functions.py b/tests/test_functions.py
deleted file mode 100644
index fd2774e..0000000
--- a/tests/test_functions.py
+++ /dev/null
@@ -1,161 +0,0 @@
-'''
-Created on 13/02/2012
-
-@author: piranna
-'''
-from unittest import main, TestCase
-
-from sqlparse.filters import IncludeStatement, Tokens2Unicode
-from sqlparse.lexer import tokenize
-
-from sqlparse.filters import compact
-from sqlparse.functions import getcolumns, getlimit, IsType
-from tests.utils import FILES_DIR
-
-
-class Test_IncludeStatement(TestCase):
-    sql = """-- type: script
-        -- return: integer
-
-        INCLUDE "_Make_DirEntry.sql";
-
-        INSERT INTO directories(inode)
-        VALUES(:inode)
-        LIMIT 1"""
-
-    def test_includeStatement(self):
-        stream = tokenize(self.sql)
-        includeStatement = IncludeStatement(FILES_DIR,
-                                            raiseexceptions=True)
-        stream = includeStatement.process(None, stream)
-        stream = compact(stream)
-
-        result = Tokens2Unicode(stream)
-
-        self.assertEqual(
-            result, (
-                'INSERT INTO dir_entries(type)VALUES(:type);INSERT INTO '
-                'directories(inode)VALUES(:inode)LIMIT 1'))
-
-
-class Test_SQL(TestCase):
-    sql = """-- type: script
-        -- return: integer
-
-        INSERT INTO directories(inode)
-        VALUES(:inode)
-        LIMIT 1"""
-
-    sql2 = """SELECT child_entry,asdf AS inode, creation
-        FROM links
-        WHERE parent_dir == :parent_dir AND name == :name
-        LIMIT 1"""
-
-    sql3 = """SELECT
-    0 AS st_dev,
-    0 AS st_uid,
-    0 AS st_gid,
-
-    dir_entries.type AS st_mode,
-    dir_entries.inode AS st_ino,
-    COUNT(links.child_entry) AS st_nlink,
-
-    :creation AS st_ctime,
-    dir_entries.access AS st_atime,
-    dir_entries.modification AS st_mtime,
----    :creation AS st_ctime,
----    CAST(STRFTIME('%s',dir_entries.access) AS INTEGER) AS st_atime,
----    CAST(STRFTIME('%s',dir_entries.modification) AS INTEGER) AS st_mtime,
-
-    COALESCE(files.size,0) AS st_size,  -- Python-FUSE
-    COALESCE(files.size,0) AS size      -- PyFilesystem
-
-FROM dir_entries
-    LEFT JOIN files
-        ON dir_entries.inode == files.inode
-    LEFT JOIN links
-        ON dir_entries.inode == links.child_entry
-
-WHERE dir_entries.inode == :inode
-
-GROUP BY dir_entries.inode
-LIMIT 1"""
-
-
-class Test_Compact(Test_SQL):
-    def test_compact1(self):
-        stream = compact(tokenize(self.sql))
-
-        result = Tokens2Unicode(stream)
-
-        self.assertEqual(result,
-                         'INSERT INTO directories(inode)VALUES(:inode)LIMIT 1')
-
-    def test_compact2(self):
-        stream = tokenize(self.sql2)
-
-        result = compact(stream)
-
-        self.assertEqual(
-            Tokens2Unicode(result),
-            'SELECT child_entry,asdf AS inode,creation FROM links WHERE '
-            'parent_dir==:parent_dir AND name==:name LIMIT 1')
-
-    def test_compact3(self):
-        stream = tokenize(self.sql3)
-
-        result = compact(stream)
-
-        self.assertEqual(
-            Tokens2Unicode(result),
-            'SELECT 0 AS st_dev,0 AS st_uid,0 AS st_gid,dir_entries.type AS '
-            'st_mode,dir_entries.inode AS st_ino,COUNT(links.child_entry)AS '
-            'st_nlink,:creation AS st_ctime,dir_entries.access AS st_atime,'
-            'dir_entries.modification AS st_mtime,COALESCE(files.size,0)AS '
-            'st_size,COALESCE(files.size,0)AS size FROM dir_entries LEFT JOIN'
-            ' files ON dir_entries.inode==files.inode LEFT JOIN links ON '
-            'dir_entries.inode==links.child_entry WHERE dir_entries.inode=='
-            ':inode GROUP BY dir_entries.inode LIMIT 1')
-
-
-class Test_GetColumns(Test_SQL):
-    def test_getcolumns1(self):
-        columns = getcolumns(tokenize(self.sql))
-        self.assertEqual(columns, [])
-
-    def test_getcolumns2(self):
-        columns = getcolumns(tokenize(self.sql2))
-        self.assertEqual(columns, ['child_entry', 'inode', 'creation'])
-
-    def test_getcolumns3(self):
-        columns = getcolumns(tokenize(self.sql3))
-        self.assertEqual(columns, ['st_dev', 'st_uid', 'st_gid', 'st_mode',
-                                   'st_ino', 'st_nlink', 'st_ctime',
-                                   'st_atime', 'st_mtime', 'st_size', 'size'])
-
-
-class Test_GetLimit(Test_SQL):
-    def test_getlimit1(self):
-        limit = getlimit(tokenize(self.sql))
-        self.assertEqual(limit, 1)
-
-    def test_getlimit2(self):
-        limit = getlimit(tokenize(self.sql2))
-        self.assertEqual(limit, 1)
-
-    def test_getlimit3(self):
-        limit = getlimit(tokenize(self.sql3))
-        self.assertEqual(limit, 1)
-
-
-class Test_IsType(Test_SQL):
-    def test_istype2(self):
-        stream = tokenize(self.sql2)
-        self.assertTrue(IsType('SELECT')(stream))
-
-        stream = tokenize(self.sql2)
-        self.assertFalse(IsType('INSERT')(stream))
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
deleted file mode 100644
index 3442a5b..0000000
--- a/tests/test_pipeline.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import unittest
-
-from sqlparse.filters import ColumnsSelect
-from sqlparse.lexer import tokenize
-from sqlparse.pipeline import Pipeline
-
-
-class Test(unittest.TestCase):
-
-    def setUp(self):
-        self.pipe = Pipeline()
-        self.pipe.append(tokenize)
-        self.pipe.append(ColumnsSelect())
-
-    def test_1(self):
-        sql = """
-        -- type: script
-        -- return: integer
-
-        INCLUDE "Direntry.make.sql";
-
-        INSERT INTO directories(inode)
-        VALUES(:inode)
-        LIMIT 1"""
-        self.assertEqual([], self.pipe(sql))
-
-    def test_2(self):
-        sql = """
-        SELECT child_entry,asdf AS inode, creation
-        FROM links
-        WHERE parent_dir == :parent_dir AND name == :name
-        LIMIT 1"""
-        self.assertEqual([u'child_entry', u'inode', u'creation'],
-                         self.pipe(sql))
-
-    def test_3(self):
-        sql = """
-SELECT
-0 AS st_dev,
-0 AS st_uid,
-0 AS st_gid,
-
-dir_entries.type AS st_mode,
-dir_entries.inode AS st_ino,
-COUNT(links.child_entry) AS st_nlink,
-
-:creation AS st_ctime,
-dir_entries.access AS st_atime,
-dir_entries.modification AS st_mtime,
--- :creation AS st_ctime,
--- CAST(STRFTIME('%s',dir_entries.access) AS INTEGER) AS st_atime,
--- CAST(STRFTIME('%s',dir_entries.modification) AS INTEGER) AS st_mtime,
-
-COALESCE(files.size,0) AS st_size, -- Python-FUSE
-COALESCE(files.size,0) AS size -- PyFilesystem
-
-FROM dir_entries
-LEFT JOIN files
-ON dir_entries.inode == files.inode
-LEFT JOIN links
-ON dir_entries.inode == links.child_entry
-
-WHERE dir_entries.inode == :inode
-
-GROUP BY dir_entries.inode
-LIMIT 1"""
-        self.assertEqual([u'st_dev', u'st_uid', u'st_gid', u'st_mode',
-                          u'st_ino', u'st_nlink', u'st_ctime',
-                          u'st_atime', u'st_mtime', u'st_size', u'size'],
-                         self.pipe(sql))