author     Michele Simionato <michele.simionato@gmail.com>  2014-07-29 11:57:01 +0200
committer  Michele Simionato <michele.simionato@gmail.com>  2014-07-29 11:57:01 +0200
commit     807443ef3d043565a00ba475aa8d132006304b77 (patch)
tree       e8e2b4a22bc75da4c67c708e91185f2610eeacc7
parent     90ecdcfcdb3fb64d6f5db609a97d9c18d54652ee (diff)
download   micheles-master.tar.gz

Removed the old sqlplain code (HEAD, master)
-rw-r--r--  sqlplain/Makefile                        5
-rw-r--r--  sqlplain/__init__.py                     2
-rw-r--r--  sqlplain/configurator.py                78
-rw-r--r--  sqlplain/connection.py                 298
-rw-r--r--  sqlplain/doc/__init__.py                 0
-rw-r--r--  sqlplain/doc/book_importer.py           46
-rw-r--r--  sqlplain/doc/books1.sql                 42
-rw-r--r--  sqlplain/doc/cache_ex.py                33
-rw-r--r--  sqlplain/doc/compare.py                 67
-rw-r--r--  sqlplain/doc/compare_tables.py          32
-rw-r--r--  sqlplain/doc/doc.py                    752
-rw-r--r--  sqlplain/doc/first-page.txt            125
-rw-r--r--  sqlplain/doc/grant_perm.py              16
-rw-r--r--  sqlplain/doc/logtable.py                 8
-rw-r--r--  sqlplain/doc/queries.py                  8
-rw-r--r--  sqlplain/doc/sql_repl.py                72
-rw-r--r--  sqlplain/doc/strip_blanks.py            50
-rw-r--r--  sqlplain/doc/tables.py                  90
-rw-r--r--  sqlplain/doc/threadlocal_ex.py          39
-rw-r--r--  sqlplain/memoize.py                     40
-rw-r--r--  sqlplain/mssql_support.py               84
-rw-r--r--  sqlplain/mssql_util.py                 125
-rw-r--r--  sqlplain/namedtuple.py                  38
-rw-r--r--  sqlplain/pager.py                       70
-rw-r--r--  sqlplain/postgres_support.py            25
-rw-r--r--  sqlplain/postgres_util.py              113
-rw-r--r--  sqlplain/runtransac.py                  36
-rw-r--r--  sqlplain/sql_support.py                 91
-rw-r--r--  sqlplain/sqlite_support.py              16
-rw-r--r--  sqlplain/sqlite_util.py                 44
-rw-r--r--  sqlplain/table.py                      262
-rw-r--r--  sqlplain/tests/books-data.sql           12
-rw-r--r--  sqlplain/tests/books-schema.sql          9
-rw-r--r--  sqlplain/tests/sqlplain_ex.py           24
-rw-r--r--  sqlplain/tests/test_bcp.py               8
-rw-r--r--  sqlplain/tests/test_million.py          60
-rw-r--r--  sqlplain/tests/test_pager.py            14
-rw-r--r--  sqlplain/tests/test_qmark2pyformat.py   53
-rw-r--r--  sqlplain/tests/test_reset.py            18
-rw-r--r--  sqlplain/tests/test_table.py             0
-rw-r--r--  sqlplain/tests/test_truncate.py         25
-rw-r--r--  sqlplain/uri.py                        119
-rw-r--r--  sqlplain/util.py                       310
43 files changed, 0 insertions, 3359 deletions
diff --git a/sqlplain/Makefile b/sqlplain/Makefile
deleted file mode 100644
index cc97ed5..0000000
--- a/sqlplain/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-count:
- wc -l __init__.py configurator.py connection.py \
- mssql_support.py postgres_support.py sqlite_support.py \
- mssql_util.py postgres_util.py sqlite_util.py \
- sql_support.py uri.py util.py table.py runtransac.py memoize.py
diff --git a/sqlplain/__init__.py b/sqlplain/__init__.py
deleted file mode 100644
index f7694c0..0000000
--- a/sqlplain/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from sqlplain.connection import LazyConnection as connect
-from sqlplain.sql_support import do
diff --git a/sqlplain/configurator.py b/sqlplain/configurator.py
deleted file mode 100644
index 8bef7d1..0000000
--- a/sqlplain/configurator.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""
-This module defines a singleton configurator object with a single
-public method .get_uri(alias).
-When .get_uri is called, the environment variable $SQLPLAIN is looked at.
-It should contain the name of a configuration file. If $SQLPLAIN is missing,
-the configuration file name is assumed to be ~/.sqlplain. The configuration
-file must exist and must contain a section [uri] as follows:
-
-[uri]
-dev: mssql://user:passwd@host:port/dev_db
-test: mssql://user:passwd@host:port/test_db
-prod: mssql://user:passwd@host:port/prod_db
-
-Then .get_uri('dev') returns the URI mssql://user:passwd@host:port/dev_db.
-Analogously for 'test' and 'prod'.
-
-The configuration file may also contain a [dir] section specifying the
-full pathname for the directory containing the creational scripts of
-the corresponding database.
-"""
-
-import os
-from ConfigParser import RawConfigParser # no magic interpolation
-
-class ReadOnlyObject(object):
- """
-    Take a list [(name, value), ...] and return a read-only object
- with associated attributes. The names cannot be private. The ordering
- is preserved in the list ._names. The object has a ._name attribute
- useful for debugging (the default is 'anonymous').
- """
-    def __init__(self, items, name='anonymous'):
-        names = []
-        for n, value in items:  # 'n', not 'name': do not clobber the name argument
-            if n.startswith('_'):
-                raise TypeError('Inner attributes cannot begin with "_"')
-            object.__setattr__(self, n, value)
-            names.append(n)
-        object.__setattr__(self, '_names', names)
-        object.__setattr__(self, '_name', name)
- def __iter__(self):
- for name in self._names:
- yield name, getattr(self, name)
- def __contains__(self, name):
- return name in self._names
-    def __len__(self):
-        return len(self._names)
- def __setattr__(self, name, value):
-        if name in self._names:
- raise TypeError('Read-only object!')
- else:
- object.__setattr__(self, name, value)
- def __str__(self):
- return '\n'.join('%s=%s' % (k, v) for k, v in self)
- def __repr__(self):
- return '<%s:%s>' % (self.__class__.__name__, self._name)
-
-class _Configurator(object): # singleton
- _initialized = False
-
- def _initialize(self):
- "You may want to call this again if you modify the config file"
- cfp = RawConfigParser()
- self._conf_file = os.environ.get(
- 'SQLPLAIN', os.path.expanduser('~/.sqlplain'))
- cfp.readfp(file(self._conf_file))
- self._conf_obj = ReadOnlyObject(
- [(sect, ReadOnlyObject(cfp.items(sect), sect))
- for sect in cfp.sections()], self._conf_file)
- self._initialized = True
- self._databases = self._conf_obj.uri._names
-
- def __getattr__(self, name):
- if not self._initialized:
- self._initialize()
- return getattr(self._conf_obj, name)
-
-configurator = _Configurator()
diff --git a/sqlplain/connection.py b/sqlplain/connection.py
deleted file mode 100644
index c93d760..0000000
--- a/sqlplain/connection.py
+++ /dev/null
@@ -1,298 +0,0 @@
-import sys, threading, itertools, string
-from operator import attrgetter
-try:
- from collections import namedtuple
-except ImportError:
- from sqlplain.namedtuple import namedtuple
-from sqlplain.uri import URI
-from sqlplain.sql_support import get_args_templ
-from decorator import decorator
-
-class NotFoundError(Exception):
- pass
-
-@decorator
-def retry(func, conn, *args, **kw):
- """
-    A decorator making a function taking a connection as first argument
-    retry once with a fresh connection in case of errors
- """
- try:
- return func(conn, *args, **kw)
- except conn.driver.Error, e:
- conn.close() # retry with a fresh connection
- return func(conn, *args, **kw)
-
-Field = namedtuple(
- 'Field',
- 'name type_code display_size internal_size precision scale null_ok'.split())
-
-counter = itertools.count(1)
-
-def noname():
- return 'noname%d' % counter.next()
-
-class TupleList(list):
- "Used as result of LazyConn.execute"
- header = None
- rowcount = None
- descr = None
-
-class Transaction(object):
- """
- A wrapper around database actions with signature (conn, *args, **kw).
- """
- def __init__(self, action, conn, *args, **kw):
- self.action = action
- self.conn = conn
- self.args = args
- self.kw = kw
-
-    def run(self, conn=None, args=None, kw=None, commit=True):
-        "Execute the action in a transaction"
-        conn = conn or self.conn
-        args = args or self.args
-        kw = kw or self.kw
-        try:
-            result = self.action(conn, *args, **kw)
-        except Exception, e:
-            conn.rollback()
-            raise e.__class__, e, sys.exc_info()[2]
-        else:
-            # NB: returning directly from the try block would skip this
-            # else clause, so the result is returned only after the commit
-            if commit:
-                conn.commit()
-            else:
-                conn.rollback()
-            return result
-
-class _Storage(object):
- "A place where to store the low level connection and cursor"
-
- @classmethod
- def new(cls, connect, args):
- "Make a subclass of _Storage and instantiate it"
- subc = type('%sSubclass' % cls.__name__, (cls,),
- dict(connect=staticmethod(connect), args=args))
- return subc()
-
- @property
- def conn(self):
- "Return the low level connection"
- conn = getattr(self, '_conn', None)
- if conn is None:
- connect, args = self.__class__.connect, self.__class__.args
- conn = self._conn = connect(*args)
- return conn
-
- @property
- def curs(self):
- "Return the low level cursor"
- curs = getattr(self, '_curs', None)
- if curs is None:
- curs = self._curs = self.conn.cursor()
- return curs
-
- def close(self):
- """The next time you will call an active method, a fresh new
- connection will be instantiated"""
- if getattr(self, '_curs', False):
- try:
- self._curs.close()
- except: # ignore if already closed
- pass
- self._curs = None
- if getattr(self, '_conn', False):
- try:
- self._conn.close()
- except: # ignore if already closed
- pass
- self._conn = None
-
-class _ThreadLocalStorage(threading.local, _Storage):
- "A threadlocal object where to store low level connection and cursor"
-
-class LazyConnection(object):
- """
-    A lazy connection object. It is lazy since the database connection
-    is performed at execution time, not at initialization time. Notice
-    that this class does not manage any kind of logging, on purpose.
-    There is however a ``chatty`` flag for ease of debugging.
-    """
- def __init__(self, uri, isolation_level=None, threadlocal=False,
- params='SEQUENCE'):
- if params not in ('SEQUENCE', 'MAPPING'):
- raise TypeError("params must be 'SEQUENCE' or 'MAPPING', you "
- "passed %s" % params)
- self.params = params
- self.uri = URI(uri)
- self.name = self.uri['database']
- self.dbtype = self.uri['dbtype']
- self.driver, params = self.uri.get_driver_params()
- connect = self.driver.connect
- args = params, isolation_level
- self.chatty = False
- self.isolation_level = isolation_level
- self.threadlocal = threadlocal
- if threadlocal:
- self._storage = _ThreadLocalStorage.new(connect, args)
- else:
- self._storage = _Storage.new(connect, args)
-
- def _raw_execute(self, templ, args):
- """
- Call a dbapi2 cursor; return the rowcount or a list of tuples,
- plus an header (None in the case of the rowcount).
- """
- cursor = self._storage.curs
- try:
- # this special casing is needed for psycopg2
- if args:
- cursor.execute(templ, args)
- else:
- cursor.execute(templ)
- except Exception, e:
- tb = sys.exc_info()[2]
- raise e.__class__, '%s\nQUERY WAS:%s%s' % (e, templ, args), tb
- descr = cursor.description
- if descr is None: # after an update
- return None, cursor.rowcount
- else: # after a select
- return descr, cursor.fetchall()
-
- def execute(self, templ, args=(), ntuple=None, scalar=False):
- "args must be a sequence, not a dictionary"
- if self.dbtype == 'mssql':
- # converts unicode arguments to utf8
- lst = []
- for a in args:
- if isinstance(a, bool):
- lst.append(int(a))
- elif isinstance(a, unicode):
- lst.append(a.encode('utf8'))
- else:
- lst.append(a)
- args = tuple(lst)
- if self.driver.placeholder: # the template has to be interpolated
- argnames, templ = get_args_templ(templ, self.driver.placeholder)
- if len(argnames) != len(args): # especially useful for mssql
- raise TypeError(
- "TypeError when executing %s\nExpected %d arguments (%s), got %d %s" %
- (templ, len(argnames), ', '.join(argnames), len(args), args))
- if self.params == 'MAPPING':
- # replace the passed dictionary with the corresponding tuple
- args = tuple(args[name] for name in argnames)
- descr, res = self._raw_execute(templ, args)
- cursor = self._storage.curs # needed to make the reset work
- if self.chatty:
- print(cursor.rowcount, templ, args)
- if scalar: # you expected a scalar result
- if not res:
- raise NotFoundError(
- "No result\nQUERY WAS: %s%s\n" % (templ, args))
- elif len(res) > 1:
- raise ValueError(
- "Expected to get a scalar result, got %s\nQUERY WAS:%s%s\n"
- % (res, templ, args))
- return res[0][0]
- if descr: # the query was a SELECT
-            fields = [Field(*d) for d in descr]
- header = [f.name or noname() for f in fields]
- if ntuple is None:
- Ntuple = namedtuple('DBTuple', header)
- elif isinstance(ntuple, str):
- Ntuple = namedtuple(ntuple, header)
- else:
- Ntuple = ntuple
-            res = TupleList(Ntuple(*row) for row in res)
-            res.descr = fields
-            res.header = Ntuple(*header)
- return res
-
- def executescript(self, sql, *dicts, **kw):
- "A driver-independent method to execute sql templates"
- d = {}
- for dct in dicts + (kw,):
- d.update(dct)
- if d:
- sql = string.Template(sql).substitute(d)
- if self.dbtype == 'sqlite':
- self._storage.curs.executescript(sql)
- else: # psycopg and pymssql are already able to execute chunks
- self.execute(sql)
-
- def cursor(self):
- "Return a new cursor at each call. Here for DB API 2 compatibility"
- return self._storage.conn.cursor()
-
- def open(self):
- "Return the low level underlying connection"
- return self._storage.conn
-
- def close(self):
- """The next time you will call an active method, a fresh new
- connection will be instantiated"""
- self._storage.close()
-
- def rollback(self):
- return self._storage.conn.rollback()
-
- def commit(self):
- return self._storage.conn.commit()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_class, exc, tb):
- if exc_class:
- self.rollback()
- raise exc_class, exc, tb
- else:
- self.commit()
-
- def __repr__(self):
- return "<%s %s, isolation_level=%s>" % (
- self.__class__.__name__, self.uri, self.isolation_level)
-
- @property
- def rowcount(self):
- return self._storage.curs.rowcount
-
-class RetryingConnection(LazyConnection):
- """A LazyConnection which retries once in case of execution errors.
- It makes sense for long running applications in autocommit mode."""
- _raw_execute = retry(LazyConnection._raw_execute.im_func)
-
-class NullObject(object):
- '''Implements the NullObject pattern.
-
- >>> n = NullObject()
- >>> n.dosomething(1,2,3)
- '''
- def __getattr__(self, name):
- return lambda *a, **k: None
- def __repr__(self):
- return 'None'
- def __nonzero__(self):
- return False
-    def __iter__(self):
-        return iter(())
- def __call__(self, *a, **k):
- return None
-
-class FakeConnection(object):
- def __init__(self, iodict):
- self.iodict = iodict
- self._storage = NullObject()
- def execute(self, templ, args=()):
- return self.iodict[(templ,) + args]
- def executescript(self, templ, *dicts, **kw):
- pass
- def commit(self):
- pass
- def rollback(self):
- pass
- def close(self):
- pass
- def __enter__(self):
- return self
-    def __exit__(self, exctype=None, exc=None, tb=None):
- pass
diff --git a/sqlplain/doc/__init__.py b/sqlplain/doc/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/sqlplain/doc/__init__.py
+++ /dev/null
diff --git a/sqlplain/doc/book_importer.py b/sqlplain/doc/book_importer.py
deleted file mode 100644
index 66e39a1..0000000
--- a/sqlplain/doc/book_importer.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import os, re, csv, itertools
-from sqlplain import connect
-from sqlplain.namedtuple import namedtuple
-from sqlplain.table import KTable
-
-class BookImporter(object):
-
- # format of the CSV files to be imported
-    BOOKFILE = re.compile(r'books(\d\d)\.csv')
-    Book = namedtuple('Book', 'title author genre nation date')
-
- def __init__(self, db):
- self.db = db
- self.bookid = KTable(db, 'bookid')
- self.book = KTable(db, 'book')
- self.score = dict(KTable(db, 'score'))
- self.genre = KTable(db, 'genre')
- self.nation = KTable(db, 'nation')
- self.counter = itertools.count(1)
-
-    def import_books(self, books):
-        for b in books:
-            bid = self.counter.next()  # shared by the two tables
-            self.bookid.insert_row(
-                id = bid,
-                title = b.title,
-                author = b.author,
-                rdate = b.date)
-            self.book.insert_row(
-                id = bid,
-                genre = b.genre,
-                nation = b.nation,
-                score = self.score[b.score])
-
-    def import_all(self, dirname=None):
-        dirname = dirname or self.db.uri.scriptdir
-        for fname in os.listdir(dirname):
-            mo = self.BOOKFILE.match(fname)
-            if mo:  # open the file only if the name matches
-                f = file(os.path.join(dirname, fname))
-                books = [self.Book(*row) for row in csv.reader(f)]
-                f.close()
-                self.import_books(books)
-
-
-if __name__ == '__main__':
- db = connect('bookdb')
- imp = BookImporter(db)
- imp.import_all()
diff --git a/sqlplain/doc/books1.sql b/sqlplain/doc/books1.sql
deleted file mode 100644
index 77da2ad..0000000
--- a/sqlplain/doc/books1.sql
+++ /dev/null
@@ -1,42 +0,0 @@
-CREATE TABLE bookid (
- id INTEGER PRIMARY KEY,
- title VARCHAR(128),
- author VARCHAR(64),
- rdate DATETIME,
- UNIQUE (title, author, rdate)
-);
-
-CREATE TABLE score (
- score VARCHAR(4) PRIMARY KEY,
- value INTEGER);
-
-INSERT INTO score VALUES ('+', 1);
-INSERT INTO score VALUES ('O', 2);
-INSERT INTO score VALUES ('O+', 3);
-INSERT INTO score VALUES ('OO', 4);
-INSERT INTO score VALUES ('OO+', 5);
-INSERT INTO score VALUES ('OOO', 6);
-INSERT INTO score VALUES ('OOO+', 7);
-INSERT INTO score VALUES ('OOOO', 8);
-
-CREATE TABLE genre (
- id CHAR(2) PRIMARY KEY,
-    descr VARCHAR(32));
-
-CREATE TABLE nation (
- id CHAR(2) PRIMARY KEY,
- descr VARCHAR(32));
-
-CREATE TABLE book (
- id INTEGER PRIMARY KEY REFERENCES bookid (id),
- genre CHAR(2) REFERENCES genre (id),
- nation CHAR(2) REFERENCES nation (id),
- score INTEGER REFERENCES score (value));
-
-CREATE VIEW book_view AS
- SELECT a.id, a.title, a.author, a.rdate, b.genre, b.nation, c.score
- FROM bookid AS a
- INNER JOIN book AS b
- ON a.id = b.id
- INNER JOIN score AS c
- ON c.value=b.score;
diff --git a/sqlplain/doc/cache_ex.py b/sqlplain/doc/cache_ex.py
deleted file mode 100644
index 2e5a46c..0000000
--- a/sqlplain/doc/cache_ex.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from sqlplain.memoize import Memoize
-
-class Forever(object):
- "Cache type for caches which are is never cleared"
-
-class LongTime(object):
- "Cache type for caches which are rarely cleared"
-
-class ShortTime(object):
- "Cache type for caches which are is often cleared"
-
-@Memoize(ShortTime)
-def f1():
- return 1
-
-@Memoize(LongTime)
-def f2():
- return 2
-
-@Memoize(Forever)
-def f3():
- return 3
-
-def test_cache():
- assert not f1.cache
- f1()
- assert f1.cache == {(): 1}
- f1()
- assert f1.cache == {(): 1}
- f2()
- Memoize.clear(ShortTime)
- assert not f1.cache
- assert f2.cache == {(): 2}
diff --git a/sqlplain/doc/compare.py b/sqlplain/doc/compare.py
deleted file mode 100644
index f558019..0000000
--- a/sqlplain/doc/compare.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Compare two tables
-"""
-
-from sqlplain import util
-
-TEMPL = '''
-SELECT %(csfields)s FROM %(table1)s AS a
-INNER JOIN %(table2)s AS b
-ON %(condition)s
-WHERE %(clause)s
-'''
-
-SUB = '''
-SELECT %(csfields)s FROM %(table1)s AS a
-LEFT OUTER JOIN %(table2)s AS b
-ON %(condition)s
-WHERE %(clause)s
-'''
-
-def sub(db, table1, table2, kfields=(), dfields=()):
- """
- Returns the kfields and dfields in table1 but not in table2.
- If kfields and/or dfields are not given, they are reflected
- from the database. table1 and table2 must have the same primary key.
- """
- if not kfields:
- kfields = k1 = util.get_kfields(db, table1)
- k2 = util.get_kfields(db, table2)
- assert k1 == k2, '%s and %s have different primary key!' % (
- table1, table2)
- csfields = ', '.join('a.%s' % k for k in (kfields + dfields))
- condition = ' AND '.join('a.%s=b.%s' % (k, k) for k in kfields)
-    clause = ' AND '.join('b.%s IS NULL' % k for k in kfields)
- return db.execute(SUB % locals())
-
-def compare(db, table1, table2, kfields=(), dfields=()):
- """
- Compare table1 and table2; returns the rows in table1 and not
- in table2, the rows in table2 and not in table1, and the rows
- in both tables such that the dfields differ.
- """
- if not kfields:
- kfields = k1 = util.get_kfields(db, table1)
- k2 = util.get_kfields(db, table2)
- assert k1 == k2, '%s and %s have different primary key!' % (
- table1, table2)
-
- sub12 = sub(db, table1, table2, kfields)
- sub21 = sub(db, table2, table1, kfields)
-
- csfields = ', '.join('a.%s' % k for k in kfields)
- condition = ' AND '.join('a.%s=b.%s' % (k, k) for k in kfields)
- if not dfields:
- d1 = util.get_dfields(db, table1)
- d2 = util.get_dfields(db, table2)
- assert d1 == d2, '%s and %s have different data fields!' % (
- table1, table2)
- dfields = d1
- clause = ' OR '.join('a.%s<>b.%s' % (d, d) for d in dfields)
- return sub12, sub21, db.execute(TEMPL % locals())
-
-if __name__ == '__main__':
- from sqlplain import connect
- db = connect('dbserver2')
- #compare(db, 'riskfactor', 'riskfactor2')
- print sub(db, 'riskfactor', 'riskfactor2')
diff --git a/sqlplain/doc/compare_tables.py b/sqlplain/doc/compare_tables.py
deleted file mode 100644
index 36f18e8..0000000
--- a/sqlplain/doc/compare_tables.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from sqlplain import connect, do, util
-from cStringIO import StringIO
-#from sqlplain.postgres_util import get_schema_postgres
-#print get_schema_postgres(rcare.uri, 'customer')
-
-rcare = connect('rcare')
-rcare_prod = connect('rcare_prod')
-
-CREATE_CUSTOMER = '''
-CREATE TABLE $customer (
- client_id character varying(32) PRIMARY KEY,
- description character varying,
- email character varying,
- short_description character varying(16),
- mail_report_info character varying(128),
- attach_prefix character varying(32),
- filter_from character varying(32),
- zope_id character varying(32),
- lookup boolean,
- client_srs character varying(32),
- CONSTRAINT nnull_id_short_desc CHECK ((short_description IS NOT NULL))
-);
-'''
-
-def copy_from(src, dest, table):
-    out = StringIO()
-    src._curs.copy_to(out, table)
-    out.seek(0)  # rewind the buffer before bulk-loading it
-    util.drop_table(dest, 'customer_prod', force=True)
-    dest.executescript(CREATE_CUSTOMER, customer='customer_prod')
-    dest._curs.copy_from(out, 'customer_prod')
-    out.close()
-
diff --git a/sqlplain/doc/doc.py b/sqlplain/doc/doc.py
deleted file mode 100644
index d90c337..0000000
--- a/sqlplain/doc/doc.py
+++ /dev/null
@@ -1,752 +0,0 @@
-"""
-SQLPLAIN, an opinionated database library
-
-.. contents::
-
-sqlplain: core
-=================================================================
-
-Introduction
----------------------------------------------------------------
-
-I never liked the database API for Python. I have always found it cumbersome
-to use and very much unpythonic. Moreover, it is too low level for my
-taste. So I have always used some small custom made wrapper over it.
-Then SQLAlchemy came. SQLAlchemy has a lot of good things going for it,
-but I must say that I felt it too high level for my taste, so I kept
-using my own little wrapper over the DB API 2. Recently at work we
-decided to make SQLAlchemy the official database toolkit for our code,
-so I decided to remove my own little wrapper from our code
-base. Since over the years I have developed some affection for it, I have
-decided to clean it up a little and to give it a new life as an
-Open Source library.
-
-``sqlplain`` is intended to be a lightweight wrapper over the DB API 2
-and *not* an Object Relation Mapper. Currently the code base of sqlplain
-consists of nearly 1,000 lines of code (for comparison, sqlalchemy 0.5 contains
-something like 42,000 lines of code). In the future sqlplain may grow,
-but I am committed to never make it "big" in any sense, and I would like
-to keep it well below the 5% of the size of sqlalchemy. The reason is that
-one of the main design goals behind sqlplain is to keep it small enough
-that anybody can read the full code base in a single day and have a good
-idea of how it works. Moreover, the code base is intentionally kept simple,
-and no fancy Python features are used: it just requires Python 2.4 to run.
-
-The name ``sqlplain`` comes from the main design goal of the library:
-to use plain old SQL as much as possible, and not Python objects.
-This is the reason why ``sqlplain`` will never become an Object Relational
-Mapper. ORMs have their uses: in particular they are useful if your main
-goal is to persist Python objects into a relational database. However,
-this is not the use case for ``sqlplain``. ``sqlplain`` is intended to
-be used in situations where you have a pre-existing relational database
-which has an independent life from your Python application.
-
-Nowadays it has become common to embed the model in Python code and to
-abstract completely from the database; this is the way Django works
-(see the `Django philosophy`_ page) and the same is true for most
-modern Web frameworks. That approach is fine in many circumstances: it
-basically amounts to hiding the underlying relational database and
-using it as if it were an object database. However ``sqlplain`` does not
-follow this route. ``sqlplain`` does not want to hide the underlying
-database: I routinely have to debug performance issues and therefore I
-want to have in the code plain SQL queries that I can cut and paste on
-the database console to run them under the query analyzer.
-
-When you are debugging performance issues, you want to stay as close
-as possible to bare SQL, since you don't want to confuse issues due
-to the ORM with issues intrinsic to the query: this is
-the reason why ``sqlplain`` has very little support for generating
-SQL programmatically from Python objects. This may change and I could
-improve the support for SQL generation in future versions; however,
-it is very unlikely that ``sqlplain`` will ever support creating tables or
-other database objects from Python objects.
-There is nothing similar to sqlalchemy ``Metadata.create_all``: if
-you want to change the schema of your database you must do it via
-plain old SQL queries, without any support from Python.
-
-``sqlplain`` is very much opinionated.
-
-.. _Django philosophy: http://docs.djangoproject.com/en/dev/misc/design-philosophies/#misc-design-philosophies
-
-Differences with the DB API
---------------------------------------------------
-
-sqlplain has a functional feeling.
-I have always hated the DB API 2, particularly the fact that the execute
-method does not return anything but works by side effects, changing
-the state of the cursor so that the next fetch operation returns the
-retrieved rows. In sqlplain instead the execute method returns
-the retrieved rows directly and there is no explicit concept of cursor.
-This is not a particularly original idea, and actually the sqlite
-driver offers the same functionality. The rows are returned as named
-tuples, not as plain tuples.
-
-sqlplain for the impatient
----------------------------------------------------------------
-
-Enough with the introduction: here is how you can run a query on a
-given database via ``sqlplain``. For exemplification purposes, let
-me assume that you have a database of books (``bookdb``) running on localhost
-on SQL Server, and a superuser ``pyadmin`` with password ``secret`` with
-all permissions on ``bookdb``. Suppose the database contains a table
-called ``book`` containing books with title, author, publication date,
-and other information. Suppose I want to retrieve all the books by Isaac
-Asimov. That can be done with the following code:
-
-.. code-block:: python
-
- >> from sqlplain import connect
- >> bookdb = connect('mssql://pyadmin:secret@localhost/bookdb')
- >> bookdb.execute("SELECT * FROM book WHERE author LIKE :author",
- ('%Asimov%',))
-
-Here is the explanation. The first line imports ``connect``, an alias
-for the ``LazyConnection`` class: instances of ``LazyConnection`` are *lazy
-connection* objects. A lazy connection is a wrapper over an ordinary
-DB API 2 connection: the connection is lazy in the sense that the real
-DB API 2 connection is instantiated only when needed, i.e. when the
-``execute`` method is called. Just instantiating ``bookdb`` does not
-open any connection: however instantiating ``LazyConnection`` involves
-parsing the connection string or uri (in this case
-``mssql://pyadmin:secret@localhost/bookdb``) and importing the
-corresponding database driver (in this case ``pymssql``) which must be
-installed in your system, otherwise you get an ``ImportError``.
-
-The syntax of the URI is the same as in SQLAlchemy (I did copy it
-directly from SQLAlchemy; even Storm uses the same convention and I
-see no reason to change it). Internally ``LazyConnection`` instantiates an
-URI object which is a dictionary:
-
-.. code-block:: python
-
- >> bookdb.uri
- {'database': 'bookdb',
- 'dbtype': 'mssql',
- 'host': 'localhost',
- 'password': 'secret',
- 'port': None,
- 'server': 'localhost',
- 'user': 'pyadmin'}
-
-The port is None here, therefore the low level driver ``pymssql`` will open
-the connection by using the default port number for MS SQL, i.e. 1433.
-
-The ``execute`` method of the lazy connection object is the one
-performing the real job: it opens a low level connection, instantiates
-a DB API 2 cursor and it runs the ``SELECT`` query: the result is
-returned as a list of named tuples. Named tuples are a Python 2.6
-construct, however ``sqlplain`` ships with its own version of
-namedtuples (I have just copied Raymond Hettinger's recipe from the
-Python Cookbook site, with a few tweaks) which is used if you are
-running an early version of Python.
-
-An important thing to notice is that ``sqlplain`` uses named arguments
-in the templates for *all* supported database drivers, even if the
-underlying low level driver uses qmark paramstyle - like SQLite - or
-(py)format paramstyle - like pymssql and psycopg. BTW, the (py)format
-paramstyle is really a terrible choice, since it collides with the
-usage of ``%s`` in Python string templates :-(.
-Also notice that by default ``execute`` does not accept a mapping as
-arguments: it expects an integer-indexed sequence. However, the
-default behavior can be changed by setting the option ``params='MAPPING'``
-at initialization time. Here is an example:
-
- >> bookdb = connect('mssql://pyadmin:secret@localhost/bookdb',
- params='MAPPING')
- >> bookdb.execute('SELECT * FROM book WHERE author=:author',
- dict(author='Asimov'))
-
-The ``execute`` method is smart enough: if you run it again,
-the previously instantiated DB API 2 connection and cursor are
-re-used, i.e. it does not recreate a connection for each query.
-Moreover, the template is parsed only once and then cached,
-so that there is no big penalty if you execute the same
-template twice with different parameters.
-
-You can access the low level connection and cursor via the
-properties ``._conn`` and ``._curs``:
-
-.. code-block:: python
-
- >> bookdb._conn
-
- >> bookdb._curs
-
-There is an underscore, meaning that you are not supposed to access
-those attributes directly. Notice that calling
-``bookdb._conn``/``bookdb._curs`` twice may return different
-connections/cursors in rare circumstances.
-
-You can ``execute`` a ``SELECT`` query, or other types of queries, such
-as ``UPDATE/INSERT/DELETE``; in those cases ``execute`` does not return
-a list of named tuples, it returns a number instead:
-
-.. code-block:: python
-
- >> bookdb.execute("UPDATE book SET author=:a WHERE author like :b",
-                   ('Isaac Asimov', '%Asimov%'))
- 2
-
-The number is the DB API 2 ``rowcount`` attribute of the cursor, i.e.
-the number of rows affected by the query.
-
-Allocated connections take resources on the server even if
-they are not used, therefore you may want to close an unused connection:
-
-
-.. code-block:: python
-
- >> bookdb.close()
-
-Notice that closing a connection twice will
-not raise an error.
-
-Any closed connection will be automatically reopened at the first
-call of ``.execute``.
-
-In Python 2.5+ you can use the ``with`` statement and the
-``contextlib.closing`` function to make sure a connection is closed
-after the execution of a given block of code, by using the pattern
-
-.. code-block:: python
-
- with closing(cx):
- do_something(cx)
-
-The configuration file
---------------------------------------------------------------
-
-Passing the URI to a lazy connection can be annoying, since URIs
-are quite verbose. This problem is solved by the builtin aliasing mechanism
-of ``sqlplain``. The trick is the following: as argument of a lazy connection
-you can use a true uri, i.e. anything starting with ``mssql://`` or
-``postgres://`` or ``sqlite://`` or also an alias, i.e. a plain Python name
-(I recommend to use lowercase letters, digits and underscores only,
-even if this is not forced). The alias is interpreted by looking
-at the ``sqlplain`` configuration file.
-
-The location of the configuration file is determined by the environment
-variable ``$SQLPLAIN``: if empty or missing, the configuration file is
-assumed to be located in ``~/.sqlplain`` where ``~`` means the current
-user home directory. The configuration file has a ``[uri]`` section
-containing the expansion of the aliases, i.e. the mapping from the alias
-name to the full URI. For instance, this could be an example of a valid
-configuration file::
-
-    $ cat ~/.sqlplain
- [uri]
- bookdb: mssql://pyadmin:secret@localhost/bookdb
- testdb: sqlite:///:memory:
-
-The configuration file is read when the lazy connection is instantiated:
-if an alias is found, the corresponding true uri is parsed and the
-correct database driver is loaded, otherwise a ``NameError`` is raised.
-If the configuration file is missing, an ``ImportError`` is raised.
-
-Lazy connections can be used as global variables.
-
-.. (do not believe people saying that globals are evil: Python is full of
- globals, modules are global variables, classes are global variables, and
- there is nothing wrong in having lazy connection as globals)
-
-If you instantiate your lazy connections at the beginning
-of your module, then the underlying low level database driver
-is imported when your module is imported. If you follow this pattern,
-then, the configuration file is part of your application and
-you should consider it as required Python code, even if for sake of simplicity
-it uses the traditional .INI format. If you distribute code based on sqlplain,
-the user is supposed to edit the configuration file by setting the correct
-database uri for her database. Of course, you can always write a small
-application to set the configuration file if you don't want your users
-to touch the .INI file directly (you could set it at installation time,
-or write a small GUI to edit the configuration file).
-
-A typical way to pass the URI is to read it from the command line:
-
-.. code-block:: python
-
-    $ cat example_sqlplain_app.py
-    import sys
-    from sqlplain import connect
-
-    def main(db):
-        pass # do something with the lazy connection db
-
-    if __name__ == '__main__':
-        main(connect(sys.argv[1])) # add argument parsing at will
-
-This works if ``sys.argv[1]`` is a valid URI or a valid alias.
-However, if you are writing functional tests and you invoke them
-with (say) nose_, you cannot use this pattern since ``sys.argv[1]``
-is the name of the file to be tested. When writing nose tests it makes sense to
-use a global lazy connection, instantiated at the top of your
-testing script, something like ``testdb = connect('testdb')`` where
-``testdb`` is an alias to the database used for your automatic tests.
-
-.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
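-
-For instance, a test module could look like the following sketch (the
-``testdb`` alias and the ``book`` table are illustrative):
-
-.. code-block:: python
-
-    from sqlplain import connect
-
-    testdb = connect('testdb')  # global lazy connection, opened on first use
-
-    def test_count_books():
-        count = testdb.execute('SELECT COUNT(*) FROM book', scalar=True)
-        assert count >= 0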
-
-Transactions
---------------------------------------------------------------
-
-By default ``sqlplain`` works in autocommit mode. If you want to use
-transactions, you must specify the isolation level. When running
-in transactional mode, your lazy connection is an instance of
-``TransactionalConnection``, a subclass of ``LazyConnection``
-with methods ``commit`` and ``rollback``:
-
-.. code-block:: python
-
- >> bookdb = connect('mssql://pyadmin:secret@localhost/bookdb',
- isolation_level='SERIALIZABLE')
-
-Transactional connections support the ``with`` statement,
-therefore if you are using a recent enough version of Python (2.5+) you can
-write
-
-.. code-block:: python
-
-    with bookdb: # will begin a transaction and commit or roll it back
- do_something
-
-Otherwise, ``sqlplain`` provides a ``Transaction`` class coding
-the ``rollback/commit`` pattern:
-
-$$Transaction
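-
-Here is a minimal sketch of its usage (the ``insert_book`` action and
-the ``book`` table are illustrative):
-
-.. code-block:: python
-
-    >> from sqlplain import Transaction
-    >> def insert_book(conn, title, author):
-           conn.execute("INSERT INTO book (title, author) VALUES (:t, :a)",
-                        (title, author))
-    >> tx = Transaction(insert_book, bookdb, 'Foundation', 'Isaac Asimov')
-    >> tx.run()  # commits on success, rolls back and re-raises on error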
-
-Retrying connections
-----------------------------------------------------
-
-Suppose you have a long running application connected to a database.
-If you lose the connection to the database, or if you restart the
-database, the low level connection object will not be usable anymore.
-The solution is to use a retrying lazy connection: in case of error,
-the query is retried with a fresh low level connection.
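-
-A sketch of how to get one (``RetryingConnection`` lives in
-``sqlplain.connection`` and accepts the same arguments as ``connect``):
-
-.. code-block:: python
-
-    >> from sqlplain.connection import RetryingConnection
-    >> bookdb = RetryingConnection('mssql://pyadmin:secret@localhost/bookdb')
-    >> bookdb.execute('SELECT COUNT(*) FROM book', scalar=True)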
-
-Threadlocal connections
--------------------------------------------------------
-
-The typical user of ``sqlplain`` is expected to write simple programs,
-for instance scripts for extracting statistics from a database, or
-import scripts, or maintenance scripts. The idea is to provide a simple
-library to perform simple tasks. Users wanting something more sophisticated
-(for instance people writing a server program connected to a database
-and managing many users) are expected to use a more sophisticated
-database toolkit. This is the reason why ``sqlplain`` does not provide
-a connection pool and it will never provide one. You are supposed to
-use the connection pool of your database driver (for instance psycopg2
-provides one), the connection pool of SQLAlchemy, or a custom made one.
-Having said so, ``sqlplain`` provides some support for server programs.
-The reason is that I often need to wrap a script with a Web interface,
-and at work we use Pylons as web framework. Pylons comes with the Paste
-Web server which is fine for usage in our local intranet. The Paste
-server is a multithreaded server which internally manages a pool of threads.
-To make ``sqlplain`` work in this situation, you must set the threadlocal
-flag: doing so ensures that each thread gets its own low level
-connection, independent from the connections of the other threads.
-
-Here is an example script showing multiple threads writing on a sqlite
-database; if you forget to set the ``threadlocal`` flag, you will likely
-incur errors (for instance I get ``OperationalError: database is locked``).
-
-$$threadlocal_ex
-
-I have been using this approach for one year in production on Linux
-without problems, however only in situations of low concurrency
-and only in autocommit mode. You should consider the multithread
-support of ``sqlplain`` as experimental and there is no guarantee
-it will work in your settings. Also, multithreading support is
-very low in my list of priorities (I am in the camp of people who
-are against threads) and what is there is the minimum I needed
-to make my scripts work with the Paste server.
-
-sqlplain: utilities
-================================================================
-
-``sqlplain`` is a very poor toolkit compared to other database toolkits;
-this is done on purpose. Nevertheless, it provides a few convenient
-functions to work with a database directly, collected in the ``util``
-module. We can split these utilities into different classes:
-
-- introspection utilities, to extract information from the database;
-- management utilities, to create/drop database from scratch and to
- create/drop/populate tables;
-
-Reflection facilities
------------------------------------------------------------------
-
-The introspection utilities are ``exists_table``, ``get_descr``,
-``get_fields``, ``get_kfields`` and ``get_dfields``.
-
-The management utilities are the following::
-
-    openclose(uri, templ, *args, **kw)
-
-    exists_db(uri), drop_db(uri, force=False), create_db(uri, force=False),
-    make_db(alias=None, uri=None, dir=None)
-
-
-Moreover, there are a few utilities to manage database schemas, which
-are a PostgreSQL-only feature: ``set_schema(db, name), exists_schema(db, name),
-drop_schema(db, name), create_schema(db, schema, drop=False), make_schema``.
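-
-For instance (a sketch, assuming a PostgreSQL connection ``db``; the
-``reports`` schema is illustrative):
-
-.. code-block:: python
-
-    >> from sqlplain import util
-    >> util.create_schema(db, 'reports', drop=True)
-    >> util.set_schema(db, 'reports')
-    >> util.exists_schema(db, 'reports')
-    True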
-
-``sqlplain`` provides some limited introspection features (the introspection
-features are likely to be enhanced in future versions). For the moment,
-the only thing you can do is to introspect a table or a view and to
-return a named tuple with the names of the fields:
-
-Database management utilities
---------------------------------------------------------------
-
-``sqlplain`` provides three utilities to create and to drop databases in
-a database-independent way. Obviously, in order to take advantage of
-such facilities, you must connect to the database cluster as a user with
-sufficient permissions.
-
-``create_db(uri, force=False)`` creates the database specified by
-``uri`` and returns a (lazy) connection to it. If the database already
-exists, an error is raised. This behavior can be modified by passing the
-flag ``force=True``: in this case the pre-existing database (if any)
-is silently dropped and recreated.
-
-``drop_db(uri, force=False)`` drops an existing
-database; it raises an error if the database does not exist, unless
-the flag ``force=True`` is passed: in that case nothing is done if the
-database does not exist.
-
-``make_db(uri, force=False, script_dir=None)`` is an extension of ``create_db``
-which also executes all the scripts contained in ``script_dir``. If no
-``script_dir`` is passed, it looks in the configuration file for a ``[dir]``
-section (if any) and executes the scripts in that directory (if any).
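-
-For instance (a sketch; the URI is illustrative and you need sufficient
-permissions on the cluster):
-
-.. code-block:: python
-
-    >> from sqlplain import util
-    >> db = util.create_db('postgres://pyadmin:secret@localhost/scratch',
-                           force=True)
-    >> util.drop_db('postgres://pyadmin:secret@localhost/scratch', force=True)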
-
-
-Table management
---------------------------------------------------------------
-
-- ``create_table(conn, tablename, field_descr, force=False)``
-
->>> from sqlplain import util
->>> db = util.create_db('sqlite_test', force=True)
->>> util.create_table(db, 'book',
-... ['title VARCHAR(128)', 'author VARCHAR(64)'])
--1
-
-
-``sqlplain`` provides utilities to truncate tables
-(``truncate_table(conn, tablename)``) and to drop tables
-(``drop_table(conn, tablename)``), assuming you have the right
-permissions for those operations.
-
-Moreover it provides utilities to populate a table:
-
-- ``insert_rows(conn, tablename, rows)`` inserts a sequence of rows
- (any iterable returning valid rows will do) into a table;
-
-- ``load_file(conn, filename, tablename, sep=',')`` inserts the content
- of a CSV file into a table.
-
-The difference between the two operations is that ``load_file`` is orders
-of magnitude faster than ``insert_rows`` since it uses the underlying
-database mechanism for bulk inserting files, possibly disabling transactional
-safety. ``sqlplain`` comes with a test script in the tests directory
-(``test_million.py``) which tries to import a datafile with the two
-mechanisms; if you run it, you may see how big the performance
-difference is on your platform. On my MacBook the difference is a factor
-of 60, i.e. an operation that would take 5 hours with ``insert_rows``
-is reduced to 5 minutes with ``load_file``.
-
-Nevertheless, ``insert_rows`` is more convenient when you have small
-tables and when you are writing unit tests, therefore it makes sense
-for ``sqlplain`` to provide it.
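-
-For instance (a sketch, reusing the two-column ``book`` table created
-above; ``books.csv`` is an illustrative file name):
-
-.. code-block:: python
-
-    >> from sqlplain import util
-    >> util.insert_rows(db, 'book', [
-           ('Foundation', 'Isaac Asimov'),
-           ('I, Robot', 'Isaac Asimov')])
-    >> util.load_file(db, 'books.csv', 'book', sep=',')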
-
-The problem of both ``insert_rows`` and ``load_file`` is that you
-do not have line-by-line control of the operation, therefore a single
-ill-formed line in a million lines file will screw up the entire
-operation.
-
-If you want to insert one line at a time, you can do so by using the
-low level mechanism
-(``conn.execute("INSERT INTO mytable VALUES (:1, :2, :3)", (r1, r2, r3))``)
-or by using the high level `table framework`_.
-
-.. _table framework: tables.html
-
-SQL template functions
---------------------------------------------------------------
-
-``sqlplain`` allows you to litter your source code with scattered
-SQL queries, but does not force you to do so.
-Actually, it offers the possibility to collect
-your queries in a common place. Moreover, it provides a mechanism
-to dynamically generate queries by adding clauses to a base
-template.
-
-Let me show how it works. You can write all of your SQL templates
-in a file called ``queries.py`` like the following
-
-$$queries
-
-The ``sqlplain.do`` utility converts a SQL template into a Python
-function with signature ``(conn, arg1, ..., argN)`` where ``conn``
-is a ``sqlplain`` connection and arg1, ..., argN are arguments
-in correspondence with the question marks in the template.
-Moreover, the docstring of the generated functions is set to the
-SQL template. That means that the built-in ``help`` functionality (as well as
-documentation tools) play well with the generated functions.
-Here are a few examples:
-
-.. code-block:: python
-
-    >> from queries import *
-    >> help(get_authors)
-    Help on function sqlquery in module queries:
-
-    sqlquery(conn)
-    SELECT author FROM book
-
-    >> help(get_titles)
-    Help on function sqlquery in module queries:
-
-    sqlquery(conn, arg1)
-    SELECT title FROM book WHERE author=:a
-
- >> help(set_uppercase_titles)
- Help on function sqlquery in module queries:
-
- sqlquery(conn, author, pubdate)
- UPDATE book SET title=upper(title)
- WHERE author like :author AND pubdate=:pubdate
-
-By default all the functions generated by ``do`` have the name
-``sqlquery``, but it is possible to specify a different name; it
-is even possible to specify the names of the arguments. For
-instance, we could have defined ``set_uppercase_titles`` as
-follows:
-
-.. code-block:: python
-
- >> set_uppercase_titles = do('''
- UPDATE book SET title=upper(title)
- WHERE author like ? AND pubdate=?
- ''', name='set_uppercase_titles', args='author, pubdate')
-
- >> help(set_uppercase_titles)
- Help on function set_uppercase_titles in module queries:
-
- set_uppercase_titles(conn, author, pubdate)
- UPDATE book SET title=upper(title)
- WHERE author like ? AND pubdate=?
-
-It is also possible to set default values for some arguments,
-by passing a tuple of defaults. The defaults refer to the rightmost
-arguments: in this example we are setting the default value for
-the last argument, i.e. pubdate:
-
-.. code-block:: python
-
- >> set_uppercase_titles = do('''
- UPDATE book SET title=upper(title)
- WHERE author like ? AND pubdate=?
- ''', name='set_uppercase_titles', args='author, pubdate',
- defaults=('2008-12-01',))
-
- >> help(set_uppercase_titles)
- Help on function set_uppercase_titles in module queries:
-
- set_uppercase_titles(conn, author, pubdate='2008-12-01')
- UPDATE book SET title=upper(title)
- WHERE author like ? AND pubdate=?
-
-Setting the function name and the argument names explicitly is a good idea
-if you want to have readable error messages in case of errors.
-
-Dynamic generation of SQL templates
---------------------------------------------------------------
-
-There are many situations where the ability to generate SQL templates
-at runtime is handy; a typical use case is writing a select
-box. You can find an industrial strength solution of this problem
-(generating SQL queries from Python) in sqlalchemy and in other ORMs.
-However, the philosophy of ``sqlplain`` is to keep things as simple
-and primitive as possible. Therefore, ``sqlplain`` does not even try
-to implement a general database-independent solution.
-Personally, I am not even convinced that a general
-database-independent solution is a good thing to have.
-A design goal of ``sqlplain`` is to keep the generated queries
-close to what you would write by hand, and to forget about
-database independence.
-SQL template functions provide a ``clause`` attribute, which is a function
-adding a template fragment to the original template and returning
-a new SQL template function. Here is an example of use:
-
-$$customize_select
-
-Notice that SQL template functions are functional in spirit:
-they are not objects, and adding a template fragment results
-in a *new* function; there is no mutation involved. One advantage
-of this approach is that different threads can safely generate
-different SQL template functions from the same original function,
-without interfering with each other.
-Also, SQL template functions are plain old functions: they are not
-callable objects in the sense of instances of a custom class with
-a ``__call__`` method. Still, SQL template functions have non-trivial
-attributes, such as the ``clause`` attribute, which would be a method
-if I had chosen a more object oriented implementation. But I feel that
-plain functions are easier to inspect and
-to manage and I wanted to avoid the complication of inheritance.
-
-Here are a few examples of usage:
-
-.. code-block:: python
-
- >>> select_books = do('SELECT * FROM book')
- >>> print customize_select(select_books).__doc__
- SELECT * FROM book WHERE true
-
- >>> print customize_select(select_books, author='Asimov%').__doc__
- SELECT * FROM book WHERE true AND author LIKE 'Asimov%'
-
- >>> print customize_select(select_books, author='Asimov%', pubdate='2008-11-12').__doc__
- SELECT * FROM book WHERE true AND pubdate > '2008-11-12' AND author LIKE 'Asimov%'
-
-In this last example the generated function has an additional argument
-with respect to the original one, since there is a question mark in
-the query template. ``sqlplain`` takes care of that automatically.
-Of course the mechanism is very primitive and one could write
-something more sophisticated on top of it, but for the moment it
-works well enough for my needs. Future versions of ``sqlplain``
-could offer additional functionality for generating SQL templates,
-or not.
-
-sqlplain: extensions
-=================================================================
-
-``sqlplain`` is designed as a small core - just a lightweight wrapper
-over the standard DB API 2 interface - and a set of extensions.
-Future versions of ``sqlplain`` may offer additional extensions, but
-for the moment very little is provided. I am committed to keep the
-whole of ``sqlplain`` small - as I said, well under the 5% of the
-codebase of sqlalchemy - so even the extension part is guaranteed to
-stay small in the foreseeable future.
-
-Memoization
--------------------------------------------------------------
-
-Very often I have to work with heavy queries
-whose results must be cached. Since this is a common requirement,
-I have decided to provide a simple caching facility in ``sqlplain``.
-The core functionality is provided by the ``Memoize`` class
-in ``sqlplain.memoize``. ``Memoize`` takes in input a new-style class
-(the cache type) and returns a decorator object which is able to
-memoize functions. The cache type is attached as an attribute
-to the memoized function. Moreover, any memoized function has
-a ``.cache`` attribute (a dictionary <function args> -> <function result>)
-which is looked up when the function is called again.
-If the function is called a second time with the same arguments,
-the result is retrieved from the cache.
-
-A global registry of the memoized
-functions is kept in memory and there is a classmethod
-``Memoize.clear(cachetype=object)`` which is able to clear the cache.
-If you specify the cachetype, only the functions with a cache type
-which is a subclass of the specified one will be affected.
-If you do not specify the cachetype, by default ``object`` will be
-assumed, therefore *all* caches for all memoized functions will be
-cleared.
-
-Here is an example of use:
-
-$$cache_ex
-
-Here the cache of ``f1`` is cleared, but the cache of
-``f2`` is not cleared.
-
-According to the goal of keeping things simple, ``sqlplain``
-does not provide the concept of cache expiration, and you are
-expected to clear the cache by hand. Anyway, it is pretty easy to schedule
-a cache cleaner function to run periodically (how depends on
-the framework you are using) and you can implement it yourself.
-
-``sqlplain`` tries to make your life easier when you are interested
-in caching simple queries: to this goal, the ``do`` utility has a
-``cachetype`` default argument which you can set to enable caching::
-
- >> def cached_short(templ):
-        return Memoize(ShortTime)(do(templ))
-
- >> get_title = cached_short('select title from book where author=?')
- >> get_score_value = cached_short('SELECT value FROM score WHERE score=?')
-
- >> score = KTable.create('score', 'score VARCHAR(4) PRIMARY KEY',
-                          'value INTEGER UNIQUE')
- >> score.insert_rows([
-        ('+', 1),
-        ('O', 2),
-        ('O+', 3),
-        ('OO', 4),
-        ('OO+', 5),
-        ('OOO', 6),
-        ('OOO+', 7),
-        ('OOOO', 8),
-        ])
-
- >> d = dict(conn.execute('SELECT score, value FROM score'))
-
-
-An example project using sqlplain: books
---------------------------------------------------
-
-In this section I discuss a toy project realized with sqlplain, i.e.
-an archive of the books I have read. I did start keeping track of
-the books I read more than twenty years ago, and writing a program
-to make statistics about my books was one of my first programs ever.
-It is nice to come back to the same problem after twenty years, now that I
-know SQL ;)
-I will implement the project by using a test first approach.
-
-"""
-
-from ms.tools.minidoc import Document
-from sqlplain.doc import threadlocal_ex
-from sqlplain import Transaction, do, util, table
-import queries, cache_ex
-import logtable
-
-def customize_select(queryfunction, pubdate=None, author=None, title=None):
- templ = queryfunction.templ
- clause = ''
- if pubdate:
- clause += ' AND pubdate > %r' % pubdate
- if author:
- clause += ' AND author LIKE %r' % author
- if title:
- clause += ' AND title LIKE %r' % title
- return do(templ + ' WHERE true' + clause)
-
-dtable_doc = str(Document(table.DTable))
-
-if __name__ == '__main__':
- import doctest; doctest.testmod()
-
-
-# removed functionality
-"""
-Lazy connections have an interesting feature: if
-a query raises an error, ``sqlplain`` tries to execute it a second
-time with a fresh connection, and sometimes the second attempt may
-succeed. The reason is that
-sometimes a correct query fails due to network issues or other
-problems (for instance somebody restarted the database server and
-the existing connection has become obsolete) which are transitory:
-so the first time the query fails because the connection is
-in a bad state, but the second time it succeeds since the fresh
-connection is in a good state. Of course, if the network outage or
-the other error persists, there will be an error even at the second
-attempt and the exception will be raised (we all know that
-*errors should never pass silently*). By default this feature is
-turned on, but you may disable it (if you do not want to retry
-every failed query) by setting the ``.retry`` attribute
-of the lazy connection object (or class) to ``False``.
-
-Retrying connections are good when writing long running programs, such as
-user interfaces (CLI, GUI and Web): in such situations
-errors are trapped.
-"""
diff --git a/sqlplain/doc/first-page.txt b/sqlplain/doc/first-page.txt
deleted file mode 100644
index 19f9521..0000000
--- a/sqlplain/doc/first-page.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-sqlplain: a simple and opinionated Databases Toolkit
-====================================================================
-
-sqlplain is a small Python library with the aim of simplifying your
-interaction with relational databases.
-
-sqlplain is very much opinionated and it
-does not try to be everything to everybody. It is intended to be a
-middle level toolkit: higher level than the DB API 2, but lower
-level than SQLAlchemy.
-
-It was written in part as a reaction to SQLAlchemy (only in part since I
-wrote the core of sqlplain years ago, before the publication of SQLAlchemy).
-SQLAlchemy is actually
-an impressive piece of software but I think it is overkill for many users.
-sqlplain is for users looking for a simple library. It is also fine for
-database administrators with a lot of experience in SQL and little
-experience in Python, since its basic mode of operation is just to run
-plain old SQL scripts (hence the name).
-
-Its design guidelines are the following:
-
-- conciseness
-
-sqlplain is committed to keep the codebase small (say under 2000 lines)
-and the number of features limited;
-
-- simplicity
-
-sqlplain is based on a simple
-implementation, to make it easy for its users to understand it and to
-build the features they need;
-
-- ease of use
-
-sqlplain has a small learning curve
-and provides a small set of convenient utilities for scripting your
-database interaction;
-
-- familiarity
-
-sqlplain connections are a simplified version of standard
-DB API 2 connections, and you can readily get the underlying low
-level connection to use all of its power, if needed;
-
-- transparency
-
-sqlplain tries hard not to add too many layers of indirection to your
-SQL queries: the idea is that it should be easy for the user to extract
-the plain old SQL query from the code, cut and paste it on the database
-console, and try it out directly.
-
-sqlplain can be used to do anything on a database, but there is special
-support for the following areas:
-
-- creating test databases
-- running database performance tests
-- introspecting pre-existing databases
-- scripting database operations
-- memoizing queries
-
-You can use sqlplain with your Web framework of choice (I have
-been running it with Pylons for years without problems) but there
-is no special integration with any framework - this is an advantage
-or a disadvantage depending on your point of view. Certainly sqlplain
-is *not* intended to be used as the Django ORM. Django (as most
-Python ORMs) puts the emphasis on Python, in the sense that the
-model is written down as a set of Python classes which are then
-persisted to the database. On the contrary, sqlplain assumes
-that you already have your tables on the database and that they
-are managed via DDL queries: you can introspect your tables
-from Python, but if you want to look at their definitions you
-must look into SQL code, not into Python code. There is no
-facility to create tables from Python code (actually there is,
-but it works by just passing plain SQL code to the creational procedure)
-and no facility to modify the schema of an existing table from
-Python (apart from passing raw "ALTER TABLE" queries to the connection
-object).
-
-sqlplain does provide limited support for the following areas:
-
-- database independence
-
-True database-independence is a dream, since the features and the
-performance characteristics provided by different databases are
-totally different. Therefore, sqlplain does not even attempt to
-be a fully database-independent toolkit. However, in the cases where
-I could keep database-independence with little effort, I kept it.
-In particular the introspection functionalities are database-independent.
-But in general I judge that spending a lot of time to provide a false sense
-of database independence is not worth the effort.
-
-- programmatic generation of SQL queries
-
-Some database toolkits (notably SQLAlchemy) have an incredibly sophisticated
-mechanism to generate SQL queries from Python objects in a
-database-independent way. That is not the goal of
-sqlplain. Out of the box sqlplain provides no support at all for generating
-SQL queries programmatically, however the documentation provides a few
-do-it-yourself recipes.
-
-- Object Relation Mapper
-
-The core of sqlplain does not provide any ORM. A small table framework
-is provided as an experimental extension: its goal is to provide a small
-object oriented wrapper over the tables of database, but it is not an
-ORM such as the ones you can find in SQLAlchemy, SQLObject or Storm.
-It is pretty easy to write custom classes making use of the table
-framework, but you are on your own, there is nothing built-in.
-This is done on purpose.
-
-- Connection pooling
-
-sqlplain does not provide any connection pooling facility and it will never
-provide one. There are already many connection pool implementations available
-out there; just use the one you like, if you need one.
-Notice that you can use sqlplain connections in a multithreaded application
-(such as a Web server) even without an explicit connection pool since the
-connections can be made thread local.
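-For instance (a sketch, using the ``threadlocal`` flag understood by the
-connection factories in this codebase)::
-
-  db = create_db('sqlite:///tmp.sqlite', force=True, threadlocal=True)
-  # every thread talking to db now uses its own low level connection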
-
-- nested transactions and the unit-of-work pattern: not supported at all.
-
-
-
-
diff --git a/sqlplain/doc/grant_perm.py b/sqlplain/doc/grant_perm.py
deleted file mode 100644
index 3695eba..0000000
--- a/sqlplain/doc/grant_perm.py
+++ /dev/null
@@ -1,16 +0,0 @@
-'''
-$ grant_perm $DSN schema perm role
-'''
-
-import sys
-from sqlplain import connect, util
-
-if __name__ == '__main__':
- try:
- dsn, schema, perm, role = sys.argv[1:]
- except ValueError:
- sys.exit(__doc__)
- db = connect(dsn)
-    for table in util.get_tables(db, schema):
- db.execute('GRANT %s ON %s TO %s' % (perm, table, role))
- print 'Granted %s on %s to %s' % (perm, table, role)
diff --git a/sqlplain/doc/logtable.py b/sqlplain/doc/logtable.py
deleted file mode 100644
index 08ad2f4..0000000
--- a/sqlplain/doc/logtable.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from datetime import datetime
-from sqlplain import do, connect, util, table
-
-def init(db_uri):
- db = connect(db_uri)
- util.create_table(db, 'log', 'date DATETIME, message VARCHAR(255)',
- force=True)
-    return db, table.DTable(db, 'log')
diff --git a/sqlplain/doc/queries.py b/sqlplain/doc/queries.py
deleted file mode 100644
index 517e8e3..0000000
--- a/sqlplain/doc/queries.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from sqlplain import do
-
-get_authors = do('SELECT author FROM book')
-get_titles = do('SELECT title FROM book WHERE author=?')
-set_uppercase_titles = do('''
-UPDATE book SET title=upper(title)
-WHERE author like ? AND pubdate=?
-''')
diff --git a/sqlplain/doc/sql_repl.py b/sqlplain/doc/sql_repl.py
deleted file mode 100644
index b81096a..0000000
--- a/sqlplain/doc/sql_repl.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
-usage: sql_repl <dbname>
-"""
-# alias sql_repl="rlwrap -m python ~/gcode/sqlplain/doc/sql_repl.py"
-
-import os, sys, subprocess
-from sqlplain import connect
-
-def less(text):
- "Send a text to less via a pipe"
- # -c clear the screen before starting less
- po = subprocess.Popen(['less', '-c'], stdin=subprocess.PIPE)
- try:
- po.stdin.write(text)
- po.stdin.flush()
- except IOError:
- pass
- po.stdin.close()
- po.wait()
-
-class Console(object):
- "A textual console to interact with a database"
-
- def __init__(self, dbname, input_src=sys.stdin):
- self.db = connect(dbname)
- self.input_src = input_src
- self.prompt = '%s> ' % self.db.name
-
- def sql_eval(self, code):
- rows = self.db.execute(code)
- if isinstance(rows, int):
- if rows != -1:
- return '%s rows were affected' % rows
- else:
- return ''
- out = ['%d rows were returned' % len(rows), '\t'.join(rows.header)]
- for row in rows:
- out.append('\t'.join(map(str, row)))
- return out
-
- def readcode(self):
- sys.stdout.write(self.prompt)
- sys.stdout.flush()
- lines = []
- while True:
- line = self.input_src.readline()
- if not line:
- raise EOFError
- lines.append(line)
- if line.endswith(';\n'):
-            return ''.join(lines) # each line already ends with a newline
-
- def repl(self):
- while True:
- try:
- code = self.readcode()
- result = self.sql_eval(code)
- if isinstance(result, list):
- less('\n'.join(result))
- else:
- print result
- except EOFError:
- break
- except Exception, e:
- print e.__class__.__name__, str(e)
-
-if __name__ == '__main__':
- try:
- alias = sys.argv[1]
- except IndexError:
- sys.exit(__doc__)
- Console(alias).repl()
diff --git a/sqlplain/doc/strip_blanks.py b/sqlplain/doc/strip_blanks.py
deleted file mode 100644
index 25ad41e..0000000
--- a/sqlplain/doc/strip_blanks.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""
-Take a table with a primary key and some nullable text fields.
-Strip leading spaces in all text fields and
-replace the fields which are all blanks with None.
-
-usage:
-
-$ python %s <db-uri> <table>
-
-"""
-
-import os, sys
-from sqlplain import connect, util
-from sqlplain.table import KTable
-
-def strip(value):
- "value is None or a string"
- if value is None: # do nothing
- return None
- return value.strip() or None
-
-def get_text_fields(conn, table, exclude=()):
- 'Returns the text fields of a table, possibly excluding some of them'
- names = []
- for row in util.get_descr(conn, table):
- if row.type_code == 1 and row.name not in exclude:
- names.append(row.name)
- return names
-
-def replace_blanks(conn, tablename):
- # conn.chatty = True
- pkeys = util.get_kfields(conn, tablename)
- textfields = get_text_fields(conn, tablename, exclude=pkeys)
- tbl = KTable.type(tablename, pkeys, textfields)(conn)
- for row in tbl.select():
- kw = dict((tf, strip(getattr(row, tf))) for tf in textfields)
- newrow = row._replace(**kw)
- if newrow != row:
- tbl.update_row(newrow)
- print newrow
- else:
- sys.stdout.write('.')
- sys.stdout.flush()
-
-if __name__ == '__main__':
- try:
- uri, tablename = sys.argv[1:]
- except ValueError:
- sys.exit(__doc__ % sys.argv[0])
- replace_blanks(connect(uri), tablename)
diff --git a/sqlplain/doc/tables.py b/sqlplain/doc/tables.py
deleted file mode 100644
index a039460..0000000
--- a/sqlplain/doc/tables.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-The table framework
-------------------------------------------------------------
-
-As I said in the introduction, ``sqlplain`` is not intended to be
-a fully fledged ORM, therefore it does not provide a builtin mechanism
-to map user defined classes to database objects, such as the mapping
-mechanism in SQLAlchemy, or the popular Active Record pattern; nevertheless,
-it provides a table framework which is a lightweight object oriented layer
-over database tables.
-``sqlplain`` table objects come in two flavors: D-tables, which should be
-used for tables without a primary key, and K-tables, which must be
-used for tables with a primary key, possibly composite. Actually K-tables
-are D-tables too, since they inherit from D-tables; therefore they
-share all D-table methods and add a few methods which are
-meaningful only if the underlying table has a primary key.
-If you try to use a K-table over a database table which has no primary
-key, a ``TypeError`` will be raised. Here is an example:
-
-$$logtable
-
->>> from sqlplain import table
->>> import logtable
->>> db, log = logtable.init('sqlite_test')
->>> log = table.KTable(db, 'log')
-Traceback (most recent call last):
- ...
-TypeError: table log has no primary key!
-
-Using a DTable instead works:
-
->>> log = table.DTable.reflect(db, 'log')
-
-The ``.reflect`` classmethod creates a suitable subclass of ``DTable``
-(called ``Log``) and instantiates it. ``DTable`` is a kind of abstract
-base class and you cannot instantiate it directly (the same is true
-for the KTable class):
-
->>> table.DTable(db)
-Traceback (most recent call last):
- ...
-TypeError: You cannot instantiate the ABC DTable
-
-In order to create a concrete subclass of DTable (or KTable) one needs
-to set the tabletuple class attribute ``tt``, which contains information
-about the table structure. The ``.reflect`` method extracts the information
-from the database schema; for instance ``log.tt`` is a namedtuple with
-fields ``date`` and ``message``:
-
->>> print log.tt._fields
-('date', 'message')
-
-
->>> from datetime import datetime
->>> now = datetime.now
->>> log.insert_row(date=now(), message='message1')
-1
->>> log.insert_row(date=now(), message='message2')
-1
->>> print len(log)
-2
-
-Here is the full API for DTable:
-
-- create
-- select
-- count
-- delete
-- truncate
-- insert_row
-- insert_rows
-- load_file
-
-Here are the additional functions of K-tables:
-
-- select_row
-- update_row
-- update_or_insert_row
-- delete_row
-
-Instead of an explanation, I will give examples::
-
- select_book = select_row('book', 'title author')
- select_book(bookdb, title='T', author='A')
-
-``select_row`` raises an error if the corresponding query
-returns no result (you are looking for a missing record) or
-if it returns multiple results (it means that your primary key specification
-was incomplete).
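-
-For instance, looking up a missing book raises a ``KeyError`` (a sketch)::
-
-  select_book(bookdb, title='Missing', author='Nobody')
-  # => KeyError: Missing record for ...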
-"""
diff --git a/sqlplain/doc/threadlocal_ex.py b/sqlplain/doc/threadlocal_ex.py
deleted file mode 100644
index 980be1c..0000000
--- a/sqlplain/doc/threadlocal_ex.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import threading
-from sqlplain import connect, do
-from sqlplain.util import create_db
-
-create_book = do('''
-CREATE TABLE book(
- title VARCHAR(64),
- author VARCHAR(32),
- rdate DATE)
-''')
-
-select_author = do('''
-SELECT author FROM book
-''')
-
-insert_author = do('''
-INSERT INTO book VALUES ('No title', ?, '2008-11-22')
-''')
-
-def show_authors(db):
- thread = threading.currentThread().getName()
- print('Printing from %s' % thread)
- insert_author(db, thread)
- for i, author in enumerate(select_author(db)):
- print(i, author[0])
- print('---------------------')
-
-def run_many(N, action, db):
- threads = [threading.Thread(None, lambda : action(db)) for _ in range(N)]
-    for th in threads:
-        th.start()
-    for th in threads: # join all the threads, not just the last one
-        th.join()
-
-if __name__ == '__main__':
- db = create_db('sqlite:///tmp.sqlite', force=True, threadlocal=True)
- create_book(db)
- run_many(10, show_authors, db)
diff --git a/sqlplain/memoize.py b/sqlplain/memoize.py
deleted file mode 100644
index 45e0f18..0000000
--- a/sqlplain/memoize.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from decorator import decorator
-
-class Memoize(object):
-
- registry = []
-
- @classmethod
- def clear(cls, cachetype=object):
- for func in cls.registry:
- if issubclass(func.cachetype, cachetype):
- func.cache.clear()
-
- def __init__(self, cachetype):
- self.cachetype = cachetype
-
- def call(self, func, *args, **kw):
- if kw:
- key = args, frozenset(kw.iteritems())
- else:
- key = args
- try:
- return func.cache[key]
- except KeyError:
- res = func.cache[key] = func(*args, **kw)
- return res
-
- def __call__(self, func):
- func.cache = {}
- new = decorator(self.call, func)
- new.cachetype = self.cachetype
- self.registry.append(new)
- return new
-
-memoize = Memoize(object)
-
-if __name__ == '__main__': # test
-
- @memoize
- def f():
- return 1
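-
-    # a minimal sketch of the expected cache behavior:
-    assert f() == 1
-    assert f.cache == {(): 1} # a no-argument call is cached under the key ()
-    Memoize.clear() # clears the caches of all registered functions
-    assert f.cache == {}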
diff --git a/sqlplain/mssql_support.py b/sqlplain/mssql_support.py
deleted file mode 100644
index 67b461b..0000000
--- a/sqlplain/mssql_support.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import sys
-import _mssql
-import pymssql as dbapi2
-from pymssql import OperationalError, pymssqlCursor as Cursor
-
-ISOLATION_LEVELS = (
- None, 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ',
- 'SERIALIZABLE')
-
-placeholder = '%s'
-
-## notice that retrieving the internal size does not work with pymssql
-CODEMAP = {
- 1: 'VARCHAR',
- 2: 'INTEGER',
- 3: 'FLOAT',
- 4: 'DATETIME',
-}
-
-class Connection(object):
-
- def __init__(self, cnx, isolation_level=None):
- assert isolation_level in ISOLATION_LEVELS, isolation_level
- self._cnx = cnx
- self.isolation_level = isolation_level
- if isolation_level:
- cnx.query("set transaction isolation level " + isolation_level)
- try:
- self._cnx.query("begin tran")
- self._cnx.fetch_array()
- except Exception, e:
- raise OperationalError(e)
-
- def cursor(self):
- if self._cnx is None:
- raise OperationalError("Closed connection")
- cursor = Cursor(self._cnx)
- if not hasattr(cursor, 'connection'): # for old versions of pymssql
- cursor.connection = self
- return cursor
-
- def close(self):
- if self._cnx is None:
- return # the connection was already closed
- self._cnx.close()
- self._cnx = None
-
- def commit(self):
- if self._cnx is None:
- raise OperationalError("Closed connection")
- try:
- self._cnx.query("commit tran")
- self._cnx.fetch_array()
- self._cnx.query("begin tran")
- self._cnx.fetch_array()
- except Exception, e:
- raise OperationalError, "can't commit: %s" % e, sys.exc_info()[2]
-
- def rollback(self):
- if self._cnx is None:
- raise OperationalError("Closed connection")
- try:
- self._cnx.query("rollback tran")
- self._cnx.fetch_array()
- self._cnx.query("begin tran")
- self._cnx.fetch_array()
- except Exception, e:
- # XXX: sometimes the can't rollback message is wrong; a typical
- # example is to try to create an already existing table with
- # try: <create> except: <rollback>; SQL server performs an
- # inner rollback, so that you should not call the second one
- # the error here can be declassed to a Warning
- raise OperationalError, "can't rollback: %s" % e, sys.exc_info()[2]
-
-def connect(params, isolation_level=None):
- user, pwd, host, port, db = params
- port = port or 1433
- if sys.platform != 'win32': # on linux
- if '\\' in host: # strip the instance
- host, instance = host.split('\\')
- host = '%s:%s' % (host, port) # add the port
- _conn = _mssql.connect(host, user, pwd)
- _conn.select_db(db)
- return Connection(_conn, isolation_level)
diff --git a/sqlplain/mssql_util.py b/sqlplain/mssql_util.py
deleted file mode 100644
index c4603e7..0000000
--- a/sqlplain/mssql_util.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import re, sys
-from sqlplain.util import openclose, getoutput
-
-BCP = ['freebcp', 'bcp'][sys.platform == 'win32']
-
-def bcp(uri, table_or_query, filename, in_out, *flags):
- "Helper for calling the bcp command line utility as an external process"
- assert in_out in ('in', 'out', 'queryout')
- if table_or_query.lstrip().lower().startswith('select'):
- in_out = 'queryout'
- elif not '.' in table_or_query:
- table_or_query = '.'.join([uri['database'], '', table_or_query])
- cmd = [BCP, table_or_query, in_out, filename, '-S', uri['host'],
- '-U', uri['user'], '-P', uri['password']] + list(flags)
- return getoutput(cmd)
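-
-# For instance (a sketch):
-#   bcp(uri, 'client', '/tmp/client.csv', 'out', '-c')
-# shells out to [free]bcp and dumps the client table into a CSV file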
-
-def get_kfields_mssql(conn, table):
- return [x.COLUMN_NAME for x in conn.execute('sp_pkeys %s' % table)]
-
-def _normalize(ref):
- """Convert a string of the form REFERENCES dbname.dbowner.tname into
- REFERENCES tname."""
- references, rest = ref.split(' ', 1)
- return references + ' ' + rest.split('.')[-1]
-
-## similar info also comes from get_descr
-def get_columns_mssql(conn, table):
- return conn.execute('sp_columns %s' % table)
-
-def get_keys_mssql(conn, table):
- """
- Return a list of strings describing the keys (both primary and foreign)
- of a given table.
- """
- info = iter(conn.execute("sp_helpconstraint %s, 'nomsg'" % table))
- result = []
- for row in info:
- ctype = row.constraint_type
- if ctype == 'FOREIGN KEY':
- nextrow = info.next()
- ref = '(%s) %s' % (row.constraint_keys, _normalize(
- nextrow.constraint_keys))
- elif ctype.startswith('PRIMARY KEY'):
- ref = '(%s)' % row.constraint_keys
- else: # other column type
- continue
- result.append('%s %s' % (ctype, ref))
- return result
-
-def get_dependencies(conn, *tables):
- s = set()
- for table in tables:
- for line in get_keys_mssql(conn, table):
- if line.startswith('FOREIGN KEY'):
- mo = re.search('REFERENCES ([\w\d]+) ', line)
- s.add((table.lower(), mo.group(1).lower()))
- return sorted(s)
-
-# works only for views and procedures, and constraints
-GET_DEF = '''SELECT definition FROM sys.sql_modules WHERE object_id=\
-(SELECT object_id FROM sys.objects WHERE name=:name)'''
-
-def get_source_mssql(conn, objname):
- "Extracts the source code for views and procedures"
- return conn.execute(GET_DEF, (objname,), scalar=True)
-
-def dump_file_mssql(uri, table_or_query, fname, mode, sep='\t', null=''):
- """
- Dump a table or query into a CSV file by using bcp. Examples:
-    >> dump_file_mssql(uri, 'client', 'client.csv', 'c', sep='\t')
-    >> dump_file_mssql(uri, 'select * from %(database)s..client' % uri,
-    'client.csv', 'c', sep='\t')
- """
- if mode == 'c': # csv mode
- return bcp(uri, table_or_query, fname, 'out', '-c', '-t', sep)
- else: # binary mode
- return bcp(uri, table_or_query, fname, 'out', '-n')
-
-def load_file_mssql(uri, table, fname, mode, sep='\t'):
- "Insert a file into a table via bcp"
- if mode == 'c': # csv mode
- return bcp(uri, table, fname, 'in', '-c', '-t', sep)
- else: # binary mode
- return bcp(uri, table, fname, 'in', '-n')
-
-## introspection
-
-# TODO: add schema
-def get_sizeK_mssql(conn, table):
- size = conn.execute('sp_spaceused %s' % table)[0] # first row
- if size.data:
- return int(size.data.split()[0])
- else:
- return 0
-
-def get_tables_mssql(conn, schema=None):
- if schema:
- return [r.TABLE_NAME for r in conn.execute('sp_tables')
- if r.TABLE_OWNER==schema and r.TABLE_TYPE == 'TABLE']
- else:
- return [r.TABLE_NAME for r in conn.execute('sp_tables')
- if r.TABLE_TYPE == 'TABLE']
-
-def exists_table_mssql(conn, tname):
- return conn.execute('SELECT count(*) FROM sysobjects WHERE name=:name',
- (tname,), scalar=True)
-
-def exists_db_mssql(uri):
- dbname = uri['database']
- master = uri.copy(database='master')
-    # for mysterious reasons you must be transactional to use sp_databases
- for row in openclose(
- master, 'sp_databases', isolation_level='SERIALIZABLE'):
- if row[0] == dbname:
- return True
- return False
-
-def drop_db_mssql(uri):
- openclose(uri.copy(database='master'),
- 'DROP DATABASE %(database)s' % uri)
-
-def create_db_mssql(uri):
- openclose(uri.copy(database='master'),
- 'CREATE DATABASE %(database)s' % uri)
-
diff --git a/sqlplain/namedtuple.py b/sqlplain/namedtuple.py
deleted file mode 100644
index 8163625..0000000
--- a/sqlplain/namedtuple.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-The namedtuple class here is inspired from the namedtuple class
-used in Python 2.6. Unfortunately, I cannot use that class,
-since it does not work for field names which are Python keywords, and names
-coming from a database are uncontrollable. There is still an issue
-for names starting and ending with a double underscore, and for the
-following names: _fields, _fromdict and _asdict.
-Conflict with such names however is pretty rare and it is ignored.
-"""
-
-from operator import itemgetter
-
-RESERVED_NAMES = set('_asdict _fields _fromdict'.split())
-
-class Namedtuple(tuple):
- _fields = ()
- __slots__ = ()
-
- @classmethod
- def _fromdict(cls, dic):
-        return cls(dic[name] for name in cls._fields)
-
- def _asdict(self):
- return dict(zip(self._fields, self))
-
- def __repr__(self):
- s = ', '.join('%r: %r' % (n, v) for (n, v) in zip(self._fields, self))
- return '%s(%s)' % (self.__class__.__name__, s)
-
-def namedtuple(typename, fields):
- "Return a Namedtuple subclass without slots and with the right properties"
- dic = dict(_fields=fields, __slots__=())
- for i, f in enumerate(fields):
- if f.startswith('__') and f.endswith('__') or f in RESERVED_NAMES:
- raise NameError('%s is not a valid field name' % f)
- dic[f] = property(
- itemgetter(i), doc="Property for the field #%d, %r" % (i, f))
- return type(typename, (Namedtuple,), dic)
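-
-if __name__ == '__main__': # a minimal usage sketch
-    Point = namedtuple('Point', ('x', 'y'))
-    p = Point((1, 2)) # Namedtuple subclasses tuple: pass an iterable
-    assert (p.x, p.y) == (1, 2)
-    assert p._asdict() == {'x': 1, 'y': 2}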
diff --git a/sqlplain/pager.py b/sqlplain/pager.py
deleted file mode 100644
index 13ba1f1..0000000
--- a/sqlplain/pager.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import string
-
-def _fetchpage_mssql(self, pageno, maxrows, query_args):
- query_templ = self.query_templ
- nrows, npages = self._rows_pages(maxrows, query_templ, query_args)
- n_end = max(nrows - pageno * maxrows, 0)
- ofields = self.ofields
- templ = string.Template('''
- SELECT TOP ${maxrows} * FROM
- (SELECT TOP ${n_end} * FROM
- ($query_templ) AS T1
- ORDER BY ${ofields} DESC) AS T2
- ORDER BY ${ofields} ASC
- ''').substitute(locals())
- res = self.conn.execute(templ, query_args)
- res.nrows = nrows
- res.npages = npages
- return res
-
-def _fetchpage(self, pageno, maxrows, query_args):
- query_templ = self.query_templ
- currentrow = pageno * maxrows
- ofields = self.ofields
- templ = string.Template('''
- SELECT * FROM ($query_templ) AS tmp
- ORDER BY ${ofields} LIMIT ${maxrows} OFFSET ${currentrow}
- ''').substitute(locals())
- res = self.conn.execute(templ, query_args)
- res.nrows, res.npages = self._rows_pages(maxrows, query_templ, query_args)
- return res
-
-class SqlPaginator(object):
- def __init__(self, conn, query_templ, ofields, query_args=()):
- self.conn = conn
- self.query_templ = query_templ
- self.query_args = query_args
- self.ofields = ofields
- if conn.dbtype == 'mssql':
- self._fetchpage = _fetchpage_mssql.__get__(self)
- else:
- self._fetchpage = _fetchpage.__get__(self)
-
- def _rows_pages(self, maxrows, query_templ, query_args):
- "Count the rows and pages returned by the query"
- ct = 'SELECT count(*) FROM (%s) AS t' % query_templ
- nrows = self.conn.execute(ct, query_args, scalar=True)
- npages, rest = divmod(nrows, maxrows)
- if rest:
- npages += 1
- return nrows, npages
-
- def fetchpage(self, pageno, maxrows, query_args=None):
- """
- Fetch a page from a paginated query. Return a list with attributes
- nrows and npages.
- """
- pageno = max(0, pageno) # makes sure pageno is positive
- if query_args is None:
- query_args = self.query_args
- return self._fetchpage(pageno, maxrows, query_args)
-
-'''
-Notice: in SQL Server 2005 there is the row_number() function which can
-be used in conjunction with ORDER BY; an example is the following:
-select * from
-(select Row_number() over (order by csfb_issuer_id) as rn,*
- from csfbcreditdefaultswaps) as x
-where rn between 100 and 110
-
-'''
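-
-# Example usage (a sketch, cf. tests/test_pager.py):
-#   pager = SqlPaginator(conn, 'SELECT * FROM book', ofields='title')
-#   page = pager.fetchpage(pageno=0, maxrows=50)
-#   page.nrows, page.npages # total rows and pages of the underlying query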
diff --git a/sqlplain/postgres_support.py b/sqlplain/postgres_support.py
deleted file mode 100644
index 527203a..0000000
--- a/sqlplain/postgres_support.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import psycopg2 as dbapi2
-
-ISOLATION_LEVELS = None, 0, 1, 2
-
-placeholder = '%s'
-
-# AUTOCOMMIT = None, 0
-# READ_COMMITTED = READ_UNCOMMITTED = 1
-# REPEATABLE_READ = SERIALIZABLE = 2
-
-CODEMAP = {
- 1082: 'DATE',
- 23: 'INTEGER',
- 701: 'FLOAT',
- 1043: 'VARCHAR'
- }
-
-def connect(params, isolation_level=None, **kw):
- user, pwd, host, port, db = params
- port = port or 5432
- conn = dbapi2.connect(
- database=db, host=host, port=port, user=user, password=pwd, **kw)
- if isolation_level is None:
- conn.set_isolation_level(0)
- return conn
diff --git a/sqlplain/postgres_util.py b/sqlplain/postgres_util.py
deleted file mode 100644
index b83bc1c..0000000
--- a/sqlplain/postgres_util.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import os
-from sqlplain.util import openclose, getoutput
-
-GET_PKEYS = '''\
-SELECT attname FROM pg_attribute
-WHERE attrelid = (
- SELECT indexrelid FROM pg_index AS i
- WHERE i.indrelid = (SELECT oid FROM pg_class WHERE relname=:table)
- AND i.indisprimary = 't')
-ORDER BY attnum
-'''
-
-def get_kfields_postgres(conn, tname):
- return [x.attname for x in conn.execute(GET_PKEYS, (tname,))]
-
-def create_db_postgres(uri):
- openclose(uri.copy(database='template1'),
- 'CREATE DATABASE %(database)s' % uri)
-
-def drop_db_postgres(uri):
- openclose(uri.copy(database='template1'),
- 'DROP DATABASE %(database)s' % uri)
-
-def get_sizeK_postgres(conn, table):
- return conn.execute('SELECT relpages*8 FROM pg_class WHERE relname=:table',
- (table,), scalar=True)
-
-def get_tables_postgres(conn, schema):
- query = 'SELECT tablename FROM pg_tables'
- if schema:
- res = conn.execute(query + ' WHERE schemaname=:a', (schema,))
- else:
- res = conn.execute(query)
- return [r[0] for r in res]
-
-def exists_table_postgres(conn, tname):
- return conn.execute('SELECT count(*) FROM pg_tables WHERE tablename=:table',
- (tname,), scalar=True)
-
-def exists_db_postgres(uri):
- dbname = uri['database']
- for row in openclose(
- uri.copy(database='template1'), 'SELECT datname FROM pg_database'):
- if row[0] == dbname:
- return True
- return False
-
-def dump_file_postgres(uri, query, filename, mode, sep='\t', null='',
- schema=None): # or null='\N'
- """
- Save the result of a query on a local file by using COPY TO and psql
- """
- if not ' ' in query: # assumes a table name was given
- query = '(select * from %s)' % query
- else:
- query = '(%s)' % query
- if mode == 'b':
- return psql(uri, "COPY %s TO STDOUT BINARY" % query, filename, schema)
- else:
- return psql(
- uri, "COPY %s TO STDOUT WITH DELIMITER '%s' NULL '%s'" %
- (query, sep, null), filename, schema)
-
-# apparently copy_from from psycopg2 is buggy for large files
-def load_file_postgres(uri, tname, filename, mode, sep='\t', null='',
- schema=None): # or null='\N'
- """
- Load a file into a table by using COPY FROM and psql
- """
- stdin = file(filename)
- if mode == 'b':
- return psql(uri, "COPY %s FROM STDIN BINARY" % tname, stdin=stdin,
- schema=schema)
- else: # csv mode
- copy_from = "COPY %s FROM STDIN WITH DELIMITER E'%s' NULL E'%s'" % (
- tname, sep, null)
- return psql(uri, copy_from, stdin=stdin, schema=schema)
-
-###############################################################################
-
-## pg_dump and pg_restore should be used for multiple tables or whole databases
-
-def pg_dump(uri, filename, *args):
- """
- A small wrapper over pg_dump. Example:
-    >> pg_dump(uri, thefile)
- """
- cmd = ['pg_dump', '-h', uri['host'], '-U', uri['user'],
- '-d', uri['database'], '-f', filename] + list(args)
- print ' '.join(cmd)
- return getoutput(cmd)
-
-def pg_restore(uri, filename, *args):
- """
- A small wrapper over pg_restore. Example:
-    >> pg_restore(uri, thefile)
- """
- cmd = ['pg_restore', '-h', uri['host'], '-U', uri['user'],
- '-d', uri['database'], filename] + list(args)
- print ' '.join(cmd)
- return getoutput(cmd)
-
-
-def psql(uri, query, filename=os.devnull, stdin=None, schema=None):
- "Execute a query and save its result on filename"
- if not ' ' in query: # assumes a table name was given
- query = 'select * from %s' % query
- if schema:
- query = 'SET search_path TO %s; %s' % (schema, query)
- cmd = ['psql', '-h', uri['host'], '-U', uri['user'], '-d', uri['database'],
- '-c', query, '-o', filename]
- # print cmd
- return getoutput(cmd, stdin)
diff --git a/sqlplain/runtransac.py b/sqlplain/runtransac.py
deleted file mode 100644
index 47f6597..0000000
--- a/sqlplain/runtransac.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""usage:
-$ python %s uri "sqlcode"
-or
-$ python %s uri -f file.sql
-"""
-
-import sys
-from sqlplain.util import openclose
-
-def get_uri_sql(argv):
- "Extract the URI string and the SQL code from the command line"
- if len(argv) <= 2:
-        sys.exit(__doc__ % (argv[0], argv[0]))
- uri = argv[1]
- if uri == '-f':
-        sys.exit(__doc__ % (argv[0], argv[0]))
- for i, arg in enumerate(argv):
- if arg == '-f':
- try:
- fname = argv[i+1]
- except IndexError:
- raise RuntimeError('Missing -f argument!')
- sql = file(fname).read()
- break
- else: # if not break
- sql = argv[2]
- return uri, sql
-
-def test():
- print get_uri_sql(['', 'uri', 'sql'])
- print get_uri_sql(['', 'uri', '-f', 'runtransac.py'])
- print get_uri_sql(['', 'uri', '-f'])
-
-if __name__ == '__main__':
- uri, sql = get_uri_sql(sys.argv)
- openclose(uri, sql, autocommit=False)
diff --git a/sqlplain/sql_support.py b/sqlplain/sql_support.py
deleted file mode 100644
index 357a14a..0000000
--- a/sqlplain/sql_support.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import re, inspect
-from decorator import FunctionMaker
-
-class _SymbolReplacer(object):
- """
- A small internal class to parse SQL templates with names arguments.
- Returns the names of the arguments and the template interpolated with
- a placeholder. Used by get_args_templ.
- """
- STRING_OR_COMMENT = re.compile(r"('[^']*'|--.*\n)")
- SYMBOL = re.compile(r"(?<!:):(\w+)") # a name prefixed by colons
-
- def __init__(self, placeholder):
- self.placeholder = placeholder
- self.replaced = []
- self.found = set()
-
- def get_args_templ(self, templ):
- argnames = []
- def repl(mo):
- "Replace named args with placeholders"
- argname = mo.group(1)
- if argname in argnames:
- raise NameError('Duplicate argument %r in SQL template'
- % argname)
- argnames.append(argname)
- return self.placeholder or mo.group()
- out = []
- for i, chunk in enumerate(self.STRING_OR_COMMENT.split(templ)):
- if i % 2 == 0: # real sql code
- chunk = self.SYMBOL.sub(repl, chunk)
- out.append(chunk)
- return argnames, ''.join(out)
-
-templ_cache = {}
-
-# used in .execute and do
-def get_args_templ(templ, placeholder=None):
- # this is small hack instead of a full featured SQL parser
- """
- Take a SQL template and replace named arguments with the placeholder, except
- in strings and comments. Return the replaced arguments and the new
- template. The results are cached.
-
- >>> args, templ = get_args_templ('INSERT INTO book (:title, :author)')
- >>> print args
- ['title', 'author']
- >>> print templ
- INSERT INTO book (:title, :author)
- >>> print get_args_templ('INSERT INTO book (:title, :author)', '?')[1]
- INSERT INTO book (?, ?)
- """
- if (templ, placeholder) in templ_cache:
- return templ_cache[templ, placeholder]
- argnames, new_templ = _SymbolReplacer(placeholder).get_args_templ(templ)
- templ_cache[templ, placeholder] = argnames, new_templ
- return argnames, new_templ
-
-def do(templ, name='sqlquery', defaults=None, scalar=False, ntuple=None):
- """
- Compile a SQL query template down to a Python function with attributes
-    __source__, argnames, defaults, scalar, ntuple; defaults is a tuple.
- """
- argnames = ', '.join(get_args_templ(templ)[0])
- if argnames:
- argnames += ','
- src = '''def %(name)s(conn, %(argnames)s):
- return conn.execute(templ, (%(argnames)s), scalar=scalar, ntuple=ntuple)
- ''' % locals()
- fn = FunctionMaker(
- name=name, signature=argnames, defaults=defaults, doc=templ).make(
- src, dict(templ=templ, scalar=scalar, ntuple=ntuple), addsource=True)
- comment = '# ntuple = %s\n# scalar = %s\n# templ=\n%s\n' % (
- ntuple, scalar, '\n'.join('## ' + ln for ln in templ.splitlines()))
- fn.__source__ = '%s\n%s' % (comment, fn.__source__)
- fn.templ = templ
- fn.argnames = argnames
- fn.defaults = defaults
- fn.scalar = scalar
- fn.ntuple = ntuple
- return fn
-
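-# For instance (a sketch), with a named-argument template:
-#   get_titles = do('SELECT title FROM book WHERE author=:author')
-# builds a function get_titles(conn, author) which runs
-#   conn.execute('SELECT title FROM book WHERE author=:author', (author,))
-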
-def spec(fn, clause, argnames=None, defaults=None, ntuple=None):
- "Add a clause to an SQL Template function"
- return do(fn.templ + clause, argnames = argnames or fn.argnames,
- defaults=defaults or fn.defaults,
- scalar=fn.scalar, ntuple=ntuple or fn.ntuple)
-
-
-if __name__ == '__main__':
- import doctest; doctest.testmod()
diff --git a/sqlplain/sqlite_support.py b/sqlplain/sqlite_support.py
deleted file mode 100644
index 912bccf..0000000
--- a/sqlplain/sqlite_support.py
+++ /dev/null
@@ -1,16 +0,0 @@
-try: # Python 2.5
- from sqlite3 import dbapi2
-except ImportError: # Python < 2.5
- from pysqlite2 import dbapi2
-
-ISOLATION_LEVELS = (None, "", "DEFERRED", "IMMEDIATE", "EXCLUSIVE")
-
-placeholder = '?'
-
-CODEMAP = {}
-
-def connect(fname, isolation_level=None, **kw):
- dbapi2.register_converter('datetime', dbapi2.converters['TIMESTAMP'])
- return dbapi2.connect(
- fname, isolation_level=isolation_level,
- detect_types=dbapi2.PARSE_DECLTYPES, **kw)
diff --git a/sqlplain/sqlite_util.py b/sqlplain/sqlite_util.py
deleted file mode 100644
index f33d395..0000000
--- a/sqlplain/sqlite_util.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-from sqlplain.util import openclose, insert_rows
-from sqlplain import connect
-
-def get_info(conn, tname):
- """
- Returns a list of namedtuples [(cid, name, type, notnull, dflt_value, pk)]
- """
- return conn.execute('PRAGMA table_info(%s)' % tname)
-
-def load_file_sqlite(uri, tname, fname, mode, sep):
- import csv
- assert mode == 'c', "Only CSV files can be bulk imported in sqlite"
- csvfile = file(fname)
- conn = connect(uri)
- conn.execute('PRAGMA synchronous = OFF')
- try:
- n = insert_rows(conn, tname, csv.reader(csvfile, delimiter=sep))
- finally:
- csvfile.close()
- conn.execute('PRAGMA synchronous = ON')
- return n
-
-def get_kfields_sqlite(conn, tname):
- return [x.name for x in get_info(conn, tname) if x.pk]
-
-def get_tables_sqlite(conn):
-    # PRAGMA table_info requires a table name; list the tables instead
-    return [r.name for r in conn.execute(
-        "SELECT name FROM sqlite_master WHERE type='table'")]
-
-def exists_table_sqlite(conn, tname):
- res = conn.execute('PRAGMA table_info(%s)' % tname)
-    return res != -1 and bool(res) # no rows means no such table
-
-def exists_db_sqlite(uri):
- fname = uri['database']
- return fname == ':memory:' or os.path.exists(fname)
-
-def drop_db_sqlite(uri):
- fname = uri['database']
- if fname != ':memory:':
- os.remove(fname)
-
-def create_db_sqlite(uri):
- "Do nothing, since the db is automatically created"
diff --git a/sqlplain/table.py b/sqlplain/table.py
deleted file mode 100644
index 5ea4dc4..0000000
--- a/sqlplain/table.py
+++ /dev/null
@@ -1,262 +0,0 @@
-import sys, string
-from sqlplain import util, do
-from sqlplain.namedtuple import namedtuple
-
-def make_and_clause(kfields):
- return ' AND '.join('%s=:%s' % (f, f) for f in kfields)
-
-# kfields and dfields must be tuples, not strings
-def tabletuple(name, kfields, dfields):
- """
- Returns a namedtuple with attributes ._kfields, ._dfields and properties
- ._kvalues, ._dvalues.
- """
- ttuple = namedtuple(name, kfields + dfields)
- ttuple._ktuple = ktuple = namedtuple(name + '_key', kfields)
- ttuple._dtuple = dtuple = namedtuple(name + '_data', dfields)
- ttuple._kfields = ktuple._fields
- ttuple._dfields = dtuple._fields
-    ttuple._kvalues = property(
-        lambda self: ktuple(getattr(self, n) for n in self._kfields))
-    ttuple._dvalues = property(
-        lambda self: dtuple(getattr(self, n) for n in self._dfields))
- return ttuple
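-
-# For instance (a sketch):
-#   tt = tabletuple('book', ('author', 'title'), ('score',))
-#   row = tt(('Ziade', 'Expert Python Programming', 'OOO+'))
-#   row._kvalues # the primary key fields only, as a namedtuple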
-
-# closures to be instantiated in DTable.__init__
-
-def insert(ttuple):
- "Returns a procedure inserting a row or a dictionary into a table"
- name = ttuple.__name__
- fields = ttuple._fields
- csfields = ', '.join(fields)
- qmarks = ', '.join('?'*len(fields))
- templ = 'INSERT INTO %s (%s) VALUES (%s)' % (name, csfields, qmarks)
- def insert_row(conn, row=None, **kw):
- row = row or {}
- if isinstance(row, dict):
- row.update(kw)
- missing = set(fields) - set(row) # check for a better error message
- if missing:
- raise TypeError('Missing field(s) %s' % ', '.join(missing))
-            row = ttuple(row[f] for f in fields)
- return conn.execute(templ, row)
- insert_row.__doc__ = insert_row.templ = templ
- return insert_row
-
-def select(ttuple):
- """
- Returns a function with signature (conn, key) - where key can be
- a dictionary or a tabletuple - returning a single row.
- """
- name = ttuple.__name__
- csfields = ','.join(ttuple._fields)
- clause = make_and_clause(ttuple._kfields)
- templ = 'SELECT %s FROM %s WHERE %s' % (csfields, name, clause)
- def select_row(conn, row=None, **kw):
- row = row or {}
- if isinstance(row, dict):
- row.update(kw)
-            row = ttuple._ktuple(row[f] for f in ttuple._kfields)
- res = conn.execute(templ, row, ttuple)
- if not res:
- raise KeyError('Missing record for %s' % str(row))
- elif len(res) > 1:
- raise RuntimeError('Got %s instead of a single row' % res)
- return res[0]
- select_row.__doc__ = select_row.templ = templ
- return select_row
-
-def delete(ttuple):
- "Returns a procedure inserting a row or a dictionary into a table"
- name = ttuple.__name__
- clause = make_and_clause(ttuple._kfields)
- templ = 'DELETE FROM %s WHERE %s' % (name, clause)
- def delete_row(conn, row=None, **kw):
- row = row or {}
- if isinstance(row, dict):
- row.update(kw)
-            row = ttuple._ktuple(row[f] for f in ttuple._kfields)
- return conn.execute(templ, row, ttuple)
- delete_row.__doc__ = delete_row.templ = templ
- return delete_row
-
-def update(ttuple):
- "Returns a procedure updating a row"
- name = ttuple.__name__
-    where = ' AND '.join('%s=?' % f for f in ttuple._kfields)
- templ = string.Template('UPDATE %s SET $set WHERE %s' % (name, where))
- def update_row(conn, row=None, **kw):
- if row is None:
- row = {}
- elif hasattr(row, '_asdict'):
- row = row._asdict()
- row.update(kw)
-        dvalues, dfields = [], []
-        for f, v in row.iteritems():
-            if f not in ttuple._kfields:
-                dvalues.append(v)
-                dfields.append(f)
-        # key values in the same order as the WHERE clause
-        kvalues = [row[f] for f in ttuple._kfields]
- sql = templ.substitute(set=', '.join('%s=?' % f for f in dfields))
- return conn.execute(sql, dvalues + kvalues)
- update_row.__doc__ = update_row.templ = templ
- return update_row
-
-def update_or_insert(ttuple):
- "Returns a procedure updating or inserting a row"
- up = update(ttuple)
- ins = insert(ttuple)
- def update_or_insert_row(conn, row=None, **kw):
- n = up(conn, row, **kw)
- if n == 0:
- n = ins(conn, row, **kw)
- return n
- update_or_insert_row.__doc__ = update_or_insert_row.templ = None
- return update_or_insert_row
-
-############################### views ###############################
-
-class DView(object):
- """
- A wrapper over a database view.
- """
-
- @classmethod
- def create(cls, conn, name, fields, force=False):
- util.create_view(conn, name, fields, force)
- return cls(conn, name)
-
- def __init__(self, conn, name, fields=(), subquery=''):
- self.conn = conn
- self.name = name
- if subquery:
- self.subquery = '(%s) AS %s' % (subquery, name)
- s = 'SELECT * FROM %s WHERE 1=0' % self.subquery
-            fields = fields or [r.name for r in conn.execute(s).descr]
- else:
- self.subquery = name
- fields = fields or util.get_fields(conn, name)
-        self.tt = namedtuple(name, fields)
-
- def select(self, clause='', *args):
- "Select rows from the table"
- fields = ', '.join(self.tt._fields)
- templ = 'SELECT %s FROM %s %s' % (fields, self.subquery, clause)
- if args:
-            return do(templ, ntuple=self.tt)(self.conn, *args)
- else:
- return self.conn.execute(templ, ntuple=self.tt)
-
-    def count(self, clause='', *args):
-        "Count the number of rows satisfying the given clause"
-        templ = 'SELECT COUNT(*) FROM %s %s' % (self.subquery, clause)
-        if args:
-            return do(templ, scalar=True)(self.conn, *args)
- return self.conn.execute(templ, scalar=True)
-
- def __iter__(self):
- return iter(self.select())
-
- def __len__(self):
- "Return the total number of rows in the table"
- return self.count()
-
-class KView(DView):
- """
- A database view with a unique field (possibly composite).
- """
-
-    def __init__(self, conn, name, kfields=(), dfields=()):
-        self.conn = conn
-        self.name = self.subquery = name
-        kfields = kfields or util.get_kfields(conn, name)
- # util.get_kfields probably does work on a view
- dfields = dfields or util.get_dfields(conn, name)
- if not kfields:
- raise TypeError('table %s has no primary key!' % name)
- self.tt = tabletuple(name, kfields, dfields)
- self.select_row = select(self.tt)
-
- def __contains__(self, key):
- try:
-            self.select_row(self.conn, key)
- except KeyError:
- return False
- else:
- return True
-
- def keyset(self):
- """Return a set with the key(s) of the table"""
- kfields = ', '.join(self.tt._kfields)
- return set(self.conn.execute(
- 'SELECT %s FROM %s' % (kfields, self.subquery)))
-
-############################### tables ###############################
-
-class DTable(DView):
- """
- A simple table class for database tables without a primary key.
- The only methods are insert_row, load_file, delete, truncate, select.
- """
-
- @classmethod
- def create(cls, conn, name, body, force=False):
- "Create a table on the db and return the associated table object"
- util.create_table(conn, name, body, force)
- return cls(conn, name)
-
- def __init__(self, conn, name, fields=()): # add subquery functionality?
- self.conn = conn
- self.name = self.subquery = name
- if not fields:
- fields = util.get_fields(conn, name)
- self.tt = namedtuple(name, fields)
- self.insert_row = insert(self.tt)
-
- def insert_rows(self, rows):
- 'Populate a table by reading a row-iterator'
- return util.insert_rows(self.conn, self.name, rows)
-
-    def load_file(self, file, sep='\t'):
- 'Populate a table by reading a file-like object'
- return util.load_file(self.conn, file, self.name, sep)
-
-    def delete(self, clause='', *args):
-        "Delete rows from the table"
-        templ = 'DELETE FROM %s %s' % (self.name, clause)
-        if args:
-            return do(templ)(self.conn, *args)
- return self.conn.execute(templ)
-
- def truncate(self):
- "Truncate the table"
- return util.truncate_table(self.conn, self.name)
-
-class KTable(DTable, KView):
- """
- An object oriented wrapper for database tables with a primary key.
- """
-    def __init__(self, conn, name, kfields=(), dfields=()):
-        self.conn = conn
-        self.name = self.subquery = name
-        kfields = kfields or util.get_kfields(conn, name)
- dfields = dfields or util.get_dfields(conn, name)
- if not kfields:
- raise TypeError('table %s has no primary key!' % name)
- self.tt = tabletuple(name, kfields, dfields)
- for nam in ['insert', 'delete', 'select', 'update',
- 'update_or_insert']:
- func = globals()[nam](self.tt)
- setattr(self, nam + '_row', func)
-
-if __name__ == '__main__':
-    tt = tabletuple('tt', ('x', 'y'), ('a', 'b'))((1, 2, 3, 4))
- print tt._kvalues, tt._dvalues
- print tt.__class__.mro()
-
- Book = KTable.type('book', 'pubdate', 'title author')
- from sqlplain import connect
- conn = connect('srs_dev')
- book = Book(conn)
- #help(Book.select_row)
- #help(Book.insert_row)
- #help(Book.delete_row)
- #help(Book.update_row)
- #help(Book.update_or_insert_row)
diff --git a/sqlplain/tests/books-data.sql b/sqlplain/tests/books-data.sql
deleted file mode 100644
index 8429b9c..0000000
--- a/sqlplain/tests/books-data.sql
+++ /dev/null
@@ -1,12 +0,0 @@
--- populate score
-INSERT INTO score VALUES ('+');
-INSERT INTO score VALUES ('O');
-INSERT INTO score VALUES ('O+');
-INSERT INTO score VALUES ('OO');
-INSERT INTO score VALUES ('OO+');
-INSERT INTO score VALUES ('OOO');
-INSERT INTO score VALUES ('OOO+');
-INSERT INTO score VALUES ('OOOO');
-
---populate books
-INSERT INTO book VALUES ('Expert Python Programming', 'Ziadé', 'OOO+');
diff --git a/sqlplain/tests/books-schema.sql b/sqlplain/tests/books-schema.sql
deleted file mode 100644
index 72d9089..0000000
--- a/sqlplain/tests/books-schema.sql
+++ /dev/null
@@ -1,9 +0,0 @@
-CREATE TABLE score(
- score VARCHAR(4) PRIMARY KEY);
-
-CREATE TABLE book(
- title VARCHAR(64),
- author VARCHAR(64),
- score VARCHAR(4) REFERENCES score (score),
- PRIMARY KEY (author, title)
-);
diff --git a/sqlplain/tests/sqlplain_ex.py b/sqlplain/tests/sqlplain_ex.py
deleted file mode 100644
index ea76b0e..0000000
--- a/sqlplain/tests/sqlplain_ex.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from sqlplain import DB
-
-def test(autocommit):
- db = DB('mssql://python:p1tone@192.168.0.129/srsdevbak', autocommit)
- print db
- print db.execute('select * from client')
- print db.execute("update client set client='pippo' where 1=2")
-
-
- db = DB('postgres://python:p1tone@localhost/rcare', autocommit)
- print db
- print db.execute('select * from customer')
- print db.execute("update customer set client_id='pippo' where 1=2")
-
- db = DB('sqlite:///:memory:', autocommit)
- print db
- print db._raw_execute('create table prova (i integer)', (), None)
- print db.execute("insert into prova values (1)")
- print db.execute("select * from prova")
-
-if __name__ == '__main__':
- test(autocommit=False)
- test(autocommit=True)
-
diff --git a/sqlplain/tests/test_bcp.py b/sqlplain/tests/test_bcp.py
deleted file mode 100644
index 811952b..0000000
--- a/sqlplain/tests/test_bcp.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from sqlplain import connect
-from sqlplain.mssql_util import *
-
-srs = connect('dbserver2')
-
-bcp_dump(srs.uri, 'client', '/tmp/client.bcpdump')
-
-bcp_restore(srs.uri, '/tmp/client.bcpdump', 'client')
diff --git a/sqlplain/tests/test_million.py b/sqlplain/tests/test_million.py
deleted file mode 100644
index bd47a7d..0000000
--- a/sqlplain/tests/test_million.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-A test script to investigate the performance of load_file.
-"""
-
-from __future__ import with_statement
-import os, sys, csv, tempfile
-from random import random
-from datetime import date, timedelta
-from sqlplain import do, util, table
-from sqlplain.recipes import Clock
-
-create_price_table = do('''
-CREATE TABLE price(
-code CHAR(4),
-date DATE,
-price FLOAT,
-PRIMARY KEY (code, date)
-)
-''')
-
-clock = Clock(lambda et: sys.stdout.write('Elapsed time: %s\n' % et))
-
-def makedatafile(ncodes, ndates):
- "Create a big datafile with ncodes * ndates rows; return the file name"
- tmp, fname = tempfile.mkstemp()
- today = date.today()
- for i in range(ncodes):
- for j in range(ndates):
- code = '%04d' % i
- day = today - timedelta(j)
- value = 1 + random()
- os.write(tmp, '%s,%s,%s\n' % (code, day, value))
- os.close(tmp)
- os.chmod(fname, 0755)
- return fname
-
-def makedb(uri):
- global fname
- db = util.create_db(uri, force=True)
- create_price_table(db)
- fname = makedatafile(100, 100)
- print 'Created datafile %s' % fname
- return db
-
-def test():
- db = makedb('sqlite_test')
- price = table.DTable.reflect(db, 'price')
- with clock:
- # 104 seconds for 100,000 rows on my MacBook
- for r in csv.reader(file(fname)):
- price.insert_row(r)
- yield lambda x:x, 'insert_row'
- price.truncate()
- with clock:
- # 2.4 seconds for 100,000 rows on my MacBook
- price.load_file(fname, sep=',')
- yield lambda x:x, 'load_file'
-
-def teardown():
- os.remove(fname)
diff --git a/sqlplain/tests/test_pager.py b/sqlplain/tests/test_pager.py
deleted file mode 100644
index 8ded9fb..0000000
--- a/sqlplain/tests/test_pager.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from sqlplain import connect
-from sqlplain.pager import SqlPaginator
-
-def setup():
- global pager
- srs = connect('dbserver2')
-    pager = SqlPaginator(srs, 'select * from CSFBCreditDefaultSwaps',
-                         'csfb_issuer_id')
-
-def test1():
- page = pager.fetchpage(64, 100)
- for row in page:
- print row.csfb_issuer_id
- print 'pages, rows, size:', page.npages, page.nrows, len(page)
diff --git a/sqlplain/tests/test_qmark2pyformat.py b/sqlplain/tests/test_qmark2pyformat.py
deleted file mode 100644
index e535503..0000000
--- a/sqlplain/tests/test_qmark2pyformat.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from __future__ import with_statement
-from sqlplain.sql_support import qmark2pyformat, STRING_OR_COMMENT
-from ms.time_utils import Clock
-
-TEMPL = '''
--- insert into covered
-INSERT INTO rhp_covered_product
- SELECT p.refdate, c.client_srs, c.client_id, p.pricer, ep.productcode,
- ep.clientcode, rmtck.covered_date FROM rhp_exportedproduct AS ep
- INNER JOIN customer AS c
- ON lower(ep.client) = c.client_srs
- INNER JOIN rhp_product AS p
- ON ep.productcode = p.productcode
- AND ep.refdate = p.refdate
- AND p.refdate = ?
- INNER JOIN (
- SELECT client_id, productcode, covered_date
- FROM covered_by_nonrequested
-
- UNION
-
- SELECT client_id, productcode , covered_date FROM
- covered_by_rhp_tickerlookup
-
- ) AS rmtck
- ON c.client_id = rmtck.client_id
- AND rmtck.productcode = p.productcode
-'''
-
-#TEMPL = 'select * from client where client=?'
-
-def write(dt):
- print "Spent %s ms" % dt
-
-with Clock(write):
- for i in xrange(100000):
- qmark2pyformat(TEMPL)
-
-def qmark2pyformat(templ): # version without cache
- qmarks = 0
- out = []
- for i, chunk in enumerate(STRING_OR_COMMENT.split(templ)):
- if i % 2 == 0: # real sql code
- qmarks += chunk.count('?')
- out.append(chunk.replace('?', '%s'))
- else: # string or comment
- out.append(chunk)
- new_templ = ''.join(out)
- return qmarks, new_templ
-
-with Clock(write):
- for i in xrange(100000):
- qmark2pyformat(TEMPL)
diff --git a/sqlplain/tests/test_reset.py b/sqlplain/tests/test_reset.py
deleted file mode 100644
index 1971e04..0000000
--- a/sqlplain/tests/test_reset.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from sqlplain import connect, do
-
-def test_postgres():
- count_customers = do('select count(*) from customer', scalar=True)
- conn = connect('postgres://python:p1tone@localhost/rcare')
- print count_customers(conn)
- conn._curs.close() # closed the cursor by hand to emulate an error
- print count_customers(conn)
-
-def test_mssql():
- count_customers = do('select count(*) from client', scalar=True)
- conn = connect('srs_dev')
- print count_customers(conn)
- conn._curs.close() # closed the cursor by hand to emulate an error
- print count_customers(conn)
-
-if __name__ == '__main__':
- test_mssql()
diff --git a/sqlplain/tests/test_table.py b/sqlplain/tests/test_table.py
deleted file mode 100644
index e69de29..0000000
--- a/sqlplain/tests/test_table.py
+++ /dev/null
diff --git a/sqlplain/tests/test_truncate.py b/sqlplain/tests/test_truncate.py
deleted file mode 100644
index fbfb390..0000000
--- a/sqlplain/tests/test_truncate.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from __future__ import with_statement
-import os
-from sqlplain import connect, util
-from test_million import makedb, makedatafile, clock, create_price_table
-
-databases = 'postgres_test sqlite_test'.split()
-
-
-def test_truncate():
- for uri in databases:
- db = util.create_db(uri, force=True)
- create_price_table(db)
- fname = makedatafile(100, 100)
- try:
- util.load_file(db, fname, 'price')
- with clock:
- util.truncate_table(db, 'price')
- yield lambda *a: None, uri, 'truncate'
-
- util.load_file(db, fname, 'price')
- with clock:
- db.execute('delete from price')
- yield lambda *a: None, uri, 'delete'
- finally:
- os.remove(fname)
diff --git a/sqlplain/uri.py b/sqlplain/uri.py
deleted file mode 100644
index c17d9ff..0000000
--- a/sqlplain/uri.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-A couple of utilities to convert an uri into a pair
-(driver connector, connection arguments).
-"""
-
-import os
-from sqlplain.configurator import configurator
-
-SUPPORTED_DBTYPES = 'mssql', 'postgres', 'sqlite'
-CODEMAP = {} # will be filled with the codemaps for the various drivers
-
-def imp(mod):
- return __import__(mod, globals(), locals(), [''])
-
-class URI(object):
- """
-    Extracts the connection parameters from a SQLAlchemy-like uri string.
- Has attributes
-
- - dbtype
- - server # means host:port
- - database
- - host
- - port
- - scriptdir
-
- In the case of mssql, the host may contain an instance name.
- """
- def __init__(self, uri):
- if isinstance(uri, URI): # copy data from uri
- vars(self).update(vars(uri))
- return
- assert uri and isinstance(uri, str), '%r is not a valid string!' % uri
- self.scriptdir = None
- if not '://' in uri: # assume it is an alias
- try:
- section = configurator.scriptdir
-            except AttributeError: # missing [scriptdir] section in conf
- pass
- else:
- scriptdir = getattr(section, uri, None)
- if scriptdir:
- self.scriptdir = os.path.expanduser(scriptdir)
- try:
- uri = getattr(configurator.uri, uri)
- except AttributeError:
- msg = '%s is not a valid URI, not a recognized alias in %s' % (
- uri, configurator._conf_file)
- msg += '; available aliases are %s' % configurator._databases
- raise NameError(msg)
- if not uri.startswith(SUPPORTED_DBTYPES):
- raise NameError('Invalid URI %s' % uri)
- dbtype, partial_uri = uri.split('://')
- if dbtype == 'sqlite': # strip a leading slash, since according to
- # SQLAlchemy conventions full_uri starts with three slashes or more
- self.dbtype = dbtype
- self.user = ''
- self.password = ''
- self.database = partial_uri[1:]
- self.host = 'localhost'
- return
- elif not ('@' in partial_uri and '/' in partial_uri and \
- ':' in partial_uri):
- raise ValueError(
- 'Wrong uri %s: should be dbtype://user:passwd@host:port/db' %
- partial_uri)
- user_pwd, host_db = partial_uri.split('@')
- self.dbtype = dbtype
- self.server, self.database = host_db.split('/')
- self.user, self.password = user_pwd.split(':')
- self.user = self.user or os.environ.get('USER')
- if not self.user:
- raise ValueError('Empty username and $USER!')
- if ':' in self.server: # look if an explicit port is passed
- self.host, self.port = self.server.split(':')
- else:
- self.host, self.port = self.server, None
-
- def copy(self, **kw):
- "Returns a copy of the URI object with different attributes"
- new = self.__class__(self)
- vars(new).update(kw)
- return new
-
- def import_driver(self):
- "Import the right driver and populate the util module"
- from sqlplain import util
- dbtype = self.dbtype
- driver = imp('sqlplain.%s_support' % dbtype)
- CODEMAP[dbtype] = driver.CODEMAP
- driver_util = imp('sqlplain.%s_util' % dbtype)
- # dynamically populate the 'util' module with the driver-specific func
- for name, value in vars(driver_util).iteritems():
- if name.endswith(dbtype):
- setattr(util, name, value)
- return driver
-
- def get_driver_params(self):
- """
- Determine the database type (and therefore the driver to use) from
- the URI and returns the right driver, as well as the connection string
- arguments user, pwd, host, port, db.
- """
- driver = self.import_driver()
- if self.dbtype == 'sqlite':
- params = self.database
- else:
- params = (self.user, self.password, self.host,
- self.port, self.database)
- return driver, params
-
- def __getitem__(self, name):
- # make the interpolation syntax (string-templ % self) possible
- return self.__dict__[name]
-
- def __repr__(self):
- if self.dbtype == 'sqlite':
- return '<sqlite:///%s>' % self.database
- return '<%(dbtype)s://%(user)s:xxxxx@%(server)s/%(database)s>' % self
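-
-# For instance (a sketch):
-#   u = URI('postgres://user:pwd@localhost:5432/mydb')
-#   (u.dbtype, u.host, u.port, u.database)
-#   # -> ('postgres', 'localhost', '5432', 'mydb')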
diff --git a/sqlplain/util.py b/sqlplain/util.py
deleted file mode 100644
index 626ff7d..0000000
--- a/sqlplain/util.py
+++ /dev/null
@@ -1,310 +0,0 @@
-"""
-Notice: create_db and drop_db are not transactional.
-"""
-
-import os, sys, re, subprocess, tempfile
-from sqlplain.uri import URI, CODEMAP
-from sqlplain.sql_support import get_args_templ
-from sqlplain import connect, do
-from sqlplain.connection import Transaction
-from sqlplain.namedtuple import namedtuple
-try:
- CalledProcessError = subprocess.CalledProcessError
-except AttributeError: # Python < 2.5
- class CalledProcessError(Exception):
- def __init__(self, returncode, cmd):
- self.returncode = returncode
-            self.cmd = cmd
-
-chatty = False
-
-def getoutput(commandlist, stdin=None, save_on=None):
- '''
- Returns the output of a system command or raise a CalledProcessError.
- Optionally, saves the output on save_on (a writable file-like object).
- '''
- if stdin is None:
- stdin = subprocess.PIPE
- if save_on is None:
- save_on = subprocess.PIPE
- elif isinstance(save_on, str): # assume it is a filename
- save_on = file(save_on, 'w')
- po = subprocess.Popen(commandlist, stdin=stdin, stdout=save_on)
- if chatty:
- print 'Running %s' % ' '.join(map(repr, commandlist))
- out, err = po.communicate()
- if po.returncode or err:
- if err:
- sys.stderr.write(err)
- sys.stderr.flush()
- cmd_str = ''
- for cmd in commandlist:
- if re.search(r'\s', cmd):
- cmd_str += repr(cmd) + " "
- else:
- cmd_str += cmd + " "
- raise CalledProcessError(po.returncode, cmd_str)
- return out
-
-VERSION = re.compile(r'(\d[\d\.-]+)')
-Chunk = namedtuple('Chunk', 'version fname code')
-
-def _call(procname, uri_or_conn, *args, **kw):
- "Call a procedure by name, by dispatching on the database type"
- dbtype = uri_or_conn.dbtype
- proc = globals().get(procname + '_' + dbtype)
- if proc is None:
- raise NameError('Missing procedure %s for %s' % (procname, dbtype))
- return proc(uri_or_conn, *args, **kw)
-
-# exported utilities
-
-def openclose(uri, templ, *args, **kw):
- "Open a connection, perform an action and close the connection"
- unexpected = set(kw) - set(['isolation_level'])
- if unexpected:
- raise ValueError('Received unexpected keywords: %s' % unexpected)
- isolation_level = kw.get('isolation_level', None)
- conn = connect(uri, isolation_level)
- try:
- if isolation_level is None:
- return conn.execute(templ, args)
- else:
- return Transaction(conn.__class__.execute, conn, templ, args).run()
- finally:
- conn.close()
-
-def exists_db(uri):
- "Check is a database exists"
- return _call('exists_db', URI(uri))
-
-def drop_db(uri):
- "Drop an existing database"
- _call('drop_db', URI(uri))
-
-# helper for create_db
-def _collect(directory, exts):
- '''
- Read the files with a given set of extensions from a directory
- and returns them ordered by version number.
- '''
- chunks = []
- for fname in os.listdir(directory):
- if fname.endswith(exts) and not fname.startswith('_'):
- version = VERSION.search(fname)
- if version:
- code = file(os.path.join(directory, fname), 'U').read()
- chunks.append(Chunk(version.group(1), fname, code))
- return sorted(chunks) # chunks are named tuples
-
-def runscripts(db, scriptdir, exts):
- "Run the versioned .sql/.py scripts found in scriptdir, in version order"
- for chunk in _collect(scriptdir, exts):
- if chunk.fname.endswith('.sql'):
- if chatty:
- print "EXECUTING %s" % chunk.fname
- db.executescript(chunk.code)
- elif chunk.fname.endswith('.py'):
- exec chunk.code in {}
-
-def create_db(uri, force=False, scriptdir=None, **kw):
- """
- Create the database specified by uri. If the database exists already
- an error is raised, unless force is True: in that case the database
- is dropped and recreated.
- """
- uri = URI(uri)
- uri.import_driver() # import the driver
- if exists_db(uri):
- if force:
- drop_db(uri)
- else:
- raise RuntimeError(
- 'There is already a database %s!' % uri)
- _call('create_db', uri)
- db = connect(uri, **kw)
- scriptdir = uri.scriptdir or scriptdir
- if scriptdir:
- runscripts(db, scriptdir, ('.sql', '.py'))
- return db
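-
-# Usage sketch (URI and script directory are hypothetical):
-#   db = create_db('sqlite:///tmp/test.sqlite', force=True,
-#                  scriptdir='/my/schema/scripts')
-# drops a pre-existing database, recreates it, runs the versioned
-# .sql/.py scripts found in the script directory and returns an open
-# connection.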
-
-def create_table(conn, tname, body, force=False):
- """
- Create a table. If the table already exists, raise an error, unless
- force is True.
- """
- if exists_table(conn, tname) and force:
- drop_table(conn, tname) # do not raise an error
- return conn.execute('CREATE TABLE %s(\n%s)' % (tname, body))
-
-def drop_table(conn, tname, force=False):
- """
- Drop a table. If the table does not exist, raise an error, unless
- force is True.
- """
- if not exists_table(conn, tname) and force:
- return # do not raise an error
- return conn.execute('DROP TABLE %s' % tname)
-
-def _make_clause(dic, sep):
- '''
- A utility function to generate an SQL clause from a dictionary:
- >>> _make_clause(dict(a=1, b=2), ' AND ')
- ('a=:a AND b=:b', (1, 2))
- '''
- clauses = []
- vals = []
- for n, v in dic.iteritems():
- clauses.append('%s=:%s' % (n, n))
- vals.append(v)
- return sep.join(clauses), tuple(vals)
-
-def update_table(conn, tname, vdict, **kdict):
- "A low-level utility to update a table"
- where, kvals = _make_clause(kdict, ' AND ')
- set_, vvals = _make_clause(vdict, ', ')
- templ = 'UPDATE %s SET %s WHERE %s' % (tname, set_, where)
- return conn.execute(templ, vvals + kvals)
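-
-# Illustrative expansion (table and fields are made up):
-#   update_table(conn, 'book', dict(title='T'), id=1)
-# generates the template 'UPDATE book SET title=:title WHERE id=:id'
-# and executes it with the argument tuple ('T', 1).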
-
-def copy_table(conn, src, dest, force=False):
- """
- Copy src into dest by using SELECT INTO; dest must be a valid tablename.
- If force is True and dest is an already existing table, dest is
- destroyed and recreated, and a primary key is added.
- """
- query = "SELECT * INTO %s FROM %s" % (dest, src)
- recreate = force and exists_table(conn, dest)
- if recreate:
- drop_table(conn, dest)
- n = conn.execute(query)
- if recreate:
- kfields = ', '.join(get_kfields(conn, src))
- conn.execute('ALTER TABLE %s ADD PRIMARY KEY (%s)' % (dest, kfields))
- return n
-
-def remote_copy_table(remote_db, local_db, src, dest, mode='b', truncate=False):
- """
- Return the temporary file used.
- """
- fd, tempname = tempfile.mkstemp()
- try:
- dump_file(remote_db.uri, src, tempname, mode)
- if truncate:
- truncate_table(local_db, dest)
- print load_file(local_db.uri, dest, tempname, mode)
- finally:
- return tempname
-
-def truncate_table(conn, tname):
- if conn.dbtype == 'sqlite':
- # TRUNCATE is not supported right now
- return conn.execute('DELETE FROM %s' % tname)
- else:
- return conn.execute('TRUNCATE TABLE %s' % tname)
-
-def insert_rows(conn, tname, rows):
- """Insert an iterable sequence of rows into a table;
- useful for unit tests. Notice that it invokes executemany
- on the underlying low-level connection."""
- lst = list(rows)
- if not lst: # nothing to insert
- return 0
- numeric = [':%s' % (i + 1) for i in range(len(lst[0]))]
- templ = 'INSERT INTO %s VALUES (%s)' % (tname, ', '.join(numeric))
- _, raw_templ = get_args_templ(templ)
- return conn._storage.curs.executemany(raw_templ, lst)
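-
-# Illustrative template (table made up): for two-field rows insert_rows
-# builds 'INSERT INTO book VALUES (:1, :2)'; get_args_templ then converts
-# the numbered placeholders into the paramstyle of the underlying driver
-# before executemany is invoked.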
-
-def load_file(uri, tname, fname, mode, **kwargs):
- "Bulk insert a (binary or csv) file into a table"""
- assert mode in 'bc', 'Mode must be "b" (binary) or "c" (csv)'
- return _call('load_file', uri, tname, fname, mode, **kwargs)
-
-def dump_file(uri, query, fname, mode, **kwargs):
- "Dump the result of a query into a (binary or csv) file"
- assert mode in 'bc', 'Mode must be "b" (binary) or "c" (csv)'
- return _call('dump_file', uri, query, fname, mode, **kwargs)
-
-########################## introspection routines ######################
-
-def get_sizeK(conn, table):
- "Return the size of a table (in kilobytes), dispatching on the database type"
- return _call('get_sizeK', conn, table)
-
-def get_tables(conn, schema=None):
- """Return the names of the tables in the current database
- (and schema, if any)"""
- return _call('get_tables', conn, schema)
-
-def get_views(conn, schema=None):
- """Return the names of the views in the current database
- (and schema, if any)"""
- return _call('get_views', conn, schema)
-
-def exists_table(conn, tname, schema=None):
- "Check if a table exists"
- return _call('exists_table', conn, tname)
-
-def get_descr(conn, tname):
- "Return the DB API 2 description as a list of rows"
- return conn.execute('SELECT * FROM %s WHERE 1=0;' % tname).descr
-
-def inspect_columns(conn, tname, tuples=False):
- """
- Return a list of strings "fieldname fieldtype(size)" or of tuples
- (fieldname, fieldtype, size).
- """
- codemap = CODEMAP[conn.dbtype]
- ls = []
- for x in get_descr(conn, tname):
- fname, ftype, fsize = x.name, codemap[x.type_code], x.internal_size
- if tuples:
- ls.append((fname, ftype, fsize))
- else:
- size = '(%s)' % fsize if ftype == 'VARCHAR' and fsize > 0 else ''
- ls.append('%s %s%s' % (fname, ftype, size))
- return ls
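-
-# Illustrative output (table, column and size are made up):
-#   inspect_columns(conn, 'book')              -> ['title VARCHAR(128)', ...]
-#   inspect_columns(conn, 'book', tuples=True) -> [('title', 'VARCHAR', 128), ...]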
-
-def get_fields(conn, tname):
- """
- Return the names of the columns of a table (must be ASCII).
- """
- return [x.name for x in get_descr(conn, tname)]
-
-def get_kfields(conn, tname):
- """
- Return the names of the primary key column(s) of a table (must be ASCII).
- """
- return map(str, _call('get_kfields', conn, tname))
-
-def get_dfields(conn, tname):
- """
- Return the names of the data column(s) of a table (must be ASCII).
- """
- kfields = set(get_kfields(conn, tname))
- return [name for name in get_fields(conn, tname) if name not in kfields]
-
-########################## schema management ###########################
-
-## the following routines are postgres-only
-
-set_schema = do('SET search_path TO :schema')
-
-exists_schema = do("SELECT nspname FROM pg_namespace WHERE nspname=:schema")
-
-def drop_schema(db, schema):
- "Drop the given schema and all the objects it contains (CASCADE)"
- db.execute('DROP SCHEMA %s CASCADE' % schema)
-
-def create_schema(db, schema, force=False, schema_dir=None):
- """
- Create the specified schema. If the schema exists already
- an error is raised, unless force is True: in that case the schema
- is dropped and recreated. At the end the search path is set to the
- newly created schema.
- """
- if force and exists_schema(db, schema):
- drop_schema(db, schema)
- db.execute('CREATE SCHEMA %s' % schema)
- set_schema(db, schema)
- if schema_dir:
- runscripts(db, schema_dir, ('.sql', '.py'))
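-
-# Usage sketch (schema name and directory are hypothetical):
-#   create_schema(db, 'reporting', force=True, schema_dir='/my/scripts')
-# drops a pre-existing 'reporting' schema, recreates it, makes it the
-# current schema and runs the versioned scripts found in schema_dir.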