-rw-r--r--  NEWS                           |  24
-rw-r--r--  ZPsycopgDA/DA.py               |   2
-rw-r--r--  doc/src/advanced.rst           |  21
-rw-r--r--  doc/src/conf.py                |   4
-rw-r--r--  doc/src/connection.rst         |  84
-rw-r--r--  doc/src/cursor.rst             |  53
-rw-r--r--  doc/src/faq.rst                |   4
-rw-r--r--  doc/src/usage.rst              |  26
-rw-r--r--  lib/__init__.py                |   8
-rw-r--r--  lib/extensions.py              |  14
-rw-r--r--  psycopg/adapter_datetime.h     |   4
-rw-r--r--  psycopg/adapter_mxdatetime.c   |   8
-rw-r--r--  psycopg/adapter_pdecimal.c     |  35
-rw-r--r--  psycopg/adapter_pfloat.c       |  25
-rw-r--r--  psycopg/adapter_pint.c         | 266
-rw-r--r--  psycopg/adapter_pint.h         |  53
-rw-r--r--  psycopg/config.h               |   4
-rw-r--r--  psycopg/connection.h           |  35
-rw-r--r--  psycopg/connection_int.c       | 290
-rw-r--r--  psycopg/connection_type.c      | 222
-rw-r--r--  psycopg/cursor.h               |   2
-rw-r--r--  psycopg/cursor_type.c          |  60
-rw-r--r--  psycopg/green.c                |  15
-rw-r--r--  psycopg/lobject.h              |   2
-rw-r--r--  psycopg/lobject_int.c          |   2
-rw-r--r--  psycopg/lobject_type.c         |   4
-rw-r--r--  psycopg/pqpath.c               | 200
-rw-r--r--  psycopg/pqpath.h               |   6
-rw-r--r--  psycopg/psycopgmodule.c        |  54
-rw-r--r--  psycopg/python.h               |   3
-rw-r--r--  psycopg/typecast.c             |  15
-rw-r--r--  psycopg/typecast_mxdatetime.c  |   8
-rw-r--r--  psycopg/xid_type.c             |   2
-rw-r--r--  scripts/ticket58.py            |  75
-rw-r--r--  setup.py                       | 371
-rwxr-xr-x  tests/test_connection.py       | 279
-rwxr-xr-x  tests/test_copy.py             |   9
-rwxr-xr-x  tests/test_dates.py            |   6
-rwxr-xr-x  tests/types_basic.py           |  10
39 files changed, 1820 insertions, 485 deletions
diff --git a/NEWS b/NEWS
index 0037c3b..dd59596 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,27 @@
+What's new in psycopg 2.4.2
+---------------------------
+
+ - Added 'set_session()' method and 'autocommit' property to the
+ connection. Added support for read-only sessions and, for PostgreSQL
+ 9.1, for the "repeatable read" isolation level and the "deferrable"
+ transaction property.
+ - Psycopg no longer executes queries at connection time to find the
+ default isolation level.
+ - Fixed a bug in multithread code potentially causing loss of sync
+ with the server communication or a lockup of the client (ticket #55).
+ - Don't fail import if the mx.DateTime module can't be found, even if its
+ support was built (ticket #53).
+ - Fixed escape for negative numbers prefixed by minus operator
+ (ticket #57).
+ - Fixed refcount issue during copy. Reported and fixed by Dave
+ Malcolm (ticket #58, Red Hat Bug 711095).
+ - Trying to execute concurrent operations on the same connection
+ through concurrent green threads results in an error instead of a
+ deadlock.
+ - Fixed detection of pg_config on Windows. Report and fix, plus some
+ long-needed setup.py cleanup by Steve Lacy: thanks!
+
+
What's new in psycopg 2.4.1
---------------------------
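
A minimal usage sketch of the 2.4.2 additions described in the entry above; the "dbname=test" connection string and the use of a scratch database are illustrative assumptions::

    import psycopg2
    from psycopg2 import extensions

    conn = psycopg2.connect("dbname=test")

    # New in 2.4.2: configure the following transactions in a single call
    # (must be invoked while no transaction is in progress).
    conn.set_session(extensions.ISOLATION_LEVEL_REPEATABLE_READ, readonly=True)
    cur = conn.cursor()
    cur.execute("SELECT 1")    # runs in a read-only, repeatable read transaction
    conn.rollback()

    # New in 2.4.2: the autocommit property replaces the old
    # set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) idiom.
    conn.set_session(readonly=False)
    conn.autocommit = True
    cur.execute("VACUUM")      # commands that must run outside a transaction
    conn.close()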
diff --git a/ZPsycopgDA/DA.py b/ZPsycopgDA/DA.py
index 7a681e4..f515437 100644
--- a/ZPsycopgDA/DA.py
+++ b/ZPsycopgDA/DA.py
@@ -16,7 +16,7 @@
# their work without bothering about the module dependencies.
-ALLOWED_PSYCOPG_VERSIONS = ('2.4-beta1', '2.4-beta2', '2.4', '2.4.1')
+ALLOWED_PSYCOPG_VERSIONS = ('2.4-beta1', '2.4-beta2', '2.4', '2.4.1', '2.4.2')
import sys
import time
diff --git a/doc/src/advanced.rst b/doc/src/advanced.rst
index 3d95cb2..c2c3a15 100644
--- a/doc/src/advanced.rst
+++ b/doc/src/advanced.rst
@@ -239,9 +239,8 @@ be sent from Python code simply executing a :sql:`NOTIFY` command in an
`~cursor.execute()` call.
Because of the way sessions interact with notifications (see |NOTIFY|_
-documentation), you should keep the connection in :ref:`autocommit
-<autocommit>` mode if you wish to receive or send notifications in a timely
-manner.
+documentation), you should keep the connection in `~connection.autocommit`
+mode if you wish to receive or send notifications in a timely manner.
.. |LISTEN| replace:: :sql:`LISTEN`
.. _LISTEN: http://www.postgresql.org/docs/9.0/static/sql-listen.html
@@ -373,12 +372,14 @@ When an asynchronous query is being executed, `connection.isexecuting()` returns
connection.
There are several limitations in using asynchronous connections: the
-connection is always in :ref:`autocommit <autocommit>` mode and it is not
-possible to change it using `~connection.set_isolation_level()`. So a
+connection is always in `~connection.autocommit` mode and it is not
+possible to change it. So a
transaction is not implicitly started at the first query and is not possible
to use methods `~connection.commit()` and `~connection.rollback()`: you can
manually control transactions using `~cursor.execute()` to send database
-commands such as :sql:`BEGIN`, :sql:`COMMIT` and :sql:`ROLLBACK`.
+commands such as :sql:`BEGIN`, :sql:`COMMIT` and :sql:`ROLLBACK`. Similarly,
+`~connection.set_session()` can't be used, but it is still possible to invoke the
+:sql:`SET` command with the proper :sql:`default_transaction_...` parameter.
With asynchronous connections it is also not possible to use
`~connection.set_client_encoding()`, `~cursor.executemany()`, :ref:`large
@@ -431,11 +432,9 @@ SQLAlchemy_) to be used in coroutine-based programs.
.. warning::
Psycopg connections are not *green thread safe* and can't be used
- concurrently by different green threads. Each connection has a lock
- used to serialize requests from different cursors to the backend process.
- The lock is held for the duration of the command: if the control switched
- to a different thread and the latter tried to access the same connection,
- the result would be a deadlock.
+ concurrently by different green threads. Trying to execute more than one
+ command at a time using one cursor per thread will result in an error (or a
+ deadlock on versions before 2.4.2).
Therefore, programmers are advised to either avoid sharing connections
between coroutines or to use a library-friendly lock to synchronize shared
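
A sketch of the manual transaction control described above for asynchronous connections; the wait() helper is the standard poll loop, and the "dbname=test" DSN and "test" table are illustrative assumptions::

    import select
    import psycopg2
    from psycopg2 import extensions

    def wait(conn):
        # Poll the connection until the pending operation is complete.
        while True:
            state = conn.poll()
            if state == extensions.POLL_OK:
                break
            elif state == extensions.POLL_READ:
                select.select([conn.fileno()], [], [])
            elif state == extensions.POLL_WRITE:
                select.select([], [conn.fileno()], [])
            else:
                raise psycopg2.OperationalError("poll() returned %s" % state)

    aconn = psycopg2.connect("dbname=test", async=1)
    wait(aconn)
    acurs = aconn.cursor()

    # set_session() is not available: send the equivalent SET instead.
    acurs.execute("SET default_transaction_isolation TO 'repeatable read'")
    wait(aconn)

    # Transactions must be opened and closed explicitly.
    acurs.execute("BEGIN")
    wait(aconn)
    acurs.execute("INSERT INTO test (num, data) VALUES (42, 'foo')")
    wait(aconn)
    acurs.execute("COMMIT")
    wait(aconn)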
diff --git a/doc/src/conf.py b/doc/src/conf.py
index 56a0768..db64f86 100644
--- a/doc/src/conf.py
+++ b/doc/src/conf.py
@@ -111,10 +111,10 @@ rst_epilog = """
.. _DBAPI: http://www.python.org/dev/peps/pep-0249/
.. _transaction isolation level:
- http://www.postgresql.org/docs/9.0/static/transaction-iso.html
+ http://www.postgresql.org/docs/9.1/static/transaction-iso.html
.. _serializable isolation level:
- http://www.postgresql.org/docs/9.0/static/transaction-iso.html#XACT-SERIALIZABLE
+ http://www.postgresql.org/docs/9.1/static/transaction-iso.html#XACT-SERIALIZABLE
.. _mx.DateTime: http://www.egenix.com/products/python/mxBase/mxDateTime/
diff --git a/doc/src/connection.rst b/doc/src/connection.rst
index 0103258..f9a1b86 100644
--- a/doc/src/connection.rst
+++ b/doc/src/connection.rst
@@ -327,11 +327,93 @@ The ``connection`` class
pair: Transaction; Autocommit
pair: Transaction; Isolation level
- .. _autocommit:
+ .. method:: set_session([isolation_level,] [readonly,] [deferrable,] [autocommit])
+
+ Set one or more parameters for the next transactions or statements in
+ the current session. See |SET TRANSACTION|_ for further details.
+
+ .. |SET TRANSACTION| replace:: :sql:`SET TRANSACTION`
+ .. _SET TRANSACTION: http://www.postgresql.org/docs/9.1/static/sql-set-transaction.html
+
+ :param isolation_level: set the `isolation level`_ for the next
+ transactions/statements. The value can be one of the
+ :ref:`constants <isolation-level-constants>` defined in the
+ `~psycopg2.extensions` module or one of the literal values
+ ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``,
+ ``SERIALIZABLE``.
+ :param readonly: if `!True`, set the connection to read only;
+ read/write if `!False`.
+ :param deferrable: if `!True`, set the connection to deferrable;
+ non-deferrable if `!False`. Only available from PostgreSQL 9.1.
+ :param autocommit: switch the connection to autocommit mode: not a
+ PostgreSQL session setting but an alias for setting the
+ `autocommit` attribute.
+
+ The parameters *isolation_level*, *readonly* and *deferrable* also
+ accept the string ``DEFAULT`` as a value: the effect is to reset the
+ parameter to the server default.
+
+ .. _isolation level:
+ http://www.postgresql.org/docs/9.1/static/transaction-iso.html
+
+ The function must be invoked with no transaction in progress. At every
+ function invocation, only the specified parameters are changed.
+
+ The defaults for these values are defined by the server configuration:
+ see values for |default_transaction_isolation|__,
+ |default_transaction_read_only|__, |default_transaction_deferrable|__.
+
+ .. |default_transaction_isolation| replace:: :sql:`default_transaction_isolation`
+ .. __: http://www.postgresql.org/docs/9.1/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-ISOLATION
+ .. |default_transaction_read_only| replace:: :sql:`default_transaction_read_only`
+ .. __: http://www.postgresql.org/docs/9.1/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-READ-ONLY
+ .. |default_transaction_deferrable| replace:: :sql:`default_transaction_deferrable`
+ .. __: http://www.postgresql.org/docs/9.1/static/runtime-config-client.html#GUC-DEFAULT-TRANSACTION-DEFERRABLE
+
+ .. note::
+
+ There is currently no builtin method to read the current value for
+ the parameters: use :sql:`SHOW default_transaction_...` to read
+ the values from the backend.
+
+ .. versionadded:: 2.4.2
+
+
+ .. attribute:: autocommit
+
+ Read/write attribute: if `!True`, no transaction is handled by the
+ driver and every statement sent to the backend has immediate effect;
+ if `!False`, a new transaction is started at the first command
+ execution: the methods `commit()` or `rollback()` must be manually
+ invoked to terminate the transaction.
+
+ The autocommit mode is useful to execute commands that must be run
+ outside a transaction, such as :sql:`CREATE DATABASE` or
+ :sql:`VACUUM`.
+
+ The default is `!False` (manual commit) as per DBAPI specification.
+
+ .. warning::
+
+ By default, any query execution, including a simple :sql:`SELECT`,
+ will start a transaction: for long-running programs, if no further
+ action is taken, the session will remain "idle in transaction", an
+ undesirable condition for several reasons (locks are held by
+ the session, tables bloat...). For long-lived scripts, either
+ make sure to terminate a transaction as soon as possible or use an
+ autocommit connection.
+
+ .. versionadded:: 2.4.2
+
.. attribute:: isolation_level
.. method:: set_isolation_level(level)
+ .. note::
+
+ From version 2.4.2, `set_session()` and `autocommit` offer
+ finer control over the transaction characteristics.
+
Read or set the `transaction isolation level`_ for the current session.
The level defines the different phenomena that can happen in the
database between concurrent transactions.
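
A sketch of set_session() with literal values and of reading the defaults back with SHOW, as suggested in the note above; the "dbname=test" connection string is an illustrative assumption::

    import psycopg2

    conn = psycopg2.connect("dbname=test")

    # String values are accepted as well as the constants.
    conn.set_session(isolation_level="SERIALIZABLE", readonly=True)

    # There is no builtin accessor: read the current default back with SHOW.
    cur = conn.cursor()
    cur.execute("SHOW default_transaction_isolation")
    print(cur.fetchone()[0])        # 'serializable'
    conn.rollback()

    # "DEFAULT" resets a parameter to the server configuration.
    conn.set_session(isolation_level="DEFAULT")
    conn.close()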
diff --git a/doc/src/cursor.rst b/doc/src/cursor.rst
index 3982d94..d5cf524 100644
--- a/doc/src/cursor.rst
+++ b/doc/src/cursor.rst
@@ -444,20 +444,23 @@ The ``cursor`` class
The :sql:`COPY` command is a PostgreSQL extension to the SQL standard.
As such, its support is a Psycopg extension to the |DBAPI|.
- .. method:: copy_from(file, table, sep='\\t', null='\\N', columns=None)
-
+ .. method:: copy_from(file, table, sep='\\t', null='\\N', size=8192, columns=None)
+
Read data *from* the file-like object *file* appending them to
- the table named *table*. *file* must have both
- `!read()` and `!readline()` method. See :ref:`copy` for an
- overview.
+ the table named *table*. See :ref:`copy` for an overview.
- The optional argument *sep* is the columns separator and
- *null* represents :sql:`NULL` values in the file.
+ :param file: file-like object to read data from. It must have both
+ `!read()` and `!readline()` methods.
+ :param table: name of the table to copy data into.
+ :param sep: column separator expected in the file. Defaults to a tab.
+ :param null: textual representation of :sql:`NULL` in the file.
+ :param size: size of the buffer used to read from the file.
+ :param columns: iterable with the names of the columns to import.
+ The length and types should match the content of the file to read.
+ If not specified, it is assumed that the entire table matches the
+ file structure.
- The *columns* argument is a sequence containing the name of the
- fields where the read data will be entered. Its length and column
- type should match the content of the read file. If not specifies, it
- is assumed that the entire table matches the file structure.
+ Example::
>>> f = StringIO("42\tfoo\n74\tbar\n")
>>> cur.copy_from(f, 'test', columns=('num', 'data'))
@@ -476,14 +479,17 @@ The ``cursor`` class
.. method:: copy_to(file, table, sep='\\t', null='\\N', columns=None)
Write the content of the table named *table* *to* the file-like
- object *file*. *file* must have a `!write()` method.
- See :ref:`copy` for an overview.
+ object *file*. See :ref:`copy` for an overview.
- The optional argument *sep* is the columns separator and
- *null* represents :sql:`NULL` values in the file.
+ :param file: file-like object to write data into. It must have a
+ `!write()` method.
+ :param table: name of the table to copy data from.
+ :param sep: column separator to use in the file. Defaults to a tab.
+ :param null: textual representation of :sql:`NULL` in the file.
+ :param columns: iterable with the names of the columns to export.
+ If not specified, export all the columns.
- The *columns* argument is a sequence of field names: if not
- `!None` only the specified fields will be included in the dump.
+ Example::
>>> cur.copy_to(sys.stdout, 'test', sep="|")
1|100|abc'def
@@ -499,17 +505,18 @@ The ``cursor`` class
from the backend.
- .. method:: copy_expert(sql, file [, size])
+ .. method:: copy_expert(sql, file, size=8192)
Submit a user-composed :sql:`COPY` statement. The method is useful to
handle all the parameters that PostgreSQL makes available (see
|COPY|__ command documentation).
- *file* must be an open, readable file for :sql:`COPY FROM` or an
- open, writeable file for :sql:`COPY TO`. The optional *size*
- argument, when specified for a :sql:`COPY FROM` statement, will be
- passed to *file*\ 's read method to control the read buffer
- size.
+ :param sql: the :sql:`COPY` statement to execute.
+ :param file: a file-like object; must be a readable file for
+ :sql:`COPY FROM` or a writeable file for :sql:`COPY TO`.
+ :param size: size of the read buffer to be used in :sql:`COPY FROM`.
+
+ Example::
>>> cur.copy_expert("COPY test TO STDOUT WITH CSV HEADER", sys.stdout)
id,num,data
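
A sketch tying together the parameters documented above, including the newly documented read buffer size; the "test" table with "num" and "data" columns and the "dbname=test" DSN are illustrative assumptions::

    import sys
    import psycopg2
    from StringIO import StringIO

    conn = psycopg2.connect("dbname=test")
    cur = conn.cursor()

    # Load two rows; "\N" marks a NULL value, tab is the default separator.
    f = StringIO("42\tfoo\n74\t\\N\n")
    cur.copy_from(f, 'test', columns=('num', 'data'), size=16384)

    # Dump the table back out through a user-composed COPY statement.
    cur.copy_expert("COPY test TO STDOUT WITH CSV HEADER", sys.stdout)
    conn.commit()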
diff --git a/doc/src/faq.rst b/doc/src/faq.rst
index 4ebf15a..e7fe76f 100644
--- a/doc/src/faq.rst
+++ b/doc/src/faq.rst
@@ -22,8 +22,8 @@ Why does `!psycopg2` leave database sessions "idle in transaction"?
call one of the transaction closing methods before leaving the connection
unused for a long time (which may also be a few seconds, depending on the
concurrency level in your database). Alternatively you can use a
- connection in :ref:`autocommit <autocommit>` mode to avoid a new
- transaction to be started at the first command.
+ connection in `~connection.autocommit` mode to avoid starting a new
+ transaction at the first command.
I receive the error *current transaction is aborted, commands ignored until end of transaction block* and can't do anything else!
There was a problem *in the previous* command to the database, which
diff --git a/doc/src/usage.rst b/doc/src/usage.rst
index 4d039de..5f6c5b1 100644
--- a/doc/src/usage.rst
+++ b/doc/src/usage.rst
@@ -489,7 +489,7 @@ rounded to the nearest minute, with an error of up to 30 seconds.
versions use `psycopg2.extras.register_tstz_w_secs()`.
-.. index:: Transaction, Begin, Commit, Rollback, Autocommit
+.. index:: Transaction, Begin, Commit, Rollback, Autocommit, Read only
.. _transactions-control:
@@ -503,7 +503,7 @@ The following database commands will be executed in the context of the same
transaction -- not only the commands issued by the first cursor, but the ones
issued by all the cursors created by the same connection. Should any command
fail, the transaction will be aborted and no further command will be executed
-until a call to the `connection.rollback()` method.
+until a call to the `~connection.rollback()` method.
The connection is responsible to terminate its transaction, calling either the
`~connection.commit()` or `~connection.rollback()` method. Committed
@@ -516,9 +516,23 @@ It is possible to set the connection in *autocommit* mode: this way all the
commands executed will be immediately committed and no rollback is possible. A
few commands (e.g. :sql:`CREATE DATABASE`, :sql:`VACUUM`...) require to be run
outside any transaction: in order to be able to run these commands from
-Psycopg, the session must be in autocommit mode. Read the documentation for
-`connection.set_isolation_level()` to know how to change the commit mode.
+Psycopg, the session must be in autocommit mode: you can use the
+`~connection.autocommit` property (`~connection.set_isolation_level()` in
+older versions).
+.. warning::
+
+ By default, even a simple :sql:`SELECT` will start a transaction: in
+ long-running programs, if no further action is taken, the session will
+ remain "idle in transaction", an undesirable condition for several
+ reasons (locks are held by the session, tables bloat...). For long-lived
+ scripts, either make sure to terminate a transaction as soon as possible
+ or use an autocommit connection.
+
+A few other transaction properties can be set session-wide by the
+`!connection`: for instance, it is possible to have read-only transactions or
+to change the isolation level. See the `~connection.set_session()` method for all
+the details.
.. index::
@@ -594,8 +608,8 @@ forking web deploy method such as FastCGI ensure to create the connections
.. __: http://www.postgresql.org/docs/9.0/static/libpq-connect.html#LIBPQ-CONNECT
-Connections shouldn't be shared either by different green threads: doing so
-may result in a deadlock. See :ref:`green-support` for further details.
+Connections shouldn't be shared either by different green threads: see
+:ref:`green-support` for further details.
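
A sketch of the two ways suggested above to avoid sessions lingering "idle in transaction"; the "test" table and the "dbname=test" DSN are illustrative assumptions::

    import psycopg2

    conn = psycopg2.connect("dbname=test")
    cur = conn.cursor()

    # 1. terminate every transaction as soon as possible...
    try:
        cur.execute("SELECT count(*) FROM test")
        print(cur.fetchone()[0])
        conn.commit()        # rollback() would end the transaction just as well
    except Exception:
        conn.rollback()
        raise

    # 2. ...or don't have the driver start transactions at all.
    conn.autocommit = True
    cur.execute("SELECT count(*) FROM test")
    print(cur.fetchone()[0])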
diff --git a/lib/__init__.py b/lib/__init__.py
index 48a9847..3831210 100644
--- a/lib/__init__.py
+++ b/lib/__init__.py
@@ -40,20 +40,16 @@ Homepage: http://initd.org/projects/psycopg2
# Import modules needed by _psycopg to allow tools like py2exe to do
# their work without bothering about the module dependencies.
-#
-# TODO: we should probably use the Warnings framework to signal a missing
-# module instead of raising an exception (in case we're running a thin
-# embedded Python or something even more devious.)
import sys, warnings
-if sys.version_info[0] >= 2 and sys.version_info[1] >= 3:
+if sys.version_info >= (2, 3):
try:
import datetime as _psycopg_needs_datetime
except:
warnings.warn(
"can't import datetime module probably needed by _psycopg",
RuntimeWarning)
-if sys.version_info[0] >= 2 and sys.version_info[1] >= 4:
+if sys.version_info >= (2, 4):
try:
import decimal as _psycopg_needs_decimal
except:
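
A short sketch of why the tuple comparison used above is preferable to the old per-component test::

    import sys

    def old_check(vi):
        return vi[0] >= 2 and vi[1] >= 3

    def new_check(vi):
        return vi >= (2, 3)

    # The per-component test mis-handles future major versions:
    assert old_check((3, 0)) is False   # wrong: 3.0 is newer than 2.3
    assert new_check((3, 0)) is True    # tuple comparison orders versions correctly
    assert new_check(sys.version_info)  # holds on any interpreter >= 2.3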
diff --git a/lib/extensions.py b/lib/extensions.py
index 82e17fa..1b36e2e 100644
--- a/lib/extensions.py
+++ b/lib/extensions.py
@@ -39,7 +39,7 @@ from psycopg2._psycopg import DECIMALARRAY, FLOATARRAY, INTEGERARRAY, INTERVALAR
from psycopg2._psycopg import LONGINTEGERARRAY, ROWIDARRAY, STRINGARRAY, TIMEARRAY
from psycopg2._psycopg import UNICODEARRAY
-from psycopg2._psycopg import Binary, Boolean, Float, QuotedString, AsIs
+from psycopg2._psycopg import Binary, Boolean, Int, Float, QuotedString, AsIs
try:
from psycopg2._psycopg import MXDATE, MXDATETIME, MXINTERVAL, MXTIME
from psycopg2._psycopg import MXDATEARRAY, MXDATETIMEARRAY, MXINTERVALARRAY, MXTIMEARRAY
@@ -68,13 +68,11 @@ except ImportError:
pass
"""Isolation level values."""
-ISOLATION_LEVEL_AUTOCOMMIT = 0
-ISOLATION_LEVEL_READ_COMMITTED = 1
-ISOLATION_LEVEL_SERIALIZABLE = 2
-
-# PostgreSQL maps the the other standard values to already defined levels
-ISOLATION_LEVEL_REPEATABLE_READ = ISOLATION_LEVEL_SERIALIZABLE
-ISOLATION_LEVEL_READ_UNCOMMITTED = ISOLATION_LEVEL_READ_COMMITTED
+ISOLATION_LEVEL_AUTOCOMMIT = 0
+ISOLATION_LEVEL_READ_UNCOMMITTED = 1
+ISOLATION_LEVEL_READ_COMMITTED = 2
+ISOLATION_LEVEL_REPEATABLE_READ = 3
+ISOLATION_LEVEL_SERIALIZABLE = 4
"""psycopg connection status values."""
STATUS_SETUP = 0
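
For reference, a sketch of the remapped values above: every standard isolation level now has a distinct constant instead of being aliased to another one::

    from psycopg2 import extensions

    assert extensions.ISOLATION_LEVEL_AUTOCOMMIT == 0
    assert extensions.ISOLATION_LEVEL_READ_UNCOMMITTED == 1
    assert extensions.ISOLATION_LEVEL_READ_COMMITTED == 2
    assert extensions.ISOLATION_LEVEL_REPEATABLE_READ == 3
    assert extensions.ISOLATION_LEVEL_SERIALIZABLE == 4

    # Any of them can be passed to set_isolation_level() or set_session(),
    # e.g. conn.set_session(extensions.ISOLATION_LEVEL_SERIALIZABLE).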
diff --git a/psycopg/adapter_datetime.h b/psycopg/adapter_datetime.h
index dd59e9b..09abd88 100644
--- a/psycopg/adapter_datetime.h
+++ b/psycopg/adapter_datetime.h
@@ -45,11 +45,11 @@ typedef struct {
} pydatetimeObject;
+HIDDEN int psyco_adapter_datetime_init(void);
+
/* functions exported to psycopgmodule.c */
#ifdef PSYCOPG_DEFAULT_PYDATETIME
-HIDDEN int psyco_adapter_datetime_init(void);
-
HIDDEN PyObject *psyco_Date(PyObject *module, PyObject *args);
#define psyco_Date_doc \
"Date(year, month, day) -> new date\n\n" \
diff --git a/psycopg/adapter_mxdatetime.c b/psycopg/adapter_mxdatetime.c
index 793dfba..abe73f8 100644
--- a/psycopg/adapter_mxdatetime.c
+++ b/psycopg/adapter_mxdatetime.c
@@ -26,7 +26,6 @@
#define PSYCOPG_MODULE
#include "psycopg/psycopg.h"
-/* TODO: check if still compiles ok: I have no mx on this box */
#include "psycopg/adapter_mxdatetime.h"
#include "psycopg/microprotocols_proto.h"
@@ -34,13 +33,16 @@
#include <string.h>
+/* Return 0 on success, -1 on failure, but don't set an exception */
+
int
psyco_adapter_mxdatetime_init(void)
{
Dprintf("psyco_adapter_mxdatetime_init: mx.DateTime init");
- if(mxDateTime_ImportModuleAndAPI()) {
- PyErr_SetString(PyExc_ImportError, "mx.DateTime initialization failed");
+ if (mxDateTime_ImportModuleAndAPI()) {
+ Dprintf("psyco_adapter_mxdatetime_init: mx.DateTime initialization failed");
+ PyErr_Clear();
return -1;
}
return 0;
diff --git a/psycopg/adapter_pdecimal.c b/psycopg/adapter_pdecimal.c
index 9b57346..e14e769 100644
--- a/psycopg/adapter_pdecimal.c
+++ b/psycopg/adapter_pdecimal.c
@@ -41,8 +41,10 @@ pdecimal_getquoted(pdecimalObject *self, PyObject *args)
PyObject *check, *res = NULL;
check = PyObject_CallMethod(self->wrapped, "is_finite", NULL);
if (check == Py_True) {
- res = PyObject_Str(self->wrapped);
- goto end;
+ if (!(res = PyObject_Str(self->wrapped))) {
+ goto end;
+ }
+ goto output;
}
else if (check) {
res = Bytes_FromString("'NaN'::numeric");
@@ -70,16 +72,39 @@ pdecimal_getquoted(pdecimalObject *self, PyObject *args)
goto end;
}
- res = PyObject_Str(self->wrapped);
+ /* wrapped is finite */
+ if (!(res = PyObject_Str(self->wrapped))) {
+ goto end;
+ }
+
+ /* res may be unicode and may suffer for issue #57 */
+output:
+
#if PY_MAJOR_VERSION > 2
/* unicode to bytes in Py3 */
- if (res) {
+ {
PyObject *tmp = PyUnicode_AsUTF8String(res);
Py_DECREF(res);
- res = tmp;
+ if (!(res = tmp)) {
+ goto end;
+ }
}
#endif
+ if ('-' == Bytes_AS_STRING(res)[0]) {
+ /* Prepend a space in front of negative numbers (ticket #57) */
+ PyObject *tmp;
+ if (!(tmp = Bytes_FromString(" "))) {
+ Py_DECREF(res);
+ res = NULL;
+ goto end;
+ }
+ Bytes_ConcatAndDel(&tmp, res);
+ if (!(res = tmp)) {
+ goto end;
+ }
+ }
+
end:
Py_XDECREF(check);
return res;
diff --git a/psycopg/adapter_pfloat.c b/psycopg/adapter_pfloat.c
index 715ed8f..1b8074f 100644
--- a/psycopg/adapter_pfloat.c
+++ b/psycopg/adapter_pfloat.c
@@ -49,18 +49,37 @@ pfloat_getquoted(pfloatObject *self, PyObject *args)
rv = Bytes_FromString("'-Infinity'::float");
}
else {
- rv = PyObject_Repr(self->wrapped);
+ if (!(rv = PyObject_Repr(self->wrapped))) {
+ goto exit;
+ }
#if PY_MAJOR_VERSION > 2
/* unicode to bytes in Py3 */
- if (rv) {
+ {
PyObject *tmp = PyUnicode_AsUTF8String(rv);
Py_DECREF(rv);
- rv = tmp;
+ if (!(rv = tmp)) {
+ goto exit;
+ }
}
#endif
+
+ if ('-' == Bytes_AS_STRING(rv)[0]) {
+ /* Prepend a space in front of negative numbers (ticket #57) */
+ PyObject *tmp;
+ if (!(tmp = Bytes_FromString(" "))) {
+ Py_DECREF(rv);
+ rv = NULL;
+ goto exit;
+ }
+ Bytes_ConcatAndDel(&tmp, rv);
+ if (!(rv = tmp)) {
+ goto exit;
+ }
+ }
}
+exit:
return rv;
}
diff --git a/psycopg/adapter_pint.c b/psycopg/adapter_pint.c
new file mode 100644
index 0000000..ad89a06
--- /dev/null
+++ b/psycopg/adapter_pint.c
@@ -0,0 +1,266 @@
+/* adapter_int.c - psycopg pint type wrapper implementation
+ *
+ * Copyright (C) 2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#define PSYCOPG_MODULE
+#include "psycopg/psycopg.h"
+
+#include "psycopg/adapter_pint.h"
+#include "psycopg/microprotocols_proto.h"
+
+
+/** the Int object **/
+
+static PyObject *
+pint_getquoted(pintObject *self, PyObject *args)
+{
+ PyObject *res;
+ if (!(res = PyObject_Str(self->wrapped))) {
+ goto exit;
+ }
+
+#if PY_MAJOR_VERSION > 2
+ /* unicode to bytes in Py3 */
+ {
+ PyObject *tmp = PyUnicode_AsUTF8String(res);
+ Py_DECREF(res);
+ if (!(res = tmp)) {
+ goto exit;
+ }
+ }
+#endif
+
+ if ('-' == Bytes_AS_STRING(res)[0]) {
+ /* Prepend a space in front of negative numbers (ticket #57) */
+ PyObject *tmp;
+ if (!(tmp = Bytes_FromString(" "))) {
+ Py_DECREF(res);
+ res = NULL;
+ goto exit;
+ }
+ Bytes_ConcatAndDel(&tmp, res);
+ if (!(res = tmp)) {
+ goto exit;
+ }
+ }
+
+exit:
+ return res;
+}
+
+static PyObject *
+pint_str(pintObject *self)
+{
+ return psycopg_ensure_text(pint_getquoted(self, NULL));
+}
+
+static PyObject *
+pint_conform(pintObject *self, PyObject *args)
+{
+ PyObject *res, *proto;
+
+ if (!PyArg_ParseTuple(args, "O", &proto)) return NULL;
+
+ if (proto == (PyObject*)&isqlquoteType)
+ res = (PyObject*)self;
+ else
+ res = Py_None;
+
+ Py_INCREF(res);
+ return res;
+}
+
+/** the int object */
+
+/* object member list */
+
+static struct PyMemberDef pintObject_members[] = {
+ {"adapted", T_OBJECT, offsetof(pintObject, wrapped), READONLY},
+ {NULL}
+};
+
+/* object method table */
+
+static PyMethodDef pintObject_methods[] = {
+ {"getquoted", (PyCFunction)pint_getquoted, METH_NOARGS,
+ "getquoted() -> wrapped object value as SQL-quoted string"},
+ {"__conform__", (PyCFunction)pint_conform, METH_VARARGS, NULL},
+ {NULL} /* Sentinel */
+};
+
+/* initialization and finalization methods */
+
+static int
+pint_setup(pintObject *self, PyObject *obj)
+{
+ Dprintf("pint_setup: init pint object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ self, Py_REFCNT(self)
+ );
+
+ Py_INCREF(obj);
+ self->wrapped = obj;
+
+ Dprintf("pint_setup: good pint object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ self, Py_REFCNT(self)
+ );
+ return 0;
+}
+
+static int
+pint_traverse(PyObject *obj, visitproc visit, void *arg)
+{
+ pintObject *self = (pintObject *)obj;
+
+ Py_VISIT(self->wrapped);
+ return 0;
+}
+
+static void
+pint_dealloc(PyObject* obj)
+{
+ pintObject *self = (pintObject *)obj;
+
+ Py_CLEAR(self->wrapped);
+
+ Dprintf("pint_dealloc: deleted pint object at %p, refcnt = "
+ FORMAT_CODE_PY_SSIZE_T,
+ obj, Py_REFCNT(obj)
+ );
+
+ Py_TYPE(obj)->tp_free(obj);
+}
+
+static int
+pint_init(PyObject *obj, PyObject *args, PyObject *kwds)
+{
+ PyObject *o;
+
+ if (!PyArg_ParseTuple(args, "O", &o))
+ return -1;
+
+ return pint_setup((pintObject *)obj, o);
+}
+
+static PyObject *
+pint_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ return type->tp_alloc(type, 0);
+}
+
+static void
+pint_del(PyObject* self)
+{
+ PyObject_GC_Del(self);
+}
+
+static PyObject *
+pint_repr(pintObject *self)
+{
+ return PyString_FromFormat("<psycopg2._psycopg.Int object at %p>",
+ self);
+}
+
+
+/* object type */
+
+#define pintType_doc \
+"Int(str) -> new Int adapter object"
+
+PyTypeObject pintType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "psycopg2._psycopg.Int",
+ sizeof(pintObject),
+ 0,
+ pint_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+
+ 0, /*tp_compare*/
+
+ (reprfunc)pint_repr, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+
+ 0, /*tp_call*/
+ (reprfunc)pint_str, /*tp_str*/
+
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ pintType_doc, /*tp_doc*/
+
+ pint_traverse, /*tp_traverse*/
+ 0, /*tp_clear*/
+
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+
+ /* Attribute descriptor and subclassing stuff */
+
+ pintObject_methods, /*tp_methods*/
+ pintObject_members, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+
+ pint_init, /*tp_init*/
+ 0, /*tp_alloc will be set to PyType_GenericAlloc in module init*/
+ pint_new, /*tp_new*/
+ (freefunc)pint_del, /*tp_free Low-level free-memory routine */
+ 0, /*tp_is_gc For PyObject_IS_GC */
+ 0, /*tp_bases*/
+ 0, /*tp_mro method resolution order */
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0 /*tp_weaklist*/
+};
+
+
+/** module-level functions **/
+
+PyObject *
+psyco_Int(PyObject *module, PyObject *args)
+{
+ PyObject *obj;
+
+ if (!PyArg_ParseTuple(args, "O", &obj))
+ return NULL;
+
+ return PyObject_CallFunctionObjArgs((PyObject *)&pintType, obj, NULL);
+}
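
A sketch of the user-visible effect of the adapters above (the ticket #57 fix): a space is prepended to negative numbers so that a preceding operator cannot merge with the sign into a "--" comment marker. The "dbname=test" DSN is an illustrative assumption::

    import psycopg2
    from psycopg2.extensions import adapt

    print(adapt(-42).getquoted())      # ' -42'  (note the leading space)
    print(adapt(-42.5).getquoted())    # ' -42.5'

    conn = psycopg2.connect("dbname=test")
    cur = conn.cursor()
    # Without the space this would render as "SELECT 10--42",
    # where "--" starts an SQL comment.
    print(cur.mogrify("SELECT 10-%s", (-42,)))   # 'SELECT 10- -42'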
diff --git a/psycopg/adapter_pint.h b/psycopg/adapter_pint.h
new file mode 100644
index 0000000..fd553e8
--- /dev/null
+++ b/psycopg/adapter_pint.h
@@ -0,0 +1,53 @@
+/* adapter_pint.h - definition for the psycopg int type wrapper
+ *
+ * Copyright (C) 2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
+ *
+ * This file is part of psycopg.
+ *
+ * psycopg2 is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * In addition, as a special exception, the copyright holders give
+ * permission to link this program with the OpenSSL library (or with
+ * modified versions of OpenSSL that use the same license as OpenSSL),
+ * and distribute linked combinations including the two.
+ *
+ * You must obey the GNU Lesser General Public License in all respects for
+ * all of the code used other than OpenSSL.
+ *
+ * psycopg2 is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ */
+
+#ifndef PSYCOPG_PINT_H
+#define PSYCOPG_PINT_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern HIDDEN PyTypeObject pintType;
+
+typedef struct {
+ PyObject_HEAD
+
+ /* this is the real object we wrap */
+ PyObject *wrapped;
+
+} pintObject;
+
+/* functions exported to psycopgmodule.c */
+
+HIDDEN PyObject *psyco_Int(PyObject *module, PyObject *args);
+#define psyco_Int_doc \
+ "Int(obj) -> new int value"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(PSYCOPG_PINT_H) */
diff --git a/psycopg/config.h b/psycopg/config.h
index 551cfe4..2112043 100644
--- a/psycopg/config.h
+++ b/psycopg/config.h
@@ -27,7 +27,7 @@
#define PSYCOPG_CONFIG_H 1
/* GCC 4.0 and later have support for specifying symbol visibility */
-#if __GNUC__ >= 4
+#if __GNUC__ >= 4 && !defined(__MINGW32__)
# define HIDDEN __attribute__((visibility("hidden")))
#else
# define HIDDEN
@@ -136,6 +136,8 @@ static int pthread_mutex_init(pthread_mutex_t *mutex, void* fake)
* in libxml2 code */
#define isinf(x) ((_fpclass(x) == _FPCLASS_PINF) ? 1 \
: ((_fpclass(x) == _FPCLASS_NINF) ? -1 : 0))
+
+#define strcasecmp(x, y) lstrcmpi(x, y)
#endif
#endif
diff --git a/psycopg/connection.h b/psycopg/connection.h
index 552b93d..7f512a1 100644
--- a/psycopg/connection.h
+++ b/psycopg/connection.h
@@ -59,14 +59,6 @@ extern "C" {
later change it, she must know what she's doing... these are the queries we
need to issue */
#define psyco_datestyle "SET DATESTYLE TO 'ISO'"
-#define psyco_transaction_isolation "SHOW default_transaction_isolation"
-
-/* possible values for isolation_level */
-typedef enum {
- ISOLATION_LEVEL_AUTOCOMMIT = 0,
- ISOLATION_LEVEL_READ_COMMITTED = 1,
- ISOLATION_LEVEL_SERIALIZABLE = 2,
-} conn_isolation_level_t;
extern HIDDEN PyTypeObject connectionType;
@@ -87,7 +79,6 @@ typedef struct {
long int closed; /* 1 means connection has been closed;
2 that something horrible happened */
- long int isolation_level; /* isolation level for this connection */
long int mark; /* number of commits/rollbacks done so far */
int status; /* status of the connection */
XidObject *tpc_xid; /* Transaction ID in two-phase commit */
@@ -99,7 +90,10 @@ typedef struct {
PGconn *pgconn; /* the postgresql connection */
PGcancel *cancel; /* the cancellation structure */
- PyObject *async_cursor; /* weakref to a cursor executing an asynchronous query */
+ /* Weakref to the object executing an asynchronous query. The object
+ * is a cursor for async connections, but it may be something else
+ * for a green connection. If NULL, the connection is idle. */
+ PyObject *async_cursor;
int async_status; /* asynchronous execution status */
/* notice processing */
@@ -117,12 +111,20 @@ typedef struct {
int equote; /* use E''-style quotes for escaped strings */
PyObject *weakreflist; /* list of weak references */
+ int autocommit;
+
} connectionObject;
+/* map isolation level values into a numeric const */
+typedef struct {
+ char *name;
+ int value;
+} IsolationLevel;
+
/* C-callable functions in connection_int.c and connection_ext.c */
HIDDEN PyObject *conn_text_from_chars(connectionObject *pgconn, const char *str);
HIDDEN int conn_get_standard_conforming_strings(PGconn *pgconn);
-HIDDEN int conn_get_isolation_level(PGresult *pgres);
+HIDDEN int conn_get_isolation_level(connectionObject *self);
HIDDEN int conn_get_protocol_version(PGconn *pgconn);
HIDDEN int conn_get_server_version(PGconn *pgconn);
HIDDEN PGcancel *conn_get_cancel(PGconn *pgconn);
@@ -134,6 +136,10 @@ HIDDEN int conn_connect(connectionObject *self, long int async);
HIDDEN void conn_close(connectionObject *self);
HIDDEN int conn_commit(connectionObject *self);
HIDDEN int conn_rollback(connectionObject *self);
+HIDDEN int conn_set_session(connectionObject *self, const char *isolevel,
+ const char *readonly, const char *deferrable,
+ int autocommit);
+HIDDEN int conn_set_autocommit(connectionObject *self, int value);
HIDDEN int conn_switch_isolation_level(connectionObject *self, int level);
HIDDEN int conn_set_client_encoding(connectionObject *self, const char *enc);
HIDDEN int conn_poll(connectionObject *self);
@@ -152,6 +158,13 @@ HIDDEN PyObject *conn_tpc_recover(connectionObject *self);
"in asynchronous mode"); \
return NULL; }
+#define EXC_IF_IN_TRANSACTION(self, cmd) \
+ if (self->status != CONN_STATUS_READY) { \
+ PyErr_Format(ProgrammingError, \
+ "%s cannot be used inside a transaction", #cmd); \
+ return NULL; \
+ }
+
#define EXC_IF_TPC_NOT_SUPPORTED(self) \
if ((self)->server_version < 80100) { \
PyErr_Format(NotSupportedError, \
diff --git a/psycopg/connection_int.c b/psycopg/connection_int.c
index 22c5bc5..441d362 100644
--- a/psycopg/connection_int.c
+++ b/psycopg/connection_int.c
@@ -34,6 +34,19 @@
#include <string.h>
+/* Mapping from isolation level name to value exposed by Python.
+ * Only used for backward compatibility by the isolation_level property */
+
+const IsolationLevel conn_isolevels[] = {
+ {"", 0}, /* autocommit */
+ {"read uncommitted", 1},
+ {"read committed", 2},
+ {"repeatable read", 3},
+ {"serializable", 4},
+ {"default", -1},
+ { NULL }
+};
+
/* Return a new "string" from a char* from the database.
*
@@ -82,6 +95,10 @@ conn_notice_callback(void *args, const char *message)
self->notice_pending = notice;
}
+/* Expose the notices received as Python objects.
+ *
+ * The function should be called with the connection lock and the GIL.
+ */
void
conn_notice_process(connectionObject *self)
{
@@ -92,10 +109,6 @@ conn_notice_process(connectionObject *self)
return;
}
- Py_BEGIN_ALLOW_THREADS;
- pthread_mutex_lock(&self->lock);
- Py_BLOCK_THREADS;
-
notice = self->notice_pending;
nnotices = PyList_GET_SIZE(self->notice_list);
@@ -119,10 +132,6 @@ conn_notice_process(connectionObject *self)
0, nnotices - CONN_NOTICES_LIMIT);
}
- Py_UNBLOCK_THREADS;
- pthread_mutex_unlock(&self->lock);
- Py_END_ALLOW_THREADS;
-
conn_notice_clean(self);
}
@@ -130,8 +139,6 @@ void
conn_notice_clean(connectionObject *self)
{
struct connectionObject_notice *tmp, *notice;
- Py_BEGIN_ALLOW_THREADS;
- pthread_mutex_lock(&self->lock);
notice = self->notice_pending;
@@ -141,11 +148,8 @@ conn_notice_clean(connectionObject *self)
free((void*)tmp->message);
free(tmp);
}
-
+
self->notice_pending = NULL;
-
- pthread_mutex_unlock(&self->lock);
- Py_END_ALLOW_THREADS;
}
@@ -161,8 +165,6 @@ conn_notifies_process(connectionObject *self)
PyObject *notify = NULL;
PyObject *pid = NULL, *channel = NULL, *payload = NULL;
- /* TODO: we are called without the lock! */
-
while ((pgn = PQnotifies(self->pgconn)) != NULL) {
Dprintf("conn_notifies_process: got NOTIFY from pid %d, msg = %s",
@@ -358,26 +360,57 @@ exit:
return rv;
}
+
int
-conn_get_isolation_level(PGresult *pgres)
+conn_get_isolation_level(connectionObject *self)
{
- static const char lvl1a[] = "read uncommitted";
- static const char lvl1b[] = "read committed";
- int rv;
+ PGresult *pgres = NULL;
+ char *error = NULL;
+ int rv = -1;
+ char *lname;
+ const IsolationLevel *level;
+
+ /* this may get called by async connections too: here's your result */
+ if (self->autocommit) {
+ return 0;
+ }
+
+ Py_BEGIN_ALLOW_THREADS;
+ pthread_mutex_lock(&self->lock);
- char *isolation_level = PQgetvalue(pgres, 0, 0);
+ if (!(lname = pq_get_guc_locked(self, "default_transaction_isolation",
+ &pgres, &error, &_save))) {
+ goto endlock;
+ }
- if ((strcmp(lvl1b, isolation_level) == 0) /* most likely */
- || (strcmp(lvl1a, isolation_level) == 0))
- rv = ISOLATION_LEVEL_READ_COMMITTED;
- else /* if it's not one of the lower ones, it's SERIALIZABLE */
- rv = ISOLATION_LEVEL_SERIALIZABLE;
+ /* find the value for the requested isolation level */
+ level = conn_isolevels;
+ while ((++level)->name) {
+ if (0 == strcasecmp(level->name, lname)) {
+ rv = level->value;
+ break;
+ }
+ }
+ if (-1 == rv) {
+ error = malloc(256);
+ PyOS_snprintf(error, 256,
+ "unexpected isolation level: '%s'", lname);
+ }
- CLEARPGRES(pgres);
+ free(lname);
+
+endlock:
+ pthread_mutex_unlock(&self->lock);
+ Py_END_ALLOW_THREADS;
+
+ if (rv < 0) {
+ pq_complete_error(self, &pgres, &error);
+ }
return rv;
}
+
int
conn_get_protocol_version(PGconn *pgconn)
{
@@ -425,8 +458,8 @@ conn_is_datestyle_ok(PGconn *pgconn)
int
conn_setup(connectionObject *self, PGconn *pgconn)
{
- PGresult *pgres;
- int green;
+ PGresult *pgres = NULL;
+ char *error = NULL;
self->equote = conn_get_standard_conforming_strings(pgconn);
self->server_version = conn_get_server_version(pgconn);
@@ -450,51 +483,24 @@ conn_setup(connectionObject *self, PGconn *pgconn)
pthread_mutex_lock(&self->lock);
Py_BLOCK_THREADS;
- green = psyco_green();
-
- if (green && (pq_set_non_blocking(self, 1, 1) != 0)) {
+ if (psyco_green() && (pq_set_non_blocking(self, 1, 1) != 0)) {
return -1;
}
if (!conn_is_datestyle_ok(self->pgconn)) {
- if (!green) {
- Py_UNBLOCK_THREADS;
- Dprintf("conn_connect: exec query \"%s\";", psyco_datestyle);
- pgres = PQexec(pgconn, psyco_datestyle);
- Py_BLOCK_THREADS;
- } else {
- pgres = psyco_exec_green(self, psyco_datestyle);
- }
-
- if (pgres == NULL || PQresultStatus(pgres) != PGRES_COMMAND_OK ) {
- PyErr_SetString(OperationalError, "can't set datestyle to ISO");
- IFCLEARPGRES(pgres);
- Py_UNBLOCK_THREADS;
- pthread_mutex_unlock(&self->lock);
- Py_BLOCK_THREADS;
- return -1;
- }
- CLEARPGRES(pgres);
- }
-
- if (!green) {
+ int res;
Py_UNBLOCK_THREADS;
- pgres = PQexec(pgconn, psyco_transaction_isolation);
+ res = pq_set_guc_locked(self, "datestyle", "ISO",
+ &pgres, &error, &_save);
Py_BLOCK_THREADS;
- } else {
- pgres = psyco_exec_green(self, psyco_transaction_isolation);
+ if (res < 0) {
+ pq_complete_error(self, &pgres, &error);
+ return -1;
+ }
}
- if (pgres == NULL || PQresultStatus(pgres) != PGRES_TUPLES_OK) {
- PyErr_SetString(OperationalError,
- "can't fetch default_isolation_level");
- IFCLEARPGRES(pgres);
- Py_UNBLOCK_THREADS;
- pthread_mutex_unlock(&self->lock);
- Py_BLOCK_THREADS;
- return -1;
- }
- self->isolation_level = conn_get_isolation_level(pgres);
+ /* for reset */
+ self->autocommit = 0;
Py_UNBLOCK_THREADS;
pthread_mutex_unlock(&self->lock);
@@ -779,7 +785,7 @@ _conn_poll_setup_async(connectionObject *self)
* expected to manage the transactions himself, by sending
* (asynchronously) BEGIN and COMMIT statements.
*/
- self->isolation_level = ISOLATION_LEVEL_AUTOCOMMIT;
+ self->autocommit = 1;
/* If the datestyle is ISO or anything else good,
* we can skip the CONN_STATUS_DATESTYLE step. */
@@ -857,7 +863,7 @@ conn_poll(connectionObject *self)
case CONN_STATUS_PREPARED:
res = _conn_poll_query(self);
- if (res == PSYCO_POLL_OK && self->async_cursor) {
+ if (res == PSYCO_POLL_OK && self->async && self->async_cursor) {
/* An async query has just finished: parse the tuple in the
* target cursor. */
cursorObject *curs;
@@ -952,6 +958,77 @@ conn_rollback(connectionObject *self)
return res;
}
+int
+conn_set_session(connectionObject *self,
+ const char *isolevel, const char *readonly, const char *deferrable,
+ int autocommit)
+{
+ PGresult *pgres = NULL;
+ char *error = NULL;
+ int res = -1;
+
+ Py_BEGIN_ALLOW_THREADS;
+ pthread_mutex_lock(&self->lock);
+
+ if (isolevel) {
+ Dprintf("conn_set_session: setting isolation to %s", isolevel);
+ if ((res = pq_set_guc_locked(self,
+ "default_transaction_isolation", isolevel,
+ &pgres, &error, &_save))) {
+ goto endlock;
+ }
+ }
+
+ if (readonly) {
+ Dprintf("conn_set_session: setting read only to %s", readonly);
+ if ((res = pq_set_guc_locked(self,
+ "default_transaction_read_only", readonly,
+ &pgres, &error, &_save))) {
+ goto endlock;
+ }
+ }
+
+ if (deferrable) {
+ Dprintf("conn_set_session: setting deferrable to %s", deferrable);
+ if ((res = pq_set_guc_locked(self,
+ "default_transaction_deferrable", deferrable,
+ &pgres, &error, &_save))) {
+ goto endlock;
+ }
+ }
+
+ if (self->autocommit != autocommit) {
+ Dprintf("conn_set_session: setting autocommit to %d", autocommit);
+ self->autocommit = autocommit;
+ }
+
+ res = 0;
+
+endlock:
+ pthread_mutex_unlock(&self->lock);
+ Py_END_ALLOW_THREADS;
+
+ if (res < 0) {
+ pq_complete_error(self, &pgres, &error);
+ }
+
+ return res;
+}
+
+int
+conn_set_autocommit(connectionObject *self, int value)
+{
+ Py_BEGIN_ALLOW_THREADS;
+ pthread_mutex_lock(&self->lock);
+
+ self->autocommit = value;
+
+ pthread_mutex_unlock(&self->lock);
+ Py_END_ALLOW_THREADS;
+
+ return 0;
+}
+
/* conn_switch_isolation_level - switch isolation level on the connection */
int
@@ -959,33 +1036,80 @@ conn_switch_isolation_level(connectionObject *self, int level)
{
PGresult *pgres = NULL;
char *error = NULL;
- int res = 0;
+ int curr_level;
+ int ret = -1;
+
+ /* use only supported levels on older PG versions */
+ if (self->server_version < 80000) {
+ if (level == 1 || level == 3) {
+ ++level;
+ }
+ }
+
+ if (-1 == (curr_level = conn_get_isolation_level(self))) {
+ return -1;
+ }
+
+ if (curr_level == level) {
+ /* no need to change level */
+ return 0;
+ }
+
+ /* Emulate the previous semantic of set_isolation_level() using the
+ * functions currently available. */
Py_BEGIN_ALLOW_THREADS;
pthread_mutex_lock(&self->lock);
- /* if the current isolation level is equal to the requested one don't switch */
- if (self->isolation_level != level) {
+ /* terminate the current transaction if any */
+ if ((ret = pq_abort_locked(self, &pgres, &error, &_save))) {
+ goto endlock;
+ }
- /* if the current isolation level is > 0 we need to abort the current
- transaction before changing; that all folks! */
- if (self->isolation_level != ISOLATION_LEVEL_AUTOCOMMIT) {
- res = pq_abort_locked(self, &pgres, &error, &_save);
+ if (level == 0) {
+ if ((ret = pq_set_guc_locked(self,
+ "default_transaction_isolation", "default",
+ &pgres, &error, &_save))) {
+ goto endlock;
+ }
+ self->autocommit = 1;
+ }
+ else {
+ /* find the name of the requested level */
+ const IsolationLevel *isolevel = conn_isolevels;
+ while ((++isolevel)->name) {
+ if (level == isolevel->value) {
+ break;
+ }
+ }
+ if (!isolevel->name) {
+ ret = -1;
+ error = strdup("bad isolation level value");
+ goto endlock;
}
- self->isolation_level = level;
- Dprintf("conn_switch_isolation_level: switched to level %d", level);
+ if ((ret = pq_set_guc_locked(self,
+ "default_transaction_isolation", isolevel->name,
+ &pgres, &error, &_save))) {
+ goto endlock;
+ }
+ self->autocommit = 0;
}
+ Dprintf("conn_switch_isolation_level: switched to level %d", level);
+
+endlock:
pthread_mutex_unlock(&self->lock);
Py_END_ALLOW_THREADS;
- if (res < 0)
+ if (ret < 0) {
pq_complete_error(self, &pgres, &error);
+ }
- return res;
+ return ret;
}
+
/* conn_set_client_encoding - switch client encoding on connection */
int
@@ -993,7 +1117,6 @@ conn_set_client_encoding(connectionObject *self, const char *enc)
{
PGresult *pgres = NULL;
char *error = NULL;
- char query[48];
int res = 1;
char *codec = NULL;
char *clean_enc = NULL;
@@ -1009,16 +1132,14 @@ conn_set_client_encoding(connectionObject *self, const char *enc)
Py_BEGIN_ALLOW_THREADS;
pthread_mutex_lock(&self->lock);
- /* set encoding, no encoding string is longer than 24 bytes */
- PyOS_snprintf(query, 47, "SET client_encoding = '%s'", clean_enc);
-
/* abort the current transaction, to set the encoding ouside of
transactions */
if ((res = pq_abort_locked(self, &pgres, &error, &_save))) {
goto endlock;
}
- if ((res = pq_execute_command_locked(self, query, &pgres, &error, &_save))) {
+ if ((res = pq_set_guc_locked(self, "client_encoding", clean_enc,
+ &pgres, &error, &_save))) {
goto endlock;
}
@@ -1042,7 +1163,6 @@ conn_set_client_encoding(connectionObject *self, const char *enc)
self->encoding, self->codec);
endlock:
-
pthread_mutex_unlock(&self->lock);
Py_END_ALLOW_THREADS;
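
A sketch of the behaviour implemented above: the isolation level is no longer cached at connect time but derived on demand from default_transaction_isolation. The "dbname=test" DSN and the "read committed" server default are illustrative assumptions::

    import psycopg2

    conn = psycopg2.connect("dbname=test")
    print(conn.isolation_level)    # 2: the server default, 'read committed'

    cur = conn.cursor()
    cur.execute("SET default_transaction_isolation TO 'serializable'")
    conn.commit()
    print(conn.isolation_level)    # 4: reflects the new session default

    conn.autocommit = True
    print(conn.isolation_level)    # 0: autocommit is reported as level 0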
diff --git a/psycopg/connection_type.c b/psycopg/connection_type.c
index 7ca395d..e456ce4 100644
--- a/psycopg/connection_type.c
+++ b/psycopg/connection_type.c
@@ -187,6 +187,7 @@ psyco_conn_tpc_begin(connectionObject *self, PyObject *args)
EXC_IF_CONN_CLOSED(self);
EXC_IF_CONN_ASYNC(self, tpc_begin);
EXC_IF_TPC_NOT_SUPPORTED(self);
+ EXC_IF_IN_TRANSACTION(self, tpc_begin);
if (!PyArg_ParseTuple(args, "O", &oxid)) {
goto exit;
@@ -196,15 +197,8 @@ psyco_conn_tpc_begin(connectionObject *self, PyObject *args)
goto exit;
}
- /* check we are not in a transaction */
- if (self->status != CONN_STATUS_READY) {
- PyErr_SetString(ProgrammingError,
- "tpc_begin must be called outside a transaction");
- goto exit;
- }
-
/* two phase commit and autocommit make no point */
- if (self->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT) {
+ if (self->autocommit) {
PyErr_SetString(ProgrammingError,
"tpc_begin can't be called in autocommit mode");
goto exit;
@@ -384,6 +378,199 @@ psyco_conn_tpc_recover(connectionObject *self, PyObject *args)
#ifdef PSYCOPG_EXTENSIONS
+
+/* parse a python object into one of the possible isolation level values */
+
+extern const IsolationLevel conn_isolevels[];
+
+static const char *
+_psyco_conn_parse_isolevel(connectionObject *self, PyObject *pyval)
+{
+ const IsolationLevel *isolevel = NULL;
+
+ Py_INCREF(pyval); /* for ensure_bytes */
+
+ /* parse from one of the level constants */
+ if (PyInt_Check(pyval)) {
+ long level = PyInt_AsLong(pyval);
+ if (level == -1 && PyErr_Occurred()) { goto exit; }
+ if (level < 1 || level > 4) {
+ PyErr_SetString(PyExc_ValueError,
+ "isolation_level must be between 1 and 4");
+ goto exit;
+ }
+
+ isolevel = conn_isolevels + level;
+ }
+
+ /* parse from the string -- this includes "default" */
+ else {
+ isolevel = conn_isolevels;
+ while ((++isolevel)->name) {
+ if (!(pyval = psycopg_ensure_bytes(pyval))) {
+ goto exit;
+ }
+ if (0 == strcasecmp(isolevel->name, Bytes_AS_STRING(pyval))) {
+ break;
+ }
+ }
+ if (!isolevel->name) {
+ char msg[256];
+ snprintf(msg, sizeof(msg),
+ "bad value for isolation_level: '%s'", Bytes_AS_STRING(pyval));
+ PyErr_SetString(PyExc_ValueError, msg);
+ }
+ }
+
+ /* use only supported levels on older PG versions */
+ if (isolevel && self->server_version < 80000) {
+ if (isolevel->value == 1 || isolevel->value == 3) {
+ ++isolevel;
+ }
+ }
+
+exit:
+ Py_XDECREF(pyval);
+
+ return isolevel ? isolevel->name : NULL;
+}
+
+/* convert True/False/"default" into a C string */
+
+static const char *
+_psyco_conn_parse_onoff(PyObject *pyval)
+{
+ int istrue = PyObject_IsTrue(pyval);
+ if (-1 == istrue) { return NULL; }
+ if (istrue) {
+ int cmp;
+ PyObject *pydef;
+ if (!(pydef = Text_FromUTF8("default"))) { return NULL; }
+ cmp = PyObject_RichCompareBool(pyval, pydef, Py_EQ);
+ Py_DECREF(pydef);
+ if (-1 == cmp) { return NULL; }
+ return cmp ? "default" : "on";
+ }
+ else {
+ return "off";
+ }
+}
+
+/* set_session - set default transaction characteristics */
+
+#define psyco_conn_set_session_doc \
+"set_session(...) -- Set one or more parameters for the next transactions.\n\n" \
+"Accepted arguments are 'isolation_level', 'readonly', 'deferrable', 'autocommit'."
+
+static PyObject *
+psyco_conn_set_session(connectionObject *self, PyObject *args, PyObject *kwargs)
+{
+ PyObject *isolevel = Py_None;
+ PyObject *readonly = Py_None;
+ PyObject *deferrable = Py_None;
+ PyObject *autocommit = Py_None;
+
+ const char *c_isolevel = NULL;
+ const char *c_readonly = NULL;
+ const char *c_deferrable = NULL;
+ int c_autocommit = self->autocommit;
+
+ static char *kwlist[] =
+ {"isolation_level", "readonly", "deferrable", "autocommit", NULL};
+
+ EXC_IF_CONN_CLOSED(self);
+ EXC_IF_CONN_ASYNC(self, set_session);
+ EXC_IF_IN_TRANSACTION(self, set_session);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO", kwlist,
+ &isolevel, &readonly, &deferrable, &autocommit)) {
+ return NULL;
+ }
+
+ if (Py_None != isolevel) {
+ if (!(c_isolevel = _psyco_conn_parse_isolevel(self, isolevel))) {
+ return NULL;
+ }
+ }
+
+ if (Py_None != readonly) {
+ if (!(c_readonly = _psyco_conn_parse_onoff(readonly))) {
+ return NULL;
+ }
+ }
+ if (Py_None != deferrable) {
+ if (self->server_version < 90100) {
+ PyErr_SetString(ProgrammingError,
+ "the 'deferrable' setting is only available"
+ " from PostgreSQL 9.1");
+ return NULL;
+ }
+ if (!(c_deferrable = _psyco_conn_parse_onoff(deferrable))) {
+ return NULL;
+ }
+ }
+ if (Py_None != autocommit) {
+ c_autocommit = PyObject_IsTrue(autocommit);
+ if (-1 == c_autocommit) { return NULL; }
+ }
+
+ if (0 != conn_set_session(self,
+ c_isolevel, c_readonly, c_deferrable, c_autocommit)) {
+ return NULL;
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+
+#define psyco_conn_autocommit_doc \
+"set or return the autocommit status."
+
+static PyObject *
+psyco_conn_autocommit_get(connectionObject *self)
+{
+ PyObject *ret;
+ ret = self->autocommit ? Py_True : Py_False;
+ Py_INCREF(ret);
+ return ret;
+}
+
+static PyObject *
+_psyco_conn_autocommit_set_checks(connectionObject *self)
+{
+ /* wrapper to use the EXC_IF macros.
+ * return NULL in case of error, else whatever */
+ EXC_IF_CONN_CLOSED(self);
+ EXC_IF_CONN_ASYNC(self, autocommit);
+ EXC_IF_IN_TRANSACTION(self, autocommit);
+ return Py_None; /* borrowed */
+}
+
+static int
+psyco_conn_autocommit_set(connectionObject *self, PyObject *pyvalue)
+{
+ int value;
+
+ if (!_psyco_conn_autocommit_set_checks(self)) { return -1; }
+ if (-1 == (value = PyObject_IsTrue(pyvalue))) { return -1; }
+ if (0 != conn_set_autocommit(self, value)) { return -1; }
+
+ return 0;
+}
+
+
+/* isolation_level - return the current isolation level */
+
+static PyObject *
+psyco_conn_isolation_level_get(connectionObject *self)
+{
+ int rv = conn_get_isolation_level(self);
+ if (-1 == rv) { return NULL; }
+ return PyInt_FromLong((long)rv);
+}
+
+
/* set_isolation_level method - switch connection isolation level */
#define psyco_conn_set_isolation_level_doc \
@@ -400,9 +587,9 @@ psyco_conn_set_isolation_level(connectionObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "i", &level)) return NULL;
- if (level < 0 || level > 2) {
+ if (level < 0 || level > 4) {
PyErr_SetString(PyExc_ValueError,
- "isolation level must be between 0 and 2");
+ "isolation level must be between 0 and 4");
return NULL;
}
@@ -717,6 +904,8 @@ static struct PyMethodDef connectionObject_methods[] = {
{"tpc_recover", (PyCFunction)psyco_conn_tpc_recover,
METH_NOARGS, psyco_conn_tpc_recover_doc},
#ifdef PSYCOPG_EXTENSIONS
+ {"set_session", (PyCFunction)psyco_conn_set_session,
+ METH_VARARGS|METH_KEYWORDS, psyco_conn_set_session_doc},
{"set_isolation_level", (PyCFunction)psyco_conn_set_isolation_level,
METH_VARARGS, psyco_conn_set_isolation_level_doc},
{"set_client_encoding", (PyCFunction)psyco_conn_set_client_encoding,
@@ -749,9 +938,6 @@ static struct PyMemberDef connectionObject_members[] = {
#ifdef PSYCOPG_EXTENSIONS
{"closed", T_LONG, offsetof(connectionObject, closed), READONLY,
"True if the connection is closed."},
- {"isolation_level", T_LONG,
- offsetof(connectionObject, isolation_level), READONLY,
- "The current isolation level."},
{"encoding", T_STRING, offsetof(connectionObject, encoding), READONLY,
"The current client encoding."},
{"notices", T_OBJECT, offsetof(connectionObject, notice_list), READONLY},
@@ -792,6 +978,16 @@ static struct PyGetSetDef connectionObject_getsets[] = {
EXCEPTION_GETTER(IntegrityError),
EXCEPTION_GETTER(DataError),
EXCEPTION_GETTER(NotSupportedError),
+#ifdef PSYCOPG_EXTENSIONS
+ { "autocommit",
+ (getter)psyco_conn_autocommit_get,
+ (setter)psyco_conn_autocommit_set,
+ psyco_conn_autocommit_doc },
+ { "isolation_level",
+ (getter)psyco_conn_isolation_level_get,
+ (setter)NULL,
+ "The current isolation level." },
+#endif
{NULL}
};
#undef EXCEPTION_GETTER
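
A sketch of the guards added above: both the autocommit setter and set_session() raise ProgrammingError while a transaction is in progress. The "dbname=test" DSN is an illustrative assumption::

    import psycopg2

    conn = psycopg2.connect("dbname=test")
    cur = conn.cursor()
    cur.execute("SELECT 1")        # implicitly starts a transaction

    try:
        conn.autocommit = True     # same for conn.set_session(...)
    except psycopg2.ProgrammingError as e:
        print(e)                   # "autocommit cannot be used inside a transaction"

    conn.rollback()                # end the transaction first...
    conn.autocommit = True         # ...then the switch is allowed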
diff --git a/psycopg/cursor.h b/psycopg/cursor.h
index 09ac12a..c6ca25b 100644
--- a/psycopg/cursor.h
+++ b/psycopg/cursor.h
@@ -64,7 +64,7 @@ struct cursorObject {
PyObject *copyfile; /* file-like used during COPY TO/FROM ops */
Py_ssize_t copysize; /* size of the copy buffer during COPY TO/FROM ops */
#define DEFAULT_COPYSIZE 16384
-#define DEFAULT_COPYBUFF 8132
+#define DEFAULT_COPYBUFF 8192
PyObject *tuple_factory; /* factory for result tuples */
PyObject *tzinfo_factory; /* factory for tzinfo objects */
diff --git a/psycopg/cursor_type.c b/psycopg/cursor_type.c
index d166de7..717cf9c 100644
--- a/psycopg/cursor_type.c
+++ b/psycopg/cursor_type.c
@@ -456,7 +456,7 @@ psyco_curs_execute(cursorObject *self, PyObject *args, PyObject *kwargs)
NULL, NULL);
return NULL;
}
- if (self->conn->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT) {
+ if (self->conn->autocommit) {
psyco_set_error(ProgrammingError, self,
"can't use a named cursor outside of transactions", NULL, NULL);
return NULL;
@@ -739,7 +739,6 @@ psyco_curs_fetchone(cursorObject *self, PyObject *args)
PyObject *res;
EXC_IF_CURS_CLOSED(self);
- EXC_IF_ASYNC_IN_PROGRESS(self, fetchone);
if (_psyco_curs_prefetch(self) < 0) return NULL;
EXC_IF_NO_TUPLES(self);
@@ -747,6 +746,7 @@ psyco_curs_fetchone(cursorObject *self, PyObject *args)
char buffer[128];
EXC_IF_NO_MARK(self);
+ EXC_IF_ASYNC_IN_PROGRESS(self, fetchone);
EXC_IF_TPC_PREPARED(self->conn, fetchone);
PyOS_snprintf(buffer, 127, "FETCH FORWARD 1 FROM \"%s\"", self->name);
if (pq_execute(self, buffer, 0) == -1) return NULL;
@@ -853,7 +853,6 @@ psyco_curs_fetchmany(cursorObject *self, PyObject *args, PyObject *kwords)
}
EXC_IF_CURS_CLOSED(self);
- EXC_IF_ASYNC_IN_PROGRESS(self, fetchmany);
if (_psyco_curs_prefetch(self) < 0) return NULL;
EXC_IF_NO_TUPLES(self);
@@ -861,6 +860,7 @@ psyco_curs_fetchmany(cursorObject *self, PyObject *args, PyObject *kwords)
char buffer[128];
EXC_IF_NO_MARK(self);
+ EXC_IF_ASYNC_IN_PROGRESS(self, fetchmany);
EXC_IF_TPC_PREPARED(self->conn, fetchone);
PyOS_snprintf(buffer, 127, "FETCH FORWARD %d FROM \"%s\"",
(int)size, self->name);
@@ -924,7 +924,6 @@ psyco_curs_fetchall(cursorObject *self, PyObject *args)
PyObject *list, *res;
EXC_IF_CURS_CLOSED(self);
- EXC_IF_ASYNC_IN_PROGRESS(self, fetchall);
if (_psyco_curs_prefetch(self) < 0) return NULL;
EXC_IF_NO_TUPLES(self);
@@ -932,6 +931,7 @@ psyco_curs_fetchall(cursorObject *self, PyObject *args)
char buffer[128];
EXC_IF_NO_MARK(self);
+ EXC_IF_ASYNC_IN_PROGRESS(self, fetchall);
EXC_IF_TPC_PREPARED(self->conn, fetchall);
PyOS_snprintf(buffer, 127, "FETCH FORWARD ALL FROM \"%s\"", self->name);
if (pq_execute(self, buffer, 0) == -1) return NULL;
@@ -1112,7 +1112,6 @@ psyco_curs_scroll(cursorObject *self, PyObject *args, PyObject *kwargs)
return NULL;
EXC_IF_CURS_CLOSED(self);
- EXC_IF_ASYNC_IN_PROGRESS(self, scroll)
/* if the cursor is not named we have the full result set and we can do
our own calculations to scroll; else we just delegate the scrolling
@@ -1141,6 +1140,7 @@ psyco_curs_scroll(cursorObject *self, PyObject *args, PyObject *kwargs)
char buffer[128];
EXC_IF_NO_MARK(self);
+ EXC_IF_ASYNC_IN_PROGRESS(self, scroll)
EXC_IF_TPC_PREPARED(self->conn, scroll);
if (strcmp(mode, "absolute") == 0) {
@@ -1213,15 +1213,19 @@ static int _psyco_curs_copy_columns(PyObject *columns, char *columnlist)
/* extension: copy_from - implements COPY FROM */
#define psyco_curs_copy_from_doc \
-"copy_from(file, table, sep='\\t', null='\\N', columns=None) -- Copy table from file."
+"copy_from(file, table, sep='\\t', null='\\N', size=8192, columns=None) -- Copy table from file."
static int
_psyco_curs_has_read_check(PyObject* o, void* var)
{
if (PyObject_HasAttrString(o, "readline")
&& PyObject_HasAttrString(o, "read")) {
- /* It's OK to store a borrowed reference, because it is only held for
- * the duration of psyco_curs_copy_from. */
+ /* This routine stores a borrowed reference. Although it is only held
+ * for the duration of psyco_curs_copy_from, nested invocations of
+ * Py_BEGIN_ALLOW_THREADS could surrender control to another thread,
+ * which could invoke the garbage collector. We thus need an
+ * INCREF/DECREF pair if we store this pointer in a GC object, such as
+ * a cursorObject */
*((PyObject**)var) = o;
return 1;
}
@@ -1311,6 +1315,7 @@ psyco_curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
Dprintf("psyco_curs_copy_from: query = %s", query);
self->copysize = bufsize;
+ Py_INCREF(file);
self->copyfile = file;
if (pq_execute(self, query, 0) == 1) {
@@ -1319,6 +1324,7 @@ psyco_curs_copy_from(cursorObject *self, PyObject *args, PyObject *kwargs)
}
self->copyfile = NULL;
+ Py_DECREF(file);
exit:
PyMem_Free(quoted_delimiter);
@@ -1337,8 +1343,6 @@ static int
_psyco_curs_has_write_check(PyObject* o, void* var)
{
if (PyObject_HasAttrString(o, "write")) {
- /* It's OK to store a borrowed reference, because it is only held for
- * the duration of psyco_curs_copy_to. */
*((PyObject**)var) = o;
return 1;
}
@@ -1424,12 +1428,15 @@ psyco_curs_copy_to(cursorObject *self, PyObject *args, PyObject *kwargs)
Dprintf("psyco_curs_copy_to: query = %s", query);
self->copysize = 0;
+ Py_INCREF(file);
self->copyfile = file;
if (pq_execute(self, query, 0) == 1) {
res = Py_None;
Py_INCREF(Py_None);
}
+
+ Py_DECREF(file);
self->copyfile = NULL;
exit:
@@ -1447,7 +1454,7 @@ exit:
*/
#define psyco_curs_copy_expert_doc \
-"copy_expert(sql, file, size=None) -- Submit a user-composed COPY statement.\n" \
+"copy_expert(sql, file, size=8192) -- Submit a user-composed COPY statement.\n" \
"`file` must be an open, readable file for COPY FROM or an open, writeable\n" \
"file for COPY TO. The optional `size` argument, when specified for a COPY\n" \
"FROM statement, will be passed to file's read method to control the read\n" \
@@ -1471,18 +1478,18 @@ psyco_curs_copy_expert(cursorObject *self, PyObject *args, PyObject *kwargs)
EXC_IF_TPC_PREPARED(self->conn, copy_expert);
sql = _psyco_curs_validate_sql_basic(self, sql);
-
- /* Any failure from here forward should 'goto fail' rather than
+
+ /* Any failure from here forward should 'goto exit' rather than
'return NULL' directly. */
-
- if (sql == NULL) { goto fail; }
+
+ if (sql == NULL) { goto exit; }
/* This validation of file is rather weak, in that it doesn't enforce the
association between "COPY FROM" -> "read" and "COPY TO" -> "write".
However, the error handling in _pq_copy_[in|out] must be able to handle
the case where the attempt to call file.read|write fails, so no harm
done. */
-
+
if ( !PyObject_HasAttrString(file, "read")
&& !PyObject_HasAttrString(file, "write")
)
@@ -1490,26 +1497,23 @@ psyco_curs_copy_expert(cursorObject *self, PyObject *args, PyObject *kwargs)
PyErr_SetString(PyExc_TypeError, "file must be a readable file-like"
" object for COPY FROM; a writeable file-like object for COPY TO."
);
- goto fail;
+ goto exit;
}
self->copysize = bufsize;
+ Py_INCREF(file);
self->copyfile = file;
/* At this point, the SQL statement must be str, not unicode */
- if (pq_execute(self, Bytes_AS_STRING(sql), 0) != 1) { goto fail; }
-
- res = Py_None;
- Py_INCREF(res);
- goto cleanup;
- fail:
- if (res != NULL) {
- Py_DECREF(res);
- res = NULL;
+ if (pq_execute(self, Bytes_AS_STRING(sql), 0) == 1) {
+ res = Py_None;
+ Py_INCREF(res);
}
- /* Fall through to cleanup */
- cleanup:
+
self->copyfile = NULL;
+ Py_DECREF(file);
+
+exit:
Py_XDECREF(sql);
return res;
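
The copy_from()/copy_to()/copy_expert() changes above keep a strong reference to the file object for the duration of the COPY (the ticket #58 refcount fix) and document the 8192-byte default read buffer. A minimal usage sketch, assuming a pre-existing table test (num integer, data varchar); the table name and DSN are illustrative, not taken from the patch:

import psycopg2
from StringIO import StringIO

conn = psycopg2.connect("dbname=test")   # illustrative DSN
cur = conn.cursor()

# COPY FROM a file-like object; the cursor now INCREFs it while the copy runs
buf = StringIO("42\tfoo\n74\tbar\n")
cur.copy_from(buf, 'test', columns=('num', 'data'))

# COPY TO a writable file-like object
out = StringIO()
cur.copy_to(out, 'test', columns=('num', 'data'))

# copy_expert with an explicit read buffer size (the documented default is 8192)
buf = StringIO("42\tfoo\n74\tbar\n")
cur.copy_expert("COPY test (num, data) FROM STDIN", buf, size=8192)

conn.commit()
cur.close()
conn.close()
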
diff --git a/psycopg/green.c b/psycopg/green.c
index c9b6e07..65578f5 100644
--- a/psycopg/green.c
+++ b/psycopg/green.c
@@ -152,6 +152,20 @@ psyco_exec_green(connectionObject *conn, const char *command)
{
PGresult *result = NULL;
+ /* Check that no other query is concurrently executing on this connection */
+ if (conn->async_cursor) {
+ PyErr_SetString(ProgrammingError,
+ "a single async query can be executed on the same connection");
+ goto end;
+ }
+ /* we don't care which cursor is executing the query; it may even be
+ * that no cursor is involved at all because this is an internal
+ * query. So just store anything in async_cursor, respecting the
+ * code that expects it to be a weakref. */
+ if (!(conn->async_cursor = PyWeakref_NewRef((PyObject*)conn, NULL))) {
+ goto end;
+ }
+
/* Send the query asynchronously */
if (0 == pq_send_query(conn, command)) {
goto end;
@@ -173,6 +187,7 @@ psyco_exec_green(connectionObject *conn, const char *command)
end:
conn->async_status = ASYNC_DONE;
+ Py_CLEAR(conn->async_cursor);
return result;
}
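
The check added to psyco_exec_green() above turns a second concurrent query on the same green connection into a ProgrammingError instead of a deadlock or loss of sync (see NEWS). A rough sketch of the scenario, assuming eventlet is installed and monkey-patched so that psycopg2.extras.wait_select becomes cooperative; the DSN and worker names are illustrative:

import eventlet
eventlet.monkey_patch()  # make select() cooperative before psycopg2.extras is used

import psycopg2
import psycopg2.extensions
import psycopg2.extras

# register a coroutine-friendly wait callback: queries now go through psyco_exec_green()
psycopg2.extensions.set_wait_callback(psycopg2.extras.wait_select)

conn = psycopg2.connect("dbname=test")  # illustrative DSN

def worker(n):
    cur = conn.cursor()
    try:
        cur.execute("select pg_sleep(1)")
        print "worker %d: done" % n
    except psycopg2.ProgrammingError, e:
        # the green thread hitting the busy connection now fails cleanly
        print "worker %d: %s" % (n, e)

g1 = eventlet.spawn(worker, 1)
g2 = eventlet.spawn(worker, 2)
g1.wait()
g2.wait()
conn.close()
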
diff --git a/psycopg/lobject.h b/psycopg/lobject.h
index 84fd3b5..f52d85c 100644
--- a/psycopg/lobject.h
+++ b/psycopg/lobject.h
@@ -76,7 +76,7 @@ HIDDEN int lobject_close(lobjectObject *self);
return NULL; }
#define EXC_IF_LOBJ_LEVEL0(self) \
-if (self->conn->isolation_level == 0) { \
+if (self->conn->autocommit) { \
psyco_set_error(ProgrammingError, NULL, \
"can't use a lobject outside of transactions", NULL, NULL); \
return NULL; \
diff --git a/psycopg/lobject_int.c b/psycopg/lobject_int.c
index 3fe1f86..e6ad1b6 100644
--- a/psycopg/lobject_int.c
+++ b/psycopg/lobject_int.c
@@ -252,7 +252,7 @@ lobject_close_locked(lobjectObject *self, char **error)
break;
}
- if (self->conn->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT ||
+ if (self->conn->autocommit ||
self->conn->mark != self->mark ||
self->fd == -1)
return 0;
diff --git a/psycopg/lobject_type.c b/psycopg/lobject_type.c
index ba45de2..a55272c 100644
--- a/psycopg/lobject_type.c
+++ b/psycopg/lobject_type.c
@@ -51,7 +51,7 @@ psyco_lobj_close(lobjectObject *self, PyObject *args)
closing the current transaction is equivalent to close all the
opened large objects */
if (!lobject_is_closed(self)
- && self->conn->isolation_level != ISOLATION_LEVEL_AUTOCOMMIT
+ && !self->conn->autocommit
&& self->conn->mark == self->mark)
{
Dprintf("psyco_lobj_close: closing lobject at %p", self);
@@ -331,7 +331,7 @@ lobject_setup(lobjectObject *self, connectionObject *conn,
{
Dprintf("lobject_setup: init lobject object at %p", self);
- if (conn->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT) {
+ if (conn->autocommit) {
psyco_set_error(ProgrammingError, NULL,
"can't use a lobject outside of transactions", NULL, NULL);
return -1;
diff --git a/psycopg/pqpath.c b/psycopg/pqpath.c
index 6a6d05a..cee5ce4 100644
--- a/psycopg/pqpath.c
+++ b/psycopg/pqpath.c
@@ -343,12 +343,12 @@ pq_execute_command_locked(connectionObject *conn, const char *query,
*tstate = PyEval_SaveThread();
}
if (*pgres == NULL) {
- const char *msg;
-
Dprintf("pq_execute_command_locked: PQexec returned NULL");
- msg = PQerrorMessage(conn->pgconn);
- if (msg)
- *error = strdup(msg);
+ if (!PyErr_Occurred()) {
+ const char *msg;
+ msg = PQerrorMessage(conn->pgconn);
+ if (msg && *msg) { *error = strdup(msg); }
+ }
goto cleanup;
}
@@ -361,8 +361,8 @@ pq_execute_command_locked(connectionObject *conn, const char *query,
retvalue = 0;
IFCLEARPGRES(*pgres);
-
- cleanup:
+
+cleanup:
return retvalue;
}
@@ -406,23 +406,17 @@ int
pq_begin_locked(connectionObject *conn, PGresult **pgres, char **error,
PyThreadState **tstate)
{
- const char *query[] = {
- NULL,
- "BEGIN; SET TRANSACTION ISOLATION LEVEL READ COMMITTED",
- "BEGIN; SET TRANSACTION ISOLATION LEVEL SERIALIZABLE"};
int result;
- Dprintf("pq_begin_locked: pgconn = %p, isolevel = %ld, status = %d",
- conn->pgconn, conn->isolation_level, conn->status);
+ Dprintf("pq_begin_locked: pgconn = %p, autocommit = %d, status = %d",
+ conn->pgconn, conn->autocommit, conn->status);
- if (conn->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT
- || conn->status != CONN_STATUS_READY) {
+ if (conn->autocommit || conn->status != CONN_STATUS_READY) {
Dprintf("pq_begin_locked: transaction in progress");
return 0;
}
- result = pq_execute_command_locked(conn, query[conn->isolation_level],
- pgres, error, tstate);
+ result = pq_execute_command_locked(conn, "BEGIN", pgres, error, tstate);
if (result == 0)
conn->status = CONN_STATUS_BEGIN;
@@ -442,11 +436,10 @@ pq_commit(connectionObject *conn)
PGresult *pgres = NULL;
char *error = NULL;
- Dprintf("pq_commit: pgconn = %p, isolevel = %ld, status = %d",
- conn->pgconn, conn->isolation_level, conn->status);
+ Dprintf("pq_commit: pgconn = %p, autocommit = %d, status = %d",
+ conn->pgconn, conn->autocommit, conn->status);
- if (conn->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT
- || conn->status != CONN_STATUS_BEGIN) {
+ if (conn->autocommit || conn->status != CONN_STATUS_BEGIN) {
Dprintf("pq_commit: no transaction to commit");
return 0;
}
@@ -457,11 +450,13 @@ pq_commit(connectionObject *conn)
retvalue = pq_execute_command_locked(conn, "COMMIT", &pgres, &error, &_save);
+ Py_BLOCK_THREADS;
+ conn_notice_process(conn);
+ Py_UNBLOCK_THREADS;
+
pthread_mutex_unlock(&conn->lock);
Py_END_ALLOW_THREADS;
- conn_notice_process(conn);
-
if (retvalue < 0)
pq_complete_error(conn, &pgres, &error);
@@ -478,11 +473,10 @@ pq_abort_locked(connectionObject *conn, PGresult **pgres, char **error,
{
int retvalue = -1;
- Dprintf("pq_abort_locked: pgconn = %p, isolevel = %ld, status = %d",
- conn->pgconn, conn->isolation_level, conn->status);
+ Dprintf("pq_abort_locked: pgconn = %p, autocommit = %d, status = %d",
+ conn->pgconn, conn->autocommit, conn->status);
- if (conn->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT
- || conn->status != CONN_STATUS_BEGIN) {
+ if (conn->autocommit || conn->status != CONN_STATUS_BEGIN) {
Dprintf("pq_abort_locked: no transaction to abort");
return 0;
}
@@ -507,11 +501,10 @@ pq_abort(connectionObject *conn)
PGresult *pgres = NULL;
char *error = NULL;
- Dprintf("pq_abort: pgconn = %p, isolevel = %ld, status = %d",
- conn->pgconn, conn->isolation_level, conn->status);
+ Dprintf("pq_abort: pgconn = %p, autocommit = %d, status = %d",
+ conn->pgconn, conn->autocommit, conn->status);
- if (conn->isolation_level == ISOLATION_LEVEL_AUTOCOMMIT
- || conn->status != CONN_STATUS_BEGIN) {
+ if (conn->autocommit || conn->status != CONN_STATUS_BEGIN) {
Dprintf("pq_abort: no transaction to abort");
return 0;
}
@@ -521,11 +514,13 @@ pq_abort(connectionObject *conn)
retvalue = pq_abort_locked(conn, &pgres, &error, &_save);
+ Py_BLOCK_THREADS;
+ conn_notice_process(conn);
+ Py_UNBLOCK_THREADS;
+
pthread_mutex_unlock(&conn->lock);
Py_END_ALLOW_THREADS;
- conn_notice_process(conn);
-
if (retvalue < 0)
pq_complete_error(conn, &pgres, &error);
@@ -547,13 +542,12 @@ pq_reset_locked(connectionObject *conn, PGresult **pgres, char **error,
{
int retvalue = -1;
- Dprintf("pq_reset_locked: pgconn = %p, isolevel = %ld, status = %d",
- conn->pgconn, conn->isolation_level, conn->status);
+ Dprintf("pq_reset_locked: pgconn = %p, autocommit = %d, status = %d",
+ conn->pgconn, conn->autocommit, conn->status);
conn->mark += 1;
- if (conn->isolation_level != ISOLATION_LEVEL_AUTOCOMMIT
- && conn->status == CONN_STATUS_BEGIN) {
+ if (!conn->autocommit && conn->status == CONN_STATUS_BEGIN) {
retvalue = pq_execute_command_locked(conn, "ABORT", pgres, error, tstate);
if (retvalue != 0) return retvalue;
}
@@ -578,19 +572,21 @@ pq_reset(connectionObject *conn)
PGresult *pgres = NULL;
char *error = NULL;
- Dprintf("pq_reset: pgconn = %p, isolevel = %ld, status = %d",
- conn->pgconn, conn->isolation_level, conn->status);
+ Dprintf("pq_reset: pgconn = %p, autocommit = %d, status = %d",
+ conn->pgconn, conn->autocommit, conn->status);
Py_BEGIN_ALLOW_THREADS;
pthread_mutex_lock(&conn->lock);
retvalue = pq_reset_locked(conn, &pgres, &error, &_save);
+ Py_BLOCK_THREADS;
+ conn_notice_process(conn);
+ Py_UNBLOCK_THREADS;
+
pthread_mutex_unlock(&conn->lock);
Py_END_ALLOW_THREADS;
- conn_notice_process(conn);
-
if (retvalue < 0) {
pq_complete_error(conn, &pgres, &error);
}
@@ -601,6 +597,98 @@ pq_reset(connectionObject *conn)
}
+/* Get a session parameter.
+ *
+ * The function should be called on a locked connection without
+ * holding the GIL.
+ *
+ * The result is a new string allocated with malloc.
+ */
+
+char *
+pq_get_guc_locked(
+ connectionObject *conn, const char *param,
+ PGresult **pgres, char **error, PyThreadState **tstate)
+{
+ char query[256];
+ int size;
+ char *rv = NULL;
+
+ Dprintf("pq_get_guc_locked: reading %s", param);
+
+ size = PyOS_snprintf(query, sizeof(query), "SHOW %s", param);
+ if (size >= sizeof(query)) {
+ *error = strdup("SHOW: query too large");
+ goto cleanup;
+ }
+
+ Dprintf("pq_get_guc_locked: pgconn = %p, query = %s", conn->pgconn, query);
+
+ *error = NULL;
+ if (!psyco_green()) {
+ *pgres = PQexec(conn->pgconn, query);
+ } else {
+ PyEval_RestoreThread(*tstate);
+ *pgres = psyco_exec_green(conn, query);
+ *tstate = PyEval_SaveThread();
+ }
+
+ if (*pgres == NULL) {
+ Dprintf("pq_get_guc_locked: PQexec returned NULL");
+ if (!PyErr_Occurred()) {
+ const char *msg;
+ msg = PQerrorMessage(conn->pgconn);
+ if (msg && *msg) { *error = strdup(msg); }
+ }
+ goto cleanup;
+ }
+ if (PQresultStatus(*pgres) != PGRES_TUPLES_OK) {
+ Dprintf("pq_get_guc_locked: result was not TUPLES_OK (%d)",
+ PQresultStatus(*pgres));
+ goto cleanup;
+ }
+
+ rv = strdup(PQgetvalue(*pgres, 0, 0));
+ CLEARPGRES(*pgres);
+
+cleanup:
+ return rv;
+}
+
+/* Set a session parameter.
+ *
+ * The function should be called on a locked connection without
+ * holding the GIL
+ */
+
+int
+pq_set_guc_locked(
+ connectionObject *conn, const char *param, const char *value,
+ PGresult **pgres, char **error, PyThreadState **tstate)
+{
+ char query[256];
+ int size;
+ int rv = -1;
+
+ Dprintf("pq_set_guc_locked: setting %s to %s", param, value);
+
+ if (0 == strcmp(value, "default")) {
+ size = PyOS_snprintf(query, sizeof(query),
+ "SET %s TO DEFAULT", param);
+ }
+ else {
+ size = PyOS_snprintf(query, sizeof(query),
+ "SET %s TO '%s'", param, value);
+ }
+ if (size >= sizeof(query)) {
+ *error = strdup("SET: query too large");
+ }
+
+ rv = pq_execute_command_locked(conn, query, pgres, error, tstate);
+
+ return rv;
+}
+
/* Call one of the PostgreSQL tpc-related commands.
*
* This function should only be called on a locked connection without
@@ -626,12 +714,12 @@ pq_tpc_command_locked(connectionObject *conn, const char *cmd, const char *tid,
{ goto exit; }
/* prepare the command to the server */
- buflen = 3 + strlen(cmd) + strlen(etid); /* add space, semicolon, zero */
+ buflen = 2 + strlen(cmd) + strlen(etid); /* add space, zero */
if (!(buf = PyMem_Malloc(buflen))) {
PyErr_NoMemory();
goto exit;
}
- if (0 > PyOS_snprintf(buf, buflen, "%s %s;", cmd, etid)) { goto exit; }
+ if (0 > PyOS_snprintf(buf, buflen, "%s %s", cmd, etid)) { goto exit; }
/* run the command and let it handle the error cases */
*tstate = PyEval_SaveThread();
@@ -675,12 +763,14 @@ pq_is_busy(connectionObject *conn)
res = PQisBusy(conn->pgconn);
+ Py_BLOCK_THREADS;
+ conn_notifies_process(conn);
+ conn_notice_process(conn);
+ Py_UNBLOCK_THREADS;
+
pthread_mutex_unlock(&(conn->lock));
Py_END_ALLOW_THREADS;
- conn_notice_process(conn);
- conn_notifies_process(conn);
-
return res;
}
@@ -700,9 +790,9 @@ pq_is_busy_locked(connectionObject *conn)
return -1;
}
- /* We can't call conn_notice_process/conn_notifies_process because
- they try to get the lock. We don't need anyway them because at the end of
- the loop we are in (async reading) pq_fetch will be called. */
+ /* notices and notifies will be processed at the end of the loop we are in
+ * (async reading) by pq_fetch. */
+
return PQisBusy(conn->pgconn);
}
@@ -791,6 +881,15 @@ pq_execute(cursorObject *curs, const char *query, int async)
}
return -1;
}
+
+ /* Process notifies here instead of when fetching the tuple, as we are
+ * in the same critical section that received the data. Without this
+ * care, reading notifies may disrupt other threads' communication
+ * with the server (as in ticket #55). */
+ Py_BLOCK_THREADS;
+ conn_notifies_process(curs->conn);
+ conn_notice_process(curs->conn);
+ Py_UNBLOCK_THREADS;
}
else if (async == 1) {
@@ -1379,9 +1478,6 @@ pq_fetch(cursorObject *curs)
break;
}
- conn_notice_process(curs->conn);
- conn_notifies_process(curs->conn);
-
/* error checking, close the connection if necessary (some critical errors
are not really critical, like a COPY FROM error: if that's the case we
raise the exception but we avoid to close the connection) */
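
The pq_get_guc_locked() and pq_set_guc_locked() helpers added above read and change server settings by issuing plain SHOW/SET statements; connection.set_session() builds on them to adjust the session defaults. The generated SQL is equivalent to the following sketch (the Python here only illustrates the statements, it is not the C code path; the DSN is illustrative):

import psycopg2

conn = psycopg2.connect("dbname=test")  # illustrative DSN
cur = conn.cursor()

# pq_get_guc_locked(conn, "default_transaction_isolation", ...) boils down to:
cur.execute("SHOW default_transaction_isolation")
print cur.fetchone()[0]

# pq_set_guc_locked(conn, param, value, ...) issues one of:
cur.execute("SET default_transaction_isolation TO 'serializable'")
cur.execute("SET default_transaction_read_only TO 'on'")
cur.execute("SET default_transaction_isolation TO DEFAULT")  # when value == "default"

conn.commit()
conn.close()
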
diff --git a/psycopg/pqpath.h b/psycopg/pqpath.h
index 080047c..bf012ad 100644
--- a/psycopg/pqpath.h
+++ b/psycopg/pqpath.h
@@ -47,6 +47,12 @@ HIDDEN int pq_abort(connectionObject *conn);
HIDDEN int pq_reset_locked(connectionObject *conn, PGresult **pgres,
char **error, PyThreadState **tstate);
HIDDEN int pq_reset(connectionObject *conn);
+HIDDEN char *pq_get_guc_locked(connectionObject *conn, const char *param,
+ PGresult **pgres,
+ char **error, PyThreadState **tstate);
+HIDDEN int pq_set_guc_locked(connectionObject *conn, const char *param,
+ const char *value, PGresult **pgres,
+ char **error, PyThreadState **tstate);
HIDDEN int pq_tpc_command_locked(connectionObject *conn,
const char *cmd, const char *tid,
PGresult **pgres, char **error,
diff --git a/psycopg/psycopgmodule.c b/psycopg/psycopgmodule.c
index 0106941..f37a98e 100644
--- a/psycopg/psycopgmodule.c
+++ b/psycopg/psycopgmodule.c
@@ -39,6 +39,7 @@
#include "psycopg/adapter_qstring.h"
#include "psycopg/adapter_binary.h"
#include "psycopg/adapter_pboolean.h"
+#include "psycopg/adapter_pint.h"
#include "psycopg/adapter_pfloat.h"
#include "psycopg/adapter_pdecimal.h"
#include "psycopg/adapter_asis.h"
@@ -316,9 +317,9 @@ psyco_adapters_init(PyObject *mod)
microprotocols_add(&PyFloat_Type, NULL, (PyObject*)&pfloatType);
#if PY_MAJOR_VERSION < 3
- microprotocols_add(&PyInt_Type, NULL, (PyObject*)&asisType);
+ microprotocols_add(&PyInt_Type, NULL, (PyObject*)&pintType);
#endif
- microprotocols_add(&PyLong_Type, NULL, (PyObject*)&asisType);
+ microprotocols_add(&PyLong_Type, NULL, (PyObject*)&pintType);
microprotocols_add(&PyBool_Type, NULL, (PyObject*)&pbooleanType);
/* strings */
@@ -359,10 +360,16 @@ psyco_adapters_init(PyObject *mod)
#ifdef HAVE_MXDATETIME
/* as above, we use the callable objects from the psycopg module */
- call = PyMapping_GetItemString(mod, "TimestampFromMx");
- microprotocols_add(mxDateTime.DateTime_Type, NULL, call);
- call = PyMapping_GetItemString(mod, "TimeFromMx");
- microprotocols_add(mxDateTime.DateTimeDelta_Type, NULL, call);
+ if (NULL != (call = PyMapping_GetItemString(mod, "TimestampFromMx"))) {
+ microprotocols_add(mxDateTime.DateTime_Type, NULL, call);
+
+ /* if we found the above, we have this too. */
+ call = PyMapping_GetItemString(mod, "TimeFromMx");
+ microprotocols_add(mxDateTime.DateTimeDelta_Type, NULL, call);
+ }
+ else {
+ PyErr_Clear();
+ }
#endif
}
@@ -757,11 +764,13 @@ static PyMethodDef psycopgMethods[] = {
{"QuotedString", (PyCFunction)psyco_QuotedString,
METH_VARARGS, psyco_QuotedString_doc},
{"Boolean", (PyCFunction)psyco_Boolean,
- METH_VARARGS, psyco_Float_doc},
+ METH_VARARGS, psyco_Boolean_doc},
+ {"Int", (PyCFunction)psyco_Int,
+ METH_VARARGS, psyco_Int_doc},
{"Float", (PyCFunction)psyco_Float,
- METH_VARARGS, psyco_Decimal_doc},
+ METH_VARARGS, psyco_Float_doc},
{"Decimal", (PyCFunction)psyco_Decimal,
- METH_VARARGS, psyco_Boolean_doc},
+ METH_VARARGS, psyco_Decimal_doc},
{"Binary", (PyCFunction)psyco_Binary,
METH_VARARGS, psyco_Binary_doc},
{"Date", (PyCFunction)psyco_Date,
@@ -789,6 +798,7 @@ static PyMethodDef psycopgMethods[] = {
METH_VARARGS, psyco_IntervalFromPy_doc},
#ifdef HAVE_MXDATETIME
+ /* to be deleted if not found at import time */
{"DateFromMx", (PyCFunction)psyco_DateFromMx,
METH_VARARGS, psyco_DateFromMx_doc},
{"TimeFromMx", (PyCFunction)psyco_TimeFromMx,
@@ -848,6 +858,7 @@ INIT_MODULE(_psycopg)(void)
Py_TYPE(&binaryType) = &PyType_Type;
Py_TYPE(&isqlquoteType) = &PyType_Type;
Py_TYPE(&pbooleanType) = &PyType_Type;
+ Py_TYPE(&pintType) = &PyType_Type;
Py_TYPE(&pfloatType) = &PyType_Type;
Py_TYPE(&pdecimalType) = &PyType_Type;
Py_TYPE(&asisType) = &PyType_Type;
@@ -863,6 +874,7 @@ INIT_MODULE(_psycopg)(void)
if (PyType_Ready(&binaryType) == -1) goto exit;
if (PyType_Ready(&isqlquoteType) == -1) goto exit;
if (PyType_Ready(&pbooleanType) == -1) goto exit;
+ if (PyType_Ready(&pintType) == -1) goto exit;
if (PyType_Ready(&pfloatType) == -1) goto exit;
if (PyType_Ready(&pdecimalType) == -1) goto exit;
if (PyType_Ready(&asisType) == -1) goto exit;
@@ -880,12 +892,16 @@ INIT_MODULE(_psycopg)(void)
#ifdef HAVE_MXDATETIME
Py_TYPE(&mxdatetimeType) = &PyType_Type;
if (PyType_Ready(&mxdatetimeType) == -1) goto exit;
- if (mxDateTime_ImportModuleAndAPI() != 0) {
- Dprintf("initpsycopg: why marc hide mx.DateTime again?!");
- PyErr_SetString(PyExc_ImportError, "can't import mx.DateTime module");
+ if (0 != mxDateTime_ImportModuleAndAPI()) {
+ PyErr_Clear();
+
+ /* only fail if the mx typecaster should have been the default */
+#ifdef PSYCOPG_DEFAULT_MXDATETIME
+ PyErr_SetString(PyExc_ImportError,
+ "can't import mx.DateTime module (requested as default adapter)");
goto exit;
+#endif
}
- if (psyco_adapter_mxdatetime_init()) { goto exit; }
#endif
/* import python builtin datetime module, if available */
@@ -962,6 +978,16 @@ INIT_MODULE(_psycopg)(void)
/* encodings dictionary in module dictionary */
PyModule_AddObject(module, "encodings", psycoEncodings);
+#ifdef HAVE_MXDATETIME
+ /* If we can't find mx.DateTime objects at runtime,
+ * remove them from the module (and, as a consequence, from the adapters). */
+ if (0 != psyco_adapter_mxdatetime_init()) {
+ PyDict_DelItemString(dict, "DateFromMx");
+ PyDict_DelItemString(dict, "TimeFromMx");
+ PyDict_DelItemString(dict, "TimestampFromMx");
+ PyDict_DelItemString(dict, "IntervalFromMx");
+ }
+#endif
/* initialize default set of typecasters */
typecast_init(dict);
@@ -978,6 +1004,7 @@ INIT_MODULE(_psycopg)(void)
binaryType.tp_alloc = PyType_GenericAlloc;
isqlquoteType.tp_alloc = PyType_GenericAlloc;
pbooleanType.tp_alloc = PyType_GenericAlloc;
+ pintType.tp_alloc = PyType_GenericAlloc;
pfloatType.tp_alloc = PyType_GenericAlloc;
pdecimalType.tp_alloc = PyType_GenericAlloc;
connectionType.tp_alloc = PyType_GenericAlloc;
@@ -993,7 +1020,6 @@ INIT_MODULE(_psycopg)(void)
lobjectType.tp_alloc = PyType_GenericAlloc;
#endif
-
#ifdef HAVE_MXDATETIME
mxdatetimeType.tp_alloc = PyType_GenericAlloc;
#endif
diff --git a/psycopg/python.h b/psycopg/python.h
index fed0303..6d87fa5 100644
--- a/psycopg/python.h
+++ b/psycopg/python.h
@@ -105,6 +105,7 @@ typedef unsigned long Py_uhash_t;
#if PY_MAJOR_VERSION > 2
#define PyInt_Type PyLong_Type
+#define PyInt_Check PyLong_Check
#define PyInt_AsLong PyLong_AsLong
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSsize_t PyLong_FromSsize_t
@@ -129,6 +130,7 @@ typedef unsigned long Py_uhash_t;
#define Bytes_FromString PyString_FromString
#define Bytes_FromStringAndSize PyString_FromStringAndSize
#define Bytes_FromFormat PyString_FromFormat
+#define Bytes_ConcatAndDel PyString_ConcatAndDel
#define _Bytes_Resize _PyString_Resize
#else
@@ -144,6 +146,7 @@ typedef unsigned long Py_uhash_t;
#define Bytes_FromString PyBytes_FromString
#define Bytes_FromStringAndSize PyBytes_FromStringAndSize
#define Bytes_FromFormat PyBytes_FromFormat
+#define Bytes_ConcatAndDel PyBytes_ConcatAndDel
#define _Bytes_Resize _PyBytes_Resize
#endif
diff --git a/psycopg/typecast.c b/psycopg/typecast.c
index ba3871e..56a203d 100644
--- a/psycopg/typecast.c
+++ b/psycopg/typecast.c
@@ -292,13 +292,14 @@ typecast_init(PyObject *dict)
/* register the date/time typecasters with their original names */
#ifdef HAVE_MXDATETIME
- if (psyco_typecast_mxdatetime_init()) { return -1; }
- for (i = 0; typecast_mxdatetime[i].name != NULL; i++) {
- typecastObject *t;
- Dprintf("typecast_init: initializing %s", typecast_mxdatetime[i].name);
- t = (typecastObject *)typecast_from_c(&(typecast_mxdatetime[i]), dict);
- if (t == NULL) return -1;
- PyDict_SetItem(dict, t->name, (PyObject *)t);
+ if (0 == psyco_typecast_mxdatetime_init()) {
+ for (i = 0; typecast_mxdatetime[i].name != NULL; i++) {
+ typecastObject *t;
+ Dprintf("typecast_init: initializing %s", typecast_mxdatetime[i].name);
+ t = (typecastObject *)typecast_from_c(&(typecast_mxdatetime[i]), dict);
+ if (t == NULL) return -1;
+ PyDict_SetItem(dict, t->name, (PyObject *)t);
+ }
}
#endif
diff --git a/psycopg/typecast_mxdatetime.c b/psycopg/typecast_mxdatetime.c
index a3a95fa..e61224d 100644
--- a/psycopg/typecast_mxdatetime.c
+++ b/psycopg/typecast_mxdatetime.c
@@ -25,13 +25,17 @@
#include "mxDateTime.h"
+
+/* Return 0 on success, -1 on failure, but don't set an exception */
+
static int
psyco_typecast_mxdatetime_init(void)
{
Dprintf("psyco_typecast_mxdatetime_init: mx.DateTime init");
- if(mxDateTime_ImportModuleAndAPI()) {
- PyErr_SetString(PyExc_ImportError, "mx.DateTime initialization failed");
+ if (mxDateTime_ImportModuleAndAPI()) {
+ Dprintf("psyco_typecast_mxdatetime_init: mx.DateTime initialization failed");
+ PyErr_Clear();
return -1;
}
return 0;
diff --git a/psycopg/xid_type.c b/psycopg/xid_type.c
index 9e95fd1..4de46b4 100644
--- a/psycopg/xid_type.c
+++ b/psycopg/xid_type.c
@@ -663,7 +663,7 @@ xid_recover(PyObject *conn)
/* curs.execute(...) */
if (!(tmp = PyObject_CallMethod(curs, "execute", "s",
- "SELECT gid, prepared, owner, database FROM pg_prepared_xacts;")))
+ "SELECT gid, prepared, owner, database FROM pg_prepared_xacts")))
{
goto exit;
}
diff --git a/scripts/ticket58.py b/scripts/ticket58.py
new file mode 100644
index 0000000..95520c1
--- /dev/null
+++ b/scripts/ticket58.py
@@ -0,0 +1,75 @@
+"""
+A script to reproduce the race condition described in ticket #58
+
+from https://bugzilla.redhat.com/show_bug.cgi?id=711095
+
+Results in the error:
+
+ python: Modules/gcmodule.c:277: visit_decref: Assertion `gc->gc.gc_refs != 0'
+ failed.
+
+on an unpatched library.
+"""
+
+import threading
+import gc
+import time
+
+import psycopg2
+from StringIO import StringIO
+
+done = 0
+
+class GCThread(threading.Thread):
+ # A thread that sits in an infinite loop, forcing the garbage collector
+ # to run
+ def run(self):
+ global done
+ while not done:
+ gc.collect()
+ time.sleep(0.1) # give the other thread a chance to run
+
+gc_thread = GCThread()
+
+
+# This assumes a pre-existing db named "test", with:
+# "CREATE TABLE test (id serial PRIMARY KEY, num integer, data varchar);"
+
+conn = psycopg2.connect("dbname=test user=postgres")
+cur = conn.cursor()
+
+# Start the other thread, running the GC regularly
+gc_thread.start()
+
+# Now do lots of "cursor.copy_from" calls:
+print "copy_from"
+for i in range(1000):
+ f = StringIO("42\tfoo\n74\tbar\n")
+ cur.copy_from(f, 'test', columns=('num', 'data'))
+ # Assuming the other thread gets a chance to run during this call, expect a
+ # build of python (with assertions enabled) to bail out here with:
+ # python: Modules/gcmodule.c:277: visit_decref: Assertion `gc->gc.gc_refs != 0' failed.
+
+# Also exercise the copy_to code path
+print "copy_to"
+cur.execute("truncate test")
+f = StringIO("42\tfoo\n74\tbar\n")
+cur.copy_from(f, 'test', columns=('num', 'data'))
+for i in range(1000):
+ f = StringIO()
+ cur.copy_to(f, 'test', columns=('num', 'data'))
+
+# And copy_expert too
+print "copy_expert"
+cur.execute("truncate test")
+for i in range(1000):
+ f = StringIO("42\tfoo\n74\tbar\n")
+ cur.copy_expert("copy test to stdout", f)
+
+# Terminate the GC thread's loop:
+done = 1
+
+cur.close()
+conn.close()
+
+
diff --git a/setup.py b/setup.py
index 9ae8117..e03876a 100644
--- a/setup.py
+++ b/setup.py
@@ -45,21 +45,15 @@ Operating System :: Unix
# Note: The setup.py must be compatible with both Python 2 and 3
import os
-import os.path
import sys
import re
import subprocess
from distutils.core import setup, Extension
-from distutils.errors import DistutilsFileError
from distutils.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
from distutils.ccompiler import get_default_compiler
-from distutils.dep_util import newer_group
from distutils.util import get_platform
-try:
- from distutils.msvc9compiler import MSVCCompiler
-except ImportError:
- MSVCCompiler = None
+
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
@@ -79,27 +73,135 @@ except ImportError:
# Take a look at http://www.python.org/dev/peps/pep-0386/
# for a consistent versioning pattern.
-PSYCOPG_VERSION = '2.4.1'
+PSYCOPG_VERSION = '2.4.2'
version_flags = ['dt', 'dec']
PLATFORM_IS_WINDOWS = sys.platform.lower().startswith('win')
-def get_pg_config(kind, pg_config):
- try:
- p = subprocess.Popen([pg_config, "--" + kind],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- except OSError:
- raise Warning("Unable to find 'pg_config' file in '%s'" % pg_config)
- p.stdin.close()
- r = p.stdout.readline().strip()
- if not r:
- raise Warning(p.stderr.readline())
- if not isinstance(r, str):
- r = r.decode('ascii')
- return r
+
+class PostgresConfig:
+ def __init__(self, build_ext):
+ self.build_ext = build_ext
+ self.pg_config_exe = self.build_ext.pg_config
+ if not self.pg_config_exe:
+ self.pg_config_exe = self.autodetect_pg_config_path()
+ if self.pg_config_exe is None:
+ sys.stderr.write("""\
+Error: pg_config executable not found.
+
+Please add the directory containing pg_config to the PATH
+or specify the full executable path with the option:
+
+ python setup.py build_ext --pg-config /path/to/pg_config build ...
+
+or with the pg_config option in 'setup.cfg'.
+""")
+ sys.exit(1)
+
+ def query(self, attr_name):
+ """Spawn the pg_config executable, querying for the given config
+ name, and return the printed value, sanitized. """
+ try:
+ pg_config_process = subprocess.Popen(
+ [self.pg_config_exe, "--" + attr_name],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ except OSError:
+ raise Warning("Unable to find 'pg_config' file in '%s'" %
+ self.pg_config_exe)
+ pg_config_process.stdin.close()
+ result = pg_config_process.stdout.readline().strip()
+ if not result:
+ raise Warning(pg_config_process.stderr.readline())
+ if not isinstance(result, str):
+ result = result.decode('ascii')
+ return result
+
+ def find_on_path(self, exename, path_directories=None):
+ if not path_directories:
+ path_directories = os.environ['PATH'].split(os.pathsep)
+ for dir_name in path_directories:
+ fullpath = os.path.join(dir_name, exename)
+ if os.path.isfile(fullpath):
+ return fullpath
+ return None
+
+ def autodetect_pg_config_path(self):
+ """Find and return the path to the pg_config executable."""
+ if PLATFORM_IS_WINDOWS:
+ return self.autodetect_pg_config_path_windows()
+ else:
+ return self.find_on_path('pg_config')
+
+ def autodetect_pg_config_path_windows(self):
+ """Attempt several different ways of finding the pg_config
+ executable on Windows, and return its full path, if found."""
+
+ # This code only runs if they have not specified a pg_config option
+ # in the config file or via the commandline.
+
+ # First, check for pg_config.exe on the PATH, and use that if found.
+ pg_config_exe = self.find_on_path('pg_config.exe')
+ if pg_config_exe:
+ return pg_config_exe
+
+ # Now, try looking in the Windows Registry to find a PostgreSQL
+ # installation, and infer the path from that.
+ pg_config_exe = self._get_pg_config_from_registry()
+ if pg_config_exe:
+ return pg_config_exe
+
+ return None
+
+ def _get_pg_config_from_registry(self):
+ try:
+ import winreg
+ except ImportError:
+ import _winreg as winreg
+
+ reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+ try:
+ pg_inst_list_key = winreg.OpenKey(reg,
+ 'SOFTWARE\\PostgreSQL\\Installations')
+ except EnvironmentError:
+ # No PostgreSQL installation, as best as we can tell.
+ return None
+
+ try:
+ # Determine the name of the first subkey, if any:
+ try:
+ first_sub_key_name = winreg.EnumKey(pg_inst_list_key, 0)
+ except EnvironmentError:
+ return None
+
+ pg_first_inst_key = winreg.OpenKey(reg,
+ 'SOFTWARE\\PostgreSQL\\Installations\\'
+ + first_sub_key_name)
+ try:
+ pg_inst_base_dir = winreg.QueryValueEx(
+ pg_first_inst_key, 'Base Directory')[0]
+ finally:
+ winreg.CloseKey(pg_first_inst_key)
+
+ finally:
+ winreg.CloseKey(pg_inst_list_key)
+
+ pg_config_path = os.path.join(
+ pg_inst_base_dir, 'bin', 'pg_config.exe')
+ if not os.path.exists(pg_config_path):
+ return None
+
+ # Support unicode paths, if this version of Python provides the
+ # necessary infrastructure:
+ if sys.version_info[0] < 3 \
+ and hasattr(sys, 'getfilesystemencoding'):
+ pg_config_path = pg_config_path.encode(
+ sys.getfilesystemencoding())
+
+ return pg_config_path
+
class psycopg_build_ext(build_ext):
"""Conditionally complement the setup.cfg options file.
@@ -126,6 +228,9 @@ class psycopg_build_ext(build_ext):
boolean_options = build_ext.boolean_options[:]
boolean_options.extend(('use-pydatetime', 'have-ssl', 'static-libpq'))
+ def __init__(self, *args, **kwargs):
+ build_ext.__init__(self, *args, **kwargs)
+
def initialize_options(self):
build_ext.initialize_options(self)
self.use_pg_dll = 1
@@ -136,7 +241,13 @@ class psycopg_build_ext(build_ext):
self.static_libpq = static_libpq
self.pg_config = None
- def get_compiler(self):
+ def compiler_is_msvc(self):
+ return self.get_compiler_name().lower().startswith('msvc')
+
+ def compiler_is_mingw(self):
+ return self.get_compiler_name().lower().startswith('mingw')
+
+ def get_compiler_name(self):
"""Return the name of the C compiler used to compile extensions.
If a compiler was not explicitly set (on the command line, for
@@ -153,43 +264,40 @@ class psycopg_build_ext(build_ext):
name = get_default_compiler()
return name
- def get_pg_config(self, kind):
- return get_pg_config(kind, self.pg_config)
-
- def get_export_symbols(self, ext):
+ def get_export_symbols(self, extension):
# Fix MSVC seeing two of the same export symbols.
- if self.get_compiler().lower().startswith('msvc'):
+ if self.compiler_is_msvc():
return []
else:
- return build_ext.get_export_symbols(self, ext)
+ return build_ext.get_export_symbols(self, extension)
- def build_extension(self, ext):
- build_ext.build_extension(self, ext)
+ def build_extension(self, extension):
+ build_ext.build_extension(self, extension)
+ sysVer = sys.version_info[:2]
# For Python versions that use MSVC compiler 2008, re-insert the
- # manifest into the resulting .pyd file.
- if MSVCCompiler and isinstance(self.compiler, MSVCCompiler):
+ # manifest into the resulting .pyd file.
+ if self.compiler_is_msvc() and sysVer not in ((2, 4), (2, 5)):
platform = get_platform()
# Default to the x86 manifest
manifest = '_psycopg.vc9.x86.manifest'
if platform == 'win-amd64':
manifest = '_psycopg.vc9.amd64.manifest'
- self.compiler.spawn(['mt.exe', '-nologo', '-manifest',
- os.path.join('psycopg', manifest),
- '-outputresource:%s;2' % (os.path.join(self.build_lib, 'psycopg2', '_psycopg.pyd'))])
+ self.compiler.spawn(
+ ['mt.exe', '-nologo', '-manifest',
+ os.path.join('psycopg', manifest),
+ '-outputresource:%s;2' % (
+ os.path.join(self.build_lib,
+ 'psycopg2', '_psycopg.pyd'))])
def finalize_win32(self):
"""Finalize build system configuration on win32 platform."""
- import struct
sysVer = sys.version_info[:2]
# Add compiler-specific arguments:
extra_compiler_args = []
- compiler_name = self.get_compiler().lower()
- compiler_is_msvc = compiler_name.startswith('msvc')
- compiler_is_mingw = compiler_name.startswith('mingw')
- if compiler_is_mingw:
+ if self.compiler_is_mingw():
# Default MinGW compilation of Python extensions on Windows uses
# only -O:
extra_compiler_args.append('-O3')
@@ -201,23 +309,23 @@ class psycopg_build_ext(build_ext):
extra_compiler_args.append('-fno-strict-aliasing')
# Force correct C runtime library linkage:
- if sysVer <= (2,3):
+ if sysVer <= (2, 3):
# Yes: 'msvcr60', rather than 'msvcrt', is the correct value
# on the line below:
self.libraries.append('msvcr60')
- elif sysVer in ((2,4), (2,5)):
+ elif sysVer in ((2, 4), (2, 5)):
self.libraries.append('msvcr71')
# Beyond Python 2.5, we take our chances on the default C runtime
# library, because we don't know what compiler those future
# versions of Python will use.
- for exten in ext: # ext is a global list of Extension objects
- exten.extra_compile_args.extend(extra_compiler_args)
+ for extension in ext: # ext is a global list of Extension objects
+ extension.extra_compile_args.extend(extra_compiler_args)
# End of add-compiler-specific arguments section.
self.libraries.append("ws2_32")
self.libraries.append("advapi32")
- if compiler_is_msvc:
+ if self.compiler_is_msvc():
# MSVC requires an explicit "libpq"
self.libraries.remove("pq")
self.libraries.append("secur32")
@@ -242,48 +350,39 @@ class psycopg_build_ext(build_ext):
def finalize_linux2(self):
"""Finalize build system configuration on GNU/Linux platform."""
# tell piro that GCC is fine and dandy, but not so MS compilers
- for ext in self.extensions:
- ext.extra_compile_args.append('-Wdeclaration-after-statement')
+ for extension in self.extensions:
+ extension.extra_compile_args.append(
+ '-Wdeclaration-after-statement')
def finalize_options(self):
"""Complete the build system configuation."""
build_ext.finalize_options(self)
- if self.pg_config is None:
- self.pg_config = self.autodetect_pg_config_path()
- if self.pg_config is None:
- sys.stderr.write("""\
-Error: pg_config executable not found.
-
-Please add the directory containing pg_config to the PATH
-or specify the full executable path with the option:
-
- python setup.py build_ext --pg-config /path/to/pg_config build ...
-or with the pg_config option in 'setup.cfg'.
-""")
- sys.exit(1)
+ pg_config_helper = PostgresConfig(self)
self.include_dirs.append(".")
if self.static_libpq:
- if not self.link_objects: self.link_objects = []
+ if not hasattr(self, 'link_objects'):
+ self.link_objects = []
self.link_objects.append(
- os.path.join(self.get_pg_config("libdir"), "libpq.a"))
+ os.path.join(pg_config_helper.query("libdir"), "libpq.a"))
else:
self.libraries.append("pq")
try:
- self.library_dirs.append(self.get_pg_config("libdir"))
- self.include_dirs.append(self.get_pg_config("includedir"))
- self.include_dirs.append(self.get_pg_config("includedir-server"))
+ self.library_dirs.append(pg_config_helper.query("libdir"))
+ self.include_dirs.append(pg_config_helper.query("includedir"))
+ self.include_dirs.append(pg_config_helper.query("includedir-server"))
try:
# Here we take a conservative approach: we suppose that
# *at least* PostgreSQL 7.4 is available (this is the only
# 7.x series supported by psycopg 2)
- pgversion = self.get_pg_config("version").split()[1]
+ pgversion = pg_config_helper.query("version").split()[1]
except:
pgversion = "7.4.0"
- verre = re.compile(r"(\d+)\.(\d+)(?:(?:\.(\d+))|(devel|(alpha|beta|rc)\d+))")
+ verre = re.compile(
+ r"(\d+)\.(\d+)(?:(?:\.(\d+))|(devel|(alpha|beta|rc)\d+))")
m = verre.match(pgversion)
if m:
pgmajor, pgminor, pgpatch = m.group(1, 2, 3)
@@ -298,108 +397,21 @@ or with the pg_config option in 'setup.cfg'.
define_macros.append(("PG_VERSION_HEX", "0x%02X%02X%02X" %
(int(pgmajor), int(pgminor), int(pgpatch))))
except Warning:
- w = sys.exc_info()[1] # work around py 2/3 different syntax
+ w = sys.exc_info()[1] # work around py 2/3 different syntax
sys.stderr.write("Error: %s\n" % w)
sys.exit(1)
if hasattr(self, "finalize_" + sys.platform):
getattr(self, "finalize_" + sys.platform)()
- def autodetect_pg_config_path(self):
- if PLATFORM_IS_WINDOWS:
- return self.autodetect_pg_config_path_windows()
- else:
- return self.autodetect_pg_config_path_posix()
-
- def autodetect_pg_config_path_posix(self):
- exename = 'pg_config'
- for dir in os.environ['PATH'].split(os.pathsep):
- fn = os.path.join(dir, exename)
- if os.path.isfile(fn):
- return fn
-
- def autodetect_pg_config_path_windows(self):
- # Find the first PostgreSQL installation listed in the registry and
- # return the full path to its pg_config utility.
- #
- # This autodetection is performed *only* if the following conditions
- # hold:
- #
- # 1) The pg_config utility is not already available on the PATH:
- if os.popen('pg_config').close() is None: # .close()->None == success
- return None
- # 2) The user has not specified any of the following settings in
- # setup.cfg:
- # - pg_config
- # - include_dirs
- # - library_dirs
- for settingName in ('pg_config', 'include_dirs', 'library_dirs'):
- try:
- val = parser.get('build_ext', settingName)
- except configparser.NoOptionError:
- pass
- else:
- if val.strip() != '':
- return None
- # end of guard conditions
-
- try:
- import winreg
- except ImportError:
- import _winreg as winreg
-
- pg_inst_base_dir = None
- pg_config_path = None
-
- reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
- try:
- pg_inst_list_key = winreg.OpenKey(reg,
- 'SOFTWARE\\PostgreSQL\\Installations'
- )
- except EnvironmentError:
- pg_inst_list_key = None
-
- if pg_inst_list_key is not None:
- try:
- # Determine the name of the first subkey, if any:
- try:
- first_sub_key_name = winreg.EnumKey(pg_inst_list_key, 0)
- except EnvironmentError:
- first_sub_key_name = None
-
- if first_sub_key_name is not None:
- pg_first_inst_key = winreg.OpenKey(reg,
- 'SOFTWARE\\PostgreSQL\\Installations\\'
- + first_sub_key_name
- )
- try:
- pg_inst_base_dir = winreg.QueryValueEx(
- pg_first_inst_key, 'Base Directory'
- )[0]
- finally:
- winreg.CloseKey(pg_first_inst_key)
- finally:
- winreg.CloseKey(pg_inst_list_key)
-
- if pg_inst_base_dir and os.path.exists(pg_inst_base_dir):
- pg_config_path = os.path.join(pg_inst_base_dir, 'bin',
- 'pg_config.exe'
- )
- # Support unicode paths, if this version of Python provides the
- # necessary infrastructure:
- if hasattr(sys, 'getfilesystemencoding'):
- pg_config_path = pg_config_path.encode(
- sys.getfilesystemencoding()
- )
-
- return pg_config_path
# let's start with macro definitions (the ones not already in setup.cfg)
define_macros = []
include_dirs = []
# gather information to build the extension module
-ext = [] ; data_files = []
+ext = []
+data_files = []
# sources
@@ -414,7 +426,7 @@ sources = [
'adapter_asis.c', 'adapter_binary.c', 'adapter_datetime.c',
'adapter_list.c', 'adapter_pboolean.c', 'adapter_pdecimal.c',
- 'adapter_pfloat.c', 'adapter_qstring.c',
+ 'adapter_pint.c', 'adapter_pfloat.c', 'adapter_qstring.c',
'microprotocols.c', 'microprotocols_proto.c',
'typecast.c',
]
@@ -427,7 +439,7 @@ depends = [
'adapter_asis.h', 'adapter_binary.h', 'adapter_datetime.h',
'adapter_list.h', 'adapter_pboolean.h', 'adapter_pdecimal.h',
- 'adapter_pfloat.h', 'adapter_qstring.h',
+ 'adapter_pint.h', 'adapter_pfloat.h', 'adapter_qstring.h',
'microprotocols.h', 'microprotocols_proto.h',
'typecast.h', 'typecast_binary.h',
@@ -450,8 +462,9 @@ if parser.has_option('build_ext', 'mx_include_dir'):
else:
mxincludedir = os.path.join(get_python_inc(plat_specific=1), "mx")
if os.path.exists(mxincludedir):
+ # Build the support for mx: we will check at runtime if it can be imported
include_dirs.append(mxincludedir)
- define_macros.append(('HAVE_MXDATETIME','1'))
+ define_macros.append(('HAVE_MXDATETIME', '1'))
sources.append('adapter_mxdatetime.c')
depends.extend(['adapter_mxdatetime.h', 'typecast_mxdatetime.c'])
have_mxdatetime = True
@@ -459,18 +472,21 @@ if os.path.exists(mxincludedir):
# now decide which package will be the default for date/time typecasts
if have_pydatetime and (use_pydatetime or not have_mxdatetime):
- define_macros.append(('PSYCOPG_DEFAULT_PYDATETIME','1'))
+ define_macros.append(('PSYCOPG_DEFAULT_PYDATETIME', '1'))
elif have_mxdatetime:
- define_macros.append(('PSYCOPG_DEFAULT_MXDATETIME','1'))
+ define_macros.append(('PSYCOPG_DEFAULT_MXDATETIME', '1'))
else:
- def e(msg):
- sys.stderr.write("error: " + msg + "\n")
- e("psycopg requires a datetime module:")
- e(" mx.DateTime module not found")
- e(" python datetime module not found")
- e("Note that psycopg needs the module headers and not just the module")
- e("itself. If you installed Python or mx.DateTime from a binary package")
- e("you probably need to install its companion -dev or -devel package.")
+ error_message = """\
+psycopg requires a datetime module:
+ mx.DateTime module not found
+ python datetime module not found
+
+Note that psycopg needs the module headers and not just the module
+itself. If you installed Python or mx.DateTime from a binary package
+you probably need to install its companion -dev or -devel package."""
+
+ for line in error_message.split("\n"):
+ sys.stderr.write("error: " + line + "\n")
sys.exit(1)
# generate a nice version string to avoid confusion when users report bugs
@@ -484,9 +500,9 @@ else:
PSYCOPG_VERSION_EX = PSYCOPG_VERSION
if not PLATFORM_IS_WINDOWS:
- define_macros.append(('PSYCOPG_VERSION', '"'+PSYCOPG_VERSION_EX+'"'))
+ define_macros.append(('PSYCOPG_VERSION', '"' + PSYCOPG_VERSION_EX + '"'))
else:
- define_macros.append(('PSYCOPG_VERSION', '\\"'+PSYCOPG_VERSION_EX+'\\"'))
+ define_macros.append(('PSYCOPG_VERSION', '\\"' + PSYCOPG_VERSION_EX + '\\"'))
if parser.has_option('build_ext', 'have_ssl'):
have_ssl = int(parser.get('build_ext', 'have_ssl'))
@@ -524,17 +540,16 @@ setup(name="psycopg2",
author="Federico Di Gregorio",
author_email="fog@initd.org",
url="http://initd.org/psycopg/",
- download_url = download_url,
+ download_url=download_url,
license="GPL with exceptions or ZPL",
- platforms = ["any"],
+ platforms=["any"],
description=__doc__.split("\n")[0],
long_description="\n".join(__doc__.split("\n")[2:]),
classifiers=[x for x in classifiers.split("\n") if x],
data_files=data_files,
- package_dir={'psycopg2':'lib', 'psycopg2.tests': 'tests'},
+ package_dir={'psycopg2': 'lib', 'psycopg2.tests': 'tests'},
packages=['psycopg2', 'psycopg2.tests'],
cmdclass={
'build_ext': psycopg_build_ext,
'build_py': build_py, },
ext_modules=ext)
-
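
The PostgresConfig helper above centralizes the pg_config queries used to locate the libpq headers and libraries. Outside of distutils, the same query amounts to roughly this sketch (the pg_config executable path is an assumption; on Windows the class additionally falls back to the registry lookup shown above):

import subprocess

def pg_config(attr, pg_config_exe="pg_config"):  # executable path is an assumption
    """Return one value printed by pg_config, e.g. for 'libdir' or 'includedir'."""
    p = subprocess.Popen([pg_config_exe, "--" + attr],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if not out:
        raise Warning(err.strip())
    out = out.strip()
    if not isinstance(out, str):   # Python 3: decode the bytes output
        out = out.decode('ascii')
    return out

# e.g.:
# print pg_config("libdir")
# print pg_config("includedir-server")
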
diff --git a/tests/test_connection.py b/tests/test_connection.py
index d9da471..ce0bf7e 100755
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -25,7 +25,8 @@
import os
import time
import threading
-from testutils import unittest, decorate_all_tests, skip_before_postgres
+from testutils import unittest, decorate_all_tests
+from testutils import skip_before_postgres, skip_after_postgres
from operator import attrgetter
import psycopg2
@@ -201,24 +202,39 @@ class IsolationLevelsTestCase(unittest.TestCase):
def test_set_isolation_level(self):
conn = self.connect()
+ curs = conn.cursor()
- conn.set_isolation_level(
- psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
- self.assertEqual(conn.isolation_level,
- psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ levels = [
+ (None, psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT),
+ ('read uncommitted', psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED),
+ ('read committed', psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED),
+ ('repeatable read', psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ),
+ ('serializable', psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE),
+ ]
+ for name, level in levels:
+ conn.set_isolation_level(level)
- conn.set_isolation_level(
- psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
- self.assertEqual(conn.isolation_level,
- psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
+ # the only values available on prehistoric PG versions
+ if conn.server_version < 80000:
+ if level in (
+ psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
+ psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ):
+ name, level = levels[levels.index((name, level)) + 1]
- conn.set_isolation_level(
- psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
- self.assertEqual(conn.isolation_level,
- psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
+ self.assertEqual(conn.isolation_level, level)
+
+ curs.execute('show transaction_isolation;')
+ got_name = curs.fetchone()[0]
+
+ if name is None:
+ curs.execute('show default_transaction_isolation;')
+ name = curs.fetchone()[0]
+
+ self.assertEqual(name, got_name)
+ conn.commit()
self.assertRaises(ValueError, conn.set_isolation_level, -1)
- self.assertRaises(ValueError, conn.set_isolation_level, 3)
+ self.assertRaises(ValueError, conn.set_isolation_level, 5)
def test_set_isolation_level_abort(self):
conn = self.connect()
@@ -701,6 +717,241 @@ from testutils import skip_if_tpc_disabled
decorate_all_tests(ConnectionTwoPhaseTests, skip_if_tpc_disabled)
+class TransactionControlTests(unittest.TestCase):
+ def setUp(self):
+ self.conn = psycopg2.connect(dsn)
+
+ def tearDown(self):
+ if not self.conn.closed:
+ self.conn.close()
+
+ def test_not_in_transaction(self):
+ cur = self.conn.cursor()
+ cur.execute("select 1")
+ self.assertRaises(psycopg2.ProgrammingError,
+ self.conn.set_session,
+ psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
+
+ def test_set_isolation_level(self):
+ cur = self.conn.cursor()
+ self.conn.set_session(
+ psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
+ cur.execute("SHOW default_transaction_isolation;")
+ self.assertEqual(cur.fetchone()[0], 'serializable')
+ self.conn.rollback()
+
+ self.conn.set_session(
+ psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ)
+ cur.execute("SHOW default_transaction_isolation;")
+ if self.conn.server_version > 80000:
+ self.assertEqual(cur.fetchone()[0], 'repeatable read')
+ else:
+ self.assertEqual(cur.fetchone()[0], 'serializable')
+ self.conn.rollback()
+
+ self.conn.set_session(
+ isolation_level=psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
+ cur.execute("SHOW default_transaction_isolation;")
+ self.assertEqual(cur.fetchone()[0], 'read committed')
+ self.conn.rollback()
+
+ self.conn.set_session(
+ isolation_level=psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED)
+ cur.execute("SHOW default_transaction_isolation;")
+ if self.conn.server_version > 80000:
+ self.assertEqual(cur.fetchone()[0], 'read uncommitted')
+ else:
+ self.assertEqual(cur.fetchone()[0], 'read committed')
+ self.conn.rollback()
+
+ def test_set_isolation_level_str(self):
+ cur = self.conn.cursor()
+ self.conn.set_session("serializable")
+ cur.execute("SHOW default_transaction_isolation;")
+ self.assertEqual(cur.fetchone()[0], 'serializable')
+ self.conn.rollback()
+
+ self.conn.set_session("repeatable read")
+ cur.execute("SHOW default_transaction_isolation;")
+ if self.conn.server_version > 80000:
+ self.assertEqual(cur.fetchone()[0], 'repeatable read')
+ else:
+ self.assertEqual(cur.fetchone()[0], 'serializable')
+ self.conn.rollback()
+
+ self.conn.set_session("read committed")
+ cur.execute("SHOW default_transaction_isolation;")
+ self.assertEqual(cur.fetchone()[0], 'read committed')
+ self.conn.rollback()
+
+ self.conn.set_session("read uncommitted")
+ cur.execute("SHOW default_transaction_isolation;")
+ if self.conn.server_version > 80000:
+ self.assertEqual(cur.fetchone()[0], 'read uncommitted')
+ else:
+ self.assertEqual(cur.fetchone()[0], 'read committed')
+ self.conn.rollback()
+
+ def test_bad_isolation_level(self):
+ self.assertRaises(ValueError, self.conn.set_session, 0)
+ self.assertRaises(ValueError, self.conn.set_session, 5)
+ self.assertRaises(ValueError, self.conn.set_session, 'whatever')
+
+ def test_set_read_only(self):
+ cur = self.conn.cursor()
+ self.conn.set_session(readonly=True)
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+ self.conn.rollback()
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+ self.conn.rollback()
+
+ cur = self.conn.cursor()
+ self.conn.set_session(readonly=None)
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+ self.conn.rollback()
+
+ self.conn.set_session(readonly=False)
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], 'off')
+ self.conn.rollback()
+
+ def test_set_default(self):
+ cur = self.conn.cursor()
+ cur.execute("SHOW default_transaction_isolation;")
+ default_isolevel = cur.fetchone()[0]
+ cur.execute("SHOW default_transaction_read_only;")
+ default_readonly = cur.fetchone()[0]
+ self.conn.rollback()
+
+ self.conn.set_session(isolation_level='serializable', readonly=True)
+ self.conn.set_session(isolation_level='default', readonly='default')
+
+ cur.execute("SHOW default_transaction_isolation;")
+ self.assertEqual(cur.fetchone()[0], default_isolevel)
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], default_readonly)
+
+ @skip_before_postgres(9, 1)
+ def test_set_deferrable(self):
+ cur = self.conn.cursor()
+ self.conn.set_session(readonly=True, deferrable=True)
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+ cur.execute("SHOW default_transaction_deferrable;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+ self.conn.rollback()
+ cur.execute("SHOW default_transaction_deferrable;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+ self.conn.rollback()
+
+ self.conn.set_session(deferrable=False)
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+ cur.execute("SHOW default_transaction_deferrable;")
+ self.assertEqual(cur.fetchone()[0], 'off')
+ self.conn.rollback()
+
+ @skip_after_postgres(9, 1)
+ def test_set_deferrable_error(self):
+ self.assertRaises(psycopg2.ProgrammingError,
+ self.conn.set_session, readonly=True, deferrable=True)
+
+
+class AutocommitTests(unittest.TestCase):
+ def setUp(self):
+ self.conn = psycopg2.connect(dsn)
+
+ def tearDown(self):
+ if not self.conn.closed:
+ self.conn.close()
+
+ def test_default_no_autocommit(self):
+ self.assert_(not self.conn.autocommit)
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ cur = self.conn.cursor()
+ cur.execute('select 1;')
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
+
+ self.conn.rollback()
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ def test_set_autocommit(self):
+ self.conn.autocommit = True
+ self.assert_(self.conn.autocommit)
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ cur = self.conn.cursor()
+ cur.execute('select 1;')
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ self.conn.autocommit = False
+ self.assert_(not self.conn.autocommit)
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ cur.execute('select 1;')
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
+
+ def test_set_intrans_error(self):
+ cur = self.conn.cursor()
+ cur.execute('select 1;')
+ self.assertRaises(psycopg2.ProgrammingError,
+ setattr, self.conn, 'autocommit', True)
+
+ def test_set_session_autocommit(self):
+ self.conn.set_session(autocommit=True)
+ self.assert_(self.conn.autocommit)
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ cur = self.conn.cursor()
+ cur.execute('select 1;')
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ self.conn.set_session(autocommit=False)
+ self.assert_(not self.conn.autocommit)
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+
+ cur.execute('select 1;')
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
+ self.conn.rollback()
+
+ self.conn.set_session('serializable', readonly=True, autocommit=True)
+ self.assert_(self.conn.autocommit)
+ cur.execute('select 1;')
+ self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
+ self.assertEqual(self.conn.get_transaction_status(),
+ psycopg2.extensions.TRANSACTION_STATUS_IDLE)
+ cur.execute("SHOW default_transaction_isolation;")
+ self.assertEqual(cur.fetchone()[0], 'serializable')
+ cur.execute("SHOW default_transaction_read_only;")
+ self.assertEqual(cur.fetchone()[0], 'on')
+
+
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
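
The TransactionControlTests and AutocommitTests above exercise the new connection.set_session() method and autocommit property (see NEWS). Typical application usage looks roughly like this sketch (the DSN is illustrative; set_session() must be called outside a transaction):

import psycopg2
import psycopg2.extensions

conn = psycopg2.connect("dbname=test")  # illustrative DSN

# set the characteristics of the following transactions
conn.set_session(
    isolation_level=psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE,
    readonly=True)
cur = conn.cursor()
cur.execute("SHOW default_transaction_isolation")
print cur.fetchone()[0]     # 'serializable'
conn.rollback()

# isolation levels may also be given as strings; 'default' restores the server default
conn.set_session(isolation_level='read committed', readonly='default')
conn.rollback()

# autocommit mode: every statement takes effect immediately, no transaction is kept open
conn.autocommit = True
cur.execute("select 1")
print conn.get_transaction_status() == psycopg2.extensions.TRANSACTION_STATUS_IDLE  # True

conn.close()
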
diff --git a/tests/test_copy.py b/tests/test_copy.py
index 9026abc..7ec1b76 100755
--- a/tests/test_copy.py
+++ b/tests/test_copy.py
@@ -244,6 +244,15 @@ class CopyTests(unittest.TestCase):
self.assertEqual(ntests, len(string.ascii_letters))
+ def test_copy_expert_file_refcount(self):
+ class Whatever(object):
+ pass
+
+ f = Whatever()
+ curs = self.conn.cursor()
+ self.assertRaises(TypeError,
+ curs.copy_expert, 'COPY tcopy (data) FROM STDIN', f)
+
decorate_all_tests(CopyTests, skip_if_green)
diff --git a/tests/test_dates.py b/tests/test_dates.py
index db2050a..27abbc1 100755
--- a/tests/test_dates.py
+++ b/tests/test_dates.py
@@ -417,7 +417,11 @@ class mxDateTimeTests(unittest.TestCase, CommonDatetimeTestsMixin):
from mx.DateTime import DateTime
value = self.execute('select (%s)::timestamp::text',
[DateTime(-41, 1, 1, 13, 30, 29.123456)])
- self.assertEqual(value, '0042-01-01 13:30:29.123456 BC')
+ # microseconds for BC timestamps seem not to be available in PG < 8.4,
+ # but more likely their availability is determined at compile time.
+ self.assert_(value in (
+ '0042-01-01 13:30:29.123456 BC',
+ '0042-01-01 13:30:29 BC'), value)
def test_adapt_timedelta(self):
from mx.DateTime import DateTimeDeltaFrom
diff --git a/tests/types_basic.py b/tests/types_basic.py
index 1ca668d..709907e 100755
--- a/tests/types_basic.py
+++ b/tests/types_basic.py
@@ -275,6 +275,16 @@ class TypesBasicTests(unittest.TestCase):
o2 = self.execute("SELECT %s::bytea AS foo", (o1,))
self.assertEqual(b('x'), o2[0])
+ def testNegNumber(self):
+ d1 = self.execute("select -%s;", (decimal.Decimal('-1.0'),))
+ self.assertEqual(1, d1)
+ f1 = self.execute("select -%s;", (-1.0,))
+ self.assertEqual(1, f1)
+ i1 = self.execute("select -%s;", (-1,))
+ self.assertEqual(1, i1)
+ l1 = self.execute("select -%s;", (-1L,))
+ self.assertEqual(1, l1)
+
class AdaptSubclassTest(unittest.TestCase):
def test_adapt_subtype(self):