author    Mike Bayer <mike_mp@zzzcomputing.com>  2010-04-15 00:13:48 -0400
committer Mike Bayer <mike_mp@zzzcomputing.com>  2010-04-15 00:13:48 -0400
commit    1cf4a745d83acc51a562ca1d1289cf524fbee33c (patch)
tree      b7d175573b73938c1e01cd20fb291d86a60e69a6
parent    74a417a5996f829f301853eeed363e5389226107 (diff)
download  sqlalchemy-1cf4a745d83acc51a562ca1d1289cf524fbee33c.tar.gz
- beef up the --reversetop test option to embed RandomSet throughout the ORM
- with m2m we have to go back to the previous approach of having both sides of the DP fire off, tracking each pair of objects, since history may not be consistently present on one side or the other. This revealed a whole lot of issues with self-referential m2m, which are fixed.
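
The second change amounts to keeping one shared "processed" collection per relationship pair, so that whichever side of a bidirectional many-to-many happens to carry the history emits the association row and the reverse side skips it. A minimal sketch of that idea, using purely illustrative names (flush_links and the plain string "nodes" are not SQLAlchemy APIs):

    # illustrative sketch only -- not the ORM internals, just the
    # pair-tracking idea: one shared set of already-handled links means a
    # given association row is emitted exactly once, regardless of which
    # side's history records it.
    def flush_links(forward_pairs, reverse_pairs):
        processed = set()                   # shared between both "sides"
        rows = []

        def run_side(pairs, reversed_=False):
            for a, b in pairs:
                key = (b, a) if reversed_ else (a, b)
                if key in processed:
                    continue                # other side already handled it
                processed.add(key)
                rows.append(key)

        run_side(forward_pairs)             # parent -> child history
        run_side(reverse_pairs, True)       # child -> parent history
        return rows

    # either side may hold the history for a given link; each distinct
    # (parent, child) pair still produces a single row
    print(flush_links([("n1", "n2"), ("n1", "n3")], [("n3", "n1")]))
    # [('n1', 'n2'), ('n1', 'n3')]
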
-rw-r--r--  lib/sqlalchemy/orm/dependency.py   55
-rw-r--r--  lib/sqlalchemy/sql/expression.py    3
-rw-r--r--  lib/sqlalchemy/test/assertsql.py    3
-rw-r--r--  lib/sqlalchemy/test/config.py      13
-rw-r--r--  lib/sqlalchemy/test/noseplugin.py   2
-rw-r--r--  lib/sqlalchemy/topological.py       4
-rw-r--r--  test/orm/test_unitofworkv2.py      62
-rw-r--r--  test/perf/large_flush.py            2
8 files changed, 63 insertions, 81 deletions
diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py
index 624035c68..9f1b78f4a 100644
--- a/lib/sqlalchemy/orm/dependency.py
+++ b/lib/sqlalchemy/orm/dependency.py
@@ -101,12 +101,6 @@ class DependencyProcessor(object):
"""
- # assertions to ensure this method isn't being
- # called unnecessarily. can comment these out when
- # code is stable
- assert not self.post_update or not self._check_reverse(uow)
-
-
# locate and disable the aggregate processors
# for this dependency
@@ -776,11 +770,6 @@ class DetectKeySwitch(DependencyProcessor):
class ManyToManyDP(DependencyProcessor):
- def per_property_preprocessors(self, uow):
- if self._check_reverse(uow):
- return
- DependencyProcessor.per_property_preprocessors(self, uow)
-
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
@@ -860,11 +849,27 @@ class ManyToManyDP(DependencyProcessor):
child):
uowcommit.register_object(
attributes.instance_state(c), isdelete=True)
-
+
+ def _get_reversed_processed_set(self, uow):
+ if not self.prop._reverse_property:
+ return None
+
+ process_key = tuple(sorted(
+ [self.key] +
+ [p.key for p in self.prop._reverse_property]
+ ))
+ return uow.memo(
+ ('reverse_key', process_key),
+ set
+ )
+
def process_deletes(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
+
+ processed = self._get_reversed_processed_set(uowcommit)
+
for state in states:
history = uowcommit.get_attribute_history(
state,
@@ -872,7 +877,9 @@ class ManyToManyDP(DependencyProcessor):
passive=self.passive_deletes)
if history:
for child in history.non_added():
- if child is None:
+ if child is None or \
+ (processed is not None and (state, child) in processed) or \
+ not uowcommit.session._contains_state(child):
continue
associationrow = {}
self._synchronize(
@@ -881,7 +888,10 @@ class ManyToManyDP(DependencyProcessor):
associationrow,
False, uowcommit)
secondary_delete.append(associationrow)
-
+
+ if processed is not None:
+ processed.update((c, state) for c in history.non_added())
+
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
@@ -890,11 +900,14 @@ class ManyToManyDP(DependencyProcessor):
secondary_insert = []
secondary_update = []
+ processed = self._get_reversed_processed_set(uowcommit)
+
for state in states:
history = uowcommit.get_attribute_history(state, self.key)
if history:
for child in history.added:
- if child is None:
+ if child is None or \
+ (processed is not None and (state, child) in processed):
continue
associationrow = {}
self._synchronize(state,
@@ -903,7 +916,9 @@ class ManyToManyDP(DependencyProcessor):
False, uowcommit)
secondary_insert.append(associationrow)
for child in history.deleted:
- if child is None:
+ if child is None or \
+ (processed is not None and (state, child) in processed) or \
+ not uowcommit.session._contains_state(child):
continue
associationrow = {}
self._synchronize(state,
@@ -911,7 +926,10 @@ class ManyToManyDP(DependencyProcessor):
associationrow,
False, uowcommit)
secondary_delete.append(associationrow)
-
+
+ if processed is not None:
+ processed.update((c, state) for c in history.added + history.deleted)
+
if not self.passive_updates and \
self._pks_changed(uowcommit, state):
if not history:
@@ -935,13 +953,14 @@ class ManyToManyDP(DependencyProcessor):
secondary_update.append(associationrow)
+
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def _run_crud(self, uowcommit, secondary_insert,
secondary_update, secondary_delete):
connection = uowcommit.transaction.connection(self.mapper)
-
+
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(sql.and_(*[
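
The key detail in _get_reversed_processed_set above is that both ManyToManyDP instances of a relationship pair build the memo key from the same sorted list of attribute keys, so uow.memo() hands each direction the same set. A tiny sketch of that, with hypothetical attribute names:

    # sketch: sorting the relationship key together with its reverse keys
    # yields an identical memo key from either direction (the attribute
    # names "children" / "parents" here are hypothetical).
    def process_key(key, reverse_keys):
        return tuple(sorted([key] + list(reverse_keys)))

    assert process_key("children", ["parents"]) == \
           process_key("parents", ["children"])
    # both sides -> ('children', 'parents')
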
diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py
index fc6b5ad97..70e26cfcc 100644
--- a/lib/sqlalchemy/sql/expression.py
+++ b/lib/sqlalchemy/sql/expression.py
@@ -2898,6 +2898,9 @@ class Join(FromClause):
underlying :func:`select()` function.
"""
+ global sql_util
+ if not sql_util:
+ from sqlalchemy.sql import util as sql_util
if fold_equivalents:
collist = sql_util.folded_equivalents(self)
else:
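
The expression.py hunk defers the sql_util import until the method is first called, a standard way of breaking a circular import between two modules. A generic, runnable sketch of the same pattern, using a stdlib module as a stand-in for sqlalchemy.sql.util:

    # deferred-import sketch (stdlib stand-in for sqlalchemy.sql.util);
    # mirrors "global sql_util / from sqlalchemy.sql import util as sql_util"
    json = None                     # module-level placeholder

    def dump(obj):
        global json
        if json is None:
            import json             # resolved lazily, after module load,
                                    # so an import cycle never bites
        return json.dumps(obj)

    print(dump({"a": 1}))           # {"a": 1}
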
diff --git a/lib/sqlalchemy/test/assertsql.py b/lib/sqlalchemy/test/assertsql.py
index d67de2355..81ef73a7c 100644
--- a/lib/sqlalchemy/test/assertsql.py
+++ b/lib/sqlalchemy/test/assertsql.py
@@ -173,7 +173,8 @@ class CompiledSQL(SQLMatchRule):
self._result = equivalent
if not self._result:
self._errmsg = "Testing for compiled statement %r partial params %r, " \
- "received %r with params %r" % (self.statement, all_params, _received_statement, all_received)
+ "received %r with params %r" % \
+ (self.statement, all_params, _received_statement, all_received)
class CountStatements(AssertRule):
diff --git a/lib/sqlalchemy/test/config.py b/lib/sqlalchemy/test/config.py
index efbe00fef..7d528a04b 100644
--- a/lib/sqlalchemy/test/config.py
+++ b/lib/sqlalchemy/test/config.py
@@ -1,6 +1,5 @@
import optparse, os, sys, re, ConfigParser, time, warnings
-
# 2to3
import StringIO
@@ -166,15 +165,9 @@ post_configure['table_options'] = _set_table_options
def _reverse_topological(options, file_config):
if options.reversetop:
- from sqlalchemy.orm import unitofwork
+ from sqlalchemy.orm import unitofwork, session, mapper, dependency
from sqlalchemy import topological
- class RevQueueDepSort(topological.QueueDependencySorter):
- def __init__(self, tuples, allitems):
- self.tuples = list(tuples)
- self.allitems = list(allitems)
- self.tuples.reverse()
- self.allitems.reverse()
- topological.QueueDependencySorter = RevQueueDepSort
- unitofwork.DependencySorter = RevQueueDepSort
+ from sqlalchemy.test.util import RandomSet
+ topological.set = unitofwork.set = session.set = mapper.set = dependency.set = RandomSet
post_configure['topological'] = _reverse_topological
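
Under --reversetop the hook above now swaps the built-in set for a randomly ordered one in the topological sort and throughout the ORM modules, so hidden ordering assumptions surface in the tests. A hedged sketch of what such a set can look like; the real RandomSet lives in sqlalchemy.test.util and may be implemented differently:

    # sketch of a random-iteration set (the actual implementation is in
    # sqlalchemy.test.util and is not shown in this diff).
    import random

    class RandomSet(set):
        def __iter__(self):
            items = list(set.__iter__(self))
            random.shuffle(items)           # randomize iteration order
            return iter(items)

        def pop(self):
            item = random.choice(list(self))
            self.remove(item)
            return item

    # then, as _reverse_topological does:
    #   topological.set = unitofwork.set = session.set = mapper.set = \
    #       dependency.set = RandomSet
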
diff --git a/lib/sqlalchemy/test/noseplugin.py b/lib/sqlalchemy/test/noseplugin.py
index 5e8e21e8f..6a3106e69 100644
--- a/lib/sqlalchemy/test/noseplugin.py
+++ b/lib/sqlalchemy/test/noseplugin.py
@@ -51,7 +51,7 @@ class NoseSQLAlchemy(Plugin):
callback=_engine_strategy,
help="Engine strategy (plain or threadlocal, defaults to plain)")
opt("--reversetop", action="store_true", dest="reversetop", default=False,
- help="Reverse the collection ordering for topological sorts (helps "
+ help="Use a random-ordering set implementation in the ORM (helps "
"reveal dependency issues)")
opt("--unhashable", action="store_true", dest="unhashable", default=False,
help="Disallow SQLAlchemy from performing a hash() on mapped test objects.")
diff --git a/lib/sqlalchemy/topological.py b/lib/sqlalchemy/topological.py
index 2b6eadd5d..6c3e90d98 100644
--- a/lib/sqlalchemy/topological.py
+++ b/lib/sqlalchemy/topological.py
@@ -9,10 +9,6 @@
from sqlalchemy.exc import CircularDependencyError
from sqlalchemy import util
-# this enables random orderings for iterated subsets
-# of non-dependent items.
-#from sqlalchemy.test.util import RandomSet as set
-
__all__ = ['sort', 'sort_as_subsets', 'find_cycles']
def sort_as_subsets(tuples, allitems):
diff --git a/test/orm/test_unitofworkv2.py b/test/orm/test_unitofworkv2.py
index e28537b00..33e5f557e 100644
--- a/test/orm/test_unitofworkv2.py
+++ b/test/orm/test_unitofworkv2.py
@@ -1,7 +1,7 @@
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy.test import testing
from sqlalchemy.test.schema import Table, Column
-from sqlalchemy import Integer, String, ForeignKey
+from sqlalchemy import Integer, String, ForeignKey, func
from test.orm import _fixtures, _base
from sqlalchemy.orm import mapper, relationship, backref, \
create_session, unitofwork, attributes
@@ -585,50 +585,20 @@ class SingleCycleM2MTest(_base.MappedTest, testing.AssertsExecutionResults, Asse
sess.add_all([n1, n2, n3, n4, n5])
- self.assert_sql_execution(
- testing.db,
- sess.flush,
-
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- {'data': 'n2', 'favorite_node_id': None}
- ),
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- {'data': 'n3', 'favorite_node_id': None}),
- CompiledSQL("INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- lambda ctx:{'data': 'n5', 'favorite_node_id': n2.id}),
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- lambda ctx:{'data': 'n4', 'favorite_node_id': n3.id}),
- CompiledSQL(
- "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
- "VALUES (:left_node_id, :right_node_id)",
- lambda ctx:[
- {'right_node_id': n5.id, 'left_node_id': n3.id},
- {'right_node_id': n4.id, 'left_node_id': n3.id},
- {'right_node_id': n3.id, 'left_node_id': n2.id},
- {'right_node_id': n5.id, 'left_node_id': n2.id}
- ]
- ),
- CompiledSQL(
- "INSERT INTO nodes (data, favorite_node_id) "
- "VALUES (:data, :favorite_node_id)",
- lambda ctx:[{'data': 'n1', 'favorite_node_id': n5.id}]
- ),
- CompiledSQL(
- "INSERT INTO node_to_nodes (left_node_id, right_node_id) "
- "VALUES (:left_node_id, :right_node_id)",
- lambda ctx:[
- {'right_node_id': n2.id, 'left_node_id': n1.id},
- {'right_node_id': n3.id, 'left_node_id': n1.id},
- {'right_node_id': n4.id, 'left_node_id': n1.id}
- ])
- )
+ # can't really assert the SQL on this easily
+ # since there's too many ways to insert the rows.
+ # so check the end result
+ sess.flush()
+ eq_(
+ sess.query(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\
+ order_by(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\
+ all(),
+ sorted([
+ (n1.id, n2.id), (n1.id, n3.id), (n1.id, n4.id),
+ (n2.id, n3.id), (n2.id, n5.id),
+ (n3.id, n5.id), (n3.id, n4.id)
+ ])
+ )
sess.delete(n1)
@@ -653,7 +623,7 @@ class SingleCycleM2MTest(_base.MappedTest, testing.AssertsExecutionResults, Asse
for n in [n2, n3, n4, n5]:
sess.delete(n)
-
+
# load these collections
# outside of the flush() below
n4.children
diff --git a/test/perf/large_flush.py b/test/perf/large_flush.py
index 431a28944..5dd6f610f 100644
--- a/test/perf/large_flush.py
+++ b/test/perf/large_flush.py
@@ -70,7 +70,7 @@ mapper(A, a_table, inherits=Object, polymorphic_identity='A',
SA_Metadata.create_all(engine)
-@profiling.profiled('large_flush', always=True, sort=['file'])
+@profiling.profiled('large_flush', always=True, sort=['cumulative'])
def generate_error():
q = Q()
for j in range(100): #at 306 the error does not pop out (depending on recursion depth)
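
The large_flush.py tweak only changes how the profile report is sorted, from per-file to cumulative time. For reference, a plain cProfile equivalent of a cumulative sort, independent of the test suite's profiling.profiled decorator:

    # stdlib-only sketch of sorting a profile by cumulative time
    import cProfile, pstats, io

    def work():
        return sum(i * i for i in range(100000))

    pr = cProfile.Profile()
    pr.enable()
    work()
    pr.disable()

    out = io.StringIO()
    pstats.Stats(pr, stream=out).sort_stats('cumulative').print_stats(5)
    print(out.getvalue())
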