path: root/test/perf/poolload.py
author    Mike Bayer <mike_mp@zzzcomputing.com>    2007-07-20 15:10:56 +0000
committer Mike Bayer <mike_mp@zzzcomputing.com>    2007-07-20 15:10:56 +0000
commit    687d9342e6a4a59f63648bb83d28e338f274a0f6 (patch)
tree      2fc4a2e46cd40edf7e59c889c8ae2e8c96b12b47 /test/perf/poolload.py
parent    c4b081cc1d89e8d02cb9fad6a84daf035a90df63 (diff)
download  sqlalchemy-687d9342e6a4a59f63648bb83d28e338f274a0f6.tar.gz
- a mutex added in 0.3.9 caused the pool_timeout feature to fail under a race
condition: if many threads pushed the pool into overflow at the same time, waiting threads would raise TimeoutError immediately, with no delay. this issue has been fixed.
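
For illustration (not part of the commit), here is a minimal sketch of the behavior this fix restores, written against the modern sqlalchemy.pool.QueuePool API with an in-memory SQLite creator as a stand-in DBAPI: with pool_size=2, max_overflow=1 and timeout=5, threads beyond the third should block for the full five seconds before TimeoutError is raised, never fail instantly.

    import sqlite3
    import threading
    import time

    from sqlalchemy.exc import TimeoutError as PoolTimeoutError
    from sqlalchemy.pool import QueuePool

    # check_same_thread=False lets a recycled connection be checked out
    # by a different thread than the one that created it.
    pool = QueuePool(
        lambda: sqlite3.connect(":memory:", check_same_thread=False),
        pool_size=2,      # two pooled connections...
        max_overflow=1,   # ...plus one overflow connection
        timeout=5,        # waiters block up to 5s before TimeoutError
    )

    def worker():
        start = time.time()
        try:
            conn = pool.connect()  # threads beyond the third wait here
            time.sleep(10)         # hold the connection past the timeout
            conn.close()
        except PoolTimeoutError:
            # with the race fixed, this fires only after ~5s, never instantly
            print("timed out after %.1fs" % (time.time() - start))

    threads = [threading.Thread(target=worker) for _ in range(10)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
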
Diffstat (limited to 'test/perf/poolload.py')
-rw-r--r--    test/perf/poolload.py    45
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/test/perf/poolload.py b/test/perf/poolload.py
index 090827709..d096f1c67 100644
--- a/test/perf/poolload.py
+++ b/test/perf/poolload.py
@@ -1,36 +1,37 @@
-# this program should open three connections. then after five seconds, the remaining
-# 45 threads should receive a timeout error. then the program will just stop until
-# ctrl-C is pressed. it should *NOT* open a bunch of new connections.
+# load test of connection pool
from sqlalchemy import *
import sqlalchemy.pool as pool
-import psycopg2 as psycopg
import thread,time
-psycopg = pool.manage(psycopg,pool_size=2,max_overflow=1, timeout=5, echo=True)
-print psycopg
-db = create_engine('postgres://scott:tiger@127.0.0.1/test',pool=psycopg,strategy='threadlocal')
-print db.connection_provider._pool
+db = create_engine('mysql://scott:tiger@127.0.0.1/test', pool_timeout=30, echo_pool=True)
+
metadata = MetaData(db)
users_table = Table('users', metadata,
Column('user_id', Integer, primary_key=True),
Column('user_name', String(40)),
Column('password', String(10)))
+metadata.drop_all()
metadata.create_all()
-class User(object):
- pass
-usermapper = mapper(User, users_table)
+users_table.insert().execute([{'user_name':'user#%d' % i, 'password':'pw#%d' % i} for i in range(1000)])
-#Then i create loads of threads and in run() of each thread:
-def run():
- session = create_session()
- transaction = session.create_transaction()
- query = session.query(User)
- u1=query.select(User.c.user_id==3)
-
-for x in range(0,50):
- thread.start_new_thread(run, ())
+def runfast():
+ while True:
+ c = db.connection_provider._pool.connect()
+ time.sleep(.5)
+ c.close()
+# result = users_table.select(limit=100).execute()
+# d = {}
+# for row in result:
+# for col in row.keys():
+# d[col] = row[col]
+# time.sleep(.005)
+# result.close()
+ print "runfast cycle complete"
+
+#thread.start_new_thread(runslow, ())
+for x in xrange(0,50):
+ thread.start_new_thread(runfast, ())
-while True:
- time.sleep(5)
+time.sleep(100)
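
As a closing note (not part of the commit): the test reaches into db.connection_provider._pool, an internal that has long since been removed. A rough equivalent of the runfast loop against a modern SQLAlchemy (1.4+) engine checks connections out of the public Engine.pool attribute; this sketch assumes a file-based SQLite database in place of the MySQL URL above.

    import threading
    import time

    from sqlalchemy import create_engine
    from sqlalchemy.pool import QueuePool

    # poolclass=QueuePool mirrors the MySQL engine's default pool;
    # check_same_thread=False lets pooled SQLite connections move
    # between threads.
    engine = create_engine(
        "sqlite:///load_test.db",
        poolclass=QueuePool,
        pool_timeout=30,
        echo_pool=True,
        connect_args={"check_same_thread": False},
    )

    def runfast():
        while True:
            c = engine.pool.connect()  # raw pool checkout, as in the test
            time.sleep(0.5)
            c.close()
            print("runfast cycle complete")

    for _ in range(50):
        threading.Thread(target=runfast, daemon=True).start()

    time.sleep(100)
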