summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Docs/manual.texi45
-rw-r--r--sql/ha_berkeley.cc13
-rw-r--r--sql/mysql_priv.h3
-rw-r--r--sql/sql_parse.cc10
4 files changed, 49 insertions, 22 deletions
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 389e8befec5..5ddcd1fa67a 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -5467,9 +5467,10 @@ something that is of course not true. We could make things even worse
by just taking the test where PostgreSQL performs worst and claim that
MySQL is more than 2000 times faster than PostgreSQL.
-The case is that MySQL does a lot of optimizations that PostgreSQL doesn't
-do and the other way around. An SQL optimizer is a very complex thing, and
-a company could spend years on just making the optimizer faster and faster.
+The case is that MySQL does a lot of optimizations that PostgreSQL
+doesn't do. This is of course also true the other way around. An SQL
+optimizer is a very complex thing, and a company could spend years on
+just making the optimizer faster and faster.
When looking at the benchmark results you should look for things that
you do in your application and just use these results to decide which
@@ -5604,12 +5605,14 @@ MySQL with 2000 simultaneous connections doing 400 queries per second.
It sounded like he was using a Linux kernel that either had some
problems with many threads, such as kernels before 2.4, which had a problem
-with this but we have documented how to fix this and Tim should be aware of
-this problem. The other possible problem could have been an old glibc
-library and that Tim didn't use a MySQL binary from our site, which is
-linked with a corrected glibc library, but had compiled a version of his
-own with. In any of the above cases, the symptom would have been exactly
-what Tim had measured.
+with many threads on multi-CPU machines. We have documented in this manual
+how to fix this and Tim should be aware of this problem.
+
+The other possible problem could have been an old glibc library and
+that Tim didn't use a MySQL binary from our site, which is linked with
+a corrected glibc library, but had compiled a version of his own with.
+In any of the above cases, the symptom would have been exactly what Tim
+had measured.
We asked Tim if we could get access to his data so that we could repeat
the benchmark and if he could check the MySQL version on the machine to
@@ -5618,6 +5621,16 @@ He has not done that yet.
Because of this we can't put any trust in this benchmark either :(
+Over time things also change and the above benchmarks are not that
+relevant anymore. MySQL now has a couple of different table handlers
+with different speed/concurrency tradeoffs. @xref{Table types}. It
+would be interesting to see how the above tests would run with the
+different transactional table types in MySQL. PostgreSQL has of course
+also got new features since the test was made. As the above tests are
+not publicly available there is no way for us to know how the
+database would perform in the same tests today.
+
+
Conclusion:
The only benchmarks that exist today that anyone can download and run
@@ -5632,15 +5645,15 @@ The thing we find strange is that every test we have seen about
PostgreSQL, that is impossible to reproduce, claims that PostgreSQL is
better in most cases while our tests, which anyone can reproduce,
clearly shows otherwise. With this we don't want to say that PostgreSQL
-isn't good at many things (it is!). We would just like to see a fair test
-where they are very good so that we could get some friendly competition
-going!
+isn't good at many things (it is!) or that it isn't faster than MySQL
+under certain conditions. We would just like to see a fair test where
+they are very good so that we could get some friendly competition going!
For more information about our benchmarks suite @xref{MySQL Benchmarks}.
-We are working on an even better benchmark suite, including much better
-documentation of what the individual tests really do, and how to add more
-tests to the suite.
+We are working on an even better benchmark suite, including multi user
+tests, and a better documentation of what the individual tests really
+do and how to add more tests to the suite.
@node TODO, , Comparisons, Introduction
@@ -46771,7 +46784,7 @@ not yet 100% confident in this code.
@appendixsubsec Changes in release 3.23.42
@itemize @bullet
@item
-Fixed a problem when using @code{LOCK TABLES} and @code{BDB} tables.
+Fixed problem when using @code{LOCK TABLES} and @code{BDB} tables.
@item
Fixed problem with @code{REPAIR TABLE} on MyISAM tables with row lengths
between 65517 - 65520 bytes
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 25f8148e52f..7ee72803dd9 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -1658,12 +1658,15 @@ int ha_berkeley::external_lock(THD *thd, int lock_type)
{
if (!thd->transaction.bdb_lock_count++)
{
+ changed_rows=0;
/* First table lock, start transaction */
- if ((thd->options & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN)) &&
+ if ((thd->options & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN |
+ OPTION_TABLE_LOCK)) &&
!thd->transaction.all.bdb_tid)
{
+ DBUG_ASSERT(thd->transaction.stmt.bdb_tid != 0);
/* We have to start a master transaction */
- DBUG_PRINT("trans",("starting transaction"));
+ DBUG_PRINT("trans",("starting transaction all"));
if ((error=txn_begin(db_env, 0,
(DB_TXN**) &thd->transaction.all.bdb_tid,
0)))
@@ -1671,8 +1674,10 @@ int ha_berkeley::external_lock(THD *thd, int lock_type)
thd->transaction.bdb_lock_count--; // We didn't get the lock /* purecov: inspected */
DBUG_RETURN(error); /* purecov: inspected */
}
+ if (thd->in_lock_tables)
+ DBUG_RETURN(0); // Don't create stmt trans
}
- DBUG_PRINT("trans",("starting transaction for statement"));
+ DBUG_PRINT("trans",("starting transaction stmt"));
if ((error=txn_begin(db_env,
(DB_TXN*) thd->transaction.all.bdb_tid,
(DB_TXN**) &thd->transaction.stmt.bdb_tid,
@@ -1684,7 +1689,6 @@ int ha_berkeley::external_lock(THD *thd, int lock_type)
}
}
transaction= (DB_TXN*) thd->transaction.stmt.bdb_tid;
- changed_rows=0;
}
else
{
@@ -1722,6 +1726,7 @@ int ha_berkeley::start_stmt(THD *thd)
DBUG_ENTER("ha_berkeley::start_stmt");
if (!thd->transaction.stmt.bdb_tid)
{
+ DBUG_PRINT("trans",("starting transaction stmt"));
error=txn_begin(db_env, (DB_TXN*) thd->transaction.all.bdb_tid,
(DB_TXN**) &thd->transaction.stmt.bdb_tid,
0);
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 06d0b1528f4..841d76928e1 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -163,7 +163,8 @@ void kill_one_thread(THD *thd, ulong id);
#define OPTION_BIN_LOG OPTION_BUFFER_RESULT*2
#define OPTION_NOT_AUTO_COMMIT OPTION_BIN_LOG*2
#define OPTION_BEGIN OPTION_NOT_AUTO_COMMIT*2
-#define OPTION_QUICK OPTION_BEGIN*2
+#define OPTION_TABLE_LOCK OPTION_BEGIN*2
+#define OPTION_QUICK OPTION_TABLE_LOCK*2
#define OPTION_QUOTE_SHOW_CREATE OPTION_QUICK*2
#define OPTION_INTERNAL_SUBTRANSACTIONS OPTION_QUOTE_SHOW_CREATE*2
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 417484b2ef7..18ab3c45359 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -81,7 +81,8 @@ static void init_signals(void)
inline bool end_active_trans(THD *thd)
{
int error=0;
- if (thd->options & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))
+ if (thd->options & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN |
+ OPTION_TABLE_LOCK))
{
thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE);
thd->server_status&= ~SERVER_STATUS_IN_TRANS;
@@ -1825,7 +1826,11 @@ mysql_execute_command(void)
{
thd->lock=thd->locked_tables;
thd->locked_tables=0; // Will be automaticly closed
+ }
+ if (thd->options & OPTION_TABLE_LOCK)
+ {
end_active_trans(thd);
+ thd->options&= ~(ulong) (OPTION_TABLE_LOCK);
}
if (thd->global_read_lock)
{
@@ -1847,12 +1852,15 @@ mysql_execute_command(void)
if (check_db_used(thd,tables) || end_active_trans(thd))
goto error;
thd->in_lock_tables=1;
+ thd->options|= OPTION_TABLE_LOCK;
if (!(res=open_and_lock_tables(thd,tables)))
{
thd->locked_tables=thd->lock;
thd->lock=0;
send_ok(&thd->net);
}
+ else
+ thd->options&= ~(ulong) (OPTION_TABLE_LOCK);
thd->in_lock_tables=0;
break;
case SQLCOM_CREATE_DB: