summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.bzrignore1
-rw-r--r--BitKeeper/etc/logging_ok2
-rw-r--r--Docs/manual.texi594
-rw-r--r--isam/_dynrec.c2
-rw-r--r--myisam/mi_dynrec.c9
-rw-r--r--myisam/mi_packrec.c2
-rw-r--r--mysql-test/r/alter_table.result5
-rw-r--r--mysql-test/r/variables.result4
-rw-r--r--mysql-test/t/alter_table.test11
-rw-r--r--mysql-test/t/variables.test4
-rw-r--r--mysys/my_pread.c6
-rw-r--r--sql-bench/Makefile.am2
-rwxr-xr-xsql-bench/README2
-rw-r--r--sql/item_func.cc2
-rw-r--r--sql/share/swedish/errmsg.OLD3
-rw-r--r--sql/sql_base.cc3
-rw-r--r--sql/sql_insert.cc1
-rw-r--r--sql/sql_table.cc9
18 files changed, 596 insertions, 66 deletions
diff --git a/.bzrignore b/.bzrignore
index dbe41ffe92a..b160b186fba 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -284,3 +284,4 @@ support-files/mysql.spec
tags
tmp/*
sql-bench/gif/*
+sql-bench/graph-compare-results
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index e8deba03c8a..01a1261eae5 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -1 +1 @@
-monty@hundin.mysql.fi
+monty@tik.mysql.fi
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 76e84b4cdad..22275637890 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -686,7 +686,7 @@ System/Compile Time and Startup Parameter Tuning
* Compile and link options:: How compiling and linking affects the speed of MySQL
* Disk issues:: Disk issues
-* Symbolic links::
+* Symbolic links:: Using Symbolic Links
* Server parameters:: Tuning server parameters
* Table cache:: How MySQL opens and closes tables
* Creating many tables:: Drawbacks of creating large numbers of tables in the same database
@@ -952,6 +952,17 @@ How MySQL Compares to @code{mSQL}
* Protocol differences:: How @code{mSQL} and @strong{MySQL} client/server communications protocols differ
* Syntax differences:: How @code{mSQL} 2.0 SQL syntax differs from @strong{MySQL}
+How MySQL Compares to PostgreSQL
+
+* MySQL-PostgreSQL goals::
+* MySQL-PostgreSQL features::
+* MySQL-PostgreSQL benchmarks::
+
+MySQL and PostgreSQL development goals
+
+* MySQL-PostgreSQL features::
+* MySQL-PostgreSQL benchmarks::
+
MySQL Internals
* MySQL threads:: MySQL threads
@@ -9000,7 +9011,7 @@ named pipe connections. You should use either @code{mysqld-nt} or
@code{mysqld-max-nt}.)
If @code{mysqld} doesn't start, please check whether or not the
-@file{\mysql\mysql.err} file contains any reason for this. You can also
+@file{\mysql\data\mysql.err} file contains any reason for this. You can also
try to start the server with @code{mysqld --standalone}; In this case, you may
get some useful information on the screen that may help solve the problem.
@@ -9174,14 +9185,12 @@ server, you can do so using this command:
C:\> mysqladmin --user=root --password=your_password shutdown
@end example
-If you are using the old shareware version of @strong{MySQL} Version 3.21
-under Windows, the above command will fail with an error: @code{parse error
-near 'SET OPTION password'}. This is because the old shareware version,
-which is based on @strong{MySQL} Version 3.21, doesn't have the
-@code{SET PASSWORD} command. The fix is in this case to upgrade to
-the Version 3.22 shareware.
+If you are using the old shareware version of @strong{MySQL} Version
+3.21 under Windows, the above command will fail with an error:
+@code{parse error near 'SET OPTION password'}. The fix is to upgrade
+to the current @strong{MySQL} version, which is freely available.
-With the newer @strong{MySQL} versions you can easily add new users
+With the current @strong{MySQL} versions you can easily add new users
and change privileges with @code{GRANT} and @code{REVOKE} commands.
@xref{GRANT}.
@@ -9250,7 +9259,7 @@ Note that the symbolic link will be used only if the directory
For example, if the @strong{MySQL} data directory is @file{C:\mysql\data}
and you want to have database @code{foo} located at @file{D:\data\foo}, you
should create the file @file{C:\mysql\data\foo.sym} that contains the
-text @code{D:\data\foo}. After that, all tables created in the database
+text @code{D:\data\foo\}. After that, all tables created in the database
@code{foo} will be created in @file{D:\data\foo}.
Note that because of the speed penalty you get when opening every table,
@@ -10066,7 +10075,7 @@ correctly, check the log file to see if you can find out why. Log files
are located in the data directory (typically
@file{/usr/local/mysql/data} for a binary distribution,
@file{/usr/local/var} for a source distribution,
-@file{\mysql\mysql.err} on Windows.) Look in the data directory for
+@file{\mysql\data\mysql.err} on Windows.) Look in the data directory for
files with names of the form @file{host_name.err} and
@file{host_name.log} where @code{host_name} is the name of your server
host. Then check the last few lines of these files:
@@ -10611,7 +10620,7 @@ interactive-timeout
@tindex .my.cnf file
If you have a source distribution, you will find sample configuration
files named @file{my-xxxx.cnf} in the @file{support-files} directory.
-If you have a binary distribution, look in the @file{DIR/share/mysql}
+If you have a binary distribution, look in the @file{DIR/support-files}
directory, where @code{DIR} is the pathname to the @strong{MySQL}
installation directory (typically @file{/usr/local/mysql}). Currently
there are sample configuration files for small, medium, large, and very
@@ -19563,6 +19572,11 @@ sequence number by executing @code{SET INSERT_ID=#} before
@code{ALTER TABLE} or using the @code{AUTO_INCREMENT = #} table option.
@xref{SET OPTION}.
+With MyISAM tables, if you don't change the @code{AUTO_INCREMENT}
+column, the sequence number will not be affected. If you drop an
+@code{AUTO_INCREMENT} column and then add another @code{AUTO_INCREMENT}
+column, the numbers will start from 1 again.
+
@xref{ALTER TABLE problems}.
@findex RENAME TABLE
@@ -39430,7 +39444,7 @@ switch to a new log) by executing @code{FLUSH LOGS}. @xref{FLUSH}.
@code{mysqld} writes all errors to the stderr, which the
@code{safe_mysqld} script redirects to a file called
@code{'hostname'.err}. (On Windows, @code{mysqld} writes this directly
-to @file{mysql.err}).
+to @file{\mysql\data\mysql.err}).
This contains information indicating when @code{mysqld} was started and
stopped and also any critical errors found when running. If @code{mysqld}
@@ -43585,53 +43599,516 @@ users.
@item
@end table
-@cindex PostgreSQL, comparison
+@cindex PostgreSQL/MySQL, overview
@node Compare PostgreSQL, , Compare mSQL, Comparisons
@section How MySQL Compares to PostgreSQL
+When reading the following, please note that both products are
+continually evolving. We at @strong{MySQL AB} and the PostgreSQL
+developers are both working on making our respective database as good as
+possible, so we are both a serious choice to any commercial database.
+
+The following comparison is made by us at MySQL AB. We have tried to be
+as accurate and fair as possible, but because we don't have a full
+knowledge of all PostgreSQL features while we know MySQL thoroughly, we
+may have got some things wrong. We will however correct these when they
+come to our attention.
+
We would first like to note that @code{PostgreSQL} and @strong{MySQL}
-are both widely used products, but their design goals are completely
-different. This means that for some applications @strong{MySQL} is more
-suitable and for others @code{PostgreSQL} is more suitable. When
-choosing which database to use, you should first check if the database's
-feature set is good enough to satisfy your application. If you need
-speed, @strong{MySQL} is probably your best choice. If you need some
-of the extra features that @code{PostgreSQL} can offer, you should use
+are both widely used products, but with different design goals, even if
+we are both striving to be ANSI SQL compatible. This means that for
+some applications @strong{MySQL} is more suitable and for others
+@code{PostgreSQL} is more suitable. When choosing which database to
+use, you should first check if the database's feature set satisfies your
+application. If you need speed, @strong{MySQL} is probably your best
+choice. If you need some of the extra features that only @code{PostgreSQL}
+can offer, you should use @code{PostgreSQL}.
+
+@cindex PostgreSQL/MySQL, goals
+@menu
+* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development goals
+* MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL
+* MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL
+@end menu
+
+@node MySQL-PostgreSQL goals, MySQL-PostgreSQL features, Compare PostgreSQL, Compare PostgreSQL
+@subsection MySQL and PostgreSQL development goals
+
+When adding things to MySQL we take pride to do an optimal, definite
+solution. The code should be so good that we shouldn't have any need to
+change it in the foreseeable future. We also do not like to sacrifice
+speed for features but instead will do our utmost to find a solution
+that will give maximal throughput. This means that development will take
+a little longer, but the end result will be well worth this. This kind
+of development is only possible because all server code are checked by
+one of a few (currently two) persons before it's included in the
+@strong{MySQL} server.
+
+We at MySQL AB believe in frequent releases to be able to push out new
+features quickly to our users. Because of this we do a new small release
+about every 3 weeks, with a major branch every year. All releases are
+thoroughly tested with our testing tools on a lot of different platforms.
+
+PostgreSQL is based on a kernel with lots of contributors. In this setup
+it makes sense to prioritize adding a lot of new features, instead of
+implementing them optimally, because one can always optimize things
+later if there arises a need for this.
+
+Another big difference between @strong{MySQL} and PostgreSQL is that
+nearly all of the code in the MySQL server are coded by developers that
+are employed by MySQL AB and are still working on the server code. The
+exceptions are the transaction engines and the regexp library.
+
+This is in sharp contrast to the PostgreSQL code where the majority of
+the code is coded by a big group of people with different backgrounds.
+It was only recently that the PostgreSQL developers announced that they
+current developer group had finally had time to take a look at all
+the code in the current PostgreSQL release.
+
+Both of the above development methods have their own merits and drawbacks.
+We here at @strong{MySQL AB} think of course that our model is better
+because our model gives better code consistence, more optimal and
+reusable code and, in our opinion, fewer bugs. Because we are the
+authors of the @strong{MySQL} server code we are better able to
+coordinate new features and releases.
+
+@cindex PostgreSQL/MySQL, features
+@node MySQL-PostgreSQL features, MySQL-PostgreSQL benchmarks, MySQL-PostgreSQL goals, Compare PostgreSQL
+@subsection Featurevise Comparison of MySQL and PostgreSQL
+
+On the @uref{http://www.mysql.com/information/crash-me.php, crash-me}
+page you can find a list of those database constructs and limits that
+one can detect automatically with a program. Note however that a lot of
+the numerical limits may be changed with startup options for respective
+database. The above web page is however extremely useful when you want to
+ensure that your applications work with many different databases or
+when you want to convert your application from one database to another.
+
+@strong{MySQL} offers the following advantages over PostgreSQL:
+
+@itemize @bullet
+@item
+@code{MySQL} is generally much faster than PostgreSQL.
+@xref{MySQL-PostgreSQL benchmarks}.
+@item
+Because @strong{MySQL} has a much larger user base than PostgreSQL the
+code is more tested and has historically been more stable than
+PostgreSQL. @strong{MySQL} is the much more used in production
+environments than PostgreSQL, mostly thanks to that @strong{MySQL AB},
+former TCX DataKonsult AB, has provided top quality commercial support
+for @strong{MySQL} from the day it was released, whereas until recently
+PostgreSQL was unsupported.
+@item
+@strong{MySQL} works on more platforms than PostgreSQL. @xref{Which OS}.
+@item
+@strong{MySQL} works better on Windows; @strong{MySQL} is running as a
+native windows application (a service on NT/Win2000/WinXP), while
+PostgreSQL is run under the cygwin emulation. We have heard that
+PostgreSQL is not yet that stable on windows but we haven't been able to
+verify this ourselves.
+@item
+@strong{MySQL} has more API to other languages and is supported by more
+programs than PostgreSQL. @xref{Contrib}.
+@item
+@strong{MySQL} works on 24/7 heavy duty systems. In most circumstances
+you never have to run any cleanups on @code{MySQL}. PostgreSQL doesn't
+yet support 24/7 systems because you have to run @code{vacuum()}
+once in a while to reclaim space from @code{UPDATE} and @code{DELETE}
+commands and to perform statistics analyzes that are critical to get
+good performance with PostgreSQL. On a busy system with lots of changes
+vacuum must be run very frequently, in the worst cases even many times a
+day. During the @code{vacuum()} run, which may take hours if the
+database is big, the database is from a production standpoint
+practically dead. The PostgreSQL team has fixing this on their TODO,
+but we assume that this is not an easy thing to fix permanently.
+@item
+A working, tested replication feature used by sites like
+@uref{http://finance.yahoo.com, Yahoo finance},
+@uref{http://www.mobile.de/,mobile.de} and
+@uref{http://www.slashdot.org,Slashdot}.
+@item
+Included in the @strong{MySQL} distribution is included two different
+testing suits (@file{mysql-test-run} and
+@uref{http://www.mysql.com/information/crash-me.php,crash-me}) and a
+benchmark suite. The test system is actively updated with code to test
+each new feature and almost all repeatable bugs that comes to our
+attention. We test @strong{MySQL} with these on a lot of platforms
+before every release. These tests are more sophisticated than anything
+we have seen from PostgreSQL and ensure that the @strong{MySQL} code
+keeps at a high standard.
+@item
+There are far more books in print on @strong{MySQL} than on PostgreSQL.
+O'Reilly, Sams, Que, and New Riders are all major publishers with books
+about MySQL. All @strong{MySQL} features is also documented in the
+@strong{MySQL} on-line manual because when a feature is implemented, the
+@strong{MySQL} developers are required to document it before it's
+included in the source.
+@item
+@strong{MySQL} supports more of the standard ODBC functions than
@code{PostgreSQL}.
+@item
+@strong{MySQL} has a much more sophisticated @code{ALTER TABLE}.
+@item
+@strong{MySQL} has support for tables without transactions for
+applications that need all speed they can get. The tables may be memory
+based,@code{HEAP} tables or disk based @code{MyISAM}. @xref{Table types}.
+@item
+@strong{MySQL} has support for 3 different table handles that support
+transactions (@code{BDB}, @code{InnoDB} and @code{Gemini}). Because
+every transaction engine performs differently under different
+conditions, this gives the application writer more options to find an
+optimal solution for his/her setup. @xref{Table types}.
+@item
+@code{MERGE} tables gives you a unique way to instantly make a view over
+a set of identical tables and use these as one. This is perfectly for
+systems where you have log files that you order for example by month.
+@xref{MERGE}.
+@item
+The option to compress read-only tables, but still have direct access to
+the rows in the table, gives you better performance by minimizing disk
+reads. This is very useful when you are archiving
+things.@xref{myisampack}.
+@item
+@strong{MySQL} has internal support for text search. @xref{Fulltext Search}.
+@item
+You can access many databases from the same connection (depending of course
+on your privileges).
+@item
+@strong{MySQL} is coded from the start with multi-threading while
+PostgreSQL uses processes. Because context switching and access to
+common storage areas is much faster between threads, than are separate
+processes, this gives @strong{MySQL} a big speed advantage in multi-user
+applications and also makes it easier for @strong{MySQL} to take full
+advantage of symmetric multiprocessor systems (SMP).
+@item
+@strong{MySQL} has a much more sophisticated privilege system than
+PostgreSQL. While PostgreSQL only supports @code{INSERT},
+@code{SELECT}, @code{update/delete} grants per user on a database or a
+table @strong{MySQL} allows you to define a full set of different
+privileges on database, table and columns level. @strong{MySQL} also allows
+you to specify the privilege on host+user combinations. @xref{GRANT}.
+@item
+@strong{MySQL} supports a compressed server/client protocol which
+improves performance over slow links.
+@item
+@strong{MySQL} employs the table handler concept and is the only
+relational database we know of built around this concept. This allows
+different low level table types to be swapped into the SQL engine, each
+table type optimized for a different performance characteristics.
+@item
+All @code{MySQL} table types (except @strong{InnoDB}) are implemented as
+files (ie: one table per file), which makes it really easy to backup,
+move, delete and even symlink databases and tables when the server is
+down.
+@item
+Tools to repair and optimize @strong{MyISAM} tables (the most common
+@strong{MySQL} table type). A repair tool is only needed when a
+physical corruption of a data file happens, usually from a hardware
+failure. It allows a majority of the data to be recovered.
+@item
+Upgrading @strong{MySQL} is painless. When you upgrade @strong{MySQL},
+you don't need to dump/restore your data, as you have to do with most
+PostgreSQL upgrades.
+@end itemize
+
+Drawbacks with @strong{MySQL} compared to PostgreSQL:
+
+@itemize @bullet
+@item
+The transaction support in @strong{MySQL} is not yet as well tested as
+PostgreSQL's system.
+@item
+Because @strong{MySQL} uses threads, which are still a moving target on
+many OS, one must either use binaries from
+@uref{http://www.mysql.com/downloads} or carefully follow our
+instructions on
+@uref{http://www.mysql.com/doc/I/n/Installing_source.html} to get an
+optimal binary that works in all cases.
+@item
+Table locking, as used by the non-transactional @code{MyISAM} tables, is
+in many cases faster than page locks, row locks or versioning. The
+drawback however is that if one doesn't take into account how table
+locks works, a single long-running query can block a table for updates
+for a long time. This can usually be avoided when designing the
+application. If not, one can always switch the trouble table to use one
+of the transactional table types. @xref{Table locking}.
+@item
+With UDF (user defined functions) one can extend @strong{MySQL} with
+both normal SQL functions and aggregates, but this is not as easy or as
+flexible as in PostgreSQL. @xref{Adding functions}.
+@item
+Updates and deletes that goes over multiple tables is harder to do in
+@strong{MySQL}. (Will be fixed in @strong{MySQL} 4.0 with multi-table
+@code{DELETE} and multi-table @code{UPDATE} and in @strong{MySQL} 4.1
+with @code{SUB-SELECT})
+@end itemize
+
+PostgreSQL offers currently the following advantages over @strong{MySQL}:
+
+Note that because we know the @strong{MySQL} road map, we have included
+in the following table the version when @strong{MySQL} should support
+this feature. Unfortunately we couldn't do this for previous comparison,
+because we don't know the PostgreSQL roadmap.
+
+@multitable @columnfractions .70 .30
+@item @strong{Feature} @tab @strong{MySQL version}
+@item Subselects @tab 4.1
+@item Foreign keys @tab 4.0 and 4.1
+@item Views. @tab 4.2
+@item Stored procedures in multiple languages @tab 4.1
+@item Extensible type system. @tab Not planned
+@item Unions @tab 4.0.
+@item Full join. @tab 4.0 or 4.1.
+@item Triggers. @tab 4.1
+@item Constraints @tab 4.1
+@item Cursors @tab 4.1 or 4.2
+@item Extensible index types like R-trees @tab R-trees are planned to 4.2
+@item Inherited tables @tab Not planned
+@end multitable
+
+Other reasons to use PostgreSQL:
+
+@itemize @bullet
+@item
+Standard usage is in PostgreSQL closer to ANSI SQL in some cases.
+@item
+One can get speed up PostgreSQL by coding things as stored procedures.
+@item
+Bigger team of developers that contributes to the server.
+@end itemize
-@code{PostgreSQL} has some more advanced features like user-defined
-types, triggers, rules, and some transaction support (currently it
-has about the same semantics as @strong{MySQL}'s transactions in that the
-transaction is not 100% atomic). However, PostgreSQL lacks many of the
-standard types and functions from ANSI SQL and ODBC. See the @code{crash-me}
-Web page (@uref{http://www.mysql.com/information/crash-me.php}) for a complete
-list of limits and which types and functions are supported or unsupported.
+Drawbacks with PostgreSQL compared to @strong{MySQL}:
-Normally, @code{PostgreSQL} is a magnitude slower than @strong{MySQL}.
-@xref{Benchmarks}. This is due largely to the fact that they have only
-transaction-safe tables and that their transactions system is not as
-sophisticated as Berkeley DB's. In @strong{MySQL} you can decide per
-table if you want the table to be fast or take the speed penalty of
-making it transaction-safe.
+@itemize @bullet
+@item
+@code{Vacuum()} makes PostgreSQL hard to use in a 24/7 environment.
+@item
+Only transactional tables.
+@item
+Much slower insert/delete/update.
+@end itemize
-The most important things that @code{PostgreSQL} supports that @strong{MySQL}
-doesn't yet support:
+For a complete list of drawbacks, you should also examine the first table
+in this section.
-@table @code
-@item Sub select
-@item Foreign keys
-@item Stored procedures
-@item An extendable type system.
-@item A way to extend the SQL to handle new key types (like R-trees)
-@end table
+@cindex PostgreSQL/MySQL, benchmarks
+@node MySQL-PostgreSQL benchmarks, , MySQL-PostgreSQL features, Compare PostgreSQL
+@subsection Benchmarking MySQL and PostgreSQL
-@strong{MySQL}, on the other hand, supports many ANSI SQL constructs
-that @code{PostgreSQL} doesn't support. Most of these can be found at the
-@uref{http://www.mysql.com/information/crash-me.php, @code{crash-me} Web page}.
+The only open source benchmark, that we know of, that can be used to
+benchmark @strong{MySQL} and PostgreSQL (and other databases) is our
+own. It can be found at:
+@uref{http://www.mysql.com/information/benchmarks.html}.
-If you really need the rich type system @code{PostgreSQL} offers and you
-can afford the speed penalty of having to do everything transaction
-safe, you should take a look at @code{PostgreSQL}.
+We have many times asked the PostgreSQL developers and some PostgreSQL
+users to help us extend this benchmark to make the definitive benchmark
+for databases, but unfortunately we haven't got any feedback for this.
+
+We, the @strong{MySQL} developers, has because of this spent a lot of
+hours to get maximum performance from PostgreSQL for the benchmarks, but
+because we don't know PostgreSQL intimately we are sure that there are
+things that we have missed. We have on the benchmark page documented
+exactly how we did run the benchmark so that it should be easy for
+anyone to repeat and verify our results.
+
+The benchmarks are usually run with and without the @code{--fast}
+option. When run with @code{--fast} we are trying to use every trick
+the server can do to get the code to execute as fast as possible.
+The idea is that the normal run should show how the server would work in
+a default setup and the @code{--fast} run shows how the server would do
+if the application developer would use extensions in the server to make
+his application run faster.
+
+When running with PostgreSQL and @code{--fast} we do a @code{vacuum()}
+between after every major table update and drop table to make the database
+in perfect shape for the following selects. The time for vacuum() is
+measured separately.
+
+When running with PostgreSQL 7.1.1 we could however not run with
+@code{--fast} because during the insert test, the postmaster (the
+PostgreSQL daemon) died and the database was so corrupted that it was
+impossible to restart postmaster. (The details about the machine we run
+the benchmark can be found on the benchmark page). After this happened
+twice, we decided to postpone the @code{--fast} test until next
+PostgreSQL release.
+
+Before going to the other benchmarks we know of, We would like to give
+some background to benchmarks:
+
+It's very easy to write a test that shows ANY database to be best
+database in the world, by just restricting the test to something the
+database is very good at and not test anything that the database is not
+good at; If one after this publish the result with a single figure
+things is even easier.
+
+This would be like we would measure the speed of @strong{MySQL} compared
+to PostgreSQL by looking at the summary time of the MySQL benchmarks on
+our web page. Based on this @strong{MySQL} would be more than 40 times
+faster than PostgreSQL, something that is of course not true. We could
+make things even worse by just taking the test where PostgreSQL performs
+worst and claim that @strong{MySQL} is more than 2000 times faster than
+PostgreSQL.
+
+The case is that @strong{MySQL} does a lot of optimizations that
+PostgreSQL doesn't do and the other way around. An SQL optimizer is a
+very complex thing and a company could spend years on just making the
+optimizer faster and faster.
+
+When looking at the benchmark results you should look for things that
+you do in your application and just use these results to decide which
+database would be best suited for your application. The benchmark
+results also shows things a particular database is not good at and should
+give you a notion about things to avoid and what you may have to do in
+other ways.
+
+We know of two benchmark tests that claim that PostgreSQL performs
+better than @strong{MySQL}. These were both multi-user tests, a test
+that we here at @strong{MySQL AB} haven't had time to write and include in
+the benchmark suite, mainly because it's a big task to do this in a
+manner that is fair against all databases.
+
+One is the benchmark paid for by
+@uref{http://www.greatbridge.com/about/press.php?content_id=4,Great
+Bridge}.
+
+This is the worst benchmark we have ever seen anyone ever conduct. This
+was not only tuned to only test what PostgreSQL is absolutely best at,
+it was also totally unfair against every other database involved in the
+test.
+
+@strong{NOTE}: We know that not even some of the main PostgreSQL
+developers did like the way Great Bridge conducted the benchmark, so we
+don't blame them for the way the benchmark was made.
+
+This benchmark has been condemned in a lot of postings and newsgroups so
+we will here just shortly repeat some things that where wrong with it.
+
+@itemize @bullet
+@item
+The tests where run with an expensive commercial tool, that makes it
+impossible for an open source company like us to verify the benchmarks,
+or even check how the benchmark where really done. The tool is not even
+a true benchmark tool, but a application/setup testing tool. To refer
+this as STANDARD benchmark tool is to stretch the truth a long way.
+@item
+Great Bridge admitted that they had optimized the PostgreSQL database
+(with vacuum() before the test) and tuned the startup for the tests,
+something they hadn't done for any of the other databases involved. To
+say "This process optimizes indexes and frees up disk space a bit. The
+optimized indexes boost performance by some margin". Our benchmarks
+clearly indicates that the difference in running a lot of selects on a
+database with and without vacuum() can easily differ by a factor of 10.
+@item
+The test results where also strange; The ASPAP3 test benchmark
+documentation mentions that the test does:
+
+"selections, simple joins, projections, aggregates, one-tuple updates,
+and bulk updates"
+
+PostgreSQL is good at doing selects and joins (especially after a
+vacuum()), but doesn't perform as well on inserts/updates; The
+benchmarks seem to indicate that only SELECTs where done (or very few
+updates) . This could easily explain they good results for PostgreSQL in
+this test. The bad results for MySQL will be obvious a bit down in this
+document.
+@item
+They did run the so called benchmark from a Windows machine against a
+Linux machine over ODBC, a setup that no normal database user would ever
+do when running a heavy multi-user application. This tested more the
+ODBC driver and the Windows protocol used between the clients than the
+database itself.
+@item
+When running the database against Oracle and MS-SQL (Great Bridge has
+indirectly indicated that the databases they used in the test),
+they didn't use the native protocol but instead ODBC. Anyone that has
+ever used Oracle, knows that all real application uses the native
+interface instead of ODBC. Doing a test through ODBC and claiming that
+the results had anything to do with using the database for real can't
+be regarded as fair play. They should have done two tests with and
+without ODBC to provide the right facts (after having got experts to tune
+all involved databases of course).
+@item
+They refer to the TCP-C tests, but doesn't anywhere mention that the
+tests they did where not a true TCP-C test and they where not even
+allowed to call it a TCP-C test. A TCP-C test can only be conducted by
+the rules approved by the @uref{http://www.tpc.org,TCP-council}. Great
+Bridge didn't do that. By doing this they have both violated the TCP
+trademark and miscredited their own benchmarks. The rules set by the
+TCP-council are very strict to ensure that no one can produce false
+results or make unprovable statements. Apparently Great Bridge wasn't
+interested in doing this.
+@item
+After the first test, we contacted Great Bridge and mentioned to them
+some of the obvious mistakes they had done with @strong{MySQL}; Running
+with a debug version of our ODBC driver, running on a Linux system that
+wasn't optimized for threads, using an old MySQL version when there was
+a recommended newer one available, not starting @strong{MySQL} with the
+right options for heavy multi-user use (the default installation of
+MySQL is tuned for minimal resource use). Great Bridge did run a new
+test, with our optimized ODBC driver and with better startup options for
+MySQL, but refused to either use our updated glibc library or our
+standard binary (used by 80% of our users), which was statically linked
+with a fixed glibc library.
+
+According to what we know, Great Bridge did nothing to ensure that the
+other databases was setup correctly to run good in their test
+environment. We are sure however that they didn't contact Oracle or
+Microsoft to ask for their advice in this matter ;)
+@item
+The benchmark was paid for by Great Bridge, and they decided to publish
+only partial chosen results (instead of publishing it all).
+@end itemize
+
+Tim Perdue, a long time PostgreSQL fan and a reluctant MySQL user
+published a comparison on
+@uref{http://www.phpbuilder.com/columns/tim20001112.php3,phpbuider}.
+
+When we got aware of the comparison, we phoned Tim Perdue about this
+because there was a lot of strange things in his results. For example,
+he claimed that MySQL had a problem with five users in his tests, when we
+know that there are users with similar machines as his that are using
+MySQL with 2000 simultaneous connections doing 400 queries per second (In
+this case the limit was the web bandwidth, not the database).
+
+It sounded like he was using a Linux kernel that either had some
+problems with many threads (Linux kernels before 2.4 had a problem with
+this but we have documented how to fix this and Tim should be aware of
+this problem). The other possible problem could have been an old glibc
+library and that Tim didn't use a MySQL binary from our site, which is
+linked with a corrected glibc library, but had compiled a version of his
+own with. In any of the above cases, the symptom would have been exactly
+what Tim had measured.
+
+We asked Tim if we could get access to his data so that we could repeat
+the benchmark and if he could check the MySQL version on the machine to
+find out what was wrong and he promised to come back to us about this.
+He has not done that yet.
+
+Because of this we can't put any trust in this benchmark either :(
+
+Conclusion:
+
+The only benchmarks that exist today that anyone can download and run
+against @strong{MySQL} and PostgreSQL is the MySQL benchmarks. We here
+at @strong{MySQL} believe that open source databases should be tested
+with open source tools! This is the only way to ensure that no one
+does tests that none can reproduce and use this to claim that a
+database is better than another. Without knowing all the facts it's
+impossible to answer the claims of the tester.
+
+The thing we find strange is that every test we have seen about
+PostgreSQL, that is impossible to reproduce, claims that PostgreSQL is
+better in most cases while our tests, which anyone can reproduce,
+clearly shows otherwise. With this we don't want to say that PostgreSQL
+isn't good at many things (It is!) We would just like to see a fair test
+where they are very good so that we could get some friendly competition
+going!
+
+For more information about our benchmarks suite see @xref{MySQL
+Benchmarks}.
+
+We are working on even better benchmarks including much better
+documentation (the current is lacking).
@cindex internals
@cindex threads
@@ -44863,7 +45340,7 @@ html templates. By Alex Krohn.
This cgi scripts in Perl enables you to edit content of Mysql
database. By Tomas Zeman.
@item
-@uref{http://futurerealm.com/opensource/futuresql.htm, FutureSQL Web Database Administration Tool}.
+@uref{http://worldcommunity.com/opensource/futuresql, FutureSQL Web Database Administration Tool}.
FutureSQL by Peter F. Brown, is a free, open source rapid application
development Web database administration tool, written in Perl,
using @strong{MySQL}. It uses @code{DBI:DBD} and @code{CGI.pm}.
@@ -45122,6 +45599,10 @@ Patches for @code{radiusd} to make it support @strong{MySQL}. By Wim Bonis,
@appendixsec Useful Tools
@itemize @bullet
+@item @uref{http://worldcommunity.com/opensource/utilities/mysql_backup.html, MySQL Backup}.
+
+A backup script for MySQL. By Peter F. Brown.
+
@item @uref{http://www.mysql.com/Downloads/Contrib/mytop, mytop}
@item @uref{http://public.yahoo.com/~jzawodn/mytop/, mytop home page}
mytop is a Perl program that allows you to monitor @strong{MySQL} servers by
@@ -45839,6 +46320,15 @@ not yet 100% confident in this code.
@appendixsubsec Changes in release 3.23.39
@itemize @bullet
@item
+If one dropped and added an @code{auto_increment} column, the
+@code{auto_increment} value wasn't reset.
+@item
+Fixed problem where @code{LOCK TABLES table_name READ} followed by
+@code{FLUSH TABLES} put an exclusive lock on the table.
+@item
+@code{REAL} @@variables were represented with only 2 digits when
+converted to strings.
+@item
Fixed problem that client 'hung' when @code{LOAD TABLE FROM MASTER} failed.
@item
Running @code{myisamchk --fast --force} will no longer repair tables
@@ -51095,6 +51585,10 @@ Everything in this list is approximately in the order it will be done. If you
want to affect the priority order, please register a license or support us and
tell us what you want to have done more quickly. @xref{Licensing and Support}.
+The plan is that we in the future will support the full ANSI SQL99
+standard, but with a lot of useful extensions. The challenge is to do
+this without sacrificing speed or compromising the code.
+
@node TODO MySQL 4.0, TODO future, TODO, TODO
@appendixsec Things that should be in 4.0
diff --git a/isam/_dynrec.c b/isam/_dynrec.c
index 42a596fa623..2a908f5b42c 100644
--- a/isam/_dynrec.c
+++ b/isam/_dynrec.c
@@ -954,7 +954,7 @@ static int _nisam_cmp_buffer(File file, const byte *buff, ulong filepos, uint le
{
if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
goto err;
- if (memcmp((byte*) buff,temp_buff,IO_SIZE))
+ if (memcmp((byte*) buff,temp_buff,next_length))
DBUG_RETURN(1);
buff+=next_length;
length-= next_length;
diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c
index 4c05f6c737d..e090498f3fe 100644
--- a/myisam/mi_dynrec.c
+++ b/myisam/mi_dynrec.c
@@ -1221,20 +1221,19 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
char temp_buff[IO_SIZE*2];
DBUG_ENTER("_mi_cmp_buffer");
- VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
next_length= IO_SIZE*2 - (uint) (filepos & (IO_SIZE-1));
while (length > IO_SIZE*2)
{
- if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
+ if (my_pread(file,temp_buff,next_length,filepos, MYF(MY_NABP)) ||
+ memcmp((byte*) buff,temp_buff,next_length))
goto err;
- if (memcmp((byte*) buff,temp_buff,IO_SIZE))
- DBUG_RETURN(1);
+ filepos+=next_length;
buff+=next_length;
length-= next_length;
next_length=IO_SIZE*2;
}
- if (my_read(file,temp_buff,length,MYF(MY_NABP)))
+ if (my_pread(file,temp_buff,length,filepos,MYF(MY_NABP)))
goto err;
DBUG_RETURN(memcmp((byte*) buff,temp_buff,length));
err:
diff --git a/myisam/mi_packrec.c b/myisam/mi_packrec.c
index b6a9435ee3d..be7f9dcae0a 100644
--- a/myisam/mi_packrec.c
+++ b/myisam/mi_packrec.c
@@ -1010,7 +1010,7 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BLOCK_INFO *info, File file,
{
ref_length=myisam->s->pack.ref_length;
/*
- We can't use my_pread() here because mi_rad_pack_record assumes
+ We can't use my_pread() here because mi_read_rnd_pack_record assumes
position is ok
*/
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index 4f12f71c7ce..dbdbb7f57a9 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -27,3 +27,8 @@ n
12
Table Op Msg_type Msg_text
test.t1 optimize status OK
+i
+1
+2
+3
+4
diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result
index ee04e437bb7..f852378e6a1 100644
--- a/mysql-test/r/variables.result
+++ b/mysql-test/r/variables.result
@@ -1,7 +1,7 @@
@test @`select` @TEST @not_used
1 2 3 NULL
@test_int @test_double @test_string @test_string2 @select
-10 0.00 abcdeghi abcdefghij NULL
+10 1e-10 abcdeghi abcdefghij NULL
@test_int @test_double @test_string @test_string2
hello hello hello hello
@test_int @test_double @test_string @test_string2
@@ -10,3 +10,5 @@ hellohello hellohello hellohello hellohello
NULL NULL NULL NULL
@t1:=(@t2:=1)+@t3:=4 @t1 @t2 @t3
5 5 1 4
+@t5
+1.23456
diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test
index da98240c2cf..dbfbd4267d8 100644
--- a/mysql-test/t/alter_table.test
+++ b/mysql-test/t/alter_table.test
@@ -71,3 +71,14 @@ ALTER TABLE t1 ADD Column new_col int not null;
UNLOCK TABLES;
OPTIMIZE TABLE t1;
DROP TABLE t1;
+
+#
+# Drop and add an auto_increment column
+#
+
+create table t1 (i int unsigned not null auto_increment primary key);
+insert into t1 values (null),(null),(null),(null);
+alter table t1 drop i,add i int unsigned not null auto_increment, drop primary key, add primary key (i);
+select * from t1;
+drop table t1;
+
diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test
index 1067559b759..d5ff64d199b 100644
--- a/mysql-test/t/variables.test
+++ b/mysql-test/t/variables.test
@@ -1,7 +1,7 @@
#
# test variables
#
-set @`test`=1,@TEST=3,@select=2;
+set @`test`=1,@TEST=3,@select=2,@t5=1.23456;
select @test,@`select`,@TEST,@not_used;
set @test_int=10,@test_double=1e-10,@test_string="abcdeghi",@test_string2="abcdefghij",@select=NULL;
select @test_int,@test_double,@test_string,@test_string2,@select;
@@ -12,3 +12,5 @@ select @test_int,@test_double,@test_string,@test_string2;
set @test_int=null,@test_double=null,@test_string=null,@test_string2=null;
select @test_int,@test_double,@test_string,@test_string2;
select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3;
+select @t5;
+
diff --git a/mysys/my_pread.c b/mysys/my_pread.c
index 4e0de71bcf5..5c7d0be5854 100644
--- a/mysys/my_pread.c
+++ b/mysys/my_pread.c
@@ -66,11 +66,11 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
my_filename(Filedes),my_errno);
}
if ((int) readbytes == -1 || (MyFlags & (MY_FNABP | MY_NABP)))
- DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
+ DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
}
if (MyFlags & (MY_NABP | MY_FNABP))
- DBUG_RETURN(0); /* Ok vid l{sning */
- DBUG_RETURN(readbytes); /* purecov: inspected */
+ DBUG_RETURN(0); /* Read went ok; Return 0 */
+ DBUG_RETURN(readbytes); /* purecov: inspected */
}
} /* my_pread */
diff --git a/sql-bench/Makefile.am b/sql-bench/Makefile.am
index 9de8da5c189..673a36852e9 100644
--- a/sql-bench/Makefile.am
+++ b/sql-bench/Makefile.am
@@ -23,7 +23,7 @@ bench_SCRIPTS = test-ATIS test-connect test-create test-insert \
test-big-tables test-select test-wisconsin \
test-alter-table graph-compare-results \
bench-init.pl compare-results run-all-tests \
- server-cfg crash-me copy-db \
+ server-cfg crash-me copy-db
CLEANFILES = $(bench_SCRIPTS)
EXTRA_SCRIPTS = test-ATIS.sh test-connect.sh test-create.sh \
test-insert.sh test-big-tables.sh test-select.sh \
diff --git a/sql-bench/README b/sql-bench/README
index 6096c5cc1e8..6b6a5fc95c0 100755
--- a/sql-bench/README
+++ b/sql-bench/README
@@ -11,7 +11,7 @@ In this directory are the queries and raw data files used to populate
the MySQL benchmarks. In order to run the benchmarks you should normally
execute a command like the following:
-run-all-tests --server=msyql --cmp=mysql,pg,solid --user=test --password=test --log
+run-all-tests --server=mysql --cmp=mysql,pg,solid --user=test --password=test --log
The above means that one wants to run the benchmark with MySQL. The limits
should be taken from all of mysql,PostgreSQL and Solid. Login name and
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 84bc972608e..8a2bd15ae6d 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1758,7 +1758,7 @@ Item_func_get_user_var::val_str(String *str)
return NULL;
switch (entry->type) {
case REAL_RESULT:
- str->set(*(double*) entry->value);
+ str->set(*(double*) entry->value,decimals);
break;
case INT_RESULT:
str->set(*(longlong*) entry->value);
diff --git a/sql/share/swedish/errmsg.OLD b/sql/share/swedish/errmsg.OLD
index 672ce97c575..fc26a08e9ee 100644
--- a/sql/share/swedish/errmsg.OLD
+++ b/sql/share/swedish/errmsg.OLD
@@ -206,3 +206,6 @@
"Kunde inte starta en tråd för replikering",
"Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar",
"Du kan endast använda konstant-uttryck med SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index d9470ee0b59..d068dd500bc 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -389,6 +389,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
thd->in_lock_tables=1;
result=reopen_tables(thd,1,1);
thd->in_lock_tables=0;
+ /* Set version for table */
+ for (TABLE *table=thd->open_tables; table ; table=table->next)
+ table->version=refresh_version;
}
VOID(pthread_mutex_unlock(&LOCK_open));
if (if_wait_for_refresh)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index cfd16df5d17..14f4a732eac 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1362,6 +1362,7 @@ select_create::prepare(List<Item> &values)
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ table->file->deactivate_non_unique_index((ha_rows) 0);
DBUG_RETURN(0);
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 3fa2bc5d9d3..207f9dd324d 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1233,7 +1233,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
if (drop->type == Alter_drop::COLUMN &&
!my_strcasecmp(field->field_name, drop->name))
+ {
+ /* Reset auto_increment value if it was dropped */
+ if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
+ !(create_info->used_fields & HA_CREATE_USED_AUTO))
+ {
+ create_info->auto_increment_value=0;
+ create_info->used_fields|=HA_CREATE_USED_AUTO;
+ }
break;
+ }
}
if (drop)
{