author     serg@serg.mysql.com <>  2001-08-08 14:29:34 +0200
committer  serg@serg.mysql.com <>  2001-08-08 14:29:34 +0200
commit     37c417318e583891379dc153e8f21568861ea9d4 (patch)
tree       ac520259524a31f8422266af71fbef798d4a6de2
parent     7eae61b9a91427a153488015d291534cc2be66e8 (diff)
parent     094ca761c41ea290ed560da1e3b854d660a5b2e9 (diff)
download   mariadb-git-37c417318e583891379dc153e8f21568861ea9d4.tar.gz
Merge work:/home/bk/mysql into serg.mysql.com:/usr/home/serg/Abk/mysql
-rw-r--r--  BitKeeper/etc/logging_ok | 1
-rw-r--r--  Docs/Flags/indonesia.eps | 0
-rw-r--r--  Docs/Flags/indonesia.gif | bin 0 -> 133 bytes
-rw-r--r--  Docs/Flags/indonesia.txt | 0
-rw-r--r--  Docs/Flags/yugoslavia.eps | 0
-rw-r--r--  Docs/Flags/yugoslavia.gif | bin 0 -> 250 bytes
-rw-r--r--  Docs/Flags/yugoslavia.txt | 0
-rw-r--r--  Docs/manual.texi | 31840
-rw-r--r--  client/mysqlimport.c | 16
-rw-r--r--  configure.in | 11
-rw-r--r--  include/mysql_com.h | 1
-rw-r--r--  include/mysqld_error.h | 3
-rw-r--r--  innobase/btr/btr0btr.c | 71
-rw-r--r--  innobase/btr/btr0cur.c | 820
-rw-r--r--  innobase/buf/buf0buf.c | 81
-rw-r--r--  innobase/buf/buf0flu.c | 195
-rw-r--r--  innobase/buf/buf0rea.c | 14
-rw-r--r--  innobase/data/data0data.c | 171
-rw-r--r--  innobase/fil/fil0fil.c | 81
-rw-r--r--  innobase/fsp/fsp0fsp.c | 4
-rw-r--r--  innobase/ibuf/ibuf0ibuf.c | 7
-rw-r--r--  innobase/include/btr0btr.h | 38
-rw-r--r--  innobase/include/btr0cur.h | 123
-rw-r--r--  innobase/include/buf0buf.h | 8
-rw-r--r--  innobase/include/buf0flu.h | 2
-rw-r--r--  innobase/include/data0data.h | 58
-rw-r--r--  innobase/include/data0data.ic | 7
-rw-r--r--  innobase/include/dict0mem.h | 2
-rw-r--r--  innobase/include/fil0fil.h | 10
-rw-r--r--  innobase/include/fsp0fsp.h | 2
-rw-r--r--  innobase/include/mach0data.ic | 4
-rw-r--r--  innobase/include/os0file.h | 18
-rw-r--r--  innobase/include/rem0cmp.h | 13
-rw-r--r--  innobase/include/rem0rec.h | 53
-rw-r--r--  innobase/include/rem0rec.ic | 72
-rw-r--r--  innobase/include/row0ins.h | 6
-rw-r--r--  innobase/include/row0mysql.h | 8
-rw-r--r--  innobase/include/row0row.h | 1
-rw-r--r--  innobase/include/row0upd.h | 12
-rw-r--r--  innobase/include/row0upd.ic | 5
-rw-r--r--  innobase/include/srv0srv.h | 22
-rw-r--r--  innobase/include/sync0sync.h | 2
-rw-r--r--  innobase/include/trx0rec.h | 16
-rw-r--r--  innobase/include/trx0rec.ic | 17
-rw-r--r--  innobase/include/trx0sys.h | 70
-rw-r--r--  innobase/include/trx0types.h | 1
-rw-r--r--  innobase/include/trx0undo.h | 4
-rw-r--r--  innobase/include/univ.i | 24
-rw-r--r--  innobase/include/ut0dbg.h | 10
-rw-r--r--  innobase/include/ut0ut.h | 3
-rw-r--r--  innobase/lock/lock0lock.c | 40
-rw-r--r--  innobase/log/log0log.c | 15
-rw-r--r--  innobase/log/log0recv.c | 3
-rw-r--r--  innobase/os/os0file.c | 84
-rw-r--r--  innobase/page/page0cur.c | 8
-rw-r--r--  innobase/pars/pars0pars.c | 4
-rw-r--r--  innobase/rem/rem0cmp.c | 49
-rw-r--r--  innobase/rem/rem0rec.c | 67
-rw-r--r--  innobase/row/row0ins.c | 91
-rw-r--r--  innobase/row/row0mysql.c | 42
-rw-r--r--  innobase/row/row0purge.c | 99
-rw-r--r--  innobase/row/row0row.c | 34
-rw-r--r--  innobase/row/row0sel.c | 63
-rw-r--r--  innobase/row/row0uins.c | 9
-rw-r--r--  innobase/row/row0umod.c | 121
-rw-r--r--  innobase/row/row0undo.c | 12
-rw-r--r--  innobase/row/row0upd.c | 143
-rw-r--r--  innobase/srv/srv0srv.c | 160
-rw-r--r--  innobase/srv/srv0start.c | 133
-rw-r--r--  innobase/sync/sync0rw.c | 5
-rw-r--r--  innobase/sync/sync0sync.c | 38
-rw-r--r--  innobase/trx/trx0purge.c | 7
-rw-r--r--  innobase/trx/trx0rec.c | 62
-rw-r--r--  innobase/trx/trx0sys.c | 319
-rw-r--r--  myisam/myisamchk.c | 4
-rw-r--r--  mysql-test/r/order_by.result | 31
-rw-r--r--  mysql-test/t/fulltext.test | 23
-rw-r--r--  mysql-test/t/order_by.test | 52
-rw-r--r--  mysys/default.c | 2
-rw-r--r--  scripts/mysqlhotcopy.sh | 47
-rw-r--r--  sql/item_func.cc | 20
-rw-r--r--  sql/log_event.h | 3
-rw-r--r--  sql/mysqld.cc | 2
-rw-r--r--  sql/share/czech/errmsg.txt | 1
-rw-r--r--  sql/share/danish/errmsg.txt | 1
-rw-r--r--  sql/share/dutch/errmsg.txt | 1
-rw-r--r--  sql/share/english/errmsg.txt | 1
-rw-r--r--  sql/share/estonian/errmsg.txt | 1
-rw-r--r--  sql/share/french/errmsg.txt | 1
-rw-r--r--  sql/share/german/errmsg.txt | 1
-rw-r--r--  sql/share/greek/errmsg.txt | 1
-rw-r--r--  sql/share/hungarian/errmsg.txt | 1
-rw-r--r--  sql/share/italian/errmsg.txt | 1
-rw-r--r--  sql/share/japanese/errmsg.txt | 1
-rw-r--r--  sql/share/korean/errmsg.txt | 1
-rw-r--r--  sql/share/norwegian-ny/errmsg.txt | 1
-rw-r--r--  sql/share/norwegian/errmsg.txt | 1
-rw-r--r--  sql/share/polish/errmsg.txt | 1
-rw-r--r--  sql/share/portuguese/errmsg.txt | 1
-rw-r--r--  sql/share/romanian/errmsg.txt | 1
-rw-r--r--  sql/share/russian/errmsg.txt | 1
-rw-r--r--  sql/share/slovak/errmsg.txt | 1
-rw-r--r--  sql/share/spanish/errmsg.txt | 1
-rw-r--r--  sql/share/swedish/errmsg.OLD | 4
-rw-r--r--  sql/share/swedish/errmsg.txt | 1
-rw-r--r--  sql/sql_select.cc | 12
106 files changed, 19144 insertions, 16612 deletions
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index 70b86544398..fbf9c3702a3 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -13,3 +13,4 @@ tim@threads.polyesthetic.msg
tim@white.box
jcole@tetra.spaceapes.com
davida@isil.mysql.com
+tonu@x153.internalnet
diff --git a/Docs/Flags/indonesia.eps b/Docs/Flags/indonesia.eps
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/Docs/Flags/indonesia.eps
diff --git a/Docs/Flags/indonesia.gif b/Docs/Flags/indonesia.gif
new file mode 100644
index 00000000000..1c421df50ba
--- /dev/null
+++ b/Docs/Flags/indonesia.gif
Binary files differ
diff --git a/Docs/Flags/indonesia.txt b/Docs/Flags/indonesia.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/Docs/Flags/indonesia.txt
diff --git a/Docs/Flags/yugoslavia.eps b/Docs/Flags/yugoslavia.eps
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/Docs/Flags/yugoslavia.eps
diff --git a/Docs/Flags/yugoslavia.gif b/Docs/Flags/yugoslavia.gif
new file mode 100644
index 00000000000..650eac242d6
--- /dev/null
+++ b/Docs/Flags/yugoslavia.gif
Binary files differ
diff --git a/Docs/Flags/yugoslavia.txt b/Docs/Flags/yugoslavia.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/Docs/Flags/yugoslavia.txt
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 8b5052aa19c..86a4b92f304 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -52,9 +52,6 @@
@smallbook
@end ifset
-@c We want the contents at the beginning, where it's supposed to be.
-@setcontentsaftertitlepage
-
@c %**end of header
@ifinfo
@@ -75,6 +72,13 @@ END-INFO-DIR-ENTRY
@page
@end titlepage
+@c Short contents, blank page, long contents.
+@c until i can figure out the blank page, no short contents.
+@c @shortcontents
+@c @page
+@c @page
+@contents
+
@c This should be added. The HTML conversion also needs a MySQL version
@c number somewhere.
@@ -83,15 +87,8 @@ END-INFO-DIR-ENTRY
@c printing
@headings single
-@ifclear nusphere
-@everyheading @thispage @| @| @thischapter
-@everyfooting @| @| Version: @value{mysql_version} Printed: @today{}
-@end ifclear
-
-@ifset nusphere
@oddheading @thischapter @| @| @thispage
@evenheading @thispage @| @| MySQL Technical Reference for Version @value{mysql_version}
-@end ifset
@end iftex
@@ -112,16 +109,12 @@ distribution for that version.
@menu
* Introduction:: General Information about @strong{MySQL}
* Installing:: Installing @strong{MySQL}
-* Privilege system:: The @strong{MySQL} access privilege system
+* Tutorial:: @strong{MySQL} Tutorial
+* MySQL Database Administration::
+* MySQL Optimization::
* Reference:: @strong{MySQL} language reference
* Table types:: @strong{MySQL} table types
-* Tutorial:: @strong{MySQL} Tutorial
-* Server:: @strong{MySQL} Server
-* Replication:: Replication
* Fulltext Search:: Fulltext Search
-* Performance:: Getting maximum performance from @strong{MySQL}
-* MySQL Benchmarks:: The @strong{MySQL} benchmark suite
-* Tools:: @strong{MySQL} Utilities
* Maintenance:: Maintaining a @strong{MySQL} installation
* Adding functions:: Adding new functions to @strong{MySQL}
* Adding procedures:: Adding new procedures to @strong{MySQL}
@@ -129,7 +122,6 @@ distribution for that version.
* Common programs:: Using @strong{MySQL} with some common programs
* Problems:: Problems
* Common problems:: Solving some common problems with @strong{MySQL}
-* Log files::
* Clients:: @strong{MySQL} client tools and APIs
* MySQL internals:: @strong{MySQL} internals
* Environment variables:: @strong{MySQL} environment variables
@@ -143,1012 +135,10 @@ distribution for that version.
* Unireg:: What is Unireg?
* GPL license:: GNU General Public License
* LGPL license:: GNU Library General Public License
+* Placeholder::
* Function Index:: SQL command, type and function index
* Concept Index:: Concept Index
-@detailmenu
- --- The Detailed Node Listing ---
-
-General Information About MySQL
-
-* MySQL and MySQL AB::
-* MySQL Information Sources::
-* Licensing and Support::
-* Compatibility::
-* Comparisons::
-* TODO::
-
-MySQL, MySQL AB, and Open Source
-
-* What-is:: What is @strong{MySQL}?
-* What is MySQL AB:: What is @strong{MySQL AB}?
-* Manual-info:: About this manual
-* Manual conventions:: Conventions used in this manual
-* History:: History of @strong{MySQL}
-* Features:: The main features of @strong{MySQL}
-* Stability:: How stable is @strong{MySQL}?
-* Table size::
-* Year 2000 compliance:: Year 2000 compliance
-
-MySQL Information Sources
-
-* MySQL-Books::
-* General-SQL::
-* Useful Links::
-* Questions::
-
-MySQL Mailing Lists
-
-* Mailing-list:: The @strong{MySQL} mailing lists
-* Asking questions:: Asking questions or reporting bugs
-* Bug reports:: How to report bugs or problems
-* Answering questions:: Guidelines for answering questions on the mailing list
-
-MySQL Licensing and Support
-
-* Licensing policy:: @strong{MySQL} licensing policy
-* Copyright:: Copyrights used by @strong{MySQL}
-* Licensing examples:: Example licensing situations
-* Cost:: @strong{MySQL} licensing and support costs
-* Support:: Types of commercial support
-
-Copyrights Used by MySQL
-
-* Copyright changes:: Possible future copyright changes
-
-Example Licensing Situations
-
-* Products that use MySQL:: Selling products that use @strong{MySQL}
-* ISP:: ISP @strong{MySQL} services
-* Web server:: Running a web server using @strong{MySQL}.
-
-MySQL Licensing and Support Costs
-
-* Payment information:: Payment information
-* Contact information:: Contact information
-
-Types of Commercial Support
-
-* Basic email support:: Basic email support
-* Extended email support:: Extended email support
-* Login support:: Login support
-* Extended login support:: Extended login support
-* Telephone support:: Telephone support
-* Table handler support:: Support for other table handlers
-
-How Standards-compatible Is MySQL?
-
-* Extensions to ANSI:: @strong{MySQL} extensions to ANSI SQL92
-* Differences from ANSI:: @strong{MySQL} differences compared to ANSI SQL92
-* ANSI mode:: Running @strong{MySQL} in ANSI mode
-* Missing functions:: Functionality missing from @strong{MySQL}
-* Standards:: What standards does @strong{MySQL} follow?
-* Commit-rollback:: How to cope without @code{COMMIT}-@code{ROLLBACK}
-* Bugs::
-
-Functionality Missing from MySQL
-
-* Missing Sub-selects:: Sub-selects
-* Missing SELECT INTO TABLE:: @code{SELECT INTO TABLE}
-* Missing Transactions:: Transactions
-* Missing Triggers:: Triggers
-* Missing Foreign Keys:: Foreign Keys
-* Broken Foreign KEY::
-* Missing Views:: Views
-* Missing comments:: @samp{--} as the start of a comment
-
-Foreign Keys
-
-* Broken Foreign KEY:: Reasons NOT to use foreign keys constraints
-
-How MySQL Compares to Other Databases
-
-* Compare mSQL:: How @strong{MySQL} compares to @code{mSQL}
-* Protocol differences::
-* Compare PostgreSQL:: How @strong{MySQL} compares with PostgreSQL
-* MySQL-PostgreSQL features::
-
-How MySQL Compares to @code{mSQL}
-
-* Using mSQL tools:: How to convert @code{mSQL} tools for @strong{MySQL}
-
-How @code{mSQL} and MySQL Client/Server Communications Protocols Differ
-
-* Syntax differences::
-
-How MySQL Compares to PostgreSQL
-
-* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies
-
-Featurevise Comparison of MySQL and PostgreSQL
-
-* MySQL-PostgreSQL benchmarks::
-
-MySQL and the future (The TODO)
-
-* TODO MySQL 4.0:: Things that should be in Version 4.0
-* TODO future:: Things that must be done in the near future
-* TODO sometime:: Things that have to be done sometime
-* TODO unplanned:: Some things we don't have any plans to do
-
-Installing MySQL
-
-* Getting MySQL:: How to get @strong{MySQL}
-* Which OS:: Operating systems supported by @strong{MySQL}
-* Which version:: Which @strong{MySQL} version to use
-* Many versions:: How and when updates are released
-* Installation layouts:: Installation layouts
-* Installing binary:: Installing a @strong{MySQL} binary distribution
-* Installing source:: Installing a @strong{MySQL} source distribution
-* Installing source tree:: Installing @strong{MySQL} from development source tree
-* Compilation problems:: Problems compiling?
-* MIT-pthreads:: MIT-pthreads notes
-* Perl support:: Perl installation comments
-* Source install system issues:: System-specific issues
-* Windows:: Windows notes
-* OS/2:: OS/2 notes
-* MySQL binaries:: MySQL binaries
-* Post-installation:: Post-installation setup and testing
-* Installing many servers:: Installing many servers on the same machine
-* Upgrade:: Upgrading/Downgrading MySQL
-
-Installing a MySQL Binary Distribution
-
-* Linux-RPM:: Linux RPM files
-* Building clients:: Building client programs
-* Binary install system issues:: System-specific issues
-
-System-specific Issues
-
-* Binary notes-Linux:: Linux notes for binary distribution
-* Binary notes-HP-UX:: HP-UX notes for binary distribution
-
-Installing a MySQL Source Distribution
-
-* Quick install:: Quick installation overview
-* Applying patches:: Applying patches
-* configure options:: Typical @code{configure} options
-
-Perl Installation Comments
-
-* Perl installation:: Installing Perl on Unix
-* ActiveState Perl:: Installing ActiveState Perl on Windows
-* Windows Perl:: Installing the @strong{MySQL} Perl distribution on Windows
-* Perl support problems:: Problems using the Perl @code{DBI}/@code{DBD} interface
-
-System-specific Issues
-
-* Solaris:: Solaris notes
-* Solaris 2.7:: Solaris 2.7 / 2.8 notes
-* Solaris x86:: Solaris x86 notes
-* SunOS:: SunOS 4 notes
-* Linux:: Linux notes (all Linux versions)
-* Alpha-DEC-UNIX:: Alpha-DEC-UNIX notes
-* Alpha-DEC-OSF1:: Alpha-DEC-OSF1 notes
-* SGI-Irix:: SGI-Irix notes
-* FreeBSD:: FreeBSD notes
-* NetBSD:: NetBSD notes
-* OpenBSD:: OpenBSD 2.5 notes
-* BSDI:: BSD/OS notes
-* SCO:: SCO notes
-* SCO Unixware:: SCO Unixware 7.0 notes
-* IBM-AIX:: IBM-AIX notes
-* HP-UX 10.20:: HP-UX 10.20 notes
-* HP-UX 11.x:: HP-UX 11.x notes
-* Mac OS X:: Mac OS X notes
-* BEOS:: BeOS Notes
-
-Linux Notes (All Linux Versions)
-
-* Linux-x86:: Linux-x86 notes
-* Linux-RedHat50:: RedHat 5.0 notes
-* Linux-RedHat51:: RedHat 5.1 notes
-* Linux-SPARC:: Linux-SPARC notes
-* Linux-Alpha:: Linux-Alpha notes
-* MKLinux:: MkLinux notes
-* Qube2:: Qube2 Linux notes
-* Linux-Ia64:: Linux-Ia64 notes
-
-OpenBSD Notes
-
-* OpenBSD 2.5:: OpenBSD 2.5 Notes
-* OpenBSD 2.8:: OpenBSD 2.8 Notes
-
-BSD/OS Notes
-
-* BSDI2:: BSD/OS 2.x notes
-* BSDI3:: BSD/OS 3.x notes
-* BSDI4:: BSD/OS 4.x notes
-
-Mac OS X Notes
-
-* Mac OS X Public Data::
-* Mac OS X Server::
-
-Windows Notes
-
-* Windows installation:: Installing @strong{MySQL} on Windows
-* Win95 start:: Starting @strong{MySQL} on Win95 / Win98
-* NT start:: Starting @strong{MySQL} on NT / Win2000
-* Windows running:: Running @strong{MySQL} on Windows
-* Windows and SSH:: Connecting to a remote @strong{MySQL} from Windows with SSH
-* Windows symbolic links:: Splitting data across different disks under Win32
-* Windows compiling:: Compiling MySQL clients on Windows.
-* Windows vs Unix:: @strong{MySQL}-Windows compared to Unix @strong{MySQL}
-
-Post-installation Setup and Testing
-
-* mysql_install_db:: Problems running @code{mysql_install_db}
-* Starting server:: Problems starting the @strong{MySQL} server
-* Automatic start:: Starting and stopping @strong{MySQL} automatically
-* Command-line options:: Command-line options
-* Option files:: Option files
-
-Upgrading/Downgrading MySQL
-
-* Upgrading-from-3.22:: Upgrading from a 3.22 version to 3.23
-* Upgrading-from-3.21:: Upgrading from a 3.21 version to 3.22
-* Upgrading-from-3.20:: Upgrading from a 3.20 version to 3.21
-* Upgrading-to-arch:: Upgrading to another architecture
-
-The MySQL Access Privilege System
-
-* General security:: General security
-* Security:: How to make @strong{MySQL} secure against crackers
-* Privileges options::
-* What Privileges:: What the privilege system does
-* User names:: @strong{MySQL} user names and passwords
-* Connecting:: Connecting to the @strong{MySQL} server
-* Password security:: Keeping your password secure
-* Privileges provided:: Privileges provided by @strong{MySQL}
-* Privileges:: How the privilege system works
-* Connection access:: Access control, stage 1: Connection verification
-* Request access:: Access control, stage 2: Request verification
-* Privilege changes:: When privilege changes take effect
-* Default privileges:: Setting up the initial @strong{MySQL} privileges
-* Adding users:: Adding new users to @strong{MySQL}
-* Passwords:: How to set up passwords
-* Access denied:: Causes of @code{Access denied} errors
-
-MySQL Language Reference
-
-* Literals:: Literals: How to write strings and numbers
-* Variables:: User variables
-* Column types:: Column types
-* Functions:: Functions
-* CREATE DATABASE:: @code{CREATE DATABASE} syntax
-* DROP DATABASE:: @code{DROP DATABASE} syntax
-* CREATE TABLE:: @code{CREATE TABLE} syntax
-* ALTER TABLE:: @code{ALTER TABLE} syntax
-* RENAME TABLE:: @code{RENAME TABLE} syntax
-* DROP TABLE:: @code{DROP TABLE} syntax
-* OPTIMIZE TABLE:: @code{OPTIMIZE TABLE} syntax
-* CHECK TABLE:: @code{CHECK TABLE} syntax
-* BACKUP TABLE:: @code{BACKUP TABLE} syntax
-* RESTORE TABLE:: @code{RESTORE TABLE} syntax
-* ANALYZE TABLE:: @code{ANALYZE TABLE} syntax
-* REPAIR TABLE:: @code{REPAIR TABLE} syntax
-* DELETE:: @code{DELETE} syntax
-* TRUNCATE:: @code{TRUNCATE} syntax
-* SELECT:: @code{SELECT} syntax
-* JOIN:: @code{JOIN} syntax
-* INSERT:: @code{INSERT} syntax
-* REPLACE:: @code{REPLACE} syntax
-* LOAD DATA:: @code{LOAD DATA INFILE} syntax
-* UPDATE:: @code{UPDATE} syntax
-* USE:: @code{USE} syntax
-* FLUSH:: @code{FLUSH} syntax (clearing caches)
-* KILL:: @code{KILL} syntax
-* SHOW:: @code{SHOW} syntax (Get information about tables, columns, ...)
-* EXPLAIN:: @code{EXPLAIN} syntax (Get information about a @code{SELECT})
-* DESCRIBE:: @code{DESCRIBE} syntax (Get information about names of columns)
-* COMMIT:: @code{BEGIN/COMMIT/ROLLBACK} syntax
-* LOCK TABLES:: @code{LOCK TABLES/UNLOCK TABLES} syntax
-* SET OPTION:: @code{SET OPTION} syntax
-* SET TRANSACTION:: @code{SET TRANSACTION} syntax
-* GRANT:: @code{GRANT} and @code{REVOKE} syntax
-* CREATE INDEX:: @code{CREATE INDEX} syntax
-* DROP INDEX:: @code{DROP INDEX} syntax
-* Comments:: Comment syntax
-* CREATE FUNCTION:: @code{CREATE FUNCTION} syntax
-* Reserved words:: Is @strong{MySQL} picky about reserved words?
-
-Literals: How to Write Strings and Numbers
-
-* String syntax:: Strings
-* Number syntax:: Numbers
-* Hexadecimal values:: Hexadecimal values
-* NULL values:: @code{NULL} values
-* Legal names:: Database, Table, Index, Column, and Alias Names
-
-Database, Table, Index, Column, and Alias Names
-
-* Name case sensitivity:: Case sensitivity in names
-
-Column Types
-
-* Storage requirements:: Column type storage requirements
-* Numeric types:: Numeric types
-* Date and time types:: Date and time types
-* String types:: String types
-* Choosing types:: Choosing the right type for a column
-* Indexes:: Column indexes
-* Multiple-column indexes:: Multiple-column indexes
-* Other-vendor column types:: Using column types from other database engines
-
-Date and Time Types
-
-* Y2K issues:: Y2K issues and date types
-* DATETIME:: The @code{DATETIME}, @code{DATE} and @code{TIMESTAMP} types
-* TIME:: The @code{TIME} type
-* YEAR:: The @code{YEAR} type
-
-String Types
-
-* CHAR:: The @code{CHAR} and @code{VARCHAR} types
-* BLOB:: The @code{BLOB} and @code{TEXT} types
-* ENUM:: The @code{ENUM} type
-* SET:: The @code{SET} type
-
-Functions for Use in @code{SELECT} and @code{WHERE} Clauses
-
-* Grouping functions:: Grouping functions
-* Arithmetic functions:: Normal arithmetic operations
-* Bit functions:: Bit functions
-* Logical functions:: Logical operations
-* Comparison functions:: Comparison operators
-* String comparison functions:: String comparison functions
-* Casts:: Cast operators
-* Control flow functions:: Control flow functions
-* Mathematical functions:: Mathematical functions
-* String functions:: String functions
-* Date and time functions:: Date and time functions
-* Miscellaneous functions:: Miscellaneous functions
-* Group by functions:: Functions for @code{GROUP BY} clause
-
-@code{CREATE TABLE} Syntax
-
-* Silent column changes:: Silent column changes
-
-@code{INSERT} Syntax
-
-* INSERT SELECT::
-* INSERT DELAYED::
-
-@code{SHOW} Syntax
-
-* SHOW DATABASE INFO::
-* SHOW TABLE STATUS::
-* SHOW STATUS::
-* SHOW VARIABLES::
-* SHOW LOGS::
-* SHOW PROCESSLIST::
-* SHOW GRANTS::
-* SHOW CREATE TABLE::
-
-MySQL Table Types
-
-* MyISAM:: MyISAM tables
-* MERGE:: MERGE tables
-* ISAM:: ISAM tables
-* HEAP:: HEAP tables
-* InnoDB:: InnoDB tables
-* BDB:: BDB or Berkeley_db tables
-
-MyISAM Tables
-
-* Key space:: Space needed for keys
-* MyISAM table formats:: MyISAM table formats
-* MyISAM table problems::
-
-MyISAM Table Formats
-
-* Static format:: Static (Fixed-length) table characteristics
-* Dynamic format:: Dynamic table characteristics
-* Compressed format:: Compressed table characteristics
-
-MyISAM table problems.
-
-* Corrupted MyISAM tables::
-* MyISAM table close::
-
-InnoDB Tables
-
-* InnoDB overview:: InnoDB tables overview
-* InnoDB start:: InnoDB startup options
-* InnoDB init:: Creating InnoDB table space.
-* Using InnoDB tables:: Creating InnoDB tables
-* Adding and removing:: Adding and removing InnoDB data and log files
-* Backing up:: Backing up and recovering an InnoDB database
-* Moving:: Moving an InnoDB database to another machine
-* InnoDB transaction model:: InnoDB transaction model.
-* Implementation:: Implementation of multiversioning
-* Table and index:: Table and index structures
-* File space management:: File space management and disk i/o
-* Error handling:: Error handling
-* InnoDB restrictions:: Some restrictions on InnoDB tables
-* InnoDB contact information:: InnoDB contact information.
-
-Creating InnoDB table space
-
-* Error creating InnoDB::
-
-Backing up and recovering an InnoDB database
-
-* InnoDB checkpoints::
-
-InnoDB transaction model
-
-* InnoDB consistent read::
-* InnoDB locking reads::
-* InnoDB Next-key locking::
-* InnoDB Locks set::
-* InnoDB Deadlock detection::
-
-Table and index structures
-
-* InnoDB physical structure::
-* InnoDB Insert buffering::
-* InnoDB Adaptive hash::
-* InnoDB Physical record::
-
-File space management and disk i/o
-
-* InnoDB Disk i/o::
-* InnoDB File space::
-* InnoDB File Defragmenting::
-
-BDB or Berkeley_DB Tables
-
-* BDB overview:: Overview of BDB Tables
-* BDB install:: Installing BDB
-* BDB start:: BDB startup options
-* BDB characteristic:: Some characteristic of @code{BDB} tables:
-* BDB TODO:: Some things we need to fix for BDB in the near future:
-* BDB portability:: Operating systems supported by @strong{BDB}
-* BDB errors:: Errors You May Get When Using BDB Tables
-
-MySQL Tutorial
-
-* Connecting-disconnecting:: Connecting to and disconnecting from the server
-* Entering queries:: Entering queries
-* Database use:: Creating and using a database
-* Getting information:: Getting information about databases and tables
-* Examples:: Examples
-* Batch mode:: Using @code{mysql} in batch mode
-* Twin:: Queries from twin project
-
-Creating and Using a Database
-
-* Creating database:: Creating a database
-* Creating tables:: Creating a table
-* Loading tables:: Loading data into a table
-* Retrieving data:: Retrieving information from a table
-
-Retrieving Information from a Table
-
-* Selecting all:: Selecting all data
-* Selecting rows:: Selecting particular rows
-* Selecting columns:: Selecting particular columns
-* Sorting rows:: Sorting rows
-* Date calculations:: Date calculations
-* Working with NULL:: Working with @code{NULL} values
-* Pattern matching:: Pattern matching
-* Counting rows:: Counting rows
-* Multiple tables::
-
-Examples of Common Queries
-
-* example-Maximum-column:: The maximum value for a column
-* example-Maximum-row:: The row holding the maximum of a certain column
-* example-Maximum-column-group:: Maximum of column per group
-* example-Maximum-column-group-row:: The rows holding the group-wise maximum of a certain field
-* example-user-variables:: Using user variables
-* example-Foreign keys:: Using foreign keys
-* Searching on two keys::
-* Calculating days::
-
-Queries from Twin Project
-
-* Twin pool:: Find all non-distributed twins
-* Twin event:: Show a table on twin pair status
-
-MySQL Server Functions
-
-* Languages:: What languages are supported by @strong{MySQL}?
-
-What Languages Are Supported by MySQL?
-
-* Character sets:: The character set used for data and sorting
-* Adding character set:: Adding a new character set
-* Character arrays:: The character definition arrays
-* String collating:: String collating support
-* Multi-byte characters:: Multi-byte character support
-
-Replication in MySQL
-
-* Replication Intro:: Introduction
-* Replication Implementation:: Replication Implementation Overview
-* Replication HOWTO:: HOWTO
-* Replication Features:: Replication Features
-* Replication Options:: Replication Options in my.cnf
-* Replication SQL:: SQL Commands related to replication
-* Replication FAQ:: Frequently Asked Questions about replication
-* Replication Problems:: Troubleshooting Replication.
-
-MySQL Full-text Search
-
-* Fulltext Fine-tuning::
-* Fulltext Features to Appear in MySQL 4.0::
-* Fulltext TODO::
-
-Getting Maximum Performance from MySQL
-
-* Optimize Basics:: Optimization overview
-* System:: System/Compile time and startup parameter tuning
-* Data size:: Get your data as small as possible
-* MySQL indexes:: How @strong{MySQL} uses indexes
-* Query Speed:: Speed of queries that access or update data
-* Tips:: Other optimization tips
-* Benchmarks:: Using your own benchmarks
-* Design:: Design choices
-* Design Limitations:: MySQL design limitations/tradeoffs
-* Portability:: Portability
-* Internal use:: What have we used MySQL for?
-
-System/Compile Time and Startup Parameter Tuning
-
-* Compile and link options:: How compiling and linking affects the speed of MySQL
-* Disk issues:: Disk issues
-* Symbolic links:: Using Symbolic Links
-* Server parameters:: Tuning server parameters
-* Table cache:: How MySQL opens and closes tables
-* Creating many tables:: Drawbacks of creating large numbers of tables in the same database
-* Open tables:: Why so many open tables?
-* Memory use:: How MySQL uses memory
-* Internal locking:: How MySQL locks tables
-* Table locking:: Table locking issues
-* DNS::
-
-Using Symbolic Links
-
-* Symbolic links to database::
-* Symbolic links to tables::
-
-Speed of Queries that Access or Update Data
-
-* Estimating performance:: Estimating query performance
-* SELECT speed:: Speed of @code{SELECT} queries
-* Where optimizations:: How MySQL optimizes @code{WHERE} clauses
-* DISTINCT optimization:: How MySQL Optimizes @code{DISTINCT}
-* LEFT JOIN optimization:: How MySQL optimizes @code{LEFT JOIN}
-* LIMIT optimization:: How MySQL optimizes @code{LIMIT}
-* Insert speed:: Speed of @code{INSERT} queries
-* Update speed:: Speed of @code{UPDATE} queries
-* Delete speed:: Speed of @code{DELETE} queries
-
-MySQL Utilites
-
-* Programs:: What do the executables do?
-* mysqld-max:: mysqld-max, An extended mysqld server
-* safe_mysqld:: safe_mysqld, the wrapper around mysqld
-* mysqld_multi:: Program for managing multiple @strong{MySQL} servers
-* mysql:: The command line tool
-* mysqladmin:: Administering a @strong{MySQL} server
-* mysqldump:: Dumping the structure and data from @strong{MySQL} databases and tables
-* mysqlhotcopy:: Copying @strong{MySQL} Databases and Tables
-* mysqlimport:: Importing data from text files
-* perror:: Displaying error messages
-* mysqlshow:: Showing databases, tables and columns
-* myisampack:: The @strong{MySQL} compressed read-only table generator
-
-Maintaining a MySQL Installation
-
-* Table maintenance:: Table maintenance and crash recovery
-* Using mysqlcheck:: Using mysqlcheck for maintenance and recovery
-* Maintenance regimen:: Setting up a table maintenance regimen
-* Table-info:: Getting information about a table
-* Crash recovery:: Using @code{myisamchk} for crash recovery
-* Log file maintenance:: Log file maintenance
-
-Using @code{myisamchk} for Table Maintenance and Crash Recovery
-
-* myisamchk syntax:: @code{myisamchk} invocation syntax
-* myisamchk memory:: @code{myisamchk} memory usage
-
-@code{myisamchk} Invocation Syntax
-
-* myisamchk general options::
-* myisamchk check options::
-* myisamchk repair options::
-* myisamchk other options::
-
-Using @code{myisamchk} for Crash Recovery
-
-* Check:: How to check tables for errors
-* Repair:: How to repair tables
-* Optimization:: Table optimization
-
-Adding New Functions to MySQL
-
-* Adding UDF:: Adding a new user-definable function
-* Adding native function:: Adding a new native function
-
-Adding a New User-definable Function
-
-* UDF calling sequences:: UDF calling sequences
-* UDF arguments:: Argument processing
-* UDF return values:: Return values and error handling
-* UDF compiling:: Compiling and installing user-definable functions
-
-Adding New Procedures to MySQL
-
-* procedure analyse:: Procedure analyse
-* Writing a procedure:: Writing a procedure.
-
-MySQL ODBC Support
-
-* Installing MyODBC:: How to install MyODBC
-* ODBC administrator:: How to fill in the various fields in the ODBC administrator program
-* MyODBC connect parameters::
-* ODBC Problems:: How to report problems with @strong{MySQL} ODBC
-* MyODBC clients:: Programs known to work with @strong{MyODBC}
-* ODBC and last_insert_id:: How to get the value of an @code{AUTO_INCREMENT} column in ODBC
-* MyODBC bug report:: Reporting problems with MyODBC
-
-Using MySQL with Some Common Programs
-
-* Apache:: Using @strong{MySQL} with Apache
-* Borland C++::
-
-Problems and Common Errors
-
-* What is crashing:: How to determine what is causing problems
-* Crashing:: What to do if @strong{MySQL} keeps crashing
-* Link errors:: Problems when linking with the @strong{MySQL} client library
-* Common errors:: Some common errors when using @strong{MySQL}
-* Full disk:: How @strong{MySQL} handles a full disk
-* Multiple sql commands:: How to run SQL commands from a text file
-* Temporary files:: Where @strong{MySQL} stores temporary files
-* Problems with mysql.sock:: How to protect @file{/tmp/mysql.sock}
-* Changing MySQL user:: How to run @strong{MySQL} as a normal user
-* Resetting permissions:: How to reset a forgotten password.
-* File permissions :: Problems with file permissions
-* Not enough file handles:: File not found
-* Using DATE:: Problems using @code{DATE} columns
-* Timezone problems:: Timezone problems
-* Case sensitivity:: Case sensitivity in searches
-* Problems with NULL:: Problems with @code{NULL} values
-* Problems with alias:: Problems with @code{alias}
-* Deleting from related tables:: Deleting rows from related tables
-* No matching rows:: Solving problems with no matching rows
-* ALTER TABLE problems:: Problems with @code{ALTER TABLE}.
-* Change column order:: How to change the order of columns in a table
-* Temporary table problems::
-
-Some Common Errors When Using MySQL
-
-* Error Access denied:: @code{Access denied} Error
-* Gone away:: @code{MySQL server has gone away} error
-* Can not connect to server:: @code{Can't connect to [local] MySQL server} error
-* Blocked host:: @code{Host '...' is blocked} error
-* Too many connections:: @code{Too many connections} error
-* Non-transactional tables:: @code{Some non-transactional changed tables couldn't be rolled back} Error
-* Out of memory:: @code{Out of memory} error
-* Packet too large:: @code{Packet too large} error
-* Communication errors:: Communication errors / Aborted connection
-* Full table:: @code{The table is full} error
-* Cannot create:: @code{Can't create/write to file} Error
-* Commands out of sync:: @code{Commands out of sync} error in client
-* Ignoring user:: @code{Ignoring user} error
-* Cannot find table:: @code{Table 'xxx' doesn't exist} error
-* Cannot initialize character set::
-
-Solving Some Common Problems with MySQL
-
-* Log Replication:: Database replication with update log
-* Backup:: Database backups
-* Multiple servers:: Running multiple @strong{MySQL} servers on the same machine
-
-The MySQL log files
-
-* Error log::
-* Query log::
-* Update log::
-* Binary log::
-* Slow query log::
-
-MySQL APIs
-
-* C:: @strong{MySQL} C API
-* Perl:: @strong{MySQL} Perl API
-* Eiffel:: @strong{MySQL} Eiffel wrapper
-* Java:: @strong{MySQL} Java connectivity (JDBC)
-* PHP:: @strong{MySQL} PHP API
-* Cplusplus:: @strong{MySQL} C++ APIs
-* Python:: @strong{MySQL} Python APIs
-* Tcl:: @strong{MySQL} Tcl APIs
-
-MySQL C API
-
-* C API datatypes:: C API Datatypes
-* C API function overview:: C API Function Overview
-* C API functions:: C API Function Descriptions
-* C API problems::
-* Thread-safe clients::
-
-C API Function Descriptions
-
-* mysql_affected_rows:: @code{mysql_affected_rows()}
-* mysql_close:: @code{mysql_close()}
-* mysql_connect:: @code{mysql_connect()}
-* mysql_change_user:: @code{mysql_change_user()}
-* mysql_character_set_name:: @code{mysql_character_set_name()}
-* mysql_create_db:: @code{mysql_create_db()}
-* mysql_data_seek:: @code{mysql_data_seek()}
-* mysql_debug:: @code{mysql_debug()}
-* mysql_drop_db:: @code{mysql_drop_db()}
-* mysql_dump_debug_info:: @code{mysql_dump_debug_info()}
-* mysql_eof:: @code{mysql_eof()}
-* mysql_errno:: @code{mysql_errno()}
-* mysql_error:: @code{mysql_error()}
-* mysql_escape_string:: @code{mysql_escape_string()}
-* mysql_fetch_field:: @code{mysql_fetch_field()}
-* mysql_fetch_fields:: @code{mysql_fetch_fields()}
-* mysql_fetch_field_direct:: @code{mysql_fetch_field_direct()}
-* mysql_fetch_lengths:: @code{mysql_fetch_lengths()}
-* mysql_fetch_row:: @code{mysql_fetch_row()}
-* mysql_field_count:: @code{mysql_field_count()}
-* mysql_field_seek:: @code{mysql_field_seek()}
-* mysql_field_tell:: @code{mysql_field_tell()}
-* mysql_free_result:: @code{mysql_free_result()}
-* mysql_get_client_info:: @code{mysql_get_client_info()}
-* mysql_get_host_info:: @code{mysql_get_host_info()}
-* mysql_get_proto_info:: @code{mysql_get_proto_info()}
-* mysql_get_server_info:: @code{mysql_get_server_info()}
-* mysql_info:: @code{mysql_info()}
-* mysql_init:: @code{mysql_init()}
-* mysql_insert_id:: @code{mysql_insert_id()}
-* mysql_kill:: @code{mysql_kill()}
-* mysql_list_dbs:: @code{mysql_list_dbs()}
-* mysql_list_fields:: @code{mysql_list_fields()}
-* mysql_list_processes:: @code{mysql_list_processes()}
-* mysql_list_tables:: @code{mysql_list_tables()}
-* mysql_num_fields:: @code{mysql_num_fields()}
-* mysql_num_rows:: @code{mysql_num_rows()}
-* mysql_options:: @code{mysql_options()}
-* mysql_ping:: @code{mysql_ping()}
-* mysql_query:: @code{mysql_query()}
-* mysql_real_connect:: @code{mysql_real_connect()}
-* mysql_real_escape_string:: @code{mysql_real_escape_string()}
-* mysql_real_query:: @code{mysql_real_query()}
-* mysql_reload:: @code{mysql_reload()}
-* mysql_row_seek:: @code{mysql_row_seek()}
-* mysql_row_tell:: @code{mysql_row_tell()}
-* mysql_select_db:: @code{mysql_select_db()}
-* mysql_shutdown:: @code{mysql_shutdown()}
-* mysql_stat:: @code{mysql_stat()}
-* mysql_store_result:: @code{mysql_store_result()}
-* mysql_thread_id:: @code{mysql_thread_id()}
-* mysql_use_result:: @code{mysql_use_result()}
-
-Common questions and problems when using the C API
-
-* NULL mysql_store_result::
-* Query results::
-* Getting unique ID::
-* C API linking problems::
-
-Why Is It that After @code{mysql_query()} Returns Success, @code{mysql_store_result()} Sometimes Returns @code{NULL?}
-
-* Query results::
-* Getting unique ID::
-* C API linking problems::
-
-MySQL Perl API
-
-* DBI with DBD:: @code{DBI} with @code{DBD::mysql}
-* Perl DBI Class:: The @code{DBI} interface
-* DBI-info:: More @code{DBI}/@code{DBD} information
-
-MySQL PHP API
-
-* PHP problems:: Common problems with MySQL and PHP
-
-MySQL Internals
-
-* MySQL threads:: MySQL threads
-* MySQL test suite:: MySQL test suite
-
-MySQL Test Suite
-
-* running mysqltest::
-* extending mysqltest::
-* Reporting mysqltest bugs::
-
-Credits
-
-* Developers::
-* Contributors::
-* Supporters::
-
-MySQL change history
-
-* News-4.0.x:: Changes in release 4.0.x (Development; Alpha)
-* News-3.23.x:: Changes in release 3.23.x (Stable)
-* News-3.22.x:: Changes in release 3.22.x (Older; Still supported)
-* News-3.21.x:: Changes in release 3.21.x
-* News-3.20.x:: Changes in release 3.20.x
-* News-3.19.x:: Changes in release 3.19.x
-
-Changes in release 4.0.x (Development; Alpha)
-
-* News-4.0.0:: Changes in release 4.0.0
-
-Changes in release 3.23.x (Stable)
-
-* News-3.23.40:: Changes in release 3.23.40
-* News-3.23.39:: Changes in release 3.23.39
-* News-3.23.38:: Changes in release 3.23.38
-* News-3.23.37:: Changes in release 3.23.37
-* News-3.23.36:: Changes in release 3.23.36
-* News-3.23.35:: Changes in release 3.23.35
-* News-3.23.34a:: Changes in release 3.23.34a
-* News-3.23.34:: Changes in release 3.23.34
-* News-3.23.33:: Changes in release 3.23.33
-* News-3.23.32:: Changes in release 3.23.32
-* News-3.23.31:: Changes in release 3.23.31
-* News-3.23.30:: Changes in release 3.23.30
-* News-3.23.29:: Changes in release 3.23.29
-* News-3.23.28:: Changes in release 3.23.28
-* News-3.23.27:: Changes in release 3.23.27
-* News-3.23.26:: Changes in release 3.23.26
-* News-3.23.25:: Changes in release 3.23.25
-* News-3.23.24:: Changes in release 3.23.24
-* News-3.23.23:: Changes in release 3.23.23
-* News-3.23.22:: Changes in release 3.23.22
-* News-3.23.21:: Changes in release 3.23.21
-* News-3.23.20:: Changes in release 3.23.20
-* News-3.23.19:: Changes in release 3.23.19
-* News-3.23.18:: Changes in release 3.23.18
-* News-3.23.17:: Changes in release 3.23.17
-* News-3.23.16:: Changes in release 3.23.16
-* News-3.23.15:: Changes in release 3.23.15
-* News-3.23.14:: Changes in release 3.23.14
-* News-3.23.13:: Changes in release 3.23.13
-* News-3.23.12:: Changes in release 3.23.12
-* News-3.23.11:: Changes in release 3.23.11
-* News-3.23.10:: Changes in release 3.23.10
-* News-3.23.9:: Changes in release 3.23.9
-* News-3.23.8:: Changes in release 3.23.8
-* News-3.23.7:: Changes in release 3.23.7
-* News-3.23.6:: Changes in release 3.23.6
-* News-3.23.5:: Changes in release 3.23.5
-* News-3.23.4:: Changes in release 3.23.4
-* News-3.23.3:: Changes in release 3.23.3
-* News-3.23.2:: Changes in release 3.23.2
-* News-3.23.1:: Changes in release 3.23.1
-* News-3.23.0:: Changes in release 3.23.0
-
-Changes in release 3.22.x (Older; Still supported)
-
-* News-3.22.35:: Changes in release 3.22.35
-* News-3.22.34:: Changes in release 3.22.34
-* News-3.22.33:: Changes in release 3.22.33
-* News-3.22.32:: Changes in release 3.22.32
-* News-3.22.31:: Changes in release 3.22.31
-* News-3.22.30:: Changes in release 3.22.30
-* News-3.22.29:: Changes in release 3.22.29
-* News-3.22.28:: Changes in release 3.22.28
-* News-3.22.27:: Changes in release 3.22.27
-* News-3.22.26:: Changes in release 3.22.26
-* News-3.22.25:: Changes in release 3.22.25
-* News-3.22.24:: Changes in release 3.22.24
-* News-3.22.23:: Changes in release 3.22.23
-* News-3.22.22:: Changes in release 3.22.22
-* News-3.22.21:: Changes in release 3.22.21
-* News-3.22.20:: Changes in release 3.22.20
-* News-3.22.19:: Changes in release 3.22.19
-* News-3.22.18:: Changes in release 3.22.18
-* News-3.22.17:: Changes in release 3.22.17
-* News-3.22.16:: Changes in release 3.22.16
-* News-3.22.15:: Changes in release 3.22.15
-* News-3.22.14:: Changes in release 3.22.14
-* News-3.22.13:: Changes in release 3.22.13
-* News-3.22.12:: Changes in release 3.22.12
-* News-3.22.11:: Changes in release 3.22.11
-* News-3.22.10:: Changes in release 3.22.10
-* News-3.22.9:: Changes in release 3.22.9
-* News-3.22.8:: Changes in release 3.22.8
-* News-3.22.7:: Changes in release 3.22.7
-* News-3.22.6:: Changes in release 3.22.6
-* News-3.22.5:: Changes in release 3.22.5
-* News-3.22.4:: Changes in release 3.22.4
-* News-3.22.3:: Changes in release 3.22.3
-* News-3.22.2:: Changes in release 3.22.2
-* News-3.22.1:: Changes in release 3.22.1
-* News-3.22.0:: Changes in release 3.22.0
-
-Changes in release 3.21.x
-
-* News-3.21.33:: Changes in release 3.21.33
-* News-3.21.32:: Changes in release 3.21.32
-* News-3.21.31:: Changes in release 3.21.31
-* News-3.21.30:: Changes in release 3.21.30
-* News-3.21.29:: Changes in release 3.21.29
-* News-3.21.28:: Changes in release 3.21.28
-* News-3.21.27:: Changes in release 3.21.27
-* News-3.21.26:: Changes in release 3.21.26
-* News-3.21.25:: Changes in release 3.21.25
-* News-3.21.24:: Changes in release 3.21.24
-* News-3.21.23:: Changes in release 3.21.23
-* News-3.21.22:: Changes in release 3.21.22
-* News-3.21.21a:: Changes in release 3.21.21a
-* News-3.21.21:: Changes in release 3.21.21
-* News-3.21.20:: Changes in release 3.21.20
-* News-3.21.19:: Changes in release 3.21.19
-* News-3.21.18:: Changes in release 3.21.18
-* News-3.21.17:: Changes in release 3.21.17
-* News-3.21.16:: Changes in release 3.21.16
-* News-3.21.15:: Changes in release 3.21.15
-* News-3.21.14b:: Changes in release 3.21.14b
-* News-3.21.14a:: Changes in release 3.21.14a
-* News-3.21.13:: Changes in release 3.21.13
-* News-3.21.12:: Changes in release 3.21.12
-* News-3.21.11:: Changes in release 3.21.11
-* News-3.21.10:: Changes in release 3.21.10
-* News-3.21.9:: Changes in release 3.21.9
-* News-3.21.8:: Changes in release 3.21.8
-* News-3.21.7:: Changes in release 3.21.7
-* News-3.21.6:: Changes in release 3.21.6
-* News-3.21.5:: Changes in release 3.21.5
-* News-3.21.4:: Changes in release 3.21.4
-* News-3.21.3:: Changes in release 3.21.3
-* News-3.21.2:: Changes in release 3.21.2
-* News-3.21.0:: Changes in release 3.21.0
-
-Changes in release 3.20.x
-
-* News-3.20.18:: Changes in release 3.20.18
-* News-3.20.17:: Changes in release 3.20.17
-* News-3.20.16:: Changes in release 3.20.16
-* News-3.20.15:: Changes in release 3.20.15
-* News-3.20.14:: Changes in release 3.20.14
-* News-3.20.13:: Changes in release 3.20.13
-* News-3.20.11:: Changes in release 3.20.11
-* News-3.20.10:: Changes in release 3.20.10
-* News-3.20.9:: Changes in release 3.20.9
-* News-3.20.8:: Changes in release 3.20.8
-* News-3.20.7:: Changes in release 3.20.7
-* News-3.20.6:: Changes in release 3.20.6
-* News-3.20.3:: Changes in release 3.20.3
-* News-3.20.0:: Changes in releases 3.20.0
-
-Changes in release 3.19.x
-
-* News-3.19.5:: Changes in release 3.19.5
-* News-3.19.4:: Changes in release 3.19.4
-* News-3.19.3:: Changes in release 3.19.3
-
-Comments on porting to other systems
-
-* Debugging server:: Debugging a @strong{MySQL} server
-* Debugging client:: Debugging a @strong{MySQL} client
-* The DBUG package:: The DBUG package
-* Locking methods:: Locking methods
-* RTS-threads:: Comments about RTS threads
-* Thread packages:: Differences between different thread packages
-
-Debugging a MySQL server
-
-* Compiling for debugging::
-* Making trace files::
-* Using gdb on mysqld::
-* Using stack trace::
-* Using log files::
-* Reproduceable test case::
-
-@end detailmenu
@end menu
@cindex overview
@@ -1327,7 +317,8 @@ us.
you are looking for, you should give it a try. @strong{MySQL} also has a
very practical set of features developed in very close cooperation with
our users. You can find a performance comparison of @strong{MySQL}
-to some other database managers on our benchmark page. @xref{Benchmarks}.
+to some other database managers on our benchmark page.
+@xref{MySQL Benchmarks}.
@strong{MySQL} was originally developed to handle very large databases
much faster than existing solutions and has been successfully used in
@@ -5472,9 +4463,7 @@ For platform-specific bugs, see the sections about compiling and porting.
@menu
* Compare mSQL:: How @strong{MySQL} compares to @code{mSQL}
-* Protocol differences::
* Compare PostgreSQL:: How @strong{MySQL} compares with PostgreSQL
-* MySQL-PostgreSQL features::
@end menu
This section compares @strong{MySQL} to other popular databases.
@@ -5488,14 +4477,14 @@ For a list of all supported limits, functions, and types, see the
@code{crash-me} Web page at
@uref{http://www.mysql.com/information/crash-me.php}.
-@node Compare mSQL, Protocol differences, Comparisons, Comparisons
+@node Compare mSQL, Compare PostgreSQL, Comparisons, Comparisons
@subsection How MySQL Compares to @code{mSQL}
@table @strong
@item Performance
For a true comparison of speed, consult the growing @strong{MySQL} benchmark
-suite. @xref{Benchmarks}.
+suite. @xref{MySQL Benchmarks}.
Because there is no thread creation overhead, a small parser, few features, and
simple security, @code{mSQL} should be quicker at:
@@ -5554,7 +4543,7 @@ slower than @strong{MySQL} was seen. This is due to @code{mSQL}'s lack of a
join optimizer to order tables in the optimal order. However, if you put the
tables in exactly the right order in @code{mSQL}2 and the @code{WHERE} is
simple and uses index columns, the join will be relatively fast!
-@xref{Benchmarks}.
+@xref{MySQL Benchmarks}.
@item
@code{ORDER BY} and @code{GROUP BY}.
@item
@@ -5676,9 +4665,11 @@ For example, it changes instances of @code{msqlConnect()} to
@menu
* Using mSQL tools:: How to convert @code{mSQL} tools for @strong{MySQL}
+* Protocol differences::
+* Syntax differences::
@end menu
-@node Using mSQL tools, , Compare mSQL, Compare mSQL
+@node Using mSQL tools, Protocol differences, Compare mSQL, Compare mSQL
@subsubsection How to Convert @code{mSQL} Tools for MySQL
@cindex MySQL tools, conversion
@@ -5723,10 +4714,11 @@ Some incompatibilities exist as a result of @strong{MySQL} supporting
multiple connections to the server from the same process.
@end itemize
+@node Protocol differences, Syntax differences, Using mSQL tools, Compare mSQL
+@subsubsection How @code{mSQL} and MySQL Client/Server Communications Protocols Differ
+
@cindex communications protocols
@cindex mSQL vs. MySQL
-@node Protocol differences, Compare PostgreSQL, Compare mSQL, Comparisons
-@subsection How @code{mSQL} and MySQL Client/Server Communications Protocols Differ
There are enough differences that it is impossible (or at least not easy)
to support both.
@@ -5760,7 +4752,7 @@ If a connection is idle for 8 hours, the server closes the connection.
* Syntax differences::
@end menu
-@node Syntax differences, , Protocol differences, Protocol differences
+@node Syntax differences, , Protocol differences, Compare mSQL
@subsubsection How @code{mSQL} 2.0 SQL Syntax Differs from MySQL
@noindent
@@ -5948,7 +4940,8 @@ users.
@item
@end table
-@node Compare PostgreSQL, MySQL-PostgreSQL features, Protocol differences, Comparisons
+
+@node Compare PostgreSQL, , Compare mSQL, Comparisons
@subsection How MySQL Compares to PostgreSQL
@cindex PostgreSQL vs. MySQL, overview
@@ -5977,9 +4970,11 @@ can offer, you should use @code{PostgreSQL}.
@cindex PostgreSQL/MySQL, strategies
@menu
* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies
+* MySQL-PostgreSQL features::
+* MySQL-PostgreSQL benchmarks::
@end menu
-@node MySQL-PostgreSQL goals, , Compare PostgreSQL, Compare PostgreSQL
+@node MySQL-PostgreSQL goals, MySQL-PostgreSQL features, Compare PostgreSQL, Compare PostgreSQL
@subsubsection MySQL and PostgreSQL development strategies
When adding things to MySQL we take pride to do an optimal, definite
@@ -6020,9 +5015,11 @@ reusable code and, in our opinion, fewer bugs. Because we are the
authors of the @strong{MySQL} server code we are better able to
coordinate new features and releases.
+
+@node MySQL-PostgreSQL features, MySQL-PostgreSQL benchmarks, MySQL-PostgreSQL goals, Compare PostgreSQL
+@subsubsection Featurewise Comparison of MySQL and PostgreSQL
+
@cindex PostgreSQL/MySQL, features
-@node MySQL-PostgreSQL features, , Compare PostgreSQL, Comparisons
-@subsection Featurevise Comparison of MySQL and PostgreSQL
On the @uref{http://www.mysql.com/information/crash-me.php, crash-me}
page you can find a list of those database constructs and limits that
@@ -6244,7 +5241,7 @@ in this section.
* MySQL-PostgreSQL benchmarks::
@end menu
-@node MySQL-PostgreSQL benchmarks, , MySQL-PostgreSQL features, MySQL-PostgreSQL features
+@node MySQL-PostgreSQL benchmarks, , MySQL-PostgreSQL features, Compare PostgreSQL
@subsubsection Benchmarking MySQL and PostgreSQL
@cindex PostgreSQL vs. MySQL, benchmarks
@@ -6956,30 +5953,18 @@ Time is given according to amount of work, not real time.
Nothing; In the long run we plan to be fully ANSI 92 / ANSI 99 compliant.
@end itemize
-@node Installing, Privilege system, Introduction, Top
-@chapter Installing MySQL
+@node Installing, Tutorial, Introduction, Top
+@chapter MySQL Installation
@cindex installing, overview
@menu
-* Getting MySQL:: How to get @strong{MySQL}
-* Which OS:: Operating systems supported by @strong{MySQL}
-* Which version:: Which @strong{MySQL} version to use
-* Many versions:: How and when updates are released
-* Installation layouts:: Installation layouts
-* Installing binary:: Installing a @strong{MySQL} binary distribution
+* Quick Standard Installation::
+* General Installation Issues::
* Installing source:: Installing a @strong{MySQL} source distribution
-* Installing source tree:: Installing @strong{MySQL} from development source tree
-* Compilation problems:: Problems compiling?
-* MIT-pthreads:: MIT-pthreads notes
-* Perl support:: Perl installation comments
-* Source install system issues:: System-specific issues
-* Windows:: Windows notes
-* OS/2:: OS/2 notes
-* MySQL binaries:: MySQL binaries
* Post-installation:: Post-installation setup and testing
-* Installing many servers:: Installing many servers on the same machine
* Upgrade:: Upgrading/Downgrading MySQL
+* Operating System Specific Notes::
@end menu
This chapter describes how to obtain and install @strong{MySQL}:
@@ -7015,14 +6000,188 @@ procedures apply whether you install @strong{MySQL} using a binary or
source distribution.
@end itemize
+@node Quick Standard Installation, General Installation Issues, Installing, Installing
+@section Quick Standard Installation of MySQL
+
+@c This node name is special
+@menu
+* Linux-RPM::
+* Windows installation::
+@end menu
+
+@node Linux-RPM, Windows installation, Quick Standard Installation, Quick Standard Installation
+@subsection Installing MySQL on Linux
+
+@cindex RPM file
+@cindex RedHat Package Manager
+
+The recommended way to install @strong{MySQL} on Linux is by using an RPM
+file. The @strong{MySQL} RPMs are currently being built on a RedHat Version
+6.2 system but should work on other versions of Linux that support @code{rpm}
+and use @code{glibc}.
+
+If you have problems with an RPM file, for example, if you receive the error
+``@code{Sorry, the host 'xxxx' could not be looked up}'', see
+@ref{Binary notes-Linux}.
+
+The RPM files you may want to use are:
+
+@itemize @bullet
+@item @code{MySQL-VERSION.i386.rpm}
+
+The @strong{MySQL} server. You will need this unless you only want to
+connect to a @strong{MySQL} server running on another machine.
+
+@item @code{MySQL-client-VERSION.i386.rpm}
+
+The standard @strong{MySQL} client programs. You probably always want to
+install this package.
+
+@item @code{MySQL-bench-VERSION.i386.rpm}
+
+Tests and benchmarks. Requires Perl and msql-mysql-modules RPMs.
+
+@item @code{MySQL-devel-VERSION.i386.rpm}
+
+Libraries and include files needed if you want to compile other
+@strong{MySQL} clients, such as the Perl modules.
+
+@item @code{MySQL-VERSION.src.rpm}
+
+This contains the source code for all of the above packages. It can also
+be used to try to build RPMs for other architectures (for example, Alpha
+or SPARC).
+@end itemize
+
+To see all files in an RPM package, run:
+@example
+shell> rpm -qpl MySQL-VERSION.i386.rpm
+@end example
+
+To perform a standard minimal installation, run:
+
+@example
+shell> rpm -i MySQL-VERSION.i386.rpm MySQL-client-VERSION.i386.rpm
+@end example
+
+To install just the client package, run:
+
+@example
+shell> rpm -i MySQL-client-VERSION.i386.rpm
+@end example
+
+The RPM places data in @file{/var/lib/mysql}. The RPM also creates the
+appropriate entries in @file{/etc/rc.d/} to start the server automatically
+at boot time. (This means that if you have performed a previous
+installation, you may want to make a copy of your previously installed
+@strong{MySQL} startup file if you made any changes to it, so you don't lose
+your changes.)
+
+After installing the RPM file(s), the @code{mysqld} daemon should be running
+and you should now be able to start using @strong{MySQL}.
+@xref{Post-installation}.
+
+If something goes wrong, you can find more information in the binary
+installation chapter. @xref{Installing binary}.
+
+@node Windows installation, , Linux-RPM, Quick Standard Installation
+@subsection Installing MySQL on Windows
+
+The following instructions apply to precompiled binary distributions.
+If you download a source distribution, you will have to compile and install
+it yourself.
+
+If you don't have a copy of the @strong{MySQL} distribution, you should
+first download one from @uref{http://www.mysql.com/downloads/mysql-3.23.html}.
+
+If you plan to connect to @strong{MySQL} from some other program, you will
+probably also need the @strong{MyODBC} driver. You can find this at the
+@strong{MyODBC} download page
+(@uref{http://www.mysql.com/downloads/api-myodbc.html}).
+
+To install either distribution, unzip it in some empty directory and run the
+@code{Setup.exe} program.
+
+By default, @strong{MySQL}-Windows is configured to be installed in
+@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere,
+install it in @file{C:\mysql} first, then move the installation to
+where you want it. If you do move @strong{MySQL}, you must indicate
+where everything is located by supplying a @code{--basedir} option when
+you start the server. For example, if you have moved the @strong{MySQL}
+distribution to @file{D:\programs\mysql}, you must start @code{mysqld}
+like this:
+
+@example
+C:\> D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql
+@end example
+
+Use @code{mysqld --help} to display all the options that @code{mysqld}
+understands!
+
+With all newer @strong{MySQL} versions, you can also create a
+@file{C:\my.cnf} file that holds any default options for the
+@strong{MySQL} server. Copy the file @file{\mysql\my-xxxxx.cnf} to
+@file{C:\my.cnf} and edit it to suit your setup. Note that you should
+specify all paths with @samp{/} instead of @samp{\}. If you use
+@samp{\}, you need to specify it twice, because @samp{\} is the escape
+character in @strong{MySQL}. @xref{Option files}.
+
+Starting with @strong{MySQL} 3.23.38, the Windows distribution includes
+both the normal and the @strong{MySQL-Max} binaries. The main benefit
+of using the normal @code{mysqld.exe} binary is that it's a little
+faster and uses less resources.
+
+Here is a list of the different @strong{MySQL} servers you can use:
+
+@multitable @columnfractions .25 .75
+@item @code{mysqld} @tab
+Compiled with full debugging and automatic memory allocation checking,
+symbolic links, BDB and InnoDB tables.
+@item @code{mysqld-opt} @tab
+Optimized binary with no support for transactional tables.
+@item @code{mysqld-nt} @tab
+Optimized binary for NT with support for named pipes. You can run this
+version on Win98, but in this case no named pipes are created and you must
+have TCP/IP installed.
+@item @code{mysqld-max} @tab
+Optimized binary with support for symbolic links, BDB and InnoDB tables.
+@item @code{mysqld-max-nt} @tab
+Like @code{mysqld-max}, but compiled with support for named pipes.
+@end multitable
+
+All of the above binaries are optimized for the Pentium Pro processor but
+should work on any Intel processor >= i386.
+
+NOTE: If you want to use InnoDB tables, there are certain startup
+options that must be specified in your @file{my.ini} file! @xref{InnoDB start}.
+
+
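@c Illustrative aside, not part of this changeset: a minimal @file{C:\my.cnf}
@c sketch for the Windows setup described above, assuming the default
@c @file{C:/mysql} layout; adjust the paths to match your own installation.
@example
[mysqld]
# Use forward slashes in paths; a backslash would have to be doubled (C:\\mysql)
basedir=C:/mysql
datadir=C:/mysql/data
@end example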
+@node General Installation Issues, Installing source, Quick Standard Installation, Installing
+@section General Installation Issues
+
+@c @node Methods of Installation, , ,
+@c @subsection Methods of Installation
+
+@c FIX: this needs to be written?
+
+@menu
+* Getting MySQL::
+* Which OS::
+* Which version::
+* Installation layouts::
+* Many versions::
+* MySQL binaries::
+@end menu
+
+@node Getting MySQL, Which OS, General Installation Issues, General Installation Issues
+@subsection How to Get MySQL
+
@cindex downloading
@cindex MySQL version
@cindex version, latest
@cindex getting MySQL
@cindex mirror sites
@cindex URLS for downloading MySQL
-@node Getting MySQL, Which OS, Installing, Installing
-@section How to Get MySQL
Check the @uref{http://www.mysql.com/, @strong{MySQL} home page} for
information about the current version and for downloading instructions.
@@ -7202,6 +6361,11 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@uref{ftp://mysql.tiszanet.hu/pub/mirrors/mysql/, FTP}
@item
+@c EMAIL: i.habencius@telnet.hu (Habencius Istvan)
+@image{Flags/hungary} Hungary [stop.hu] @
+@uref{http://mysql.mirror.stop.hu/, WWW}
+
+@item
@c EMAIL: mirrors@gm.is (Tomas Edwardsson)
@image{Flags/iceland} Iceland [GM] @
@uref{http://mysql.gm.is/, WWW}
@@ -7388,10 +6552,11 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@uref{http://ftp.plig.org/pub/mysql/, WWW}
@uref{ftp://ftp.plig.org/pub/mysql/, FTP}
-@item
+@c @item
+@c Not ok 20010808; Non-existent (Matt)
@c EMAIL: sean@telekon.co.uk (Sean Gibson)
-@image{Flags/great-britain} UK [Telekon Internet/UK] @
-@uref{ftp://ftp.telekon.co.uk/pub/mysql/, FTP}
+@c @image{Flags/great-britain} UK [Telekon Internet/UK] @
+@c @uref{ftp://ftp.telekon.co.uk/pub/mysql/, FTP}
@c @item
@c lance@uklinux.net (Lance)
@@ -7412,16 +6577,22 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@c @uref{ftp://sunsite.org.uk/packages/mysql/, FTP}
@item
-@c sander@paco.net (Alexander Ivanov)
+@c EMAIL: sander@paco.net (Alexander Ivanov)
@image{Flags/ukraine} Ukraine [PACO] @
@uref{http://mysql.paco.net.ua, WWW}
@uref{ftp://mysql.paco.net.ua/, FTP}
@item
-@c mizi@alkar.net (Alexander Ryumshin)
+@c EMAIL: mizi@alkar.net (Alexander Ryumshin)
@image{Flags/ukraine} Ukraine [ISP Alkar Teleport/Dnepropetrovsk] @
@uref{http://mysql.dp.ua/, WWW}
+@item
+@c EMAIL: bole@bolex.bolex.co.yu (Bosko Radivojevic)
+@image{Flags/yugoslavia} Yugoslavia [bolex.co.yu] @
+@uref{http://mysql.boa.org.yu/, WWW}
+@uref{ftp://ftp.linux.org.yu/pub/MySQL/, FTP}
+
@end itemize
@strong{North America:}
@@ -7545,6 +6716,17 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@image{Flags/usa} USA [adgrafix.com/Boston, MA] @
@uref{http://mysql.adgrafix.com/, WWW}
+@item
+@c EMAIL: Pjacob@netnumina.com (Philip Jacob)
+@image{Flags/usa} USA [netNumina/Cambridge, MA] @
+@uref{http://mysql.mirrors.netnumina.com/, WWW}
+
+@item
+@c EMAIL: hagler@ahaza.com (Mark Hagler)
+@image{Flags/usa} USA [Ahaza Systems/Seattle, WA] @
+@uref{http://mysql.mirrortree.com/, WWW}
+@uref{ftp://mysql.mirrortree.com/pub/mysql/, FTP}
+
@end itemize
@strong{South America:}
@@ -7602,14 +6784,21 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@uref{http://www2.linuxforum.net/mirror/mysql/, WWW}
@item
-@c EMAIL: Vincent_Fong@innovator.com.hk (Vincent Fong)
-@image{Flags/china} China [ISL/Hong Kong] @
-@uref{http://mysql.islnet.net, WWW}
+@c EMAIL: vfong@hklpg.org (Vincent Fong)
+@image{Flags/china} China [HKLPG/Hong Kong] @
+@uref{http://mysql.hklpg.org, WWW}
@item
+@c EMAIL: jason-wong@gremlins.com.hk (Gremlins Jason Wong)
+@image{Flags/china} China [Gremlins/Hong Kong] @
+@uref{http://mysql.gremlins.com.hk/, WWW}
+@uref{ftp://ftp.mirrors.gremlins.com.hk/mysql/, FTP}
+
+@c @item
+@c Not ok 20010308; Other content! (Tonu)
@c EMAIL: xcyber@yahoo.com (xcyber)
-@image{Flags/china} China [xcyber.org/Hong Kong] @
-@uref{http://mysql.xcyber.org/, WWW}
+@c @image{Flags/china} China [xcyber.org/Hong Kong] @
+@c @uref{http://mysql.xcyber.org/, WWW}
@c @item
@c Not ok 20010330; Non-existent! (Matt)
@@ -7623,27 +6812,16 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@c @image{Flags/china} China [Netfirm] @
@c @uref{http://mysql.netfirm.net, WWW}
-@c @item
-@c Not ok 20000919; Old site (Matt)
-@c EMAIL: ahmlhs@nmsvr.chosun.com (Ho-sun Lee)
-@c @image{Flags/south-korea} South Korea [KREONet] @
-@c @uref{http://linux.kreonet.re.kr/mysql/, WWW}
-
@item
-@c EMAIL: jasper@webiiz.com (Kang, Tae-jin)
-@image{Flags/south-korea} South Korea [Webiiz] @
-@uref{http://mysql.webiiz.com/, WWW}
+@c EMAIL: dnata@incaf.net (Denie Nataprawira)
+@image{Flags/indonesia} Indonesia [incaf.net] @
+@uref{http://mysql.incaf.net/, WWW}
@item
-@c EMAIL: hollywar@holywar.net (Oh Junseon)
-@image{Flags/south-korea} South Korea [PanworldNet] @
-@uref{http://mysql.holywar.net/, WWW}
-
-@c @item
-@c ftp -> remove old files
-@c EX: ahmlhs@nmsvr.chosun.com (Ho-sun Lee)
-@c @image{Flags/south-korea} South Korea [KREONet] @
-@c @uref{ftp://linux.kreonet.re.kr/pub/tools/db/mysql/, FTP}
+@c EMAIL: andika@piksi.itb.ac.id (Andika Triwidada)
+@image{Flags/indonesia} Indonesia [web.id] @
+@uref{http://mysql.itb.web.id/, WWW}
+@uref{ftp://mysql.itb.web.id/pub/MySQL/, FTP}
@item
@c Ok 980805
@@ -7672,6 +6850,28 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@c @uref{ftp://mirror.nucba.ac.jp/mirror/mysql, FTP}
@c @item
+@c Not ok 20000919; Old site (Matt)
+@c EMAIL: ahmlhs@nmsvr.chosun.com (Ho-sun Lee)
+@c @image{Flags/south-korea} South Korea [KREONet] @
+@c @uref{http://linux.kreonet.re.kr/mysql/, WWW}
+
+@item
+@c EMAIL: jasper@webiiz.com (Kang, Tae-jin)
+@image{Flags/south-korea} South Korea [Webiiz] @
+@uref{http://mysql.webiiz.com/, WWW}
+
+@item
+@c EMAIL: hollywar@holywar.net (Oh Junseon)
+@image{Flags/south-korea} South Korea [PanworldNet] @
+@uref{http://mysql.holywar.net/, WWW}
+
+@c @item
+@c ftp -> remove old files
+@c EX: ahmlhs@nmsvr.chosun.com (Ho-sun Lee)
+@c @image{Flags/south-korea} South Korea [KREONet] @
+@c @uref{ftp://linux.kreonet.re.kr/pub/tools/db/mysql/, FTP}
+
+@c @item
@c Removed 990308
@c EMAIL: terence@com5.net (Terence Chan)
@c @image{Flags/singapore} Singapore [Com5 Productions] @
@@ -7731,15 +6931,15 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@end itemize
-@strong{Australia:}
+@c @strong{Australia:}
-@itemize @bullet
-@item
+@c @itemize @bullet
+@c @item
@c Added 980610
@c EMAIL: jason@dstc.edu.au (Jason Andrade)
-@image{Flags/australia} Australia [AARNet/Queensland] @
-@uref{http://mysql.mirror.aarnet.edu.au/, WWW}
-@uref{ftp://mysql.mirror.aarnet.edu.au/, FTP}
+@c @image{Flags/australia} Australia [AARNet/Queensland] @
+@c @uref{http://mysql.mirror.aarnet.edu.au/, WWW}
+@c @uref{ftp://mysql.mirror.aarnet.edu.au/, FTP}
@c @item
@c Added 980805. Removed 000102 'no such directory'
@@ -7768,7 +6968,7 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@c EMAIL: lucifer@maths.uq.edu.au (David Conran)
@c @image{Flags/australia} Australia FTP @
@c @uref{ftp://ftp.sage-au.org.au/pub/database/mysql, [Sage]}
-@end itemize
+@c @end itemize
@strong{Africa:}
@@ -7790,13 +6990,14 @@ Please report bad or out-of-date mirrors to @email{webmaster@@mysql.com}.
@c END_OF_MIRROR_LISTING
+@node Which OS, Which version, Getting MySQL, General Installation Issues
+@subsection Operating Systems Supported by MySQL
+
@cindex operating systems, supported
@cindex native thread support
@cindex thread support
@cindex process support
@cindex support, for operating systems
-@node Which OS, Which version, Getting MySQL, Installing
-@section Operating Systems Supported by MySQL
We use GNU Autoconf, so it is possible to port @strong{MySQL} to all modern
systems with working Posix threads and a C++ compiler. (To compile only the
@@ -7923,13 +7124,14 @@ forth more effort into testing on and optimizing for that particular platform.
We are just stating our observations to help you make a
decision on which platform to use @strong{MySQL} on in your setup.
+@node Which version, Installation layouts, Which OS, General Installation Issues
+@subsection Which MySQL Version to Use
+
@cindex MySQL binary distribution
@cindex MySQL source distribution
@cindex release numbers
@cindex version, choosing
@cindex choosing, a MySQL version
-@node Which version, Many versions, Which OS, Installing
-@section Which MySQL Version to Use
The first decision to make is whether you want to use the latest development
release or the last stable release:
@@ -7981,17 +7183,17 @@ If you want to use the @code{MySQL-Max} RPM, you must first
install the standard @code{MySQL} RPM.
@item
-If you want to configure @code{mysqld} with some extra feature that are NOT in
-the standard binary distributions. Here is a list of the most common
-extra options that you may want to use:
+If you want to configure @code{mysqld} with some extra features that are
+NOT in the standard binary distributions. Here is a list of the most
+common extra options that you may want to use (a sample @code{configure}
+line follows the list):
-@itemize @bullet
-@item --with-berkeley-db
-@item --with-innodb
-@item --with-raid
-@item --with-libwrap
-@item --with-named-z-lib (This is done for some of the binaries)
-@item --with-debug[=full]
+@itemize
+@item @code{--with-berkeley-db}
+@item @code{--with-innodb}
+@item @code{--with-raid}
+@item @code{--with-libwrap}
+@item @code{--with-named-z-lib} (This is done for some of the binaries)
+@item @code{--with-debug[=full]}
@end itemize
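+
+For example, a build that adds InnoDB and BDB support could be configured
+roughly like this (a sketch only; combine whatever options you need):
+
+@example
+shell> ./configure --prefix=/usr/local/mysql --with-innodb --with-berkeley-db
+@end example
+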
@item
@@ -8092,79 +7294,19 @@ hundreds of megabytes of data.
@item The @strong{MySQL} benchmark suite
This runs a range of common queries. It is also a test to see whether the
latest batch of optimizations actually made the code faster.
-@xref{Benchmarks}.
+@xref{MySQL Benchmarks}.
@item The @code{crash-me} test
This tries to determine what features the database supports and what its
-capabilities and limitations are. @xref{Benchmarks}.
+capabilities and limitations are. @xref{MySQL Benchmarks}.
@end table
Another test is that we use the newest @strong{MySQL} version in our internal
production environment, on at least one machine. We have more than 100
gigabytes of data to work with.
-@cindex releases, updating
-@cindex updating, releases of MySQL
-@node Many versions, Installation layouts, Which version, Installing
-@section How and When Updates Are Released
-
-@strong{MySQL} is evolving quite rapidly here at @strong{MySQL AB} and we
-want to share this with other @strong{MySQL} users. We try to make a release
-when we have very useful features that others seem to have a need for.
-
-We also try to help out users who request features that are easy to
-implement. We take note of what our licensed users want to have, and
-we especially take note of what our extended e-mail supported customers
-want and try to help them out.
-
-No one has to download a new release. The News section will tell you if
-the new release has something you really want. @xref{News}.
-
-We use the following policy when updating @strong{MySQL}:
-
-@itemize @bullet
-@item
-For each minor update, the last number in the version string is incremented.
-When there are major new features or minor incompatibilities with previous
-versions, the second number in the version string is incremented. When the
-file format changes, the first number is increased.
-
-@item
-Stable tested releases are meant to appear about 1-2 times a year, but
-if small bugs are found, a release with only bug fixes will be released.
-
-@item
-Working releases are meant to appear about every 1-8 weeks.
-
-@item
-Binary distributions for some platforms will be made by us for major releases.
-Other people may make binary distributions for other systems but probably
-less frequently.
-
-@item
-We usually make patches available as soon as we have located and fixed
-small bugs.
-
-@item
-For non-critical but annoying bugs, we will make patches available if they
-are sent to us. Otherwise we will combine many of them into a larger
-patch.
-
-@item
-If there is, by any chance, a fatal bug in a release we will make a new
-release as soon as possible. We would like other companies to do this,
-too.
-@end itemize
-
-The current stable release is Version 3.23; We have already moved active
-development to Version 4.0. Bugs will still be fixed in the stable version.
-We don't believe in a complete freeze, as this also leaves out bug fixes
-and things that ``must be done.'' ``Somewhat frozen'' means that we may
-add small things that ``almost surely will not affect anything that's
-already working.''
-
-@node Installation layouts, Installing binary, Many versions, Installing
-@section Installation Layouts
+@node Installation layouts, Many versions, Which version, General Installation Issues
+@subsection Installation Layouts
@cindex installation layouts
@cindex layout of installation
@@ -8228,485 +7370,132 @@ The header file and library directories are @file{include/mysql} and
You can create your own binary installation from a compiled source
distribution by executing the script @file{scripts/make_binary_distribution}.
-@cindex installing, binary distribution
-@cindex binary distributions, installing
-@node Installing binary, Installing source, Installation layouts, Installing
-@section Installing a MySQL Binary Distribution
-
-@menu
-* Linux-RPM:: Linux RPM files
-* Building clients:: Building client programs
-* Binary install system issues:: System-specific issues
-@end menu
-
-You need the following tools to install a @strong{MySQL} binary distribution:
-
-@itemize @bullet
-@item
-GNU @code{gunzip} to uncompress the distribution.
-
-@item
-A reasonable @code{tar} to unpack the distribution. GNU @code{tar} is
-known to work. Sun @code{tar} is known to have problems.
-@end itemize
-
-@cindex RPM, defined
-@cindex RedHat Package Manager
-An alternative installation method under Linux is to use RPM (RedHat Package
-Manager) distributions. @xref{Linux-RPM}.
-
-@c texi2html fails to split chapters if I use strong for all of this.
-If you run into problems, @strong{PLEASE ALWAYS USE} @code{mysqlbug} when
-posting questions to @email{mysql@@lists.mysql.com}. Even if the problem
-isn't a bug, @code{mysqlbug} gathers system information that will help others
-solve your problem. By not using @code{mysqlbug}, you lessen the likelihood
-of getting a solution to your problem! You will find @code{mysqlbug} in the
-@file{bin} directory after you unpack the distribution. @xref{Bug reports}.
-
-@cindex commands, for binary distribution
-The basic commands you must execute to install and use a @strong{MySQL}
-binary distribution are:
-
-@example
-shell> groupadd mysql
-shell> useradd -g mysql mysql
-shell> cd /usr/local
-shell> gunzip < /path/to/mysql-VERSION-OS.tar.gz | tar xvf -
-shell> ln -s mysql-VERSION-OS mysql
-shell> cd mysql
-shell> scripts/mysql_install_db
-shell> chown -R root /usr/local/mysql
-shell> chown -R mysql /usr/local/mysql/data
-shell> chgrp -R mysql /usr/local/mysql
-shell> chown -R root /usr/local/mysql/bin
-shell> bin/safe_mysqld --user=mysql &
-@end example
-
-@cindex adding, new users
-@cindex new users, adding
-@cindex users, adding
-
-You can add new users using the @code{bin/mysql_setpermission} script if
-you install the @code{DBI} and @code{Msql-Mysql-modules} Perl modules.
-
-A more detailed description follows.
+@node Many versions, MySQL binaries, Installation layouts, General Installation Issues
+@subsection How and When Updates Are Released
-To install a binary distribution, follow the steps below, then proceed
-to @ref{Post-installation}, for post-installation setup and testing:
-
-@enumerate
-@item
-Pick the directory under which you want to unpack the distribution, and move
-into it. In the example below, we unpack the distribution under
-@file{/usr/local} and create a directory @file{/usr/local/mysql} into which
-@strong{MySQL} is installed. (The following instructions therefore assume
-you have permission to create files in @file{/usr/local}. If that directory
-is protected, you will need to perform the installation as @code{root}.)
-
-@item
-Obtain a distribution file from one of the sites listed in
-@ref{Getting MySQL, , Getting @strong{MySQL}}.
-
-@strong{MySQL} binary distributions are provided as compressed @code{tar}
-archives and have names like @file{mysql-VERSION-OS.tar.gz}, where
-@code{VERSION} is a number (for example, @code{3.21.15}), and @code{OS}
-indicates the type of operating system for which the distribution is intended
-(for example, @code{pc-linux-gnu-i586}).
-
-@item
-If you see a binary distribution marked with the @code{-max} prefix, this
-means that the binary has support for transaction-safe tables and other
-features. @xref{mysqld-max, , @code{mysqld-max}}. Note that all binaries
-are built from the same @strong{MySQL} source distribution.
-
-@item
-Add a user and group for @code{mysqld} to run as:
-
-@example
-shell> groupadd mysql
-shell> useradd -g mysql mysql
-@end example
-
-These commands add the @code{mysql} group and the @code{mysql} user. The
-syntax for @code{useradd} and @code{groupadd} may differ slightly on different
-versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
-You may wish to call the user and group something else instead of @code{mysql}.
-
-@item
-Change into the intended installation directory:
+@cindex releases, updating
+@cindex updating, releases of MySQL
-@example
-shell> cd /usr/local
-@end example
+@strong{MySQL} is evolving quite rapidly here at @strong{MySQL AB} and we
+want to share this with other @strong{MySQL} users. We try to make a release
+when we have very useful features that others seem to have a need for.
-@item
-Unpack the distribution and create the installation directory:
+We also try to help out users who request features that are easy to
+implement. We take note of what our licensed users want to have, and
+we especially take note of what our customers with extended e-mail support
+want and try to help them out.
-@example
-shell> gunzip < /path/to/mysql-VERSION-OS.tar.gz | tar xvf -
-shell> ln -s mysql-VERSION-OS mysql
-@end example
+No one has to download a new release. The News section will tell you if
+the new release has something you really want. @xref{News}.
-The first command creates a directory named @file{mysql-VERSION-OS}. The
-second command makes a symbolic link to that directory. This lets you refer
-more easily to the installation directory as @file{/usr/local/mysql}.
+We use the following policy when updating @strong{MySQL}:
+@itemize @bullet
@item
-Change into the installation directory:
-
-@example
-shell> cd mysql
-@end example
-
-You will find several files and subdirectories in the @code{mysql} directory.
-The most important for installation purposes are the @file{bin} and
-@file{scripts} subdirectories.
-
-@table @file
-@item bin
-@tindex PATH environment variable
-@tindex environment variable, PATH
-This directory contains client programs and the server
-You should add the full pathname of this directory to your
-@code{PATH} environment variable so that your shell finds the @strong{MySQL}
-programs properly. @xref{Environment variables}.
-
-@item scripts
-This directory contains the @code{mysql_install_db} script used to initialize
-the @code{mysql} database containing the grant tables that store the server
-access permissions.
-@end table
+For each minor update, the last number in the version string is incremented.
+When there are major new features or minor incompatibilities with previous
+versions, the second number in the version string is incremented. When the
+file format changes, the first number is increased.
@item
-If you would like to use @code{mysqlaccess} and have the @strong{MySQL}
-distribution in some non-standard place, you must change the location where
-@code{mysqlaccess} expects to find the @code{mysql} client. Edit the
-@file{bin/mysqlaccess} script at approximately line 18. Search for a line
-that looks like this:
-
-@example
-$MYSQL = '/usr/local/bin/mysql'; # path to mysql executable
-@end example
-
-Change the path to reflect the location where @code{mysql} actually is
-stored on your system. If you do not do this, you will get a @code{Broken
-pipe} error when you run @code{mysqlaccess}.
+Stable tested releases are meant to appear about 1-2 times a year, but
+if small bugs are found, an update containing only bug fixes will be
+released.
@item
-Create the @strong{MySQL} grant tables (necessary only if you haven't
-installed @strong{MySQL} before):
-@example
-shell> scripts/mysql_install_db
-@end example
-
-Note that @strong{MySQL} versions older than Version 3.22.10 started the
-@strong{MySQL} server when you run @code{mysql_install_db}. This is no
-longer true!
+Working releases are meant to appear about every 1-8 weeks.
@item
-Change ownership of binaries to @code{root} and ownership of the data
-directory to the user that you will run @code{mysqld} as:
-
-@example
-shell> chown -R root /usr/local/mysql
-shell> chown -R mysql /usr/local/mysql/data
-shell> chgrp -R mysql /usr/local/mysql
-@end example
-
-The first command changes the @code{owner} attribute of the files to the
-@code{root} user, the second one changes the @code{owner} attribute of the
-data directory to the @code{mysql} user, and the third one changes the
-@code{group} attribute to the @code{mysql} group.
+Binary distributions for some platforms will be made by us for major releases.
+Other people may make binary distributions for other systems but probably
+less frequently.
@item
-If you want to install support for the Perl @code{DBI}/@code{DBD} interface,
-see @ref{Perl support}.
+We usually make patches available as soon as we have located and fixed
+small bugs.
@item
-If you would like @strong{MySQL} to start automatically when you boot your
-machine, you can copy @code{support-files/mysql.server} to the location where
-your system has its startup files. More information can be found in the
-@code{support-files/mysql.server} script itself and in
-@ref{Automatic start}.
-
-@end enumerate
-
-After everything has been unpacked and installed, you should initialize
-and test your distribution.
-
-You can start the @strong{MySQL} server with the following command:
-
-@example
-shell> bin/safe_mysqld --user=mysql &
-@end example
-
-@xref{safe_mysqld, , @code{safe_mysqld}}.
-
-@xref{Post-installation}.
-
-@cindex RPM file
-@cindex RedHat Package Manager
-@c This node name is special
-@node Linux-RPM, Building clients, Installing binary, Installing binary
-@subsection Linux RPM Notes
-
-The recommended way to install @strong{MySQL} on Linux is by using an RPM
-file. The @strong{MySQL} RPMs are currently being built on a RedHat Version
-6.2 system but should work on other versions of Linux that support @code{rpm}
-and use @code{glibc}.
-
-If you have problems with an RPM file, for example, if you receive the error
-``@code{Sorry, the host 'xxxx' could not be looked up}'', see
-@ref{Binary notes-Linux}.
-
-The RPM files you may want to use are:
-
-@itemize @bullet
-@item @code{MySQL-VERSION.i386.rpm}
-
-The @strong{MySQL} server. You will need this unless you only want to
-connect to a @strong{MySQL} server running on another machine.
-
-@item @code{MySQL-client-VERSION.i386.rpm}
-
-The standard @strong{MySQL} client programs. You probably always want to
-install this package.
-
-@item @code{MySQL-bench-VERSION.i386.rpm}
-
-Tests and benchmarks. Requires Perl and msql-mysql-modules RPMs.
-
-@item @code{MySQL-devel-VERSION.i386.rpm}
-
-Libraries and include files needed if you want to compile other
-@strong{MySQL} clients, such as the Perl modules.
-
-@item @code{MySQL-VERSION.src.rpm}
-
-This contains the source code for all of the above packages. It can also
-be used to try to build RPMs for other architectures (for example, Alpha
-or SPARC).
-@end itemize
-
-To see all files in an RPM package, run:
-@example
-shell> rpm -qpl MySQL-VERSION.i386.rpm
-@end example
-
-To perform a standard minimal installation, run:
-
-@example
-shell> rpm -i MySQL-VERSION.i386.rpm MySQL-client-VERSION.i386.rpm
-@end example
-
-To install just the client package, run:
-
-@example
-shell> rpm -i MySQL-client-VERSION.i386.rpm
-@end example
-
-The RPM places data in @file{/var/lib/mysql}. The RPM also creates the
-appropriate entries in @file{/etc/rc.d/} to start the server automatically
-at boot time. (This means that if you have performed a previous
-installation, you may want to make a copy of your previously installed
-@strong{MySQL} startup file if you made any changes to it, so you don't lose
-your changes.)
-
-After installing the RPM file(s), the @code{mysqld} daemon should be running
-and you should now be able to start using @strong{MySQL}.
-@xref{Post-installation}.
-
-If something goes wrong, you can find more information in the binary
-installation chapter. @xref{Installing binary}.
-
-@cindex client programs, building
-@cindex linking
-@cindex building, client programs
-@cindex programs, client
-@node Building clients, Binary install system issues, Linux-RPM, Installing binary
-@subsection Building Client Programs
-
-If you compile @strong{MySQL} clients that you've written yourself or that
-you obtain from a third party, they must be linked using the
-@code{-lmysqlclient -lz} option on the link command. You may also need to
-specify a @code{-L} option to tell the linker where to find the library. For
-example, if the library is installed in @file{/usr/local/mysql/lib}, use
-@code{-L/usr/local/mysql/lib -lmysqlclient -lz} on the link command.
-
-For clients that use @strong{MySQL} header files, you may need to specify a
-@code{-I} option when you compile them (for example,
-@code{-I/usr/local/mysql/include}), so the compiler can find the header
-files.
-
-@node Binary install system issues, , Building clients, Installing binary
-@subsection System-specific Issues
-
-@menu
-* Binary notes-Linux:: Linux notes for binary distribution
-* Binary notes-HP-UX:: HP-UX notes for binary distribution
-@end menu
-
-The following sections indicate some of the issues that have been observed
-on particular systems when installing @strong{MySQL} from a binary
-distribution or from RPM files.
-
-@cindex binary distributions, on Linux
-@cindex Linux, binary distribution
-@node Binary notes-Linux, Binary notes-HP-UX, Binary install system issues, Binary install system issues
-@subsubsection Linux Notes for Binary Distributions
-
-@strong{MySQL} needs at least Linux Version 2.0.
-
-The binary release is linked with @code{-static}, which means you do not
-normally need to worry about which version of the system libraries you
-have. You need not install LinuxThreads, either. A program linked with
-@code{-static} is slightly bigger than a dynamically linked program but
-also slightly faster (3-5%). One problem, however, is that you can't use
-user-definable functions (UDFs) with a statically linked program. If
-you are going to write or use UDF functions (this is something only for
-C or C++ programmers), you must compile @strong{MySQL} yourself, using
-dynamic linking.
-
-If you are using a @code{libc}-based system (instead of a @code{glibc2}
-system), you will probably get some problems with hostname resolving and
-@code{getpwnam()} with the binary release. (This is because @code{glibc}
-unfortunately depends on some external libraries to resolve hostnames
-and @code{getpwent()}, even when compiled with @code{-static}). In this
-case you probably get the following error message when you run
-@code{mysql_install_db}:
-
-@example
-Sorry, the host 'xxxx' could not be looked up
-@end example
-
-or the following error when you try to run @code{mysqld} with the @code{--user}
-option:
-
-@example
-getpwnam: No such file or directory
-@end example
-
-You can solve this problem in one of the following ways:
+For non-critical but annoying bugs, we will make patches available if they
+are sent to us. Otherwise we will combine many of them into a larger
+patch.
-@itemize @bullet
-@item
-Get a @strong{MySQL} source distribution (an RPM or the @code{tar.gz}
-distribution) and install this instead.
@item
-Execute @code{mysql_install_db --force}; This will not execute the
-@code{resolveip} test in @code{mysql_install_db}. The downside is that
-you can't use host names in the grant tables; you must use IP numbers
-instead (except for @code{localhost}). If you are using an old @strong{MySQL}
-release that doesn't support @code{--force}, you have to remove the
-@code{resolveip} test in @code{mysql_install} with an editor.
-@item
-Start @code{mysqld} with @code{su} instead of using @code{--user}.
+If there is, by any chance, a fatal bug in a release, we will make a new
+release as soon as possible. We would like other companies to do this,
+too.
@end itemize
-The Linux-Intel binary and RPM releases of @strong{MySQL} are configured
-for the highest possible speed. We are always trying to use the fastest
-stable compiler available.
-
-@strong{MySQL} Perl support requires Version Perl 5.004_03 or newer.
-
-On some Linux 2.2 versions, you may get the error @code{Resource
-temporarily unavailable} when you do a lot of new connections to a
-@code{mysqld} server over TCP/IP.
-
-The problem is that Linux has a delay between when you close a TCP/IP
-socket and until this is actually freed by the system. As there is only
-room for a finite number of TCP/IP slots, you will get the above error if
-you try to do too many new TCP/IP connections during a small time, like
-when you run the @strong{MySQL} @file{test-connect} benchmark over
-TCP/IP.
+The current stable release is Version 3.23; we have already moved active
+development to Version 4.0. Bugs will still be fixed in the stable version.
+We don't believe in a complete freeze, as this also leaves out bug fixes
+and things that ``must be done.'' ``Somewhat frozen'' means that we may
+add small things that ``almost surely will not affect anything that's
+already working.''
-We have mailed about this problem a couple of times to different Linux
-mailing lists but have never been able to resolve this properly.
+@node MySQL binaries, , Many versions, General Installation Issues
+@subsection MySQL Binaries Compiled by MySQL AB
-The only known 'fix' to this problem is to use persistent connections in
-your clients or use sockets, if you are running the database server
-and clients on the same machine. We hope that the @code{Linux 2.4}
-kernel will fix this problem in the future.
-
-@cindex HP-UX, binary distribution
-@cindex binary distributions, on HP-UX
-@node Binary notes-HP-UX, , Binary notes-Linux, Binary install system issues
-@subsubsection HP-UX Notes for Binary Distributions
+@cindex binary distributions
-Some of the binary distributions of @strong{MySQL} for HP-UX is
-distributed as an HP depot file and as a tar file. To use the depot
-file you must be running at least HP-UX 10.x to have access to HP's
-software depot tools.
+As a service, we at @strong{MySQL AB} provide a set of binary distributions
+of @strong{MySQL} that are compiled at our site or at sites where customers
+kindly have given us access to their machines.
-The HP version of @strong{MySQL} was compiled on an HP 9000/8xx server
-under HP-UX 10.20, and uses MIT-pthreads. It is known to work well under
-this configuration. @strong{MySQL} Version 3.22.26 and newer can also be
-built with HP's native thread package.
+These distributions are generated with @code{scripts/make_binary_distribution}
+and are configured with the following compilers and options:
-Other configurations that may work:
+@table @asis
+@item SunOS 4.1.4 2 sun4c with @code{gcc} 2.7.2.1
+@code{CC=gcc CXX=gcc CXXFLAGS="-O3 -felide-constructors" ./configure --prefix=/usr/local/mysql --disable-shared --with-extra-charsets=complex --enable-assembler}
-@itemize @bullet
-@item
-HP 9000/7xx running HP-UX 10.20+
-@item
-HP 9000/8xx running HP-UX 10.30
-@end itemize
+@item SunOS 5.5.1 (and above) sun4u with @code{egcs} 1.0.3a or 2.90.27 or gcc 2.95.2 and newer
+@code{CC=gcc CFLAGS="-O3" CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex --enable-assembler}
-The following configurations almost definitely won't work:
+@item SunOS 5.6 i86pc with @code{gcc} 2.8.1
+@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
-@itemize @bullet
-@item
-HP 9000/7xx or 8xx running HP-UX 10.x where x < 2
-@item
-HP 9000/7xx or 8xx running HP-UX 9.x
-@end itemize
+@item Linux 2.0.33 i386 with @code{pgcc} 2.90.29 (@code{egcs} 1.0.3a)
+@code{CFLAGS="-O3 -mpentium -mstack-align-double" CXX=gcc CXXFLAGS="-O3 -mpentium -mstack-align-double -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --with-extra-charsets=complex}
-To install the distribution, use one of the commands below, where
-@code{/path/to/depot} is the full pathname of the depot file:
+@item Linux 2.2.x x686 with @code{gcc} 2.95.2
+@code{CFLAGS="-O3 -mpentiumpro" CXX=gcc CXXFLAGS="-O3 -mpentiumpro -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charsets=complex}
-@itemize @bullet
-@item
-To install everything, including the server, client and development tools:
+@item SCO 3.2v5.0.4 i386 with @code{gcc} 2.7-95q4
+@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
-@example
-shell> /usr/sbin/swinstall -s /path/to/depot mysql.full
-@end example
+@item AIX 2 4 with @code{gcc} 2.7.2.2
+@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
-@item
-To install only the server:
+@item OSF1 V4.0 564 alpha with @code{gcc} 2.8.1
+@code{CC=gcc CFLAGS=-O CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
-@example
-shell> /usr/sbin/swinstall -s /path/to/depot mysql.server
-@end example
+@item Irix 6.3 IP32 with @code{gcc} 2.8.0
+@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
-@item
-To install only the client package:
+@item BSDI BSD/OS 3.1 i386 with @code{gcc} 2.7.2.1
+@code{CC=gcc CXX=gcc CXXFLAGS=-O ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
-@example
-shell> /usr/sbin/swinstall -s /path/to/depot mysql.client
-@end example
+@item BSDI BSD/OS 2.1 i386 with @code{gcc} 2.7.2
+@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
+@end table
-@item
-To install only the development tools:
+Anyone who has better options for any of the configurations listed
+above can always mail them to the developer's mailing list at
+@email{internals@@lists.mysql.com}.
-@example
-shell> /usr/sbin/swinstall -s /path/to/depot mysql.developer
-@end example
-@end itemize
+RPM distributions prior to @strong{MySQL} Version 3.22 are user-contributed.
+Beginning with Version 3.22, the RPMs are generated by us at
+@strong{MySQL AB}.
-The depot places binaries and libraries in @file{/opt/mysql} and data in
-@file{/var/opt/mysql}. The depot also creates the appropriate entries in
-@file{/etc/init.d} and @file{/etc/rc2.d} to start the server automatically
-at boot time. Obviously, this entails being @code{root} to install.
+If you want to compile a debug version of @strong{MySQL}, you should add
+@code{--with-debug} or @code{--with-debug=full} to the above configure lines
+and remove any @code{-fomit-frame-pointer} options.
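+
+For example, a debug build could be configured roughly like this (a
+sketch only; keep whatever other options you use):
+
+@example
+CC=gcc CXX=gcc ./configure --prefix=/usr/local/mysql --with-debug=full --with-extra-charsets=complex
+@end example
+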
-To install the HP-UX tar.gz distribution, you must have a copy of GNU
-@code{tar}.
+@node Installing source, Post-installation, General Installation Issues, Installing
+@section Installing a MySQL Source Distribution
@cindex installing, source distribution
@cindex source distribution, installing
@cindex installation overview
-@node Installing source, Installing source tree, Installing binary, Installing
-@section Installing a MySQL Source Distribution
Before you proceed with the source installation, check first to see if our
binary is available for your platform and if it will work for you. We
@@ -8770,6 +7559,9 @@ of getting a solution to your problem! You will find @code{mysqlbug} in the
* Quick install:: Quick installation overview
* Applying patches:: Applying patches
* configure options:: Typical @code{configure} options
+* Installing source tree::
+* Compilation problems::
+* MIT-pthreads::
@end menu
@node Quick install, Applying patches, Installing source, Installing source
@@ -8951,11 +7743,12 @@ running. @xref{Multiple servers}.
@xref{Post-installation}.
-@cindex patches, applying
-@cindex applying, patches
@node Applying patches, configure options, Quick install, Installing source
@subsection Applying Patches
+@cindex patches, applying
+@cindex applying, patches
+
Sometimes patches appear on the mailing list or are placed in the
@uref{http://www.mysql.com/Downloads/Patches, patches area} of the
@strong{MySQL} Web site.
@@ -8991,14 +7784,15 @@ You may need to bring down any currently running server before you run
systems do not allow you to install a new version of a program if it replaces
the version that is currently executing.
+@node configure options, Installing source tree, Applying patches, Installing source
+@subsection Typical @code{configure} Options
+
+@findex without-server option
@cindex @code{configure} script
@cindex options, configure
@cindex configuration options
-@findex without-server option
@cindex log files
@cindex files, log
-@node configure options, , Applying patches, Installing source
-@subsection Typical @code{configure} Options
The @code{configure} script gives you a great deal of control over how
you configure your @strong{MySQL} distribution. Typically you do this
@@ -9208,12 +8002,12 @@ applications. @xref{Thread-safe clients}.
@item
Options that pertain to particular systems can be found in the
-system-specific sections later in this chapter. @xref{Source install
-system issues}.
+system-specific section of this manual.
+@xref{Operating System Specific Notes}.
@end itemize
-@node Installing source tree, Compilation problems, Installing source, Installing
-@section Installing from the Development Source Tree
+@node Installing source tree, Compilation problems, configure options, Installing source
+@subsection Installing from the Development Source Tree
@cindex development source tree
@cindex BitKeeper tree
@@ -9313,13 +8107,14 @@ a description.
@end enumerate
+@node Compilation problems, MIT-pthreads, Installing source tree, Installing source
+@subsection Problems Compiling?
+
@cindex compiling, problems
@cindex problems, compiling
@cindex reconfiguring
@cindex @code{config.cache} file
@cindex files, @code{config.cache}
-@node Compilation problems, MIT-pthreads, Installing source tree, Installing
-@section Problems Compiling?
All @strong{MySQL} programs compile cleanly for us with no warnings on
Solaris using @code{gcc}. On other systems, warnings may occur due to
@@ -9533,10 +8328,11 @@ If you need to debug @code{mysqld} or a @strong{MySQL} client, run
link your clients with the new client library. @xref{Debugging client}.
@end itemize
+@node MIT-pthreads, , Compilation problems, Installing source
+@subsection MIT-pthreads Notes
+
@cindex MIT-pthreads
@cindex thread support, non-native
-@node MIT-pthreads, Perl support, Compilation problems, Installing
-@section MIT-pthreads Notes
This section describes some of the issues involved in using MIT-pthreads.
@@ -9623,626 +8419,1020 @@ We haven't gotten @code{readline} to work with MIT-pthreads. (This isn't
needed, but may be interesting for someone.)
@end itemize
-@cindex Perl, installing
-@cindex installing, Perl
-@node Perl support, Source install system issues, MIT-pthreads, Installing
-@section Perl Installation Comments
-
-@menu
-* Perl installation:: Installing Perl on Unix
-* ActiveState Perl:: Installing ActiveState Perl on Windows
-* Windows Perl:: Installing the @strong{MySQL} Perl distribution on Windows
-* Perl support problems:: Problems using the Perl @code{DBI}/@code{DBD} interface
-@end menu
-@node Perl installation, ActiveState Perl, Perl support, Perl support
-@subsection Installing Perl on Unix
+@node Post-installation, Upgrade, Installing source, Installing
+@section Post-installation Setup and Testing
-Perl support for @strong{MySQL} is provided by means of the
-@code{DBI}/@code{DBD} client interface. @xref{Perl}. The Perl
-@code{DBD}/@code{DBI} client code requires Perl Version 5.004 or later. The
-interface @strong{will not work} if you have an older version of Perl.
+@cindex post-installation, setup and testing
+@cindex testing, post-installation
+@cindex setup, post-installation
-@strong{MySQL} Perl support also requires that you've installed
-@strong{MySQL} client programming support. If you installed @strong{MySQL}
-from RPM files, client programs are in the client RPM, but client programming
-support is in the developer RPM. Make sure you've installed the latter RPM.
+@menu
+* mysql_install_db:: Problems running @code{mysql_install_db}
+* Starting server:: Problems starting the @strong{MySQL} server
+* Automatic start:: Starting and stopping @strong{MySQL} automatically
+@end menu
-As of Version 3.22.8, Perl support is distributed separately from the main
-@strong{MySQL} distribution. If you want to install Perl support, the files
-you will need can be obtained from
-@uref{http://www.mysql.com/Downloads/Contrib/}.
+Once you've installed @strong{MySQL} (from either a binary or source
+distribution), you need to initialize the grant tables, start the server,
+and make sure that the server works okay. You may also wish to arrange
+for the server to be started and stopped automatically when your system
+starts up and shuts down.
-The Perl distributions are provided as compressed @code{tar} archives and
-have names like @file{MODULE-VERSION.tar.gz}, where @code{MODULE} is the
-module name and @code{VERSION} is the version number. You should get the
-@code{Data-Dumper}, @code{DBI}, and @code{Msql-Mysql-modules} distributions
-and install them in that order. The installation procedure is shown below.
-The example shown is for the @code{Data-Dumper} module, but the procedure is
-the same for all three distributions:
+For an installation from a source distribution, you normally install the
+grant tables and start the server like this:
+@cindex starting, the server
+@cindex server, starting
-@enumerate
-@item
-Unpack the distribution into the current directory:
@example
-shell> gunzip < Data-Dumper-VERSION.tar.gz | tar xvf -
+shell> ./scripts/mysql_install_db
+shell> cd mysql_installation_directory
+shell> ./bin/safe_mysqld --user=mysql &
@end example
-This command creates a directory named @file{Data-Dumper-VERSION}.
-@item
-Change into the top-level directory of the unpacked distribution:
+For a binary distribution (not RPM or pkg packages), do this:
+
@example
-shell> cd Data-Dumper-VERSION
+shell> cd mysql_installation_directory
+shell> ./bin/mysql_install_db
+shell> ./bin/safe_mysqld --user=mysql &
@end example
+This creates the @code{mysql} database, which holds all database
+privileges; the @code{test} database, which you can use to test
+@strong{MySQL}; and privilege entries for the user that runs
+@code{mysql_install_db} and for a @code{root} user (without any passwords).
+It also starts the @code{mysqld} server.
+
+@code{mysql_install_db} will not overwrite any old privilege tables, so
+it should be safe to run under any circumstances. If you don't want the
+@code{test} database, you can remove it with
+@code{mysqladmin -u root drop test}.
+
+Testing is most easily done from the top-level directory of the @strong{MySQL}
+distribution. For a binary distribution, this is your installation directory
+(typically something like @file{/usr/local/mysql}). For a source
+distribution, this is the main directory of your @strong{MySQL} source tree.
+@cindex testing, the server
+
+In the commands shown below in this section and in the following
+subsections, @code{BINDIR} is the path to the location in which programs
+like @code{mysqladmin} and @code{safe_mysqld} are installed. For a
+binary distribution, this is the @file{bin} directory within the
+distribution. For a source distribution, @code{BINDIR} is probably
+@file{/usr/local/bin}, unless you specified an installation directory
+other than @file{/usr/local} when you ran @code{configure}.
+@code{EXECDIR} is the location in which the @code{mysqld} server is
+installed. For a binary distribution, this is the same as
+@code{BINDIR}. For a source distribution, @code{EXECDIR} is probably
+@file{/usr/local/libexec}.
+
+Testing is described in detail below:
+@cindex testing, installation
+
+@enumerate
@item
-Build the distribution and compile everything:
+If necessary, start the @code{mysqld} server and set up the initial
+@strong{MySQL} grant tables containing the privileges that determine how
+users are allowed to connect to the server. This is normally done with the
+@code{mysql_install_db} script:
+
@example
-shell> perl Makefile.PL
-shell> make
-shell> make test
-shell> make install
+shell> scripts/mysql_install_db
@end example
-@end enumerate
-The @code{make test} command is important because it verifies that the
-module is working. Note that when you run that command during the
-@code{Msql-Mysql-modules} installation to exercise the interface code, the
-@strong{MySQL} server must be running or the test will fail.
+Typically, @code{mysql_install_db} needs to be run only the first time you
+install @strong{MySQL}. Therefore, if you are upgrading an existing
+installation, you can skip this step. (However, @code{mysql_install_db} is
+quite safe to use and will not update any tables that already exist, so if
+you are unsure of what to do, you can always run @code{mysql_install_db}.)
-It is a good idea to rebuild and reinstall the @code{Msql-Mysql-modules}
-distribution whenever you install a new release of @strong{MySQL},
-particularly if you notice symptoms such as all your @code{DBI} scripts
-dumping core after you upgrade @strong{MySQL}.
+@code{mysql_install_db} creates six tables (@code{user}, @code{db},
+@code{host}, @code{tables_priv}, @code{columns_priv}, and @code{func}) in the
+@code{mysql} database. A description of the initial privileges is given in
+@ref{Default privileges}. Briefly, these privileges allow the @strong{MySQL}
+@code{root} user to do anything, and allow anybody to create or use databases
+with a name of @code{'test'} or starting with @code{'test_'}.
-If you don't have the right to install Perl modules in the system directory
-or if you to install local Perl modules, the following reference may help
-you:
+If you don't set up the grant tables, the following error will appear in the
+log file when you start the server:
+@tindex host.frm, problems finding
@example
-@uref{http://www.iserver.com/support/contrib/perl5/modules.html}
+mysqld: Can't find file: 'host.frm'
@end example
-Look under the heading
-@code{Installing New Modules that Require Locally Installed Modules}.
+The above may also happen with a binary @strong{MySQL} distribution if you
+don't start @strong{MySQL} with exactly the command @code{./bin/safe_mysqld}!
+@xref{safe_mysqld, , @code{safe_mysqld}}.
-@node ActiveState Perl, Windows Perl, Perl installation, Perl support
-@subsection Installing ActiveState Perl on Windows
-@cindex installing, Perl on Windows
-@cindex Perl, installing on Windows
-@cindex ActiveState Perl
+You might need to run @code{mysql_install_db} as @code{root}. However,
+if you prefer, you can run the @strong{MySQL} server as an unprivileged
+(non-@code{root}) user, provided that user can read and write files in
+the database directory. Instructions for running @strong{MySQL} as an
+unprivileged user are given in @ref{Changing MySQL user, , Changing
+@strong{MySQL} user}.
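+
+As a rough sketch (assuming the standard binary installation layout under
+@file{/usr/local/mysql}), this amounts to giving the @code{mysql} user
+ownership of the data directory and starting the server with the
+@code{--user} option:
+
+@example
+shell> chown -R mysql /usr/local/mysql/data
+shell> bin/safe_mysqld --user=mysql &
+@end example
+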
-To install the @strong{MySQL} @code{DBD} module with ActiveState Perl on
-Windows, you should do the following:
+If you have problems with @code{mysql_install_db}, see
+@ref{mysql_install_db, , @code{mysql_install_db}}.
+
+There are some alternatives to running the @code{mysql_install_db}
+script as it is provided in the @strong{MySQL} distribution:
@itemize @bullet
-@item
-Get ActiveState Perl from
-@uref{http://www.activestate.com/Products/ActivePerl/index.html}
-and install it.
+@item
+You may want to edit @code{mysql_install_db} before running it, to change
+the initial privileges that are installed into the grant tables. This is
+useful if you want to install @strong{MySQL} on a lot of machines with the
+same privileges. In this case, you should probably only need to add a few
+extra @code{INSERT} statements to the @code{mysql.user} and @code{mysql.db}
+tables!
@item
-Open a DOS shell.
+If you want to change things in the grant tables after installing them, you
+can run @code{mysql_install_db}, then use @code{mysql -u root mysql} to
+connect to the grant tables as the @strong{MySQL} @code{root} user and
+issue SQL statements to modify the grant tables directly (see the example
+below).
-@item
-If required, set the HTTP_proxy variable. For example, you might try:
+@item
+It is possible to re-create the grant tables completely after they have
+already been created. You might want to do this if you've already installed
+the tables but then want to re-create them after editing
+@code{mysql_install_db}.
+@end itemize
-@example
-set HTTP_proxy=my.proxy.com:3128
-@end example
+For more information about these alternatives, see @ref{Default privileges}.
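+
+For instance, following the second alternative above, you could connect
+to the grant tables and set a password for the @code{root} entries (a
+sketch only; adapt the statements to your needs):
+
+@example
+shell> mysql -u root mysql
+mysql> UPDATE user SET Password=PASSWORD('new_password') WHERE user='root';
+mysql> FLUSH PRIVILEGES;
+@end example
+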
@item
-Start the PPM program:
+Start the @strong{MySQL} server like this:
@example
-C:\> c:\perl\bin\ppm.pl
+shell> cd mysql_installation_directory
+shell> bin/safe_mysqld &
@end example
+If you have problems starting the server, see @ref{Starting server}.
+
@item
-If you have not already done so, install @code{DBI}:
+Use @code{mysqladmin} to verify that the server is running. The following
+commands provide a simple test to check that the server is up and responding
+to connections:
@example
-ppm> install DBI
+shell> BINDIR/mysqladmin version
+shell> BINDIR/mysqladmin variables
@end example
-@item
-If this succeeds, run the following command:
+The output from @code{mysqladmin version} varies slightly depending on your
+platform and version of @strong{MySQL}, but should be similar to that shown
+below:
@example
-install ftp://ftp.de.uu.net/pub/CPAN/authors/id/JWIED/DBD-mysql-1.2212.x86.ppd
+shell> BINDIR/mysqladmin version
+mysqladmin Ver 8.14 Distrib 3.23.32, for linux on i586
+Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+This software comes with ABSOLUTELY NO WARRANTY. This is free software,
+and you are welcome to modify and redistribute it under the GPL license
+
+Server version 3.23.32-debug
+Protocol version 10
+Connection Localhost via Unix socket
+TCP port 3306
+UNIX socket /tmp/mysql.sock
+Uptime: 16 sec
+
+Threads: 1 Questions: 9 Slow queries: 0 Opens: 7 Flush tables: 2 Open tables: 0 Queries per second avg: 0.000 Memory in use: 132K Max memory used: 16773K
@end example
-@end itemize
-The above should work at least with ActiveState Perl Version 5.6.
+To get a feeling for what else you can do with @code{BINDIR/mysqladmin},
+invoke it with the @code{--help} option.
-If you can't get the above to work, you should instead install the
-@strong{MyODBC} driver and connect to @strong{MySQL} server through
-ODBC:
+@item
+Verify that you can shut down the server:
+@cindex server, shutdown
+@cindex shutting down, the server
@example
-use DBI;
-$dbh= DBI->connect("DBI:ODBC:$dsn","$user","$password") ||
- die "Got error $DBI::errstr when connecting to $dsn\n";
+shell> BINDIR/mysqladmin -u root shutdown
@end example
-@node Windows Perl, Perl support problems, ActiveState Perl, Perl support
-@subsection Installing the MySQL Perl Distribution on Windows
+@item
+Verify that you can restart the server. Do this using @code{safe_mysqld} or
+by invoking @code{mysqld} directly. For example:
-The @strong{MySQL} Perl distribution contains @code{DBI},
-@code{DBD:MySQL} and @code{DBD:ODBC}.
+@cindex server, restart
+@cindex restarting, the server
-@itemize @bullet
-@item
-Get the Perl distribution for Windows from
-@uref{http://www.mysql.com/download.html}.
+@example
+shell> BINDIR/safe_mysqld --log &
+@end example
-@item
-Unzip the distribution in @code{C:} so that you get a @file{C:\PERL} directory.
+If @code{safe_mysqld} fails, try running it from the @strong{MySQL}
+installation directory (if you are not already there). If that doesn't work,
+see @ref{Starting server}.
@item
-Add the directory @file{C:\PERL\BIN} to your path.
+Run some simple tests to verify that the server is working.
+The output should be similar to what is shown below:
-@item
-Add the directory @file{C:\PERL\BIN\MSWIN32-x86-thread} or
-@file{C:\PERL\BIN\MSWIN32-x86} to your path.
+@example
+shell> BINDIR/mysqlshow
++-----------+
+| Databases |
++-----------+
+| mysql |
++-----------+
-@item
-Test that @code{perl} works by executing @code{perl -v} in a DOS shell.
-@end itemize
+shell> BINDIR/mysqlshow mysql
+Database: mysql
++--------------+
+| Tables |
++--------------+
+| columns_priv |
+| db |
+| func |
+| host |
+| tables_priv |
+| user |
++--------------+
-@cindex problems, installing Perl
-@cindex Perl DBI/DBD, installation problems
-@node Perl support problems, , Windows Perl, Perl support
-@subsection Problems Using the Perl @code{DBI}/@code{DBD} Interface
+shell> BINDIR/mysql -e "select host,db,user from db" mysql
++------+--------+------+
+| host | db | user |
++------+--------+------+
+| % | test | |
+| % | test_% | |
++------+--------+------+
+@end example
-If Perl reports that it can't find the @file{../mysql/mysql.so} module,
-then the problem is probably that Perl can't locate the shared library
-@file{libmysqlclient.so}.
+There is also a benchmark suite in the @file{sql-bench} directory (under the
+@strong{MySQL} installation directory) that you can use to compare how
+@strong{MySQL} performs on different platforms. The @file{sql-bench/Results}
+directory contains the results from many runs against different databases and
+platforms. To run all tests, execute these commands:
-You can fix this by any of the following methods:
+@example
+shell> cd sql-bench
+shell> run-all-tests
+@end example
-@itemize @bullet
-@item
-Compile the @code{Msql-Mysql-modules} distribution with @code{perl
-Makefile.PL -static -config} rather than @code{perl Makefile.PL}.
+If you don't have the @file{sql-bench} directory, you are probably using an
+RPM for a binary distribution. (Source distribution RPMs include the
+benchmark directory.) In this case, you must first install the benchmark
+suite before you can use it. Beginning with @strong{MySQL} Version 3.22,
+there are benchmark RPM files named @file{mysql-bench-VERSION-i386.rpm} that
+contain benchmark code and data.
-@item
-Copy @code{libmysqlclient.so} to the directory where your other shared
-libraries are located (probably @file{/usr/lib} or @file{/lib}).
+If you have a source distribution, you can also run the tests in the
+@file{tests} subdirectory. For example, to run @file{auto_increment.tst}, do
+this:
-@item
-On Linux you can add the pathname of the directory where
-@file{libmysqlclient.so} is located to the @file{/etc/ld.so.conf} file.
+@example
+shell> BINDIR/mysql -vvf test < ./tests/auto_increment.tst
+@end example
-@tindex LD_RUN_PATH environment variable
-@tindex Environment variable, LD_RUN_PATH
-@item
-Add the pathname of the directory where @file{libmysqlclient.so} is located
-to the @code{LD_RUN_PATH} environment variable.
-@end itemize
+The expected results are shown in the @file{./tests/auto_increment.res} file.
+@end enumerate
-If you get the following errors from @code{DBD-mysql},
-you are probably using @code{gcc} (or using an old binary compiled with
-@code{gcc}):
+@node mysql_install_db, Starting server, Post-installation, Post-installation
+@subsection Problems Running @code{mysql_install_db}
+@cindex @code{mysql_install_db} script
+@cindex scripts, @code{mysql_install_db}
+
+The purpose of the @code{mysql_install_db} script is to generate new
+@strong{MySQL} privilege tables. It will not affect any other data!
+It will also not do anything if you already have @strong{MySQL} privilege
+tables installed!
+
+If you want to re-create your privilege tables, you should take down
+the @code{mysqld} server, if it's running, and then do something like:
@example
-/usr/bin/perl: can't resolve symbol '__moddi3'
-/usr/bin/perl: can't resolve symbol '__divdi3'
+mv mysql-data-directory/mysql mysql-data-directory/mysql-old
+mysql_install_db
@end example
-Add @code{-L/usr/lib/gcc-lib/... -lgcc} to the link command when the
-@file{mysql.so} library gets built (check the output from @code{make} for
-@file{mysql.so} when you compile the Perl client). The @code{-L} option
-should specify the pathname of the directory where @file{libgcc.a} is located
-on your system.
+This section lists problems you might encounter when you run
+@code{mysql_install_db}:
-Another cause of this problem may be that Perl and @strong{MySQL} aren't both
-compiled with @code{gcc}. In this case, you can solve the mismatch by
-compiling both with @code{gcc}.
+@table @strong
+@item @code{mysql_install_db} doesn't install the grant tables
-If you get the following error from @code{Msql-Mysql-modules}
-when you run the tests:
+You may find that @code{mysql_install_db} fails to install the grant
+tables and terminates after displaying the following messages:
@example
-t/00base............install_driver(mysql) failed: Can't load '../blib/arch/auto/DBD/mysql/mysql.so' for module DBD::mysql: ../blib/arch/auto/DBD/mysql/mysql.so: undefined symbol: uncompress at /usr/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
+starting mysqld daemon with databases from XXXXXX
+mysql daemon ended
@end example
-it means that you need to include the compression library, -lz, to the
-link line. This can be doing the following change in the file
-@file{lib/DBD/mysql/Install.pm}:
+In this case, you should examine the log file very carefully! The log
+should be located in the directory @file{XXXXXX} named by the error message,
+and should indicate why @code{mysqld} didn't start. If you don't understand
+what happened, include the log when you post a bug report using
+@code{mysqlbug}!
+@xref{Bug reports}.
-@example
-$sysliblist .= " -lm";
+@item There is already a @code{mysqld} daemon running
-to
+In this case, you probably don't have to run @code{mysql_install_db} at
+all. You have to run @code{mysql_install_db} only once, when you install
+@strong{MySQL} the first time.
-$sysliblist .= " -lm -lz";
-@end example
+@item Installing a second @code{mysqld} daemon doesn't work when one daemon is running
-After this, you MUST run 'make realclean' and then proceed with the
-installation from the beginning.
+This can happen when you already have an existing @strong{MySQL}
+installation, but want to put a new installation in a different place (for
+example, for testing, or perhaps you simply want to run two installations at
+the same time). Generally the problem that occurs when you try to run the
+second server is that it tries to use the same socket and port as the old one.
+In this case you will get the error message: @code{Can't start server: Bind on
+TCP/IP port: Address already in use} or @code{Can't start server : Bind on
+unix socket...}. @xref{Installing many servers}.
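+
+For example, one way to start a second server on its own port and socket
+is shown below (an illustrative sketch only; the port number and socket
+path are arbitrary, and a separate data directory is also needed):
+
+@example
+shell> BINDIR/safe_mysqld --port=3307 --socket=/tmp/mysqld-second.sock &
+@end example
+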
-If you want to use the Perl module on a system that doesn't support dynamic
-linking (like SCO) you can generate a static version of Perl that includes
-@code{DBI} and @code{DBD-mysql}. The way this works is that you generate a
-version of Perl with the @code{DBI} code linked in and install it on top of
-your current Perl. Then you use that to build a version of Perl that
-additionally has the @code{DBD} code linked in, and install that.
+@item You don't have write access to @file{/tmp}
+@cindex write access, tmp
+@cindex temporary file, write access
+@cindex files, @code{tmp}
-On SCO, you must have the following environment variables set:
+If you don't have write access to create a socket file at the default place
+(in @file{/tmp}) or permission to create temporary files in @file{/tmp},
+you will get an error when running @code{mysql_install_db} or when
+starting or using @code{mysqld}.
+You can specify a different socket and temporary directory as follows:
+
+@tindex TMPDIR environment variable
+@tindex MYSQL_UNIX_PORT environment variable
+@tindex Environment variable, TMPDIR
+@tindex Environment variable, MYSQL_UNIX_PORT
@example
-shell> LD_LIBRARY_PATH=/lib:/usr/lib:/usr/local/lib:/usr/progressive/lib
-or
-shell> LD_LIBRARY_PATH=/usr/lib:/lib:/usr/local/lib:/usr/ccs/lib:/usr/progressive/lib:/usr/skunk/lib
-shell> LIBPATH=/usr/lib:/lib:/usr/local/lib:/usr/ccs/lib:/usr/progressive/lib:/usr/skunk/lib
-shell> MANPATH=scohelp:/usr/man:/usr/local1/man:/usr/local/man:/usr/skunk/man:
+shell> TMPDIR=/some_tmp_dir/
+shell> MYSQL_UNIX_PORT=/some_tmp_dir/mysqld.sock
+shell> export TMPDIR MYSQL_UNIX_PORT
@end example
-First, create a Perl that includes a statically linked @code{DBI} by running
-these commands in the directory where your @code{DBI} distribution is
-located:
+@file{some_tmp_dir} should be the path to some directory for which you
+have write permission. @xref{Environment variables}.
+
+After this you should be able to run @code{mysql_install_db} and start
+the server with these commands:
@example
-shell> perl Makefile.PL -static -config
-shell> make
-shell> make install
-shell> make perl
+shell> scripts/mysql_install_db
+shell> BINDIR/safe_mysqld &
@end example
-Then you must install the new Perl. The output of @code{make perl} will
-indicate the exact @code{make} command you will need to execute to perform
-the installation. On SCO, this is @code{make -f Makefile.aperl inst_perl
-MAP_TARGET=perl}.
+@item @code{mysqld} crashes immediately
-Next, use the just-created Perl to create another Perl that also includes a
-statically-linked @code{DBD::mysql} by running these commands in the
-directory where your @code{Msql-Mysql-modules} distribution is located:
+If you are running RedHat Version 5.0 with a version of @code{glibc} older than
+2.0.7-5, you should make sure you have installed all @code{glibc} patches!
+There is a lot of information about this in the @strong{MySQL} mail
+archives. Links to the mail archives are available online at
+@uref{http://www.mysql.com/documentation/}.
+Also, see @ref{Linux}.
+
+You can also start @code{mysqld} manually using the @code{--skip-grant-tables}
+option and add the privilege information yourself using @code{mysql}:
@example
-shell> perl Makefile.PL -static -config
-shell> make
-shell> make install
-shell> make perl
+shell> BINDIR/safe_mysqld --skip-grant-tables &
+shell> BINDIR/mysql -u root mysql
@end example
-Finally, you should install this new Perl. Again, the output of @code{make
-perl} indicates the command to use.
+From @code{mysql}, manually execute the SQL commands in
+@code{mysql_install_db}. Make sure you run @code{mysqladmin
+flush-privileges} or @code{mysqladmin reload} afterward to tell the server to
+reload the grant tables.
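+
+For example (using @code{BINDIR} as in the earlier examples):
+
+@example
+shell> BINDIR/mysqladmin flush-privileges
+@end example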
+@end table
-@node Source install system issues, Windows, Perl support, Installing
-@section System-specific Issues
+@node Starting server, Automatic start, mysql_install_db, Post-installation
+@subsection Problems Starting the MySQL Server
+@cindex server, starting problems
+@cindex problems, starting the server
-The following sections indicate some of the issues that have been observed to
-occur on particular systems when installing @strong{MySQL} from a source
-distribution.
+If you are going to use tables that support transactions (BDB, InnoDB),
+you should first create a @file{my.cnf} file and set startup options
+for the table types you plan to use. @xref{Table types}.
-@menu
-* Solaris:: Solaris notes
-* Solaris 2.7:: Solaris 2.7 / 2.8 notes
-* Solaris x86:: Solaris x86 notes
-* SunOS:: SunOS 4 notes
-* Linux:: Linux notes (all Linux versions)
-* Alpha-DEC-UNIX:: Alpha-DEC-UNIX notes
-* Alpha-DEC-OSF1:: Alpha-DEC-OSF1 notes
-* SGI-Irix:: SGI-Irix notes
-* FreeBSD:: FreeBSD notes
-* NetBSD:: NetBSD notes
-* OpenBSD:: OpenBSD 2.5 notes
-* BSDI:: BSD/OS notes
-* SCO:: SCO notes
-* SCO Unixware:: SCO Unixware 7.0 notes
-* IBM-AIX:: IBM-AIX notes
-* HP-UX 10.20:: HP-UX 10.20 notes
-* HP-UX 11.x:: HP-UX 11.x notes
-* Mac OS X:: Mac OS X notes
-* BEOS:: BeOS Notes
-@end menu
+Generally, you start the @code{mysqld} server in one of three ways:
+@itemize @bullet
+@item
+By invoking @code{mysql.server}. This script is used primarily at
+system startup and shutdown, and is described more fully in
+@ref{Automatic start}.
-@cindex Solaris installation problems
-@cindex problems, installing on Solaris
-@cindex tar, problems on Solaris
-@cindex errors, directory checksum
-@cindex checksum errors
-@node Solaris, Solaris 2.7, Source install system issues, Source install system issues
-@subsection Solaris Notes
+@item
+By invoking @code{safe_mysqld}, which tries to determine the proper options
+for @code{mysqld} and then runs it with those options. @xref{safe_mysqld, ,
+@code{safe_mysqld}}.
-On Solaris, you may run into trouble even before you get the @strong{MySQL}
-distribution unpacked! Solaris @code{tar} can't handle long file names, so
-you may see an error like this when you unpack @strong{MySQL}:
+@item
+On NT you should install @code{mysqld} as a service as follows:
+@example
+bin\mysqld-nt --install # Install MySQL as a service
+@end example
+You can now start/stop @code{mysqld} as follows:
@example
-x mysql-3.22.12-beta/bench/Results/ATIS-mysql_odbc-NT_4.0-cmp-db2,informix,ms-sql,mysql,oracle,solid,sybase, 0 bytes, 0 tape blocks
-tar: directory checksum error
+NET START mysql
+NET STOP mysql
@end example
-In this case, you must use GNU @code{tar} (@code{gtar}) to unpack the
-distribution. You can find a precompiled copy for Solaris at
-@uref{http://www.mysql.com/Downloads/}.
+Note that in this case you can't use any other options for @code{mysqld}!
-Sun native threads work only on Solaris 2.5 and higher. For Version 2.4 and
-earlier, @strong{MySQL} will automatically use MIT-pthreads.
-@xref{MIT-pthreads}.
+You can remove the service as follows:
+@example
+bin\mysqld-nt --remove # remove MySQL as a service
+@end example
-If you get the following error from configure:
+@item
+By invoking @code{mysqld} directly.
+@end itemize
+
+When the @code{mysqld} daemon starts up, it changes directory to the
+data directory. This is where it expects to write log files and the pid
+(process ID) file, and where it expects to find databases.
+
+The data directory location is hardwired in when the distribution is
+compiled. However, if @code{mysqld} expects to find the data directory
+somewhere other than where it really is on your system, it will not work
+properly. If you have problems with incorrect paths, you can find out
+what options @code{mysqld} allows and what the default path settings are by
+invoking @code{mysqld} with the @code{--help} option. You can override the
+defaults by specifying the correct pathnames as command-line arguments to
+@code{mysqld}. (These options can be used with @code{safe_mysqld} as well.)
+
+Normally you should need to tell @code{mysqld} only the base directory under
+which @strong{MySQL} is installed. You can do this with the @code{--basedir}
+option. You can also use @code{--help} to check the effect of changing path
+options (note that @code{--help} @emph{must} be the final option of the
+@code{mysqld} command). For example:
@example
-checking for restartable system calls... configure: error can not run test
-programs while cross compiling
+shell> EXECDIR/mysqld --basedir=/usr/local --help
@end example
-This means that you have something wrong with your compiler installation!
-In this case you should upgrade your compiler to a newer version. You may
-also be able to solve this problem by inserting the following row into the
-@file{config.cache} file:
+Once you determine the path settings you want, start the server without
+the @code{--help} option.
+
+Whichever method you use to start the server, if it fails to start up
+correctly, check the log file to see if you can find out why. Log files
+are located in the data directory (typically
+@file{/usr/local/mysql/data} for a binary distribution,
+@file{/usr/local/var} for a source distribution, or
+@file{\mysql\data\mysql.err} on Windows). Look in the data directory for
+files with names of the form @file{host_name.err} and
+@file{host_name.log} where @code{host_name} is the name of your server
+host. Then check the last few lines of these files:
@example
-ac_cv_sys_restartable_syscalls=$@{ac_cv_sys_restartable_syscalls='no'@}
+shell> tail host_name.err
+shell> tail host_name.log
@end example
-If you are using Solaris on a SPARC, the recommended compiler is
-@code{gcc} 2.95.2. You can find this at @uref{http://gcc.gnu.org/}.
-Note that @code{egcs} 1.1.1 and @code{gcc} 2.8.1 don't work reliably on
-SPARC!
+If you find something like the following in the log file:
+@example
+000729 14:50:10 bdb: Recovery function for LSN 1 27595 failed
+000729 14:50:10 bdb: warning: ./test/t1.db: No such file or directory
+000729 14:50:10 Can't init databases
+@end example
-The recommended @code{configure} line when using @code{gcc} 2.95.2 is:
+This means that you didn't start @code{mysqld} with @code{--bdb-no-recover}
+and Berkeley DB found something wrong with its log files when it
+tried to recover your databases. To be able to continue, you should
+move the old Berkeley DB log files out of the database directory to
+some other place, where you can examine them later. The log files are
+named @file{log.0000000001}, and the number increases over time.
+
+If you are running @code{mysqld} with BDB table support and @code{mysqld}
+dumps core at startup, this could be because of problems with the BDB
+recovery log. In this case you can try starting @code{mysqld} with
+@code{--bdb-no-recover}. If this helps, you should then remove all
+@file{log.*} files from the data directory and try starting @code{mysqld}
+again.
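+
+For example, an illustrative invocation (using @code{BINDIR} as in the
+earlier examples) might look like this:
+
+@example
+shell> BINDIR/safe_mysqld --bdb-no-recover &
+@end example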
+
+If you get the following error, it means that some other program (or another
+@code{mysqld} server) is already using the TCP/IP port or socket
+@code{mysqld} is trying to use:
@example
-CC=gcc CFLAGS="-O3" \
-CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" \
-./configure --prefix=/usr/local/mysql --with-low-memory --enable-assembler
+Can't start server: Bind on TCP/IP port: Address already in use
+ or
+Can't start server : Bind on unix socket...
@end example
-If you have a ultra sparc, you can get 4 % more performance by adding
-"-mcpu=v8 -Wa,-xarch=v8plusa" to CFLAGS and CXXFLAGS.
+Use @code{ps} to make sure that you don't have another @code{mysqld} server
+running. If you can't find another server running, you can try to execute
+the command @code{telnet your-host-name tcp-ip-port-number} and press
+@code{RETURN} a couple of times. If you don't get an error message like
+@code{telnet: Unable to connect to remote host: Connection refused},
+something is using the TCP/IP port @code{mysqld} is trying to use.
+See @ref{mysql_install_db} and @ref{Multiple servers}.
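+
+For example, to check the default @strong{MySQL} port (3306), you might run:
+
+@example
+shell> telnet your-host-name 3306
+@end example
+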
-If you have the Sun Workshop (SunPro) 4.2 (or newer) compiler, you can
-run @code{configure} like this:
+If @code{mysqld} is currently running, you can find out what path settings
+it is using by executing this command:
@example
-CC=cc CFLAGS="-Xa -fast -xO4 -native -xstrconst -mt" \
-CXX=CC CXXFLAGS="-noex -xO4 -mt" \
-./configure --prefix=/usr/local/mysql --enable-assembler
+shell> mysqladmin variables
@end example
-You may also have to edit the @code{configure} script to change this line:
+or
@example
-#if !defined(__STDC__) || __STDC__ != 1
+shell> mysqladmin -h 'your-host-name' variables
@end example
-to this:
+If @code{safe_mysqld} starts the server but you can't connect to it,
+you should make sure you have an entry in @file{/etc/hosts} that looks like
+this:
@example
-#if !defined(__STDC__)
+127.0.0.1 localhost
@end example
-If you turn on @code{__STDC__} with the @code{-Xc} option, the Sun compiler
-can't compile with the Solaris @file{pthread.h} header file. This is a Sun
-bug (broken compiler or broken include file).
+This problem occurs only on systems that don't have a working thread
+library and for which @strong{MySQL} must be configured to use MIT-pthreads.
-If @code{mysqld} issues the error message shown below when you run it, you have
-tried to compile @strong{MySQL} with the Sun compiler without enabling the
-multi-thread option (@code{-mt}):
+If you can't get @code{mysqld} to start, you can try to make a trace file
+to find the problem. @xref{Making trace files}.
-@example
-libc internal error: _rmutex_unlock: rmutex not held
-@end example
+If you are using InnoDB tables, refer to the InnoDB-specific startup
+options. @xref{InnoDB start}.
-Add @code{-mt} to @code{CFLAGS} and @code{CXXFLAGS} and try again.
+If you are using BDB (Berkeley DB) tables, you should familiarize
+yourself with the different BDB specific startup options. @xref{BDB start}.
-If you get the following error when compiling @strong{MySQL} with @code{gcc},
-it means that your @code{gcc} is not configured for your version of Solaris:
+@node Automatic start, , Starting server, Post-installation
+@subsection Starting and Stopping MySQL Automatically
+@cindex starting, the server automatically
+@cindex stopping, the server
+@cindex server, starting and stopping
+
+The @code{mysql.server} and @code{safe_mysqld} scripts can be used to start
+the server automatically at system startup time. @code{mysql.server} can also
+be used to stop the server.
+
+The @code{mysql.server} script can be used to start or stop the server
+by invoking it with @code{start} or @code{stop} arguments:
@example
-shell> gcc -O3 -g -O2 -DDBUG_OFF -o thr_alarm ...
-./thr_alarm.c: In function `signal_hand':
-./thr_alarm.c:556: too many arguments to function `sigwait'
+shell> mysql.server start
+shell> mysql.server stop
@end example
-The proper thing to do in this case is to get the newest version of
-@code{gcc} and compile it with your current @code{gcc} compiler! At
-least for Solaris 2.5, almost all binary versions of @code{gcc} have
-old, unusable include files that will break all programs that use
-threads (and possibly other programs)!
+@code{mysql.server} can be found in the @file{share/mysql} directory
+under the @strong{MySQL} installation directory or in the @file{support-files}
+directory of the @strong{MySQL} source tree.
-Solaris doesn't provide static versions of all system libraries
-(@code{libpthreads} and @code{libdl}), so you can't compile @strong{MySQL}
-with @code{--static}. If you try to do so, you will get the error:
+Before @code{mysql.server} starts the server, it changes directory to
+the @strong{MySQL} installation directory, then invokes @code{safe_mysqld}.
+You might need to edit @code{mysql.server} if you have a binary distribution
+that you've installed in a non-standard location. Modify it to @code{cd}
+into the proper directory before it runs @code{safe_mysqld}. If you want the
+server to run as some specific user, add an appropriate @code{user} line
+to the @file{/etc/my.cnf} file, as shown later in this section.
-@example
-ld: fatal: library -ldl: not found
+@code{mysql.server stop} brings down the server by sending a signal to it.
+You can take down the server manually by executing @code{mysqladmin shutdown}.
-or
+You might want to add these start and stop commands to the appropriate places
+in your @file{/etc/rc*} files when you start using @strong{MySQL} for
+production applications. Note that if you modify @code{mysql.server} and
+later upgrade @strong{MySQL}, your modified version will be overwritten,
+so you should make a copy of your edited version that you can reinstall.
-undefined reference to `dlopen'
+If your system uses @file{/etc/rc.local} to start external scripts, you
+should append the following to it:
+@example
+/bin/sh -c 'cd /usr/local/mysql ; ./bin/safe_mysqld --user=mysql &'
@end example
-If too many processes try to connect very rapidly to @code{mysqld}, you will
-see this error in the @strong{MySQL} log:
+You can also add options for @code{mysql.server} in a global
+@file{/etc/my.cnf} file. A typical @file{/etc/my.cnf} file might look like
+this:
@example
-Error in accept: Protocol error
+[mysqld]
+datadir=/usr/local/mysql/var
+socket=/tmp/mysqld.sock
+port=3306
+user=mysql
+
+[mysql.server]
+basedir=/usr/local/mysql
@end example
-You might try starting the server with the @code{--set-variable back_log=50}
-option as a workaround for this. @xref{Command-line options}.
+The @code{mysql.server} script understands the following options:
+@code{datadir}, @code{basedir}, and @code{pid-file}.
-If you are linking your own @strong{MySQL} client, you might get the
-following error when you try to execute it:
+The following table shows which option groups each of the startup scripts
+reads from option files:
-@example
-ld.so.1: ./my: fatal: libmysqlclient.so.#: open failed: No such file or directory
-@end example
+@multitable @columnfractions .20 .80
+@item @strong{Script} @tab @strong{Option groups}
+@item @code{mysqld} @tab @code{mysqld} and @code{server}
+@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
+@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
+@end multitable
-The problem can be avoided by one of the following methods:
+@xref{Option files}.
+
+@node Upgrade, Operating System Specific Notes, Post-installation, Installing
+@section Upgrading/Downgrading MySQL
+
+@cindex upgrading
+@cindex downgrading
+
+You can always move the @strong{MySQL} form (@file{.frm}) and data files
+between different versions on the same architecture, as long as you have
+the same base version of @strong{MySQL}. The current base version is
+3. If you change the character set when running @strong{MySQL} (which may
+also change the sort order), you must run @code{myisamchk -r -q} on all
+tables. Otherwise your indexes may not be ordered correctly.
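+
+For example, one way to do this for all @code{MyISAM} tables is shown below
+(a sketch only; adjust the path to your own data directory):
+
+@example
+shell> myisamchk -r -q /usr/local/mysql/data/*/*.MYI
+@end example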
+
+If you are afraid of new versions, you can always rename your old
+@code{mysqld} to something like @code{mysqld}-'old-version-number'. If
+your new @code{mysqld} then does something unexpected, you can simply shut it
+down and restart with your old @code{mysqld}!
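+
+For example (the version number here is only an illustration):
+
+@example
+shell> mv BINDIR/mysqld BINDIR/mysqld-3.22.32
+@end example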
+
+When you do an upgrade you should also back up your old databases, of course.
+
+If after an upgrade, you experience problems with recompiled client programs,
+like @code{Commands out of sync} or unexpected core dumps, you probably have
+used an old header or library file when compiling your programs. In this
+case you should check the date for your @file{mysql.h} file and
+@file{libmysqlclient.a} library to verify that they are from the new
+@strong{MySQL} distribution. If not, please recompile your programs!
+
+If you have problems, such as the new @code{mysqld} server refusing to
+start or not being able to connect without a password, check that you don't
+have some old @file{my.cnf} file left over from your old installation! You can
+check this with: @code{program-name --print-defaults}. If this outputs
+anything other than the program name, you have an active @file{my.cnf}
+file that will affect things!
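+
+For example, to check this for the server (any other @strong{MySQL} program
+can be checked the same way):
+
+@example
+shell> mysqld --print-defaults
+@end example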
+
+It is a good idea to rebuild and reinstall the @code{Msql-Mysql-modules}
+distribution whenever you install a new release of @strong{MySQL},
+particularly if you notice symptoms such as all your @code{DBI} scripts
+dumping core after you upgrade @strong{MySQL}.
+
+@menu
+* Upgrading-from-3.22:: Upgrading from a 3.22 version to 3.23
+* Upgrading-from-3.21:: Upgrading from a 3.21 version to 3.22
+* Upgrading-from-3.20:: Upgrading from a 3.20 version to 3.21
+* Upgrading-to-arch:: Upgrading to another architecture
+@end menu
+
+@node Upgrading-from-3.22, Upgrading-from-3.21, Upgrade, Upgrade
+@subsection Upgrading From Version 3.22 to Version 3.23
+
+@cindex compatibility, between MySQL versions
+@cindex upgrading, 3.22 to 3.23
+
+@strong{MySQL} Version 3.23 supports tables of the new @code{MyISAM} type and
+the old @code{ISAM} type. You don't have to convert your old tables to
+use these with Version 3.23. By default, all new tables will be created with
+type @code{MyISAM} (unless you start @code{mysqld} with the
+@code{--default-table-type=isam} option). You can change an @code{ISAM}
+table to a @code{MyISAM} table with @code{ALTER TABLE table_name TYPE=MyISAM}
+or the Perl script @code{mysql_convert_table_format}.
+
+Version 3.22 and 3.21 clients will work without any problems with a Version
+3.23 server.
+
+The following list describes what you have to watch out for when upgrading to
+Version 3.23:
@itemize @bullet
@item
-Link the client with the following flag (instead of @code{-Lpath}):
-@code{-Wl,r/full-path-to-libmysqlclient.so}.
+All tables that use the @code{tis620} character set must be fixed
+with @code{myisamchk -r} or @code{REPAIR TABLE}.
@item
-Copy @file{libmysqclient.so} to @file{/usr/lib}.
+If you do a @code{DROP DATABASE} on a symbolically linked database, both the
+link and the original database are deleted. (This didn't happen in 3.22,
+because @code{configure} didn't detect the @code{readlink} system call.)
-@tindex LD_RUN_PATH environment variable
-@tindex Environment variable, LD_RUN_PATH
@item
-Add the pathname of the directory where @file{libmysqlclient.so} is located
-to the @code{LD_RUN_PATH} environment variable before running your client.
-@end itemize
+@code{OPTIMIZE TABLE} now works only for @code{MyISAM} tables.
+For other table types, you can use @code{ALTER TABLE} to optimize the table.
+During @code{OPTIMIZE TABLE}, the table is now locked against other threads.
-When using the @code{--with-libwrap} configure option, you must also
-include the libraries that @file{libwrap.a} needs:
+@item
+The @strong{MySQL} client @code{mysql} is now by default started with the
+option @code{--no-named-commands (-g)}. This option can be disabled with
+@code{--enable-named-commands (-G)}. This may cause incompatibility problems in
+some cases, for example in SQL scripts that use named commands without a
+semicolon! Long format commands still work from the first line.
-@example
---with-libwrap="/opt/NUtcpwrapper-7.6/lib/libwrap.a -lnsl -lsocket
-@end example
+@item
+If you are using the @code{german} character sort order, you must repair
+all your tables with @code{isamchk -r}, as we have made some changes in
+the sort order!
-If you have problems with configure trying to link with @code{-lz} and
-you don't have @code{zlib} installed, you have two options:
+@item
+The default return type of @code{IF} will now depend on both arguments
+and not only the first argument.
+
+@item
+@code{AUTO_INCREMENT} will not work with negative numbers. The reason
+for this is that negative numbers caused problems when wrapping from -1 to 0.
+For MyISAM tables, @code{AUTO_INCREMENT} is now handled at a lower level and
+is much faster than before. For MyISAM tables, old numbers are also no longer
+reused, even if you delete some rows from the table.
+
+@item
+@code{CASE}, @code{DELAYED}, @code{ELSE}, @code{END}, @code{FULLTEXT},
+@code{INNER}, @code{RIGHT}, @code{THEN} and @code{WHEN} are now reserved words.
+
+@item
+@code{FLOAT(X)} is now a true floating-point type and not a value with a
+fixed number of decimals.
+
+@item
+When declaring @code{DECIMAL(length,dec)} the length argument no longer
+includes a place for the sign or the decimal point.
+
+@item
+A @code{TIME} string must now be of one of the following formats:
+@code{[[[DAYS] [H]H:]MM:]SS[.fraction]} or
+@code{[[[[[H]H]H]H]MM]SS[.fraction]}
+
+@item
+@code{LIKE} now compares strings using the same character comparison rules
+as @code{'='}. If you require the old behavior, you can compile
+@strong{MySQL} with the @code{CXXFLAGS=-DLIKE_CMP_TOUPPER} flag.
+
+@item
+@code{REGEXP} is now case insensitive for normal (not binary) strings.
+
+@item
+When you check/repair tables you should use @code{CHECK TABLE}
+or @code{myisamchk} for @code{MyISAM} tables (@code{.MYI}) and
+@code{isamchk} for ISAM (@code{.ISM}) tables.
+
+@item
+If you want your @code{mysqldump} files to be compatible between
+@strong{MySQL} Version 3.22 and Version 3.23, you should not use the
+@code{--opt} or @code{--full} option to @code{mysqldump}.
+
+@item
+Check all your calls to @code{DATE_FORMAT()} to make sure there is a
+@samp{%} before each format character. (Later @strong{MySQL} Version 3.22
+releases already allowed this @samp{%} syntax.)
-@itemize @bullet
@item
-If you want to be able to use the compressed communication protocol,
-you need to get and install zlib from ftp.gnu.org.
+@code{mysql_fetch_fields_direct} is now a function (it was a macro) and
+it returns a pointer to a @code{MYSQL_FIELD} instead of a
+@code{MYSQL_FIELD} value.
@item
-Configure with @code{--with-named-z-libs=no}.
+@code{mysql_num_fields()} can no longer be used on a @code{MYSQL*} object (it's
+now a function that takes @code{MYSQL_RES*} as an argument). You should now
+use @code{mysql_field_count()} instead.
+
+@item
+In @strong{MySQL} Version 3.22, the output of @code{SELECT DISTINCT ...} was
+almost always sorted. In Version 3.23, you must use @code{GROUP BY} or
+@code{ORDER BY} to obtain sorted output.
+
+@item
+@code{SUM()} now returns @code{NULL}, instead of 0, if there are no matching
+rows. This is in accordance with ANSI SQL.
+
+@item
+An @code{AND} or @code{OR} with @code{NULL} values will now return
+@code{NULL} instead of 0. This mostly affects queries that use @code{NOT}
+on an @code{AND/OR} expression, because @code{NOT NULL} evaluates to
+@code{NULL}.
+
+@item
+@code{LPAD()} and @code{RPAD()} will shorten the result string if it's longer
+than the length argument.
@end itemize
-If you are using gcc and have problems with loading @code{UDF} functions
-into @strong{MySQL}, try adding @code{-lgcc} to the link line for the
-@code{UDF} function.
+@node Upgrading-from-3.21, Upgrading-from-3.20, Upgrading-from-3.22, Upgrade
+@subsection Upgrading from Version 3.21 to Version 3.22
-If you would like @strong{MySQL} to start automatically, you can copy
-@file{support-files/mysql.server} to @file{/etc/init.d} and create a
-symbolic link to it named @file{/etc/rc3.d/S99mysql.server}.
+@cindex compatibility, between MySQL versions
+@cindex upgrading, 3.21 to 3.22
-@node Solaris 2.7, Solaris x86, Solaris, Source install system issues
-@subsection Solaris 2.7/2.8 Notes
+Nothing that affects compatibility has changed between Version 3.21 and 3.22.
+The only pitfall is that new tables that are created with @code{DATE} type
+columns will use the new way to store the date. You can't access these new
+fields from an old version of @code{mysqld}.
-You can normally use a Solaris 2.6 binary on Solaris 2.7 and 2.8. Most
-of the Solaris 2.6 issues also apply for Solaris 2.7 and 2.8.
+After installing @strong{MySQL} Version 3.22, you should start the new server
+and then run the @code{mysql_fix_privilege_tables} script. This will add the
+new privileges that you need to use the @code{GRANT} command. If you forget
+this, you will get @code{Access denied} when you try to use @code{ALTER
+TABLE}, @code{CREATE INDEX}, or @code{DROP INDEX}. If your @strong{MySQL} root
+user requires a password, you should give this as an argument to
+@code{mysql_fix_privilege_tables}.
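+
+For example, if your @strong{MySQL} @code{root} user has a password, you
+might run the script like this (the script location depends on your
+installation):
+
+@example
+shell> scripts/mysql_fix_privilege_tables your_root_password
+@end example
+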
-Note that @strong{MySQL} Version 3.23.4 and above should be able to autodetect
-new versions of Solaris and enable workarounds for the following problems!
+The C API interface to @code{mysql_real_connect()} has changed. If you have
+an old client program that calls this function, you must place a @code{0} for
+the new @code{db} argument (or recode the client to send the @code{db}
+element for faster connections). You must also call @code{mysql_init()}
+before calling @code{mysql_real_connect()}! This change was done to allow
+the new @code{mysql_options()} function to save options in the @code{MYSQL}
+handler structure.
-Solaris 2.7 / 2.8 has some bugs in the include files. You may see the
-following error when you use @code{gcc}:
+The @code{mysqld} variable @code{key_buffer} has been renamed to
+@code{key_buffer_size}, but you can still use the old name in your
+startup files.
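+
+For example, a @file{my.cnf} entry using the new name might look like this
+(the 16M value is only an illustration):
+
+@example
+[mysqld]
+set-variable = key_buffer_size=16M
+@end example
+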
-@example
-/usr/include/widec.h:42: warning: `getwc' redefined
-/usr/include/wchar.h:326: warning: this is the location of the previous
-definition
-@end example
+@node Upgrading-from-3.20, Upgrading-to-arch, Upgrading-from-3.21, Upgrade
+@subsection Upgrading from Version 3.20 to Version 3.21
+@cindex upgrading, 3.20 to 3.21
-If this occurs, you can do the following to fix the problem:
+If you are running a version older than Version 3.20.28 and want to
+switch to Version 3.21, you need to do the following:
-Copy @code{/usr/include/widec.h} to
-@code{.../lib/gcc-lib/os/gcc-version/include} and change line 41 from:
+You can start the @code{mysqld} Version 3.21 server with @code{safe_mysqld
+--old-protocol} to use it with clients from a Version 3.20 distribution.
+In this case, the new client function @code{mysql_errno()} will not
+return any server error, only @code{CR_UNKNOWN_ERROR} (but it
+works for client errors), and the server uses the old @code{password()}
+checking rather than the new one.
-@example
-#if !defined(lint) && !defined(__lint)
+If you are @strong{NOT} using the @code{--old-protocol} option to
+@code{mysqld}, you will need to make the following changes:
-to
+@itemize @bullet
+@item
+All client code must be recompiled. If you are using ODBC, you must get
+the new @strong{MyODBC} 2.x driver.
+@item
+The script @code{scripts/add_long_password} must be run to convert the
+@code{Password} field in the @code{mysql.user} table to @code{CHAR(16)}.
+@item
+All passwords must be reassigned in the @code{mysql.user} table (to get 62-bit
+rather than 31-bit passwords).
+@item
+The table format hasn't changed, so you don't have to convert any tables.
+@end itemize
-#if !defined(lint) && !defined(__lint) && !defined(getwc)
-@end example
+@strong{MySQL} Version 3.20.28 and above can handle the new @code{user} table
+format without affecting clients. If you have a @strong{MySQL} version earlier
+than Version 3.20.28, passwords will no longer work with it if you convert the
+@code{user} table. So to be safe, you should first upgrade to at least Version
+3.20.28 and then upgrade to Version 3.21.
-Alternatively, you can edit @file{/usr/include/widec.h} directly. Either
-way, after you make the fix, you should remove @file{config.cache} and run
-@code{configure} again!
+@cindex Protocol mismatch
+The new client code works with a 3.20.x @code{mysqld} server, so
+if you experience problems with 3.21.x, you can use the old 3.20.x server
+without having to recompile the clients again.
-If you get errors like this when you run @code{make}, it's because
-@code{configure} didn't detect the @file{curses.h} file (probably
-because of the error in @file{/usr/include/widec.h}):
+If you are not using the @code{--old-protocol} option to @code{mysqld},
+old clients will issue the error message:
@example
-In file included from mysql.cc:50:
-/usr/include/term.h:1060: syntax error before `,'
-/usr/include/term.h:1081: syntax error before `;'
+ERROR: Protocol mismatch. Server Version = 10 Client Version = 9
@end example
-The solution to this is to do one of the following:
+The new Perl @code{DBI}/@code{DBD} interface also supports the old
+@code{mysqlperl} interface. The only change you have to make if you use
+@code{mysqlperl} is to change the arguments to the @code{connect()} function.
+The new arguments are: @code{host}, @code{database}, @code{user},
+@code{password} (the @code{user} and @code{password} arguments have changed
+places).
+@xref{Perl DBI Class, , Perl @code{DBI} Class}.
+
+The following changes may affect queries in old applications:
@itemize @bullet
@item
-Configure with @code{CFLAGS=-DHAVE_CURSES_H CXXFLAGS=-DHAVE_CURSES_H ./configure}.
-
+@code{HAVING} must now be specified before any @code{ORDER BY} clause.
@item
-Edit @file{/usr/include/widec.h} as indicted above and rerun configure.
-
+The parameters to @code{LOCATE()} have been swapped.
@item
-Remove the @code{#define HAVE_TERM} line from @file{config.h} file and
-run @code{make} again.
+There are some new reserved words. The most notable are @code{DATE},
+@code{TIME}, and @code{TIMESTAMP}.
@end itemize
-If you get a problem that your linker can't find @code{-lz} when linking
-your client program, the problem is probably that your @file{libz.so} file is
-installed in @file{/usr/local/lib}. You can fix this by one of the
-following methods:
+@node Upgrading-to-arch, , Upgrading-from-3.20, Upgrade
+@subsection Upgrading to Another Architecture
-@itemize @bullet
-@item
-Add @file{/usr/local/lib} to @code{LD_LIBRARY_PATH}.
+@cindex upgrading, different architecture
-@item
-Add a link to @file{libz.so} from @file{/lib}.
+If you are using @strong{MySQL} Version 3.23, you can copy the @file{.frm},
+@file{.MYI}, and @file{.MYD} files between different architectures that
+support the same floating-point format. (@strong{MySQL} takes care of any
+byte swapping issues.)
-@item
-If you are using Solaris 8, you can install the optional zlib from your
-Solaris 8 CD distribution.
+The @strong{MySQL} @code{ISAM} data and index files (@file{.ISD} and
+@file{.ISM}, respectively) are architecture-dependent and in some cases
+OS-dependent. If you want to move your applications to another machine
+that has a different architecture or OS than your current machine, you
+should not try to move a database by simply copying the files to the
+other machine. Use @code{mysqldump} instead.
-@item
-Configure @strong{MySQL} with the @code{--with-named-z-libs=no} option.
-@end itemize
+By default, @code{mysqldump} will create a file full of SQL statements.
+You can then transfer the file to the other machine and feed it as input
+to the @code{mysql} client.
-@node Solaris x86, SunOS, Solaris 2.7, Source install system issues
-@subsection Solaris x86 Notes
+Try @code{mysqldump --help} to see what options are available.
+If you are moving the data to a newer version of @strong{MySQL}, you should use
+@code{mysqldump --opt} with the newer version to get a fast, compact dump.
-On Solaris 2.8 on x86, @strong{mysqld} will core dump if you run
-'strip' in.
+The easiest (although not the fastest) way to move a database between two
+machines is to run the following commands on the machine on which the
+database is located:
-If you are using @code{gcc} or @code{egcs} on Solaris x86 and you
-experience problems with core dumps under load, you should use the
-following @code{configure} command:
+@example
+shell> mysqladmin -h 'other hostname' create db_name
+shell> mysqldump --opt db_name \
+ | mysql -h 'other hostname' db_name
+@end example
+
+If you want to copy a database from a remote machine over a slow network,
+you can use:
@example
-CC=gcc CFLAGS="-O3 -fomit-frame-pointer -DHAVE_CURSES_H" \
-CXX=gcc \
-CXXFLAGS="-O3 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti -DHAVE_CURSES_H" \
-./configure --prefix=/usr/local/mysql
+shell> mysqladmin create db_name
+shell> mysqldump -h 'other hostname' --opt --compress db_name \
+ | mysql db_name
@end example
-This will avoid problems with the @code{libstdc++} library and with C++
-exceptions.
+You can also store the result in a file, then transfer the file to the
+target machine and load the file into the database there. For example,
+you can dump a database to a file on the source machine like this:
-If this doesn't help, you should compile a debug version and run
-it with a trace file or under @code{gdb}. @xref{Using gdb on mysqld}.
+@example
+shell> mysqldump --quick db_name | gzip > db_name.contents.gz
+@end example
-@node SunOS, Linux, Solaris x86, Source install system issues
-@subsection SunOS 4 Notes
+(The file created in this example is compressed.) Transfer the file
+containing the database contents to the target machine and run these commands
+there:
-On SunOS 4, MIT-pthreads is needed to compile @strong{MySQL}, which in turn
-means you will need GNU @code{make}.
+@example
+shell> mysqladmin create db_name
+shell> gunzip < db_name.contents.gz | mysql db_name
+@end example
-Some SunOS 4 systems have problems with dynamic libraries and @code{libtool}.
-You can use the following @code{configure} line to avoid this problem:
+@cindex @code{mysqldump}
+@cindex @code{mysqlimport}
+You can also use @code{mysqldump} and @code{mysqlimport} to accomplish
+the database transfer.
+For big tables, this is much faster than simply using @code{mysqldump}.
+In the commands shown below, @code{DUMPDIR} represents the full pathname
+of the directory you use to store the output from @code{mysqldump}.
+
+First, create the directory for the output files and dump the database:
@example
-shell> ./configure --disable-shared --with-mysqld-ldflags=-all-static
+shell> mkdir DUMPDIR
+shell> mysqldump --tab=DUMPDIR db_name
@end example
-When compiling @code{readline}, you may get warnings about duplicate defines.
-These may be ignored.
+Then transfer the files in the @code{DUMPDIR} directory to some corresponding
+directory on the target machine and load the files into @strong{MySQL}
+there:
-When compiling @code{mysqld}, there will be some @code{implicit declaration
-of function} warnings. These may be ignored.
+@example
+shell> mysqladmin create db_name # create database
+shell> cat DUMPDIR/*.sql | mysql db_name # create tables in database
+shell> mysqlimport db_name DUMPDIR/*.txt # load data into tables
+@end example
-@node Linux, Alpha-DEC-UNIX, SunOS, Source install system issues
+Also, don't forget to copy the @code{mysql} database, because that's where the
+grant tables (@code{user}, @code{db}, @code{host}) are stored. You may have
+to run commands as the @strong{MySQL} @code{root} user on the new machine
+until you have the @code{mysql} database in place.
+
+After you import the @code{mysql} database on the new machine, execute
+@code{mysqladmin flush-privileges} so that the server reloads the grant table
+information.
+
+@node Operating System Specific Notes, , Upgrade, Installing
+@section Operating System Specific Notes
+
+@menu
+* Linux::
+* Windows::
+* Solaris::
+* BSD Notes::
+* Mac OS X::
+* Other Unix Notes::
+* OS/2::
+* BeOS::
+* Novell Netware::
+@end menu
+
+@node Linux, Windows, Operating System Specific Notes, Operating System Specific Notes
@subsection Linux Notes (All Linux Versions)
+@menu
+* Binary notes-Linux::
+* Linux-x86:: Linux-x86 notes
+* Linux-SPARC:: Linux-SPARC notes
+* Linux-Alpha:: Linux-Alpha notes
+* Linux-PowerPC::
+* Linux-MIPS::
+* Linux-IA64:: Linux-Ia64 notes
+@end menu
+
The notes below regarding @strong{glibc} apply only to the situation
when you build @strong{MySQL}
yourself. If you are running Linux on an x86 machine, in most cases it is
@@ -10310,10 +9500,10 @@ You should also add /etc/my.cnf:
@example
[safe_mysqld]
-open_files_limit=8192
+open-files-limit=8192
@end example
-The above should allow @strong{MySQL} to create up to 8192 connections/files.
+The above should allow @strong{MySQL} to create up to 8192 connections + files.
The @code{STACK_SIZE} constant in LinuxThreads controls the spacing of thread
stacks in the address space. It needs to be large enough so that there will
@@ -10495,19 +9685,87 @@ The following @code{configure} line should work with @code{fcc/FCC}:
CC=fcc CFLAGS="-O -K fast -K lib -K omitfp -Kpreex -D_GNU_SOURCE -DCONST=const -DNO_STRTOLL_PROTO" CXX=FCC CXXFLAGS="-O -K fast -K lib -K omitfp -K preex --no_exceptions --no_rtti -D_GNU_SOURCE -DCONST=const -Dalloca=__builtin_alloca -DNO_STRTOLL_PROTO '-D_EXTERN_INLINE=static __inline'" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-low-memory
@end example
-@menu
-* Linux-x86:: Linux-x86 notes
-* Linux-RedHat50:: RedHat 5.0 notes
-* Linux-RedHat51:: RedHat 5.1 notes
-* Linux-SPARC:: Linux-SPARC notes
-* Linux-Alpha:: Linux-Alpha notes
-* MKLinux:: MkLinux notes
-* Qube2:: Qube2 Linux notes
-* Linux-Ia64:: Linux-Ia64 notes
-@end menu
+@node Binary notes-Linux, Linux-x86, Linux, Linux
+@subsubsection Linux Notes for Binary Distributions
-@node Linux-x86, Linux-RedHat50, Linux, Linux
-@subsubsection Linux-x86 Notes
+@cindex binary distributions, on Linux
+@cindex Linux, binary distribution
+
+@strong{MySQL} needs at least Linux Version 2.0.
+
+The binary release is linked with @code{-static}, which means you do not
+normally need to worry about which version of the system libraries you
+have. You need not install LinuxThreads, either. A program linked with
+@code{-static} is slightly bigger than a dynamically linked program but
+also slightly faster (3-5%). One problem, however, is that you can't use
+user-defined functions (UDFs) with a statically linked program. If
+you are going to write or use UDF functions (this is something only for
+C or C++ programmers), you must compile @strong{MySQL} yourself, using
+dynamic linking.
+
+If you are using a @code{libc}-based system (instead of a @code{glibc2}
+system), you will probably get some problems with hostname resolving and
+@code{getpwnam()} with the binary release. (This is because @code{glibc}
+unfortunately depends on some external libraries to resolve hostnames
+and @code{getpwent()}, even when compiled with @code{-static}). In this
+case you probably get the following error message when you run
+@code{mysql_install_db}:
+
+@example
+Sorry, the host 'xxxx' could not be looked up
+@end example
+
+or the following error when you try to run @code{mysqld} with the @code{--user}
+option:
+
+@example
+getpwnam: No such file or directory
+@end example
+
+You can solve this problem in one of the following ways:
+
+@itemize @bullet
+@item
+Get a @strong{MySQL} source distribution (an RPM or the @code{tar.gz}
+distribution) and install this instead.
+@item
+Execute @code{mysql_install_db --force}. This will not execute the
+@code{resolveip} test in @code{mysql_install_db}. The downside is that
+you can't use host names in the grant tables; you must use IP numbers
+instead (except for @code{localhost}). If you are using an old @strong{MySQL}
+release that doesn't support @code{--force}, you have to remove the
+@code{resolveip} test in @code{mysql_install_db} with an editor.
+@item
+Start @code{mysqld} with @code{su} instead of using @code{--user}.
+@end itemize
+
+The Linux-Intel binary and RPM releases of @strong{MySQL} are configured
+for the highest possible speed. We are always trying to use the fastest
+stable compiler available.
+
+@strong{MySQL} Perl support requires Perl Version 5.004_03 or newer.
+
+On some Linux 2.2 versions, you may get the error @code{Resource
+temporarily unavailable} when you make a lot of new connections to a
+@code{mysqld} server over TCP/IP.
+
+The problem is that Linux has a delay between the time you close a TCP/IP
+socket and the time it is actually freed by the system. As there is only
+room for a finite number of TCP/IP slots, you will get the above error if
+you try to make too many new TCP/IP connections in a short time, as
+when you run the @strong{MySQL} @file{test-connect} benchmark over
+TCP/IP.
+
+We have mailed about this problem a couple of times to different Linux
+mailing lists but have never been able to resolve this properly.
+
+The only known 'fix' for this problem is to use persistent connections in
+your clients, or to use Unix sockets if you are running the database server
+and clients on the same machine. We hope that the @code{Linux 2.4}
+kernel will fix this problem in the future.
+
+@node Linux-x86, Linux-SPARC, Binary notes-Linux, Linux
+@subsubsection Linux x86 Notes
@strong{MySQL} requires @code{libc} Version 5.4.12 or newer. It's known to
work with @code{libc} 5.4.46. @code{glibc} Version 2.0.6 and later should
@@ -10551,9 +9809,7 @@ under the @strong{MySQL} installation directory or in the
If @code{mysqld} always core dumps when it starts up, the problem may be that
you have an old @file{/lib/libc.a}. Try renaming it, then remove
@file{sql/mysqld} and do a new @code{make install} and try again. This
-problem has been reported on some Slackware installations. RedHat Version 5.0
-also has a similar problem with some new @code{glibc} versions.
-@xref{Linux-RedHat50}.
+problem has been reported on some Slackware installations.
If you get the following error when linking @code{mysqld},
it means that your @file{libg++.a} is not installed correctly:
@@ -10569,84 +9825,8 @@ You can avoid using @file{libg++.a} by running @code{configure} like this:
shell> CXX=gcc ./configure
@end example
-@node Linux-RedHat50, Linux-RedHat51, Linux-x86, Linux
-@subsubsection RedHat Version 5.0 Notes
-
-If you have any problems with @strong{MySQL} on RedHat, you should start by
-upgrading @code{glibc} to the newest possible version!
-
-If you install all the official RedHat patches (including
-@code{glibc-2.0.7-19} and @code{glibc-devel-2.0.7-19}), both the
-binary and source distributions of @strong{MySQL} should work without
-any trouble!
-
-The updates are needed because there is a bug in @code{glibc} 2.0.5 in how
-@code{pthread_key_create} variables are freed. With @code{glibc} 2.0.5, you
-must use a statically linked @strong{MySQL} binary distribution. If you
-want to compile from source, you must install the corrected version of
-LinuxThreads from @uref{http://www.mysql.com/Downloads/Linux} or upgrade your
-@code{glibc}.
-
-If you have an incorrect version of @code{glibc} or LinuxThreads, the symptom
-is that @code{mysqld} crashes after each connection. For example,
-@code{mysqladmin version} will crash @code{mysqld} when it finishes!
-
-Another symptom of incorrect libraries is that @code{mysqld} crashes at
-once when it starts. On some Linux systems, this can be fixed by configuring
-like this:
-
-@example
-shell> ./configure --with-mysqld-ldflags=-all-static
-@end example
-
-On Redhat Version 5.0, the easy way out is to install the @code{glibc}
-2.0.7-19 RPM and run @code{configure} @strong{without} the
-@code{--with-mysqld-ldflags=-all-static} option.
-
-For the source distribution of @code{glibc} 2.0.7, a patch that is easy to
-apply and is tested with @strong{MySQL} may be found at:
-
-@example
-@uref{http://www.mysql.com/Downloads/Linux/glibc-2.0.7-total-patch.tar.gz}
-@end example
-
-If you experience crashes like these when you build @strong{MySQL}, you can
-always download the newest binary version of @strong{MySQL}. This is
-statically-linked to avoid library conflicts and should work on all Linux
-systems!
-
-@strong{MySQL} comes with an internal debugger that can generate
-trace files with a lot of information that can be used to find and solve a
-wide range of different problems.
-@xref{Debugging server}.
-
-@node Linux-RedHat51, Linux-SPARC, Linux-RedHat50, Linux
-@subsubsection RedHat Version 5.1 notes
-
-The @code{glibc} of RedHat Version 5.1 (@code{glibc} 2.0.7-13) has a memory
-leak, so to get a stable @strong{MySQL} version, you must upgrade @code{glibc},
-to 2.0.7-19, downgrade @code{glibc} or use a binary version of @code{mysqld}.
-If you don't do this, you will encounter memory problems (out of memory, etc.).
-The most common error in this case is:
-
-@example
-Can't create a new thread (errno 11). If you are not out of available
-memory, you can consult the manual for any possible OS dependent bug
-@end example
-
-After you have upgraded to @code{glibc} 2.0.7-19, you can configure
-@strong{MySQL} with dynamic linking (the default), but you @strong{cannot}
-run @code{configure} with the @code{--with-mysqld-ldflags=-all-static} option
-until you have installed @code{glibc} 2.0.7-19 from source!
-
-You can check which version of @code{glibc} you have with @code{rpm -q glibc}.
-
-Another reason for the above error is if you try to use more threads
-than your Linux kernel is configured for. In this case you should raise
-the limits in @file{include/linux/tasks.h} and recompile your kernel!
-
-@node Linux-SPARC, Linux-Alpha, Linux-RedHat51, Linux
-@subsubsection Linux-SPARC Notes
+@node Linux-SPARC, Linux-Alpha, Linux-x86, Linux
+@subsubsection Linux SPARC Notes
In some implementations, @code{readdir_r()} is broken. The symptom is that
@code{SHOW DATABASES} always returns an empty set. This can be fixed by
@@ -10661,8 +9841,8 @@ that is available at @code{vger.rutgers.edu} (a version of Linux that was
never merged with the official 2.0.30). You must also install LinuxThreads
Version 0.6 or newer.
-@node Linux-Alpha, MKLinux, Linux-SPARC, Linux
-@subsubsection Linux-Alpha Notes
+@node Linux-Alpha, Linux-PowerPC, Linux-SPARC, Linux
+@subsubsection Linux Alpha Notes
@strong{MySQL} Version 3.23.12 is the first @strong{MySQL} version that is
tested on Linux-Alpha. If you plan to use @strong{MySQL} on Linux-Alpha,
@@ -10707,21 +9887,21 @@ resulting image will core dump at start. In other words, @strong{DON'T}
use @code{--with-mysqld-ldflags=-all-static} with @code{gcc}.
@end itemize
-@node MKLinux, Qube2, Linux-Alpha, Linux
-@subsubsection MkLinux Notes
+@node Linux-PowerPC, Linux-MIPS, Linux-Alpha, Linux
+@subsubsection Linux PowerPC Notes
@strong{MySQL} should work on MkLinux with the newest @code{glibc} package
(tested with @code{glibc} 2.0.7).
-@node Qube2, Linux-Ia64, MKLinux, Linux
-@subsubsection Qube2 Linux Notes
+@node Linux-MIPS, Linux-IA64, Linux-PowerPC, Linux
+@subsubsection Linux MIPS Notes
To get @strong{MySQL} to work on Qube2, (Linux Mips), you need the
newest @code{glibc} libraries (@code{glibc-2.0.7-29C2} is known to
work). You must also use the @code{egcs} C++ compiler
(@code{egcs-1.0.2-9}, @code{gcc 2.95.2} or newer).
-@node Linux-Ia64, , Qube2, Linux
+@node Linux-IA64, , Linux-MIPS, Linux
@subsubsection Linux IA64 Notes
To get @strong{MySQL} to compile on Linux Ia64, we had to do the following
@@ -10749,251 +9929,854 @@ make_install
and @strong{mysqld} should be ready to run.
-@node Alpha-DEC-UNIX, Alpha-DEC-OSF1, Linux, Source install system issues
-@subsection Alpha-DEC-UNIX Notes (Tru64)
-If you are using egcs 1.1.2 on Digital Unix, you should upgrade to gcc
-2.95.2, as egcs on DEC has some serious bugs!
+@node Windows, Solaris, Linux, Operating System Specific Notes
+@subsection Windows Notes
-When compiling threaded programs under Digital Unix, the documentation
-recommends using the @code{-pthread} option for @code{cc} and @code{cxx} and
-the libraries @code{-lmach -lexc} (in addition to @code{-lpthread}). You
-should run @code{configure} something like this:
+This section describes installation and use of @strong{MySQL} on Windows.
+This information is also provided in the @file{README} file that comes
+with the @strong{MySQL} Windows distribution.
+
+@menu
+* Win95 start:: Starting @strong{MySQL} on Win95 / Win98
+* NT start:: Starting @strong{MySQL} on NT / Win2000
+* Windows running:: Running @strong{MySQL} on Windows
+* Windows and SSH:: Connecting to a remote @strong{MySQL} from Windows with SSH
+* Windows symbolic links:: Splitting data across different disks under Win32
+* Windows compiling:: Compiling MySQL clients on Windows.
+* Windows vs Unix:: @strong{MySQL}-Windows compared to Unix @strong{MySQL}
+@end menu
+
+@node Win95 start, NT start, Windows, Windows
+@subsubsection Starting MySQL on Windows 95 or Windows 98
+
+@strong{MySQL} uses TCP/IP to connect a client to a server. (This will
+allow any machine on your network to connect to your @strong{MySQL}
+server.) Because of this, you must install TCP/IP on your machine before
+starting @strong{MySQL}. You can find TCP/IP on your Windows CD-ROM.
+
+Note that if you are using an old Win95 release (for example OSR2), it's
+likely that you have an old Winsock package! @strong{MySQL} requires
+Winsock 2! You can get the newest Winsock from
+@uref{http://www.microsoft.com/}. Win98 has the new Winsock 2 library, so
+the above doesn't apply for Win98.
+
+To start the @code{mysqld} server, you should start an MS-DOS window and type:
@example
-CC="cc -pthread" CXX="cxx -pthread -O" \
-./configure --with-named-thread-libs="-lpthread -lmach -lexc -lc"
+C:\> C:\mysql\bin\mysqld
@end example
-When compiling @code{mysqld}, you may see a couple of warnings like this:
+This will start @code{mysqld} in the background without a window.
+
+You can kill the @strong{MySQL} server by executing:
@example
-mysqld.cc: In function void handle_connections()':
-mysqld.cc:626: passing long unsigned int *' as argument 3 of
-accept(int,sockadddr *, int *)'
+C:\> C:\mysql\bin\mysqladmin -u root shutdown
@end example
-You can safely ignore these warnings. They occur because @code{configure}
-can detect only errors, not warnings.
+Note that Win95 and Win98 don't support creation of named pipes.
+On Win95 and Win98, you can only use named pipes to connect to a
+remote @strong{MySQL} server running on a Windows NT server host.
+(The @strong{MySQL} server must also support named pipes, of
+course. For example, using @code{mysqld-opt} under NT will not allow
+named pipe connections. You should use either @code{mysqld-nt} or
+@code{mysqld-max-nt}.)
-If you start the server directly from the command line, you may have problems
-with it dying when you log out. (When you log out, your outstanding processes
-receive a @code{SIGHUP} signal.) If so, try starting the server like this:
+If @code{mysqld} doesn't start, please check the
+@file{\mysql\data\mysql.err} file to see if the server wrote any message
+there to indicate the cause of the problem. You can also try to start
+the server with @code{mysqld --standalone}. In this case, you may get
+some useful information on the screen that may help solve the problem.
+
+The last option is to start @code{mysqld} with @code{--standalone
+--debug}. In this case @code{mysqld} will write a log file
+@file{C:\mysqld.trace} that should contain the reason why @code{mysqld}
+doesn't start. @xref{Making trace files}.
+
+@node NT start, Windows running, Win95 start, Windows
+@subsubsection Starting MySQL on Windows NT or Windows 2000
+
+The Win95/Win98 section also applies to @strong{MySQL} on NT/Win2000, with
+the following differences:
+
+To get @strong{MySQL} to work with TCP/IP on NT, you must install
+service pack 3 (or newer)!
+
+Note that everything in the following that applies for NT also applies
+for Win2000!
+
+For NT/Win2000, the server name is @code{mysqld-nt}. Normally you
+should install @strong{MySQL} as a service on NT/Win2000:
@example
-shell> nohup mysqld [options] &
+C:\> C:\mysql\bin\mysqld-nt --install
@end example
-@code{nohup} causes the command following it to ignore any @code{SIGHUP}
-signal sent from the terminal. Alternatively, start the server by running
-@code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you.
-@xref{safe_mysqld, , @code{safe_mysqld}}.
+or
-If you get a problem when compiling mysys/get_opt.c, just remove the
-line #define _NO_PROTO from the start of that file!
+@example
+C:\> C:\mysql\bin\mysqld-max-nt --install
+@end example
-If you are using Compac's CC compiler, the following configure line should
-work:
+(Under Windows NT, you can actually install any of the server binaries
+as a service, but only those having names that end with @code{-nt.exe}
+provide support for named pipes.)
+
+You can start and stop the @strong{MySQL} service with these commands:
@example
-CC="cc -pthread"
-CFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host"
-CXX="cxx -pthread"
-CXXFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host"
-export CC CFLAGS CXX CXXFLAGS
-./configure \
---prefix=/usr/local/mysql \
---with-low-memory \
---enable-large-files \
---enable-shared=yes \
---with-named-thread-libs="-lpthread -lmach -lexc -lc"
-gnumake
+C:\> NET START mysql
+C:\> NET STOP mysql
@end example
-If you get a problem with libtool, when compiling with shared libraries
-as above, when linking @code{mysql}, you should be able to get around
-this by issuing:
+Note that in this case you can't use any other options for @code{mysqld-nt}!
+
+You can also run @code{mysqld-nt} as a stand-alone program on NT if you need
+to start it with any options. If you start @code{mysqld-nt} without options
+on NT, it tries to start itself as a service with the default service
+options. If you have stopped the @code{mysqld-nt} service, you have to start
+it again with @code{NET START mysql}.
+
+The service is installed with the name @code{MySQL}. Once installed, it must
+be started using the Service Control Manager (SCM) utility found in the
+Control Panel, or by using the @code{NET START MySQL} command. If any options
+are desired, they must be specified as ``Startup parameters'' in the SCM utility
+before you start the @strong{MySQL} service. Once running, @code{mysqld-nt}
+can be stopped using @code{mysqladmin}, from the SCM utility, or by using
+the command @code{NET STOP MySQL}. If you use SCM to stop @code{mysqld-nt},
+there is a strange message from SCM about @code{mysqld shutdown normally}.
+When run as a service, @code{mysqld-nt} has no access to a console, so no
+messages can be seen.
+
+On NT you can get the following service error messages:
+
+@multitable @columnfractions .3 .7
+@item Permission Denied @tab Means that it cannot find @code{mysqld-nt.exe}.
+@item Cannot Register @tab Means that the path is incorrect.
+@item Failed to install service. @tab Means that the service is already installed or that the Service Control Manager is in a bad state.
+@end multitable
+
+If you have problems installing @code{mysqld-nt} as a service, try starting
+it with the full path:
@example
-cd mysql
-/bin/sh ../libtool --mode=link cxx -pthread -O3 -DDBUG_OFF \
--O4 -ansi_alias -ansi_args -fast -inline speed \
--speculate all \ -arch host -DUNDEF_HAVE_GETHOSTBYNAME_R \
--o mysql mysql.o readline.o sql_string.o completion_hash.o \
-../readline/libreadline.a -lcurses \
-../libmysql/.libs/libmysqlclient.so -lm
-cd ..
-gnumake
-gnumake install
-scripts/mysql_install_db
+C:\> C:\mysql\bin\mysqld-nt --install
@end example
-@node Alpha-DEC-OSF1, SGI-Irix, Alpha-DEC-UNIX, Source install system issues
-@subsection Alpha-DEC-OSF1 Notes
+If this doesn't work, you can get @code{mysqld-nt} to start properly by fixing
+the path in the registry!
-If you have problems compiling and have DEC @code{CC} and @code{gcc}
-installed, try running @code{configure} like this:
+If you don't want to start @code{mysqld-nt} as a service, you can start it as
+follows:
@example
-CC=cc CFLAGS=-O CXX=gcc CXXFLAGS=-O3 \
-./configure --prefix=/usr/local/mysql
+C:\> C:\mysql\bin\mysqld-nt --standalone
@end example
-If you get problems with the @file{c_asm.h} file, you can create and use
-a 'dummy' @file{c_asm.h} file with:
+or
@example
-touch include/c_asm.h
-CC=gcc CFLAGS=-I./include \
-CXX=gcc CXXFLAGS=-O3 \
-./configure --prefix=/usr/local/mysql
+C:\> C:\mysql\bin\mysqld --standalone --debug
@end example
-Note that the following problems with the @code{ld} program can be fixed
-by downloading the latest DEC (Compaq) patch kit from:
-@uref{http://ftp.support.compaq.com/public/unix/}.
+The last version gives you a debug trace in @file{C:\mysqld.trace}.
+@xref{Making trace files}.
-On OSF1 V4.0D and compiler "DEC C V5.6-071 on Digital Unix V4.0 (Rev. 878)"
-the compiler had some strange behavior (undefined @code{asm} symbols).
-@code{/bin/ld} also appears to be broken (problems with @code{_exit
-undefined} errors occuring while linking @code{mysqld}). On this system, we
-have managed to compile @strong{MySQL} with the following @code{configure}
-line, after replacing @code{/bin/ld} with the version from OSF 4.0C:
+@node Windows running, Windows and SSH, NT start, Windows
+@subsubsection Running MySQL on Windows
+
+@cindex TCP/IP
+@cindex named pipes
+
+@strong{MySQL} supports TCP/IP on all Windows platforms and named pipes on NT.
+The default is to use named pipes for local connections on NT and TCP/IP for
+all other cases if the client has TCP/IP installed. The host name specifies
+which protocol is used:
+
+@multitable @columnfractions .3 .7
+@item @strong{Host name} @tab @strong{Protocol}
+@item NULL (none) @tab On NT, try named pipes first; if that doesn't work, use TCP/IP. On Win95/Win98, TCP/IP is used.
+@item . @tab Named pipes
+@item localhost @tab TCP/IP to current host
+@item hostname @tab TCP/IP
+@end multitable
+
+You can force a @strong{MySQL} client to use named pipes by specifying the
+@code{--pipe} option or by specifying @code{.} as the host name. Use the
+@code{--socket} option to specify the name of the pipe.
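+
+For example, either of the following should connect the @code{mysql} client
+to a local server over a named pipe (using the @code{test} database, as in
+the test commands that follow):
+
+@example
+C:\> C:\mysql\bin\mysql --pipe test
+C:\> C:\mysql\bin\mysql --host=. test
+@end example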
+
+You can test whether or not @strong{MySQL} is working by executing the
+following commands:
@example
-CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql
+C:\> C:\mysql\bin\mysqlshow
+C:\> C:\mysql\bin\mysqlshow -u root mysql
+C:\> C:\mysql\bin\mysqladmin version status proc
+C:\> C:\mysql\bin\mysql test
@end example
-With the Digital compiler "C++ V6.1-029", the following should work:
+If @code{mysqld} is slow to answer to connections on Win95/Win98, there is
+probably a problem with your DNS. In this case, start @code{mysqld} with
+@code{--skip-name-resolve} and use only @code{localhost} and IP numbers in
+the @strong{MySQL} grant tables. You can also avoid DNS when connecting to a
+@code{mysqld-nt} @strong{MySQL} server running on NT by using the
+@code{--pipe} argument to specify use of named pipes. This works for most
+@strong{MySQL} clients.
+
+There are two versions of the @strong{MySQL} command-line tool:
+@multitable @columnfractions .25 .75
+@item @code{mysql} @tab Compiled on native Windows, which offers very limited text editing capabilities.
+@item @code{mysqlc} @tab Compiled with the Cygnus GNU compiler and libraries, which offers @code{readline} editing.
+@end multitable
+
+If you want to use @code{mysqlc.exe}, you must copy
+@file{C:\mysql\lib\cygwinb19.dll} to your Windows system directory
+(@file{\windows\system} or similar place).
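+
+For example, from an MS-DOS prompt (assuming the default @file{C:\mysql}
+install location and @file{C:\windows\system} as the system directory):
+
+@example
+C:\> copy C:\mysql\lib\cygwinb19.dll C:\windows\system
+@end example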
+
+The default privileges on Windows give all local users full privileges
+to all databases without specifying a password. To make @strong{MySQL}
+more secure, you should set a password for all users and remove the row in
+the @code{mysql.user} table that has @code{Host='localhost'} and
+@code{User=''}.
+
+You should also add a password for the @code{root} user. The following
+example starts by removing the anonymous user that can be used by anyone
+to access the @code{test} database, then sets a @code{root} user password:
@example
-CC=cc -pthread
-CFLAGS=-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host
-CXX=cxx -pthread
-CXXFLAGS=-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host -noexceptions -nortti
-export CC CFLAGS CXX CXXFLAGS
-./configure --prefix=/usr/mysql/mysql --with-mysqld-ldflags=-all-static --disable-shared --with-named-thread-libs="-lmach -lexc -lc"
+C:\> C:\mysql\bin\mysql mysql
+mysql> DELETE FROM user WHERE Host='localhost' AND User='';
+mysql> QUIT
+C:\> C:\mysql\bin\mysqladmin reload
+C:\> C:\mysql\bin\mysqladmin -u root password your_password
@end example
-In some versions of OSF1, the @code{alloca()} function is broken. Fix
-this by removing the line in @file{config.h} that defines @code{'HAVE_ALLOCA'}.
+After you've set the password, if you want to take down the @code{mysqld}
+server, you can do so using this command:
-The @code{alloca()} function also may have an incorrect prototype in
-@code{/usr/include/alloca.h}. This warning resulting from this can be ignored.
+@example
+C:\> mysqladmin --user=root --password=your_password shutdown
+@end example
-@code{configure} will use the following thread libraries automatically:
-@code{--with-named-thread-libs="-lpthread -lmach -lexc -lc"}.
+If you are using the old shareware version of @strong{MySQL} Version
+3.21 under Windows, the above command will fail with an error:
+@code{parse error near 'SET OPTION password'}. The fix is to upgrade
+to the current @strong{MySQL} version, which is freely available.
-When using @code{gcc}, you can also try running @code{configure} like this:
+With the current @strong{MySQL} versions you can easily add new users
+and change privileges with @code{GRANT} and @code{REVOKE} commands.
+@xref{GRANT}.
+
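+For example, a new user with full access to the @code{test} database could be
+added like this (the user name and password here are just placeholders):
+
+@example
+mysql> GRANT ALL PRIVILEGES ON test.* TO some_user@@localhost IDENTIFIED BY 'some_pass';
+@end example
+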
+@c FIX this is ugly, real ugly.
+@node Windows and SSH, Windows symbolic links, Windows running, Windows
+@subsubsection Connecting to a Remote MySQL from Windows with SSH
+
+@cindex SSH
+@cindex connecting, remotely with SSH
+
+Here is a note about how to get a secure connection to a remote
+@strong{MySQL} server with SSH (by David Carlson @email{dcarlson@@mplcomm.com}):
+
+@itemize @bullet
+@item
+Install an SSH client on your Windows machine. As a user, the best non-free
+one I've found is @code{SecureCRT} from @uref{http://www.vandyke.com/}.
+Another option is @code{f-secure} from @uref{http://www.f-secure.com/}. You
+can also find some free ones on @strong{Google} at
+@uref{http://directory.google.com/Top/Computers/Security/Products_and_Tools/Cryptography/SSH/Clients/Windows/}.
+
+@item
+Start your Windows SSH client.
+Set @code{Host_Name = yourmysqlserver_URL_or_IP}.
+Set @code{userid=your_userid} to log in to your server (probably not the same
+as your @strong{MySQL} login and password).
+
+@item
+Set up port forwarding. Either do a remote forward (set @code{local_port: 3306},
+@code{remote_host: yourmysqlservername_or_ip}, @code{remote_port: 3306})
+or a local forward (set @code{port: 3306}, @code{host: localhost},
+@code{remote port: 3306}). A command-line equivalent of the local forward is
+sketched at the end of this section.
+
+@item
+Save everything, otherwise you'll have to redo it the next time.
+
+@item
+Log in to your server with the SSH session you just created.
+
+@item
+On your Windows machine, start some ODBC application (such as Access).
+
+@item
+Create a new file in Windows and link to @strong{MySQL} using the ODBC
+driver the same way you normally do, EXCEPT type in @code{localhost}
+for the @strong{MySQL} host server --- not @code{yourmysqlservername}.
+@end itemize
+
+You should now have an ODBC connection to @strong{MySQL}, encrypted using SSH.
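+
+If you prefer a command-line SSH client (for example OpenSSH, if it is
+available on your Windows machine), the local forward from the list above can
+be set up with a single command; the host and user names below are the same
+placeholders used above:
+
+@example
+shell> ssh -L 3306:localhost:3306 your_userid@@yourmysqlserver
+@end example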
+
+@node Windows symbolic links, Windows compiling, Windows and SSH, Windows
+@subsubsection Splitting Data Across Different Disks on Windows
+
+@cindex symbolic links
+@cindex using multiple disks to start data
+@cindex disks, splitting data across
+
+Beginning with @strong{MySQL} Version 3.23.16, the @code{mysqld-max}
+and @code{mysqld-max-nt} servers in the @strong{MySQL} distribution are
+compiled with the @code{-DUSE_SYMDIR} option. This allows you to put a
+database on a different disk by adding a symbolic link to it
+(in a manner similar to the way that symbolic links work on Unix).
+
+On Windows, you make a symbolic link to a database by creating a file
+that contains the path to the destination directory and saving this in
+the @file{mysql_data} directory under the filename @file{database.sym}.
+Note that the symbolic link will be used only if the directory
+@file{mysql_data_dir\database} doesn't exist.
+
+For example, if the @strong{MySQL} data directory is @file{C:\mysql\data}
+and you want to have database @code{foo} located at @file{D:\data\foo}, you
+should create the file @file{C:\mysql\data\foo.sym} that contains the
+text @code{D:\data\foo\}. After that, all tables created in the database
+@code{foo} will be created in @file{D:\data\foo}.
+
+Note that because of the speed penalty you get when opening every table,
+we have not enabled this by default even if you have compiled
+@strong{MySQL} with support for this. To enable symbolic links, put the
+following entry in your @code{my.cnf} or @code{my.ini} file:
@example
-shell> CFLAGS=-D_PTHREAD_USE_D4 CXX=gcc CXXFLAGS=-O3 ./configure ....
+[mysqld]
+use-symbolic-links
@end example
-If you have problems with signals (@strong{MySQL} dies unexpectedly
-under high load), you may have found an OS bug with threads and
-signals. In this case you can tell @strong{MySQL} not to use signals by
-configuring with:
+In @strong{MySQL} 4.0 we will enable symlinks by default. Then you
+should instead use the @code{skip-symlink} option if you want to
+disable this.
+
+@node Windows compiling, Windows vs Unix, Windows symbolic links, Windows
+@subsubsection Compiling MySQL Clients on Windows
+
+@cindex compiling, on Windows
+@cindex Windows, compiling on
+
+In your source files, you should include @file{windows.h} before you include
+@file{mysql.h}:
@example
-shell> CFLAGS=-DDONT_USE_THR_ALARM \
- CXXFLAGS=-DDONT_USE_THR_ALARM \
- ./configure ...
+#if defined(_WIN32) || defined(_WIN64)
+#include <windows.h>
+#endif
+#include <mysql.h>
@end example
-This doesn't affect the performance of @strong{MySQL}, but has the side
-effect that you can't kill clients that are ``sleeping'' on a connection with
-@code{mysqladmin kill} or @code{mysqladmin shutdown}. Instead, the client
-will die when it issues its next command.
+You can either link your code with the dynamic @file{libmysql.lib} library,
+which is just a wrapper to load in @file{libmysql.dll} on demand, or link
+with the static @file{mysqlclient.lib} library.
-With @code{gcc} 2.95.2, you will probably run into the following compile error:
+Note that as the mysqlclient libraries are compiled as threaded libraries,
+you should also compile your code to be multi-threaded!
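+
+As a rough sketch, a build against the dynamic library with Microsoft's
+command-line compiler might look like this (@code{/MT} selects the
+multi-threaded run-time library; the paths assume the default
+@file{C:\mysql} location and @file{myapp.c} is just a placeholder name):
+
+@example
+C:\> cl /MT /I C:\mysql\include myapp.c C:\mysql\lib\libmysql.lib
+@end example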
+
+@node Windows vs Unix, , Windows compiling, Windows
+@subsubsection MySQL-Windows Compared to Unix MySQL
+
+@cindex Windows, versus Unix
+@cindex operating systems, Windows versus Unix
+
+@strong{MySQL}-Windows has by now proven itself to be very stable. This version
+of @strong{MySQL} has the same features as the corresponding Unix version
+with the following exceptions:
+
+@table @strong
+@item Win95 and threads
+Win95 leaks about 200 bytes of main memory for each thread creation.
+Each connection in @strong{MySQL} creates a new thread, so you shouldn't
+run @code{mysqld} for an extended time on Win95 if your server handles
+many connections! WinNT and Win98 don't suffer from this bug.
+
+@item Concurrent reads
+@strong{MySQL} depends on the @code{pread()} and @code{pwrite()} calls to be
+able to mix @code{INSERT} and @code{SELECT}. Currently we use mutexes
+to emulate @code{pread()}/@code{pwrite()}. We will, in the long run,
+replace the file level interface with a virtual interface so that we can
+use the @code{ReadFile()}/@code{WriteFile()} interface on NT to get more speed.
+The current implementation limits the number of open files @strong{MySQL}
+can use to 1024, which means that you will not be able to run as many
+concurrent threads on NT as on Unix.
+
+@item Blocking read
+@strong{MySQL} uses a blocking read for each connection.
+This means that:
+
+@itemize @bullet
+@item
+A connection will not be disconnected automatically after 8 hours, as happens
+with the Unix version of @strong{MySQL}.
+
+@item
+If a connection hangs, it's impossible to break it without killing
+@strong{MySQL}.
+
+@item
+@code{mysqladmin kill} will not work on a sleeping connection.
+
+@item
+@code{mysqladmin shutdown} can't abort as long as there are sleeping
+connections.
+@end itemize
+
+We plan to fix this problem when our Windows developers have figured out a
+nice workaround.
+
+@item UDF functions
+For the moment, @strong{MySQL}-Windows does not support user-definable
+functions.
+
+@item @code{DROP DATABASE}
+You can't drop a database that is in use by some thread.
+
+@item Killing @strong{MySQL} from the task manager
+You can't kill @strong{MySQL} from the task manager or with the shutdown
+utility in Win95. You must take it down with @code{mysqladmin shutdown}.
+
+@item Case-insensitive names
+Filenames are case insensitive on Windows, so database and table names
+are also case insensitive in @strong{MySQL} for Windows. The only
+restriction is that database and table names must be specified using the same
+case throughout a given statement. @xref{Name case sensitivity}.
+
+@item The @samp{\} directory character
+Pathname components in Win95 are separated by the @samp{\} character, which is
+also the escape character in @strong{MySQL}. If you are using @code{LOAD
+DATA INFILE} or @code{SELECT ... INTO OUTFILE}, you must double the @samp{\}
+character:
@example
-sql_acl.cc:1456: Internal compiler error in `scan_region', at except.c:2566
-Please submit a full bug report.
+mysql> LOAD DATA INFILE "C:\\tmp\\skr.txt" INTO TABLE skr;
+mysql> SELECT * INTO OUTFILE 'C:\\tmp\\skr.txt' FROM skr;
@end example
-To fix this you should change to the @code{sql} directory and do a ``cut
-and paste'' of the last @code{gcc} line, but change @code{-O3} to
-@code{-O0} (or add @code{-O0} immediately after @code{gcc} if you don't
-have any @code{-O} option on your compile line.) After this is done you
-can just change back to the top-level directly and run @code{make}
-again.
+Alternatively, use Unix style filenames with @samp{/} characters:
-@node SGI-Irix, FreeBSD, Alpha-DEC-OSF1, Source install system issues
-@subsection SGI-Irix Notes
+@example
+mysql> LOAD DATA INFILE "C:/tmp/skr.txt" INTO TABLE skr;
+mysql> SELECT * INTO OUTFILE 'C:/tmp/skr.txt' FROM skr;
+@end example
-If you are using Irix Version 6.5.3 or newer @code{mysqld} will only be able to
-create threads if you run it as a user with @code{CAP_SCHED_MGT}
-privileges (like @code{root}) or give the @code{mysqld} server this privilege
-with the following shell command:
+@item @code{Can't open named pipe} error
+If you use a @strong{MySQL} 3.22 version on NT with the newest mysql-clients
+you will get the following error:
@example
-shell> chcap "CAP_SCHED_MGT+epi" /opt/mysql/libexec/mysqld
+error 2017: can't open named pipe to host: . pipe...
@end example
-You may have to undefine some things in @file{config.h} after running
-@code{configure} and before compiling.
+@tindex .my.cnf file
+This is because the release version of @strong{MySQL} uses named pipes on NT
+by default. You can avoid this error by using the @code{--host=localhost}
+option to the new @strong{MySQL} clients or by creating an option file
+@file{C:\my.cnf} that contains the following information:
-In some Irix implementations, the @code{alloca()} function is broken. If the
-@code{mysqld} server dies on some @code{SELECT} statements, remove the lines
-from @file{config.h} that define @code{HAVE_ALLOC} and @code{HAVE_ALLOCA_H}.
-If @code{mysqladmin create} doesn't work, remove the line from @file{config.h}
-that defines @code{HAVE_READDIR_R}. You may have to remove the
-@code{HAVE_TERM_H} line as well.
+@example
+[client]
+host = localhost
+@end example
-SGI recommends that you install all of the patches on this page as a set:
-http://support.sgi.com/surfzone/patches/patchset/6.2_indigo.rps.html
+@item @code{Access denied for user} error
+If you get the error @code{Access denied for user: 'some-user@@unknown'
+to database 'mysql'} when accessing a @strong{MySQL} server on the same
+machine, this means that @strong{MySQL} can't resolve your host name
+properly.
-At the very minimum, you should install the latest kernel rollup, the
-latest @code{rld} rollup, and the latest @code{libc} rollup.
+To fix this, you should create a file @file{\windows\hosts} with the
+following information:
-You definitely need all the POSIX patches on this page, for pthreads support:
+@example
+127.0.0.1 localhost
+@end example
-@uref{http://support.sgi.com/surfzone/patches/patchset/6.2_posix.rps.html}
+@item @code{ALTER TABLE}
+While you are executing an @code{ALTER TABLE} statement, the table is locked
+from usage by other threads. This has to do with the fact that on Windows,
+you can't delete a file that is in use by another thread. (In the future,
+we may find some way to work around this problem.)
-If you get the something like the following error when compiling
-@file{mysql.cc}:
+@item @code{DROP TABLE} on a table that is in use by a @code{MERGE} table will not work
+The @code{MERGE} handler does its table mapping hidden from @strong{MySQL}.
+Because Windows doesn't allow you to drop files that are open, you first
+must flush all @code{MERGE} tables (with @code{FLUSH TABLES}) or drop the
+@code{MERGE} table before dropping the table. We will fix this at the same
+time we introduce @code{VIEW}s.
+@end table
+
+Here are some open issues for anyone who might want to help us with the Windows
+release:
+
+@cindex Windows, open issues
+
+@itemize @bullet
+@item
+Make a single-user @code{MYSQL.DLL} server. This should include everything in
+a standard @strong{MySQL} server, except thread creation. This will make
+@strong{MySQL} much easier to use in applications that don't need a true
+client/server and don't need to access the server from other hosts.
+
+@item
+Add some nice start and shutdown icons to the @strong{MySQL} installation.
+
+@item
+Create a tool to manage registry entries for the @strong{MySQL} startup
+options. The registry entry reading is already coded into @file{mysqld.cc},
+but it should be recoded to be more parameter oriented. The tool should
+also be able to update the @file{C:\my.cnf} option file if the user prefers
+to use that instead of the registry.
+
+@item
+When registering @code{mysqld} as a service with @code{--install} (on NT)
+it would be nice if you could also add default options on the command line.
+For the moment, the workaround is to list the parameters in the
+@file{C:\my.cnf} file instead.
+
+@item
+It would be real nice to be able to kill @code{mysqld} from the task manager.
+For the moment, you must use @code{mysqladmin shutdown}.
+
+@item
+Port @code{readline} to Windows for use in the @code{mysql} command line tool.
+
+@item
+GUI versions of the standard @strong{MySQL} clients (@code{mysql},
+@code{mysqlshow}, @code{mysqladmin}, and @code{mysqldump}) would be nice.
+
+@item
+It would be nice if the socket read and write functions in @file{net.c} were
+interruptible. This would make it possible to kill open threads with
+@code{mysqladmin kill} on Windows.
+
+@item
+@code{mysqld} always starts in the "C" locale and not in the default locale.
+We would like to have @code{mysqld} use the current locale for the sort order.
+
+@item
+Implement UDF functions with @code{.DLL}s.
+
+@item
+Add macros to use the faster thread-safe increment/decrement methods
+provided by Windows.
+
+@end itemize
+
+Other Windows-specific issues are described in the @file{README} file that
+comes with the @strong{MySQL}-Windows distribution.
+
+
+@node Solaris, BSD Notes, Windows, Operating System Specific Notes
+@subsection Solaris Notes
+
+@cindex Solaris installation problems
+@cindex problems, installing on Solaris
+@cindex tar, problems on Solaris
+@cindex errors, directory checksum
+@cindex checksum errors
+
+On Solaris, you may run into trouble even before you get the @strong{MySQL}
+distribution unpacked! Solaris @code{tar} can't handle long file names, so
+you may see an error like this when you unpack @strong{MySQL}:
@example
-"/usr/include/curses.h", line 82: error(1084): invalid combination of type
+x mysql-3.22.12-beta/bench/Results/ATIS-mysql_odbc-NT_4.0-cmp-db2,informix,ms-sql,mysql,oracle,solid,sybase, 0 bytes, 0 tape blocks
+tar: directory checksum error
@end example
-Type the following in the top-level directory of your @strong{MySQL} source
-tree:
+In this case, you must use GNU @code{tar} (@code{gtar}) to unpack the
+distribution. You can find a precompiled copy for Solaris at
+@uref{http://www.mysql.com/Downloads/}.
+
+Sun native threads work only on Solaris 2.5 and higher. For Version 2.4 and
+earlier, @strong{MySQL} will automatically use MIT-pthreads.
+@xref{MIT-pthreads}.
+
+If you get the following error from configure:
@example
-shell> extra/replace bool curses_bool < /usr/include/curses.h > include/curses.h
-shell> make
+checking for restartable system calls... configure: error can not run test
+programs while cross compiling
@end example
-There have also been reports of scheduling problems. If only one thread is
-running, things go slow. Avoid this by starting another client. This may
-lead to a 2-to-10-fold increase in execution speed thereafter for the other
-thread. This is a poorly understood problem with Irix threads; you may have
-to improvise to find solutions until this can be fixed.
+This means that you have something wrong with your compiler installation!
+In this case you should upgrade your compiler to a newer version. You may
+also be able to solve this problem by inserting the following line into the
+@file{config.cache} file:
-If you are compiling with @code{gcc}, you can use the following
-@code{configure} command:
+@example
+ac_cv_sys_restartable_syscalls=$@{ac_cv_sys_restartable_syscalls='no'@}
+@end example
+
+If you are using Solaris on a SPARC, the recommended compiler is
+@code{gcc} 2.95.2. You can find this at @uref{http://gcc.gnu.org/}.
+Note that @code{egcs} 1.1.1 and @code{gcc} 2.8.1 don't work reliably on
+SPARC!
+
+The recommended @code{configure} line when using @code{gcc} 2.95.2 is:
@example
-CC=gcc CXX=gcc CXXFLAGS=-O3 \
-./configure --prefix=/usr/local/mysql --with-thread-safe-client --with-named-thread-libs=-lpthread
+CC=gcc CFLAGS="-O3" \
+CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" \
+./configure --prefix=/usr/local/mysql --with-low-memory --enable-assembler
@end example
-On Irix 6.5.11 with native Irix C and C++ compilers ver. 7.3.1.2, the
-following is reported to work
+If you have an UltraSPARC, you can get 4% more performance by adding
+@code{-mcpu=v8 -Wa,-xarch=v8plusa} to @code{CFLAGS} and @code{CXXFLAGS}.
+
+If you have the Sun Workshop (SunPro) 4.2 (or newer) compiler, you can
+run @code{configure} like this:
@example
-CC=cc CXX=CC CFLAGS='-O3 -n32 -TARG:platform=IP22 -I/usr/local/include \
--L/usr/local/lib' CXXFLAGS='-O3 -n32 -TARG:platform=IP22 \
--I/usr/local/include -L/usr/local/lib' ./configure --prefix=/usr/local/mysql \
---with-berkeley-db --with-innodb \
---with-libwrap=/usr/local --with-named-curses-libs=/usr/local/lib/libncurses.a
+CC=cc CFLAGS="-Xa -fast -xO4 -native -xstrconst -mt" \
+CXX=CC CXXFLAGS="-noex -xO4 -mt" \
+./configure --prefix=/usr/local/mysql --enable-assembler
+@end example
+
+You may also have to edit the @code{configure} script to change this line:
+
+@example
+#if !defined(__STDC__) || __STDC__ != 1
+@end example
+
+to this:
+
+@example
+#if !defined(__STDC__)
+@end example
+
+If you turn on @code{__STDC__} with the @code{-Xc} option, the Sun compiler
+can't compile with the Solaris @file{pthread.h} header file. This is a Sun
+bug (broken compiler or broken include file).
+
+If @code{mysqld} issues the error message shown below when you run it, you have
+tried to compile @strong{MySQL} with the Sun compiler without enabling the
+multi-thread option (@code{-mt}):
+
+@example
+libc internal error: _rmutex_unlock: rmutex not held
+@end example
+
+Add @code{-mt} to @code{CFLAGS} and @code{CXXFLAGS} and try again.
+
+If you get the following error when compiling @strong{MySQL} with @code{gcc},
+it means that your @code{gcc} is not configured for your version of Solaris:
+
+@example
+shell> gcc -O3 -g -O2 -DDBUG_OFF -o thr_alarm ...
+./thr_alarm.c: In function `signal_hand':
+./thr_alarm.c:556: too many arguments to function `sigwait'
@end example
-@node FreeBSD, NetBSD, SGI-Irix, Source install system issues
-@subsection FreeBSD Notes
+The proper thing to do in this case is to get the newest version of
+@code{gcc} and compile it with your current @code{gcc} compiler! At
+least for Solaris 2.5, almost all binary versions of @code{gcc} have
+old, unusable include files that will break all programs that use
+threads (and possibly other programs)!
+
+Solaris doesn't provide static versions of all system libraries
+(@code{libpthreads} and @code{libdl}), so you can't compile @strong{MySQL}
+with @code{--static}. If you try to do so, you will get the error:
+
+@example
+ld: fatal: library -ldl: not found
+
+or
+
+undefined reference to `dlopen'
+@end example
+
+If too many processes try to connect very rapidly to @code{mysqld}, you will
+see this error in the @strong{MySQL} log:
+
+@example
+Error in accept: Protocol error
+@end example
+
+You might try starting the server with the @code{--set-variable back_log=50}
+option as a workaround for this. @xref{Command-line options}.
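+
+For example, if you start the server through @code{safe_mysqld}:
+
+@example
+shell> safe_mysqld --set-variable back_log=50 &
+@end example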
+
+If you are linking your own @strong{MySQL} client, you might get the
+following error when you try to execute it:
+
+@example
+ld.so.1: ./my: fatal: libmysqlclient.so.#: open failed: No such file or directory
+@end example
+
+The problem can be avoided by one of the following methods:
+
+@itemize @bullet
+@item
+Link the client with the following flag (instead of @code{-Lpath}):
+@code{-Wl,r/full-path-to-libmysqlclient.so}.
+
+@item
+Copy @file{libmysqlclient.so} to @file{/usr/lib}.
+
+@tindex LD_RUN_PATH environment variable
+@tindex Environment variable, LD_RUN_PATH
+@item
+Add the pathname of the directory where @file{libmysqlclient.so} is located
+to the @code{LD_RUN_PATH} environment variable before running your client.
+@end itemize
+
+When using the @code{--with-libwrap} configure option, you must also
+include the libraries that @file{libwrap.a} needs:
+
+@example
+--with-libwrap="/opt/NUtcpwrapper-7.6/lib/libwrap.a -lnsl -lsocket"
+@end example
+
+If you have problems with configure trying to link with @code{-lz} and
+you don't have @code{zlib} installed, you have two options:
+
+@itemize @bullet
+@item
+If you want to be able to use the compressed communication protocol,
+you need to get and install zlib from ftp.gnu.org.
+
+@item
+Configure with @code{--with-named-z-libs=no}.
+@end itemize
+
+If you are using gcc and have problems with loading @code{UDF} functions
+into @strong{MySQL}, try adding @code{-lgcc} to the link line for the
+@code{UDF} function.
+
+If you would like @strong{MySQL} to start automatically, you can copy
+@file{support-files/mysql.server} to @file{/etc/init.d} and create a
+symbolic link to it named @file{/etc/rc3.d/S99mysql.server}.
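+
+For example (run as @code{root}; the @code{chmod} just makes sure the script
+is executable, and the file name under @file{/etc/init.d} is arbitrary):
+
+@example
+shell> cp support-files/mysql.server /etc/init.d/mysql.server
+shell> chmod +x /etc/init.d/mysql.server
+shell> ln -s /etc/init.d/mysql.server /etc/rc3.d/S99mysql.server
+@end example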
+
+
+@menu
+* Solaris 2.7::
+* Solaris x86::
+@end menu
+
+@node Solaris 2.7, Solaris x86, Solaris, Solaris
+@subsubsection Solaris 2.7/2.8 Notes
+
+You can normally use a Solaris 2.6 binary on Solaris 2.7 and 2.8. Most
+of the Solaris 2.6 issues also apply for Solaris 2.7 and 2.8.
+
+Note that @strong{MySQL} Version 3.23.4 and above should be able to autodetect
+new versions of Solaris and enable workarounds for the following problems!
+
+Solaris 2.7 / 2.8 has some bugs in the include files. You may see the
+following error when you use @code{gcc}:
+
+@example
+/usr/include/widec.h:42: warning: `getwc' redefined
+/usr/include/wchar.h:326: warning: this is the location of the previous
+definition
+@end example
+
+If this occurs, you can do the following to fix the problem:
+
+Copy @code{/usr/include/widec.h} to
+@code{.../lib/gcc-lib/os/gcc-version/include} and change line 41 from:
+
+@example
+#if !defined(lint) && !defined(__lint)
+@end example
+
+to this:
+
+@example
+#if !defined(lint) && !defined(__lint) && !defined(getwc)
+@end example
+
+Alternatively, you can edit @file{/usr/include/widec.h} directly. Either
+way, after you make the fix, you should remove @file{config.cache} and run
+@code{configure} again!
+
+If you get errors like this when you run @code{make}, it's because
+@code{configure} didn't detect the @file{curses.h} file (probably
+because of the error in @file{/usr/include/widec.h}):
+
+@example
+In file included from mysql.cc:50:
+/usr/include/term.h:1060: syntax error before `,'
+/usr/include/term.h:1081: syntax error before `;'
+@end example
+
+The solution to this is to do one of the following:
+
+@itemize @bullet
+@item
+Configure with @code{CFLAGS=-DHAVE_CURSES_H CXXFLAGS=-DHAVE_CURSES_H ./configure}.
+
+@item
+Edit @file{/usr/include/widec.h} as indicated above and rerun @code{configure}.
+
+@item
+Remove the @code{#define HAVE_TERM} line from the @file{config.h} file and
+run @code{make} again.
+@end itemize
+
+If you get a problem that your linker can't find @code{-lz} when linking
+your client program, the problem is probably that your @file{libz.so} file is
+installed in @file{/usr/local/lib}. You can fix this by one of the
+following methods:
+
+@itemize @bullet
+@item
+Add @file{/usr/local/lib} to @code{LD_LIBRARY_PATH}.
+
+@item
+Add a link to @file{libz.so} from @file{/lib}.
+
+@item
+If you are using Solaris 8, you can install the optional zlib from your
+Solaris 8 CD distribution.
+
+@item
+Configure @strong{MySQL} with the @code{--with-named-z-libs=no} option.
+@end itemize
+
+
+@node Solaris x86, , Solaris 2.7, Solaris
+@subsubsection Solaris x86 Notes
+
+On Solaris 2.8 on x86, @code{mysqld} will dump core if you run
+@code{strip} on it.
+
+If you are using @code{gcc} or @code{egcs} on Solaris x86 and you
+experience problems with core dumps under load, you should use the
+following @code{configure} command:
+
+@example
+CC=gcc CFLAGS="-O3 -fomit-frame-pointer -DHAVE_CURSES_H" \
+CXX=gcc \
+CXXFLAGS="-O3 -fomit-frame-pointer -felide-constructors -fno-exceptions -fno-rtti -DHAVE_CURSES_H" \
+./configure --prefix=/usr/local/mysql
+@end example
+
+This will avoid problems with the @code{libstdc++} library and with C++
+exceptions.
+
+If this doesn't help, you should compile a debug version and run
+it with a trace file or under @code{gdb}. @xref{Using gdb on mysqld}.
+
+
+@node BSD Notes, Mac OS X, Solaris, Operating System Specific Notes
+@subsection BSD Notes
+
+@menu
+* FreeBSD::
+* NetBSD::
+* OpenBSD::
+* OpenBSD 2.5::
+* OpenBSD 2.8::
+* BSDI::
+* BSDI2::
+* BSDI3::
+* BSDI4::
+@end menu
+
+
+@node FreeBSD, NetBSD, BSD Notes, BSD Notes
+@subsubsection FreeBSD Notes
FreeBSD 3.x is recommended for running @strong{MySQL} since the thread package
is much more integrated.
@@ -11064,21 +10847,23 @@ If you get problems with the current date in @strong{MySQL}, setting the
To get a secure and stable system you should only use FreeBSD kernels
that are marked @code{-STABLE}.
-@node NetBSD, OpenBSD, FreeBSD, Source install system issues
-@subsection NetBSD notes
+
+@node NetBSD, OpenBSD, FreeBSD, BSD Notes
+@subsubsection NetBSD notes
To compile on NetBSD you need GNU @code{make}. Otherwise the compile will
crash when @code{make} tries to run @code{lint} on C++ files.
-@node OpenBSD, BSDI, NetBSD, Source install system issues
-@subsection OpenBSD Notes
+
+@node OpenBSD, OpenBSD 2.5, NetBSD, BSD Notes
+@subsubsection OpenBSD Notes
@menu
* OpenBSD 2.5:: OpenBSD 2.5 Notes
* OpenBSD 2.8:: OpenBSD 2.8 Notes
@end menu
-@node OpenBSD 2.5, OpenBSD 2.8, OpenBSD, OpenBSD
+@node OpenBSD 2.5, OpenBSD 2.8, OpenBSD, BSD Notes
@subsubsection OpenBSD 2.5 Notes
On OpenBSD Version 2.5, you can compile @strong{MySQL} with native threads
@@ -11088,7 +10873,7 @@ with the following options:
CFLAGS=-pthread CXXFLAGS=-pthread ./configure --with-mit-threads=no
@end example
-@node OpenBSD 2.8, , OpenBSD 2.5, OpenBSD
+@node OpenBSD 2.8, BSDI, OpenBSD 2.5, BSD Notes
@subsubsection OpenBSD 2.8 Notes
Our users have reported that OpenBSD 2.8 has a threading bug which causes
@@ -11097,8 +10882,9 @@ but as of January 25th, 2001, it's only available in the ``-current'' branch.
The symptoms of this threading bug are: slow response, high load, high CPU
usage, and crashes.
-@node BSDI, SCO, OpenBSD, Source install system issues
-@subsection BSD/OS Notes
+
+@node BSDI, BSDI2, OpenBSD 2.8, BSD Notes
+@subsubsection BSD/OS Notes
@menu
* BSDI2:: BSD/OS 2.x notes
@@ -11106,7 +10892,7 @@ usage, and crashes.
* BSDI4:: BSD/OS 4.x notes
@end menu
-@node BSDI2, BSDI3, BSDI, BSDI
+@node BSDI2, BSDI3, BSDI, BSD Notes
@subsubsection BSD/OS Version 2.x Notes
If you get the following error when compiling @strong{MySQL}, your
@@ -11130,7 +10916,7 @@ If you are using @code{gcc}, you may also use have to use the
If you get problems with the current date in @strong{MySQL}, setting the
@code{TZ} variable will probably help. @xref{Environment variables}.
-@node BSDI3, BSDI4, BSDI2, BSDI
+@node BSDI3, BSDI4, BSDI2, BSD Notes
@subsubsection BSD/OS Version 3.x Notes
Upgrade to BSD/OS Version 3.1. If that is not possible, install
@@ -11170,7 +10956,7 @@ If this doesn't work and you are using @code{bash}, try switching to
@code{csh} or @code{sh}; some BSDI users have reported problems with
@code{bash} and @code{ulimit}.
-@node BSDI4, , BSDI3, BSDI
+@node BSDI4, , BSDI3, BSD Notes
@subsubsection BSD/OS Version 4.x Notes
BSDI Version 4.x has some thread-related bugs. If you want to use
@@ -11195,8 +10981,661 @@ Note that the above means that you can't symbolic link a database directories
to another database directory or symbolic link a table to another database
on BSDI! (Making a symbolic link to another disk is ok).
-@node SCO, SCO Unixware, BSDI, Source install system issues
-@subsection SCO Notes
+
+@node Mac OS X, Other Unix Notes, BSD Notes, Operating System Specific Notes
+@subsection Mac OS X Notes
+
+@menu
+* Mac OS X Public Beta::
+* Mac OS X Server::
+@end menu
+
+
+@node Mac OS X Public Beta, Mac OS X Server, Mac OS X, Mac OS X
+@subsubsection Mac OS X Public Beta
+
+@strong{MySQL} should work without any problems on Mac OS X Public Beta
+(Darwin). You don't need the pthread patches for this OS!
+
+
+@node Mac OS X Server, , Mac OS X Public Beta, Mac OS X
+@subsubsection Mac OS X Server
+
+Before trying to configure @strong{MySQL} on Mac OS X server you must
+first install the pthread package from
+@uref{http://www.prnet.de/RegEx/mysql.html}.
+
+Our binary for Mac OS X is compiled on Rhapsody 5.5 with the following
+configure line:
+
+@example
+CC=gcc CFLAGS="-O2 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O2 -fomit-frame-pointer" ./configure --prefix=/usr/local/mysql "--with-comment=Official MySQL binary" --with-extra-charsets=complex --disable-shared
+@end example
+
+You might also want to add aliases to your shell's resource file so you can
+access @code{mysql} and @code{mysqladmin} from the command line (the example
+below uses @code{csh} alias syntax):
+
+@example
+alias mysql '/usr/local/mysql/bin/mysql'
+alias mysqladmin '/usr/local/mysql/bin/mysqladmin'
+@end example
+
+
+@node Other Unix Notes, OS/2, Mac OS X, Operating System Specific Notes
+@subsection Other Unix Notes
+
+@menu
+* Binary notes-HP-UX::
+* HP-UX 10.20::
+* HP-UX 11.x::
+* IBM-AIX::
+* SunOS::
+* Alpha-DEC-UNIX::
+* Alpha-DEC-OSF1::
+* SGI-Irix::
+* SCO::
+* SCO Unixware::
+@end menu
+
+
+@node Binary notes-HP-UX, HP-UX 10.20, Other Unix Notes, Other Unix Notes
+@subsubsection HP-UX Notes for Binary Distributions
+
+@cindex HP-UX, binary distribution
+@cindex binary distributions, on HP-UX
+
+The binary distributions of @strong{MySQL} for HP-UX are
+distributed as an HP depot file and as a tar file. To use the depot
+file you must be running at least HP-UX 10.x to have access to HP's
+software depot tools.
+
+The HP version of @strong{MySQL} was compiled on an HP 9000/8xx server
+under HP-UX 10.20, and uses MIT-pthreads. It is known to work well under
+this configuration. @strong{MySQL} Version 3.22.26 and newer can also be
+built with HP's native thread package.
+
+Other configurations that may work:
+
+@itemize @bullet
+@item
+HP 9000/7xx running HP-UX 10.20+
+@item
+HP 9000/8xx running HP-UX 10.30
+@end itemize
+
+The following configurations almost definitely won't work:
+
+@itemize @bullet
+@item
+HP 9000/7xx or 8xx running HP-UX 10.x where x < 2
+@item
+HP 9000/7xx or 8xx running HP-UX 9.x
+@end itemize
+
+To install the distribution, use one of the commands below, where
+@code{/path/to/depot} is the full pathname of the depot file:
+
+@itemize @bullet
+@item
+To install everything, including the server, client and development tools:
+
+@example
+shell> /usr/sbin/swinstall -s /path/to/depot mysql.full
+@end example
+
+@item
+To install only the server:
+
+@example
+shell> /usr/sbin/swinstall -s /path/to/depot mysql.server
+@end example
+
+@item
+To install only the client package:
+
+@example
+shell> /usr/sbin/swinstall -s /path/to/depot mysql.client
+@end example
+
+@item
+To install only the development tools:
+
+@example
+shell> /usr/sbin/swinstall -s /path/to/depot mysql.developer
+@end example
+@end itemize
+
+The depot places binaries and libraries in @file{/opt/mysql} and data in
+@file{/var/opt/mysql}. The depot also creates the appropriate entries in
+@file{/etc/init.d} and @file{/etc/rc2.d} to start the server automatically
+at boot time. Obviously, this entails being @code{root} to install.
+
+To install the HP-UX tar.gz distribution, you must have a copy of GNU
+@code{tar}.
+
+
+@node HP-UX 10.20, HP-UX 11.x, Binary notes-HP-UX, Other Unix Notes
+@subsubsection HP-UX Version 10.20 Notes
+
+There are a couple of small problems when compiling @strong{MySQL} on
+HP-UX. We recommend that you use @code{gcc} instead of the HP-UX native
+compiler, because @code{gcc} produces better code!
+
+We recommend using gcc 2.95 on HP-UX. Don't use high optimization
+flags (like -O6) as this may not be safe on HP-UX.
+
+Note that MIT-pthreads can't be compiled with the HP-UX compiler
+because it can't compile @code{.S} (assembler) files.
+
+The following configure line should work:
+
+@example
+CFLAGS="-DHPUX -I/opt/dce/include" CXXFLAGS="-DHPUX -I/opt/dce/include -felide-constructors -fno-exceptions -fno-rtti" CXX=gcc ./configure --with-pthread --with-named-thread-libs='-ldce' --prefix=/usr/local/mysql --disable-shared
+@end example
+
+If you are compiling @code{gcc} 2.95 yourself, you should NOT link it with
+the DCE libraries (@code{libdce.a} or @code{libcma.a}) if you want to compile
+@strong{MySQL} with MIT-pthreads. If you mix the DCE and MIT-pthreads
+packages you will get a @code{mysqld} to which you cannot connect. Remove
+the DCE libraries while you compile @code{gcc} 2.95!
+
+
+@node HP-UX 11.x, IBM-AIX, HP-UX 10.20, Other Unix Notes
+@subsubsection HP-UX Version 11.x Notes
+
+For HP-UX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later.
+
+Because of some critical bugs in the standard HP-UX libraries, you should
+install the following patches before trying to run @strong{MySQL} on HP-UX 11.0:
+
+@example
+PHKL_22840 Streams cumulative
+PHNE_22397 ARPA cumulative
+@end example
+
+These patches solve the problem of getting @code{EWOULDBLOCK} from @code{recv()}
+and @code{EBADF} from @code{accept()} in threaded applications.
+
+If you are using @code{gcc} 2.95.1 on an unpatched HP-UX 11.x system,
+you will get the error:
+
+@example
+In file included from /usr/include/unistd.h:11,
+ from ../include/global.h:125,
+ from mysql_priv.h:15,
+ from item.cc:19:
+/usr/include/sys/unistd.h:184: declaration of C function ...
+/usr/include/sys/pthread.h:440: previous declaration ...
+In file included from item.h:306,
+ from mysql_priv.h:158,
+ from item.cc:19:
+@end example
+
+The problem is that HP-UX doesn't define @code{pthread_atfork()} consistently.
+It has conflicting prototypes in
+@file{/usr/include/sys/unistd.h}:184 and
+@file{/usr/include/sys/pthread.h}:440 (details below).
+
+One solution is to copy @file{/usr/include/sys/unistd.h} into
+@file{mysql/include} and edit @file{unistd.h} and change it to match
+the definition in @file{pthread.h}. Here's the diff:
+
+@example
+183,184c183,184
+< extern int pthread_atfork(void (*prepare)(), void (*parent)(),
+< void (*child)());
+---
+> extern int pthread_atfork(void (*prepare)(void), void (*parent)(void),
+> void (*child)(void));
+@end example
+
+After this, the following configure line should work:
+
+@example
+CFLAGS="-fomit-frame-pointer -O3 -fpic" CXX=gcc CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti -O3" ./configure --prefix=/usr/local/mysql --disable-shared
+@end example
+
+Here is some information that an HP-UX Version 11.x user sent us about
+compiling @strong{MySQL} with the HP-UX 11.x compiler:
+
+@example
+ Environment:
+ proper compilers.
+ setenv CC cc
+ setenv CXX aCC
+ flags
+ setenv CFLAGS -D_REENTRANT
+ setenv CXXFLAGS -D_REENTRANT
+ setenv CPPFLAGS -D_REENTRANT
+ % aCC -V
+ aCC: HP ANSI C++ B3910B X.03.14.06
+ % cc -V /tmp/empty.c
+ cpp.ansi: HP92453-01 A.11.02.00 HP C Preprocessor (ANSI)
+ ccom: HP92453-01 A.11.01.00 HP C Compiler
+ cc: "/tmp/empty.c", line 1: warning 501: Empty source file.
+
+ configuration:
+ ./configure --with-pthread \
+ --prefix=/source-control/mysql \
+ --with-named-thread-libs=-lpthread \
+ --with-low-memory
+
+ added '#define _CTYPE_INCLUDED' to include/m_ctype.h. This
+ symbol is the one defined in HP's /usr/include/ctype.h:
+
+ /* Don't include std ctype.h when this is included */
+ #define _CTYPE_H
+ #define __CTYPE_INCLUDED
+ #define _CTYPE_INCLUDED
+ #define _CTYPE_USING /* Don't put names in global namespace. */
+@end example
+
+@itemize @bullet
+@item
+I had to use the compile-time flag @code{-D_REENTRANT} to get the compiler
+to recognize the prototype for @code{localtime_r}. Alternatively I could have
+supplied the prototype for @code{localtime_r}. But I wanted to catch other
+bugs without needing to run into them. I wasn't sure where I needed it, so I
+added it to all flags.
+@item
+The optimization flags used by @strong{MySQL} (-O3) are not recognized by HP's
+compilers. I did not change the flags.
+@end itemize
+
+If you get the following error from @code{configure}:
+
+@example
+checking for cc option to accept ANSI C... no
+configure: error: MySQL requires a ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual.
+@end example
+
+Check that you don't have the path to the K&R compiler before the path
+to the HP-UX C and C++ compiler.
+
+
+@node IBM-AIX, SunOS, HP-UX 11.x, Other Unix Notes
+@subsubsection IBM-AIX notes
+
+@cindex problems, installing on IBM-AIX
+
+Automatic detection of @code{xlC} is missing from Autoconf, so a
+@code{configure} command like the following is needed when compiling
+@strong{MySQL} (this example uses the IBM compiler):
+
+@example
+export CC="xlc_r -ma -O3 -qstrict -qoptimize=3 -qmaxmem=8192 "
+export CXX="xlC_r -ma -O3 -qstrict -qoptimize=3 -qmaxmem=8192"
+export CFLAGS="-I /usr/local/include"
+export LDFLAGS="-L /usr/local/lib"
+export CPPFLAGS=$CFLAGS
+export CXXFLAGS=$CFLAGS
+
+./configure --prefix=/usr/local \
+ --localstatedir=/var/mysql \
+ --sysconfdir=/etc/mysql \
+ --sbindir='/usr/local/bin' \
+ --libexecdir='/usr/local/bin' \
+ --enable-thread-safe-client \
+ --enable-large-files
+@end example
+
+Above are the options used to compile the @strong{MySQL} distribution that
+can be found at @uref{http://www-frec.bull.com/}.
+
+If you change the @code{-O3} to @code{-O2} in the above configure line,
+you must also remove the @code{-qstrict} option (this is a limitation in
+the IBM C compiler).
+
+If you are using @code{gcc} or @code{egcs} to compile @strong{MySQL}, you
+@strong{MUST} use the @code{-fno-exceptions} flag, as the exception
+handling in @code{gcc}/@code{egcs} is not thread safe! (This is tested with
+@code{egcs} 1.1.). There are also some known problems with IBM's assembler,
+which may cause it to generate bad code when used with gcc.
+
+We recommend the following @code{configure} line with @code{egcs} and
+@code{gcc 2.95} on AIX:
+
+@example
+CC="gcc -pipe -mcpu=power -Wa,-many" \
+CXX="gcc -pipe -mcpu=power -Wa,-many" \
+CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti" \
+./configure --prefix=/usr/local/mysql --with-low-memory
+@end example
+
+The @code{-Wa,-many} option is necessary for the compile to be successful. IBM
+is aware of this problem but is in no hurry to fix it because a workaround is
+available. We don't know if @code{-fno-exceptions} is required with
+@code{gcc 2.95}, but as @strong{MySQL} doesn't use exceptions and the above
+option generates faster code, we recommend that you always use this
+option with @code{egcs}/@code{gcc}.
+
+If you get a problem with assembler code, try changing the @code{-mcpu=xxx}
+option to match your CPU. Typically @code{power2}, @code{power}, or
+@code{powerpc} may need to be used; alternatively, you might need to use
+@code{604} or @code{604e}. We are not positive, but using @code{power} is
+likely to be safe most of the time, even on a power2 machine.
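+
+For example, on a PowerPC 604 machine the compiler settings in the configure
+line above would become something like this:
+
+@example
+CC="gcc -pipe -mcpu=604 -Wa,-many" \
+CXX="gcc -pipe -mcpu=604 -Wa,-many" \
+CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti" \
+./configure --prefix=/usr/local/mysql --with-low-memory
+@end example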
+
+If you don't know what your CPU is, run @code{uname -m}. This will give
+you back a string that looks like @code{000514676700}, with a format of
+@code{xxyyyyyymmss} where @code{xx} and @code{ss} are always 0's, @code{yyyyyy}
+is a unique system id, and @code{mm} is the id of the CPU Planar. A chart of
+these values can be found at
+@uref{http://www.rs6000.ibm.com/doc_link/en_US/a_doc_lib/cmds/aixcmds5/uname.htm}.
+This will give you a machine type and a machine model you can use to
+determine what type of CPU you have.
+
+If you have problems with signals (@strong{MySQL} dies unexpectedly
+under high load) you may have found an OS bug with threads and
+signals. In this case you can tell @strong{MySQL} not to use signals by
+configuring with:
+
+@example
+shell> CFLAGS=-DDONT_USE_THR_ALARM CXX=gcc \
+ CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti -DDONT_USE_THR_ALARM" \
+ ./configure --prefix=/usr/local/mysql --with-debug --with-low-memory
+@end example
+
+This doesn't affect the performance of @strong{MySQL}, but has the side
+effect that you can't kill clients that are ``sleeping'' on a connection with
+@code{mysqladmin kill} or @code{mysqladmin shutdown}. Instead, the client
+will die when it issues its next command.
+
+On some versions of AIX, linking with @code{libbind.a} makes
+@code{getservbyname} core dump. This is an AIX bug and should be reported
+to IBM.
+
+For AIX 4.2.1 and gcc you have to make the following changes.
+
+After configuring, edit @file{config.h} and @file{include/my_config.h}
+and change the line that says
+
+@example
+#define HAVE_SNPRINTF 1
+@end example
+
+to
+
+@example
+#undef HAVE_SNPRINTF
+@end example
+
+And finally, in @file{mysqld.cc} you need to add a prototype for @code{initgroups()}:
+
+@example
+#ifdef _AIX41
+extern "C" int initgroups(const char *,int);
+#endif
+@end example
+
+
+@node SunOS, Alpha-DEC-UNIX, IBM-AIX, Other Unix Notes
+@subsubsection SunOS 4 Notes
+
+On SunOS 4, MIT-pthreads is needed to compile @strong{MySQL}, which in turn
+means you will need GNU @code{make}.
+
+Some SunOS 4 systems have problems with dynamic libraries and @code{libtool}.
+You can use the following @code{configure} line to avoid this problem:
+
+@example
+shell> ./configure --disable-shared --with-mysqld-ldflags=-all-static
+@end example
+
+When compiling @code{readline}, you may get warnings about duplicate defines.
+These may be ignored.
+
+When compiling @code{mysqld}, there will be some @code{implicit declaration
+of function} warnings. These may be ignored.
+
+
+@node Alpha-DEC-UNIX, Alpha-DEC-OSF1, SunOS, Other Unix Notes
+@subsubsection Alpha-DEC-UNIX Notes (Tru64)
+
+If you are using egcs 1.1.2 on Digital Unix, you should upgrade to gcc
+2.95.2, as egcs on DEC has some serious bugs!
+
+When compiling threaded programs under Digital Unix, the documentation
+recommends using the @code{-pthread} option for @code{cc} and @code{cxx} and
+the libraries @code{-lmach -lexc} (in addition to @code{-lpthread}). You
+should run @code{configure} something like this:
+
+@example
+CC="cc -pthread" CXX="cxx -pthread -O" \
+./configure --with-named-thread-libs="-lpthread -lmach -lexc -lc"
+@end example
+
+When compiling @code{mysqld}, you may see a couple of warnings like this:
+
+@example
+mysqld.cc: In function `void handle_connections()':
+mysqld.cc:626: passing `long unsigned int *' as argument 3 of
+`accept(int, sockaddr *, int *)'
+@end example
+
+You can safely ignore these warnings. They occur because @code{configure}
+can detect only errors, not warnings.
+
+If you start the server directly from the command line, you may have problems
+with it dying when you log out. (When you log out, your outstanding processes
+receive a @code{SIGHUP} signal.) If so, try starting the server like this:
+
+@example
+shell> nohup mysqld [options] &
+@end example
+
+@code{nohup} causes the command following it to ignore any @code{SIGHUP}
+signal sent from the terminal. Alternatively, start the server by running
+@code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you.
+@xref{safe_mysqld, , @code{safe_mysqld}}.
+
+If you get a problem when compiling @file{mysys/get_opt.c}, just remove the
+line @code{#define _NO_PROTO} from the start of that file!
+
+If you are using Compaq's CC compiler, the following configure line should
+work:
+
+@example
+CC="cc -pthread"
+CFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host"
+CXX="cxx -pthread"
+CXXFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host"
+export CC CFLAGS CXX CXXFLAGS
+./configure \
+--prefix=/usr/local/mysql \
+--with-low-memory \
+--enable-large-files \
+--enable-shared=yes \
+--with-named-thread-libs="-lpthread -lmach -lexc -lc"
+gnumake
+@end example
+
+If you get a problem with libtool, when compiling with shared libraries
+as above, when linking @code{mysql}, you should be able to get around
+this by issuing:
+
+@example
+cd mysql
+/bin/sh ../libtool --mode=link cxx -pthread -O3 -DDBUG_OFF \
+-O4 -ansi_alias -ansi_args -fast -inline speed \
+-speculate all \ -arch host -DUNDEF_HAVE_GETHOSTBYNAME_R \
+-o mysql mysql.o readline.o sql_string.o completion_hash.o \
+../readline/libreadline.a -lcurses \
+../libmysql/.libs/libmysqlclient.so -lm
+cd ..
+gnumake
+gnumake install
+scripts/mysql_install_db
+@end example
+
+
+@node Alpha-DEC-OSF1, SGI-Irix, Alpha-DEC-UNIX, Other Unix Notes
+@subsubsection Alpha-DEC-OSF1 Notes
+
+If you have problems compiling and have DEC @code{CC} and @code{gcc}
+installed, try running @code{configure} like this:
+
+@example
+CC=cc CFLAGS=-O CXX=gcc CXXFLAGS=-O3 \
+./configure --prefix=/usr/local/mysql
+@end example
+
+If you get problems with the @file{c_asm.h} file, you can create and use
+a 'dummy' @file{c_asm.h} file with:
+
+@example
+touch include/c_asm.h
+CC=gcc CFLAGS=-I./include \
+CXX=gcc CXXFLAGS=-O3 \
+./configure --prefix=/usr/local/mysql
+@end example
+
+Note that the following problems with the @code{ld} program can be fixed
+by downloading the latest DEC (Compaq) patch kit from:
+@uref{http://ftp.support.compaq.com/public/unix/}.
+
+On OSF1 V4.0D and compiler "DEC C V5.6-071 on Digital Unix V4.0 (Rev. 878)"
+the compiler had some strange behavior (undefined @code{asm} symbols).
+@code{/bin/ld} also appears to be broken (problems with @code{_exit
+undefined} errors occurring while linking @code{mysqld}). On this system, we
+have managed to compile @strong{MySQL} with the following @code{configure}
+line, after replacing @code{/bin/ld} with the version from OSF 4.0C:
+
+@example
+CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql
+@end example
+
+With the Digital compiler "C++ V6.1-029", the following should work:
+
+@example
+CC="cc -pthread"
+CFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host"
+CXX="cxx -pthread"
+CXXFLAGS="-O4 -ansi_alias -ansi_args -fast -inline speed -speculate all -arch host -noexceptions -nortti"
+export CC CFLAGS CXX CXXFLAGS
+./configure --prefix=/usr/mysql/mysql --with-mysqld-ldflags=-all-static --disable-shared --with-named-thread-libs="-lmach -lexc -lc"
+@end example
+
+In some versions of OSF1, the @code{alloca()} function is broken. Fix
+this by removing the line in @file{config.h} that defines @code{HAVE_ALLOCA}.
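+
+One way to do this without opening an editor (a minimal sketch, assuming the
+generated @file{config.h} contains the line @code{#define HAVE_ALLOCA 1}):
+
+@example
+shell> sed 's/^#define HAVE_ALLOCA 1$/#undef HAVE_ALLOCA/' config.h > config.h.new
+shell> mv config.h.new config.h
+@end example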
+
+The @code{alloca()} function may also have an incorrect prototype in
+@file{/usr/include/alloca.h}. The warning resulting from this can be ignored.
+
+@code{configure} will use the following thread libraries automatically:
+@code{--with-named-thread-libs="-lpthread -lmach -lexc -lc"}.
+
+When using @code{gcc}, you can also try running @code{configure} like this:
+
+@example
+shell> CFLAGS=-D_PTHREAD_USE_D4 CXX=gcc CXXFLAGS=-O3 ./configure ....
+@end example
+
+If you have problems with signals (@strong{MySQL} dies unexpectedly
+under high load), you may have found an OS bug with threads and
+signals. In this case you can tell @strong{MySQL} not to use signals by
+configuring with:
+
+@example
+shell> CFLAGS=-DDONT_USE_THR_ALARM \
+ CXXFLAGS=-DDONT_USE_THR_ALARM \
+ ./configure ...
+@end example
+
+This doesn't affect the performance of @strong{MySQL}, but has the side
+effect that you can't kill clients that are ``sleeping'' on a connection with
+@code{mysqladmin kill} or @code{mysqladmin shutdown}. Instead, the client
+will die when it issues its next command.
+
+With @code{gcc} 2.95.2, you will probably run into the following compile error:
+
+@example
+sql_acl.cc:1456: Internal compiler error in `scan_region', at except.c:2566
+Please submit a full bug report.
+@end example
+
+To fix this you should change to the @file{sql} directory and do a ``cut
+and paste'' of the last @code{gcc} line, but change @code{-O3} to
+@code{-O0} (or add @code{-O0} immediately after @code{gcc} if you don't
+have any @code{-O} option on your compile line). After this is done, you
+can just change back to the top-level directory and run @code{make}
+again.
+
+
+@node SGI-Irix, SCO, Alpha-DEC-OSF1, Other Unix Notes
+@subsubsection SGI Irix Notes
+
+If you are using Irix Version 6.5.3 or newer, @code{mysqld} will only be able
+to create threads if you run it as a user with @code{CAP_SCHED_MGT}
+privileges (such as @code{root}) or give the @code{mysqld} server this
+privilege with the following shell command:
+
+@example
+shell> chcap "CAP_SCHED_MGT+epi" /opt/mysql/libexec/mysqld
+@end example
+
+You may have to undefine some things in @file{config.h} after running
+@code{configure} and before compiling.
+
+In some Irix implementations, the @code{alloca()} function is broken. If the
+@code{mysqld} server dies on some @code{SELECT} statements, remove the lines
+from @file{config.h} that define @code{HAVE_ALLOCA} and @code{HAVE_ALLOCA_H}.
+If @code{mysqladmin create} doesn't work, remove the line from @file{config.h}
+that defines @code{HAVE_READDIR_R}. You may have to remove the
+@code{HAVE_TERM_H} line as well.
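+
+Before editing @file{config.h} by hand, you can check which of these symbols
+@code{configure} actually defined on your system (a sketch only):
+
+@example
+shell> egrep 'HAVE_ALLOCA|HAVE_READDIR_R|HAVE_TERM_H' config.h
+@end example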
+
+SGI recommends that you install all of the patches on this page as a set:
+
+@uref{http://support.sgi.com/surfzone/patches/patchset/6.2_indigo.rps.html}
+
+At the very minimum, you should install the latest kernel rollup, the
+latest @code{rld} rollup, and the latest @code{libc} rollup.
+
+You definitely need all the POSIX patches on this page, for pthreads support:
+
+@uref{http://support.sgi.com/surfzone/patches/patchset/6.2_posix.rps.html}
+
+If you get something like the following error when compiling
+@file{mysql.cc}:
+
+@example
+"/usr/include/curses.h", line 82: error(1084): invalid combination of type
+@end example
+
+Type the following in the top-level directory of your @strong{MySQL} source
+tree:
+
+@example
+shell> extra/replace bool curses_bool < /usr/include/curses.h > include/curses.h
+shell> make
+@end example
+
+There have also been reports of scheduling problems. If only one thread is
+running, things go slow. Avoid this by starting another client. This may
+lead to a 2-to-10-fold increase in execution speed thereafter for the other
+thread. This is a poorly understood problem with Irix threads; you may have
+to improvise to find solutions until this can be fixed.
+
+If you are compiling with @code{gcc}, you can use the following
+@code{configure} command:
+
+@example
+CC=gcc CXX=gcc CXXFLAGS=-O3 \
+./configure --prefix=/usr/local/mysql --with-thread-safe-client --with-named-thread-libs=-lpthread
+@end example
+
+On Irix 6.5.11 with native Irix C and C++ compilers version 7.3.1.2, the
+following is reported to work:
+
+@example
+CC=cc CXX=CC CFLAGS='-O3 -n32 -TARG:platform=IP22 -I/usr/local/include \
+-L/usr/local/lib' CXXFLAGS='-O3 -n32 -TARG:platform=IP22 \
+-I/usr/local/include -L/usr/local/lib' ./configure --prefix=/usr/local/mysql \
+--with-berkeley-db --with-innodb \
+--with-libwrap=/usr/local --with-named-curses-libs=/usr/local/lib/libncurses.a
+@end example
+
+
+@node SCO, SCO Unixware, SGI-Irix, Other Unix Notes
+@subsubsection SCO Notes
The current port is tested only on a ``sco3.2v5.0.4'' and
``sco3.2v5.0.5'' system. There has also been a lot of progress on a
@@ -11352,8 +11791,9 @@ if they were compiled with @code{icc} or @code{cc}.
Perl works best when compiled with @code{cc}.
-@node SCO Unixware, IBM-AIX, SCO, Source install system issues
-@subsection SCO Unixware Version 7.0 Notes
+
+@node SCO Unixware, , SCO, Other Unix Notes
+@subsubsection SCO Unixware Version 7.0 Notes
You must use a version of @strong{MySQL} at least as recent as Version 3.22.13
because that version fixes some portability problems under Unixware.
@@ -11367,1651 +11807,2557 @@ CC=cc CXX=CC ./configure --prefix=/usr/local/mysql
If you want to use @code{gcc}, you must use @code{gcc} 2.95.2 or newer.
-@node IBM-AIX, HP-UX 10.20, SCO Unixware, Source install system issues
-@subsection IBM-AIX notes
-@cindex problems, installing on IBM-AIX
-Automatic detection of @code{xlC} is missing from Autoconf, so a
-@code{configure} command something like this is needed when compiling
-@strong{MySQL} (This example uses the IBM compiler):
+@menu
+* OS/2::
+@end menu
+
+@node OS/2, BeOS, Other Unix Notes, Operating System Specific Notes
+@subsection OS/2 Notes
+
+@strong{MySQL} uses quite a few open files. Because of this, you should add
+something like the following to your @file{CONFIG.SYS} file:
@example
-export CC="xlc_r -ma -O3 -qstrict -qoptimize=3 -qmaxmem=8192 "
-export CXX="xlC_r -ma -O3 -qstrict -qoptimize=3 -qmaxmem=8192"
-export CFLAGS="-I /usr/local/include"
-export LDLFAGS="-L /usr/local/lib"
-export CPPFLAGS=$CFLAGS
-export CXXFLAGS=$CFLAGS
+SET EMXOPT=-c -n -h1024
+@end example
-./configure --prefix=/usr/local \
- --localstatedir=/var/mysql \
- --sysconfdir=/etc/mysql \
- --sbindir='/usr/local/bin' \
- --libexecdir='/usr/local/bin' \
- --enable-thread-safe-client \
- --enable-large-files
+If you don't do this, you will probably run into the following error:
+
+@example
+File 'xxxx' not found (Errcode: 24)
@end example
-Above are the options used to compile the @strong{MySQL} distribution that
-can be found at @uref{http://www-frec.bull.com/}.
+When using @strong{MySQL} with OS/2 Warp 3, FixPack 29 or above is
+required. With OS/2 Warp 4, FixPack 4 or above is required. This is a
+requirement of the Pthreads library. @strong{MySQL} must be installed
+on a partition that supports long filenames, such as HPFS or FAT32.
-If you change the @code{-O3} to @code{-O2} in the above configure line,
-you must also remove the @code{-qstrict} option (this is a limitation in
-the IBM C compiler).
+The @file{INSTALL.CMD} script must be run from OS/2's own @file{CMD.EXE}
+and may not work with replacement shells such as @file{4OS2.EXE}.
-If you are using @code{gcc} or @code{egcs} to compile @strong{MySQL}, you
-@strong{MUST} use the @code{-fno-exceptions} flag, as the exception
-handling in @code{gcc}/@code{egcs} is not thread safe! (This is tested with
-@code{egcs} 1.1.). There are also some known problems with IBM's assembler,
-which may cause it to generate bad code when used with gcc.
+The @file{scripts/mysql-install-db} script has been renamed. It is now called
+@file{install.cmd} and is a REXX script, which will set up the default
+@strong{MySQL} security settings and create the WorkPlace Shell icons
+for @strong{MySQL}.
-We recommend the following @code{configure} line with @code{egcs} and
-@code{gcc 2.95} on AIX:
+Dynamic module support is compiled in but not fully tested. Dynamic
+modules should be compiled using the Pthreads run-time library.
@example
-CC="gcc -pipe -mcpu=power -Wa,-many" \
-CXX="gcc -pipe -mcpu=power -Wa,-many" \
-CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti" \
-./configure --prefix=/usr/local/mysql --with-low-memory
+gcc -Zdll -Zmt -Zcrtdll=pthrdrtl -I../include -I../regex -I.. \
+ -o example udf_example.cc -L../lib -lmysqlclient udf_example.def
+mv example.dll example.udf
@end example
-The @code{-Wa,-many} is necessary for the compile to be successful. IBM is
-aware of this problem but is in to hurry to fix it because of the workaround
-available. We don't know if the @code{-fno-exceptions} is required with
-@code{gcc 2.95}, but as @strong{MySQL} doesn't use exceptions and the above
-option generates faster code, we recommend that you should always use this
-option with @code{egcs / gcc}.
+@strong{Note:} Due to limitations in OS/2, UDF module name stems must not
+exceed 8 characters. Modules are stored in the @file{/mysql2/udf}
+directory; the @code{safe-mysqld.cmd} script will put this directory in
+the @code{BEGINLIBPATH} environment variable. When using UDF modules,
+any specified extension is ignored; it is assumed to be @file{.udf}.
+For example, in Unix, the shared module might be named @file{example.so}
+and you would load a function from it like this:
-If you get a problem with assembler code try changing the -mcpu=xxx to
-match your cpu. Typically power2, power, or powerpc may need to be used,
-alternatively you might need to use 604 or 604e. I'm not positive but I
-would think using "power" would likely be safe most of the time, even on
-a power2 machine.
+@example
+mysql> CREATE FUNCTION metaphon RETURNS STRING SONAME "example.so";
+@end example
-If you don't know what your cpu is then do a "uname -m", this will give
-you back a string that looks like "000514676700", with a format of
-xxyyyyyymmss where xx and ss are always 0's, yyyyyy is a unique system
-id and mm is the id of the CPU Planar. A chart of these values can be
-found at
-@uref{http://www.rs6000.ibm.com/doc_link/en_US/a_doc_lib/cmds/aixcmds5/uname.htm}.
-This will give you a machine type and a machine model you can use to
-determine what type of cpu you have.
+In OS/2, the module would be named @file{example.udf}, but you would not
+specify the module extension:
-If you have problems with signals (@strong{MySQL} dies unexpectedly
-under high load) you may have found an OS bug with threads and
-signals. In this case you can tell @strong{MySQL} not to use signals by
-configuring with:
+@example
+mysql> CREATE FUNCTION metaphon RETURNS STRING SONAME "example";
+@end example
+
+
+@node BeOS, Novell Netware, OS/2, Operating System Specific Notes
+@subsection BeOS Notes
+
+We are really interested in getting @strong{MySQL} to work on BeOS, but
+unfortunately we don't have anyone who knows BeOS or has time to do
+a port.
+
+We are interested in finding someone to do a port, and we will help them
+with any technical questions they may have while doing the port.
+
+We have previously talked with some BeOS developers that have said that
+@strong{MySQL} is 80% ported to BeOS, but we haven't heard from them
+in a while.
+
+
+@node Novell Netware, , BeOS, Operating System Specific Notes
+@subsection Novell Netware Notes
+
+We are really interested in getting @strong{MySQL} to work on Netware, but
+unfortunately we don't have anyone who knows Netware or has time to do
+a port.
+
+We are interested in finding someone to do a port, and we will help them
+with any technical questions they may have while doing the port.
+
+
+@node Tutorial, MySQL Database Administration, Installing, Top
+@chapter Introduction to MySQL: A MySQL Tutorial
+
+@cindex tutorial
+@cindex terminal monitor, defined
+@cindex monitor, terminal
+@cindex options, provided by MySQL
+
+@menu
+* Connecting-disconnecting:: Connecting to and disconnecting from the server
+* Entering queries:: Entering queries
+* Database use:: Creating and using a database
+* Getting information:: Getting information about databases and tables
+* Examples:: Examples
+* Batch mode:: Using @code{mysql} in batch mode
+* Twin:: Queries from twin project
+* Apache::
+@end menu
+
+This chapter provides a tutorial introduction to @strong{MySQL} by showing
+how to use the @code{mysql} client program to create and use a simple
+database. @code{mysql} (sometimes referred to as the ``terminal monitor'' or
+just ``monitor'') is an interactive program that allows you to connect to a
+@strong{MySQL} server, run queries, and view the results. @code{mysql} may
+also be used in batch mode: you place your queries in a file beforehand, then
+tell @code{mysql} to execute the contents of the file. Both ways of using
+@code{mysql} are covered here.
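+
+In batch mode, the usual invocation is simply to redirect the file into
+@code{mysql}, something like this (a quick sketch; batch mode is covered in
+more detail later in this chapter):
+
+@example
+shell> mysql < batch-file
+@end example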
+
+To see a list of options provided by @code{mysql}, invoke it with
+the @code{--help} option:
@example
-shell> CFLAGS=-DDONT_USE_THR_ALARM CXX=gcc \
- CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti -DDONT_USE_THR_ALARM" \
- ./configure --prefix=/usr/local/mysql --with-debug --with-low-memory
+shell> mysql --help
@end example
-This doesn't affect the performance of @strong{MySQL}, but has the side
-effect that you can't kill clients that are ``sleeping'' on a connection with
-@code{mysqladmin kill} or @code{mysqladmin shutdown}. Instead, the client
-will die when it issues its next command.
+This chapter assumes that @code{mysql} is installed on your machine and that
+a @strong{MySQL} server is available to which you can connect. If this is
+not true, contact your @strong{MySQL} administrator. (If @emph{you} are the
+administrator, you will need to consult other sections of this manual.)
-On some versions of AIX, linking with @code{libbind.a} makes
-@code{getservbyname} core dump. This is an AIX bug and should be reported
-to IBM.
+This chapter describes the entire process of setting up and using a
+database. If you are interested only in accessing an already-existing
+database, you may want to skip over the sections that describe how to
+create the database and the tables it contains.
-For AIX 4.2.1 and gcc you have to do the following changes.
+Because this chapter is tutorial in nature, many details are necessarily left
+out. Consult the relevant sections of the manual for more
+information on the topics covered here.
-After configuring, edit @file{config.h} and @file{include/my_config.h}
-and change the line that says
+@node Connecting-disconnecting, Entering queries, Tutorial, Tutorial
+@section Connecting to and Disconnecting from the Server
+
+@cindex connecting, to the server
+@cindex disconnecting, from the server
+@cindex server, connecting
+@cindex server, disconnecting
+
+To connect to the server, you'll usually need to provide a @strong{MySQL}
+user name when you invoke @code{mysql} and, most likely, a password. If the
+server runs on a machine other than the one where you log in, you'll also
+need to specify a hostname. Contact your administrator to find out what
+connection parameters you should use to connect (that is, what host, user name,
+and password to use). Once you know the proper parameters, you should be
+able to connect like this:
@example
-#define HAVE_SNPRINTF 1
+shell> mysql -h host -u user -p
+Enter password: ********
@end example
-to
+The @code{********} represents your password; enter it when @code{mysql}
+displays the @code{Enter password:} prompt.
+
+If that works, you should see some introductory information followed by a
+@code{mysql>} prompt:
+
@example
-#undef HAVE_SNPRINTF
+shell> mysql -h host -u user -p
+Enter password: ********
+Welcome to the MySQL monitor. Commands end with ; or \g.
+Your MySQL connection id is 459 to server version: 3.22.20a-log
+
+Type 'help' for help.
+
+mysql>
@end example
-And finally, in @file{mysqld.cc} you need to add a prototype for initgoups.
+The prompt tells you that @code{mysql} is ready for you to enter commands.
+
+Some @strong{MySQL} installations allow users to connect as the anonymous
+(unnamed) user to the server running on the local host. If this is the case
+on your machine, you should be able to connect to that server by invoking
+@code{mysql} without any options:
@example
-#ifdef _AIX41
-extern "C" int initgroups(const char *,int);
-#endif
+shell> mysql
@end example
-@node HP-UX 10.20, HP-UX 11.x, IBM-AIX, Source install system issues
-@subsection HP-UX Version 10.20 Notes
+After you have connected successfully, you can disconnect any time by typing
+@code{QUIT} at the @code{mysql>} prompt:
-There are a couple of small problems when compiling @strong{MySQL} on
-HP-UX. We recommend that you use @code{gcc} instead of the HP-UX native
-compiler, because @code{gcc} produces better code!
+@example
+mysql> QUIT
+Bye
+@end example
-We recommend using gcc 2.95 on HP-UX. Don't use high optimization
-flags (like -O6) as this may not be safe on HP-UX.
+You can also disconnect by pressing Control-D.
-Note that MIT-pthreads can't be compiled with the HP-UX compiler
-because it can't compile @code{.S} (assembler) files.
+Most examples in the following sections assume you are connected to the
+server. They indicate this by the @code{mysql>} prompt.
-The following configure line should work:
+@node Entering queries, Database use, Connecting-disconnecting, Tutorial
+@section Entering Queries
+
+@cindex running, queries
+@cindex queries, entering
+@cindex entering, queries
+
+Make sure you are connected to the server, as discussed in the previous
+section. Doing so will not in itself select any database to work with, but
+that's okay. At this point, it's more important to find out a little about
+how to issue queries than to jump right in creating tables, loading data
+into them, and retrieving data from them. This section describes the basic
+principles of entering commands, using several queries you can try out to
+familiarize yourself with how @code{mysql} works.
+
+Here's a simple command that asks the server to tell you its version number
+and the current date. Type it in as shown below following the @code{mysql>}
+prompt and hit the RETURN key:
@example
-CFLAGS="-DHPUX -I/opt/dce/include" CXXFLAGS="-DHPUX -I/opt/dce/include -felide-constructors -fno-exceptions -fno-rtti" CXX=gcc ./configure --with-pthread --with-named-thread-libs='-ldce' --prefix=/usr/local/mysql --disable-shared
+mysql> SELECT VERSION(), CURRENT_DATE;
++--------------+--------------+
+| version() | CURRENT_DATE |
++--------------+--------------+
+| 3.22.20a-log | 1999-03-19 |
++--------------+--------------+
+1 row in set (0.01 sec)
+mysql>
@end example
-If you are compiling @code{gcc} 2.95 yourself, you should NOT link it with
-the DCE libraries (@code{libdce.a} or @code{libcma.a}) if you want to compile
-@strong{MySQL} with MIT-pthreads. If you mix the DCE and MIT-pthreads
-packages you will get a @code{mysqld} to which you cannot connect. Remove
-the DCE libraries while you compile @code{gcc} 2.95!
+This query illustrates several things about @code{mysql}:
-@node HP-UX 11.x, Mac OS X, HP-UX 10.20, Source install system issues
-@subsection HP-UX Version 11.x Notes
+@itemize @bullet
+@item
+A command normally consists of a SQL statement followed by a semicolon.
+(There are some exceptions where a semicolon is not needed. @code{QUIT},
+mentioned earlier, is one of them. We'll get to others later.)
-For HP-UX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later.
+@item
+When you issue a command, @code{mysql} sends it to the server for execution
+and displays the results, then prints another @code{mysql>} prompt to indicate
+that it is ready for another command.
-Because of some critical bugs in the standard HP-UX libraries, you should
-install the following patches before trying to run @strong{MySQL} on HP-UX 11.0:
+@item
+@code{mysql} displays query output as a table (rows and columns). The first
+row contains labels for the columns. The rows following are the query
+results. Normally, column labels are the names of the columns you fetch from
+database tables. If you're retrieving the value of an expression rather than
+a table column (as in the example just shown), @code{mysql} labels the column
+using the expression itself.
+
+@item
+@code{mysql} shows how many rows were returned and how long the query took
+to execute, which gives you a rough idea of server performance. These values
+are imprecise because they represent wall clock time (not CPU or machine
+time), and because they are affected by factors such as server load and
+network latency. (For brevity, the ``rows in set'' line is not shown in
+the remaining examples in this chapter.)
+@end itemize
+
+Keywords may be entered in any lettercase. The following queries are
+equivalent:
@example
-PHKL_22840 Streams cumulative
-PHNE_22397 ARPA cumulative
+mysql> SELECT VERSION(), CURRENT_DATE;
+mysql> select version(), current_date;
+mysql> SeLeCt vErSiOn(), current_DATE;
@end example
-This will solve a problem that one gets @code{EWOULDBLOCK} from @code{recv()}
-and @code{EBADF} from @code{accept()} in threaded applications.
+Here's another query. It demonstrates that you can use @code{mysql} as a
+simple calculator:
-If you are using @code{gcc} 2.95.1 on an unpatched HP-UX 11.x system,
-you will get the error:
+@example
+mysql> SELECT SIN(PI()/4), (4+1)*5;
++-------------+---------+
+| SIN(PI()/4) | (4+1)*5 |
++-------------+---------+
+| 0.707107 | 25 |
++-------------+---------+
+@end example
+
+The commands shown thus far have been relatively short, single-line
+statements. You can even enter multiple statements on a single line.
+Just end each one with a semicolon:
@example
-In file included from /usr/include/unistd.h:11,
- from ../include/global.h:125,
- from mysql_priv.h:15,
- from item.cc:19:
-/usr/include/sys/unistd.h:184: declaration of C function ...
-/usr/include/sys/pthread.h:440: previous declaration ...
-In file included from item.h:306,
- from mysql_priv.h:158,
- from item.cc:19:
+mysql> SELECT VERSION(); SELECT NOW();
++--------------+
+| version() |
++--------------+
+| 3.22.20a-log |
++--------------+
+
++---------------------+
+| NOW() |
++---------------------+
+| 1999-03-19 00:15:33 |
++---------------------+
@end example
-The problem is that HP-UX doesn't define @code{pthreads_atfork()} consistently.
-It has conflicting prototypes in
-@file{/usr/include/sys/unistd.h}:184 and
-@file{/usr/include/sys/pthread.h}:440 (details below).
+A command need not be given all on a single line, so lengthy commands that
+require several lines are not a problem. @code{mysql} determines where your
+statement ends by looking for the terminating semicolon, not by looking for
+the end of the input line. (In other words, @code{mysql}
+accepts free-format input: it collects input lines but does not execute them
+until it sees the semicolon.)
-One solution is to copy @file{/usr/include/sys/unistd.h} into
-@file{mysql/include} and edit @file{unistd.h} and change it to match
-the definition in @file{pthread.h}. Here's the diff:
+Here's a simple multiple-line statement:
@example
-183,184c183,184
-< extern int pthread_atfork(void (*prepare)(), void (*parent)(),
-< void (*child)());
----
-> extern int pthread_atfork(void (*prepare)(void), void (*parent)(void),
-> void (*child)(void));
+mysql> SELECT
+ -> USER()
+ -> ,
+ -> CURRENT_DATE;
++--------------------+--------------+
+| USER() | CURRENT_DATE |
++--------------------+--------------+
+| joesmith@@localhost | 1999-03-18 |
++--------------------+--------------+
@end example
-After this, the following configure line should work:
+In this example, notice how the prompt changes from @code{mysql>} to
+@code{->} after you enter the first line of a multiple-line query. This is
+how @code{mysql} indicates that it hasn't seen a complete statement and is
+waiting for the rest. The prompt is your friend, because it provides
+valuable feedback. If you use that feedback, you will always be aware of
+what @code{mysql} is waiting for.
+
+If you decide you don't want to execute a command that you are in the
+process of entering, cancel it by typing @code{\c}:
@example
-CFLAGS="-fomit-frame-pointer -O3 -fpic" CXX=gcc CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti -O3" ./configure --prefix=/usr/local/mysql --disable-shared
+mysql> SELECT
+ -> USER()
+ -> \c
+mysql>
@end example
-Here is some information that a HP-UX Version 11.x user sent us about compiling
-@strong{MySQL} with HP-UX:x compiler:
+Here, too, notice the prompt. It switches back to @code{mysql>} after you
+type @code{\c}, providing feedback to indicate that @code{mysql} is ready
+for a new command.
+
+The following table shows each of the prompts you may see and summarizes what
+they mean about the state that @code{mysql} is in:
+
+@cindex prompts, meanings
+@multitable @columnfractions .10 .9
+@item @strong{Prompt} @tab @strong{Meaning}
+@item @code{mysql>} @tab Ready for new command.
+@item @code{@ @ @ @ ->} @tab Waiting for next line of multiple-line command.
+@item @code{@ @ @ @ '>} @tab Waiting for next line, collecting a string that begins
+with a single quote (@samp{'}).
+@item @code{@ @ @ @ ">} @tab Waiting for next line, collecting a string that begins
+with a double quote (@samp{"}).
+@end multitable
+
+Multiple-line statements commonly occur by accident when you intend to
+issue a command on a single line, but forget the terminating semicolon. In
+this case, @code{mysql} waits for more input:
@example
- Environment:
- proper compilers.
- setenv CC cc
- setenv CXX aCC
- flags
- setenv CFLAGS -D_REENTRANT
- setenv CXXFLAGS -D_REENTRANT
- setenv CPPFLAGS -D_REENTRANT
- % aCC -V
- aCC: HP ANSI C++ B3910B X.03.14.06
- % cc -V /tmp/empty.c
- cpp.ansi: HP92453-01 A.11.02.00 HP C Preprocessor (ANSI)
- ccom: HP92453-01 A.11.01.00 HP C Compiler
- cc: "/tmp/empty.c", line 1: warning 501: Empty source file.
+mysql> SELECT USER()
+ ->
+@end example
- configuration:
- ./configure --with-pthread \
- --prefix=/source-control/mysql \
- --with-named-thread-libs=-lpthread \
- --with-low-memory
+If this happens to you (you think you've entered a statement but the only
+response is a @code{->} prompt), most likely @code{mysql} is waiting for the
+semicolon. If you don't notice what the prompt is telling you, you might sit
+there for a while before realizing what you need to do. Enter a semicolon to
+complete the statement, and @code{mysql} will execute it:
- added '#define _CTYPE_INCLUDED' to include/m_ctype.h. This
- symbol is the one defined in HP's /usr/include/ctype.h:
+@example
+mysql> SELECT USER()
+ -> ;
++--------------------+
+| USER() |
++--------------------+
+| joesmith@@localhost |
++--------------------+
+@end example
- /* Don't include std ctype.h when this is included */
- #define _CTYPE_H
- #define __CTYPE_INCLUDED
- #define _CTYPE_INCLUDED
- #define _CTYPE_USING /* Don't put names in global namespace. */
+The @code{'>} and @code{">} prompts occur during string collection.
+In @strong{MySQL}, you can write strings surrounded by either @samp{'}
+or @samp{"} characters (for example, @code{'hello'} or @code{"goodbye"}),
+and @code{mysql} lets you enter strings that span multiple lines. When you
+see a @code{'>} or @code{">} prompt, it means that you've entered a line
+containing a string that begins with a @samp{'} or @samp{"} quote character,
+but have not yet entered the matching quote that terminates the string.
+That's fine if you really are entering a multiple-line string, but how likely
+is that? Not very. More often, the @code{'>} and @code{">} prompts indicate
+that you've inadvertently left out a quote character. For example:
+
+@example
+mysql> SELECT * FROM my_table WHERE name = "Smith AND age < 30;
+ ">
@end example
-@itemize @bullet
-@item
-I had to use the compile-time flag @code{-D_REENTRANT} to get the compiler
-to recognize the prototype for @code{localtime_r}. Alternatively I could have
-supplied the prototype for @code{localtime_r}. But I wanted to catch other
-bugs without needing to run into them. I wasn't sure where I needed it, so I
-added it to all flags.
-@item
-The optimization flags used by @strong{MySQL} (-O3) are not recognized by HP's
-compilers. I did not change the flags.
-@end itemize
+If you enter this @code{SELECT} statement, then hit RETURN and wait for the
+result, nothing will happen. Instead of wondering why this
+query takes so long, notice the clue provided by the @code{">} prompt. It
+tells you that @code{mysql} expects to see the rest of an unterminated
+string. (Do you see the error in the statement? The string @code{"Smith} is
+missing the second quote.)
-If you get the following error from @code{configure}
+At this point, what do you do? The simplest thing is to cancel the command.
+However, you cannot just type @code{\c} in this case, because @code{mysql}
+interprets it as part of the string that it is collecting! Instead, enter
+the closing quote character (so @code{mysql} knows you've finished the
+string), then type @code{\c}:
@example
-checking for cc option to accept ANSI C... no
-configure: error: MySQL requires a ANSI C compiler (and a C++ compiler). Try gcc. See the Installation chapter in the Reference Manual.
+mysql> SELECT * FROM my_table WHERE name = "Smith AND age < 30;
+ "> "\c
+mysql>
@end example
-Check that you don't have the path to the K&R compiler before the path
-to the HP-UX C and C++ compiler.
+The prompt changes back to @code{mysql>}, indicating that @code{mysql}
+is ready for a new command.
-@node Mac OS X, BEOS, HP-UX 11.x, Source install system issues
-@subsection Mac OS X Notes
+It's important to know what the @code{'>} and @code{">} prompts signify,
+because if you mistakenly enter an unterminated string, any further lines you
+type will appear to be ignored by @code{mysql} --- including a line
+containing @code{QUIT}! This can be quite confusing, especially if you
+don't know that you need to supply the terminating quote before you can
+cancel the current command.
+
+@node Database use, Getting information, Entering queries, Tutorial
+@section Creating and Using a Database
+
+@cindex databases, creating
+@cindex databases, using
+@cindex creating, databases
@menu
-* Mac OS X Public Data::
-* Mac OS X Server::
+* Creating database:: Creating a database
+* Creating tables:: Creating a table
+* Loading tables:: Loading data into a table
+* Retrieving data:: Retrieving information from a table
@end menu
-@node Mac OS X Public Data, Mac OS X Server, Mac OS X, Mac OS X
-@subsubsection Mac OS X Public beta
+Now that you know how to enter commands, it's time to access a database.
-@strong{MySQL} should work without any problems on Mac OS X Public Beta
-(Darwin). You don't need the pthread patches for this OS!
+Suppose you have several pets in your home (your menagerie) and you'd
+like to keep track of various types of information about them. You can do so
+by creating tables to hold your data and loading them with the desired
+information. Then you can answer different sorts of questions about your
+animals by retrieving data from the tables. This section shows you how to:
-@node Mac OS X Server, , Mac OS X Public Data, Mac OS X
-@subsubsection Mac OS X Server
+@itemize @bullet
+@item
+Create a database
+@item
+Create a table
+@item
+Load data into the table
+@item
+Retrieve data from the table in various ways
+@item
+Use multiple tables
+@end itemize
-Before trying to configure @strong{MySQL} on Mac OS X server you must
-first install the pthread package from
-@uref{http://www.prnet.de/RegEx/mysql.html}.
+The menagerie database will be simple (deliberately), but it is not difficult
+to think of real-world situations in which a similar type of database might
+be used. For example, a database like this could be used by a farmer to keep
+track of livestock, or by a veterinarian to keep track of patient records.
+A menagerie distribution containing some of the queries and sample data used
+in the following sections can be obtained from the @strong{MySQL} Web site.
+It's available in either
+@uref{http://www.mysql.com/Downloads/Contrib/Examples/menagerie.tar.gz,compressed @code{tar} format}
+or
+@uref{http://www.mysql.com/Downloads/Contrib/Examples/menagerie.zip,Zip format}.
-Our binary for Mac OS X is compiled on Rhapsody 5.5 with the following
-configure line:
+Use the @code{SHOW} statement to find out what databases currently exist
+on the server:
@example
-CC=gcc CFLAGS="-O2 -fomit-frame-pointer" CXX=gcc CXXFLAGS="-O2 -fomit-frame-pointer" ./configure --prefix=/usr/local/mysql "--with-comment=Official MySQL binary" --with-extra-charsets=complex --disable-shared
+mysql> SHOW DATABASES;
++----------+
+| Database |
++----------+
+| mysql |
+| test |
+| tmp |
++----------+
@end example
-You might want to also add aliases to your shell's resource file to
-access @code{mysql} and @code{mysqladmin} from the command line:
+The list of databases is probably different on your machine, but the
+@code{mysql} and @code{test} databases are likely to be among them. The
+@code{mysql} database is required because it describes user access
+privileges. The @code{test} database is often provided as a workspace for
+users to try things out.
+
+If the @code{test} database exists, try to access it:
@example
-alias mysql '/usr/local/mysql/bin/mysql'
-alias mysqladmin '/usr/local/mysql/bin/mysqladmin'
+mysql> USE test
+Database changed
@end example
-@node BEOS, , Mac OS X, Source install system issues
-@subsection BeOS Notes
-
-We are really interested in getting @strong{MySQL} to work on BeOS, but
-unfortunately we don't have any person who knows BeOS or has time to do
-a port.
+Note that @code{USE}, like @code{QUIT}, does not require a semicolon. (You
+can terminate such statements with a semicolon if you like; it does no harm.)
+The @code{USE} statement is special in another way, too: it must be given on
+a single line.
-We are interested in finding someone to do a port, and we will help them
-with any technical questions they may have while doing the port.
+You can use the @code{test} database (if you have access to it) for the
+examples that follow, but anything you create in that database can be
+removed by anyone else with access to it. For this reason, you should
+probably ask your @strong{MySQL} administrator for permission to use a
+database of your own. Suppose you want to call yours @code{menagerie}. The
+administrator needs to execute a command like this:
-We have previously talked with some BeOS developers that have said that
-@strong{MySQL} is 80% ported to BeOS, but we haven't heard from them
-in a while.
+@example
+mysql> GRANT ALL ON menagerie.* TO your_mysql_name;
+@end example
-@node Windows, OS/2, Source install system issues, Installing
-@section Windows Notes
+where @code{your_mysql_name} is the @strong{MySQL} user name assigned to
+you.
-This section describes installation and use of @strong{MySQL} on Windows.
-This information is also provided in the @file{README} file that comes
-with the @strong{MySQL} Windows distribution.
+@node Creating database, Creating tables, Database use, Database use
+@subsection Creating and Selecting a Database
-@menu
-* Windows installation:: Installing @strong{MySQL} on Windows
-* Win95 start:: Starting @strong{MySQL} on Win95 / Win98
-* NT start:: Starting @strong{MySQL} on NT / Win2000
-* Windows running:: Running @strong{MySQL} on Windows
-* Windows and SSH:: Connecting to a remote @strong{MySQL} from Windows with SSH
-* Windows symbolic links:: Splitting data across different disks under Win32
-* Windows compiling:: Compiling MySQL clients on Windows.
-* Windows vs Unix:: @strong{MySQL}-Windows compared to Unix @strong{MySQL}
-@end menu
+@cindex selecting, databases
+@cindex databases, selecting
-@node Windows installation, Win95 start, Windows, Windows
-@subsection Installing MySQL on Windows
+If the administrator creates your database for you when setting up your
+permissions, you can begin using it. Otherwise, you need to create it
+yourself:
-The following instructions apply to precompiled binary distributions.
-If you download a source distribution, you will have to compile and install
-it yourself.
+@example
+mysql> CREATE DATABASE menagerie;
+@end example
-If you don't have a copy of the @strong{MySQL} distribution, you should
-first download one from @uref{http://www.mysql.com/downloads/mysql-3.23.html}.
+Under Unix, database names are case sensitive (unlike SQL keywords), so you
+must always refer to your database as @code{menagerie}, not as
+@code{Menagerie}, @code{MENAGERIE}, or some other variant. This is also true
+for table names. (Under Windows, this restriction does not apply, although
+you must refer to databases and tables using the same lettercase throughout a
+given query.)
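+
+For example, if only the lowercase database exists, referring to it with a
+different lettercase fails (the exact error text may vary between versions,
+but it will look something like this):
+
+@example
+mysql> USE MENAGERIE
+ERROR 1049: Unknown database 'MENAGERIE'
+@end example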
-If you plan to connect to @strong{MySQL} from some other program, you will
-probably also need the @strong{MyODBC} driver. You can find this at the
-@strong{MyODBC} download page
-(@uref{http://www.mysql.com/downloads/api-myodbc.html}).
+Creating a database does not select it for use; you must do that explicitly.
+To make @code{menagerie} the current database, use this command:
-To install either distribution, unzip it in some empty directory and run the
-@code{Setup.exe} program.
+@example
+mysql> USE menagerie
+Database changed
+@end example
-By default, @strong{MySQL}-Windows is configured to be installed in
-@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere,
-install it in @file{C:\mysql} first, then move the installation to
-where you want it. If you do move @strong{MySQL}, you must indicate
-where everything is located by supplying a @code{--basedir} option when
-you start the server. For example, if you have moved the @strong{MySQL}
-distribution to @file{D:\programs\mysql}, you must start @code{mysqld}
-like this:
+Your database needs to be created only once, but you must select it for use
+each time you begin a @code{mysql} session. You can do this by issuing a
+@code{USE} statement as shown above. Alternatively, you can select the
+database on the command line when you invoke @code{mysql}. Just specify its
+name after any connection parameters that you might need to provide. For
+example:
@example
-C:\> D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql
+shell> mysql -h host -u user -p menagerie
+Enter password: ********
@end example
-Use @code{mysqld --help} to display all the options that @code{mysqld}
-understands!
+Note that @code{menagerie} is not your password on the command just shown.
+If you want to supply your password on the command line after the @code{-p}
+option, you must do so with no intervening space (for example, as
+@code{-pmypassword}, not as @code{-p mypassword}). However, putting your
+password on the command line is not recommended, because doing so exposes it
+to snooping by other users logged in on your machine.
-With all newer @strong{MySQL} versions, you can also create a
-@file{C:\my.cnf} file that holds any default options for the
-@strong{MySQL} server. Copy the file @file{\mysql\my-xxxxx.cnf} to
-@file{C:\my.cnf} and edit it to suit your setup. Note that you should
-specify all paths with @samp{/} instead of @samp{\}. If you use
-@samp{\}, you need to specify it twice, because @samp{\} is the escape
-character in @strong{MySQL}. @xref{Option files}.
+@node Creating tables, Loading tables, Creating database, Database use
+@subsection Creating a Table
-Starting with @strong{MySQL} 3.23.38, the Windows distribution includes
-both the normal and the @strong{MySQL-Max} binaries. The main benefit
-of using the normal @code{mysqld.exe} binary is that it's a little
-faster and uses less resources.
+@cindex tables, creating
+@cindex creating, tables
-Here is a list of the different @strong{MySQL} servers you can use:
+Creating the database is the easy part, but at this point it's empty, as
+@code{SHOW TABLES} will tell you:
-@multitable @columnfractions .25 .75
-@item @code{mysqld} @tab
-Compiled with full debugging and automatic memory allocation checking,
-symbolic links, BDB and InnoDB tables.
-@item @code{mysqld-opt} @tab
-Optimized binary with no support for transactional tables.
-@item @code{mysqld-nt} @tab
-Optimized binary for NT with support for named pipes. You can run this
-version on Win98, but in this case no named pipes are created and you must
-have TCP/IP installed.
-@item @code{mysqld-max} @tab
-Optimized binary with support for symbolic links, BDB and InnoDB tables.
-@item @code{mysqld-max-nt} @tab
-Like @code{mysqld-max}, but compiled with support for named pipes.
-@end multitable
+@example
+mysql> SHOW TABLES;
+Empty set (0.00 sec)
+@end example
-All of the above binaries are optimized for the Pentium Pro processor but
-should work on any Intel processor >= i386.
+The harder part is deciding what the structure of your database should be:
+what tables you will need and what columns will be in each of them.
-NOTE: If you want to use InnoDB tables, there are certain startup
-options that must be specified in your @file{my.ini} file! @xref{InnoDB start}.
+You'll want a table that contains a record for each of your pets. This can
+be called the @code{pet} table, and it should contain, as a bare minimum,
+each animal's name. Because the name by itself is not very interesting, the
+table should contain other information. For example, if more than one person
+in your family keeps pets, you might want to list each animal's owner. You
+might also want to record some basic descriptive information such as species
+and sex.
-@node Win95 start, NT start, Windows installation, Windows
-@subsection Starting MySQL on Windows 95 or Windows 98
+How about age? That might be of interest, but it's not a good thing to store
+in a database. Age changes as time passes, which means you'd have to update
+your records often. Instead, it's better to store a fixed value such as
+date of birth. Then, whenever you need age, you can calculate it as the
+difference between the current date and the birth date. @strong{MySQL}
+provides functions for doing date arithmetic, so this is not difficult.
+Storing birth date rather than age has other advantages, too:
-@strong{MySQL} uses TCP/IP to connect a client to a server. (This will
-allow any machine on your network to connect to your @strong{MySQL}
-server.) Because of this, you must install TCP/IP on your machine before
-starting @strong{MySQL}. You can find TCP/IP on your Windows CD-ROM.
+@itemize @bullet
+@item
+You can use the database for tasks such as generating reminders for upcoming
+pet birthdays. (If you think this type of query is somewhat silly, note that
+it is the same question you might ask in the context of a business database
+to identify clients to whom you'll soon need to send out birthday greetings,
+for that computer-assisted personal touch.)
-Note that if you are using an old Win95 release (for example OSR2), it's
-likely that you have an old Winsock package! @strong{MySQL} requires
-Winsock 2! You can get the newest Winsock from
-@uref{http://www.microsoft.com/}. Win98 has the new Winsock 2 library, so
-the above doesn't apply for Win98.
+@item
+You can calculate age in relation to dates other than the current date. For
+example, if you store death date in the database, you can easily calculate
+how old a pet was when it died.
+@end itemize
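+
+For example, once the @code{pet} table has been created and loaded as
+described below, an approximate age in years can be computed with the date
+functions, along these lines (a sketch only; date calculations are covered
+in more detail later in this chapter):
+
+@example
+mysql> SELECT name, (TO_DAYS(NOW()) - TO_DAYS(birth))/365 AS age FROM pet;
+@end example
+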
-To start the @code{mysqld} server, you should start an MS-DOS window and type:
+You can probably think of other types of information that would be useful in
+the @code{pet} table, but the ones identified so far are sufficient for now:
+name, owner, species, sex, birth, and death.
+
+Use a @code{CREATE TABLE} statement to specify the layout of your table:
@example
-C:\> C:\mysql\bin\mysqld
+mysql> CREATE TABLE pet (name VARCHAR(20), owner VARCHAR(20),
+ -> species VARCHAR(20), sex CHAR(1), birth DATE, death DATE);
@end example
-This will start @code{mysqld} in the background without a window.
+@code{VARCHAR} is a good choice for the @code{name}, @code{owner}, and
+@code{species} columns because the column values will vary in length. The
+lengths of those columns need not all be the same, and need not be
+@code{20}. You can pick any length from @code{1} to @code{255}, whatever
+seems most reasonable to you. (If you make a poor choice and it turns
+out later that you need a longer field, @strong{MySQL} provides an
+@code{ALTER TABLE} statement.)
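+
+For example, if you later decide that @code{name} should allow 40 characters
+instead of 20, a statement along these lines would widen the column (a
+sketch only; @code{ALTER TABLE} is described in detail elsewhere in this
+manual):
+
+@example
+mysql> ALTER TABLE pet MODIFY name VARCHAR(40);
+@end example
+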
-You can kill the @strong{MySQL} server by executing:
+Animal sex can be represented in a variety of ways, for example, @code{"m"}
+and @code{"f"}, or perhaps @code{"male"} and @code{"female"}. It's simplest
+to use the single characters @code{"m"} and @code{"f"}.
+
+The use of the @code{DATE} data type for the @code{birth} and @code{death}
+columns is a fairly obvious choice.
+
+Now that you have created a table, @code{SHOW TABLES} should produce some
+output:
@example
-C:\> C:\mysql\bin\mysqladmin -u root shutdown
+mysql> SHOW TABLES;
++---------------------+
+| Tables in menagerie |
++---------------------+
+| pet |
++---------------------+
@end example
-Note that Win95 and Win98 don't support creation of named pipes.
-On Win95 and Win98, you can only use named pipes to connect to a
-remote @strong{MySQL} server running on a Windows NT server host.
-(The @strong{MySQL} server must also support named pipes, of
-course. For example, using @code{mysqld-opt} under NT will not allow
-named pipe connections. You should use either @code{mysqld-nt} or
-@code{mysqld-max-nt}.)
+To verify that your table was created the way you expected, use
+a @code{DESCRIBE} statement:
-If @code{mysqld} doesn't start, please check the
-@file{\mysql\data\mysql.err} file to see if the server wrote any message
-there to indicate the cause of the problem. You can also try to start
-the server with @code{mysqld --standalone}; In this case, you may get
-some useful information on the screen that may help solve the problem.
+@example
+mysql> DESCRIBE pet;
++---------+-------------+------+-----+---------+-------+
+| Field | Type | Null | Key | Default | Extra |
++---------+-------------+------+-----+---------+-------+
+| name | varchar(20) | YES | | NULL | |
+| owner | varchar(20) | YES | | NULL | |
+| species | varchar(20) | YES | | NULL | |
+| sex | char(1) | YES | | NULL | |
+| birth | date | YES | | NULL | |
+| death | date | YES | | NULL | |
++---------+-------------+------+-----+---------+-------+
+@end example
-The last option is to start @code{mysqld} with @code{--standalone
---debug}. In this case @code{mysqld} will write a log file
-@file{C:\mysqld.trace} that should contain the reason why @code{mysqld}
-doesn't start. @xref{Making trace files}.
+You can use @code{DESCRIBE} any time, for example, if you forget the names of
+the columns in your table or what types they are.
-@node NT start, Windows running, Win95 start, Windows
-@subsection Starting MySQL on Windows NT or Windows 2000
+@node Loading tables, Retrieving data, Creating tables, Database use
+@subsection Loading Data into a Table
-The Win95/Win98 section also applies to @strong{MySQL} on NT/Win2000, with
-the following differences:
+@cindex loading, tables
+@cindex tables, loading data
+@cindex data, loading into tables
-To get @strong{MySQL} to work with TCP/IP on NT, you must install
-service pack 3 (or newer)!
+After creating your table, you need to populate it. The @code{LOAD DATA} and
+@code{INSERT} statements are useful for this.
-Note that everything in the following that applies for NT also applies
-for Win2000!
+Suppose your pet records can be described as shown below.
+(Observe that @strong{MySQL} expects dates in @code{YYYY-MM-DD} format;
+this may be different from what you are used to.)
-For NT/Win2000, the server name is @code{mysqld-nt}. Normally you
-should install @strong{MySQL} as a service on NT/Win2000:
+@multitable @columnfractions .16 .16 .16 .16 .16 .16
+@item @strong{name} @tab @strong{owner} @tab @strong{species} @tab @strong{sex} @tab @strong{birth} @tab @strong{death}
+@item Fluffy @tab Harold @tab cat @tab f @tab 1993-02-04 @tab
+@item Claws @tab Gwen @tab cat @tab m @tab 1994-03-17 @tab
+@item Buffy @tab Harold @tab dog @tab f @tab 1989-05-13 @tab
+@item Fang @tab Benny @tab dog @tab m @tab 1990-08-27 @tab
+@item Bowser @tab Diane @tab dog @tab m @tab 1989-08-31 @tab 1995-07-29
+@item Chirpy @tab Gwen @tab bird @tab f @tab 1998-09-11 @tab
+@item Whistler @tab Gwen @tab bird @tab @tab 1997-12-09 @tab
+@item Slim @tab Benny @tab snake @tab m @tab 1996-04-29 @tab
+@end multitable
-@example
-C:\> C:\mysql\bin\mysqld-nt --install
-@end example
+Because you are beginning with an empty table, an easy way to populate it is to
+create a text file containing a row for each of your animals, then load the
+contents of the file into the table with a single statement.
-or
+You could create a text file @file{pet.txt} containing one record per line,
+with values separated by tabs, and given in the order in which the columns
+were listed in the @code{CREATE TABLE} statement. For missing values (such
+as unknown sexes or death dates for animals that are still living), you can
+use @code{NULL} values. To represent these in your text file, use
+@code{\N}. For example, the record for Whistler the bird would look like
+this (where the whitespace between values is a single tab character):
+
+@multitable @columnfractions .15 .15 .15 .15 .25 .15
+@item @code{Whistler} @tab @code{Gwen} @tab @code{bird} @tab @code{\N} @tab @code{1997-12-09} @tab @code{\N}
+@end multitable
+
+To load the text file @file{pet.txt} into the @code{pet} table, use this
+command:
@example
-C:\> C:\mysql\bin\mysqld-max-nt --install
+mysql> LOAD DATA LOCAL INFILE "pet.txt" INTO TABLE pet;
@end example
-(Under Windows NT, you can actually install any of the server binaries
-as a service, but only those having names that end with @code{-nt.exe}
-provide support for named pipes.)
+You can specify the column value separator and end of line marker explicitly
+in the @code{LOAD DATA} statement if you wish, but the defaults are tab and
+linefeed. These are sufficient for the statement to read the file
+@file{pet.txt} properly.
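+
+For instance, the same load with the (default) separators spelled out
+explicitly would look something like this (a sketch only):
+
+@example
+mysql> LOAD DATA LOCAL INFILE "pet.txt" INTO TABLE pet
+    -> FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n';
+@end example
+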
-You can start and stop the @strong{MySQL} service with these commands:
+When you want to add new records one at a time, the @code{INSERT} statement
+is useful. In its simplest form, you supply values for each column, in the
+order in which the columns were listed in the @code{CREATE TABLE} statement.
+Suppose Diane gets a new hamster named Puffball. You could add a new record
+using an @code{INSERT} statement like this:
@example
-C:\> NET START mysql
-C:\> NET STOP mysql
+mysql> INSERT INTO pet
+ -> VALUES ('Puffball','Diane','hamster','f','1999-03-30',NULL);
@end example
-Note that in this case you can't use any other options for @code{mysqld-nt}!
+Note that string and date values are specified as quoted strings here. Also,
+with @code{INSERT}, you can insert @code{NULL} directly to represent a
+missing value. You do not use @code{\N} like you do with @code{LOAD DATA}.
-You can also run @code{mysqld-nt} as a stand-alone program on NT if you need
-to start @code{mysqld-nt} with any options! If you start @code{mysqld-nt}
-without options on NT, @code{mysqld-nt} tries to start itself as a service
-with the default service options. If you have stopped @code{mysqld-nt}, you
-have to start it with @code{NET START mysql}.
+From this example, you should be able to see that there would be a lot more
+typing involved to load
+your records initially using several @code{INSERT} statements rather
+than a single @code{LOAD DATA} statement.
-The service is installed with the name @code{MySQL}. Once installed, it must
-be started using the Services Control Manager (SCM) Utility found in the
-Control Panel, or by using the @code{NET START MySQL} command. If any options
-are desired, they must be specified as ``Startup parameters'' in the SCM utility
-before you start the @strong{MySQL} service. Once running, @code{mysqld-nt}
-can be stopped using @code{mysqladmin}, or from the SCM utility or by using
-the command @code{NET STOP MySQL}. If you use SCM to stop @code{mysqld-nt},
-there is a strange message from SCM about @code{mysqld shutdown normally}.
-When run as a service, @code{mysqld-nt} has no access to a console and so no
-messages can be seen.
+@node Retrieving data, , Loading tables, Database use
+@subsection Retrieving Information from a Table
-On NT you can get the following service error messages:
+@cindex data, retrieving
+@cindex tables, retrieving data
+@cindex retrieving, data from tables
+@cindex unloading, tables
-@multitable @columnfractions .3 .7
-@item Permission Denied @tab Means that it cannot find @code{mysqld-nt.exe}.
-@item Cannot Register @tab Means that the path is incorrect.
-@item Failed to install service. @tab Means that the service is already installed or that the Service Control Manager is in bad state.
-@end multitable
+@menu
+* Selecting all:: Selecting all data
+* Selecting rows:: Selecting particular rows
+* Selecting columns:: Selecting particular columns
+* Sorting rows:: Sorting rows
+* Date calculations:: Date calculations
+* Working with NULL:: Working with @code{NULL} values
+* Pattern matching:: Pattern matching
+* Counting rows:: Counting rows
+* Multiple tables::
+@end menu
-If you have problems installing @code{mysqld-nt} as a service, try starting
-it with the full path:
+The @code{SELECT} statement is used to pull information from a table.
+The general form of the statement is:
@example
-C:\> C:\mysql\bin\mysqld-nt --install
+SELECT what_to_select
+FROM which_table
+WHERE conditions_to_satisfy
@end example
-If this doesn't work, you can get @code{mysqld-nt} to start properly by fixing
-the path in the registry!
+@code{what_to_select} indicates what you want to see. This can be a list of
+columns, or @code{*} to indicate ``all columns.'' @code{which_table}
+indicates the table from which you want to retrieve data. The @code{WHERE}
+clause is optional. If it's present, @code{conditions_to_satisfy} specifies
+conditions that rows must satisfy to qualify for retrieval.
-If you don't want to start @code{mysqld-nt} as a service, you can start it as
-follows:
+@node Selecting all, Selecting rows, Retrieving data, Retrieving data
+@subsubsection Selecting All Data
+
+The simplest form of @code{SELECT} retrieves everything from a table:
@example
-C:\> C:\mysql\bin\mysqld-nt --standalone
+mysql> SELECT * FROM pet;
++----------+--------+---------+------+------------+------------+
+| name | owner | species | sex | birth | death |
++----------+--------+---------+------+------------+------------+
+| Fluffy | Harold | cat | f | 1993-02-04 | NULL |
+| Claws | Gwen | cat | m | 1994-03-17 | NULL |
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
+| Fang | Benny | dog | m | 1990-08-27 | NULL |
+| Bowser | Diane | dog | m | 1998-08-31 | 1995-07-29 |
+| Chirpy | Gwen | bird | f | 1998-09-11 | NULL |
+| Whistler | Gwen | bird | NULL | 1997-12-09 | NULL |
+| Slim | Benny | snake | m | 1996-04-29 | NULL |
+| Puffball | Diane | hamster | f | 1999-03-30 | NULL |
++----------+--------+---------+------+------------+------------+
@end example
-or
+This form of @code{SELECT} is useful if you want to review your entire table,
+for instance, after you've just loaded it with your initial dataset. As it
+happens, the output just shown reveals an error in your data file: Bowser
+appears to have been born after he died! Consulting your original pedigree
+papers, you find that the correct birth year is 1989, not 1998.
+
+There are at least a couple of ways to fix this:
+
+@itemize @bullet
+@item
+Edit the file @file{pet.txt} to correct the error, then empty the table
+and reload it using @code{DELETE} and @code{LOAD DATA}:
@example
-C:\> C:\mysql\bin\mysqld --standalone --debug
+mysql> SET AUTOCOMMIT=1; # Used for quick re-create of the table
+mysql> DELETE FROM pet;
+mysql> LOAD DATA LOCAL INFILE "pet.txt" INTO TABLE pet;
@end example
-The last version gives you a debug trace in @file{C:\mysqld.trace}.
-@xref{Making trace files}.
+However, if you do this, you must also re-enter the record for Puffball.
-@node Windows running, Windows and SSH, NT start, Windows
-@subsection Running MySQL on Windows
+@item
+Fix only the erroneous record with an @code{UPDATE} statement:
-@cindex TCP/IP
-@cindex named pipes
+@example
+mysql> UPDATE pet SET birth = "1989-08-31" WHERE name = "Bowser";
+@end example
+@end itemize
-@strong{MySQL} supports TCP/IP on all Windows platforms and named pipes on NT.
-The default is to use named pipes for local connections on NT and TCP/IP for
-all other cases if the client has TCP/IP installed. The host name specifies
-which protocol is used:
+As shown above, it is easy to retrieve an entire table. But typically you
+don't want to do that, particularly when the table becomes large. Instead,
+you're usually more interested in answering a particular question, in which
+case you specify some constraints on the information you want. Let's look at
+some selection queries in terms of questions about your pets that they
+answer.
-@multitable @columnfractions .3 .7
-@strong{Host name} @tab @strong{Protocol}
-@item NULL (none) @tab On NT, try named pipes first; if that doesn't work, use TCP/IP. On Win95/Win98, TCP/IP is used.
-@item . @tab Named pipes
-@item localhost @tab TCP/IP to current host
-@item hostname @tab TCP/IP
-@end multitable
+@node Selecting rows, Selecting columns, Selecting all, Retrieving data
+@subsubsection Selecting Particular Rows
-You can force a @strong{MySQL} client to use named pipes by specifying the
-@code{--pipe} option or by specifying @code{.} as the host name. Use the
-@code{--socket} option to specify the name of the pipe.
+@cindex rows, selecting
+@cindex tables, selecting rows
-You can test whether or not @strong{MySQL} is working by executing the
-following commands:
+You can select only particular rows from your table. For example, if you want
+to verify the change that you made to Bowser's birth date, select Bowser's
+record like this:
@example
-C:\> C:\mysql\bin\mysqlshow
-C:\> C:\mysql\bin\mysqlshow -u root mysql
-C:\> C:\mysql\bin\mysqladmin version status proc
-C:\> C:\mysql\bin\mysql test
+mysql> SELECT * FROM pet WHERE name = "Bowser";
++--------+-------+---------+------+------------+------------+
+| name | owner | species | sex | birth | death |
++--------+-------+---------+------+------------+------------+
+| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
++--------+-------+---------+------+------------+------------+
@end example
-If @code{mysqld} is slow to answer to connections on Win95/Win98, there is
-probably a problem with your DNS. In this case, start @code{mysqld} with
-@code{--skip-name-resolve} and use only @code{localhost} and IP numbers in
-the @strong{MySQL} grant tables. You can also avoid DNS when connecting to a
-@code{mysqld-nt} @strong{MySQL} server running on NT by using the
-@code{--pipe} argument to specify use of named pipes. This works for most
-@strong{MySQL} clients.
+The output confirms that the year is correctly recorded now as 1989, not 1998.
-There are two versions of the @strong{MySQL} command-line tool:
-@multitable @columnfractions .25 .75
-@item @code{mysql} @tab Compiled on native Windows, which offers very limited text editing capabilities.
-@item @code{mysqlc} @tab Compiled with the Cygnus GNU compiler and libraries, which offers @code{readline} editing.
-@end multitable
+String comparisons are normally case insensitive, so you can specify the
+name as @code{"bowser"}, @code{"BOWSER"}, etc. The query result will be
+the same.
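+
+For example, either of these queries (a minimal illustration of the same
+lookup as above) should return Bowser's record:
+
+@example
+mysql> SELECT * FROM pet WHERE name = "BOWSER";
+mysql> SELECT * FROM pet WHERE name = "bowser";
+@end example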
-If you want to use @code{mysqlc.exe}, you must copy
-@file{C:\mysql\lib\cygwinb19.dll} to your Windows system directory
-(@file{\windows\system} or similar place).
+You can specify conditions on any column, not just @code{name}. For example,
+if you want to know which animals were born after 1998, test the @code{birth}
+column:
-The default privileges on Windows give all local users full privileges
-to all databases without specifying a password. To make @strong{MySQL}
-more secure, you should set a password for all users and remove the row in
-the @code{mysql.user} table that has @code{Host='localhost'} and
-@code{User=''}.
+@example
+mysql> SELECT * FROM pet WHERE birth >= "1998-1-1";
++----------+-------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++----------+-------+---------+------+------------+-------+
+| Chirpy | Gwen | bird | f | 1998-09-11 | NULL |
+| Puffball | Diane | hamster | f | 1999-03-30 | NULL |
++----------+-------+---------+------+------------+-------+
+@end example
-You should also add a password for the @code{root} user. The following
-example starts by removing the anonymous user that can be used by anyone
-to access the @code{test} database, then sets a @code{root} user password:
+You can combine conditions, for example, to locate female dogs:
@example
-C:\> C:\mysql\bin\mysql mysql
-mysql> DELETE FROM user WHERE Host='localhost' AND User='';
-mysql> QUIT
-C:\> C:\mysql\bin\mysqladmin reload
-C:\> C:\mysql\bin\mysqladmin -u root password your_password
+mysql> SELECT * FROM pet WHERE species = "dog" AND sex = "f";
++-------+--------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++-------+--------+---------+------+------------+-------+
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
++-------+--------+---------+------+------------+-------+
@end example
-After you've set the password, if you want to take down the @code{mysqld}
-server, you can do so using this command:
+The preceding query uses the @code{AND} logical operator. There is also an
+@code{OR} operator:
@example
-C:\> mysqladmin --user=root --password=your_password shutdown
+mysql> SELECT * FROM pet WHERE species = "snake" OR species = "bird";
++----------+-------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++----------+-------+---------+------+------------+-------+
+| Chirpy | Gwen | bird | f | 1998-09-11 | NULL |
+| Whistler | Gwen | bird | NULL | 1997-12-09 | NULL |
+| Slim | Benny | snake | m | 1996-04-29 | NULL |
++----------+-------+---------+------+------------+-------+
@end example
-If you are using the old shareware version of @strong{MySQL} Version
-3.21 under Windows, the above command will fail with an error:
-@code{parse error near 'SET OPTION password'}. The fix is in to upgrade
-to the current @strong{MySQL} version, which is freely available.
-
-With the current @strong{MySQL} versions you can easily add new users
-and change privileges with @code{GRANT} and @code{REVOKE} commands.
-@xref{GRANT}.
+@code{AND} and @code{OR} may be intermixed. If you do that, it's a good idea
+to use parentheses to indicate how conditions should be grouped:
-@c FIX this is ugly, real ugly.
-@cindex SSH
-@cindex connecting, remotely with SSH
-@node Windows and SSH, Windows symbolic links, Windows running, Windows
-@subsection Connecting to a Remote MySQL from Windows with SSH
+@example
+mysql> SELECT * FROM pet WHERE (species = "cat" AND sex = "m")
+ -> OR (species = "dog" AND sex = "f");
++-------+--------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++-------+--------+---------+------+------------+-------+
+| Claws | Gwen | cat | m | 1994-03-17 | NULL |
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
++-------+--------+---------+------+------------+-------+
+@end example
-Here is a note about how to connect to get a secure connection to remote
-@strong{MySQL} server with SSH (by David Carlson @email{dcarlson@@mplcomm.com}):
+@node Selecting columns, Sorting rows, Selecting rows, Retrieving data
+@subsubsection Selecting Particular Columns
-@itemize @bullet
-@item
-Install an SSH client on your Windows machine. As a user, the best non-free
-one I've found is from @code{SecureCRT} from @uref{http://www.vandyke.com/}.
-Another option is @code{f-secure} from @uref{http://www.f-secure.com/}. You
-can also find some free ones on @strong{Google} at
-@uref{http://directory.google.com/Top/Computers/Security/Products_and_Tools/Cryptography/SSH/Clients/Windows/}.
+@cindex columns, selecting
+@cindex tables, selecting columns
-@item
-Start your Windows SSH client.
-Set @code{Host_Name = yourmysqlserver_URL_or_IP}.
-Set @code{userid=your_userid} to log in to your server (probably not the same
-as your @strong{MySQL} login/password.
+If you don't want to see entire rows from your table, just name the columns
+in which you're interested, separated by commas. For example, if you want to
+know when your animals were born, select the @code{name} and @code{birth}
+columns:
-@item
-Set up port forwarding. Either do a remote forward (Set @code{local_port: 3306}, @code{remote_host: yourmysqlservername_or_ip}, @code{remote_port: 3306} )
-or a local forward (Set @code{port: 3306}, @code{host: localhost}, @code{remote port: 3306}).
+@example
+mysql> SELECT name, birth FROM pet;
++----------+------------+
+| name | birth |
++----------+------------+
+| Fluffy | 1993-02-04 |
+| Claws | 1994-03-17 |
+| Buffy | 1989-05-13 |
+| Fang | 1990-08-27 |
+| Bowser | 1989-08-31 |
+| Chirpy | 1998-09-11 |
+| Whistler | 1997-12-09 |
+| Slim | 1996-04-29 |
+| Puffball | 1999-03-30 |
++----------+------------+
+@end example
-@item
-Save everything, otherwise you'll have to redo it the next time.
+To find out who owns pets, use this query:
-@item
-Log in to your server with SSH session you just created.
+@example
+mysql> SELECT owner FROM pet;
++--------+
+| owner |
++--------+
+| Harold |
+| Gwen |
+| Harold |
+| Benny |
+| Diane |
+| Gwen |
+| Gwen |
+| Benny |
+| Diane |
++--------+
+@end example
-@item
-On your Windows machine, start some ODBC application (such as Access).
+@findex DISTINCT
+However, notice that the query simply retrieves the @code{owner} field from
+each record, and some of them appear more than once. To minimize the output,
+retrieve each unique output record just once by adding the keyword
+@code{DISTINCT}:
-@item
-Create a new file in Windows and link to @strong{MySQL} using the ODBC
-driver the same way you normally do, EXCEPT type in @code{localhost}
-for the @strong{MySQL} host server --- not @code{yourmysqlservername}.
-@end itemize
+@example
+mysql> SELECT DISTINCT owner FROM pet;
++--------+
+| owner |
++--------+
+| Benny |
+| Diane |
+| Gwen |
+| Harold |
++--------+
+@end example
-You should now have an ODBC connection to @strong{MySQL}, encrypted using SSH.
+You can use a @code{WHERE} clause to combine row selection with column
+selection. For example, to get birth dates for dogs and cats only,
+use this query:
-@cindex symbolic links
-@cindex using multiple disks to start data
-@cindex disks, splitting data across
-@node Windows symbolic links, Windows compiling, Windows and SSH, Windows
-@subsection Splitting Data Across Different Disks on Windows
+@example
+mysql> SELECT name, species, birth FROM pet
+ -> WHERE species = "dog" OR species = "cat";
++--------+---------+------------+
+| name | species | birth |
++--------+---------+------------+
+| Fluffy | cat | 1993-02-04 |
+| Claws | cat | 1994-03-17 |
+| Buffy | dog | 1989-05-13 |
+| Fang | dog | 1990-08-27 |
+| Bowser | dog | 1989-08-31 |
++--------+---------+------------+
+@end example
-Beginning with @strong{MySQL} Version 3.23.16, the @code{mysqld-max}
-and @code{mysql-max-nt} servers in the @strong{MySQL} distribution are
-compiled with the @code{-DUSE_SYMDIR} option. This allows you to put a
-database on different disk by adding a symbolic link to it
-(in a manner similar to the way that symbolic links work on Unix).
+@node Sorting rows, Date calculations, Selecting columns, Retrieving data
+@subsubsection Sorting Rows
-On Windows, you make a symbolic link to a database by creating a file
-that contains the path to the destination directory and saving this in
-the @file{mysql_data} directory under the filename @file{database.sym}.
-Note that the symbolic link will be used only if the directory
-@file{mysql_data_dir\database} doesn't exist.
+@cindex rows, sorting
+@cindex sorting, table rows
+@cindex sorting, data
+@cindex tables, sorting rows
-For example, if the @strong{MySQL} data directory is @file{C:\mysql\data}
-and you want to have database @code{foo} located at @file{D:\data\foo}, you
-should create the file @file{C:\mysql\data\foo.sym} that contains the
-text @code{D:\data\foo\}. After that, all tables created in the database
-@code{foo} will be created in @file{D:\data\foo}.
+You may have noticed in the preceding examples that the result rows are
+displayed in no particular order. However, it's often easier to examine
+query output when the rows are sorted in some meaningful way. To sort a
+result, use an @code{ORDER BY} clause.
-Note that because of the speed penalty you get when opening every table,
-we have not enabled this by default even if you have compiled
-@strong{MySQL} with support for this. To enable symlinks you should put
-in your @code{my.cnf} or @code{my.ini} file the following entry:
+Here are animal birthdays, sorted by date:
@example
-[mysqld]
-use-symbolic-links
+mysql> SELECT name, birth FROM pet ORDER BY birth;
++----------+------------+
+| name | birth |
++----------+------------+
+| Buffy | 1989-05-13 |
+| Bowser | 1989-08-31 |
+| Fang | 1990-08-27 |
+| Fluffy | 1993-02-04 |
+| Claws | 1994-03-17 |
+| Slim | 1996-04-29 |
+| Whistler | 1997-12-09 |
+| Chirpy | 1998-09-11 |
+| Puffball | 1999-03-30 |
++----------+------------+
@end example
-In @strong{MySQL} 4.0 we will enable symlinks by default. Then you
-should instead use the @code{skip-symlink} option if you want to
-disable this.
+To sort in reverse order, add the @code{DESC} (descending) keyword to the
+name of the column you are sorting by:
-@cindex compiling, on Windows
-@cindex Windows, compiling on
-@node Windows compiling, Windows vs Unix, Windows symbolic links, Windows
-@subsection Compiling MySQL Clients on Windows
+@example
+mysql> SELECT name, birth FROM pet ORDER BY birth DESC;
++----------+------------+
+| name | birth |
++----------+------------+
+| Puffball | 1999-03-30 |
+| Chirpy | 1998-09-11 |
+| Whistler | 1997-12-09 |
+| Slim | 1996-04-29 |
+| Claws | 1994-03-17 |
+| Fluffy | 1993-02-04 |
+| Fang | 1990-08-27 |
+| Bowser | 1989-08-31 |
+| Buffy | 1989-05-13 |
++----------+------------+
+@end example
-In your source files, you should include @file{windows.h} before you include
-@file{mysql.h}:
+You can sort on multiple columns. For example, to sort by type of
+animal, then by birth date within animal type with youngest animals first,
+use the following query:
@example
-#if defined(_WIN32) || defined(_WIN64)
-#include <windows.h>
-#endif
-#include <mysql.h>
+mysql> SELECT name, species, birth FROM pet ORDER BY species, birth DESC;
++----------+---------+------------+
+| name | species | birth |
++----------+---------+------------+
+| Chirpy | bird | 1998-09-11 |
+| Whistler | bird | 1997-12-09 |
+| Claws | cat | 1994-03-17 |
+| Fluffy | cat | 1993-02-04 |
+| Fang | dog | 1990-08-27 |
+| Bowser | dog | 1989-08-31 |
+| Buffy | dog | 1989-05-13 |
+| Puffball | hamster | 1999-03-30 |
+| Slim | snake | 1996-04-29 |
++----------+---------+------------+
@end example
-You can either link your code with the dynamic @file{libmysql.lib} library,
-which is just a wrapper to load in @file{libmysql.dll} on demand, or link
-with the static @file{mysqlclient.lib} library.
+Note that the @code{DESC} keyword applies only to the column name immediately
+preceding it (@code{birth}); @code{species} values are still sorted in
+ascending order.
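+
+If you want both columns sorted in descending order, the @code{DESC} keyword
+must be given for each column name. For example, a variation on the
+preceding query (shown here without its output) would be:
+
+@example
+mysql> SELECT name, species, birth FROM pet ORDER BY species DESC, birth DESC;
+@end example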
-Note that as the mysqlclient libraries are compiled as threaded libraries,
-you should also compile your code to be multi-threaded!
+@node Date calculations, Working with NULL, Sorting rows, Retrieving data
+@subsubsection Date Calculations
-@cindex Windows, versus Unix
-@cindex operating systems, Windows versus Unix
-@node Windows vs Unix, , Windows compiling, Windows
-@subsection MySQL-Windows Compared to Unix MySQL
+@cindex date calculations
+@cindex calculating, dates
+@cindex extracting, dates
+@cindex age, calculating
-@strong{MySQL}-Windows has by now proven itself to be very stable. This version
-of @strong{MySQL} has the same features as the corresponding Unix version
-with the following exceptions:
+@strong{MySQL} provides several functions that you can use to perform
+calculations on dates, for example, to calculate ages or extract
+parts of dates.
-@table @strong
-@item Win95 and threads
-Win95 leaks about 200 bytes of main memory for each thread creation.
-Each connection in @strong{MySQL} creates a new thread, so you shouldn't
-run @code{mysqld} for an extended time on Win95 if your server handles
-many connections! WinNT and Win98 don't suffer from this bug.
+To determine how many years old each of your pets is, compute age as the
+difference between the birth date and the current date. Do this by
+converting the two dates to days, taking the difference, and dividing by 365
+(the number of days in a year):
-@item Concurrent reads
-@strong{MySQL} depends on the @code{pread()} and @code{pwrite()} calls to be
-able to mix @code{INSERT} and @code{SELECT}. Currently we use mutexes
-to emulate @code{pread()}/@code{pwrite()}. We will, in the long run,
-replace the file level interface with a virtual interface so that we can
-use the @code{readfile()}/@code{writefile()} interface on NT to get more speed.
-The current implementation limits the number of open files @strong{MySQL}
-can use to 1024, which means that you will not be able to run as many
-concurrent threads on NT as on Unix.
+@example
+mysql> SELECT name, (TO_DAYS(NOW())-TO_DAYS(birth))/365 FROM pet;
++----------+-------------------------------------+
+| name | (TO_DAYS(NOW())-TO_DAYS(birth))/365 |
++----------+-------------------------------------+
+| Fluffy | 6.15 |
+| Claws | 5.04 |
+| Buffy | 9.88 |
+| Fang | 8.59 |
+| Bowser | 9.58 |
+| Chirpy | 0.55 |
+| Whistler | 1.30 |
+| Slim | 2.92 |
+| Puffball | 0.00 |
++----------+-------------------------------------+
+@end example
-@item Blocking read
-@strong{MySQL} uses a blocking read for each connection.
-This means that:
+Although the query works, there are some things about it that could be
+improved. First, the result could be scanned more easily if the rows were
+presented in some order. Second, the heading for the age column isn't very
+meaningful.
-@itemize @bullet
-@item
-A connection will not be disconnected automatically after 8 hours, as happens
-with the Unix version of @strong{MySQL}.
+The first problem can be handled by adding an @code{ORDER BY name} clause to
+sort the output by name. To deal with the column heading, provide a name for
+the column so that a different label appears in the output (this is called a
+column alias):
-@item
-If a connection hangs, it's impossible to break it without killing
-@strong{MySQL}.
+@example
+mysql> SELECT name, (TO_DAYS(NOW())-TO_DAYS(birth))/365 AS age
+ -> FROM pet ORDER BY name;
++----------+------+
+| name | age |
++----------+------+
+| Bowser | 9.58 |
+| Buffy | 9.88 |
+| Chirpy | 0.55 |
+| Claws | 5.04 |
+| Fang | 8.59 |
+| Fluffy | 6.15 |
+| Puffball | 0.00 |
+| Slim | 2.92 |
+| Whistler | 1.30 |
++----------+------+
+@end example
-@item
-@code{mysqladmin kill} will not work on a sleeping connection.
+To sort the output by @code{age} rather than @code{name}, just use a
+different @code{ORDER BY} clause:
-@item
-@code{mysqladmin shutdown} can't abort as long as there are sleeping
-connections.
-@end itemize
+@example
+mysql> SELECT name, (TO_DAYS(NOW())-TO_DAYS(birth))/365 AS age
+ -> FROM pet ORDER BY age;
++----------+------+
+| name | age |
++----------+------+
+| Puffball | 0.00 |
+| Chirpy | 0.55 |
+| Whistler | 1.30 |
+| Slim | 2.92 |
+| Claws | 5.04 |
+| Fluffy | 6.15 |
+| Fang | 8.59 |
+| Bowser | 9.58 |
+| Buffy | 9.88 |
++----------+------+
+@end example
-We plan to fix this problem when our Windows developers have figured out a
-nice workaround.
+A similar query can be used to determine age at death for animals that have
+died. You determine which animals these are by checking whether or not the
+@code{death} value is @code{NULL}. Then, for those with non-@code{NULL}
+values, compute the difference between the @code{death} and @code{birth}
+values:
-@item UDF functions
-For the moment, @strong{MySQL}-Windows does not support user-definable
-functions.
+@example
+mysql> SELECT name, birth, death, (TO_DAYS(death)-TO_DAYS(birth))/365 AS age
+ -> FROM pet WHERE death IS NOT NULL ORDER BY age;
++--------+------------+------------+------+
+| name | birth | death | age |
++--------+------------+------------+------+
+| Bowser | 1989-08-31 | 1995-07-29 | 5.91 |
++--------+------------+------------+------+
+@end example
-@item @code{DROP DATABASE}
-You can't drop a database that is in use by some thread.
+The query uses @code{death IS NOT NULL} rather than @code{death != NULL}
+because @code{NULL} is a special value. This is explained later.
+@xref{Working with NULL, , Working with @code{NULL}}.
-@item Killing @strong{MySQL} from the task manager
-You can't kill @strong{MySQL} from the task manager or with the shutdown
-utility in Win95. You must take it down with @code{mysqladmin shutdown}.
+What if you want to know which animals have birthdays next month? For this
+type of calculation, year and day are irrelevant; you simply want to extract
+the month part of the @code{birth} column. @strong{MySQL} provides several
+date-part extraction functions, such as @code{YEAR()}, @code{MONTH()}, and
+@code{DAYOFMONTH()}. @code{MONTH()} is the appropriate function here. To
+see how it works, run a simple query that displays the value of both
+@code{birth} and @code{MONTH(birth)}:
-@item Case-insensitive names
-Filenames are case insensitive on Windows, so database and table names
-are also case insensitive in @strong{MySQL} for Windows. The only
-restriction is that database and table names must be specified using the same
-case throughout a given statement. @xref{Name case sensitivity}.
+@example
+mysql> SELECT name, birth, MONTH(birth) FROM pet;
++----------+------------+--------------+
+| name | birth | MONTH(birth) |
++----------+------------+--------------+
+| Fluffy | 1993-02-04 | 2 |
+| Claws | 1994-03-17 | 3 |
+| Buffy | 1989-05-13 | 5 |
+| Fang | 1990-08-27 | 8 |
+| Bowser | 1989-08-31 | 8 |
+| Chirpy | 1998-09-11 | 9 |
+| Whistler | 1997-12-09 | 12 |
+| Slim | 1996-04-29 | 4 |
+| Puffball | 1999-03-30 | 3 |
++----------+------------+--------------+
+@end example
-@item The @samp{\} directory character
-Pathname components in Win95 are separated by the @samp{\} character, which is
-also the escape character in @strong{MySQL}. If you are using @code{LOAD
-DATA INFILE} or @code{SELECT ... INTO OUTFILE}, you must double the @samp{\}
-character:
+Finding animals with birthdays in the upcoming month is easy, too. Suppose
+the current month is April. Then the month value is @code{4} and you look
+for animals born in May (month 5) like this:
@example
-mysql> LOAD DATA INFILE "C:\\tmp\\skr.txt" INTO TABLE skr;
-mysql> SELECT * INTO OUTFILE 'C:\\tmp\\skr.txt' FROM skr;
+mysql> SELECT name, birth FROM pet WHERE MONTH(birth) = 5;
++-------+------------+
+| name | birth |
++-------+------------+
+| Buffy | 1989-05-13 |
++-------+------------+
@end example
-Alternatively, use Unix style filenames with @samp{/} characters:
+There is a small complication if the current month is December, of course.
+You don't just add one to the month number (@code{12}) and look for animals
+born in month 13, because there is no such month. Instead, you look for
+animals born in January (month 1).
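+
+For example, the query for January birthdays is simply the one below. (With
+the sample data shown earlier, this particular query happens to return no
+rows, because none of the pets were born in January.)
+
+@example
+mysql> SELECT name, birth FROM pet WHERE MONTH(birth) = 1;
+@end example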
+
+You can even write the query so that it works no matter what the current
+month is. That way you don't have to use a particular month number in the
+query. @code{DATE_ADD()} allows you to add a time interval to a given date.
+If you add a month to the value of @code{NOW()} and then extract the month
+part with @code{MONTH()}, the result is the month in which to look for
+birthdays:
@example
-mysql> LOAD DATA INFILE "C:/tmp/skr.txt" INTO TABLE skr;
-mysql> SELECT * INTO OUTFILE 'C:/tmp/skr.txt' FROM skr;
+mysql> SELECT name, birth FROM pet
+ -> WHERE MONTH(birth) = MONTH(DATE_ADD(NOW(), INTERVAL 1 MONTH));
@end example
-@item @code{Can't open named pipe} error
-If you use a @strong{MySQL} 3.22 version on NT with the newest mysql-clients
-you will get the following error:
+A different way to accomplish the same task is to use the modulo function
+@code{MOD()} to wrap the month value around to @code{0} if it is currently
+@code{12}, and then add @code{1} to get the next month:
@example
-error 2017: can't open named pipe to host: . pipe...
+mysql> SELECT name, birth FROM pet
+ -> WHERE MONTH(birth) = MOD(MONTH(NOW()), 12) + 1;
@end example
-@tindex .my.cnf file
-This is because the release version of @strong{MySQL} uses named pipes on NT
-by default. You can avoid this error by using the @code{--host=localhost}
-option to the new @strong{MySQL} clients or create an option file
-@file{C:\my.cnf} that contains the following information:
+Note that @code{MONTH()} returns a number between 1 and 12, while
+@code{MOD(something,12)} returns a number between 0 and 11. The addition
+therefore has to come after the @code{MOD()}; otherwise November (11) would
+wrap around to 0, which is not a valid month number.
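+
+You can check the wrap-around behavior directly. As a quick sketch,
+@code{MOD(12,12) + 1} evaluates to 1 (January) and @code{MOD(11,12) + 1}
+to 12 (December):
+
+@example
+mysql> SELECT MOD(12, 12) + 1, MOD(11, 12) + 1;
+@end example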
+
+@node Working with NULL, Pattern matching, Date calculations, Retrieving data
+@subsubsection Working with @code{NULL} Values
+
+@findex NULL
+@cindex NULL value
+
+The @code{NULL} value can be surprising until you get used to it.
+Conceptually, @code{NULL} means missing value or unknown value and it
+is treated somewhat differently than other values. To test for @code{NULL},
+you cannot use the arithmetic comparison operators such as @code{=}, @code{<},
+or @code{!=}. To demonstrate this for yourself, try the following query:
@example
-[client]
-host = localhost
+mysql> SELECT 1 = NULL, 1 != NULL, 1 < NULL, 1 > NULL;
++----------+-----------+----------+----------+
+| 1 = NULL | 1 != NULL | 1 < NULL | 1 > NULL |
++----------+-----------+----------+----------+
+| NULL | NULL | NULL | NULL |
++----------+-----------+----------+----------+
@end example
-@item @code{Access denied for user} error
-If you get the error @code{Access denied for user: 'some-user@@unknown'
-to database 'mysql'} when accessing a @strong{MySQL} server on the same
-machine, this means that @strong{MySQL} can't resolve your host name
-properly.
-
-To fix this, you should create a file @file{\windows\hosts} with the
-following information:
+Clearly you get no meaningful results from these comparisons. Use
+the @code{IS NULL} and @code{IS NOT NULL} operators instead:
@example
-127.0.0.1 localhost
+mysql> SELECT 1 IS NULL, 1 IS NOT NULL;
++-----------+---------------+
+| 1 IS NULL | 1 IS NOT NULL |
++-----------+---------------+
+| 0 | 1 |
++-----------+---------------+
@end example
-@item @code{ALTER TABLE}
-While you are executing an @code{ALTER TABLE} statement, the table is locked
-from usage by other threads. This has to do with the fact that on Windows,
-you can't delete a file that is in use by another threads. (In the future,
-we may find some way to work around this problem.)
+In @strong{MySQL}, 0 or @code{NULL} means false and anything else means true.
+The default truth value from a boolean operation is 1.
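+
+For example, comparisons between known values return 1 when true and 0 when
+false; this small sketch can be run directly:
+
+@example
+mysql> SELECT 2 > 1, 1 > 2, 1 = 1 AND 2 = 2;
+@end example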
-@item @code{DROP TABLE} on a table that is in use by a @code{MERGE} table will not work
-The @code{MERGE} handler does its table mapping hidden from @strong{MySQL}.
-Because Windows doesn't allow you to drop files that are open, you first
-must flush all @code{MERGE} tables (with @code{FLUSH TABLES}) or drop the
-@code{MERGE} table before dropping the table. We will fix this at the same
-time we introduce @code{VIEW}s.
-@end table
+This special treatment of @code{NULL} is why, in the previous section, it
+was necessary to determine which animals are no longer alive using
+@code{death IS NOT NULL} instead of @code{death != NULL}.
-Here are some open issues for anyone who might want to help us with the Windows
-release:
+@node Pattern matching, Counting rows, Working with NULL, Retrieving data
+@subsubsection Pattern Matching
-@cindex Windows, open issues
+@cindex pattern matching
+@cindex matching, patterns
+@cindex expressions, extended
-@itemize @bullet
-@item
-Make a single-user @code{MYSQL.DLL} server. This should include everything in
-a standard @strong{MySQL} server, except thread creation. This will make
-@strong{MySQL} much easier to use in applications that don't need a true
-client/server and don't need to access the server from other hosts.
+@strong{MySQL} provides standard SQL pattern matching as well as a form of
+pattern matching based on extended regular expressions similar to those used
+by Unix utilities such as @code{vi}, @code{grep}, and @code{sed}.
-@item
-Add some nice start and shutdown icons to the @strong{MySQL} installation.
+SQL pattern matching allows you to use @samp{_} to match any single
+character and @samp{%} to match an arbitrary number of characters (including
+zero characters). In @strong{MySQL}, SQL patterns are case insensitive by
+default. Some examples are shown below. Note that you do not use @code{=}
+or @code{!=} when you use SQL patterns; use the @code{LIKE} or @code{NOT
+LIKE} comparison operators instead.
-@item
-Create a tool to manage registry entries for the @strong{MySQL} startup
-options. The registry entry reading is already coded into @file{mysqld.cc},
-but it should be recoded to be more parameter oriented. The tool should
-also be able to update the @file{C:\my.cnf} option file if the user prefers
-to use that instead of the registry.
+To find names beginning with @samp{b}:
-@item
-When registering @code{mysqld} as a service with @code{--install} (on NT)
-it would be nice if you could also add default options on the command line.
-For the moment, the workaround is to list the parameters in the
-@file{C:\my.cnf} file instead.
+@example
+mysql> SELECT * FROM pet WHERE name LIKE "b%";
++--------+--------+---------+------+------------+------------+
+| name | owner | species | sex | birth | death |
++--------+--------+---------+------+------------+------------+
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
+| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
++--------+--------+---------+------+------------+------------+
+@end example
-@item
-It would be real nice to be able to kill @code{mysqld} from the task manager.
-For the moment, you must use @code{mysqladmin shutdown}.
+To find names ending with @samp{fy}:
-@item
-Port @code{readline} to Windows for use in the @code{mysql} command line tool.
+@example
+mysql> SELECT * FROM pet WHERE name LIKE "%fy";
++--------+--------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++--------+--------+---------+------+------------+-------+
+| Fluffy | Harold | cat | f | 1993-02-04 | NULL |
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
++--------+--------+---------+------+------------+-------+
+@end example
+
+To find names containing a @samp{w}:
+
+@example
+mysql> SELECT * FROM pet WHERE name LIKE "%w%";
++----------+-------+---------+------+------------+------------+
+| name | owner | species | sex | birth | death |
++----------+-------+---------+------+------------+------------+
+| Claws | Gwen | cat | m | 1994-03-17 | NULL |
+| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
+| Whistler | Gwen | bird | NULL | 1997-12-09 | NULL |
++----------+-------+---------+------+------------+------------+
+@end example
+
+To find names containing exactly five characters, use the @samp{_} pattern
+character:
+
+@example
+mysql> SELECT * FROM pet WHERE name LIKE "_____";
++-------+--------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++-------+--------+---------+------+------------+-------+
+| Claws | Gwen | cat | m | 1994-03-17 | NULL |
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
++-------+--------+---------+------+------------+-------+
+@end example
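+
+To negate an SQL pattern, use @code{NOT LIKE}. For instance, a query like
+this (shown without its output) lists the pets whose names do not contain a
+@samp{w}:
+
+@example
+mysql> SELECT * FROM pet WHERE name NOT LIKE "%w%";
+@end example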
+
+The other type of pattern matching provided by @strong{MySQL} uses extended
+regular expressions. When you test for a match for this type of pattern, use
+the @code{REGEXP} and @code{NOT REGEXP} operators (or @code{RLIKE} and
+@code{NOT RLIKE}, which are synonyms).
+
+Some characteristics of extended regular expressions are:
+@itemize @bullet
@item
-GUI versions of the standard @strong{MySQL} clients (@code{mysql},
-@code{mysqlshow}, @code{mysqladmin}, and @code{mysqldump}) would be nice.
+@samp{.} matches any single character.
@item
-It would be nice if the socket read and write functions in @file{net.c} were
-interruptible. This would make it possible to kill open threads with
-@code{mysqladmin kill} on Windows.
+A character class @samp{[...]} matches any character within the brackets.
+For example, @samp{[abc]} matches @samp{a}, @samp{b}, or @samp{c}. To name a
+range of characters, use a dash. @samp{[a-z]} matches any lowercase letter,
+whereas @samp{[0-9]} matches any digit.
@item
-@code{mysqld} always starts in the "C" locale and not in the default locale.
-We would like to have @code{mysqld} use the current locale for the sort order.
+@samp{*} matches zero or more instances of the thing preceding it. For
+example, @samp{x*} matches any number of @samp{x} characters,
+@samp{[0-9]*} matches any number of digits, and @samp{.*} matches any
+number of anything.
@item
-Implement UDF functions with @code{.DLL}s.
+Regular expressions are case sensitive, but you can use a character class to
+match both lettercases if you wish. For example, @samp{[aA]} matches
+lowercase or uppercase @samp{a} and @samp{[a-zA-Z]} matches any letter in
+either case.
@item
-Add macros to use the faster thread-safe increment/decrement methods
-provided by Windows.
+The pattern matches if it occurs anywhere in the value being tested.
+(SQL patterns match only if they match the entire value.)
+@item
+To anchor a pattern so that it must match the beginning or end of the value
+being tested, use @samp{^} at the beginning or @samp{$} at the end of the
+pattern.
@end itemize
-Other Windows-specific issues are described in the @file{README} file that
-comes with the @strong{MySQL}-Windows distribution.
-
-@node OS/2, MySQL binaries, Windows, Installing
-@section OS/2 Notes
+To demonstrate how extended regular expressions work, the @code{LIKE} queries
+shown above are rewritten below to use @code{REGEXP}.
-@strong{MySQL} uses quite a few open files. Because of this, you should add
-something like the following to your @file{CONFIG.SYS} file:
+To find names beginning with @samp{b}, use @samp{^} to match the beginning of
+the name:
@example
-SET EMXOPT=-c -n -h1024
+mysql> SELECT * FROM pet WHERE name REGEXP "^b";
++--------+--------+---------+------+------------+------------+
+| name | owner | species | sex | birth | death |
++--------+--------+---------+------+------------+------------+
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
+| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
++--------+--------+---------+------+------------+------------+
@end example
-If you don't do this, you will probably run into the following error:
+Prior to @strong{MySQL} Version 3.23.4, @code{REGEXP} was case sensitive,
+and the previous query would return no rows. To match either lowercase or
+uppercase @samp{b}, use this query instead:
@example
-File 'xxxx' not found (Errcode: 24)
+mysql> SELECT * FROM pet WHERE name REGEXP "^[bB]";
@end example
-When using @strong{MySQL} with OS/2 Warp 3, FixPack 29 or above is
-required. With OS/2 Warp 4, FixPack 4 or above is required. This is a
-requirement of the Pthreads library. @strong{MySQL} must be installed
-in a partition that supports long filenames such as HPFS, FAT32, etc.
-
-The @file{INSTALL.CMD} script must be run from OS/2's own @file{CMD.EXE}
-and may not work with replacement shells such as @file{4OS2.EXE}.
+From @strong{MySQL} 3.23.4 on, to force a @code{REGEXP} comparison to
+be case sensitive, use the @code{BINARY} keyword to make one of the
+strings a binary string. This query will match only lowercase @samp{b}
+at the beginning of a name:
-The @file{scripts/mysql-install-db} script has been renamed. It is now called
-@file{install.cmd} and is a REXX script, which will set up the default
-@strong{MySQL} security settings and create the WorkPlace Shell icons
-for @strong{MySQL}.
+@example
+mysql> SELECT * FROM pet WHERE name REGEXP BINARY "^b";
+@end example
-Dynamic module support is compiled in but not fully tested. Dynamic
-modules should be compiled using the Pthreads run-time library.
+To find names ending with @samp{fy}, use @samp{$} to match the end of the
+name:
@example
-gcc -Zdll -Zmt -Zcrtdll=pthrdrtl -I../include -I../regex -I.. \
- -o example udf_example.cc -L../lib -lmysqlclient udf_example.def
-mv example.dll example.udf
+mysql> SELECT * FROM pet WHERE name REGEXP "fy$";
++--------+--------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++--------+--------+---------+------+------------+-------+
+| Fluffy | Harold | cat | f | 1993-02-04 | NULL |
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
++--------+--------+---------+------+------------+-------+
@end example
-@strong{Note:} Due to limitations in OS/2, UDF module name stems must not
-exceed 8 characters. Modules are stored in the @file{/mysql2/udf}
-directory; the @code{safe-mysqld.cmd} script will put this directory in
-the @code{BEGINLIBPATH} environment variable. When using UDF modules,
-specified extensions are ignored --- it is assumed to be @file{.udf}.
-For example, in Unix, the shared module might be named @file{example.so}
-and you would load a function from it like this:
+To find names containing a lowercase or uppercase @samp{w}, use this query:
@example
-mysql> CREATE FUNCTION metaphon RETURNS STRING SONAME "example.so";
+mysql> SELECT * FROM pet WHERE name REGEXP "w";
++----------+-------+---------+------+------------+------------+
+| name | owner | species | sex | birth | death |
++----------+-------+---------+------+------------+------------+
+| Claws | Gwen | cat | m | 1994-03-17 | NULL |
+| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
+| Whistler | Gwen | bird | NULL | 1997-12-09 | NULL |
++----------+-------+---------+------+------------+------------+
@end example
-Is OS/2, the module would be named @file{example.udf}, but you would not
-specify the module extension:
+Because a regular expression pattern matches if it occurs anywhere in the
+value, the previous query does not need a wildcard on either side of the
+pattern to make it match the entire value, as it would if you used a SQL
+pattern.
+
+To find names containing exactly five characters, use @samp{^} and @samp{$}
+to match the beginning and end of the name, and five instances of @samp{.}
+in between:
@example
-mysql> CREATE FUNCTION metaphon RETURNS STRING SONAME "example";
+mysql> SELECT * FROM pet WHERE name REGEXP "^.....$";
++-------+--------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++-------+--------+---------+------+------------+-------+
+| Claws | Gwen | cat | m | 1994-03-17 | NULL |
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
++-------+--------+---------+------+------------+-------+
@end example
-@cindex binary distributions
-@node MySQL binaries, Post-installation, OS/2, Installing
-@section MySQL Binaries
-
-As a service, we at @strong{MySQL AB} provide a set of binary distributions
-of @strong{MySQL} that are compiled at our site or at sites where customers
-kindly have given us access to their machines.
+You could also write the previous query using the @samp{@{n@}}
+``repeat-@code{n}-times'' operator:
-These distributions are generated with @code{scripts/make_binary_distribution}
-and are configured with the following compilers and options:
+@example
+mysql> SELECT * FROM pet WHERE name REGEXP "^.@{5@}$";
++-------+--------+---------+------+------------+-------+
+| name | owner | species | sex | birth | death |
++-------+--------+---------+------+------------+-------+
+| Claws | Gwen | cat | m | 1994-03-17 | NULL |
+| Buffy | Harold | dog | f | 1989-05-13 | NULL |
++-------+--------+---------+------+------------+-------+
+@end example
-@table @asis
-@item SunOS 4.1.4 2 sun4c with @code{gcc} 2.7.2.1
-@code{CC=gcc CXX=gcc CXXFLAGS="-O3 -felide-constructors" ./configure --prefix=/usr/local/mysql --disable-shared --with-extra-charsets=complex --enable-assembler}
+@node Counting rows, Multiple tables, Pattern matching, Retrieving data
+@subsubsection Counting Rows
-@item SunOS 5.5.1 (and above) sun4u with @code{egcs} 1.0.3a or 2.90.27 or gcc 2.95.2 and newer
-@code{CC=gcc CFLAGS="-O3" CXX=gcc CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex --enable-assembler}
+@cindex rows, counting
+@cindex tables, counting rows
+@cindex counting, table rows
-@item SunOS 5.6 i86pc with @code{gcc} 2.8.1
-@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
+Databases are often used to answer the question, ``How often does a certain
+type of data occur in a table?'' For example, you might want to know how
+many pets you have, or how many pets each owner has, or you might want to
+perform various kinds of censuses on your animals.
-@item Linux 2.0.33 i386 with @code{pgcc} 2.90.29 (@code{egcs} 1.0.3a)
-@code{CFLAGS="-O3 -mpentium -mstack-align-double" CXX=gcc CXXFLAGS="-O3 -mpentium -mstack-align-double -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --with-extra-charsets=complex}
+Counting the total number of animals you have is the same question as ``How
+many rows are in the @code{pet} table?'' because there is one record per pet.
+@code{COUNT(*)} counts the number of rows, so the query to count your
+animals looks like this:
-@item Linux 2.2.x with x686 with @code{gcc} 2.95.2
-@code{CFLAGS="-O3 -mpentiumpro" CXX=gcc CXXFLAGS="-O3 -mpentiumpro -felide-constructors -fno-exceptions -fno-rtti" ./configure --prefix=/usr/local/mysql --enable-assembler --with-mysqld-ldflags=-all-static --disable-shared --with-extra-charset=complex}
+@example
+mysql> SELECT COUNT(*) FROM pet;
++----------+
+| COUNT(*) |
++----------+
+| 9 |
++----------+
+@end example
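+
+By contrast, @code{COUNT()} applied to a column name counts only the
+non-@code{NULL} values in that column. As an illustrative sketch with the
+sample data, @code{COUNT(death)} counts only the one pet (Bowser) that has a
+recorded death date:
+
+@example
+mysql> SELECT COUNT(*), COUNT(death) FROM pet;
+@end example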
-@item SCO 3.2v5.0.4 i386 with @code{gcc} 2.7-95q4
-@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
+Earlier, you retrieved the names of the people who owned pets. You can
+use @code{COUNT()} if you want to find out how many pets each owner has:
-@item AIX 2 4 with @code{gcc} 2.7.2.2
-@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
+@example
+mysql> SELECT owner, COUNT(*) FROM pet GROUP BY owner;
++--------+----------+
+| owner | COUNT(*) |
++--------+----------+
+| Benny | 2 |
+| Diane | 2 |
+| Gwen | 3 |
+| Harold | 2 |
++--------+----------+
+@end example
-@item OSF1 V4.0 564 alpha with @code{gcc} 2.8.1
-@code{CC=gcc CFLAGS=-O CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-low-memory --with-extra-charsets=complex}
+Note the use of @code{GROUP BY} to group together all records for each
+@code{owner}. Without it, all you get is an error message:
-@item Irix 6.3 IP32 with @code{gcc} 2.8.0
-@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
+@example
+mysql> SELECT owner, COUNT(owner) FROM pet;
+ERROR 1140 at line 1: Mixing of GROUP columns (MIN(),MAX(),COUNT()...)
+with no GROUP columns is illegal if there is no GROUP BY clause
+@end example
-@item BSDI BSD/OS 3.1 i386 with @code{gcc} 2.7.2.1
-@code{CC=gcc CXX=gcc CXXFLAGS=-O ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
+@code{COUNT()} and @code{GROUP BY} are useful for characterizing your
+data in various ways. The following examples show different ways to
+perform animal census operations.
-@item BSDI BSD/OS 2.1 i386 with @code{gcc} 2.7.2
-@code{CC=gcc CXX=gcc CXXFLAGS=-O3 ./configure --prefix=/usr/local/mysql --with-extra-charsets=complex}
-@end table
+Number of animals per species:
-Anyone who has more optimal options for any of the configurations listed
-above can always mail them to the developer's mailing list at
-@email{internals@@lists.mysql.com}.
+@example
+mysql> SELECT species, COUNT(*) FROM pet GROUP BY species;
++---------+----------+
+| species | COUNT(*) |
++---------+----------+
+| bird | 2 |
+| cat | 2 |
+| dog | 3 |
+| hamster | 1 |
+| snake | 1 |
++---------+----------+
+@end example
-RPM distributions prior to @strong{MySQL} Version 3.22 are user-contributed.
-Beginning with Version 3.22, the RPMs are generated by us at
-@strong{MySQL AB}.
+Number of animals per sex:
-If you want to compile a debug version of @strong{MySQL}, you should add
-@code{--with-debug} or @code{--with-debug=full} to the above configure lines
-and remove any @code{-fomit-frame-pointer} options.
+@example
+mysql> SELECT sex, COUNT(*) FROM pet GROUP BY sex;
++------+----------+
+| sex | COUNT(*) |
++------+----------+
+| NULL | 1 |
+| f | 4 |
+| m | 4 |
++------+----------+
+@end example
-@cindex post-installation, setup and testing
-@cindex testing, post-installation
-@cindex setup, post-installation
-@node Post-installation, Installing many servers, MySQL binaries, Installing
-@section Post-installation Setup and Testing
+(In this output, @code{NULL} indicates sex unknown.)
-@menu
-* mysql_install_db:: Problems running @code{mysql_install_db}
-* Starting server:: Problems starting the @strong{MySQL} server
-* Automatic start:: Starting and stopping @strong{MySQL} automatically
-* Command-line options:: Command-line options
-* Option files:: Option files
-@end menu
+Number of animals per combination of species and sex:
-Once you've installed @strong{MySQL} (from either a binary or source
-distribution), you need to initialize the grant tables, start the server,
-and make sure that the server works okay. You may also wish to arrange
-for the server to be started and stopped automatically when your system
-starts up and shuts down.
+@example
+mysql> SELECT species, sex, COUNT(*) FROM pet GROUP BY species, sex;
++---------+------+----------+
+| species | sex | COUNT(*) |
++---------+------+----------+
+| bird | NULL | 1 |
+| bird | f | 1 |
+| cat | f | 1 |
+| cat | m | 1 |
+| dog | f | 1 |
+| dog | m | 2 |
+| hamster | f | 1 |
+| snake | m | 1 |
++---------+------+----------+
+@end example
-Normally you install the grant tables and start the server like this
-for installation from a source distribution:
-@cindex starting, the server
-@cindex server, starting
+You need not retrieve an entire table when you use @code{COUNT()}. For
+example, the previous query, when performed just on dogs and cats, looks like
+this:
@example
-shell> ./scripts/mysql_install_db
-shell> cd mysql_installation_directory
-shell> ./bin/safe_mysqld --user=mysql &
+mysql> SELECT species, sex, COUNT(*) FROM pet
+ -> WHERE species = "dog" OR species = "cat"
+ -> GROUP BY species, sex;
++---------+------+----------+
+| species | sex | COUNT(*) |
++---------+------+----------+
+| cat | f | 1 |
+| cat | m | 1 |
+| dog | f | 1 |
+| dog | m | 2 |
++---------+------+----------+
@end example
-For a binary distribution (not RPM or pkg packages), do this:
+Or, if you wanted the number of animals per sex only for known-sex animals:
@example
-shell> cd mysql_installation_directory
-shell> ./bin/mysql_install_db
-shell> ./bin/safe_mysqld --user=mysql &
+mysql> SELECT species, sex, COUNT(*) FROM pet
+ -> WHERE sex IS NOT NULL
+ -> GROUP BY species, sex;
++---------+------+----------+
+| species | sex | COUNT(*) |
++---------+------+----------+
+| bird | f | 1 |
+| cat | f | 1 |
+| cat | m | 1 |
+| dog | f | 1 |
+| dog | m | 2 |
+| hamster | f | 1 |
+| snake | m | 1 |
++---------+------+----------+
@end example
-This creates the @code{mysql} database which will hold all database
-privileges, the @code{test} database which you can use to test
-@strong{MySQL} and also privilege entries for the user that run
-@code{mysql_install_db} and a @code{root} user (without any passwords).
-This also starts the @code{mysqld} server.
+@node Multiple tables, , Counting rows, Retrieving data
+@subsubsection Using More Than One Table
-@code{mysql_install_db} will not overwrite any old privilege tables, so
-it should be safe to run in any circumstances. If you don't want to
-have the @code{test} database you can remove it with @code{mysqladmin -u
-root drop test}.
+@cindex tables, multiple
-Testing is most easily done from the top-level directory of the @strong{MySQL}
-distribution. For a binary distribution, this is your installation directory
-(typically something like @file{/usr/local/mysql}). For a source
-distribution, this is the main directory of your @strong{MySQL} source tree.
-@cindex testing, the server
+The @code{pet} table keeps track of which pets you have. If you want to
+record other information about them, such as events in their lives (visits
+to the vet, or litters being born), you need another table. What should
+this table look like? It needs:
-In the commands shown below in this section and in the following
-subsections, @code{BINDIR} is the path to the location in which programs
-like @code{mysqladmin} and @code{safe_mysqld} are installed. For a
-binary distribution, this is the @file{bin} directory within the
-distribution. For a source distribution, @code{BINDIR} is probably
-@file{/usr/local/bin}, unless you specified an installation directory
-other than @file{/usr/local} when you ran @code{configure}.
-@code{EXECDIR} is the location in which the @code{mysqld} server is
-installed. For a binary distribution, this is the same as
-@code{BINDIR}. For a source distribution, @code{EXECDIR} is probably
-@file{/usr/local/libexec}.
+@itemize @bullet
+@item
+The pet name, so you know which animal each event pertains to.
-Testing is described in detail below:
-@cindex testing, installation
+@item
+A date so you know when the event occurred.
-@enumerate
@item
-If necessary, start the @code{mysqld} server and set up the initial
-@strong{MySQL} grant tables containing the privileges that determine how
-users are allowed to connect to the server. This is normally done with the
-@code{mysql_install_db} script:
+A field to describe the event.
+
+@item
+An event type field, if you want to be able to categorize events.
+@end itemize
+
+Given these considerations, the @code{CREATE TABLE} statement for the
+@code{event} table might look like this:
@example
-shell> scripts/mysql_install_db
+mysql> CREATE TABLE event (name VARCHAR(20), date DATE,
+ -> type VARCHAR(15), remark VARCHAR(255));
@end example
-Typically, @code{mysql_install_db} needs to be run only the first time you
-install @strong{MySQL}. Therefore, if you are upgrading an existing
-installation, you can skip this step. (However, @code{mysql_install_db} is
-quite safe to use and will not update any tables that already exist, so if
-you are unsure of what to do, you can always run @code{mysql_install_db}.)
+As with the @code{pet} table, it's easiest to load the initial records
+by creating a tab-delimited text file containing the information:
-@code{mysql_install_db} creates six tables (@code{user}, @code{db},
-@code{host}, @code{tables_priv}, @code{columns_priv}, and @code{func}) in the
-@code{mysql} database. A description of the initial privileges is given in
-@ref{Default privileges}. Briefly, these privileges allow the @strong{MySQL}
-@code{root} user to do anything, and allow anybody to create or use databases
-with a name of @code{'test'} or starting with @code{'test_'}.
+@multitable @columnfractions .15 .15 .15 .55
+@item Fluffy @tab 1995-05-15 @tab litter @tab 4 kittens, 3 female, 1 male
+@item Buffy @tab 1993-06-23 @tab litter @tab 5 puppies, 2 female, 3 male
+@item Buffy @tab 1994-06-19 @tab litter @tab 3 puppies, 3 female
+@item Chirpy @tab 1999-03-21 @tab vet @tab needed beak straightened
+@item Slim @tab 1997-08-03 @tab vet @tab broken rib
+@item Bowser @tab 1991-10-12 @tab kennel
+@item Fang @tab 1991-10-12 @tab kennel
+@item Fang @tab 1998-08-28 @tab birthday @tab Gave him a new chew toy
+@item Claws @tab 1998-03-17 @tab birthday @tab Gave him a new flea collar
+@item Whistler @tab 1998-12-09 @tab birthday @tab First birthday
+@end multitable
-If you don't set up the grant tables, the following error will appear in the
-log file when you start the server:
+Load the records like this:
-@tindex host.frm, problems finding
@example
-mysqld: Can't find file: 'host.frm'
+mysql> LOAD DATA LOCAL INFILE "event.txt" INTO TABLE event;
@end example
-The above may also happen with a binary @strong{MySQL} distribution if you
-don't start @strong{MySQL} by executing exactly @code{./bin/safe_mysqld}!
-@xref{safe_mysqld, , @code{safe_mysqld}}.
+Based on what you've learned from the queries you've run on the @code{pet}
+table, you should be able to perform retrievals on the records in the
+@code{event} table; the principles are the same. But when is the
+@code{event} table by itself insufficient to answer questions you might ask?
-You might need to run @code{mysql_install_db} as @code{root}. However,
-if you prefer, you can run the @strong{MySQL} server as an unprivileged
-(non-@code{root}) user, provided that user can read and write files in
-the database directory. Instructions for running @strong{MySQL} as an
-unprivileged user are given in @ref{Changing MySQL user, , Changing
-@strong{MySQL} user}.
+Suppose you want to find out the ages of each pet when they had their
+litters. The @code{event} table indicates when this occurred, but to
+calculate the age of the mother, you need her birth date. Because that is
+stored in the @code{pet} table, you need both tables for the query:
-If you have problems with @code{mysql_install_db}, see
-@ref{mysql_install_db, , @code{mysql_install_db}}.
+@example
+mysql> SELECT pet.name, (TO_DAYS(date) - TO_DAYS(birth))/365 AS age, remark
+ -> FROM pet, event
+ -> WHERE pet.name = event.name AND type = "litter";
++--------+------+-----------------------------+
+| name | age | remark |
++--------+------+-----------------------------+
+| Fluffy | 2.27 | 4 kittens, 3 female, 1 male |
+| Buffy | 4.12 | 5 puppies, 2 female, 3 male |
+| Buffy | 5.10 | 3 puppies, 3 female |
++--------+------+-----------------------------+
+@end example
-There are some alternatives to running the @code{mysql_install_db}
-script as it is provided in the @strong{MySQL} distribution:
+There are several things to note about this query:
@itemize @bullet
@item
-You may want to edit @code{mysql_install_db} before running it, to change
-the initial privileges that are installed into the grant tables. This is
-useful if you want to install @strong{MySQL} on a lot of machines with the
-same privileges. In this case you probably should need only to add a few
-extra @code{INSERT} statements to the @code{mysql.user} and @code{mysql.db}
-tables!
+The @code{FROM} clause lists two tables because the query needs to pull
+information from both of them.
@item
-If you want to change things in the grant tables after installing them, you
-can run @code{mysql_install_db}, then use @code{mysql -u root mysql} to
-connect to the grant tables as the @strong{MySQL} @code{root} user and issue
-SQL statements to modify the grant tables directly.
+When combining (joining) information from multiple tables, you need to
+specify how records in one table can be matched to records in the other.
+This is easy because they both have a @code{name} column. The query uses the
+@code{WHERE} clause to match up records in the two tables based on the
+@code{name} values.
@item
-It is possible to re-create the grant tables completely after they have
-already been created. You might want to do this if you've already installed
-the tables but then want to re-create them after editing
-@code{mysql_install_db}.
+Because the @code{name} column occurs in both tables, you must be specific
+about which table you mean when referring to the column. This is done
+by prepending the table name to the column name.
@end itemize
-For more information about these alternatives, see @ref{Default privileges}.
-
-@item
-Start the @strong{MySQL} server like this:
+You need not have two different tables to perform a join. Sometimes it is
+useful to join a table to itself, if you want to compare records in a table
+to other records in that same table. For example, to find breeding pairs
+among your pets, you can join the @code{pet} table with itself to pair up
+males and females of like species:
@example
-shell> cd mysql_installation_directory
-shell> bin/safe_mysqld &
+mysql> SELECT p1.name, p1.sex, p2.name, p2.sex, p1.species
+ -> FROM pet AS p1, pet AS p2
+ -> WHERE p1.species = p2.species AND p1.sex = "f" AND p2.sex = "m";
++--------+------+--------+------+---------+
+| name | sex | name | sex | species |
++--------+------+--------+------+---------+
+| Fluffy | f | Claws | m | cat |
+| Buffy | f | Fang | m | dog |
+| Buffy | f | Bowser | m | dog |
++--------+------+--------+------+---------+
@end example
-If you have problems starting the server, see @ref{Starting server}.
+In this query, we specify aliases for the table name so that we can
+refer to the columns and keep straight which instance of the table each
+column reference is associated with.
-@item
-Use @code{mysqladmin} to verify that the server is running. The following
-commands provide a simple test to check that the server is up and responding
-to connections:
+@node Getting information, Examples, Database use, Tutorial
+@section Getting Information About Databases and Tables
+
+@cindex databases, information about
+@cindex tables, information about
+@findex DESCRIBE
+
+What if you forget the name of a database or table, or what the structure of
+a given table is (for example, what its columns are called)? @strong{MySQL}
+addresses this problem through several statements that provide information
+about the databases and tables it supports.
+
+You have already seen @code{SHOW DATABASES}, which lists the databases
+managed by the server. To find out which database is currently selected,
+use the @code{DATABASE()} function:
@example
-shell> BINDIR/mysqladmin version
-shell> BINDIR/mysqladmin variables
+mysql> SELECT DATABASE();
++------------+
+| DATABASE() |
++------------+
+| menagerie |
++------------+
@end example
-The output from @code{mysqladmin version} varies slightly depending on your
-platform and version of @strong{MySQL}, but should be similar to that shown
-below:
+If you haven't selected any database yet, the result is blank.
+
+To find out what tables the current database contains (for example, when
+you're not sure about the name of a table), use this command:
@example
-shell> BINDIR/mysqladmin version
-mysqladmin Ver 8.14 Distrib 3.23.32, for linux on i586
-Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-This software comes with ABSOLUTELY NO WARRANTY. This is free software,
-and you are welcome to modify and redistribute it under the GPL license
+mysql> SHOW TABLES;
++---------------------+
+| Tables in menagerie |
++---------------------+
+| event |
+| pet |
++---------------------+
+@end example
-Server version 3.23.32-debug
-Protocol version 10
-Connection Localhost via Unix socket
-TCP port 3306
-UNIX socket /tmp/mysql.sock
-Uptime: 16 sec
+If you want to find out about the structure of a table, the @code{DESCRIBE}
+command is useful; it displays information about each of a table's columns:
-Threads: 1 Questions: 9 Slow queries: 0 Opens: 7 Flush tables: 2 Open tables: 0 Queries per second avg: 0.000 Memory in use: 132K Max memory used: 16773K
+@example
+mysql> DESCRIBE pet;
++---------+-------------+------+-----+---------+-------+
+| Field | Type | Null | Key | Default | Extra |
++---------+-------------+------+-----+---------+-------+
+| name | varchar(20) | YES | | NULL | |
+| owner | varchar(20) | YES | | NULL | |
+| species | varchar(20) | YES | | NULL | |
+| sex | char(1) | YES | | NULL | |
+| birth | date | YES | | NULL | |
+| death | date | YES | | NULL | |
++---------+-------------+------+-----+---------+-------+
@end example
-To get a feeling for what else you can do with @code{BINDIR/mysqladmin},
-invoke it with the @code{--help} option.
+@code{Field} indicates the column name, @code{Type} is the data type for
+the column, @code{Null} indicates whether or not the column can contain
+@code{NULL} values, @code{Key} indicates whether or not the column is
+indexed, and @code{Default} specifies the column's default value.
-@item
-Verify that you can shut down the server:
-@cindex server, shutdown
-@cindex shutting down, the server
+If you have indexes on a table,
+@code{SHOW INDEX FROM tbl_name} produces information about them.
-@example
-shell> BINDIR/mysqladmin -u root shutdown
-@end example
+@node Examples, Batch mode, Getting information, Tutorial
+@section Examples of Common Queries
-@item
-Verify that you can restart the server. Do this using @code{safe_mysqld} or
-by invoking @code{mysqld} directly. For example:
+@cindex queries, examples
+@cindex examples, queries
-@cindex server, restart
-@cindex restarting, the server
+Here are examples of how to solve some common problems with
+@strong{MySQL}.
+
+Some of the examples use the table @code{shop} to hold the price of each
+article (item number) for certain traders (dealers). Assuming that each
+dealer has a single fixed price per article, (@code{article},
+@code{dealer}) is a primary key for the records.
+
+Start the command-line tool @code{mysql} and select a database:
@example
-shell> BINDIR/safe_mysqld --log &
+mysql your-database-name
@end example
-If @code{safe_mysqld} fails, try running it from the @strong{MySQL}
-installation directory (if you are not already there). If that doesn't work,
-see @ref{Starting server}.
+(In most @strong{MySQL} installations, you can use the database name @code{test}.)
-@item
-Run some simple tests to verify that the server is working.
-The output should be similar to what is shown below:
+You can create the example table as:
@example
-shell> BINDIR/mysqlshow
-+-----------+
-| Databases |
-+-----------+
-| mysql |
-+-----------+
+CREATE TABLE shop (
+ article INT(4) UNSIGNED ZEROFILL DEFAULT '0000' NOT NULL,
+ dealer CHAR(20) DEFAULT '' NOT NULL,
+ price DOUBLE(16,2) DEFAULT '0.00' NOT NULL,
+ PRIMARY KEY(article, dealer));
-shell> BINDIR/mysqlshow mysql
-Database: mysql
-+--------------+
-| Tables |
-+--------------+
-| columns_priv |
-| db |
-| func |
-| host |
-| tables_priv |
-| user |
-+--------------+
+INSERT INTO shop VALUES
+(1,'A',3.45),(1,'B',3.99),(2,'A',10.99),(3,'B',1.45),(3,'C',1.69),
+(3,'D',1.25),(4,'D',19.95);
+@end example
-shell> BINDIR/mysql -e "select host,db,user from db" mysql
-+------+--------+------+
-| host | db | user |
-+------+--------+------+
-| % | test | |
-| % | test_% | |
-+------+--------+------+
+Okay, so the example data is:
+
+@example
+mysql> SELECT * FROM shop;
+
++---------+--------+-------+
+| article | dealer | price |
++---------+--------+-------+
+| 0001 | A | 3.45 |
+| 0001 | B | 3.99 |
+| 0002 | A | 10.99 |
+| 0003 | B | 1.45 |
+| 0003 | C | 1.69 |
+| 0003 | D | 1.25 |
+| 0004 | D | 19.95 |
++---------+--------+-------+
@end example
-There is also a benchmark suite in the @file{sql-bench} directory (under the
-@strong{MySQL} installation directory) that you can use to compare how
-@strong{MySQL} performs on different platforms. The @file{sql-bench/Results}
-directory contains the results from many runs against different databases and
-platforms. To run all tests, execute these commands:
+@menu
+* example-Maximum-column:: The maximum value for a column
+* example-Maximum-row:: The row holding the maximum of a certain column
+* example-Maximum-column-group:: Maximum of column per group
+* example-Maximum-column-group-row:: The rows holding the group-wise maximum of a certain field
+* example-user-variables:: Using user variables
+* example-Foreign keys:: Using foreign keys
+* Searching on two keys::
+* Calculating days::
+@end menu
+
+@node example-Maximum-column, example-Maximum-row, Examples, Examples
+@subsection The Maximum Value for a Column
+
+``What's the highest item number?''
@example
-shell> cd sql-bench
-shell> run-all-tests
+SELECT MAX(article) AS article FROM shop
+
++---------+
+| article |
++---------+
+| 4 |
++---------+
@end example
-If you don't have the @file{sql-bench} directory, you are probably using an
-RPM for a binary distribution. (Source distribution RPMs include the
-benchmark directory.) In this case, you must first install the benchmark
-suite before you can use it. Beginning with @strong{MySQL} Version 3.22,
-there are benchmark RPM files named @file{mysql-bench-VERSION-i386.rpm} that
-contain benchmark code and data.
+@node example-Maximum-row, example-Maximum-column-group, example-Maximum-column, Examples
+@subsection The Row Holding the Maximum of a Certain Column
-If you have a source distribution, you can also run the tests in the
-@file{tests} subdirectory. For example, to run @file{auto_increment.tst}, do
-this:
+``Find number, dealer, and price of the most expensive article.''
+
+In ANSI SQL this is easily done with a sub-query:
@example
-shell> BINDIR/mysql -vvf test < ./tests/auto_increment.tst
+SELECT article, dealer, price
+FROM shop
+WHERE price=(SELECT MAX(price) FROM shop)
@end example
-The expected results are shown in the @file{./tests/auto_increment.res} file.
+In @strong{MySQL} (which does not yet have sub-selects), just do it in
+two steps:
+
+@enumerate
+@item
+Get the maximum price value from the table with a @code{SELECT}
+statement (shown below).
+@item
+Using this value compile the actual query:
+@example
+SELECT article, dealer, price
+FROM shop
+WHERE price=19.95
+@end example
@end enumerate
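+
+For reference, the query for the first step (the one that produces the
+value @code{19.95} used above) is simply:
+
+@example
+SELECT MAX(price) AS price FROM shop
+@end example
+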
-@node mysql_install_db, Starting server, Post-installation, Post-installation
-@subsection Problems Running @code{mysql_install_db}
-@cindex @code{mysql_install_db} script
-@cindex scripts, @code{mysql_install_db}
+Another solution is to sort all rows descending by price and only
+get the first row using the @strong{MySQL} specific @code{LIMIT} clause:
-The purpose of the @code{mysql_install_db} script is to generate new
-@strong{MySQL} privilege tables. It will not affect any other data!
-It will also not do anything if you already have @strong{MySQL} privilege
-tables installed!
+@example
+SELECT article, dealer, price
+FROM shop
+ORDER BY price DESC
+LIMIT 1
+@end example
-If you want to re-create your privilege tables, you should take down
-the @code{mysqld} server, if it's running, and then do something like:
+@strong{NOTE}: If there are several most expensive articles (each priced
+at 19.95, for example), the @code{LIMIT} solution shows only one of them!
+
+@node example-Maximum-column-group, example-Maximum-column-group-row, example-Maximum-row, Examples
+@subsection Maximum of Column per Group
+
+``What's the highest price per article?''
@example
-mv mysql-data-directory/mysql mysql-data-directory/mysql-old
-mysql_install_db
+SELECT article, MAX(price) AS price
+FROM shop
+GROUP BY article
+
++---------+-------+
+| article | price |
++---------+-------+
+| 0001 | 3.99 |
+| 0002 | 10.99 |
+| 0003 | 1.69 |
+| 0004 | 19.95 |
++---------+-------+
@end example
-This section lists problems you might encounter when you run
-@code{mysql_install_db}:
+@node example-Maximum-column-group-row, example-user-variables, example-Maximum-column-group, Examples
+@subsection The Rows Holding the Group-wise Maximum of a Certain Field
-@table @strong
-@item @code{mysql_install_db} doesn't install the grant tables
+``For each article, find the dealer(s) with the most expensive price.''
-You may find that @code{mysql_install_db} fails to install the grant
-tables and terminates after displaying the following messages:
+In ANSI SQL, I'd do it with a sub-query like this:
@example
-starting mysqld daemon with databases from XXXXXX
-mysql daemon ended
+SELECT article, dealer, price
+FROM shop s1
+WHERE price=(SELECT MAX(s2.price)
+ FROM shop s2
+ WHERE s1.article = s2.article);
@end example
-In this case, you should examine the log file very carefully! The log
-should be located in the directory @file{XXXXXX} named by the error message,
-and should indicate why @code{mysqld} didn't start. If you don't understand
-what happened, include the log when you post a bug report using
-@code{mysqlbug}!
-@xref{Bug reports}.
+In @strong{MySQL} it's best to do it in several steps:
-@item There is already a @code{mysqld} daemon running
+@enumerate
+@item
+Get the list of (article,maxprice).
+@item
+For each article get the corresponding rows that have the stored maximum
+price.
+@end enumerate
-In this case, you probably don't have to run @code{mysql_install_db} at
-all. You have to run @code{mysql_install_db} only once, when you install
-@strong{MySQL} the first time.
+This can easily be done with a temporary table:
-@item Installing a second @code{mysqld} daemon doesn't work when one daemon is running
+@example
+CREATE TEMPORARY TABLE tmp (
+ article INT(4) UNSIGNED ZEROFILL DEFAULT '0000' NOT NULL,
+ price DOUBLE(16,2) DEFAULT '0.00' NOT NULL);
-This can happen when you already have an existing @strong{MySQL}
-installation, but want to put a new installation in a different place (for
-example, for testing, or perhaps you simply want to run two installations at
-the same time). Generally the problem that occurs when you try to run the
-second server is that it tries to use the same socket and port as the old one.
-In this case you will get the error message: @code{Can't start server: Bind on
-TCP/IP port: Address already in use} or @code{Can't start server : Bind on
-unix socket...}. @xref{Installing many servers}.
+LOCK TABLES shop read;
-@item You don't have write access to @file{/tmp}
-@cindex write access, tmp
-@cindex temporary file, write access
-@cindex files, @code{tmp}
+INSERT INTO tmp SELECT article, MAX(price) FROM shop GROUP BY article;
-If you don't have write access to create a socket file at the default place
-(in @file{/tmp}) or permission to create temporary files in @file{/tmp,}
-you will get an error when running @code{mysql_install_db} or when
-starting or using @code{mysqld}.
+SELECT shop.article, dealer, shop.price FROM shop, tmp
+WHERE shop.article=tmp.article AND shop.price=tmp.price;
-You can specify a different socket and temporary directory as follows:
+UNLOCK TABLES;
-@tindex TMPDIR environment variable
-@tindex MYSQL_UNIX_PORT environment variable
-@tindex Environment variable, TMPDIR
-@tindex Environment variable, MYSQL_UNIX_PORT
-@example
-shell> TMPDIR=/some_tmp_dir/
-shell> MYSQL_UNIX_PORT=/some_tmp_dir/mysqld.sock
-shell> export TMPDIR MYSQL_UNIX_PORT
+DROP TABLE tmp;
@end example
-@file{some_tmp_dir} should be the path to some directory for which you
-have write permission. @xref{Environment variables}.
+If you don't use a @code{TEMPORARY} table, you must also lock the @code{tmp} table.
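+
+For example, with a regular (non-@code{TEMPORARY}) @code{tmp} table, the
+lock statement would need to cover both tables:
+
+@example
+LOCK TABLES shop READ, tmp WRITE;
+@end example
+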
-After this you should be able to run @code{mysql_install_db} and start
-the server with these commands:
+``Can it be done with a single query?''
+
+Yes, but only by using a quite inefficient trick that I call the
+``MAX-CONCAT trick'':
@example
-shell> scripts/mysql_install_db
-shell> BINDIR/safe_mysqld &
+SELECT article,
+ SUBSTRING( MAX( CONCAT(LPAD(price,6,'0'),dealer) ), 7) AS dealer,
+ 0.00+LEFT( MAX( CONCAT(LPAD(price,6,'0'),dealer) ), 6) AS price
+FROM shop
+GROUP BY article;
+
++---------+--------+-------+
+| article | dealer | price |
++---------+--------+-------+
+| 0001 | B | 3.99 |
+| 0002 | A | 10.99 |
+| 0003 | C | 1.69 |
+| 0004 | D | 19.95 |
++---------+--------+-------+
@end example
-@item @code{mysqld} crashes immediately
+The last example can, of course, be made a bit more efficient by doing the
+splitting of the concatenated column in the client.
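+
+As a sketch of that idea, the query would return only the concatenated
+value (the alias @code{price_and_dealer} here is just for illustration),
+and the client program would then split off the first six characters as
+the price and the rest as the dealer:
+
+@example
+SELECT article,
+       MAX( CONCAT(LPAD(price,6,'0'),dealer) ) AS price_and_dealer
+FROM shop
+GROUP BY article;
+@end example
+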
-If you are running RedHat Version 5.0 with a version of @code{glibc} older than
-2.0.7-5, you should make sure you have installed all @code{glibc} patches!
-There is a lot of information about this in the @strong{MySQL} mail
-archives. Links to the mail archives are available online at
-@uref{http://www.mysql.com/documentation/}.
-Also, see @ref{Linux}.
+@node example-user-variables, example-Foreign keys, example-Maximum-column-group-row, Examples
+@subsection Using User Variables
-You can also start @code{mysqld} manually using the @code{--skip-grant-tables}
-option and add the privilege information yourself using @code{mysql}:
+You can use @strong{MySQL} user variables to remember results without
+having to store them in temporary variables in the client.
+@xref{Variables}.
+
+For example, to find the articles with the highest and lowest price you
+can do:
@example
-shell> BINDIR/safe_mysqld --skip-grant-tables &
-shell> BINDIR/mysql -u root mysql
-@end example
+select @@min_price:=min(price),@@max_price:=max(price) from shop;
+select * from shop where price=@@min_price or price=@@max_price;
-From @code{mysql}, manually execute the SQL commands in
-@code{mysql_install_db}. Make sure you run @code{mysqladmin
-flush-privileges} or @code{mysqladmin reload} afterward to tell the server to
-reload the grant tables.
-@end table
++---------+--------+-------+
+| article | dealer | price |
++---------+--------+-------+
+| 0003 | D | 1.25 |
+| 0004 | D | 19.95 |
++---------+--------+-------+
+@end example
-@node Starting server, Automatic start, mysql_install_db, Post-installation
-@subsection Problems Starting the MySQL Server
-@cindex server, starting problems
-@cindex problems, starting the server
+@node example-Foreign keys, Searching on two keys, example-user-variables, Examples
+@subsection Using Foreign Keys
-If you are going to use tables that support transactions (BDB, InnoDB),
-you should first create a my.cnf file and set startup options
-for the table types you plan to use. @xref{Table types}.
+@cindex foreign keys
+@cindex keys, foreign
-Generally, you start the @code{mysqld} server in one of three ways:
+You don't need foreign keys to join two tables.
-@itemize @bullet
-@item
-By invoking @code{mysql.server}. This script is used primarily at
-system startup and shutdown, and is described more fully in
-@ref{Automatic start}.
+The only things @strong{MySQL} doesn't do are @code{CHECK} that the keys
+you use really exist in the table(s) you're referencing, and
+automatically delete rows from a table with a foreign key definition.
+If you use your keys as usual, it'll work just fine:
-@item
-By invoking @code{safe_mysqld}, which tries to determine the proper options
-for @code{mysqld} and then runs it with those options. @xref{safe_mysqld, ,
-@code{safe_mysqld}}.
-@item
-On NT you should install @code{mysqld} as a service as follows:
@example
-bin\mysqld-nt --install # Install MySQL as a service
+CREATE TABLE persons (
+ id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
+ name CHAR(60) NOT NULL,
+ PRIMARY KEY (id)
+);
+
+CREATE TABLE shirts (
+ id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
+ style ENUM('t-shirt', 'polo', 'dress') NOT NULL,
+ color ENUM('red', 'blue', 'orange', 'white', 'black') NOT NULL,
+ owner SMALLINT UNSIGNED NOT NULL REFERENCES persons,
+ PRIMARY KEY (id)
+);
+
+
+INSERT INTO persons VALUES (NULL, 'Antonio Paz');
+
+INSERT INTO shirts VALUES
+(NULL, 'polo', 'blue', LAST_INSERT_ID()),
+(NULL, 'dress', 'white', LAST_INSERT_ID()),
+(NULL, 't-shirt', 'blue', LAST_INSERT_ID());
+
+
+INSERT INTO persons VALUES (NULL, 'Lilliana Angelovska');
+
+INSERT INTO shirts VALUES
+(NULL, 'dress', 'orange', LAST_INSERT_ID()),
+(NULL, 'polo', 'red', LAST_INSERT_ID()),
+(NULL, 'dress', 'blue', LAST_INSERT_ID()),
+(NULL, 't-shirt', 'white', LAST_INSERT_ID());
+
+
+SELECT * FROM persons;
++----+---------------------+
+| id | name |
++----+---------------------+
+| 1 | Antonio Paz |
+| 2 | Lilliana Angelovska |
++----+---------------------+
+
+SELECT * FROM shirts;
++----+---------+--------+-------+
+| id | style | color | owner |
++----+---------+--------+-------+
+| 1 | polo | blue | 1 |
+| 2 | dress | white | 1 |
+| 3 | t-shirt | blue | 1 |
+| 4 | dress | orange | 2 |
+| 5 | polo | red | 2 |
+| 6 | dress | blue | 2 |
+| 7 | t-shirt | white | 2 |
++----+---------+--------+-------+
+
+
+SELECT s.* FROM persons p, shirts s
+ WHERE p.name LIKE 'Lilliana%'
+ AND s.owner = p.id
+ AND s.color <> 'white';
+
++----+-------+--------+-------+
+| id | style | color | owner |
++----+-------+--------+-------+
+| 4 | dress | orange | 2 |
+| 5 | polo | red | 2 |
+| 6 | dress | blue | 2 |
++----+-------+--------+-------+
@end example
-You can now start/stop @code{mysqld} as follows:
+@node Searching on two keys, Calculating days, example-Foreign keys, Examples
+@subsection Searching on Two Keys
+
+@findex UNION
+@cindex searching, two keys
+@cindex keys, searching on two
+
+@strong{MySQL} doesn't yet optimize the case where you search on two
+different keys combined with @code{OR} (searching on one key with
+different @code{OR} parts is optimized quite well):
+
@example
-NET START mysql
-NET STOP mysql
+SELECT field1_index, field2_index FROM test_table WHERE field1_index = '1'
+OR field2_index = '1'
@end example
-Note that in this case you can't use any other options for @code{mysqld}!
+The reason is that we haven't yet had time to come up with an efficient
+way to handle this in the general case. (The @code{AND} handling is,
+in comparison, now completely general and works very well).
+
+For the moment you can solve this very efficiently by using a
+@code{TEMPORARY} table. This type of optimization is also very good if
+you are using very complicated queries where the SQL server does the
+optimizations in the wrong order.
-You can remove the service as follows:
@example
-bin\mysqld-nt --remove # remove MySQL as a service
+CREATE TEMPORARY TABLE tmp
+SELECT field1_index, field2_index FROM test_table WHERE field1_index = '1';
+INSERT INTO tmp
+SELECT field1_index, field2_index FROM test_table WHERE field2_index = '1';
+SELECT * from tmp;
+DROP TABLE tmp;
@end example
-@item
-By invoking @code{mysqld} directly.
-@end itemize
+The above way to solve this query is in effect an @code{UNION} of two queries.
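+
+For reference, in SQL dialects that implement @code{UNION} directly, the
+same idea could be written as one statement (shown only as a sketch;
+this version of @strong{MySQL} does not yet support @code{UNION}):
+
+@example
+SELECT field1_index, field2_index FROM test_table WHERE field1_index = '1'
+UNION
+SELECT field1_index, field2_index FROM test_table WHERE field2_index = '1';
+@end example
+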
-When the @code{mysqld} daemon starts up, it changes directory to the
-data directory. This is where it expects to write log files and the pid
-(process ID) file, and where it expects to find databases.
+@node Calculating days, , Searching on two keys, Examples
+@subsection Calculating Visits per Day
-The data directory location is hardwired in when the distribution is
-compiled. However, if @code{mysqld} expects to find the data directory
-somewhere other than where it really is on your system, it will not work
-properly. If you have problems with incorrect paths, you can find out
-what options @code{mysqld} allows and what the default path settings are by
-invoking @code{mysqld} with the @code{--help} option. You can override the
-defaults by specifying the correct pathnames as command-line arguments to
-@code{mysqld}. (These options can be used with @code{safe_mysqld} as well.)
+@findex BIT_OR
+@findex BIT_COUNT
+@findex <<
+@cindex bit_functions, example
-Normally you should need to tell @code{mysqld} only the base directory under
-which @strong{MySQL} is installed. You can do this with the @code{--basedir}
-option. You can also use @code{--help} to check the effect of changing path
-options (note that @code{--help} @emph{must} be the final option of the
-@code{mysqld} command). For example:
+The following example shows how you can use the bit group functions to
+calculate the number of days per month that a user has visited a Web page.
@example
-shell> EXECDIR/mysqld --basedir=/usr/local --help
+CREATE TABLE t1 (year YEAR(4), month INT(2) UNSIGNED ZEROFILL, day INT(2) UNSIGNED ZEROFILL);
+INSERT INTO t1 VALUES(2000,1,1),(2000,1,20),(2000,1,30),(2000,2,2),(2000,2,23),(2000,2,23);
+
+SELECT year,month,BIT_COUNT(BIT_OR(1<<day)) AS days FROM t1 GROUP BY year,month;
+@end example
+
+Which returns:
+
+@example
++------+-------+------+
+| year | month | days |
++------+-------+------+
+| 2000 | 01 | 3 |
+| 2000 | 02 | 2 |
++------+-------+------+
@end example
-Once you determine the path settings you want, start the server without
-the @code{--help} option.
+The above calculates how many different days were used for a given
+year/month combination, with automatic removal of duplicate entries.
-Whichever method you use to start the server, if it fails to start up
-correctly, check the log file to see if you can find out why. Log files
-are located in the data directory (typically
-@file{/usr/local/mysql/data} for a binary distribution,
-@file{/usr/local/var} for a source distribution,
-@file{\mysql\data\mysql.err} on Windows.) Look in the data directory for
-files with names of the form @file{host_name.err} and
-@file{host_name.log} where @code{host_name} is the name of your server
-host. Then check the last few lines of these files:
+@node Batch mode, Twin, Examples, Tutorial
+@section Using @code{mysql} in Batch Mode
+
+@cindex modes, batch
+@cindex batch mode
+@cindex running, batch mode
+@cindex script files
+@cindex files, script
+
+In the previous sections, you used @code{mysql} interactively to enter
+queries and view the results. You can also run @code{mysql} in batch
+mode. To do this, put the commands you want to run in a file, then
+tell @code{mysql} to read its input from the file:
@example
-shell> tail host_name.err
-shell> tail host_name.log
+shell> mysql < batch-file
@end example
-If you find something like the following in the log file:
+If you need to specify connection parameters on the command line, the
+command might look like this:
+
@example
-000729 14:50:10 bdb: Recovery function for LSN 1 27595 failed
-000729 14:50:10 bdb: warning: ./test/t1.db: No such file or directory
-000729 14:50:10 Can't init databases
+shell> mysql -h host -u user -p < batch-file
+Enter password: ********
@end example
-This means that you didn't start @code{mysqld} with @code{--bdb-no-recover}
-and Berkeley DB found something wrong with its log files when it
-tried to recover your databases. To be able to continue, you should
-move away the old Berkeley DB log file from the database directory to
-some other place, where you can later examine these. The log files are
-named @file{log.0000000001}, where the number will increase over time.
+When you use @code{mysql} this way, you are creating a script file, then
+executing the script.
-If you are running @code{mysqld} with BDB table support and @code{mysqld} core
-dumps at start this could be because of some problems with the BDB
-recover log. In this case you can try starting @code{mysqld} with
-@code{--bdb-no-recover}. If this helps, then you should remove all
-@file{log.*} files from the data directory and try starting @code{mysqld}
-again.
+Why use a script? Here are a few reasons:
-If you get the following error, it means that some other program (or another
-@code{mysqld} server) is already using the TCP/IP port or socket
-@code{mysqld} is trying to use:
+@itemize @bullet
+@item
+If you run a query repeatedly (say, every day or every week), making it a
+script allows you to avoid retyping it each time you execute it.
+
+@item
+You can generate new queries from existing ones that are similar by copying
+and editing script files.
+
+@item
+Batch mode can also be useful while you're developing a query, particularly
+for multiple-line commands or multiple-statement sequences of commands. If
+you make a mistake, you don't have to retype everything. Just edit your
+script to correct the error, then tell @code{mysql} to execute it again.
+
+@item
+If you have a query that produces a lot of output, you can run the output
+through a pager rather than watching it scroll off the top of your screen:
@example
-Can't start server: Bind on TCP/IP port: Address already in use
- or
-Can't start server : Bind on unix socket...
+shell> mysql < batch-file | more
@end example
-Use @code{ps} to make sure that you don't have another @code{mysqld} server
-running. If you can't find another server running, you can try to execute
-the command @code{telnet your-host-name tcp-ip-port-number} and press
-@code{RETURN} a couple of times. If you don't get an error message like
-@code{telnet: Unable to connect to remote host: Connection refused},
-something is using the TCP/IP port @code{mysqld} is trying to use.
-See @ref{mysql_install_db} and @ref{Multiple servers}.
-
-If @code{mysqld} is currently running, you can find out what path settings
-it is using by executing this command:
+@item
+You can catch the output in a file for further processing:
@example
-shell> mysqladmin variables
+shell> mysql < batch-file > mysql.out
@end example
-or
+@item
+You can distribute your script to other people so they can run the commands,
+too.
+
+@item
+Some situations do not allow for interactive use, for example, when you run
+a query from a @code{cron} job. In this case, you must use batch mode.
+@end itemize
+
+The default output format is different (more concise) when you run
+@code{mysql} in batch mode than when you use it interactively. For
+example, the output of @code{SELECT DISTINCT species FROM pet} looks like
+this when run interactively:
@example
-shell> mysqladmin -h 'your-host-name' variables
++---------+
+| species |
++---------+
+| bird |
+| cat |
+| dog |
+| hamster |
+| snake |
++---------+
@end example
-If @code{safe_mysqld} starts the server but you can't connect to it,
-you should make sure you have an entry in @file{/etc/hosts} that looks like
-this:
+But like this when run in batch mode:
@example
-127.0.0.1 localhost
+species
+bird
+cat
+dog
+hamster
+snake
@end example
-This problem occurs only on systems that don't have a working thread
-library and for which @strong{MySQL} must be configured to use MIT-pthreads.
+If you want to get the interactive output format in batch mode, use
+@code{mysql -t}. To echo to the output the commands that are executed, use
+@code{mysql -vvv}.
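+
+For example, to run a script but keep the interactive-style table
+output, you can combine the two:
+
+@example
+shell> mysql -t < batch-file
+@end example
+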
-If you can't get @code{mysqld} to start you can try to make a trace file
-to find the problem. @xref{Making trace files}.
+@node Twin, Apache, Batch mode, Tutorial
+@section Queries from the Twin Project
-If you are using InnoDB tables, refer to the InnoDB-specific startup
-options. @xref{InnoDB start}.
+@cindex Twin Studies, queries
+@cindex queries, Twin Studies project
-If you are using BDB (Berkeley DB) tables, you should familiarize
-yourself with the different BDB specific startup options. @xref{BDB start}.
+At Analytikerna and Lentus, we have been doing the systems and field work
+for a big research project. This project is a collaboration between the
+Institute of Environmental Medicine at Karolinska Institutet Stockholm
+and the Section on Clinical Research in Aging and Psychology at the
+University of Southern California.
-@node Automatic start, Command-line options, Starting server, Post-installation
-@subsection Starting and Stopping MySQL Automatically
-@cindex starting, the server automatically
-@cindex stopping, the server
-@cindex server, starting and stopping
+The project involves a screening part where all twins in Sweden older
+than 65 years are interviewed by telephone. Twins who meet certain
+criteria are passed on to the next stage. In this latter stage, twins who
+want to participate are visited by a doctor/nurse team. Some of the
+examinations include physical and neuropsychological examination,
+laboratory testing, neuroimaging, psychological status assessment, and family
+history collection. In addition, data are collected on medical and
+environmental risk factors.
-The @code{mysql.server} and @code{safe_mysqld} scripts can be used to start
-the server automatically at system startup time. @code{mysql.server} can also
-be used to stop the server.
+More information about Twin studies can be found at:
-The @code{mysql.server} script can be used to start or stop the server
-by invoking it with @code{start} or @code{stop} arguments:
+@example
+@url{http://www.imm.ki.se/TWIN/TWINUKW.HTM}
+@end example
+
+The latter part of the project is administered with a Web interface
+written using Perl and @strong{MySQL}.
+
+Each night all data from the interviews are moved into a @strong{MySQL}
+database.
+
+@menu
+* Twin pool:: Find all non-distributed twins
+* Twin event:: Show a table on twin pair status
+@end menu
+
+@node Twin pool, Twin event, Twin, Twin
+@subsection Find all Non-distributed Twins
+
+The following query is used to determine who goes into the second part of the
+project:
@example
-shell> mysql.server start
-shell> mysql.server stop
+select
+ concat(p1.id, p1.tvab) + 0 as tvid,
+ concat(p1.christian_name, " ", p1.surname) as Name,
+ p1.postal_code as Code,
+ p1.city as City,
+ pg.abrev as Area,
+ if(td.participation = "Aborted", "A", " ") as A,
+ p1.dead as dead1,
+ l.event as event1,
+ td.suspect as tsuspect1,
+ id.suspect as isuspect1,
+ td.severe as tsevere1,
+ id.severe as isevere1,
+ p2.dead as dead2,
+ l2.event as event2,
+ h2.nurse as nurse2,
+ h2.doctor as doctor2,
+ td2.suspect as tsuspect2,
+ id2.suspect as isuspect2,
+ td2.severe as tsevere2,
+ id2.severe as isevere2,
+ l.finish_date
+from
+ twin_project as tp
+ /* For Twin 1 */
+ left join twin_data as td on tp.id = td.id and tp.tvab = td.tvab
+ left join informant_data as id on tp.id = id.id and tp.tvab = id.tvab
+ left join harmony as h on tp.id = h.id and tp.tvab = h.tvab
+ left join lentus as l on tp.id = l.id and tp.tvab = l.tvab
+ /* For Twin 2 */
+ left join twin_data as td2 on p2.id = td2.id and p2.tvab = td2.tvab
+ left join informant_data as id2 on p2.id = id2.id and p2.tvab = id2.tvab
+ left join harmony as h2 on p2.id = h2.id and p2.tvab = h2.tvab
+ left join lentus as l2 on p2.id = l2.id and p2.tvab = l2.tvab,
+ person_data as p1,
+ person_data as p2,
+ postal_groups as pg
+where
+ /* p1 gets main twin and p2 gets his/her twin. */
+ /* ptvab is a field inverted from tvab */
+ p1.id = tp.id and p1.tvab = tp.tvab and
+ p2.id = p1.id and p2.ptvab = p1.tvab and
+  /* Just the screening survey */
+ tp.survey_no = 5 and
+ /* Skip if partner died before 65 but allow emigration (dead=9) */
+ (p2.dead = 0 or p2.dead = 9 or
+ (p2.dead = 1 and
+ (p2.death_date = 0 or
+ (((to_days(p2.death_date) - to_days(p2.birthday)) / 365)
+ >= 65))))
+ and
+ (
+ /* Twin is suspect */
+ (td.future_contact = 'Yes' and td.suspect = 2) or
+ /* Twin is suspect - Informant is Blessed */
+ (td.future_contact = 'Yes' and td.suspect = 1 and id.suspect = 1) or
+ /* No twin - Informant is Blessed */
+ (ISNULL(td.suspect) and id.suspect = 1 and id.future_contact = 'Yes') or
+ /* Twin broken off - Informant is Blessed */
+ (td.participation = 'Aborted'
+ and id.suspect = 1 and id.future_contact = 'Yes') or
+ /* Twin broken off - No inform - Have partner */
+ (td.participation = 'Aborted' and ISNULL(id.suspect) and p2.dead = 0))
+ and
+ l.event = 'Finished'
+ /* Get at area code */
+ and substring(p1.postal_code, 1, 2) = pg.code
+ /* Not already distributed */
+ and (h.nurse is NULL or h.nurse=00 or h.doctor=00)
+ /* Has not refused or been aborted */
+ and not (h.status = 'Refused' or h.status = 'Aborted'
+ or h.status = 'Died' or h.status = 'Other')
+order by
+ tvid;
@end example
-@code{mysql.server} can be found in the @file{share/mysql} directory
-under the @strong{MySQL} installation directory or in the @file{support-files}
-directory of the @strong{MySQL} source tree.
+Some explanations:
+@table @asis
+@item @code{concat(p1.id, p1.tvab) + 0 as tvid}
+We want to sort on the concatenated @code{id} and @code{tvab} in
+numerical order. Adding @code{0} to the result causes @strong{MySQL} to
+treat the result as a number (see the short example after this table).
+@item column @code{id}
+This identifies a pair of twins. It is a key in all tables.
+@item column @code{tvab}
+This identifies a twin in a pair. It has a value of @code{1} or @code{2}.
+@item column @code{ptvab}
+This is an inverse of @code{tvab}. When @code{tvab} is @code{1} this is
+@code{2}, and vice versa. It exists to save typing and to make it easier for
+@strong{MySQL} to optimize the query.
+@end table
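+
+As a quick illustration of this trick (a standalone example, not tied to
+the project's tables), concatenation produces a string and adding
+@code{0} turns it back into a number:
+
+@example
+mysql> SELECT CONCAT(12, 3) AS as_string, CONCAT(12, 3) + 0 AS as_number;
+@end example
+
+The first column is the string @code{'123'}; the second is the number
+@code{123}.
+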
-Before @code{mysql.server} starts the server, it changes directory to
-the @strong{MySQL} installation directory, then invokes @code{safe_mysqld}.
-You might need to edit @code{mysql.server} if you have a binary distribution
-that you've installed in a non-standard location. Modify it to @code{cd}
-into the proper directory before it runs @code{safe_mysqld}. If you want the
-server to run as some specific user, add an appropriate @code{user} line
-to the @file{/etc/my.cnf} file, as shown later in this section.
+This query demonstrates, among other things, how to do lookups on a
+table from the same table with a join (@code{p1} and @code{p2}). In the example, this
+is used to check whether a twin's partner died before the age of 65. If so,
+the row is not returned.
-@code{mysql.server stop} brings down the server by sending a signal to it.
-You can take down the server manually by executing @code{mysqladmin shutdown}.
+All of the above columns exist in all tables with twin-related
+information. We have a key on @code{id,tvab} (all tables) and on
+@code{id,ptvab} (@code{person_data}) to make queries faster.
-You might want to add these start and stop commands to the appropriate places
-in your @file{/etc/rc*} files when you start using @strong{MySQL} for
-production applications. Note that if you modify @code{mysql.server}, then
-upgrade @strong{MySQL} sometime, your modified version will be overwritten,
-so you should make a copy of your edited version that you can reinstall.
+On our production machine (a 200MHz UltraSPARC), this query returns
+about 150-200 rows and takes less than one second.
+
+The current number of records in the tables used above:
+@multitable @columnfractions .3 .5
+@item @strong{Table} @tab @strong{Rows}
+@item @code{person_data} @tab 71074
+@item @code{lentus} @tab 5291
+@item @code{twin_project} @tab 5286
+@item @code{twin_data} @tab 2012
+@item @code{informant_data} @tab 663
+@item @code{harmony} @tab 381
+@item @code{postal_groups} @tab 100
+@end multitable
+
+@node Twin event, , Twin pool, Twin
+@subsection Show a Table on Twin Pair Status
+
+Each interview ends with a status code called @code{event}. The query
+shown below is used to display a table of all twin pairs, grouped by
+event. This indicates in how many pairs both twins are finished, in how
+many pairs one twin is finished and the other has refused, and so on.
-If your system uses @file{/etc/rc.local} to start external scripts, you
-should append the following to it:
@example
-/bin/sh -c 'cd /usr/local/mysql ; ./bin/safe_mysqld --user=mysql &'
+select
+ t1.event,
+ t2.event,
+ count(*)
+from
+ lentus as t1,
+ lentus as t2,
+ twin_project as tp
+where
+ /* We are looking at one pair at a time */
+ t1.id = tp.id
+ and t1.tvab=tp.tvab
+ and t1.id = t2.id
+  /* Just the screening survey */
+ and tp.survey_no = 5
+ /* This makes each pair only appear once */
+ and t1.tvab='1' and t2.tvab='2'
+group by
+ t1.event, t2.event;
+
@end example
-You can also add options for @code{mysql.server} in a global
-@file{/etc/my.cnf} file. A typical @file{/etc/my.cnf} file might look like
-this:
+
+@node Apache, , Twin, Tutorial
+@section Using MySQL with Apache
+
+@cindex Apache
+
+The Contrib section includes programs that let you authenticate your
+users from a @strong{MySQL} database and also let you write your log
+files into a @strong{MySQL} table. @xref{Contrib}.
+
+You can change the Apache logging format to be easily readable by
+@strong{MySQL} by putting the following into the Apache configuration file:
@example
-[mysqld]
-datadir=/usr/local/mysql/var
-socket=/tmp/mysqld.sock
-port=3306
-user=mysql
+LogFormat \
+ "\"%h\",%@{%Y%m%d%H%M%S@}t,%>s,\"%b\",\"%@{Content-Type@}o\", \
+ \"%U\",\"%@{Referer@}i\",\"%@{User-Agent@}i\""
+@end example
-[mysql.server]
-basedir=/usr/local/mysql
+In @strong{MySQL} you can do something like this:
+
+@example
+LOAD DATA INFILE '/local/access_log' INTO TABLE table_name
+FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\'
@end example
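+
+The table itself is not defined in this manual. If you want to try this,
+a table matching the fields in the @code{LogFormat} line might look
+something like the following sketch (all column names and types here are
+only suggestions):
+
+@example
+CREATE TABLE apache_log (
+  remote_host  VARCHAR(80),
+  logtime      BIGINT,
+  http_status  SMALLINT UNSIGNED,
+  bytes_sent   VARCHAR(20),
+  content_type VARCHAR(60),
+  url          VARCHAR(200),
+  referer      VARCHAR(200),
+  user_agent   VARCHAR(200)
+);
+@end example
+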
-The @code{mysql.server} script understands the following options:
-@code{datadir}, @code{basedir}, and @code{pid-file}.
-The following table shows which option groups each of the startup scripts
-read from option files:
+@node MySQL Database Administration, MySQL Optimization, Tutorial, Top
+@chapter MySQL Database Administration
-@multitable @columnfractions .20 .80
-@item @strong{Script} @tab @strong{Option groups}
-@item @code{mysqld} @tab @code{mysqld} and @code{server}
-@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
-@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
-@end multitable
+@menu
+* Configuring MySQL::
+* Privilege system::
+* User Account Management::
+* Disaster Prevention::
+* Database Administration::
+* Localization::
+* Server-Side Scripts::
+* Client-Side Scripts::
+* Log Files::
+* Replication::
+@end menu
-@xref{Option files}.
+
+@node Configuring MySQL, Privilege system, MySQL Database Administration, MySQL Database Administration
+@section Configuring MySQL
+
+
+@menu
+* Command-line options::
+* Option files::
+* Installing many servers::
+* Multiple servers::
+@end menu
+
+@node Command-line options, Option files, Configuring MySQL, Configuring MySQL
+@subsection mysqld Command-line Options
@findex command-line options
@cindex options, command-line
@cindex mysqld options
-@node Command-line options, Option files, Automatic start, Post-installation
-@subsection mysqld Command-line Options
@code{mysqld} accepts the following command-line options:
@@ -13268,12 +14614,14 @@ Print out warnings like @code{Aborted connection...} to the @code{.err} file.
@xref{Communication errors}.
@end table
+
+@node Option files, Installing many servers, Command-line options, Configuring MySQL
+@subsection my.cnf Option Files
+
@cindex default options
@cindex option files
@cindex creating, default startup options
@cindex startup options, default
-@node Option files, , Command-line options, Post-installation
-@subsection Option Files
@strong{MySQL} can, since Version 3.22, read default startup options for the
server and for clients from option files.
@@ -13438,8 +14786,10 @@ shell> my_print_defaults client mysql
The above output contains all options for the groups 'client' and 'mysql'.
-@node Installing many servers, Upgrade, Post-installation, Installing
-@section Installing Many Servers on the Same Machine
+
+@node Installing many servers, Multiple servers, Option files, Configuring MySQL
+@subsection Installing Many Servers on the Same Machine
+
@cindex post-install, many servers
@cindex Installing many servers
@cindex Starting many servers
@@ -13511,346 +14861,148 @@ start them with @code{./bin/safe_mysqld} then in most cases the only
option you need to add/change is the @code{socket} and @code{port}
argument to @code{safe_mysqld}.
-@node Upgrade, , Installing many servers, Installing
-@section Upgrading/Downgrading MySQL
-@cindex upgrading
-@cindex downgrading
-
-You can always move the @strong{MySQL} form and data files between
-different versions on the same architecture as long as you have the same
-base version of @strong{MySQL}. The current base version is
-3. If you change the character set when running @strong{MySQL} (which may
-also change the sort order), you must run @code{myisamchk -r -q} on all
-tables. Otherwise your indexes may not be ordered correctly.
-
-If you are afraid of new versions, you can always rename your old
-@code{mysqld} to something like @code{mysqld}-'old-version-number'. If
-your new @code{mysqld} then does something unexpected, you can simply shut it
-down and restart with your old @code{mysqld}!
-
-When you do an upgrade you should also back up your old databases, of course.
-
-If after an upgrade, you experience problems with recompiled client programs,
-like @code{Commands out of sync} or unexpected core dumps, you probably have
-used an old header or library file when compiling your programs. In this
-case you should check the date for your @file{mysql.h} file and
-@file{libmysqlclient.a} library to verify that they are from the new
-@strong{MySQL} distribution. If not, please recompile your programs!
-
-If you get some problems that the new @code{mysqld} server doesn't want to
-start or that you can't connect without a password, check that you don't
-have some old @file{my.cnf} file from your old installation! You can
-check this with: @code{program-name --print-defaults}. If this outputs
-anything other than the program name, you have an active @code{my.cnf}
-file that will affect things!
-
-It is a good idea to rebuild and reinstall the @code{Msql-Mysql-modules}
-distribution whenever you install a new release of @strong{MySQL},
-particularly if you notice symptoms such as all your @code{DBI} scripts
-dumping core after you upgrade @strong{MySQL}.
@menu
-* Upgrading-from-3.22:: Upgrading from a 3.22 version to 3.23
-* Upgrading-from-3.21:: Upgrading from a 3.21 version to 3.22
-* Upgrading-from-3.20:: Upgrading from a 3.20 version to 3.21
-* Upgrading-to-arch:: Upgrading to another architecture
+* Multiple servers::
@end menu
-@cindex compatibility, between MySQL versions
-@cindex upgrading, 3.22 to 3.23
-@node Upgrading-from-3.22, Upgrading-from-3.21, Upgrade, Upgrade
-@subsection Upgrading From Version 3.22 to Version 3.23
-
-@strong{MySQL} Version 3.23 supports tables of the new @code{MyISAM} type and
-the old @code{ISAM} type. You don't have to convert your old tables to
-use these with Version 3.23. By default, all new tables will be created with
-type @code{MyISAM} (unless you start @code{mysqld} with the
-@code{--default-table-type=isam} option). You can change an @code{ISAM}
-table to a @code{MyISAM} table with @code{ALTER TABLE table_name TYPE=MyISAM}
-or the Perl script @code{mysql_convert_table_format}.
-
-Version 3.22 and 3.21 clients will work without any problems with a Version
-3.23 server.
-
-The following lists tell what you have to watch out for when upgrading to
-Version 3.23:
-
-@itemize @bullet
-@item
-All tables that uses the @code{tis620} character set must be fixed
-with @code{myisamchk -r} or @code{REPAIR TABLE}.
-@item
-If you do a @code{DROP DATABASE} on a symbolic linked database, both the
-link and the original database is deleted. (This didn't happen in 3.22
-because configure didn't detect the @code{readlink} system call).
-@item
-@code{OPTIMIZE TABLE} now only works for @strong{MyISAM} tables.
-For other table types, you can use @code{ALTER TABLE} to optimize the table.
-During @code{OPTIMIZE TABLE} the table is now locked from other threads.
-@item
-The @strong{MySQL} client @code{mysql} is now by default started with the
-option @code{--no-named-commands (-g)}. This option can be disabled with
-@code{--enable-named-commands (-G)}. This may cause incompatibility problems in
-some cases, for example in SQL scripts that use named commands without a
-semicolon! Long format commands still work from the first line.
-@item
-If you are using the @code{german} character sort order, you must repair
-all your tables with @code{isamchk -r}, as we have made some changes in
-the sort order!
-@item The default return type of @code{IF} will now depend on both arguments
-and not only the first argument.
-@item @code{AUTO_INCREMENT} will not work with negative numbers. The reason
-for this is that negative numbers caused problems when wrapping from -1 to 0.
-@code{AUTO_INCREMENT} is now for MyISAM tables handled at a lower level and
-is much faster than before. For MyISAM tables old numbers are also not reused
-anymore, even if you delete some rows from the table.
-@item @code{CASE}, @code{DELAYED}, @code{ELSE}, @code{END}, @code{FULLTEXT}, @code{INNER}, @code{RIGHT}, @code{THEN} and @code{WHEN} are now reserved words.
-@item @code{FLOAT(X)} is now a true floating-point type and not a value with
-a fixed number of decimals.
-@item When declaring @code{DECIMAL(length,dec)} the length argument no
-longer includes a place for the sign or the decimal point.
-@item A @code{TIME} string must now be of one of the following formats:
-@code{[[[DAYS] [H]H:]MM:]SS[.fraction]} or
-@code{[[[[[H]H]H]H]MM]SS[.fraction]}
-@item @code{LIKE} now compares strings using the same character
-comparison rules as @code{'='}. If you require the old behavior, you
-can compile @strong{MySQL} with the @code{CXXFLAGS=-DLIKE_CMP_TOUPPER}
-flag.
-@item @code{REGEXP} is now case insensitive for normal (not binary) strings.
-@item When you check/repair tables you should use @code{CHECK TABLE}
-or @code{myisamchk} for @code{MyISAM} tables (@code{.MYI}) and
-@code{isamchk} for ISAM (@code{.ISM}) tables.
-@item If you want your @code{mysqldump} files to be compatible between
-@strong{MySQL} Version 3.22 and Version 3.23, you should not use the
-@code{--opt} or @code{--full} option to @code{mysqldump}.
-@item Check all your calls to @code{DATE_FORMAT()} to make sure there is a
-@samp{%} before each format character. (Later @strong{MySQL} Version 3.22
-did allow this syntax.)
-@item
-@code{mysql_fetch_fields_direct} is now a function (it was a macro) and
-it returns a pointer to a @code{MYSQL_FIELD} instead of a
-@code{MYSQL_FIELD}.
-@item
-@code{mysql_num_fields()} can no longer be used on a @code{MYSQL*} object (it's
-now a function that takes @code{MYSQL_RES*} as an argument. You should now
-use @code{mysql_field_count()} instead.
-@item
-In @strong{MySQL} Version 3.22, the output of @code{SELECT DISTINCT ...} was
-almost always sorted. In Version 3.23, you must use @code{GROUP BY} or
-@code{ORDER BY} to obtain sorted output.
-@item
-@code{SUM()} now returns @code{NULL}, instead of 0, if there is no matching
-rows. This is according to ANSI SQL.
-@item An @code{AND} or @code{OR} with @code{NULL} values will now return
-@code{NULL} instead of 0. This mostly affects queries that use @code{NOT}
-on an @code{AND/OR} expression as @code{NOT NULL} = @code{NULL}.
-@code{LPAD()} and @code{RPAD()} will shorten the result string if it's longer
-than the length argument.
-@end itemize
-
-@cindex compatibility, between MySQL versions
-@node Upgrading-from-3.21, Upgrading-from-3.20, Upgrading-from-3.22, Upgrade
-@subsection Upgrading from Version 3.21 to Version 3.22
-@cindex upgrading, 3.21 to 3.22
-
-Nothing that affects compatibility has changed between Version 3.21 and 3.22.
-The only pitfall is that new tables that are created with @code{DATE} type
-columns will use the new way to store the date. You can't access these new
-fields from an old version of @code{mysqld}.
-
-After installing @strong{MySQL} Version 3.22, you should start the new server
-and then run the @code{mysql_fix_privilege_tables} script. This will add the
-new privileges that you need to use the @code{GRANT} command. If you forget
-this, you will get @code{Access denied} when you try to use @code{ALTER
-TABLE}, @code{CREATE INDEX}, or @code{DROP INDEX}. If your @strong{MySQL} root
-user requires a password, you should give this as an argument to
-@code{mysql_fix_privilege_tables}.
-
-The C API interface to @code{mysql_real_connect()} has changed. If you have
-an old client program that calls this function, you must place a @code{0} for
-the new @code{db} argument (or recode the client to send the @code{db}
-element for faster connections). You must also call @code{mysql_init()}
-before calling @code{mysql_real_connect()}! This change was done to allow
-the new @code{mysql_options()} function to save options in the @code{MYSQL}
-handler structure.
-
-The @code{mysqld} variable @code{key_buffer} has changed names to
-@code{key_buffer_size}, but you can still use the old name in your
-startup files.
-
-@node Upgrading-from-3.20, Upgrading-to-arch, Upgrading-from-3.21, Upgrade
-@subsection Upgrading from Version 3.20 to Version 3.21
-@cindex upgrading, 3.20 to 3.21
-
-If you are running a version older than Version 3.20.28 and want to
-switch to Version 3.21, you need to do the following:
-
-You can start the @code{mysqld} Version 3.21 server with @code{safe_mysqld
---old-protocol} to use it with clients from a Version 3.20 distribution.
-In this case, the new client function @code{mysql_errno()} will not
-return any server error, only @code{CR_UNKNOWN_ERROR} (but it
-works for client errors), and the server uses the old @code{password()}
-checking rather than the new one.
-
-If you are @strong{NOT} using the @code{--old-protocol} option to
-@code{mysqld}, you will need to make the following changes:
+@node Multiple servers, , Installing many servers, Configuring MySQL
+@subsection Running Multiple MySQL Servers on the Same Machine
-@itemize @bullet
-@item
-All client code must be recompiled. If you are using ODBC, you must get
-the new @strong{MyODBC} 2.x driver.
-@item
-The script @code{scripts/add_long_password} must be run to convert the
-@code{Password} field in the @code{mysql.user} table to @code{CHAR(16)}.
-@item
-All passwords must be reassigned in the @code{mysql.user} table (to get 62-bit
-rather than 31-bit passwords).
-@item
-The table format hasn't changed, so you don't have to convert any tables.
-@end itemize
+@cindex multiple servers
+@cindex servers, multiple
+@cindex running, multiple servers
-@strong{MySQL} Version 3.20.28 and above can handle the new @code{user} table
-format without affecting clients. If you have a @strong{MySQL} version earlier
-than Version 3.20.28, passwords will no longer work with it if you convert the
-@code{user} table. So to be safe, you should first upgrade to at least Version
-3.20.28 and then upgrade to Version 3.21.
+There are circumstances when you might want to run multiple servers on the same
+machine. For example, you might want to test a new @strong{MySQL} release
+while leaving your existing production setup undisturbed. Or you might
+be an Internet service provider that wants to provide independent
+@strong{MySQL} installations for different customers.
-@cindex Protocol mismatch
-The new client code works with a 3.20.x @code{mysqld} server, so
-if you experience problems with 3.21.x, you can use the old 3.20.x server
-without having to recompile the clients again.
+If you want to run multiple servers, the easiest way is to compile the
+servers with different TCP/IP ports and socket files so they are not all
+listening on the same TCP/IP port or socket file. @xref{mysqld_multi, ,
+@code{mysqld_multi}}.
-If you are not using the @code{--old-protocol} option to @code{mysqld},
-old clients will issue the error message:
+Assume an existing server is configured for the default port number and
+socket file. Then configure the new server with a @code{configure} command
+something like this:
@example
-ERROR: Protocol mismatch. Server Version = 10 Client Version = 9
+shell> ./configure --with-tcp-port=port_number \
+ --with-unix-socket-path=file_name \
+ --prefix=/usr/local/mysql-3.22.9
@end example
-The new Perl @code{DBI}/@code{DBD} interface also supports the old
-@code{mysqlperl} interface. The only change you have to make if you use
-@code{mysqlperl} is to change the arguments to the @code{connect()} function.
-The new arguments are: @code{host}, @code{database}, @code{user},
-@code{password} (the @code{user} and @code{password} arguments have changed
-places).
-@xref{Perl DBI Class, , Perl @code{DBI} Class}.
-
-The following changes may affect queries in old applications:
-
-@itemize @bullet
-@item
-@code{HAVING} must now be specified before any @code{ORDER BY} clause.
-@item
-The parameters to @code{LOCATE()} have been swapped.
-@item
-There are some new reserved words. The most notable are @code{DATE},
-@code{TIME}, and @code{TIMESTAMP}.
-@end itemize
-
-@cindex upgrading, different architecture
-@node Upgrading-to-arch, , Upgrading-from-3.20, Upgrade
-@subsection Upgrading to Another Architecture
+Here @code{port_number} and @code{file_name} should be different from the
+default port number and socket file pathname, and the @code{--prefix} value
+should specify an installation directory different from the one under which
+the existing @strong{MySQL} installation is located.
-If you are using @strong{MySQL} Version 3.23, you can copy the @code{.frm},
-@code{.MYI}, and @code{.MYD} files between different architectures that
-support the same floating-point format. (@strong{MySQL} takes care of any
-byte swapping issues.)
+You can check the socket used by any currently executing @strong{MySQL} server
+with this command:
-The @strong{MySQL} @code{ISAM} data and index files (@file{.ISD} and
-@file{*.ISM}, respectively) are architecture-dependent and in some cases
-OS-dependent. If you want to move your applications to another machine
-that has a different architecture or OS than your current machine, you
-should not try to move a database by simply copying the files to the
-other machine. Use @code{mysqldump} instead.
+@example
+shell> mysqladmin -h hostname --port=port_number variables
+@end example
-By default, @code{mysqldump} will create a file full of SQL statements.
-You can then transfer the file to the other machine and feed it as input
-to the @code{mysql} client.
+Note that if you specify ``@code{localhost}'' as a hostname, @code{mysqladmin}
+will default to using Unix sockets instead of TCP/IP.
-Try @code{mysqldump --help} to see what options are available.
-If you are moving the data to a newer version of @strong{MySQL}, you should use
-@code{mysqldump --opt} with the newer version to get a fast, compact dump.
+If you have a @strong{MySQL} server running on the port you used, you will
+get a list of some of the most important configurable variables in
+@strong{MySQL}, including the socket name.
-The easiest (although not the fastest) way to move a database between two
-machines is to run the following commands on the machine on which the
-database is located:
+You don't have to recompile a new @strong{MySQL} server just to start with
+a different port and socket. You can change the port and socket to be used
+by specifying them at run time as options to @code{safe_mysqld}:
@example
-shell> mysqladmin -h 'other hostname' create db_name
-shell> mysqldump --opt db_name \
- | mysql -h 'other hostname' db_name
+shell> /path/to/safe_mysqld --socket=file_name --port=port_number
@end example
-If you want to copy a database from a remote machine over a slow network,
-you can use:
-
-@example
-shell> mysqladmin create db_name
-shell> mysqldump -h 'other hostname' --opt --compress db_name \
- | mysql db_name
-@end example
+@code{mysqld_multi} can also take @code{safe_mysqld} (or @code{mysqld})
+as an argument and pass the options from a configuration file to
+@code{safe_mysqld}, which in turn passes them on to @code{mysqld}.
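+
+The following is a minimal sketch of what such a configuration file might
+look like. The group names, paths, and port numbers here are only
+illustrative; see @ref{mysqld_multi, , @code{mysqld_multi}} for the exact
+option groups it reads:
+
+@example
+# Options for mysqld_multi itself
+[mysqld_multi]
+mysqld  = /usr/local/mysql/bin/safe_mysqld
+
+# First server: default port and socket
+[mysqld1]
+socket  = /tmp/mysql.sock
+port    = 3306
+datadir = /usr/local/mysql/data
+
+# Second server: different port, socket, and data directory
+[mysqld2]
+socket  = /tmp/mysql-2.sock
+port    = 3307
+datadir = /usr/local/mysql-2/data
+@end example
+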
-You can also store the result in a file, then transfer the file to the
-target machine and load the file into the database there. For example,
-you can dump a database to a file on the source machine like this:
+If you run the new server on the same database directory as another
+server with logging enabled, you should also specify the name of the log
+files to @code{safe_mysqld} with @code{--log}, @code{--log-update}, or
+@code{--log-slow-queries}. Otherwise, both servers may be trying to
+write to the same log file.
-@example
-shell> mysqldump --quick db_name | gzip > db_name.contents.gz
-@end example
+@strong{WARNING}: Normally you should never have two servers that update
+data in the same database! If your OS doesn't support fault-free system
+locking, this may lead to unpleasant surprises!
-(The file created in this example is compressed.) Transfer the file
-containing the database contents to the target machine and run these commands
-there:
+If you want to use another database directory for the second server, you
+can use the @code{--datadir=path} option to @code{safe_mysqld}.
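+
+Putting these options together, a second server might be started with a
+command line along these lines (the path names, port number, socket, and
+log file shown here are hypothetical):
+
+@example
+shell> /path/to/safe_mysqld --datadir=/usr/local/mysql-2/data \
+           --socket=/tmp/mysql-2.sock --port=3307 \
+           --log=/usr/local/mysql-2/data/second.log &
+@end example
+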
-@example
-shell> mysqladmin create db_name
-shell> gunzip < db_name.contents.gz | mysql db_name
-@end example
+@strong{NOTE} also that running several @strong{MySQL} servers
+(@code{mysqlds}) on different machines and letting them access one data
+directory over @code{NFS} is generally a @strong{BAD IDEA}! @code{NFS}
+becomes a speed bottleneck; it is simply not meant for such use. You
+would also have to find a way to ensure that two or more @code{mysqlds}
+do not interfere with each other, and at the moment there is no platform
+on which file locking (usually the job of the @code{lockd} daemon) works
+100% reliably in every situation; @code{NFS} only makes the @code{lockd}
+daemon's work more complicated. So make it easy for yourself and forget
+about the idea. The working solution is to have one computer with
+several CPUs and an operating system that handles threads efficiently.
-@cindex @code{mysqldump}
-@cindex @code{mysqlimport}
-You can also use @code{mysqldump} and @code{mysqlimport} to accomplish
-the database transfer.
-For big tables, this is much faster than simply using @code{mysqldump}.
-In the commands shown below, @code{DUMPDIR} represents the full pathname
-of the directory you use to store the output from @code{mysqldump}.
+When you want to connect to a @strong{MySQL} server that is running on a
+different port than the one compiled into your client, you can use one of
+the following methods:
-First, create the directory for the output files and dump the database:
+@itemize @bullet
+@item
+Start the client with @code{--host 'hostname' --port=port_number} to connect
+with TCP/IP, or @code{[--host localhost] --socket=file_name} to connect via
+a Unix socket.
-@example
-shell> mkdir DUMPDIR
-shell> mysqldump --tab=DUMPDIR db_name
-@end example
+@item
+In your C or Perl programs, you can give the port or socket arguments
+when connecting to the @strong{MySQL} server.
-Then transfer the files in the @code{DUMPDIR} directory to some corresponding
-directory on the target machine and load the files into @strong{MySQL}
-there:
+@item
+If you are using the Perl @code{DBD::mysql} module you can read the options
+from the @strong{MySQL} option files. @xref{Option files}.
@example
-shell> mysqladmin create db_name # create database
-shell> cat DUMPDIR/*.sql | mysql db_name # create tables in database
-shell> mysqlimport db_name DUMPDIR/*.txt # load data into tables
+$dsn = "DBI:mysql:test;mysql_read_default_group=client;mysql_read_default_file=/usr/local/mysql/data/my.cnf"
+$dbh = DBI->connect($dsn, $user, $password);
@end example
-Also, don't forget to copy the @code{mysql} database, because that's where the
-grant tables (@code{user}, @code{db}, @code{host}) are stored. You may have
-to run commands as the @strong{MySQL} @code{root} user on the new machine
-until you have the @code{mysql} database in place.
+@item
+@tindex MYSQL_UNIX_PORT environment variable
+@tindex MYSQL_TCP_PORT environment variable
+@tindex environment variable, MYSQL_UNIX_PORT
+@tindex environment variable, MYSQL_TCP_PORT
+Set the @code{MYSQL_UNIX_PORT} and @code{MYSQL_TCP_PORT} environment variables
+to point to the Unix socket and TCP/IP port before you start your clients.
+If you normally use a specific socket or port, you should place commands
+to set these environment variables in your @file{.login} file.
+@xref{Environment variables}.
-After you import the @code{mysql} database on the new machine, execute
-@code{mysqladmin flush-privileges} so that the server reloads the grant table
-information.
+@item
+@tindex .my.cnf file
+Specify the default socket and TCP/IP port in the @file{.my.cnf} file in your
+home directory, as in the sketch following this list. @xref{Option files}.
+@end itemize
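+
+As an illustration of the last two approaches, the shell commands (Bourne
+shell syntax) and the @file{.my.cnf} entries might look something like this;
+the port number and socket pathname are examples only:
+
+@example
+shell> MYSQL_UNIX_PORT=/tmp/mysql-2.sock
+shell> MYSQL_TCP_PORT=3307
+shell> export MYSQL_UNIX_PORT MYSQL_TCP_PORT
+@end example
+
+@example
+[client]
+port=3307
+socket=/tmp/mysql-2.sock
+@end example
+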
-@node Privilege system, Reference, Installing, Top
-@chapter The MySQL Access Privilege System
+@node Privilege system, User Account Management, Configuring MySQL, MySQL Database Administration
+@section General Security Issues and the MySQL Access Privilege System
+
@cindex system, security
@cindex access privileges
@cindex privileges, access
@cindex security system
@cindex ACLs
-
@strong{MySQL} has an advanced but non-standard security/privilege
system. This section describes how it works.
@@ -13859,22 +15011,17 @@ system. This section describes how it works.
* Security:: How to make @strong{MySQL} secure against crackers
* Privileges options::
* What Privileges:: What the privilege system does
-* User names:: @strong{MySQL} user names and passwords
-* Connecting:: Connecting to the @strong{MySQL} server
-* Password security:: Keeping your password secure
-* Privileges provided:: Privileges provided by @strong{MySQL}
* Privileges:: How the privilege system works
+* Privileges provided:: Privileges provided by @strong{MySQL}
+* Connecting:: Connecting to the @strong{MySQL} server
* Connection access:: Access control, stage 1: Connection verification
* Request access:: Access control, stage 2: Request verification
-* Privilege changes:: When privilege changes take effect
-* Default privileges:: Setting up the initial @strong{MySQL} privileges
-* Adding users:: Adding new users to @strong{MySQL}
-* Passwords:: How to set up passwords
* Access denied:: Causes of @code{Access denied} errors
@end menu
+
@node General security, Security, Privilege system, Privilege system
-@section General Security
+@subsection General Security Guidelines
Anyone using @strong{MySQL} on a computer connected to the Internet
should read this section to avoid the most common security mistakes.
@@ -14058,8 +15205,10 @@ actually mean that it is encrypted. If you need high security, you should
consult with a security expert.
@end itemize
+
@node Security, Privileges options, General security, Privilege system
-@section How to Make MySQL Secure Against Crackers
+@subsection How to Make MySQL Secure Against Crackers
+
@cindex crackers, security against
@cindex security, against crackers
@@ -14177,7 +15326,7 @@ can do this by setting the @code{max_user_connections} variable in
@end itemize
@node Privileges options, What Privileges, Security, Privilege system
-@section Startup Options for @code{mysqld} Concerning Security
+@subsection Startup Options for @code{mysqld} Concerning Security
The following @code{mysqld} options affect networking security:
@@ -14219,8 +15368,10 @@ some kind of privilege.
@end table
-@node What Privileges, User names, Privileges options, Privilege system
-@section What the Privilege System Does
+
+@node What Privileges, Privileges, Privileges options, Privilege system
+@subsection What the Privilege System Does
+
@cindex system, privilege
@cindex privilege system
@cindex passwords, security
@@ -14234,243 +15385,214 @@ Additional functionality includes the ability to have an anonymous user and
to grant privileges for @strong{MySQL}-specific functions such as @code{LOAD
DATA INFILE} and administrative operations.
-@node User names, Connecting, What Privileges, Privilege system
-@section MySQL User Names and Passwords
-@cindex user names, and passwords
-@cindex passwords, for users
-There are several distinctions between the way user names and passwords are
-used by @strong{MySQL} and the way they are used by Unix or Windows:
+@node Privileges, Privileges provided, What Privileges, Privilege system
+@subsection How the Privilege System Works
-@itemize @bullet
-@item
-User names, as used by @strong{MySQL} for authentication purposes, have
-nothing to do with Unix user names (login names) or Windows user names. Most
-@strong{MySQL} clients by default try to log in using the current Unix user
-name as the @strong{MySQL} user name, but that is for convenience only.
-Client programs allow a different name to be specified with the @code{-u} or
-@code{--user} options. This means that you can't make a database secure in
-any way unless all @strong{MySQL} user names have passwords. Anyone may
-attempt to connect to the server using any name, and they will succeed if
-they specify any name that doesn't have a password.
+@cindex privilege system, described
-@item
-@strong{MySQL} user names can be up to 16 characters long; Unix user names
-typically are limited to 8 characters.
+The @strong{MySQL} privilege system ensures that all users may do exactly the
+things that they are supposed to be allowed to do. When you connect to a
+@strong{MySQL} server, your identity is determined by @strong{the host from
+which you connect} and @strong{the user name you specify}. The system grants
+privileges according to your identity and @strong{what you want to do}.
+@strong{MySQL} considers both your hostname and user name in identifying you
+because there is little reason to assume that a given user name belongs to
+the same person everywhere on the Internet. For example, the user
+@code{bill} who connects from @code{whitehouse.gov} need not be the same
+person as the user @code{bill} who connects from @code{microsoft.com}.
+@strong{MySQL} handles this by allowing you to distinguish users on different
+hosts that happen to have the same name: you can grant @code{bill} one set
+of privileges for connections from @code{whitehouse.gov}, and a different set
+of privileges for connections from @code{microsoft.com}.
+
+@strong{MySQL} access control involves two stages:
+
+@itemize @bullet
@item
-@strong{MySQL} passwords have nothing to do with Unix passwords. There is no
-necessary connection between the password you use to log in to a Unix machine
-and the password you use to access a database on that machine.
+Stage 1: The server checks whether or not you are even allowed to connect.
@item
-@strong{MySQL} encrypts passwords using a different algorithm than the
-one used during the Unix login process. See the descriptions of the
-@code{PASSWORD()} and @code{ENCRYPT()} functions in @ref{Miscellaneous
-functions}. Note that even if the password is stored 'scrambled', and
-knowing your 'scrambled' password is enough to be able to connect to
-the @strong{MySQL} server!
+Stage 2: Assuming you can connect, the server checks each request you issue
+to see whether or not you have sufficient privileges to perform it. For
+example, if you try to select rows from a table in a database or drop a table
+from the database, the server makes sure you have the @strong{select}
+privilege for the table or the @strong{drop} privilege for the database.
@end itemize
-@strong{MySQL} users and they privileges are normally created with the
-@code{GRANT} command. @xref{GRANT}.
-
-When you login to a @strong{MySQL} server with a command line client you
-should specify the password with @code{--password=your-password}.
-@xref{Connecting}.
-
-@example
-mysql --user=monty --password=guess database_name
-@end example
-
-If you want the client to prompt for a password, you should use
-@code{--password} without any argument
-
-@example
-mysql --user=monty --password database_name
-@end example
-
-or the short form:
+The server uses the @code{user}, @code{db}, and @code{host} tables in the
+@code{mysql} database at both stages of access control. The fields in these
+grant tables are shown below:
-@example
-mysql -u monty -p database_name
-@end example
+@multitable @columnfractions .2 .25 .25 .25
+@item @strong{Table name} @tab @code{user} @tab @code{db} @tab @code{host}
-Note that in the last example the password is @strong{NOT} 'database_name'.
+@item @strong{Scope fields} @tab @code{Host} @tab @code{Host} @tab @code{Host}
+@item @tab @code{User} @tab @code{Db} @tab @code{Db}
+@item @tab @code{Password} @tab @code{User} @tab
-If you want to use the @code{-p} option to supply a password you should do like this:
+@item @strong{Privilege fields} @tab @code{Select_priv} @tab @code{Select_priv} @tab @code{Select_priv}
+@item @tab @code{Insert_priv} @tab @code{Insert_priv} @tab @code{Insert_priv}
+@item @tab @code{Update_priv} @tab @code{Update_priv} @tab @code{Update_priv}
+@item @tab @code{Delete_priv} @tab @code{Delete_priv} @tab @code{Delete_priv}
+@item @tab @code{Index_priv} @tab @code{Index_priv} @tab @code{Index_priv}
+@item @tab @code{Alter_priv} @tab @code{Alter_priv} @tab @code{Alter_priv}
+@item @tab @code{Create_priv} @tab @code{Create_priv} @tab @code{Create_priv}
+@item @tab @code{Drop_priv} @tab @code{Drop_priv} @tab @code{Drop_priv}
+@item @tab @code{Grant_priv} @tab @code{Grant_priv} @tab @code{Grant_priv}
+@item @tab @code{References_priv} @tab @tab
+@item @tab @code{Reload_priv} @tab @tab
+@item @tab @code{Shutdown_priv} @tab @tab
+@item @tab @code{Process_priv} @tab @tab
+@item @tab @code{File_priv} @tab @tab
+@end multitable
-@example
-mysql -u monty -pguess database_name
-@end example
+For the second stage of access control (request verification), the server
+may, if the request involves tables, additionally consult the
+@code{tables_priv} and @code{columns_priv} tables. The fields in these
+tables are shown below:
-On some system the library call that @strong{MySQL} uses to prompt for a
-password will automatically cut the password to 8 characters. Internally
-@strong{MySQL} doesn't have any limit for the length of the password.
+@multitable @columnfractions .2 .25 .25
+@item @strong{Table name} @tab @code{tables_priv} @tab @code{columns_priv}
-@node Connecting, Password security, User names, Privilege system
-@section Connecting to the MySQL Server
-@cindex connecting, to the server
-@cindex default hostname
-@cindex hostname, default
-@cindex server, connecting
+@item @strong{Scope fields} @tab @code{Host} @tab @code{Host}
+@item @tab @code{Db} @tab @code{Db}
+@item @tab @code{User} @tab @code{User}
+@item @tab @code{Table_name} @tab @code{Table_name}
+@item @tab @tab @code{Column_name}
-@strong{MySQL} client programs generally require that you specify connection
-parameters when you want to access a @strong{MySQL} server: the host you want
-to connect to, your user name, and your password. For example, the
-@code{mysql} client can be started like this (optional arguments are enclosed
-between @samp{[} and @samp{]}):
+@item @strong{Privilege fields} @tab @code{Table_priv} @tab @code{Column_priv}
+@item @tab @code{Column_priv} @tab
-@example
-shell> mysql [-h host_name] [-u user_name] [-pyour_pass]
-@end example
+@item @strong{Other fields} @tab @code{Timestamp} @tab @code{Timestamp}
+@item @tab @code{Grantor} @tab
+@end multitable
-Alternate forms of the @code{-h}, @code{-u}, and @code{-p} options are
-@code{--host=host_name}, @code{--user=user_name}, and
-@code{--password=your_pass}. Note that there is @emph{no space} between
-@code{-p} or @code{--password=} and the password following it.
+Each grant table contains scope fields and privilege fields.
-@strong{NOTE:} Specifying a password on the command line is not secure!
-Any user on your system may then find out your password by typing a command
-like: @code{ps auxww}. @xref{Option files}.
+Scope fields determine the scope of each entry in the tables, that is, the
+context in which the entry applies. For example, a @code{user} table entry
+with @code{Host} and @code{User} values of @code{'thomas.loc.gov'} and
+@code{'bob'} would be used for authenticating connections made to the server
+by @code{bob} from the host @code{thomas.loc.gov}. Similarly, a @code{db}
+table entry with @code{Host}, @code{User}, and @code{Db} fields of
+@code{'thomas.loc.gov'}, @code{'bob'} and @code{'reports'} would be used when
+@code{bob} connects from the host @code{thomas.loc.gov} to access the
+@code{reports} database. The @code{tables_priv} and @code{columns_priv}
+tables contain scope fields indicating tables or table/column combinations
+to which each entry applies.
-@code{mysql} uses default values for connection parameters that are missing
-from the command line:
+@cindex case sensitivity, in access checking
+For access-checking purposes, comparisons of @code{Host} values are
+case insensitive. @code{User}, @code{Password}, @code{Db}, and
+@code{Table_name} values are case sensitive.
+@code{Column_name} values are case insensitive in @strong{MySQL} Version
+3.22.12 or later.
-@itemize @bullet
-@item
-The default hostname is @code{localhost}.
+Privilege fields indicate the privileges granted by a table entry, that is,
+what operations can be performed. The server combines the information in the
+various grant tables to form a complete description of a user's privileges.
+The rules used to do this are described in @ref{Request access}.
-@item
-The default user name is your Unix login name.
+Scope fields are strings, declared as shown below; the default value for
+each is the empty string:
-@item
-No password is supplied if @code{-p} is missing.
-@end itemize
+@multitable @columnfractions .15 .15 .7
+@item @strong{Field name} @tab @strong{Type}
+@item @code{Host} @tab @code{CHAR(60)}
+@item @code{User} @tab @code{CHAR(16)}
+@item @code{Password} @tab @code{CHAR(16)}
+@item @code{Db} @tab @code{CHAR(64)} @tab (@code{CHAR(60)} for the
+@code{tables_priv} and @code{columns_priv} tables)
+@item @code{Table_name} @tab @code{CHAR(60)}
+@item @code{Column_name} @tab @code{CHAR(60)}
+@end multitable
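+
+If you want to check how these fields are declared in your own
+installation, you can examine the grant tables directly, for example:
+
+@example
+shell> mysql -u root mysql
+mysql> DESCRIBE user;
+mysql> DESCRIBE tables_priv;
+@end example
+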
-Thus, for a Unix user @code{joe}, the following commands are equivalent:
+In the @code{user}, @code{db} and @code{host} tables,
+all privilege fields are declared as @code{ENUM('N','Y')} --- each can have a
+value of @code{'N'} or @code{'Y'}, and the default value is @code{'N'}.
-@example
-shell> mysql -h localhost -u joe
-shell> mysql -h localhost
-shell> mysql -u joe
-shell> mysql
-@end example
+In the @code{tables_priv} and @code{columns_priv} tables, the privilege
+fields are declared as @code{SET} fields:
-Other @strong{MySQL} clients behave similarly.
+@multitable @columnfractions .2 .2 .6
+@item @strong{Table name} @tab @strong{Field name} @tab @strong{Possible set elements}
+@item @code{tables_priv} @tab @code{Table_priv} @tab @code{'Select', 'Insert',
+'Update', 'Delete', 'Create', 'Drop', 'Grant', 'References', 'Index', 'Alter'}
+@item @code{tables_priv} @tab @code{Column_priv} @tab @code{'Select', 'Insert',
+'Update', 'References'}
+@item @code{columns_priv} @tab @code{Column_priv} @tab @code{'Select', 'Insert',
+'Update', 'References'}
+@end multitable
-On Unix systems, you can specify different default values to be used when you
-make a connection, so that you need not enter them on the command line each
-time you invoke a client program. This can be done in a couple of ways:
+Briefly, the server uses the grant tables like this:
@itemize @bullet
@item
-@tindex .my.cnf file
-You can specify connection parameters in the @code{[client]} section of the
-@file{.my.cnf} configuration file in your home directory. The relevant
-section of the file might look like this:
-
-@example
-[client]
-host=host_name
-user=user_name
-password=your_pass
-@end example
-
-@xref{Option files}.
+The @code{user} table scope fields determine whether to allow or reject
+incoming connections. For allowed connections, any privileges granted in
+the @code{user} table indicate the user's global (superuser) privileges.
+These privileges apply to @strong{all} databases on the server.
@item
-@tindex MYSQL_HOST environment variable
-@tindex Environment variable, MYSQL_HOST
-@tindex MYSQL_PWD environment variable
-@tindex Environment variable, MYSQL_PWD
-@tindex USER environment variable
-@tindex Environment variable, USER
-You can specify connection parameters using environment variables. The
-host can be specified for @code{mysql} using @code{MYSQL_HOST}. The
-@strong{MySQL} user name can be specified using @code{USER} (this is for
-Windows only). The password can be specified using @code{MYSQL_PWD}
-(but this is insecure; see the next section). @xref{Environment variables}.
-@end itemize
-
-@node Password security, Privileges provided, Connecting, Privilege system
-@section Keeping Your Password Secure
-
-It is inadvisable to specify your password in a way that exposes it to
-discovery by other users. The methods you can use to specify your password
-when you run client programs are listed below, along with an assessment of
-the risks of each method:
+The @code{db} and @code{host} tables are used together:
-@itemize @bullet
+@itemize @minus
@item
-Never give a normal user access to the @code{mysql.user} table. Knowing
-the encrypted password for a user makes it possible to login as this
-user. The passwords are only scrambled so that one shouldn't be able to
-see the real password you used (if you happen to use a similar password
-with your other applications).
+The @code{db} table scope fields determine which users can access which
+databases from which hosts. The privilege fields determine which operations
+are allowed.
@item
-Use a @code{-pyour_pass} or @code{--password=your_pass} option on the command
-line. This is convenient but insecure, because your password becomes visible
-to system status programs (such as @code{ps}) that may be invoked by other
-users to display command lines. (@strong{MySQL} clients typically overwrite
-the command-line argument with zeroes during their initialization sequence,
-but there is still a brief interval during which the value is visible.)
+The @code{host} table is used as an extension of the @code{db} table when you
+want a given @code{db} table entry to apply to several hosts. For example,
+if you want a user to be able to use a database from several hosts in
+your network, leave the @code{Host} value empty in the user's @code{db} table
+entry, then populate the @code{host} table with an entry for each of those
+hosts. (A brief sketch of this appears after the list.) This mechanism is
+described in more detail in @ref{Request access}.
+@end itemize
@item
-Use a @code{-p} or @code{--password} option (with no @code{your_pass} value
-specified). In this case, the client program solicits the password from
-the terminal:
-@findex -p option
-@findex -password option
-
-@example
-shell> mysql -u user_name -p
-Enter password: ********
-@end example
+The @code{tables_priv} and @code{columns_priv} tables are similar to
+the @code{db} table, but are more fine-grained: they apply at the
+table and column levels rather than at the database level.
+@end itemize
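+
+As a brief sketch of the @code{host} table mechanism described in the list
+above (all database, user, and host names here are purely hypothetical),
+the following statements give a user @strong{select} access to one database
+from two specific hosts by leaving @code{Host} empty in the @code{db} entry:
+
+@example
+shell> mysql -u root mysql
+mysql> INSERT INTO db (Host,Db,User,Select_priv)
+    ->        VALUES ('','somedb','someuser','Y');
+mysql> INSERT INTO host (Host,Db,Select_priv)
+    ->        VALUES ('host1.your.net','somedb','Y');
+mysql> INSERT INTO host (Host,Db,Select_priv)
+    ->        VALUES ('host2.your.net','somedb','Y');
+mysql> FLUSH PRIVILEGES;
+@end example
+
+The user would, of course, also need a matching @code{user} table entry in
+order to connect to the server at all.
+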
-The @samp{*} characters represent your password.
+Note that administrative privileges (@strong{reload}, @strong{shutdown},
+etc.) are specified only in the @code{user} table. This is because
+administrative operations are operations on the server itself and are not
+database-specific, so there is no reason to list such privileges in the
+other grant tables. In fact, only the @code{user} table need
+be consulted to determine whether or not you can perform an administrative
+operation.
-It is more secure to enter your password this way than to specify it on the
-command line because it is not visible to other users. However, this method
-of entering a password is suitable only for programs that you run
-interactively. If you want to invoke a client from a script that runs
-non-interactively, there is no opportunity to enter the password from the
-terminal. On some systems, you may even find that the first line of your
-script is read and interpreted (incorrectly) as your password!
+The @strong{file} privilege is specified only in the @code{user} table, too.
+It is not an administrative privilege as such, but your ability to read or
+write files on the server host is independent of the database you are
+accessing.
-@item
-@tindex .my.cnf file
-Store your password in a configuration file. For example, you can list your
-password in the @code{[client]} section of the @file{.my.cnf} file in your
-home directory:
+The @code{mysqld} server reads the contents of the grant tables once, when it
+starts up. Changes to the grant tables take effect as indicated in
+@ref{Privilege changes}.
-@example
-[client]
-password=your_pass
-@end example
+When you modify the contents of the grant tables, it is a good idea to make
+sure that your changes set up privileges the way you want. For help in
+diagnosing problems, see @ref{Access denied}. For advice on security issues,
+@pxref{Security}.
-If you store your password in @file{.my.cnf}, the file should not be group or
-world readable or writable. Make sure the file's access mode is @code{400}
-or @code{600}.
+A useful
+diagnostic tool is the @code{mysqlaccess} script, which Yves Carlier has
+provided for the @strong{MySQL} distribution. Invoke @code{mysqlaccess} with
+the @code{--help} option to find out how it works.
+Note that @code{mysqlaccess} checks access using only the @code{user},
+@code{db} and @code{host} tables. It does not check table- or column-level
+privileges.
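+
+For example, you might run it like this (the host, user, and database names
+here are placeholders; @code{--help} describes the accepted arguments and
+options):
+
+@example
+shell> mysqlaccess --help
+shell> mysqlaccess your_host your_user your_db
+@end example
+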
-@xref{Option files}.
-@item
-You can store your password in the @code{MYSQL_PWD} environment variable, but
-this method must be considered extremely insecure and should not be used.
-Some versions of @code{ps} include an option to display the environment of
-running processes; your password will be in plain sight for all to see if
-you set @code{MYSQL_PWD}. Even on systems without such a version of
-@code{ps}, it is unwise to assume there is no other method to observe process
-environments. @xref{Environment variables}.
-@end itemize
-All in all, the safest methods are to have the client program prompt for the
-password or to specify the password in a properly protected @file{.my.cnf}
-file.
+@node Privileges provided, Connecting, Privileges, Privilege system
+@subsection Privileges Provided by MySQL
-@node Privileges provided, Privileges, Password security, Privilege system
-@section Privileges Provided by MySQL
@cindex privilege information, location
Information about user privileges is stored in the @code{user}, @code{db},
@@ -14617,210 +15739,97 @@ You cannot specify that a user has privileges to create or drop tables
in a database but not to create or drop the database itself.
@end itemize
-@node Privileges, Connection access, Privileges provided, Privilege system
-@section How the Privilege System Works
-@cindex privilege system, described
-
-The @strong{MySQL} privilege system ensures that all users may do exactly the
-things that they are supposed to be allowed to do. When you connect to a
-@strong{MySQL} server, your identity is determined by @strong{the host from
-which you connect} and @strong{the user name you specify}. The system grants
-privileges according to your identity and @strong{what you want to do}.
-
-@strong{MySQL} considers both your hostname and user name in identifying you
-because there is little reason to assume that a given user name belongs to
-the same person everywhere on the Internet. For example, the user
-@code{bill} who connects from @code{whitehouse.gov} need not be the same
-person as the user @code{bill} who connects from @code{microsoft.com}.
-@strong{MySQL} handles this by allowing you to distinguish users on different
-hosts that happen to have the same name: you can grant @code{bill} one set
-of privileges for connections from @code{whitehouse.gov}, and a different set
-of privileges for connections from @code{microsoft.com}.
-
-@strong{MySQL} access control involves two stages:
-
-@itemize @bullet
-@item
-Stage 1: The server checks whether or not you are even allowed to connect.
-
-@item
-Stage 2: Assuming you can connect, the server checks each request you issue
-to see whether or not you have sufficient privileges to perform it. For
-example, if you try to select rows from a table in a database or drop a table
-from the database, the server makes sure you have the @strong{select}
-privilege for the table or the @strong{drop} privilege for the database.
-@end itemize
-
-The server uses the @code{user}, @code{db}, and @code{host} tables in the
-@code{mysql} database at both stages of access control. The fields in these
-grant tables are shown below:
-
-@multitable @columnfractions .2 .25 .25 .25
-@item @strong{Table name} @tab @code{user} @tab @code{db} @tab @code{host}
-@item @strong{Scope fields} @tab @code{Host} @tab @code{Host} @tab @code{Host}
-@item @tab @code{User} @tab @code{Db} @tab @code{Db}
-@item @tab @code{Password} @tab @code{User} @tab
+@node Connecting, Connection access, Privileges provided, Privilege system
+@subsection Connecting to the MySQL Server
-@item @strong{Privilege fields} @tab @code{Select_priv} @tab @code{Select_priv} @tab @code{Select_priv}
-@item @tab @code{Insert_priv} @tab @code{Insert_priv} @tab @code{Insert_priv}
-@item @tab @code{Update_priv} @tab @code{Update_priv} @tab @code{Update_priv}
-@item @tab @code{Delete_priv} @tab @code{Delete_priv} @tab @code{Delete_priv}
-@item @tab @code{Index_priv} @tab @code{Index_priv} @tab @code{Index_priv}
-@item @tab @code{Alter_priv} @tab @code{Alter_priv} @tab @code{Alter_priv}
-@item @tab @code{Create_priv} @tab @code{Create_priv} @tab @code{Create_priv}
-@item @tab @code{Drop_priv} @tab @code{Drop_priv} @tab @code{Drop_priv}
-@item @tab @code{Grant_priv} @tab @code{Grant_priv} @tab @code{Grant_priv}
-@item @tab @code{References_priv} @tab @tab
-@item @tab @code{Reload_priv} @tab @tab
-@item @tab @code{Shutdown_priv} @tab @tab
-@item @tab @code{Process_priv} @tab @tab
-@item @tab @code{File_priv} @tab @tab
-@end multitable
-
-For the second stage of access control (request verification), the server
-may, if the request involves tables, additionally consult the
-@code{tables_priv} and @code{columns_priv} tables. The fields in these
-tables are shown below:
-
-@multitable @columnfractions .2 .25 .25
-@item @strong{Table name} @tab @code{tables_priv} @tab @code{columns_priv}
-
-@item @strong{Scope fields} @tab @code{Host} @tab @code{Host}
-@item @tab @code{Db} @tab @code{Db}
-@item @tab @code{User} @tab @code{User}
-@item @tab @code{Table_name} @tab @code{Table_name}
-@item @tab @tab @code{Column_name}
+@cindex connecting, to the server
+@cindex default hostname
+@cindex hostname, default
+@cindex server, connecting
-@item @strong{Privilege fields} @tab @code{Table_priv} @tab @code{Column_priv}
-@item @tab @code{Column_priv} @tab
+@strong{MySQL} client programs generally require that you specify connection
+parameters when you want to access a @strong{MySQL} server: the host you want
+to connect to, your user name, and your password. For example, the
+@code{mysql} client can be started like this (optional arguments are enclosed
+between @samp{[} and @samp{]}):
-@item @strong{Other fields} @tab @code{Timestamp} @tab @code{Timestamp}
-@item @tab @code{Grantor} @tab
-@end multitable
+@example
+shell> mysql [-h host_name] [-u user_name] [-pyour_pass]
+@end example
-Each grant table contains scope fields and privilege fields.
+Alternate forms of the @code{-h}, @code{-u}, and @code{-p} options are
+@code{--host=host_name}, @code{--user=user_name}, and
+@code{--password=your_pass}. Note that there is @emph{no space} between
+@code{-p} or @code{--password=} and the password following it.
-Scope fields determine the scope of each entry in the tables, that is, the
-context in which the entry applies. For example, a @code{user} table entry
-with @code{Host} and @code{User} values of @code{'thomas.loc.gov'} and
-@code{'bob'} would be used for authenticating connections made to the server
-by @code{bob} from the host @code{thomas.loc.gov}. Similarly, a @code{db}
-table entry with @code{Host}, @code{User}, and @code{Db} fields of
-@code{'thomas.loc.gov'}, @code{'bob'} and @code{'reports'} would be used when
-@code{bob} connects from the host @code{thomas.loc.gov} to access the
-@code{reports} database. The @code{tables_priv} and @code{columns_priv}
-tables contain scope fields indicating tables or table/column combinations
-to which each entry applies.
+@strong{NOTE:} Specifying a password on the command line is not secure!
+Any user on your system may then find out your password by typing a command
+like: @code{ps auxww}. @xref{Option files}.
-@cindex case sensitivity, in access checking
-For access-checking purposes, comparisons of @code{Host} values are
-case insensitive. @code{User}, @code{Password}, @code{Db}, and
-@code{Table_name} values are case sensitive.
-@code{Column_name} values are case insensitive in @strong{MySQL} Version
-3.22.12 or later.
+@code{mysql} uses default values for connection parameters that are missing
+from the command line:
-Privilege fields indicate the privileges granted by a table entry, that is,
-what operations can be performed. The server combines the information in the
-various grant tables to form a complete description of a user's privileges.
-The rules used to do this are described in @ref{Request access}.
+@itemize @bullet
+@item
+The default hostname is @code{localhost}.
-Scope fields are strings, declared as shown below; the default value for
-each is the empty string:
+@item
+The default user name is your Unix login name.
-@multitable @columnfractions .15 .15 .7
-@item @strong{Field name} @tab @strong{Type}
-@item @code{Host} @tab @code{CHAR(60)}
-@item @code{User} @tab @code{CHAR(16)}
-@item @code{Password} @tab @code{CHAR(16)}
-@item @code{Db} @tab @code{CHAR(64)} @tab (@code{CHAR(60)} for the
-@code{tables_priv} and @code{columns_priv} tables)
-@item @code{Table_name} @tab @code{CHAR(60)}
-@item @code{Column_name} @tab @code{CHAR(60)}
-@end multitable
+@item
+No password is supplied if @code{-p} is missing.
+@end itemize
-In the @code{user}, @code{db} and @code{host} tables,
-all privilege fields are declared as @code{ENUM('N','Y')} --- each can have a
-value of @code{'N'} or @code{'Y'}, and the default value is @code{'N'}.
+Thus, for a Unix user @code{joe}, the following commands are equivalent:
-In the @code{tables_priv} and @code{columns_priv} tables, the privilege
-fields are declared as @code{SET} fields:
+@example
+shell> mysql -h localhost -u joe
+shell> mysql -h localhost
+shell> mysql -u joe
+shell> mysql
+@end example
-@multitable @columnfractions .2 .2 .6
-@item @strong{Table name} @tab @strong{Field name} @tab @strong{Possible set elements}
-@item @code{tables_priv} @tab @code{Table_priv} @tab @code{'Select', 'Insert',
-'Update', 'Delete', 'Create', 'Drop', 'Grant', 'References', 'Index', 'Alter'}
-@item @code{tables_priv} @tab @code{Column_priv} @tab @code{'Select', 'Insert',
-'Update', 'References'}
-@item @code{columns_priv} @tab @code{Column_priv} @tab @code{'Select', 'Insert',
-'Update', 'References'}
-@end multitable
+Other @strong{MySQL} clients behave similarly.
-Briefly, the server uses the grant tables like this:
+On Unix systems, you can specify different default values to be used when you
+make a connection, so that you need not enter them on the command line each
+time you invoke a client program. This can be done in a couple of ways:
@itemize @bullet
@item
-The @code{user} table scope fields determine whether to allow or reject
-incoming connections. For allowed connections, any privileges granted in
-the @code{user} table indicate the user's global (superuser) privileges.
-These privileges apply to @strong{all} databases on the server.
-
-@item
-The @code{db} and @code{host} tables are used together:
+@tindex .my.cnf file
+You can specify connection parameters in the @code{[client]} section of the
+@file{.my.cnf} configuration file in your home directory. The relevant
+section of the file might look like this:
-@itemize @minus
-@item
-The @code{db} table scope fields determine which users can access which
-databases from which hosts. The privilege fields determine which operations
-are allowed.
+@example
+[client]
+host=host_name
+user=user_name
+password=your_pass
+@end example
-@item
-The @code{host} table is used as an extension of the @code{db} table when you
-want a given @code{db} table entry to apply to several hosts. For example,
-if you want a user to be able to use a database from several hosts in
-your network, leave the @code{Host} value empty in the user's @code{db} table
-entry, then populate the @code{host} table with an entry for each of those
-hosts. This mechanism is described more detail in @ref{Request access}.
-@end itemize
+@xref{Option files}.
@item
-The @code{tables_priv} and @code{columns_priv} tables are similar to
-the @code{db} table, but are more fine-grained: they apply at the
-table and column levels rather than at the database level.
+@tindex MYSQL_HOST environment variable
+@tindex Environment variable, MYSQL_HOST
+@tindex MYSQL_PWD environment variable
+@tindex Environment variable, MYSQL_PWD
+@tindex USER environment variable
+@tindex Environment variable, USER
+You can specify connection parameters using environment variables. The
+host can be specified for @code{mysql} using @code{MYSQL_HOST}. The
+@strong{MySQL} user name can be specified using @code{USER} (this is for
+Windows only). The password can be specified using @code{MYSQL_PWD}
+(but this is insecure; see the next section). @xref{Environment variables}.
@end itemize
-Note that administrative privileges (@strong{reload}, @strong{shutdown},
-etc.) are specified only in the @code{user} table. This is because
-administrative operations are operations on the server itself and are not
-database-specific, so there is no reason to list such privileges in the
-other grant tables. In fact, only the @code{user} table need
-be consulted to determine whether or not you can perform an administrative
-operation.
-The @strong{file} privilege is specified only in the @code{user} table, too.
-It is not an administrative privilege as such, but your ability to read or
-write files on the server host is independent of the database you are
-accessing.
-
-The @code{mysqld} server reads the contents of the grant tables once, when it
-starts up. Changes to the grant tables take effect as indicated in
-@ref{Privilege changes}.
+@node Connection access, Request access, Connecting, Privilege system
+@subsection Access Control, Stage 1: Connection Verification
-When you modify the contents of the grant tables, it is a good idea to make
-sure that your changes set up privileges the way you want. For help in
-diagnosing problems, see @ref{Access denied}. For advice on security issues,
-@pxref{Security}.
-
-A useful
-diagnostic tool is the @code{mysqlaccess} script, which Yves Carlier has
-provided for the @strong{MySQL} distribution. Invoke @code{mysqlaccess} with
-the @code{--help} option to find out how it works.
-Note that @code{mysqlaccess} checks access using only the @code{user},
-@code{db} and @code{host} tables. It does not check table- or column-level
-privileges.
-
-@node Connection access, Request access, Privileges, Privilege system
-@section Access Control, Stage 1: Connection Verification
@cindex access control
@cindex control access
@cindex connecting, verification
@@ -15025,8 +16034,9 @@ as the @code{User} field value, but by the entry with no user name!
If you have problems connecting to the server, print out the @code{user}
table and sort it by hand to see where the first match is being made.
-@node Request access, Privilege changes, Connection access, Privilege system
-@section Access Control, Stage 2: Request Verification
+
+@node Request access, Access denied, Connection access, Privilege system
+@subsection Access Control, Stage 2: Request Verification
Once you establish a connection, the server enters Stage 2. For each request
that comes in on the connection, the server checks whether you have
@@ -15224,8 +16234,657 @@ Naturally, you should always test your entries in the grant tables (for
example, using @code{mysqlaccess}) to make sure your access privileges are
actually set up the way you think they are.
-@node Privilege changes, Default privileges, Request access, Privilege system
-@section When Privilege Changes Take Effect
+
+@node Access denied, , Request access, Privilege system
+@subsection Causes of @code{Access denied} Errors
+
+If you encounter @code{Access denied} errors when you try to connect to the
+@strong{MySQL} server, the list below indicates some courses of
+action you can take to correct the problem:
+
+@itemize @bullet
+@item
+After installing @strong{MySQL}, did you run the @code{mysql_install_db}
+script to set up the initial grant table contents? If not, do so.
+@xref{Default privileges}. Test the initial privileges by executing
+this command:
+
+@example
+shell> mysql -u root test
+@end example
+
+The server should let you connect without error. You should also make sure
+you have a file @file{user.MYD} in the @strong{MySQL} database directory.
+Ordinarily, this is @file{PATH/var/mysql/user.MYD}, where @code{PATH} is the
+pathname to the @strong{MySQL} installation root.
+
+@item
+After a fresh installation, you should connect to the server and set up
+your users and their access permissions:
+
+@example
+shell> mysql -u root mysql
+@end example
+
+The server should let you connect because the @strong{MySQL} @code{root} user
+has no password initially. That is also a security risk, so setting the
+@code{root} password is something you should do while you're setting up
+your other @strong{MySQL} users.
+
+If you try to connect as @code{root} and get this error:
+
+@example
+Access denied for user: '@@unknown' to database mysql
+@end example
+
+this means that you don't have an entry in the @code{user} table with a
+@code{User} column value of @code{'root'} and that @code{mysqld} cannot
+resolve the hostname for your client. In this case, you must restart the
+server with the @code{--skip-grant-tables} option and edit your
+@file{/etc/hosts} or @file{\windows\hosts} file to add an entry for your
+host.
+
+@item
+If you get an error like the following:
+
+@example
+shell> mysqladmin -u root -pxxxx ver
+Access denied for user: 'root@@localhost' (Using password: YES)
+@end example
+
+it means that you are using an incorrect password. @xref{Passwords}.
+
+If you have forgotten the root password, you can restart @code{mysqld} with
+@code{--skip-grant-tables} to change the password. You can find more
+information about this option later in this section.
+
+If you get the above error even though you haven't specified a password,
+it means that you have a wrong password in some @code{my.ini}
+file. @xref{Option files}. You can avoid using option files with the @code{--no-defaults} option, as follows:
+
+@example
+shell> mysqladmin --no-defaults -u root ver
+@end example
+
+@item
+@cindex @code{mysql_fix_privilege_tables}
+If you updated an existing @strong{MySQL} installation from a version earlier
+than Version 3.22.11 to Version 3.22.11 or later, did you run the
+@code{mysql_fix_privilege_tables} script? If not, do so. The structure of
+the grant tables changed with @strong{MySQL} Version 3.22.11 when the
+@code{GRANT} statement became functional.
+
+@item
+If your privileges seem to have changed in the middle of a session, it may be
+that a superuser has changed them. Reloading the grant tables affects new
+client connections, but it also affects existing connections as indicated in
+@ref{Privilege changes}.
+
+@item
+If you can't get your password to work, remember that you must use
+the @code{PASSWORD()} function if you set the password with the
+@code{INSERT}, @code{UPDATE}, or @code{SET PASSWORD} statements. The
+@code{PASSWORD()} function is unnecessary if you specify the password using
+the @code{GRANT ... IDENTIFIED BY} statement or the @code{mysqladmin
+password} command.
+@xref{Passwords}.
+
+@item
+@code{localhost} is a synonym for your local hostname, and is also the
+default host to which clients try to connect if you specify no host
+explicitly. However, connections to @code{localhost} do not work if you are
+running on a system that uses MIT-pthreads (@code{localhost} connections are
+made using Unix sockets, which are not supported by MIT-pthreads). To avoid
+this problem on such systems, you should use the @code{--host} option to name
+the server host explicitly. This will make a TCP/IP connection to the
+@code{mysqld} server. In this case, you must have your real hostname in
+@code{user} table entries on the server host. (This is true even if you are
+running a client program on the same host as the server.)
+
+@item
+If you get an @code{Access denied} error when trying to connect to the
+database with @code{mysql -u user_name db_name}, you may have a problem
+with the @code{user} table. Check this by executing @code{mysql -u root
+mysql} and issuing this SQL statement:
+
+@example
+mysql> SELECT * FROM user;
+@end example
+
+The result should include an entry with the @code{Host} and @code{User}
+columns matching your computer's hostname and your @strong{MySQL} user name.
+
+@item
+The @code{Access denied} error message will tell you who you are trying
+to log in as, the host from which you are trying to connect, and whether
+or not you were using a password. Normally, you should have one entry in
+the @code{user} table that exactly matches the hostname and user name
+that were given in the error message. For example, if you get an error
+message that contains @code{Using password: NO}, it means that you
+tried to log in without a password.
+
+@item
+If you get the following error when you try to connect from a different host
+than the one on which the @strong{MySQL} server is running, then there is no
+row in the @code{user} table that matches that host:
+
+@example
+Host ... is not allowed to connect to this MySQL server
+@end example
+
+You can fix this by using the command-line tool @code{mysql} (on the
+server host!) to add a row to the @code{user}, @code{db}, or @code{host}
+table for the user/hostname combination from which you are trying to
+connect and then execute @code{mysqladmin flush-privileges}. If you are
+not running @strong{MySQL} Version 3.22 and you don't know the IP number or
+hostname of the machine from which you are connecting, you should put an
+entry with @code{'%'} as the @code{Host} column value in the @code{user}
+table and restart @code{mysqld} with the @code{--log} option on the
+server machine. After trying to connect from the client machine, the
+information in the @strong{MySQL} log will indicate how you really did
+connect. (Then replace the @code{'%'} in the @code{user} table entry
+with the actual hostname that shows up in the log. Otherwise, you'll
+have a system that is insecure.)
+
+Another reason for this error on Linux is that you are using a binary
+@strong{MySQL} version that is compiled with a different glibc version
+than the one you are using. In this case you should either upgrade your
+OS/glibc or download the source @strong{MySQL} version and compile this
+yourself. A source RPM is normally trivial to compile and install, so
+this isn't a big problem.
+
+@item
+If you get an error message where the hostname is not shown or where the
+hostname is an IP, even if you try to connect with a hostname:
+
+@example
+shell> mysqladmin -u root -pxxxx -h some-hostname ver
+Access denied for user: 'root@@' (Using password: YES)
+@end example
+
+This means that @strong{MySQL} got some error when trying to resolve the
+IP to a hostname. In this case you can execute @code{mysqladmin
+flush-hosts} to reset the internal DNS cache. @xref{DNS}.
+
+Some permanent solutions are:
+
+@itemize @minus
+@item
+Try to find out what is wrong with your DNS server and fix this.
+
+@item
+Specify IPs instead of hostnames in the @strong{MySQL} privilege tables.
+
+@item
+Start @code{mysqld} with @code{--skip-name-resolve}.
+
+@item
+Start @code{mysqld} with @code{--skip-host-cache}.
+
+@item
+Connect to @code{localhost} if you are running the server and the client
+on the same machine.
+
+@item
+Put the client machine names in @code{/etc/hosts}.
+@end itemize
+
+@item
+If @code{mysql -u root test} works but @code{mysql -h your_hostname -u root
+test} results in @code{Access denied}, then you may not have the correct name
+for your host in the @code{user} table. A common problem here is that the
+@code{Host} value in the user table entry specifies an unqualified hostname,
+but your system's name resolution routines return a fully qualified domain
+name (or vice-versa). For example, if you have an entry with host
+@code{'tcx'} in the @code{user} table, but your DNS tells @strong{MySQL} that
+your hostname is @code{'tcx.subnet.se'}, the entry will not work. Try adding
+an entry to the @code{user} table that contains the IP number of your host as
+the @code{Host} column value. (Alternatively, you could add an entry to the
+@code{user} table with a @code{Host} value that contains a wild card---for
+example, @code{'tcx.%'}. However, use of hostnames ending with @samp{%} is
+@emph{insecure} and is @emph{not} recommended!)
+
+@item
+If @code{mysql -u user_name test} works but @code{mysql -u user_name
+other_db_name} doesn't work, you don't have an entry for @code{other_db_name}
+listed in the @code{db} table.
+
+@item
+If @code{mysql -u user_name db_name} works when executed on the server
+machine, but @code{mysql -h host_name -u user_name db_name} doesn't work when
+executed on another client machine, you don't have the client machine listed
+in the @code{user} table or the @code{db} table.
+
+@item
+If you can't figure out why you get @code{Access denied}, remove from the
+@code{user} table all entries that have @code{Host} values containing
+wild cards (entries that contain @samp{%} or @samp{_}). A very common error
+is to insert a new entry with @code{Host}=@code{'%'} and
+@code{User}=@code{'some_user'}, thinking that this will allow you to specify
+@code{localhost} to connect from the same machine. The reason that this
+doesn't work is that the default privileges include an entry with
+@code{Host}=@code{'localhost'} and @code{User}=@code{''}. Because that entry
+has a @code{Host} value @code{'localhost'} that is more specific than
+@code{'%'}, it is used in preference to the new entry when connecting from
+@code{localhost}! The correct procedure is to insert a second entry with
+@code{Host}=@code{'localhost'} and @code{User}=@code{'some_user'}, or to
+remove the entry with @code{Host}=@code{'localhost'} and
+@code{User}=@code{''}.
+
+@item
+If you get the following error, you may have a problem with the @code{db} or
+@code{host} table:
+
+@example
+Access to database denied
+@end example
+
+If the entry selected from the @code{db} table has an empty value in the
+@code{Host} column, make sure there are one or more corresponding entries in
+the @code{host} table specifying which hosts the @code{db} table entry
+applies to.
+
+If you get the error when using the SQL commands @code{SELECT ...
+INTO OUTFILE} or @code{LOAD DATA INFILE}, your entry in the @code{user} table
+probably doesn't have the @strong{file} privilege enabled.
+
+@item
+@cindex configuration files
+@cindex environment variables
+@tindex .my.cnf file
+Remember that client programs will use connection parameters specified
+in configuration files or environment variables. @xref{Environment
+variables}. If a client seems to be sending the wrong default
+connection parameters when you don't specify them on the command line,
+check your environment and the @file{.my.cnf} file in your home
+directory. You might also check the system-wide @strong{MySQL}
+configuration files, though it is far less likely that client connection
+parameters will be specified there. @xref{Option files}. If you get
+@code{Access denied} when you run a client without any options, make
+sure you haven't specified an old password in any of your option files!
+@xref{Option files}.
+
+@item
+If you make changes to the grant tables directly (using an @code{INSERT} or
+@code{UPDATE} statement) and your changes seem to be ignored, remember
+that you must issue a @code{FLUSH PRIVILEGES} statement or execute a
+@code{mysqladmin flush-privileges} command to cause the server to re-read
+the privilege tables. Otherwise your changes have no effect until the
+next time the server is restarted. Remember that after you set the
+@code{root} password with an @code{UPDATE} command, you won't need to
+specify it until after you flush the privileges, because the server
+won't know you've changed the password yet! (A short sketch of this
+sequence appears after this list.)
+
+@item
+If you have access problems with a Perl, PHP, Python, or ODBC program, try to
+connect to the server with @code{mysql -u user_name db_name} or @code{mysql
+-u user_name -pyour_pass db_name}. If you are able to connect using the
+@code{mysql} client, there is a problem with your program and not with the
+access privileges. (Note that there is no space between @code{-p} and the
+password; you can also use the @code{--password=your_pass} syntax to specify
+the password. If you use the @code{-p} option alone, @strong{MySQL} will
+prompt you for the password.)
+
+@item
+For testing, start the @code{mysqld} daemon with the
+@code{--skip-grant-tables} option. Then you can change the @strong{MySQL}
+grant tables and use the @code{mysqlaccess} script to check whether or not
+your modifications have the desired effect. When you are satisfied with your
+changes, execute @code{mysqladmin flush-privileges} to tell the @code{mysqld}
+server to start using the new grant tables. @strong{Note:} Reloading the
+grant tables overrides the @code{--skip-grant-tables} option. This allows
+you to tell the server to begin using the grant tables again without bringing
+it down and restarting it.
+
+@item
+If everything else fails, start the @code{mysqld} daemon with a debugging
+option (for example, @code{--debug=d,general,query}). This will print host and
+user information about attempted connections, as well as information about
+each command issued. @xref{Making trace files}.
+
+@item
+If you have any other problems with the @strong{MySQL} grant tables and
+feel you must post the problem to the mailing list, always provide a
+dump of the @strong{MySQL} grant tables. You can dump the tables with
+the @code{mysqldump mysql} command. As always, post your problem using
+the @code{mysqlbug} script. @xref{Bug reports}. In some cases you may need
+to restart @code{mysqld} with @code{--skip-grant-tables} to run
+@code{mysqldump}.
+@end itemize
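+
+As a short sketch of the point above about making direct changes to the
+grant tables (the password value is just a placeholder), changing the
+@code{root} password by hand looks like this:
+
+@example
+shell> mysql -u root mysql
+mysql> UPDATE user SET Password=PASSWORD('new_password')
+    ->        WHERE User='root';
+mysql> FLUSH PRIVILEGES;
+@end example
+
+Until the @code{FLUSH PRIVILEGES} statement (or a @code{mysqladmin
+flush-privileges} command) is issued, the server keeps using the old
+password.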
+
+
+@node User Account Management, Disaster Prevention, Privilege system, MySQL Database Administration
+@section MySQL User Account Management
+
+@menu
+* GRANT::
+* User names::
+* Privilege changes::
+* Default privileges::
+* Adding users::
+* Passwords::
+* Password security::
+@end menu
+
+
+@node GRANT, User names, User Account Management, User Account Management
+@subsection @code{GRANT} and @code{REVOKE} Syntax
+
+@findex GRANT
+@findex REVOKE
+
+@cindex privileges, granting
+@cindex privileges, revoking
+@cindex global privileges
+@cindex revoking, privileges
+@cindex granting, privileges
+
+@example
+GRANT priv_type [(column_list)] [, priv_type [(column_list)] ...]
+ ON @{tbl_name | * | *.* | db_name.*@}
+ TO user_name [IDENTIFIED BY 'password']
+ [, user_name [IDENTIFIED BY 'password'] ...]
+ [WITH GRANT OPTION]
+
+REVOKE priv_type [(column_list)] [, priv_type [(column_list)] ...]
+ ON @{tbl_name | * | *.* | db_name.*@}
+ FROM user_name [, user_name ...]
+@end example
+
+@code{GRANT} is implemented in @strong{MySQL} Version 3.22.11 or later. For
+earlier @strong{MySQL} versions, the @code{GRANT} statement does nothing.
+
+The @code{GRANT} and @code{REVOKE} commands allow system administrators
+to create users and grant and revoke rights to @strong{MySQL} users at
+four privilege levels:
+
+@table @strong
+@item Global level
+Global privileges apply to all databases on a given server. These privileges
+are stored in the @code{mysql.user} table.
+
+@item Database level
+Database privileges apply to all tables in a given database. These privileges
+are stored in the @code{mysql.db} and @code{mysql.host} tables.
+
+@item Table level
+Table privileges apply to all columns in a given table. These privileges are
+stored in the @code{mysql.tables_priv} table.
+
+@item Column level
+Column privileges apply to single columns in a given table. These privileges are
+stored in the @code{mysql.columns_priv} table.
+@end table
+
+If you grant privileges to a user that doesn't exist, that user is created.
+For examples of how @code{GRANT} works, see @ref{Adding users}.
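+
+As a quick sketch (using hypothetical database, table, column, and user
+names), grants at each of the four levels could look like this:
+
+@example
+GRANT RELOAD ON *.* TO admin@@localhost;                          # global level
+GRANT ALL ON db_name.* TO someuser@@localhost;                    # database level
+GRANT SELECT,INSERT ON db_name.tbl_name TO someuser@@localhost;   # table level
+GRANT SELECT (col_name) ON db_name.tbl_name TO someuser@@localhost; # column level
+@end example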
+
+For the @code{GRANT} and @code{REVOKE} statements, @code{priv_type} may be
+specified as any of the following:
+
+@example
+ALL PRIVILEGES FILE RELOAD
+ALTER INDEX SELECT
+CREATE INSERT SHUTDOWN
+DELETE PROCESS UPDATE
+DROP REFERENCES USAGE
+@end example
+
+@code{ALL} is a synonym for @code{ALL PRIVILEGES}. @code{REFERENCES} is not
+yet implemented. @code{USAGE} is currently a synonym for ``no privileges.''
+It can be used when you want to create a user that has no privileges.
+
+To revoke the @strong{grant} privilege from a user, use a @code{priv_type}
+value of @code{GRANT OPTION}:
+
+@example
+REVOKE GRANT OPTION ON ... FROM ...;
+@end example
+
+The only @code{priv_type} values you can specify for a table are @code{SELECT},
+@code{INSERT}, @code{UPDATE}, @code{DELETE}, @code{CREATE}, @code{DROP},
+@code{GRANT}, @code{INDEX}, and @code{ALTER}.
+
+The only @code{priv_type} values you can specify for a column (that is, when
+you use a @code{column_list} clause) are @code{SELECT}, @code{INSERT}, and
+@code{UPDATE}.
+
+You can set global privileges by using @code{ON *.*} syntax. You can set
+database privileges by using @code{ON db_name.*} syntax. If you specify
+@code{ON *} and you have a current database, you will set the privileges for
+that database. (@strong{WARNING:} If you specify @code{ON *} and you
+@emph{don't} have a current database, you will affect the global privileges!)
+
+In order to accommodate granting rights to users from arbitrary hosts,
+@strong{MySQL} supports specifying the @code{user_name} value in the form
+@code{user@@host}. If you want to specify a @code{user} string
+containing special characters (such as @samp{-}), or a @code{host} string
+containing special characters or wild-card characters (such as @samp{%}), you
+can quote the user or host name (for example, @code{'test-user'@@'test-hostname'}).
+
+You can specify wild cards in the hostname. For example,
+@code{user@@"%.loc.gov"} applies to @code{user} for any host in the
+@code{loc.gov} domain, and @code{user@@"144.155.166.%"} applies to @code{user}
+for any host in the @code{144.155.166} class C subnet.
+
+The simple form @code{user} is a synonym for @code{user@@"%"}.
+@strong{NOTE:} If you allow anonymous users to connect to the @strong{MySQL}
+server (which is the default), you should also add all local users as
+@code{user@@localhost} because otherwise the anonymous user entry for the
+local host in the @code{mysql.user} table will be used when the user tries to
+log into the @strong{MySQL} server from the local machine! Anonymous users
+are defined by inserting entries with @code{User=''} into the
+@code{mysql.user} table. You can verify if this applies to you by executing
+this query:
+
+@example
+mysql> SELECT Host,User FROM mysql.user WHERE User='';
+@end example
+
+For the moment, @code{GRANT} only supports host, table, database, and
+column names up to 60 characters long. A user name can be up to 16
+characters.
+
+The privileges for a table or column are formed from the
+logical OR of the privileges at each of the four privilege
+levels. For example, if the @code{mysql.user} table specifies that a
+user has a global @strong{select} privilege, this can't be denied by an
+entry at the database, table, or column level.
+
+The privileges for a column can be calculated as follows:
+
+@example
+global privileges
+OR (database privileges AND host privileges)
+OR table privileges
+OR column privileges
+@end example
+
+In most cases, you grant rights to a user at only one of the privilege
+levels, so life isn't normally as complicated as above. The details of the
+privilege-checking procedure are presented in
+@ref{Privilege system}.
+
+If you grant privileges for a user/hostname combination that does not exist
+in the @code{mysql.user} table, an entry is added and remains there until
+deleted with a @code{DELETE} command. In other words, @code{GRANT} may
+create @code{user} table entries, but @code{REVOKE} will not remove them;
+you must do that explicitly using @code{DELETE}.
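+
+For example, a sketch of removing such an entry completely (with a
+hypothetical user name):
+
+@example
+mysql> REVOKE ALL PRIVILEGES ON db_name.* FROM someuser@@localhost;
+mysql> DELETE FROM mysql.user WHERE User='someuser' AND Host='localhost';
+mysql> FLUSH PRIVILEGES;
+@end example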
+
+@cindex passwords, setting
+In @strong{MySQL} Version 3.22.12 or later,
+if a new user is created or if you have global grant privileges, the user's
+password will be set to the password specified by the @code{IDENTIFIED BY}
+clause, if one is given. If the user already had a password, it is replaced
+by the new one.
+
+@strong{WARNING:} If you create a new user but do not specify an
+@code{IDENTIFIED BY} clause, the user has no password. This is insecure.
+
+Passwords can also be set with the @code{SET PASSWORD} command.
+@xref{SET OPTION, , @code{SET OPTION}}.
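+
+For example (with a hypothetical user name):
+
+@example
+mysql> SET PASSWORD FOR someuser@@"%" = PASSWORD('new_password');
+@end example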
+
+If you grant privileges for a database, an entry in the @code{mysql.db}
+table is created if needed. When all privileges for the database have been
+removed with @code{REVOKE}, this entry is deleted.
+
+If a user doesn't have any privileges on a table, the table is not displayed
+when the user requests a list of tables (for example, with a @code{SHOW TABLES}
+statement).
+
+The @code{WITH GRANT OPTION} clause gives the user the ability to give
+to other users any privileges the user has at the specified privilege level.
+You should be careful to whom you give the @strong{grant} privilege, as two
+users with different privileges may be able to combine their privileges!
+
+You cannot grant another user a privilege you don't have yourself;
+the @strong{grant} privilege allows you to give away only those privileges
+you possess.
+
+Be aware that when you grant a user the @strong{grant} privilege at a
+particular privilege level, any privileges the user already possesses (or
+is given in the future!) at that level are also grantable by that user.
+Suppose you grant a user the @strong{insert} privilege on a database. If
+you then grant the @strong{select} privilege on the database and specify
+@code{WITH GRANT OPTION}, the user can give away not only the @strong{select}
+privilege, but also @strong{insert}. If you then grant the @strong{update}
+privilege to the user on the database, the user can give away the
+@strong{insert}, @strong{select}, and @strong{update} privileges.
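+
+A sketch of the scenario above (hypothetical names):
+
+@example
+GRANT INSERT ON db_name.* TO someuser@@localhost;
+GRANT SELECT ON db_name.* TO someuser@@localhost WITH GRANT OPTION;
+GRANT UPDATE ON db_name.* TO someuser@@localhost;
+# someuser can now grant SELECT, INSERT, and UPDATE on db_name to others
+@end example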
+
+You should not grant @strong{alter} privileges to a normal user. If you
+do that, the user can try to subvert the privilege system by renaming
+tables!
+
+Note that if you are using table or column privileges for even one user, the
+server examines table and column privileges for all users and this will slow
+down @strong{MySQL} a bit.
+
+When @code{mysqld} starts, all privileges are read into memory.
+Database, table, and column privileges take effect at once, and
+user-level privileges take effect the next time the user connects.
+Modifications to the grant tables that you perform using @code{GRANT} or
+@code{REVOKE} are noticed by the server immediately.
+If you modify the grant tables manually (using @code{INSERT}, @code{UPDATE},
+etc.), you should execute a @code{FLUSH PRIVILEGES} statement or run
+@code{mysqladmin flush-privileges} to tell the server to reload the grant
+tables.
+@xref{Privilege changes}.
+
+@cindex ANSI SQL, differences from
+The biggest differences between the ANSI SQL and @strong{MySQL} versions of
+@code{GRANT} are:
+
+@itemize @bullet
+@item
+In @strong{MySQL}, privileges are given for a username + hostname combination,
+not only for a username.
+
+@item
+ANSI SQL doesn't have global or database-level privileges, and ANSI SQL
+doesn't support all privilege types that @strong{MySQL} supports.
+@strong{MySQL} doesn't support the ANSI SQL @code{TRIGGER}, @code{EXECUTE} or
+@code{UNDER} privileges.
+
+@item
+ANSI SQL privileges are structured in a hierarchical manner. If you remove
+a user, all privileges the user has granted are revoked. In
+@strong{MySQL} the granted privileges are not automatically revoked;
+you have to revoke them yourself if needed.
+
+@item
+If in @strong{MySQL} you have the @code{INSERT} privilege on only some of the
+columns in a table, you can still execute @code{INSERT} statements on the
+table; the columns for which you don't have the @code{INSERT} privilege
+will be set to their default values. ANSI SQL requires you to have the
+@code{INSERT} privilege on all columns.
+
+@item
+When you drop a table in ANSI SQL, all privileges for the table are revoked.
+If you revoke a privilege in ANSI SQL, all privileges that were granted based
+on this privilege are also revoked. In @strong{MySQL}, privileges can be
+dropped only with explicit @code{REVOKE} commands or by manipulating the
+@strong{MySQL} grant tables.
+@end itemize
+
+
+@node User names, Privilege changes, GRANT, User Account Management
+@subsection MySQL User Names and Passwords
+
+@cindex user names, and passwords
+@cindex passwords, for users
+
+There are several distinctions between the way user names and passwords are
+used by @strong{MySQL} and the way they are used by Unix or Windows:
+
+@itemize @bullet
+@item
+User names, as used by @strong{MySQL} for authentication purposes, have
+nothing to do with Unix user names (login names) or Windows user names. Most
+@strong{MySQL} clients by default try to log in using the current Unix user
+name as the @strong{MySQL} user name, but that is for convenience only.
+Client programs allow a different name to be specified with the @code{-u} or
+@code{--user} options. This means that you can't make a database secure in
+any way unless all @strong{MySQL} user names have passwords. Anyone may
+attempt to connect to the server using any name, and they will succeed if
+they specify any name that doesn't have a password.
+
+@item
+@strong{MySQL} user names can be up to 16 characters long; Unix user names
+typically are limited to 8 characters.
+
+@item
+@strong{MySQL} passwords have nothing to do with Unix passwords. There is no
+necessary connection between the password you use to log in to a Unix machine
+and the password you use to access a database on that machine.
+
+@item
+@strong{MySQL} encrypts passwords using a different algorithm than the
+one used during the Unix login process. See the descriptions of the
+@code{PASSWORD()} and @code{ENCRYPT()} functions in @ref{Miscellaneous
+functions}. Note that even though the password is stored 'scrambled',
+knowing your 'scrambled' password is enough to be able to connect to
+the @strong{MySQL} server!
+@end itemize
+
+@strong{MySQL} users and their privileges are normally created with the
+@code{GRANT} command. @xref{GRANT}.
+
+When you log in to a @strong{MySQL} server with a command-line client, you
+should specify the password with @code{--password=your-password}.
+@xref{Connecting}.
+
+@example
+mysql --user=monty --password=guess database_name
+@end example
+
+If you want the client to prompt for a password, use
+@code{--password} without any argument:
+
+@example
+mysql --user=monty --password database_name
+@end example
+
+or the short form:
+
+@example
+mysql -u monty -p database_name
+@end example
+
+Note that in the last example the password is @strong{NOT} 'database_name'.
+
+If you want to use the @code{-p} option to supply a password, do it like this:
+
+@example
+mysql -u monty -pguess database_name
+@end example
+
+On some systems, the library call that @strong{MySQL} uses to prompt for a
+password automatically truncates the password to 8 characters. Internally
+@strong{MySQL} doesn't have any limit on the length of the password.
+
+
+@node Privilege changes, Default privileges, User names, User Account Management
+@subsection When Privilege Changes Take Effect
When @code{mysqld} starts, all grant table contents are read into memory and
become effective at that point.
@@ -15257,6 +16916,10 @@ command.
Global privilege changes and password changes take effect the next time the
client connects.
+
+@node Default privileges, Adding users, Privilege changes, User Account Management
+@subsection Setting Up the Initial MySQL Privileges
+
@cindex privileges, default
@cindex default, privileges
@cindex root password
@@ -15264,8 +16927,6 @@ client connects.
@cindex users, root
@cindex anonymous user
@cindex password, root user
-@node Default privileges, Adding users, Privilege changes, Privilege system
-@section Setting Up the Initial MySQL Privileges
After installing @strong{MySQL}, you set up the initial access privileges by
running @code{scripts/mysql_install_db}.
@@ -15361,13 +17022,16 @@ you should NOT delete the @file{.frm} files. If you accidentally do this,
you should copy them back from your @strong{MySQL} distribution before
running @code{mysql_install_db}.
+
+@node Adding users, Passwords, Default privileges, User Account Management
+@subsection Adding New Users to MySQL
+
+@findex GRANT statement
+@findex statements, GRANT
+
@cindex privileges, adding
@cindex adding, new user privileges
@cindex user privileges, adding
-@findex GRANT statement
-@findex statements, GRANT
-@node Adding users, Passwords, Default privileges, Privilege system
-@section Adding New Users to MySQL
You can add users two different ways: by using @code{GRANT} statements
or by manipulating the @strong{MySQL} grant tables directly. The
@@ -15553,12 +17217,15 @@ You can find these utilities in the
@uref{http://www.mysql.com/Downloads/Contrib/,Contrib directory of the @strong{MySQL}
Website}.
-@cindex passwords, setting
+
+@node Passwords, Password security, Adding users, User Account Management
+@subsection Setting Up Passwords
+
@findex PASSWORD()
@findex SET PASSWORD statement
+
+@cindex passwords, setting
@cindex setting, passwords
-@node Passwords, Access denied, Adding users, Privilege system
-@section Setting Up Passwords
In most cases you should use @code{GRANT} to set up your users/passwords,
so the following only applies for advanced users. @xref{GRANT, , @code{GRANT}}.
@@ -15628,319 +17295,9401 @@ your Unix password and your @strong{MySQL} password are the same, that
@code{PASSWORD()} will result in the same encrypted value as is stored in the
Unix password file. @xref{User names}.
-@node Access denied, , Passwords, Privilege system
-@section Causes of @code{Access denied} Errors
-If you encounter @code{Access denied} errors when you try to connect to the
-@strong{MySQL} server, the list below indicates some courses of
-action you can take to correct the problem:
+@node Password security, , Passwords, User Account Management
+@subsection Keeping Your Password Secure
+
+It is inadvisable to specify your password in a way that exposes it to
+discovery by other users. The methods you can use to specify your password
+when you run client programs are listed below, along with an assessment of
+the risks of each method:
@itemize @bullet
@item
-After installing @strong{MySQL}, did you run the @code{mysql_install_db}
-script to set up the initial grant table contents? If not, do so.
-@xref{Default privileges}. Test the initial privileges by executing
-this command:
+Never give a normal user access to the @code{mysql.user} table. Knowing
+the encrypted password for a user makes it possible to log in as that
+user. The passwords are only scrambled so that one shouldn't be able to
+see the real password you used (if you happen to use a similar password
+with your other applications).
+
+@item
+Use a @code{-pyour_pass} or @code{--password=your_pass} option on the command
+line. This is convenient but insecure, because your password becomes visible
+to system status programs (such as @code{ps}) that may be invoked by other
+users to display command lines. (@strong{MySQL} clients typically overwrite
+the command-line argument with zeroes during their initialization sequence,
+but there is still a brief interval during which the value is visible.)
+
+@item
+Use a @code{-p} or @code{--password} option (with no @code{your_pass} value
+specified). In this case, the client program solicits the password from
+the terminal:
+@findex -p option
+@findex -password option
@example
-shell> mysql -u root test
+shell> mysql -u user_name -p
+Enter password: ********
@end example
-The server should let you connect without error. You should also make sure
-you have a file @file{user.MYD} in the @strong{MySQL} database directory.
-Ordinarily, this is @file{PATH/var/mysql/user.MYD}, where @code{PATH} is the
-pathname to the @strong{MySQL} installation root.
+The @samp{*} characters represent your password.
+
+It is more secure to enter your password this way than to specify it on the
+command line because it is not visible to other users. However, this method
+of entering a password is suitable only for programs that you run
+interactively. If you want to invoke a client from a script that runs
+non-interactively, there is no opportunity to enter the password from the
+terminal. On some systems, you may even find that the first line of your
+script is read and interpreted (incorrectly) as your password!
@item
-After a fresh installation, you should connect to the server and set up
-your users and their access permissions:
+@tindex .my.cnf file
+Store your password in a configuration file. For example, you can list your
+password in the @code{[client]} section of the @file{.my.cnf} file in your
+home directory:
@example
-shell> mysql -u root mysql
+[client]
+password=your_pass
@end example
-The server should let you connect because the @strong{MySQL} @code{root} user
-has no password initially. That is also a security risk, so setting the
-@code{root} password is something you should do while you're setting up
-your other @strong{MySQL} users.
+If you store your password in @file{.my.cnf}, the file should not be group or
+world readable or writable. Make sure the file's access mode is @code{400}
+or @code{600}.
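+For example, on Unix you can restrict access to it like this:
+
+@example
+shell> chmod 600 $HOME/.my.cnf
+@end example
+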
-If you try to connect as @code{root} and get this error:
+@xref{Option files}.
+@item
+You can store your password in the @code{MYSQL_PWD} environment variable, but
+this method must be considered extremely insecure and should not be used.
+Some versions of @code{ps} include an option to display the environment of
+running processes; your password will be in plain sight for all to see if
+you set @code{MYSQL_PWD}. Even on systems without such a version of
+@code{ps}, it is unwise to assume there is no other method to observe process
+environments. @xref{Environment variables}.
+@end itemize
+
+All in all, the safest methods are to have the client program prompt for the
+password or to specify the password in a properly protected @file{.my.cnf}
+file.
+
+
+@node Disaster Prevention, Database Administration, User Account Management, MySQL Database Administration
+@section Disaster Prevention and Recovery
+
+@menu
+* Backup::
+* BACKUP TABLE::
+* RESTORE TABLE::
+* CHECK TABLE::
+* REPAIR TABLE::
+* Table maintenance::
+* Maintenance regimen::
+* Table-info::
+@end menu
+
+
+@node Backup, BACKUP TABLE, Disaster Prevention, Disaster Prevention
+@subsection Database Backups
+
+@cindex databases, backups
+@cindex backups
+
+Because @strong{MySQL} tables are stored as files, it is easy to do a
+backup. To get a consistent backup, do a @code{LOCK TABLES} on the
+relevant tables followed by @code{FLUSH TABLES} for the tables.
+@xref{LOCK TABLES, , @code{LOCK TABLES}}.
+@xref{FLUSH, , @code{FLUSH}}.
+You only need a read lock; this allows other threads to continue to
+query the tables while you are making a copy of the files in the
+database directory. The @code{FLUSH TABLES} statement is needed to ensure
+that all active index pages are written to disk before you start the backup.
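+
+A minimal sketch of this sequence (with a hypothetical table name):
+
+@example
+mysql> LOCK TABLES tbl_name READ;
+mysql> FLUSH TABLES tbl_name;
+(copy the tbl_name.* files at the file system level here)
+mysql> UNLOCK TABLES;
+@end example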
+
+If you want to make an SQL-level backup of a table, you can use
+@code{SELECT INTO OUTFILE} or @code{BACKUP TABLE}. @xref{SELECT}.
+@xref{BACKUP TABLE}.
+
+Another way to back up a database is to use the @code{mysqldump} program or
+the @code{mysqlhotcopy} script. @xref{mysqldump, , @code{mysqldump}}.
+@xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
+
+@enumerate
+@item
+Do a full backup of your databases:
@example
-Access denied for user: '@@unknown' to database mysql
+shell> mysqldump --tab=/path/to/some/dir --opt --full
+
+or
+
+shell> mysqlhotcopy database /path/to/some/dir
@end example
-this means that you don't have an entry in the @code{user} table with a
-@code{User} column value of @code{'root'} and that @code{mysqld} cannot
-resolve the hostname for your client. In this case, you must restart the
-server with the @code{--skip-grant-tables} option and edit your
-@file{/etc/hosts} or @file{\windows\hosts} file to add an entry for your
-host.
+You can also simply copy all table files (@file{*.frm}, @file{*.MYD}, and
+@file{*.MYI} files) as long as the server isn't updating anything.
+The @code{mysqlhotcopy} script uses this method.
@item
-If you get an error like the following:
+@cindex log files, names
+Stop @code{mysqld} if it's running, then start it with the
+@code{--log-update[=file_name]} option. @xref{Update log}. The update
+log file(s) provide you with the information you need to replicate
+changes to the database that are made subsequent to the point at which
+you executed @code{mysqldump}.
+@end enumerate
+
+If you have to restore something, try to recover your tables using
+@code{REPAIR TABLE} or @code{myisamchk -r} first. That should work in
+99.9% of all cases. If @code{myisamchk} fails, try the following
+procedure. (This will only work if you have started @strong{MySQL} with
+@code{--log-update}. @xref{Update log}.)
+
+@enumerate
+@item
+Restore the original @code{mysqldump} backup.
+@item
+Execute the following command to re-run the updates in the binary log:
@example
-shell> mysqladmin -u root -pxxxx ver
-Access denied for user: 'root@@localhost' (Using password: YES)
+shell> mysqlbinlog hostname-bin.[0-9]* | mysql
@end example
-It means that you are using a wrong password. @xref{Passwords}.
+If you are using the update log you can use:
-If you have forgot the root password, you can restart @code{mysqld} with
-@code{--skip-grant-tables} to change the password. You can find more
-about this option later on in this manual section.
+@example
+shell> ls -1 -t -r hostname.[0-9]* | xargs cat | mysql
+@end example
+@end enumerate
-If you get the above error even if you haven't specified a password,
-this means that you a wrong password in some @code{my.ini}
-file. @xref{Option files}. You can avoid using option files with the @code{--no-defaults} option, as follows:
+@code{ls} is used to get all the update log files in the right order.
+
+You can also do selective backups with @code{SELECT * INTO OUTFILE 'file_name'
+FROM tbl_name} and restore with @code{LOAD DATA INFILE 'file_name' REPLACE
+...}. To avoid duplicate records, you need a @code{PRIMARY KEY} or a
+@code{UNIQUE} key in the table. The @code{REPLACE} keyword causes old records
+to be replaced with new ones when a new record duplicates an old record on
+a unique key value.
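+
+A sketch of such a selective backup and restore (hypothetical table name
+and file path):
+
+@example
+mysql> SELECT * INTO OUTFILE '/tmp/tbl_name.txt' FROM tbl_name;
+mysql> LOAD DATA INFILE '/tmp/tbl_name.txt' REPLACE INTO TABLE tbl_name;
+@end example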
+
+If you run into performance problems when making backups on your system, you
+can solve this by setting up replication and doing the backups on the slave
+instead of on the master. @xref{Replication Intro}.
+
+If you are using a Veritas file system, you can do:
+
+@enumerate
+@item
+From a client (for example, a Perl script), execute @code{FLUSH TABLES WITH READ LOCK}.
+
+@item
+Fork a shell or execute in another client @code{mount vxfs snapshot}.
+
+@item
+Execute in the first client @code{UNLOCK TABLES}
+
+@item
+Copy files from snapshot
+
+@item
+Unmount snapshot
+@end enumerate
+
+
+@node BACKUP TABLE, RESTORE TABLE, Backup, Disaster Prevention
+@subsection @code{BACKUP TABLE} Syntax
+
+@findex BACKUP TABLE
+
+@cindex backups, database
@example
-shell> mysqladmin --no-defaults -u root ver
+BACKUP TABLE tbl_name[,tbl_name...] TO '/path/to/backup/directory'
@end example
+Makes a copy of the minimum set of table files needed to restore the
+table to the backup directory. Currently this only works for @code{MyISAM}
+tables. For a @code{MyISAM} table, it copies the @code{.frm} (definition) and
+@code{.MYD} (data) files. The index file can be rebuilt from those two.
+
+Before using this command, please see @ref{Backup}.
+
+During the backup, a read lock will be held for each table, one at a time,
+as they are being backed up. If you want to back up several tables as
+a snapshot, you must first issue @code{LOCK TABLES} to obtain a read
+lock for each table in the group.
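+
+For example (hypothetical table names and backup path):
+
+@example
+mysql> LOCK TABLES tbl1 READ, tbl2 READ;
+mysql> BACKUP TABLE tbl1,tbl2 TO '/path/to/backup/directory';
+mysql> UNLOCK TABLES;
+@end example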
+
+The command returns a table with the following columns:
+
+@multitable @columnfractions .35 .65
+@item @strong{Column} @tab @strong{Value}
+@item Table @tab Table name
+@item Op @tab Always ``backup''
+@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
+@item Msg_text @tab The message.
+@end multitable
+
+Note that @code{BACKUP TABLE} is only available in @strong{MySQL}
+version 3.23.25 and later.
+
+
+@node RESTORE TABLE, CHECK TABLE, BACKUP TABLE, Disaster Prevention
+@subsection @code{RESTORE TABLE} Syntax
+
+@findex RESTORE TABLE
+
+@example
+RESTORE TABLE tbl_name[,tbl_name...] FROM '/path/to/backup/directory'
+@end example
+
+Restores the table(s) from the backup that was made with
+@code{BACKUP TABLE}. Existing tables will not be overwritten; if you
+try to restore over an existing table, you will get an error. Restoring
+will take longer than backing up due to the need to rebuild the index. The
+more keys you have, the longer it is going to take. Just as with
+@code{BACKUP TABLE}, this currently only works for @code{MyISAM} tables.
+
+
+The command returns a table with the following columns:
+
+@multitable @columnfractions .35 .65
+@item @strong{Column} @tab @strong{Value}
+@item Table @tab Table name
+@item Op @tab Always ``restore''
+@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
+@item Msg_text @tab The message.
+@end multitable
+
+
+@node CHECK TABLE, REPAIR TABLE, RESTORE TABLE, Disaster Prevention
+@subsection @code{CHECK TABLE} Syntax
+
+@findex CHECK TABLE
+
+@example
+CHECK TABLE tbl_name[,tbl_name...] [option [option...]]
+
+option = QUICK | FAST | MEDIUM | EXTEND | CHANGED
+@end example
+
+@code{CHECK TABLE} only works on @code{MyISAM} tables, where it is the same
+thing as running @code{myisamchk -m table_name} on the table.
+
+If you don't specify any option, @code{MEDIUM} is used.
+
+Checks the table(s) for errors. For @code{MyISAM} tables, the key statistics
+are updated. The command returns a table with the following columns:
+
+@multitable @columnfractions .35 .65
+@item @strong{Column} @tab @strong{Value}
+@item Table @tab Table name.
+@item Op @tab Always ``check''.
+@item Msg_type @tab One of @code{status}, @code{error}, @code{info}, or @code{warning}.
+@item Msg_text @tab The message.
+@end multitable
+
+Note that you can get many rows of information for each checked
+table. The last row will be of @code{Msg_type status} and should
+normally be @code{OK}. If you don't get @code{OK} or @code{Not
+checked}, you should normally run a repair of the table. @xref{Table
+maintenance}. @code{Not checked} means that the given check @code{TYPE}
+told @strong{MySQL} that there wasn't any need to check the table.
+
+The different check types stand for the following:
+
+@multitable @columnfractions .20 .80
+@item @strong{Type} @tab @strong{Meaning}
+@item @code{QUICK} @tab Don't scan the rows to check for wrong links.
+@item @code{FAST} @tab Only check tables which haven't been closed properly.
+@item @code{CHANGED} @tab Only check tables which have been changed since last check or haven't been closed properly.
+@item @code{MEDIUM} @tab Scan rows to verify that deleted links are ok. This also calculates a key checksum for the rows and verifies this against a calculated checksum for the keys.
+@item @code{EXTENDED} @tab Do a full key lookup for all keys for each row. This ensures that the table is 100 % consistent, but will take a long time!
+@end multitable
+
+For dynamic-sized @code{MyISAM} tables, a started check will always
+do a @code{MEDIUM} check. For static-sized rows, the row scan is skipped
+for @code{QUICK} and @code{FAST}, as the rows are very seldom corrupted.
+
+You can combine check options as in:
+
+@example
+CHECK TABLE test_table FAST QUICK;
+@end example
+
+This would do only a quick check of the table, and only if it wasn't closed properly.
+
+@strong{NOTE:} In some cases @code{CHECK TABLE} will change the
+table! This happens if the table is marked as 'corrupted' or 'not
+closed properly' but @code{CHECK TABLE} didn't find any problems in the
+table. In this case @code{CHECK TABLE} will mark the table as ok.
+
+If a table is corrupted, it's most likely that the problem is in
+the indexes and not in the data part. All of the above check types
+check the indexes thoroughly and should thus find most errors.
+
+If you just want to check a table that you assume is ok, you should use
+no check options or the @code{QUICK} option. The latter should be used
+when you are in a hurry and can take the very small risk that
+@code{QUICK} doesn't find an error in the data file. (In most cases,
+under normal usage, @strong{MySQL} should find any error in the data
+file. If that happens, the table will be marked as 'corrupted',
+in which case the table can't be used until it's repaired.)
+
+@code{FAST} and @code{CHANGED} are mostly intended to be used from a
+script (for example, to be executed from cron) if you want to check your
+tables from time to time. In most cases @code{FAST} is to be preferred
+over @code{CHANGED}. (The only case when it isn't is when you suspect
+that you have found a bug in the @code{MyISAM} code.)
+
+@code{EXTENDED} is only to be used after you have run a normal check but
+still get strange errors from a table when @strong{MySQL} tries to
+update a row or find a row by key (this is VERY unlikely to happen if a
+normal check has succeeded!).
+
+Some things reported by @code{CHECK TABLE} can't be corrected automatically:
+
+@itemize @bullet
@item
-@cindex @code{mysql_fix_privilege_tables}
-If you updated an existing @strong{MySQL} installation from a version earlier
-than Version 3.22.11 to Version 3.22.11 or later, did you run the
-@code{mysql_fix_privilege_tables} script? If not, do so. The structure of
-the grant tables changed with @strong{MySQL} Version 3.22.11 when the
-@code{GRANT} statement became functional.
+@code{Found row where the auto_increment column has the value 0}.
+
+This means that the table contains a row where the
+@code{auto_increment} index column contains the value 0.
+(It's possible to create a row where the auto_increment column is 0 by
+explicitly setting the column to 0 with an @code{UPDATE} statement.)
+
+This isn't an error in itself, but could cause trouble if you decide to
+dump the table and restore it or do an @code{ALTER TABLE} on the
+table. In this case the auto_increment column will change value,
+according to the rules of auto_increment columns, which could cause
+problems like a duplicate key error.
+
+To get rid of the warning, just execute an @code{UPDATE} statement
+to set the column to some value other than 0, as in the sketch
+following this list.
+@end itemize
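+
+A sketch of such an @code{UPDATE} (hypothetical table and column names;
+pick a value that is not already in use):
+
+@example
+mysql> UPDATE tbl_name SET id=1000 WHERE id=0;
+@end example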
+
+@node REPAIR TABLE, Table maintenance, CHECK TABLE, Disaster Prevention
+@subsection @code{REPAIR TABLE} Syntax
+
+@findex REPAIR TABLE
+
+@example
+REPAIR TABLE tbl_name[,tbl_name...] [QUICK] [EXTENDED]
+@end example
+
+@code{REPAIR TABLE} only works on @code{MyISAM} tables and is the same
+as running @code{myisamchk -r table_name} on the table.
+
+Normally you should never have to run this command, but if disaster strikes
+you are very likely to get back all your data from a MyISAM table with
+@code{REPAIR TABLE}. If your tables get corrupted a lot you should
+try to find the reason for this! @xref{Crashing}. @xref{MyISAM table problems}.
+
+@code{REPAIR TABLE} repairs a possible corrupted table. The command returns a
+table with the following columns:
+
+@multitable @columnfractions .35 .65
+@item @strong{Column} @tab @strong{Value}
+@item Table @tab Table name
+@item Op @tab Always ``repair''
+@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
+@item Msg_text @tab The message.
+@end multitable
+
+Note that you can get many rows of information for each repaired
+table. The last row will be of @code{Msg_type status} and should
+normally be @code{OK}. If you don't get @code{OK}, you should try
+repairing the table with @code{myisamchk -o}, as @code{REPAIR TABLE}
+does not yet implement all the options of @code{myisamchk}. In the near
+future, we will make it more flexible.
+
+If @code{QUICK} is given then @strong{MySQL} will try to do a
+@code{REPAIR} of only the index tree.
+
+If you use @code{EXTENDED}, then @strong{MySQL} will create the index row
+by row instead of creating one index at a time with sorting; this may be
+better than sorting on fixed-length keys if you have long @code{char()}
+keys that compress very well.
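+
+For example, to rebuild only the index tree of a table (hypothetical name):
+
+@example
+mysql> REPAIR TABLE tbl_name QUICK;
+@end example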
+
+
+@node Table maintenance, Maintenance regimen, REPAIR TABLE, Disaster Prevention
+@subsection Using @code{myisamchk} for Table Maintenance and Crash Recovery
+
+Starting with @strong{MySQL} Version 3.23.13, you can check MyISAM
+tables with the @code{CHECK TABLE} command. @xref{CHECK TABLE}. You can
+repair tables with the @code{REPAIR TABLE} command. @xref{REPAIR TABLE}.
+
+To check/repair MyISAM tables (@code{.MYI} and @code{.MYD}) you should
+use the @code{myisamchk} utility. To check/repair ISAM tables
+(@code{.ISM} and @code{.ISD}) you should use the @code{isamchk}
+utility. @xref{Table types}.
+
+In the following text we will talk about @code{myisamchk}, but everything
+also applies to the old @code{isamchk}.
+
+You can use the @code{myisamchk} utility to get information about your
+database tables, check and repair them, or optimize them. The following
+sections describe how to invoke @code{myisamchk} (including a
+description of its options), how to set up a table maintenance schedule,
+and how to use @code{myisamchk} to perform its various functions.
+
+You can, in most cases, also use the @code{OPTIMIZE TABLE} command to
+optimize and repair tables, but this is not as fast or reliable (in case
+of real fatal errors) as @code{myisamchk}. On the other hand,
+@code{OPTIMIZE TABLE} is easier to use and you don't have to worry about
+flushing tables.
+@xref{OPTIMIZE TABLE, , @code{OPTIMIZE TABLE}}.
+
+Even though the repair in @code{myisamchk} is quite safe, it's always a
+good idea to make a backup BEFORE doing a repair (or anything that could
+make a lot of changes to a table).
+
+@menu
+* myisamchk syntax::
+* myisamchk general options::
+* myisamchk check options::
+* myisamchk repair options::
+* myisamchk other options::
+* myisamchk memory::
+* Crash recovery::
+* Check::
+* Repair::
+* Optimization::
+@end menu
+
+@node myisamchk syntax, myisamchk general options, Table maintenance, Table maintenance
+@subsubsection @code{myisamchk} Invocation Syntax
+
+@code{myisamchk} is invoked like this:
+
+@example
+shell> myisamchk [options] tbl_name
+@end example
+
+The @code{options} specify what you want @code{myisamchk} to do. They are
+described below. (You can also get a list of options by invoking
+@code{myisamchk --help}.) With no options, @code{myisamchk} simply checks your
+table. To get more information or to tell @code{myisamchk} to take corrective
+action, specify options as described below and in the following sections.
+
+@code{tbl_name} is the database table you want to check/repair. If you run
+@code{myisamchk} somewhere other than in the database directory, you must
+specify the path to the file, because @code{myisamchk} has no idea where your
+database is located. Actually, @code{myisamchk} doesn't care whether or not
+the files you are working on are located in a database directory; you can
+copy the files that correspond to a database table into another location and
+perform recovery operations on them there.
+
+You can name several tables on the @code{myisamchk} command line if you
+wish. You can also specify a name as an index file
+name (with the @file{.MYI} suffix), which allows you to specify all
+tables in a directory by using the pattern @file{*.MYI}.
+For example, if you are in a database directory, you can check all the
+tables in the directory like this:
+
+@example
+shell> myisamchk *.MYI
+@end example
+
+If you are not in the database directory, you can check all the tables there
+by specifying the path to the directory:
+
+@example
+shell> myisamchk /path/to/database_dir/*.MYI
+@end example
+
+You can even check all tables in all databases by specifying a wild card
+with the path to the @strong{MySQL} data directory:
+
+@example
+shell> myisamchk /path/to/datadir/*/*.MYI
+@end example
+
+The recommended way to quickly check all tables is:
+
+@example
+myisamchk --silent --fast /path/to/datadir/*/*.MYI
+isamchk --silent /path/to/datadir/*/*.ISM
+@end example
+
+If you want to check all tables and repair all tables that are corrupted,
+you can use the following line:
+
+@example
+myisamchk --silent --force --fast --update-state -O key_buffer=64M -O sort_buffer=64M -O read_buffer=1M -O write_buffer=1M /path/to/datadir/*/*.MYI
+isamchk --silent --force -O key_buffer=64M -O sort_buffer=64M -O read_buffer=1M -O write_buffer=1M /path/to/datadir/*/*.ISM
+@end example
+
+The above assumes that you have more than 64M of memory free.
+
+If you get an error like the following:
+
+@example
+myisamchk: warning: 1 clients is using or hasn't closed the table properly
+@end example
+
+it means that you are trying to check a table that has been updated by
+another program (such as the @code{mysqld} server) that hasn't yet closed
+the file or that has died without closing the file properly.
+
+If @code{mysqld} is running, you must force a sync/close of all
+tables with @code{FLUSH TABLES} and ensure that no one is using the
+tables while you are running @code{myisamchk}. In @strong{MySQL} Version 3.23
+the easiest way to avoid this problem is to use @code{CHECK TABLE}
+instead of @code{myisamchk} to check tables.
+
+@menu
+* myisamchk general options::
+* myisamchk check options::
+* myisamchk repair options::
+* myisamchk other options::
+@end menu
+
+
+@node myisamchk general options, myisamchk check options, myisamchk syntax, Table maintenance
+@subsubsection General Options for @code{myisamchk}
+
+@cindex options, @code{myisamchk}
+@cindex @code{myisamchk}, options
+
+@code{myisamchk} supports the following options.
+
+@table @code
+@item -# or --debug=debug_options
+Output debug log. The @code{debug_options} string often is
+@code{'d:t:o,filename'}.
+@item -? or --help
+Display a help message and exit.
+@item -O var=option, --set-variable var=option
+Set the value of a variable. The possible variables and their default values
+for @code{myisamchk} can be examined with @code{myisamchk --help}:
+@multitable @columnfractions .3 .7
+@item key_buffer_size @tab 523264
+@item read_buffer_size @tab 262136
+@item write_buffer_size @tab 262136
+@item sort_buffer_size @tab 2097144
+@item sort_key_blocks @tab 16
+@item decode_bits @tab 9
+@end multitable
+
+@code{sort_buffer_size} is used when the keys are repaired by sorting
+keys, which is the normal case when you use @code{--recover}.
+
+@code{key_buffer_size} is used when you are checking the table with
+@code{--extended-check} or when the keys are repaired by inserting keys
+row by row into the table (as when doing normal inserts). Repairing
+through the key buffer is used in the following cases:
+
+@itemize @bullet
@item
-If your privileges seem to have changed in the middle of a session, it may be
-that a superuser has changed them. Reloading the grant tables affects new
-client connections, but it also affects existing connections as indicated in
-@ref{Privilege changes}.
+If you use @code{--safe-recover}.
+@item
+If you are using a @code{FULLTEXT} index.
+@item
+If the temporary files needed to sort the keys would be more than twice
+as big as when creating the key file directly. This is often the case
+when you have big @code{CHAR}, @code{VARCHAR} or @code{TEXT} keys as the
+sort needs to store the whole keys during sorting. If you have lots
+of temporary space, you can force @code{myisamchk} to repair by sorting
+by using the @code{--sort-recover} option.
+@end itemize
+
+Repairing through the key buffer takes much less disk space than using
+sorting, but is also much slower.
+
+If you want a faster repair, set the above variables to about 1/4 of your
+available memory. You can set both variables to big values, as only one
+of the above buffers will be used at a time.
+@item -s or --silent
+Silent mode. Write output only when errors occur. You can use @code{-s}
+twice (@code{-ss}) to make @code{myisamchk} very silent.
+@item -v or --verbose
+Verbose mode. Print more information. This can be used with @code{-d} and
+@code{-e}. Use @code{-v} multiple times (@code{-vv}, @code{-vvv}) for more
+verbosity!
+@item -V or --version
+Print the @code{myisamchk} version and exit.
+@item -w or --wait
+Instead of giving an error if the table is locked, wait until the table
+is unlocked before continuing. Note that if you are running @code{mysqld}
+on the table with @code{--skip-locking}, the table can only be locked
+by another @code{myisamchk} command.
+@end table
+
+
+@node myisamchk check options, myisamchk repair options, myisamchk general options, Table maintenance
+@subsubsection Check Options for @code{myisamchk}
+
+@cindex check options, myisamchk
+@cindex tables, checking
+
+@table @code
+@item -c or --check
+Check table for errors. This is the default operation if you are not
+giving @code{myisamchk} any options that override this.
+
+@item -e or --extend-check
+Check the table VERY thoroughly (which is quite slow if you have many
+indexes). This option should only be used in extreme cases. Normally,
+@code{myisamchk} or @code{myisamchk --medium-check} should, in most
+cases, be able to find out if there are any errors in the table.
+
+If you are using @code{--extended-check} and have much memory, you should
+increase the value of @code{key_buffer_size} a lot!
+
+@item -F or --fast
+Check only tables that haven't been closed properly.
+@item -C or --check-only-changed
+Check only tables that have changed since the last check.
+@item -f or --force
+Restart @code{myisamchk} with @code{-r} (repair) on the table, if
+@code{myisamchk} finds any errors in the table.
+@item -i or --information
+Print informational statistics about the table that is checked.
+@item -m or --medium-check
+Faster than extended-check, but only finds 99.99% of all errors.
+Should, however, be good enough for most cases.
+@item -U or --update-state
+Store in the @file{.MYI} file when the table was checked and if the table crashed. This should be used to get full benefit of the
+@code{--check-only-changed} option, but you shouldn't use this
+option if the @code{mysqld} server is using the table and you are
+running @code{mysqld} with @code{--skip-locking}.
+@item -T or --read-only
+Don't mark table as checked. This is useful if you use @code{myisamchk}
+to check a table that is in use by some other application that doesn't
+use locking (like @code{mysqld --skip-locking}).
+@end table
+
+
+@node myisamchk repair options, myisamchk other options, myisamchk check options, Table maintenance
+@subsubsection Repair Options for myisamchk
+
+@cindex repair options, myisamchk
+@cindex files, repairing
+
+The following options are used if you start @code{myisamchk} with
+@code{-r} or @code{-o}:
+
+@table @code
+@item -D # or --data-file-length=#
+Max length of data file (when re-creating data file when it's 'full').
+@item -e or --extend-check
+Try to recover every possible row from the data file.
+Normally this will also find a lot of garbage rows. Don't use this option
+if you are not totally desperate.
+@item -f or --force
+Overwrite old temporary files (@code{table_name.TMD}) instead of aborting.
+@item -k # or --keys-used=#
+If you are using ISAM, tells the ISAM table handler to update only the
+first @code{#} indexes. If you are using @code{MyISAM}, tells which keys
+to use, where each binary bit stands for one key (the first key is bit 0).
+This can be used to get faster inserts! Deactivated indexes can be
+reactivated by using @code{myisamchk -r}.
+@item -l or --no-symlinks
+Do not follow symbolic links. Normally @code{myisamchk} repairs the
+table a symlink points at. This option doesn't exist in MySQL 4.0,
+as MySQL 4.0 will not remove symlinks during repair.
+@item -r or --recover
+Can fix almost anything except unique keys that aren't unique
+(which is an extremely unlikely error with ISAM/MyISAM tables).
+If you want to recover a table, this is the option to try first. Only if
+myisamchk reports that the table can't be recovered by @code{-r}, you
+should then try @code{-o}. (Note that in the unlikely case that @code{-r}
+fails, the data file is still intact.)
+If you have lots of memory, you should increase the size of
+@code{sort_buffer_size}!
+@item -o or --safe-recover
+Uses an old recovery method (reads through all rows in order and updates
+all index trees based on the found rows); this is a magnitude slower
+than @code{-r}, but can handle a couple of very unlikely cases that
+@code{-r} cannot handle. This recovery method also uses much less disk
+space than @code{-r}. Normally one should always first repair with
+@code{-r}, and only if this fails use @code{-o}.
+
+If you have lots of memory, you should increase the size of
+@code{key_buffer_size}!
+@item -n or --sort-recover
+Force @code{myisamchk} to use sorting to resolve the keys even if the
+temporary files should be very big. This will not have any effect if you have
+fulltext keys in the table.
+
+@item --character-sets-dir=...
+Directory where character sets are stored.
+@item --set-character-set=name
+Change the character set used by the index.
+@item -t or --tmpdir=path
+Path for storing temporary files. If this is not set, @code{myisamchk} will
+use the environment variable @code{TMPDIR} for this.
+@item -q or --quick
+Faster repair by not modifying the data file. One can give a second
+@code{-q} to force @code{myisamchk} to modify the original data file in case
+of duplicate keys.
+@item -u or --unpack
+Unpack a file packed with @code{myisampack}.
+@end table
+
+@node myisamchk other options, myisamchk memory, myisamchk repair options, Table maintenance
+@subsubsection Other Options for @code{myisamchk}
+
+Other actions that @code{myisamchk} can do, besides repair and check tables:
+
+@table @code
+@item -a or --analyze
+Analyze the distribution of keys. This improves join performance by
+enabling the join optimizer to better choose in which order it should
+join the tables and which keys it should use. You can check the calculated
+distribution with @code{myisamchk --describe --verbose table_name} or with
+@code{SHOW KEYS} in @strong{MySQL}.
+@item -d or --description
+Prints some information about the table.
+@item -A or --set-auto-increment[=value]
+Force auto_increment to start at this or a higher value. If no value is
+given, then the next auto_increment value is set to the highest used value
+for the auto key, plus 1.
+@item -S or --sort-index
+Sort the index tree blocks in high-low order.
+This will optimize seeks and will make table scanning by key faster.
+@item -R or --sort-records=#
+Sorts records according to an index. This makes your data much more localized
+and may speed up ranged @code{SELECT} and @code{ORDER BY} operations on
+this index. (It may be VERY slow to do a sort the first time!)
+To find out a table's index numbers, use @code{SHOW INDEX}, which shows a
+table's indexes in the same order that @code{myisamchk} sees them. Indexes are
+numbered beginning with 1.
+@end table
+
+@node myisamchk memory, Crash recovery, myisamchk other options, Table maintenance
+@subsubsection @code{myisamchk} Memory Usage
+
+@cindex memory usage, myisamchk
+
+Memory allocation is important when you run @code{myisamchk}.
+@code{myisamchk} uses no more memory than you specify with the @code{-O}
+options. If you are going to use @code{myisamchk} on very large files,
+you should first decide how much memory you want it to use. The default
+is to use only about 3M to fix things. By using larger values, you can
+get @code{myisamchk} to operate faster. For example, if you have more
+than 32M RAM, you could use options such as these (in addition to any
+other options you might specify):
+
+@example
+shell> myisamchk -O sort=16M -O key=16M -O read=1M -O write=1M ...
+@end example
+
+Using @code{-O sort=16M} should probably be enough for most cases.
+
+Be aware that @code{myisamchk} uses temporary files in @code{TMPDIR}. If
+@code{TMPDIR} points to a memory file system, you may easily get out of
+memory errors. If this happens, set @code{TMPDIR} to point at some directory
+with more space and restart @code{myisamchk}.
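+
+For example, in a Bourne-compatible shell (the directory and table names
+are only illustrations):
+
+@example
+shell> TMPDIR=/path/to/some/big/disk
+shell> export TMPDIR
+shell> myisamchk -r tbl_name
+@end example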
+
+When repairing, @code{myisamchk} will also need a lot of disk space:
+
+@itemize @bullet
@item
-If you can't get your password to work, remember that you must use
-the @code{PASSWORD()} function if you set the password with the
-@code{INSERT}, @code{UPDATE}, or @code{SET PASSWORD} statements. The
-@code{PASSWORD()} function is unnecessary if you specify the password using
-the @code{GRANT ... INDENTIFIED BY} statement or the @code{mysqladmin
-password} command.
-@xref{Passwords}.
+Double the size of the record file (the original one and a copy). This
+space is not needed if one does a repair with @code{--quick}, as in this
+case only the index file will be re-created. This space is needed on the
+same disk as the original record file!
+@item
+Space for the new index file that replaces the old one. The old
+index file is truncated at the start, so one can usually ignore this space.
+This space is needed on the same disk as the original index file!
+@item
+When using @code{--recover} or @code{--sort-recover}
+(but not when using @code{--safe-recover}), you will need space for a
+sort buffer of:
+@code{(largest_key + row_pointer_length)*number_of_rows * 2}.
+You can check the length of the keys and the row_pointer_length with
+@code{myisamchk -dv table}.
+This space is allocated on the temporary disk (specified by @code{TMPDIR} or
+@code{--tmpdir=#}).
+@end itemize
+
+If you have a problem with disk space during repair, you can try to use
+@code{--safe-recover} instead of @code{--recover}.
+
+
+@node Crash recovery, Check, myisamchk memory, Table maintenance
+@subsubsection Using @code{myisamchk} for Crash Recovery
+
+@cindex crash, recovery
+@cindex recovery, from crash
+
+If you run @code{mysqld} with @code{--skip-locking} (which is the default on
+some systems, like Linux), you can't reliably use @code{myisamchk} to
+check a table when @code{mysqld} is using the same table. If you
+can be sure that no one is accessing the tables through @code{mysqld}
+while you run @code{myisamchk}, you only have to do @code{mysqladmin
+flush-tables} before you start checking the tables. If you can't
+guarantee the above, then you must take down @code{mysqld} while you
+check the tables. If you run @code{myisamchk} while @code{mysqld} is updating
+the tables, you may get a warning that a table is corrupt even if it
+isn't.
+
+If you are not using @code{--skip-locking}, you can use @code{myisamchk}
+to check tables at any time. While you do this, all clients that try
+to update the table will wait until @code{myisamchk} is ready before
+continuing.
+
+If you use @code{myisamchk} to repair or optimize tables, you
+@strong{MUST} always ensure that the @code{mysqld} server is not using
+the table (this also applies if you are using @code{--skip-locking}).
+If you don't take down @code{mysqld}, you should at least do a
+@code{mysqladmin flush-tables} before you run @code{myisamchk}.
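+
+A sketch of that sequence (hypothetical database and table names):
+
+@example
+shell> mysqladmin flush-tables
+shell> myisamchk -r /path/to/datadir/db_name/tbl_name
+@end example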
+
+This chapter describes how to check for and deal with data corruption
+in @strong{MySQL} databases. If your tables get corrupted a lot you should
+try to find the reason for this! @xref{Crashing}.
+
+The @code{MyISAM} table section contains reasons why a table could be
+corrupted. @xref{MyISAM table problems}.
+
+When performing crash recovery, it is important to understand that each table
+@code{tbl_name} in a database corresponds to three files in the database
+directory:
+
+@multitable @columnfractions .2 .8
+@item @strong{File} @tab @strong{Purpose}
+@item @file{tbl_name.frm} @tab Table definition (form) file
+@item @file{tbl_name.MYD} @tab Data file
+@item @file{tbl_name.MYI} @tab Index file
+@end multitable
+
+Each of these three file types is subject to corruption in various ways, but
+problems occur most often in data files and index files.
+
+@code{myisamchk} works by creating a copy of the @file{.MYD} (data) file
+row by row. It ends the repair stage by removing the old @file{.MYD}
+file and renaming the new file to the original file name. If you use
+@code{--quick}, @code{myisamchk} does not create a temporary @file{.MYD}
+file, but instead assumes that the @file{.MYD} file is correct and only
+generates a new index file without touching the @file{.MYD} file. This
+is safe, because @code{myisamchk} automatically detects if the
+@file{.MYD} file is corrupt and aborts the repair in this case. You can
+also give two @code{--quick} options to @code{myisamchk}. In this case,
+@code{myisamchk} does not abort on some errors (like duplicate key) but
+instead tries to resolve them by modifying the @file{.MYD}
+file. Normally the use of two @code{--quick} options is useful only if
+you have too little free disk space to perform a normal repair. In this
+case you should at least make a backup before running @code{myisamchk}.
+
+
+@node Check, Repair, Crash recovery, Table maintenance
+@subsubsection How to Check Tables for Errors
+
+@cindex checking, tables for errors
+@cindex tables, error checking
+@cindex errors, checking tables for
+
+To check a MyISAM table, use the following commands:
+
+@table @code
+@item myisamchk tbl_name
+This finds 99.99% of all errors. What it can't find is corruption that
+involves @strong{ONLY} the data file (which is very unusual). If you want
+to check a table, you should normally run @code{myisamchk} without options or
+with either the @code{-s} or @code{--silent} option.
+
+@item myisamchk -m tbl_name
+This finds 99.999% of all errors. It first checks all index entries for errors and
+then reads through all rows. It calculates a checksum for all keys in
+the rows and verifies that the checksum matches the checksum for the keys
+in the index tree.
+
+@item myisamchk -e tbl_name
+This does a complete and thorough check of all data (@code{-e} means
+``extended check''). It does a check-read of every key for each row to verify
+that they indeed point to the correct row. This may take a LONG time on a
+big table with many keys. @code{myisamchk} will normally stop after the first
+error it finds. If you want to obtain more information, you can add the
+@code{--verbose} (@code{-v}) option. This causes @code{myisamchk} to keep
+going, up through a maximum of 20 errors. In normal usage, a simple
+@code{myisamchk} (with no arguments other than the table name) is sufficient.
+
+@item myisamchk -e -i tbl_name
+Like the previous command, but the @code{-i} option tells @code{myisamchk} to
+print some informational statistics, too.
+@end table
+
+@node Repair, Optimization, Check, Table maintenance
+@subsubsection How to Repair Tables
+
+@cindex tables, repairing
+@cindex repairing, tables
+
+In the following section we only talk about using @code{myisamchk} on
+@code{MyISAM} tables (extensions @code{.MYI} and @code{.MYD}). If you
+are using @code{ISAM} tables (extensions @code{.ISM} and @code{.ISD}),
+you should use @code{isamchk} instead.
+
+Starting with @strong{MySQL} Version 3.23.14, you can repair MyISAM
+tables with the @code{REPAIR TABLE} command. @xref{REPAIR TABLE}.
+
+The symptoms of a corrupted table include queries that abort unexpectedly
+and observable errors such as these:
+
+@itemize @bullet
@item
-@code{localhost} is a synonym for your local hostname, and is also the
-default host to which clients try to connect if you specify no host
-explicitly. However, connections to @code{localhost} do not work if you are
-running on a system that uses MIT-pthreads (@code{localhost} connections are
-made using Unix sockets, which are not supported by MIT-pthreads). To avoid
-this problem on such systems, you should use the @code{--host} option to name
-the server host explicitly. This will make a TCP/IP connection to the
-@code{mysqld} server. In this case, you must have your real hostname in
-@code{user} table entries on the server host. (This is true even if you are
-running a client program on the same host as the server.)
+@file{tbl_name.frm} is locked against change
+@item
+Can't find file @file{tbl_name.MYI} (Errcode: ###)
+@item
+Unexpected end of file
+@item
+Record file is crashed
+@item
+Got error ### from table handler
+
+To get more information about the error you can run @code{perror ###}. Here
+are the most common errors that indicate a problem with the table:
+
+@example
+shell> perror 126 127 132 134 135 136 141 144 145
+126 = Index file is crashed / Wrong file format
+127 = Record-file is crashed
+132 = Old database file
+134 = Record was already deleted (or record file crashed)
+135 = No more room in record file
+136 = No more room in index file
+141 = Duplicate unique key or constraint on write or update
+144 = Table is crashed and last repair failed
+145 = Table was marked as crashed and should be repaired
+@end example
+
+Note that error 135, no more room in record file, is not an error that
+can be fixed by a simple repair. In this case you have to do:
+
+@example
+ALTER TABLE table MAX_ROWS=xxx AVG_ROW_LENGTH=yyy;
+@end example
+@end itemize
+
+In the other cases, you must repair your tables. @code{myisamchk}
+can usually detect and fix most things that go wrong.
+
+The repair process involves up to four stages, described below. Before you
+begin, you should @code{cd} to the database directory and check the
+permissions of the table files. Make sure they are readable by the Unix user
+that @code{mysqld} runs as (and by you, because you need to access the files
+you are checking). If it turns out you need to modify files, they must also
+be writable by you.
+
+If you are using @strong{MySQL} Version 3.23.16 and above, you can (and
+should) use the @code{CHECK} and @code{REPAIR} commands to check and repair
+@code{MyISAM} tables. @xref{CHECK TABLE}. @xref{REPAIR TABLE}.
+
+The manual section about table maintenance includes the options to
+@code{isamchk}/@code{myisamchk}. @xref{Table maintenance}.
+
+The following section is for the cases where the above commands fail or
+where you want to use the extended features that @code{isamchk}/@code{myisamchk} provide.
+
+If you are going to repair a table from the command line, you must first
+take down the @code{mysqld} server. Note that when you do
+@code{mysqladmin shutdown} on a remote server, the @code{mysqld} server
+will still be alive for a while after @code{mysqladmin} returns, until
+all queries are stopped and all keys have been flushed to disk.
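+
+For example, a minimal way to take the server down before a command-line
+repair (the connection options are assumptions that depend on your setup):
+
+@example
+shell> mysqladmin -u root -p shutdown
+@end example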
+
+@noindent
+@strong{Stage 1: Checking your tables}
+
+Run @code{myisamchk *.MYI} or @code{myisamchk -e *.MYI} if you have
+more time. Use the @code{-s} (silent) option to suppress unnecessary
+information.
+
+If the @code{mysqld} server is down, you should use the @code{--update-state}
+option to tell @code{myisamchk} to mark the table as 'checked'.
+
+You have to repair only those tables for which @code{myisamchk} announces an
+error. For such tables, proceed to Stage 2.
+
+If you get weird errors when checking (such as @code{out of
+memory} errors), or if @code{myisamchk} crashes, go to Stage 3.
+
+@noindent
+@strong{Stage 2: Easy safe repair}
+
+NOTE: If you want repairing to go much faster, you should add: @code{-O
+sort_buffer=# -O key_buffer=#} (where # is about 1/4 of the available
+memory) to all @code{isamchk/myisamchk} commands.
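+
+For example (the 16M values are only an illustration; use about 1/4 of the
+memory you have available):
+
+@example
+shell> myisamchk -O sort_buffer=16M -O key_buffer=16M -r -q tbl_name
+@end example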
+
+First, try @code{myisamchk -r -q tbl_name} (@code{-r -q} means ``quick
+recovery mode''). This will attempt to repair the index file without
+touching the data file. If the data file contains everything that it
+should and the delete links point at the correct locations within the
+data file, this should work, and the table is fixed. Start repairing the
+next table. Otherwise, use the following procedure:
+
+@enumerate
@item
-If you get an @code{Access denied} error when trying to connect to the
-database with @code{mysql -u user_name db_name}, you may have a problem
-with the @code{user} table. Check this by executing @code{mysql -u root
-mysql} and issuing this SQL statement:
+Make a backup of the data file before continuing.
+
+@item
+Use @code{myisamchk -r tbl_name} (@code{-r} means ``recovery mode''). This will
+remove incorrect records and deleted records from the data file and
+reconstruct the index file.
+
+@item
+If the preceding step fails, use @code{myisamchk --safe-recover tbl_name}.
+Safe recovery mode uses an old recovery method that handles a few cases that
+regular recovery mode doesn't (but is slower).
+@end enumerate
+
+If you get weird errors when repairing (such as @code{out of
+memory} errors), or if @code{myisamchk} crashes, go to Stage 3.
+
+@noindent
+@strong{Stage 3: Difficult repair}
+
+You should only reach this stage if the first 16K block in the index file is
+destroyed or contains incorrect information, or if the index file is
+missing. In this case, it's necessary to create a new index file. Do so as
+follows:
+
+@enumerate
+@item
+Move the data file to some safe place.
+
+@item
+Use the table description file to create new (empty) data and index files:
@example
-mysql> SELECT * FROM user;
+shell> mysql db_name
+mysql> SET AUTOCOMMIT=1;
+mysql> TRUNCATE TABLE table_name;
+mysql> quit
@end example
-The result should include an entry with the @code{Host} and @code{User}
-columns matching your computer's hostname and your @strong{MySQL} user name.
+If your SQL version doesn't have @code{TRUNCATE TABLE}, use @code{DELETE FROM
+table_name} instead.
@item
-The @code{Access denied} error message will tell you who you are trying
-to log in as, the host from which you are trying to connect, and whether
-or not you were using a password. Normally, you should have one entry in
-the @code{user} table that exactly matches the hostname and user name
-that were given in the error message. For example if you get an error
-message that contains @code{Using password: NO}, this means that you
-tried to login without an password.
+Copy the old data file back onto the newly created data file.
+(Don't just move the old file back onto the new file; you want to retain
+a copy in case something goes wrong.)
+@end enumerate
+Go back to Stage 2. @code{myisamchk -r -q} should work now. (This shouldn't
+be an endless loop.)
+
+@noindent
+@strong{Stage 4: Very difficult repair}
+
+You should reach this stage only if the description file has also
+crashed. That should never happen, because the description file isn't changed
+after the table is created:
+
+@enumerate
@item
-If you get the following error when you try to connect from a different host
-than the one on which the @strong{MySQL} server is running, then there is no
-row in the @code{user} table that matches that host:
+Restore the description file from a backup and go back to Stage 3. You can
+also restore the index file and go back to Stage 2. In the latter case, you
+should start with @code{myisamchk -r}.
+
+@item
+If you don't have a backup but know exactly how the table was created, create
+a copy of the table in another database. Remove the new data file, then move
+the description and index files from the other database to your crashed
+database. This gives you new description and index files, but leaves
+the data file alone. Go back to Stage 2 and attempt to reconstruct
+the index file.
+@end enumerate
+
+
+@node Optimization, , Repair, Table maintenance
+@subsubsection Table Optimization
+
+@cindex tables, optimizing
+@cindex optimizing, tables
+
+To coalesce fragmented records and eliminate wasted space resulting from
+deleting or updating records, run @code{myisamchk} in recovery mode:
@example
-Host ... is not allowed to connect to this MySQL server
+shell> myisamchk -r tbl_name
@end example
-You can fix this by using the command-line tool @code{mysql} (on the
-server host!) to add a row to the @code{user}, @code{db}, or @code{host}
-table for the user/hostname combination from which you are trying to
-connect and then execute @code{mysqladmin flush-privileges}. If you are
-not running @strong{MySQL} Version 3.22 and you don't know the IP number or
-hostname of the machine from which you are connecting, you should put an
-entry with @code{'%'} as the @code{Host} column value in the @code{user}
-table and restart @code{mysqld} with the @code{--log} option on the
-server machine. After trying to connect from the client machine, the
-information in the @strong{MySQL} log will indicate how you really did
-connect. (Then replace the @code{'%'} in the @code{user} table entry
-with the actual hostname that shows up in the log. Otherwise, you'll
-have a system that is insecure.)
+You can optimize a table in the same way by using the SQL
+@code{OPTIMIZE TABLE} statement. @code{OPTIMIZE TABLE} does a repair of
+the table and a key analysis, and also sorts the index tree to give
+faster key lookups. There is also no possibility of unwanted interaction
+between a utility and the server, because the server does all the work
+when you use @code{OPTIMIZE TABLE}. @xref{OPTIMIZE TABLE}.
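+
+For example (the table name is only a placeholder):
+
+@example
+mysql> OPTIMIZE TABLE tbl_name;
+@end example
+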
-Another reason for this error on Linux is that you are using a binary
-@strong{MySQL} version that is compiled with a different glibc version
-than the one you are using. In this case you should either upgrade your
-OS/glibc or download the source @strong{MySQL} version and compile this
-yourself. A source RPM is normally trivial to compile and install, so
-this isn't a big problem.
+@code{myisamchk} also has a number of other options you can use to improve
+the performance of a table:
+
+@table @code
+@item -S, --sort-index
+@item -R index_num, --sort-records=index_num
+@item -a, --analyze
+@end table
+
+For a full description of these options, see @ref{myisamchk syntax}.
+
+
+@node Maintenance regimen, Table-info, Table maintenance, Disaster Prevention
+@subsection Setting Up a Table Maintenance Regimen
+
+@cindex maintaining, tables
+@cindex tables, maintenance regimen
+
+Starting with @strong{MySQL} Version 3.23.13, you can check MyISAM
+tables with the @code{CHECK TABLE} command. @xref{CHECK TABLE}. You can
+repair tables with the @code{REPAIR TABLE} command. @xref{REPAIR TABLE}.
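+
+For example, from the @code{mysql} client (the table name is only a
+placeholder):
+
+@example
+mysql> CHECK TABLE tbl_name;
+mysql> REPAIR TABLE tbl_name;
+@end example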
+
+It is a good idea to perform table checks on a regular basis rather than
+waiting for problems to occur. For maintenance purposes, you can use
+@code{myisamchk -s} to check tables. The @code{-s} option (short for
+@code{--silent}) causes @code{myisamchk} to run in silent mode, printing
+messages only when errors occur.
+
+@tindex .pid (process ID) file
+It's also a good idea to check tables when the server starts up.
+For example, whenever the machine has done a reboot in the middle of an
+update, you usually need to check all the tables that could have been
+affected. (This is an ``expected crashed table''.) You could add a test to
+@code{safe_mysqld} that runs @code{myisamchk} to check all tables that have
+been modified during the last 24 hours if there is an old @file{.pid}
+(process ID) file left after a reboot. (The @file{.pid} file is created by
+@code{mysqld} when it starts up and removed when it terminates normally. The
+presence of a @file{.pid} file at system startup time indicates that
+@code{mysqld} terminated abnormally.)
+
+An even better test would be to check any table whose last-modified time
+is more recent than that of the @file{.pid} file.
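+
+A rough sketch of such a startup test, written as a shell fragment (all
+paths here are assumptions and depend on where your data directory and
+@file{.pid} file are located):
+
+@example
+pidfile=/path/to/datadir/`hostname`.pid
+if test -f $pidfile
+then
+  myisamchk --silent --force `find /path/to/datadir -name "*.MYI" -newer $pidfile`
+fi
+@end example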
+
+You should also check your tables regularly during normal system
+operation. At @strong{MySQL AB}, we run a @code{cron} job to check all
+our important tables once a week, using a line like this in a @file{crontab}
+file:
+
+@example
+35 0 * * 0 /path/to/myisamchk --fast --silent /path/to/datadir/*/*.MYI
+@end example
+
+This prints out information about crashed tables so we can examine and repair
+them when needed.
+
+As we haven't had any unexpectedly crashed tables (tables that become
+corrupted for reasons other than hardware trouble)
+for a couple of years now (this is really true), once a week is
+more than enough for us.
+
+We recommend that to start with, you execute @code{myisamchk -s} each
+night on all tables that have been updated during the last 24 hours,
+until you come to trust @strong{MySQL} as much as we do.
+
+@cindex tables, defragment
+Normally you don't need to maintain @strong{MySQL} tables that much. If
+you are changing tables with dynamic-size rows (tables with @code{VARCHAR},
+@code{BLOB}, or @code{TEXT} columns) or have tables with many deleted rows,
+you may want to defragment/reclaim space from the tables from time to time
+(once a month?).
+
+You can do this by using @code{OPTIMIZE TABLE} on the tables in question,
+or, if you can take the @code{mysqld} server down for a while, by doing:
+
+@example
+isamchk -r --silent --sort-index -O sort_buffer_size=16M */*.ISM
+myisamchk -r --silent --sort-index -O sort_buffer_size=16M */*.MYI
+@end example
+
+
+@node Table-info, , Maintenance regimen, Disaster Prevention
+@subsection Getting Information About a Table
+
+@cindex tables, information
+
+To get a description of a table or statistics about it, use the commands shown
+below. We explain some of the information in more detail later:
+
+@table @code
+@item myisamchk -d tbl_name
+Runs @code{myisamchk} in ``describe mode'' to produce a description of
+your table. If you start the @strong{MySQL} server using the
+@code{--skip-locking} option, @code{myisamchk} may report an error for a
+table that is updated while it runs. However, because @code{myisamchk}
+doesn't change the table in describe mode, there isn't any risk of
+destroying data.
+
+@item myisamchk -d -v tbl_name
+To produce more information about what @code{myisamchk} is doing, add @code{-v}
+to tell it to run in verbose mode.
+
+@item myisamchk -eis tbl_name
+Shows only the most important information from a table. It is slow because it
+must read the whole table.
+
+@item myisamchk -eiv tbl_name
+This is like @code{-eis}, but tells you what is being done.
+@end table
+
+@cindex examples, @code{myisamchk} output
+@cindex @code{myisamchk}, example output
+Example of @code{myisamchk -d} output:
+@example
+MyISAM file: company.MYI
+Record format: Fixed length
+Data records: 1403698 Deleted blocks: 0
+Recordlength: 226
+
+table description:
+Key Start Len Index Type
+1 2 8 unique double
+2 15 10 multip. text packed stripped
+3 219 8 multip. double
+4 63 10 multip. text packed stripped
+5 167 2 multip. unsigned short
+6 177 4 multip. unsigned long
+7 155 4 multip. text
+8 138 4 multip. unsigned long
+9 177 4 multip. unsigned long
+ 193 1 text
+@end example
+
+Example of @code{myisamchk -d -v} output:
+@example
+MyISAM file: company
+Record format: Fixed length
+File-version: 1
+Creation time: 1999-10-30 12:12:51
+Recover time: 1999-10-31 19:13:01
+Status: checked
+Data records: 1403698 Deleted blocks: 0
+Datafile parts: 1403698 Deleted data: 0
+Datafilepointer (bytes): 3 Keyfile pointer (bytes): 3
+Max datafile length: 3791650815 Max keyfile length: 4294967294
+Recordlength: 226
+
+table description:
+Key Start Len Index Type Rec/key Root Blocksize
+1 2 8 unique double 1 15845376 1024
+2 15 10 multip. text packed stripped 2 25062400 1024
+3 219 8 multip. double 73 40907776 1024
+4 63 10 multip. text packed stripped 5 48097280 1024
+5 167 2 multip. unsigned short 4840 55200768 1024
+6 177 4 multip. unsigned long 1346 65145856 1024
+7 155 4 multip. text 4995 75090944 1024
+8 138 4 multip. unsigned long 87 85036032 1024
+9 177 4 multip. unsigned long 178 96481280 1024
+ 193 1 text
+@end example
+
+Example of @code{myisamchk -eis} output:
+@example
+Checking MyISAM file: company
+Key: 1: Keyblocks used: 97% Packed: 0% Max levels: 4
+Key: 2: Keyblocks used: 98% Packed: 50% Max levels: 4
+Key: 3: Keyblocks used: 97% Packed: 0% Max levels: 4
+Key: 4: Keyblocks used: 99% Packed: 60% Max levels: 3
+Key: 5: Keyblocks used: 99% Packed: 0% Max levels: 3
+Key: 6: Keyblocks used: 99% Packed: 0% Max levels: 3
+Key: 7: Keyblocks used: 99% Packed: 0% Max levels: 3
+Key: 8: Keyblocks used: 99% Packed: 0% Max levels: 3
+Key: 9: Keyblocks used: 98% Packed: 0% Max levels: 4
+Total: Keyblocks used: 98% Packed: 17%
+
+Records: 1403698 M.recordlength: 226 Packed: 0%
+Recordspace used: 100% Empty space: 0% Blocks/Record: 1.00
+Record blocks: 1403698 Delete blocks: 0
+Recorddata: 317235748 Deleted data: 0
+Lost space: 0 Linkdata: 0
+
+User time 1626.51, System time 232.36
+Maximum resident set size 0, Integral resident set size 0
+Non physical pagefaults 0, Physical pagefaults 627, Swaps 0
+Blocks in 0 out 0, Messages in 0 out 0, Signals 0
+Voluntary context switches 639, Involuntary context switches 28966
+@end example
+
+Example of @code{myisamchk -eiv} output:
+@example
+Checking MyISAM file: company
+Data records: 1403698 Deleted blocks: 0
+- check file-size
+- check delete-chain
+block_size 1024:
+index 1:
+index 2:
+index 3:
+index 4:
+index 5:
+index 6:
+index 7:
+index 8:
+index 9:
+No recordlinks
+- check index reference
+- check data record references index: 1
+Key: 1: Keyblocks used: 97% Packed: 0% Max levels: 4
+- check data record references index: 2
+Key: 2: Keyblocks used: 98% Packed: 50% Max levels: 4
+- check data record references index: 3
+Key: 3: Keyblocks used: 97% Packed: 0% Max levels: 4
+- check data record references index: 4
+Key: 4: Keyblocks used: 99% Packed: 60% Max levels: 3
+- check data record references index: 5
+Key: 5: Keyblocks used: 99% Packed: 0% Max levels: 3
+- check data record references index: 6
+Key: 6: Keyblocks used: 99% Packed: 0% Max levels: 3
+- check data record references index: 7
+Key: 7: Keyblocks used: 99% Packed: 0% Max levels: 3
+- check data record references index: 8
+Key: 8: Keyblocks used: 99% Packed: 0% Max levels: 3
+- check data record references index: 9
+Key: 9: Keyblocks used: 98% Packed: 0% Max levels: 4
+Total: Keyblocks used: 9% Packed: 17%
+
+- check records and index references
+[LOTS OF ROW NUMBERS DELETED]
+
+Records: 1403698 M.recordlength: 226 Packed: 0%
+Recordspace used: 100% Empty space: 0% Blocks/Record: 1.00
+Record blocks: 1403698 Delete blocks: 0
+Recorddata: 317235748 Deleted data: 0
+Lost space: 0 Linkdata: 0
+
+User time 1639.63, System time 251.61
+Maximum resident set size 0, Integral resident set size 0
+Non physical pagefaults 0, Physical pagefaults 10580, Swaps 0
+Blocks in 4 out 0, Messages in 0 out 0, Signals 0
+Voluntary context switches 10604, Involuntary context switches 122798
+@end example
+
+Here are the sizes of the data and index files for the table used in the
+preceding examples:
+
+@example
+-rw-rw-r-- 1 monty tcx 317235748 Jan 12 17:30 company.MYD
+-rw-rw-r-- 1 davida tcx 96482304 Jan 12 18:35 company.MYI
+@end example
+
+Explanations for the types of information @code{myisamchk} produces are
+given below. The ``keyfile'' is the index file. ``Record'' and ``row''
+are synonymous:
+
+@table @code
+@item ISAM file
+Name of the ISAM (index) file.
+
+@item Isam-version
+Version of ISAM format. Currently always 2.
+
+@item Creation time
+When the data file was created.
+
+@item Recover time
+When the index/data file was last reconstructed.
+
+@item Data records
+How many records are in the table.
+
+@item Deleted blocks
+How many deleted blocks still have reserved space.
+You can optimize your table to minimize this space.
+@xref{Optimization}.
+
+@item Datafile: Parts
+For dynamic record format, this indicates how many data blocks there are. For
+an optimized table without fragmented records, this is the same as @code{Data
+records}.
+
+@item Deleted data
+How many bytes of non-reclaimed deleted data there are.
+You can optimize your table to minimize this space.
+@xref{Optimization}.
+
+@item Datafile pointer
+The size of the data file pointer, in bytes. It is usually 2, 3, 4, or 5
+bytes. Most tables manage with 2 bytes, but this cannot be controlled
+from @strong{MySQL} yet. For fixed tables, this is a record address. For
+dynamic tables, this is a byte address.
+
+@item Keyfile pointer
+The size of the index file pointer, in bytes. It is usually 1, 2, or 3
+bytes. Most tables manage with 2 bytes, but this is calculated
+automatically by @strong{MySQL}. It is always a block address.
+
+@item Max datafile length
+How long the table's data file (@code{.MYD} file) can become, in bytes.
+
+@item Max keyfile length
+How long the table's key file (@code{.MYI} file) can become, in bytes.
+
+@item Recordlength
+How much space each record takes, in bytes.
+
+@item Record format
+The format used to store table rows.
+The examples shown above use @code{Fixed length}.
+Other possible values are @code{Compressed} and @code{Packed}.
+
+@item table description
+A list of all keys in the table. For each key, some low-level information
+is presented:
+
+@table @code
+@item Key
+This key's number.
+
+@item Start
+Where in the record this index part starts.
+
+@item Len
+How long this index part is. For packed numbers, this should always be
+the full length of the column. For strings, it may be shorter than the full
+length of the indexed column, because you can index a prefix of a string
+column.
+
+@item Index
+@code{unique} or @code{multip.} (multiple). Indicates whether or not one value
+can exist multiple times in this index.
+
+@item Type
+What data-type this index part has. This is an ISAM data-type
+with the options @code{packed}, @code{stripped} or @code{empty}.
+
+@item Root
+Address of the root index block.
+
+@item Blocksize
+The size of each index block. By default this is 1024, but the value may be
+changed at compile time.
+
+@item Rec/key
+This is a statistical value used by the optimizer. It tells how many
+records there are per value for this key. A unique key always has a
+value of 1. This may be updated after a table is loaded (or greatly
+changed) with @code{myisamchk -a}. If this is not updated at all, a default
+value of 30 is given.
+@end table
@item
-If you get an error message where the hostname is not shown or where the
-hostname is an IP, even if you try to connect with a hostname:
+In the first example above, the 9th key is a multi-part key with two parts.
+
+@item Keyblocks used
+What percentage of the keyblocks are used. Because the table used in the
+examples had just been reorganized with @code{myisamchk}, the values are very
+high (very near the theoretical maximum).
+
+@item Packed
+@strong{MySQL} tries to pack keys with a common suffix. This can only be used
+for @code{CHAR}/@code{VARCHAR}/@code{DECIMAL} keys. For long strings like
+names, this can significantly reduce the space used. In the third example
+above, the 4th key is 10 characters long and a 60% reduction in space is
+achieved.
+
+@item Max levels
+How deep the B-tree for this key is. Large tables with long keys get high
+values.
+
+@item Records
+How many rows are in the table.
+
+@item M.recordlength
+The average record length. For tables with fixed-length records, this is the
+exact record length.
+
+@item Packed
+@strong{MySQL} strips spaces from the end of strings. The @code{Packed}
+value indicates the percentage of savings achieved by doing this.
+
+@item Recordspace used
+What percentage of the data file is used.
+
+@item Empty space
+What percentage of the data file is unused.
+
+@item Blocks/Record
+Average number of blocks per record (that is, how many links a fragmented
+record is composed of). This is always 1 for fixed-format tables. This value
+should stay as close to 1.0 as possible. If it gets too big, you can
+reorganize the table with @code{myisamchk}.
+@xref{Optimization}.
+
+@item Recordblocks
+How many blocks (links) are used. For fixed format, this is the same as the number
+of records.
+
+@item Deleteblocks
+How many blocks (links) are deleted.
+
+@item Recorddata
+How many bytes in the data file are used.
+
+@item Deleted data
+How many bytes in the data file are deleted (unused).
+
+@item Lost space
+If a record is updated to a shorter length, some space is lost. This is
+the sum of all such losses, in bytes.
+
+@item Linkdata
+When the dynamic table format is used, record fragments are linked with
+pointers (4 to 7 bytes each). @code{Linkdata} is the sum of the amount of
+storage used by all such pointers.
+@end table
+
+If a table has been compressed with @code{myisampack}, @code{myisamchk
+-d} prints additional information about each table column. See
+@ref{myisampack, , @code{myisampack}}, for an example of this
+information and a description of what it means.
+
+
+@node Database Administration, Localization, Disaster Prevention, MySQL Database Administration
+@section Database Administration Language Reference
+
+
+@menu
+* OPTIMIZE TABLE::
+* ANALYZE TABLE::
+* FLUSH::
+* KILL::
+* SHOW::
+@end menu
+
+@node OPTIMIZE TABLE, ANALYZE TABLE, Database Administration, Database Administration
+@subsection @code{OPTIMIZE TABLE} Syntax
+
+@findex OPTIMIZE TABLE
+
+@cindex tables, defragmenting
+@cindex tables, fragmentation
@example
-shell> mysqladmin -u root -pxxxx -h some-hostname ver
-Access denied for user: 'root@' (Using password: YES)
+OPTIMIZE TABLE tbl_name[,tbl_name]...
@end example
-This means that @strong{MySQL} got some error when trying to resolve the
-IP to a hostname. In this case you can execute @code{mysqladmin
-flush-hosts} to reset the internal DNS cache. @xref{DNS}.
+@code{OPTIMIZE TABLE} should be used if you have deleted a large part of a
+table or if you have made many changes to a table with variable-length rows
+(tables that have @code{VARCHAR}, @code{BLOB}, or @code{TEXT} columns).
+Deleted records are maintained in a linked list and subsequent @code{INSERT}
+operations reuse old record positions. You can use @code{OPTIMIZE TABLE} to
+reclaim the unused space and to defragment the data file.
-Some permanent solutions are:
+For the moment @code{OPTIMIZE TABLE} only works on @code{MyISAM} and
+@code{BDB} tables. For @code{BDB} tables, @code{OPTIMIZE TABLE} is
+currently mapped to @code{ANALYZE TABLE}. @xref{ANALYZE TABLE}.
-@itemize @minus
+You can get @code{OPTIMIZE TABLE} to work on other table types by starting
+@code{mysqld} with @code{--skip-new} or @code{--safe-mode}, but in this
+case @code{OPTIMIZE TABLE} is just mapped to @code{ALTER TABLE}.
+
+@code{OPTIMIZE TABLE} works in the following way:
+@itemize @bullet
@item
-Try to find out what is wrong with your DNS server and fix this.
+If the table has deleted or split rows, repair the table.
@item
-Specify IPs instead of hostnames in the @strong{MySQL} privilege tables.
+If the index pages are not sorted, sort them.
@item
-Start @code{mysqld} with @code{--skip-name-resolve}.
+If the statistics are not up to date (and the repair couldn't be done
+by sorting the index), update them.
+@end itemize
+
+@code{OPTIMIZE TABLE} for @code{MyISAM} tables is equivalent to running
+@code{myisamchk --quick --check-changed-tables --sort-index --analyze}
+on the table.
+
+Note that the table is locked during the time @code{OPTIMIZE TABLE} is
+running!
+
+
+@node ANALYZE TABLE, FLUSH, OPTIMIZE TABLE, Database Administration
+@subsection @code{ANALYZE TABLE} Syntax
+
+@findex ANALYZE TABLE
+
+@example
+ANALYZE TABLE tbl_name[,tbl_name...]
+@end example
+
+Analyze and store the key distribution for the table. During the
+analysis, the table is locked with a read lock. This works on
+@code{MyISAM} and @code{BDB} tables.
+
+This is equivalent to running @code{myisamchk -a} on the table.
+
+@strong{MySQL} uses the stored key distribution to decide the order in
+which tables should be joined when you do a join on something other than
+a constant.
+
+The command returns a table with the following columns:
+
+@multitable @columnfractions .35 .65
+@item @strong{Column} @tab @strong{Value}
+@item Table @tab Table name
+@item Op @tab Always ``analyze''
+@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
+@item Msg_text @tab The message.
+@end multitable
+
+You can check the stored key distribution with the @code{SHOW INDEX} command.
+@xref{SHOW DATABASE INFO}.
+
+If the table hasn't changed since the last @code{ANALYZE TABLE} command,
+the table will not be analyzed again.
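+
+A hypothetical run (the table name and the output values are only
+illustrative):
+
+@example
+mysql> ANALYZE TABLE tbl_name;
++---------------+---------+----------+----------+
+| Table         | Op      | Msg_type | Msg_text |
++---------------+---------+----------+----------+
+| test.tbl_name | analyze | status   | OK       |
++---------------+---------+----------+----------+
+@end example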
+
+
+@node FLUSH, KILL, ANALYZE TABLE, Database Administration
+@subsection @code{FLUSH} Syntax
+
+@findex FLUSH
+
+@cindex @code{mysqladmin}
+@cindex clearing, caches
+@cindex caches, clearing
+
+@example
+FLUSH flush_option [,flush_option]
+@end example
+
+You should use the @code{FLUSH} command if you want to clear some of the
+internal caches @strong{MySQL} uses. To execute @code{FLUSH}, you must have
+the @strong{RELOAD} privilege.
+
+@code{flush_option} can be any of the following:
+
+@multitable @columnfractions .15 .85
+@item @code{HOSTS} @tab Empties the host cache tables. You should flush the
+host tables if some of your hosts change IP number or if you get the
+error message @code{Host ... is blocked}. When more than
+@code{max_connect_errors} errors occur in a row for a given host while
+connecting to the @strong{MySQL} server, @strong{MySQL} assumes
+something is wrong and blocks the host from further connection requests.
+Flushing the host tables allows the host to attempt to connect
+again. @xref{Blocked host}. You can start @code{mysqld} with
+@code{-O max_connect_errors=999999999} to avoid this error message.
+
+@item @code{LOGS} @tab Closes and reopens all log files.
+If you have specified the update log file or a binary log file without
+an extension, the extension number of the log file will be incremented
+by one relative to the previous file. If you have used an extension in
+the file name, @strong{MySQL} will close and reopen the update log file.
+@xref{Update log}. This is the same thing as sending the @code{SIGHUP}
+signal to the @code{mysqld} server.
+
+@item @code{PRIVILEGES} @tab Reloads the privileges from the grant tables in
+the @code{mysql} database.
+
+@item @code{TABLES} @tab Closes all open tables and forces all tables in use to be closed.
+
+@item @code{[TABLE | TABLES] table_name [,table_name...]} @tab Flushes only the given tables.
+
+@item @code{TABLES WITH READ LOCK} @tab Closes all open tables and locks all tables for all databases with a read lock until one executes @code{UNLOCK TABLES}. This is a very convenient way to get backups if you have a file system, like Veritas, that can take snapshots in time.
+
+@item @code{STATUS} @tab Resets most status variables to zero. This is something one should only use when debugging a query.
+@end multitable
+
+You can also access each of the commands shown above with the @code{mysqladmin}
+utility, using the @code{flush-hosts}, @code{flush-logs}, @code{reload},
+or @code{flush-tables} commands.
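+
+For example, the following are equivalent ways to flush the logs and the
+table cache:
+
+@example
+mysql> FLUSH LOGS;
+mysql> FLUSH TABLES;
+shell> mysqladmin flush-logs flush-tables
+@end example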
+
+Also take a look at the @code{RESET} command used with
+replication. @xref{Replication SQL}.
+
+
+
+@node KILL, SHOW, FLUSH, Database Administration
+@subsection @code{KILL} Syntax
+
+@findex KILL
+
+@cindex @code{mysqladmin}
+
+@example
+KILL thread_id
+@end example
+
+Each connection to @code{mysqld} runs in a separate thread. You can see
+which threads are running with the @code{SHOW PROCESSLIST} command and kill
+a thread with the @code{KILL thread_id} command.
+
+If you have the @strong{process} privilege, you can see and kill all threads.
+Otherwise, you can see and kill only your own threads.
+
+You can also use the @code{mysqladmin processlist} and @code{mysqladmin kill}
+commands to examine and kill threads.
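+
+For example (the thread id 42 is only a placeholder; take the real id from
+the process list):
+
+@example
+mysql> SHOW PROCESSLIST;
+mysql> KILL 42;
+@end example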
+
+When you do a @code{KILL}, a thread specific @code{kill flag} is set for
+the thread.
+
+In most cases it may take some time for the thread to die as the kill
+flag is only checked at specific intervals.
+
+@itemize @bullet
@item
-Start @code{mysqld} with @code{--skip-host-cache}.
+In @code{SELECT}, @code{ORDER BY} and @code{GROUP BY} loops, the flag is
+checked after reading a block of rows. If the kill flag is set, the
+statement is aborted.
@item
-Connect to @code{localhost} if you are running the server and the client
-on the same machine.
+When doing an @code{ALTER TABLE}, the kill flag is checked before each block
+of rows is read from the original table. If the kill flag was set, the
+command is aborted and the temporary table is deleted.
@item
-Put the client machine names in @code{/etc/hosts}.
+When doing an @code{UPDATE} or a @code{DELETE}, the kill flag
+is checked after each block read and after each updated or deleted
+row. If the kill flag is set, the statement is aborted. Note that if you
+are not using transactions, the changes will not be rolled back!
+@item
+@code{GET_LOCK()} will abort with @code{NULL}.
+@item
+An @code{INSERT DELAYED} thread will quickly flush all rows it has in
+memory and die.
+@item
+If the thread is in the table lock handler (state: @code{Locked}),
+the table lock will be quickly aborted.
+@item
+If the thread is waiting for free disk space in a @code{write} call, the
+write is aborted with a disk-full error message.
@end itemize
+
+@menu
+* SHOW::
+@end menu
+
+@node SHOW, , KILL, Database Administration
+@subsection @code{SHOW} Syntax
+
+@c FIX more index hits needed
+
+@findex SHOW DATABASE INFO
+@findex SHOW DATABASES
+@findex SHOW TABLES
+@findex SHOW COLUMNS
+@findex SHOW FIELDS
+@findex SHOW INDEX
+@findex SHOW KEYS
+@findex SHOW STATUS
+@findex SHOW VARIABLES
+@findex SHOW PROCESSLIST
+@findex SHOW TABLE STATUS
+@findex SHOW GRANTS
+@findex SHOW CREATE TABLE
+@findex SHOW MASTER STATUS
+@findex SHOW MASTER LOGS
+@findex SHOW SLAVE STATUS
+
+@example
+ SHOW DATABASES [LIKE wild]
+or SHOW [OPEN] TABLES [FROM db_name] [LIKE wild]
+or SHOW [FULL] COLUMNS FROM tbl_name [FROM db_name] [LIKE wild]
+or SHOW INDEX FROM tbl_name [FROM db_name]
+or SHOW TABLE STATUS [FROM db_name] [LIKE wild]
+or SHOW STATUS [LIKE wild]
+or SHOW VARIABLES [LIKE wild]
+or SHOW LOGS
+or SHOW [FULL] PROCESSLIST
+or SHOW GRANTS FOR user
+or SHOW CREATE TABLE table_name
+or SHOW MASTER STATUS
+or SHOW MASTER LOGS
+or SHOW SLAVE STATUS
+@end example
+
+@code{SHOW} provides information about databases, tables, columns, or
+status information about the server. If the @code{LIKE wild} part is
+used, the @code{wild} string can use the SQL @samp{%}
+and @samp{_} wild-card characters.
+
+@menu
+* SHOW DATABASE INFO::
+* SHOW TABLE STATUS::
+* SHOW STATUS::
+* SHOW VARIABLES::
+* SHOW LOGS::
+* SHOW PROCESSLIST::
+* SHOW GRANTS::
+* SHOW CREATE TABLE::
+@end menu
+
+
+@node SHOW DATABASE INFO, SHOW TABLE STATUS, SHOW, SHOW
+@subsubsection Retrieving Information About Databases, Tables, Columns, and Indexes
+
+@cindex displaying, information, @code{SHOW}
+
+You can use @code{db_name.tbl_name} as an alternative to the @code{tbl_name
+FROM db_name} syntax. These two statements are equivalent:
+
+@example
+mysql> SHOW INDEX FROM mytable FROM mydb;
+mysql> SHOW INDEX FROM mydb.mytable;
+@end example
+
+@code{SHOW DATABASES} lists the databases on the @strong{MySQL} server
+host. You can also get this list using the @code{mysqlshow} command.
+
+@code{SHOW TABLES} lists the tables in a given database. You can also
+get this list using the @code{mysqlshow db_name} command.
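+
+Some hypothetical examples (the database name and patterns are only
+placeholders):
+
+@example
+mysql> SHOW DATABASES LIKE 'test%';
+mysql> SHOW TABLES FROM mydb LIKE '%user%';
+shell> mysqlshow mydb
+@end example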
+
+@strong{NOTE:} If a user doesn't have any privileges for a table, the table
+will not show up in the output from @code{SHOW TABLES} or @code{mysqlshow
+db_name}.
+
+@code{SHOW OPEN TABLES} lists the tables that are currently open in
+the table cache. @xref{Table cache}. The @code{Comment} field tells
+how many times the table is @code{cached} and @code{in_use}.
+
+@code{SHOW COLUMNS} lists the columns in a given table. If you specify
+the @code{FULL} option, you will also get the privileges you have for
+each column. If the column types are different from what you expect based
+on your @code{CREATE TABLE} statement, note that @strong{MySQL}
+sometimes changes column types. @xref{Silent column changes}.
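+
+For example (the table and database names are only placeholders):
+
+@example
+mysql> SHOW FULL COLUMNS FROM mytable FROM mydb;
+@end example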
+
+The @code{DESCRIBE} statement provides information similar to
+@code{SHOW COLUMNS}.
+@xref{DESCRIBE, , @code{DESCRIBE}}.
+
+@code{SHOW FIELDS} is a synonym for @code{SHOW COLUMNS}, and
+@code{SHOW KEYS} is a synonym for @code{SHOW INDEX}. You can also
+list a table's columns or indexes with @code{mysqlshow db_name tbl_name}
+or @code{mysqlshow -k db_name tbl_name}.
+
+@code{SHOW INDEX} returns the index information in a format that closely
+resembles the @code{SQLStatistics} call in ODBC. The following columns
+are returned:
+
+@multitable @columnfractions .35 .65
+@item @strong{Column} @tab @strong{Meaning}
+@item @code{Table} @tab Name of the table.
+@item @code{Non_unique} @tab 0 if the index can't contain duplicates.
+@item @code{Key_name} @tab Name of the index.
+@item @code{Seq_in_index} @tab Column sequence number in index,
+ starting with 1.
+@item @code{Column_name} @tab Column name.
+@item @code{Collation} @tab How the column is sorted in the index.
+ In @strong{MySQL}, this can have values
+ @samp{A} (Ascending) or @code{NULL} (Not
+ sorted).
+@item @code{Cardinality} @tab Number of unique values in the index.
+ This is updated by running
+ @code{isamchk -a}.
+@item @code{Sub_part} @tab Number of indexed characters if the
+ column is only partly indexed.
+ @code{NULL} if the entire key is indexed.
+@item @code{Comment} @tab Various remarks. For now, it tells
+                             whether the index is FULLTEXT or not.
+@end multitable
+
+Note that as the @code{Cardinality} is counted based on statistics
+stored as integers, it's not necessarily accurate for small tables.
+
+
+@node SHOW TABLE STATUS, SHOW STATUS, SHOW DATABASE INFO, SHOW
+@subsubsection @code{SHOW TABLE STATUS}
+
+@cindex displaying, table status
+@cindex tables, displaying status
+@cindex status, tables
+
+@example
+SHOW TABLE STATUS [FROM db_name] [LIKE wild]
+@end example
+
+@code{SHOW TABLE STATUS} (new in Version 3.23) works like @code{SHOW
+STATUS}, but provides a lot of information about each table. You can
+also get this list using the @code{mysqlshow --status db_name} command.
+The following columns are returned:
+
+@multitable @columnfractions .30 .70
+@item @strong{Column} @tab @strong{Meaning}
+@item @code{Name} @tab Name of the table.
+@item @code{Type} @tab Type of table. @xref{Table types}.
+@item @code{Row_format} @tab The row storage format (Fixed, Dynamic, or Compressed).
+@item @code{Rows} @tab Number of rows.
+@item @code{Avg_row_length} @tab Average row length.
+@item @code{Data_length} @tab Length of the data file.
+@item @code{Max_data_length} @tab Max length of the data file.
+@item @code{Index_length} @tab Length of the index file.
+@item @code{Data_free} @tab Number of allocated but not used bytes.
+@item @code{Auto_increment} @tab Next autoincrement value.
+@item @code{Create_time} @tab When the table was created.
+@item @code{Update_time} @tab When the data file was last updated.
+@item @code{Check_time} @tab When the table was last checked.
+@item @code{Create_options} @tab Extra options used with @code{CREATE TABLE}.
+@item @code{Comment} @tab The comment used when creating the table (or some information why @strong{MySQL} couldn't access the table information).
+@end multitable
+
+@code{InnoDB} tables will report the free space in the tablespace
+in the table comment.
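+
+For example (the database name and pattern are only placeholders):
+
+@example
+mysql> SHOW TABLE STATUS FROM mydb LIKE 'company%';
+shell> mysqlshow --status mydb
+@end example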
+
+
+@node SHOW STATUS, SHOW VARIABLES, SHOW TABLE STATUS, SHOW
+@subsubsection @code{SHOW STATUS}
+
+@cindex @code{mysqladmin}
+@code{SHOW STATUS} provides server status information
+(like @code{mysqladmin extended-status}). The output resembles that shown
+below, though the format and numbers probably differ:
+
+@example
++--------------------------+------------+
+| Variable_name | Value |
++--------------------------+------------+
+| Aborted_clients | 0 |
+| Aborted_connects | 0 |
+| Bytes_received | 155372598 |
+| Bytes_sent | 1176560426 |
+| Connections | 30023 |
+| Created_tmp_disk_tables | 0 |
+| Created_tmp_tables | 8340 |
+| Created_tmp_files | 60 |
+| Delayed_insert_threads | 0 |
+| Delayed_writes | 0 |
+| Delayed_errors | 0 |
+| Flush_commands | 1 |
+| Handler_delete | 462604 |
+| Handler_read_first | 105881 |
+| Handler_read_key | 27820558 |
+| Handler_read_next | 390681754 |
+| Handler_read_prev | 6022500 |
+| Handler_read_rnd | 30546748 |
+| Handler_read_rnd_next | 246216530 |
+| Handler_update | 16945404 |
+| Handler_write | 60356676 |
+| Key_blocks_used | 14955 |
+| Key_read_requests | 96854827 |
+| Key_reads | 162040 |
+| Key_write_requests | 7589728 |
+| Key_writes | 3813196 |
+| Max_used_connections | 0 |
+| Not_flushed_key_blocks | 0 |
+| Not_flushed_delayed_rows | 0 |
+| Open_tables | 1 |
+| Open_files | 2 |
+| Open_streams | 0 |
+| Opened_tables | 44600 |
+| Questions | 2026873 |
+| Select_full_join | 0 |
+| Select_full_range_join | 0 |
+| Select_range | 99646 |
+| Select_range_check | 0 |
+| Select_scan | 30802 |
+| Slave_running | OFF |
+| Slave_open_temp_tables | 0 |
+| Slow_launch_threads | 0 |
+| Slow_queries | 0 |
+| Sort_merge_passes | 30 |
+| Sort_range | 500 |
+| Sort_rows | 30296250 |
+| Sort_scan | 4650 |
+| Table_locks_immediate | 1920382 |
+| Table_locks_waited | 0 |
+| Threads_cached | 0 |
+| Threads_created | 30022 |
+| Threads_connected | 1 |
+| Threads_running | 1 |
+| Uptime | 80380 |
++--------------------------+------------+
+@end example
+
+@cindex variables, status
+The status variables listed above have the following meaning:
+
+@multitable @columnfractions .35 .65
+@item @strong{Variable} @tab @strong{Meaning}
+@item @code{Aborted_clients} @tab Number of connections aborted because the client died without closing the connection properly. @xref{Communication errors}.
+@item @code{Aborted_connects} @tab Number of tries to connect to the @strong{MySQL} server that failed. @xref{Communication errors}.
+@item @code{Bytes_received} @tab Number of bytes received from all clients.
+@item @code{Bytes_sent} @tab Number of bytes sent to all clients.
+@item @code{Connections} @tab Number of connection attempts to the @strong{MySQL} server.
+@item @code{Created_tmp_disk_tables} @tab Number of implicit temporary tables on disk created while executing statements.
+@item @code{Created_tmp_tables} @tab Number of implicit temporary tables in memory created while executing statements.
+@item @code{Created_tmp_files} @tab How many temporary files @code{mysqld} has created.
+@item @code{Delayed_insert_threads} @tab Number of delayed insert handler threads in use.
+@item @code{Delayed_writes} @tab Number of rows written with @code{INSERT DELAYED}.
+@item @code{Delayed_errors} @tab Number of rows written with @code{INSERT DELAYED} for which some error occurred (probably @code{duplicate key}).
+@item @code{Flush_commands} @tab Number of executed @code{FLUSH} commands.
+@item @code{Handler_delete} @tab Number of times a row was deleted from a table.
+@item @code{Handler_read_first} @tab Number of times the first entry was read from an index.
+If this is high, it suggests that the server is doing a lot of full index scans, for example,
+@code{SELECT col1 FROM foo}, assuming that col1 is indexed.
+@item @code{Handler_read_key} @tab Number of requests to read a row based on a key. If this
+is high, it is a good indication that your queries and tables are properly indexed.
+@item @code{Handler_read_next} @tab Number of requests to read next row in key order. This
+will be incremented if you are querying an index column with a range constraint. This also
+will be incremented if you are doing an index scan.
+@item @code{Handler_read_rnd} @tab Number of requests to read a row based on a fixed position.
+This will be high if you are doing a lot of queries that require sorting of the result.
+@item @code{Handler_read_rnd_next} @tab Number of requests to read the next row in the datafile.
+This will be high if you are doing a lot of table scans. Generally this suggests that your tables
+are not properly indexed or that your queries are not written to take advantage of the indexes you
+have.
+@item @code{Handler_update} @tab Number of requests to update a row in a table.
+@item @code{Handler_write} @tab Number of requests to insert a row in a table.
+@item @code{Key_blocks_used} @tab The number of used blocks in the key cache.
+@item @code{Key_read_requests} @tab The number of requests to read a key block from the cache.
+@item @code{Key_reads} @tab The number of physical reads of a key block from disk.
+@item @code{Key_write_requests} @tab The number of requests to write a key block to the cache.
+@item @code{Key_writes} @tab The number of physical writes of a key block to disk.
+@item @code{Max_used_connections} @tab The maximum number of connections in use simultaneously.
+@item @code{Not_flushed_key_blocks} @tab Key blocks in the key cache that have changed but haven't yet been flushed to disk.
+@item @code{Not_flushed_delayed_rows} @tab Number of rows waiting to be written in @code{INSERT DELAYED} queues.
+@item @code{Open_tables} @tab Number of tables that are open.
+@item @code{Open_files} @tab Number of files that are open.
+@item @code{Open_streams} @tab Number of streams that are open (used mainly for logging).
+@item @code{Opened_tables} @tab Number of tables that have been opened.
+@item @code{Select_full_join} @tab Number of joins without keys (Should be 0).
+@item @code{Select_full_range_join} @tab Number of joins where we used a range search on the reference table.
+@item @code{Select_range} @tab Number of joins where we used ranges on the first table. (It's normally not critical even if this is big.)
+@item @code{Select_scan} @tab Number of joins where we scanned the first table.
+@item @code{Select_range_check} @tab Number of joins without keys where we check for key usage after each row (Should be 0).
+@item @code{Questions} @tab Number of queries sent to the server.
+@item @code{Slave_open_temp_tables} @tab Number of temporary tables currently
+open by the slave thread.
+@item @code{Slow_launch_threads} @tab Number of threads that have taken more than @code{slow_launch_time} to connect.
+@item @code{Slow_queries} @tab Number of queries that have taken more than @code{long_query_time}. @xref{Slow query log}.
+@item @code{Sort_merge_passes} @tab Number of merges the sort has to do. If this value is large you should consider increasing @code{sort_buffer}.
+@item @code{Sort_range} @tab Number of sorts that were done with ranges.
+@item @code{Sort_rows} @tab Number of sorted rows.
+@item @code{Sort_scan} @tab Number of sorts that were done by scanning the table.
+@item @code{Table_locks_immediate} @tab Number of times a table lock was
+acquired immediately. Available after 3.23.33.
+@item @code{Table_locks_waited} @tab Number of times a table lock could not
+be acquired immediately and a wait was needed. If this is high, and you
+have performance problems, you should first optimize your queries, and then
+either split your table(s) or use replication. Available after 3.23.33.
+@item @code{Threads_cached} @tab Number of threads in the thread cache.
+@item @code{Threads_connected} @tab Number of currently open connections.
+@item @code{Threads_created} @tab Number of threads created to handle connections.
+@item @code{Threads_running} @tab Number of threads that are not sleeping.
+@item @code{Uptime} @tab How many seconds the server has been up.
+@end multitable
+
+Some comments about the above:
+
+@itemize @bullet
@item
-If @code{mysql -u root test} works but @code{mysql -h your_hostname -u root
-test} results in @code{Access denied}, then you may not have the correct name
-for your host in the @code{user} table. A common problem here is that the
-@code{Host} value in the user table entry specifies an unqualified hostname,
-but your system's name resolution routines return a fully qualified domain
-name (or vice-versa). For example, if you have an entry with host
-@code{'tcx'} in the @code{user} table, but your DNS tells @strong{MySQL} that
-your hostname is @code{'tcx.subnet.se'}, the entry will not work. Try adding
-an entry to the @code{user} table that contains the IP number of your host as
-the @code{Host} column value. (Alternatively, you could add an entry to the
-@code{user} table with a @code{Host} value that contains a wild card---for
-example, @code{'tcx.%'}. However, use of hostnames ending with @samp{%} is
-@emph{insecure} and is @emph{not} recommended!)
+If @code{Opened_tables} is big, then your @code{table_cache}
+variable is probably too small.
+@item
+If @code{Key_reads} is big, then your @code{key_buffer_size} variable is
+probably too small. The cache hit rate can be calculated with
+@code{Key_reads}/@code{Key_read_requests} (see the example after this list).
+@item
+If @code{Handler_read_rnd} is big, then you probably have a lot of
+queries that require @strong{MySQL} to scan whole tables or you have
+joins that don't use keys properly.
+@item
+If @code{Threads_created} is big, you may want to increase the
+@code{thread_cache_size} variable.
+@end itemize
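+
+For example, to look at the values needed for the key cache hit rate
+(the numbers shown are only illustrative):
+
+@example
+mysql> SHOW STATUS LIKE 'Key_read%';
++-------------------+----------+
+| Variable_name     | Value    |
++-------------------+----------+
+| Key_read_requests | 96854827 |
+| Key_reads         | 162040   |
++-------------------+----------+
+@end example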
+
+@node SHOW VARIABLES, SHOW LOGS, SHOW STATUS, SHOW
+@subsubsection @code{SHOW VARIABLES}
+
+@example
+SHOW VARIABLES [LIKE wild]
+@end example
+
+@code{SHOW VARIABLES} shows the values of some @strong{MySQL} system
+variables. You can also get this information using the @code{mysqladmin
+variables} command. If the default values are unsuitable, you can set most
+of these variables using command-line options when @code{mysqld} starts up.
+@xref{Command-line options}.
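+
+For example, to look at a single variable from the client or to set one at
+server startup (the value 200 is only illustrative):
+
+@example
+mysql> SHOW VARIABLES LIKE 'max_connections';
+shell> mysqld -O max_connections=200
+@end example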
+
+The output resembles that shown below, though the format and numbers may
+differ somewhat:
+
+@example
++-------------------------+---------------------------+
+| Variable_name | Value |
++-------------------------+---------------------------+
+| ansi_mode | OFF |
+| back_log | 50 |
+| basedir | /my/monty/ |
+| bdb_cache_size | 16777216 |
+| bdb_log_buffer_size | 32768 |
+| bdb_home | /my/monty/data/ |
+| bdb_max_lock | 10000 |
+| bdb_logdir | |
+| bdb_shared_data | OFF |
+| bdb_tmpdir | /tmp/ |
+| binlog_cache_size | 32768 |
+| concurrent_insert | ON |
+| connect_timeout | 5 |
+| datadir | /my/monty/data/ |
+| delay_key_write | ON |
+| delayed_insert_limit | 100 |
+| delayed_insert_timeout | 300 |
+| delayed_queue_size | 1000 |
+| flush | OFF |
+| flush_time | 0 |
+| have_bdb | YES |
+| have_innodb | YES |
+| have_raid | YES |
+| have_ssl | NO |
+| init_file | |
+| interactive_timeout | 28800 |
+| join_buffer_size | 131072 |
+| key_buffer_size | 16776192 |
+| language | /my/monty/share/english/ |
+| large_files_support | ON |
+| log | OFF |
+| log_update | OFF |
+| log_bin | OFF |
+| log_slave_updates | OFF |
+| long_query_time | 10 |
+| low_priority_updates | OFF |
+| lower_case_table_names | 0 |
+| max_allowed_packet | 1048576 |
+| max_binlog_cache_size | 4294967295 |
+| max_connections | 100 |
+| max_connect_errors | 10 |
+| max_delayed_threads | 20 |
+| max_heap_table_size | 16777216 |
+| max_join_size | 4294967295 |
+| max_sort_length | 1024 |
+| max_tmp_tables | 32 |
+| max_write_lock_count | 4294967295 |
+| myisam_recover_options | DEFAULT |
+| myisam_sort_buffer_size | 8388608 |
+| net_buffer_length | 16384 |
+| net_read_timeout | 30 |
+| net_retry_count | 10 |
+| net_write_timeout | 60 |
+| open_files_limit | 0 |
+| pid_file | /my/monty/data/donna.pid |
+| port | 3306 |
+| protocol_version | 10 |
+| record_buffer | 131072 |
+| query_buffer_size | 0 |
+| safe_show_database | OFF |
+| server_id | 0 |
+| skip_locking | ON |
+| skip_networking | OFF |
+| skip_show_database | OFF |
+| slow_launch_time | 2 |
+| socket | /tmp/mysql.sock |
+| sort_buffer | 2097116 |
+| table_cache | 64 |
+| table_type | MYISAM |
+| thread_cache_size | 4 |
+| thread_stack | 65536 |
+| tmp_table_size | 1048576 |
+| tmpdir | /tmp/ |
+| version | 3.23.29a-gamma-debug |
+| wait_timeout | 28800 |
++-------------------------+---------------------------+
+@end example
+
+Each option is described below. Values for buffer sizes, lengths, and stack
+sizes are given in bytes. You can specify values with a suffix of @samp{K}
+or @samp{M} to indicate kilobytes or megabytes. For example, @code{16M}
+indicates 16 megabytes. The case of suffix letters does not matter;
+@code{16M} and @code{16m} are equivalent:
+
+@cindex variables, values
+@table @code
+@item @code{ansi_mode}
+Is @code{ON} if @code{mysqld} was started with @code{--ansi}.
+@xref{ANSI mode}.
+
+@item @code{back_log}
+The number of outstanding connection requests @strong{MySQL} can have. This
+comes into play when the main @strong{MySQL} thread gets @strong{VERY}
+many connection requests in a very short time. It then takes some time
+(although very little) for the main thread to check the connection and start
+a new thread. The @code{back_log} value indicates how many requests can be
+stacked during this short time before @strong{MySQL} momentarily stops
+answering new requests. You need to increase this only if you expect a large
+number of connections in a short period of time.
+
+In other words, this value is the size of the listen queue for incoming
+TCP/IP connections. Your operating system has its own limit on the size
+of this queue. The manual page for the Unix @code{listen(2)} system
+call should have more details. Check your OS documentation for the
+maximum value for this variable. Attempting to set @code{back_log}
+higher than your operating system limit will be ineffective.
+
+@item @code{basedir}
+The value of the @code{--basedir} option.
+
+@item @code{bdb_cache_size}
+The buffer that is allocated to cache index and rows for @code{BDB}
+tables. If you don't use @code{BDB} tables, you should start
+@code{mysqld} with @code{--skip-bdb} to not waste memory for this
+cache.
+
+@item @code{bdb_log_buffer_size}
+The size of the buffer that is allocated for the @code{BDB} transaction
+log. If you don't use @code{BDB} tables, you should set this to 0 or
+start @code{mysqld} with @code{--skip-bdb} to not waste memory for this
+buffer.
+
+@item @code{bdb_home}
+The value of the @code{--bdb-home} option.
+
+@item @code{bdb_max_lock}
+The maximum number of locks (1000 by default) you can have active on a
+BDB table. You should increase this if you get errors of type @code{bdb:
+Lock table is out of available locks} or @code{Got error 12 from ...}
+when you have long transactions or when @code{mysqld} has to examine
+a lot of rows to calculate the query.
+
+@item @code{bdb_logdir}
+The value of the @code{--bdb-logdir} option.
+
+@item @code{bdb_shared_data}
+Is @code{ON} if you are using @code{--bdb-shared-data}.
+
+@item @code{bdb_tmpdir}
+The value of the @code{--bdb-tmpdir} option.
+
+@item @code{binlog_cache_size}
+The size of the cache to hold the SQL
+statements for the binary log during a transaction. If you often use
+big, multi-statement transactions you can increase this to get more
+performance. @xref{COMMIT}.
+
+@item @code{character_set}
+The default character set.
+
+@item @code{character_sets}
+The supported character sets.
+
+@item @code{concurrent_insert}
+If @code{ON} (the default), @strong{MySQL} will allow you to use @code{INSERT}
+on @code{MyISAM} tables at the same time as you run @code{SELECT} queries
+on them. You can turn this option off by starting @code{mysqld} with @code{--safe}
+or @code{--skip-new}.
+
+@cindex timeout
+@item @code{connect_timeout}
+The number of seconds the @code{mysqld} server is waiting for a connect
+packet before responding with @code{Bad handshake}.
+
+@item @code{datadir}
+The value of the @code{--datadir} option.
+
+@item @code{delay_key_write}
+If enabled (the default), @strong{MySQL} will honor the
+@code{delay_key_write} option for @code{CREATE TABLE}. This means that the
+key buffer for tables with this option will not get flushed on every
+index update, but only when a table is closed. This will speed up
+writes on keys a lot, but you should add automatic checking of all tables
+with @code{myisamchk --fast --force} if you use this. Note that if you
+start @code{mysqld} with the @code{--delay-key-write-for-all-tables}
+option this means that all tables will be treated as if they were
+created with the @code{delay_key_write} option. You can clear this flag
+by starting @code{mysqld} with @code{--skip-new} or @code{--safe-mode}.
+
+@item @code{delayed_insert_limit}
+After inserting @code{delayed_insert_limit} rows, the @code{INSERT
+DELAYED} handler will check if there are any @code{SELECT} statements
+pending. If so, it allows these to execute before continuing.
+
+@item @code{delayed_insert_timeout}
+How long a @code{INSERT DELAYED} thread should wait for @code{INSERT}
+statements before terminating.
+
+@item @code{delayed_queue_size}
+What size queue (in rows) should be allocated for handling @code{INSERT
+DELAYED}. If the queue becomes full, any client that does @code{INSERT
+DELAYED} will wait until there is room in the queue again.
+
+@item @code{flush}
+This is @code{ON} if you have started @strong{MySQL} with the @code{--flush}
+option.
+
+@item @code{flush_time}
+If this is set to a non-zero value, then every @code{flush_time} seconds all
+tables will be closed (to free up resources and sync things to disk). We
+only recommend this option on Win95, Win98, or on systems where you have
+very few resources.
+
+@item @code{have_bdb}
+@code{YES} if @code{mysqld} supports Berkeley DB tables. @code{DISABLED}
+if @code{--skip-bdb} is used.
+@item @code{have_innodb}
+@code{YES} if @code{mysqld} supports InnoDB tables. @code{DISABLED}
+if @code{--skip-innodb} is used.
+@item @code{have_raid}
+@code{YES} if @code{mysqld} supports the @code{RAID} option.
+@item @code{have_ssl}
+@code{YES} if @code{mysqld} supports SSL (encryption) on the client/server
+protocol.
+
+@item @code{init_file}
+The name of the file specified with the @code{--init-file} option when
+you start the server. This is a file of SQL statements you want the
+server to execute when it starts.
+
+@item @code{interactive_timeout}
+The number of seconds the server waits for activity on an interactive
+connection before closing it. An interactive client is defined as a
+client that uses the @code{CLIENT_INTERACTIVE} option to
+@code{mysql_real_connect()}. See also @code{wait_timeout}.
+
+@item @code{join_buffer_size}
+The size of the buffer that is used for full joins (joins that do not
+use indexes). The buffer is allocated one time for each full join
+between two tables. Increase this value to get a faster full join when
+adding indexes is not possible. (Normally the best way to get fast joins
+is to add indexes.)
+
+@c Make texi2html support index @anchor{Index cache size}. Then change
+@c some xrefs to point here
+@cindex indexes, block size
+@item @code{key_buffer_size}
+Index blocks are buffered and are shared by all threads.
+@code{key_buffer_size} is the size of the buffer used for index blocks.
+
+Increase this to get better index handling (for all reads and multiple
+writes) to as much as you can afford; 64M on a 256M machine that mainly
+runs @strong{MySQL} is quite common. If you, however, make this too big
+(more than 50% of your total memory?) your system may start to page and
+become REALLY slow. Remember that because @strong{MySQL} does not cache
+data reads, you will have to leave some room for the OS filesystem
+cache.
+
+You can check the performance of the key buffer by doing
+@code{SHOW STATUS} and examining the variables @code{Key_read_requests},
+@code{Key_reads}, @code{Key_write_requests}, and @code{Key_writes}. The
+@code{Key_reads/Key_read_requests} ratio should normally be < 0.01.
+The @code{Key_writes/Key_write_requests} ratio is usually near 1 if you
+are using mostly updates/deletes, but may be much smaller if you tend to
+do updates that affect many keys at the same time or if you are
+using @code{delay_key_write}. @xref{SHOW}.
+
+To get even more speed when writing many rows at the same time, use
+@code{LOCK TABLES}. @xref{LOCK TABLES, , @code{LOCK TABLES}}.
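+
+For example, one way to look at the key cache counters mentioned above
+from the shell (the ratios must then be computed by hand):
+
+@example
+shell> mysqladmin extended-status | grep -i key_
+@end example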
+
+@item @code{language}
+The language used for error messages.
+
+@item @code{large_file_support}
+If @code{mysqld} was compiled with options for big file support.
+
+@item @code{locked_in_memory}
+If @code{mysqld} was locked in memory with @code{--memlock}
+
+@item @code{log}
+If logging of all queries is enabled.
+
+@item @code{log_update}
+If the update log is enabled.
+
+@item @code{log_bin}
+If the binary log is enabled.
+
+@item @code{log_slave_updates}
+If the updates from the slave should be logged.
+
+@item @code{long_query_time}
+If a query takes longer than this (in seconds), the @code{Slow_queries} counter
+will be incremented. If you are using @code{--log-slow-queries}, the query
+will be logged to the slow query logfile. @xref{Slow query log}.
+
+@item @code{lower_case_table_names}
+If set to 1, table names are stored in lowercase on disk. This enables
+you to access table names case-insensitively on Unix as well.
+@xref{Name case sensitivity}.
+
+@item @code{max_allowed_packet}
+The maximum size of one packet. The message buffer is initialized to
+@code{net_buffer_length} bytes, but can grow up to @code{max_allowed_packet}
+bytes when needed. This value by default is small, to catch big (possibly
+wrong) packets. You must increase this value if you are using big
+@code{BLOB} columns. It should be as big as the biggest @code{BLOB} you want
+to use. The current protocol limits @code{max_allowed_packet} to 16M.
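+
+For example, to allow packets up to the protocol maximum, you could start
+the server like this (the value is only an illustration):
+
+@example
+shell> safe_mysqld -O max_allowed_packet=16M &
+@end example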
+
+@item @code{max_binlog_cache_size}
+If a multi-statement transaction requires more than this amount of memory,
+one will get the error "Multi-statement transaction required more than
+'max_binlog_cache_size' bytes of storage".
+
+@item @code{max_binlog_size}
+Available after 3.23.33. If a write to the binary (replication) log exceeds
+the given value, rotate the logs. You cannot set it to less than 1024 bytes,
+or more than 1 GB. Default is 1 GB.
+
+@item @code{max_connections}
+The number of simultaneous clients allowed. Increasing this value increases
+the number of file descriptors that @code{mysqld} requires. See below for
+comments on file descriptor limits. @xref{Too many connections}.
+
+@item @code{max_connect_errors}
+If there are more than this number of interrupted connections from a host,
+that host will be blocked from further connections. You can unblock a host
+with the command @code{FLUSH HOSTS}.
+
+@item @code{max_delayed_threads}
+Don't start more than this number of threads to handle @code{INSERT DELAYED}
+statements. If you try to insert data into a new table after all @code{INSERT
+DELAYED} threads are in use, the row will be inserted as if the
+@code{DELAYED} attribute wasn't specified.
+
+@item @code{max_heap_table_size}
+Don't allow creation of heap tables bigger than this.
+
+@item @code{max_join_size}
+Joins that are probably going to read more than @code{max_join_size}
+records return an error. Set this value if your users tend to perform joins
+that lack a @code{WHERE} clause, that take a long time, and that return
+millions of rows.
+
+@item @code{max_sort_length}
+The number of bytes to use when sorting @code{BLOB} or @code{TEXT}
+values (only the first @code{max_sort_length} bytes of each value
+are used; the rest are ignored).
+
+@item @code{max_user_connections}
+The maximum number of active connections for a single user (0 = no limit).
+
+@item @code{max_tmp_tables}
+(This option doesn't yet do anything.)
+Maximum number of temporary tables a client can keep open at the same time.
+
+@item @code{max_write_lock_count}
+After this many write locks, allow some read locks to run in between.
+
+@item @code{myisam_recover_options}
+The value of the @code{--myisam-recover} option.
+
+@item @code{myisam_sort_buffer_size}
+The buffer that is allocated when sorting the index when doing a
+@code{REPAIR} or when creating indexes with @code{CREATE INDEX} or
+@code{ALTER TABLE}.
+
+@item @code{myisam_max_extra_sort_file_size}
+If the temporary file used for fast index creation would be this much
+bigger than using the key cache, then prefer the key cache method. This
+is mainly used to force long character keys in large tables to use the
+slower key cache method to create the index.
+@strong{NOTE} that this parameter is given in megabytes!
+
+@item @code{myisam_max_sort_file_size}
+The maximum size of the temporary file @strong{MySQL} is allowed to use
+while recreating the index (during @code{REPAIR}, @code{ALTER TABLE},
+or @code{LOAD DATA INFILE}). If the file size would be bigger than this,
+the index will be created through the key cache (which is slower).
+@strong{NOTE} that this parameter is given in megabytes!
+
+@item @code{net_buffer_length}
+The communication buffer is reset to this size between queries. This
+should not normally be changed, but if you have very little memory, you
+can set it to the expected size of a query. (That is, the expected length of
+SQL statements sent by clients. If statements exceed this length, the buffer
+is automatically enlarged, up to @code{max_allowed_packet} bytes.)
+
+@item @code{net_read_timeout}
+Number of seconds to wait for more data from a connection before aborting
+the read. Note that when we don't expect data from a connection, the timeout
+is defined by @code{write_timeout}. See also @code{slave_read_timeout}.
+
+@item @code{net_retry_count}
+If a read on a communication port is interrupted, retry this many times
+before giving up. This value should be quite high on @code{FreeBSD} as
+internal interrupts are sent to all threads.
+
+@item @code{net_write_timeout}
+Number of seconds to wait for a block to be written to a connection before
+aborting the write.
+
+@item @code{open_files_limit}
+If this is not 0, then @code{mysqld} will use this value to reserve file
+descriptors to use with @code{setrlimit()}. If this value is 0 then
+@code{mysqld} will reserve @code{max_connections*5} or
+@code{max_connections + table_cache*2} (whichever is larger) file
+descriptors. You should try increasing this if @code{mysqld} gives you the
+error 'Too many open files'.
+
+@item @code{pid_file}
+The value of the @code{--pid-file} option.
+
+@item @code{port}
+The value of the @code{--port} option.
+
+@item @code{protocol_version}
+The protocol version used by the @strong{MySQL} server.
+
+@item @code{record_buffer}
+Each thread that does a sequential scan allocates a buffer of this
+size for each table it scans. If you do many sequential scans, you may
+want to increase this value.
+
+@item @code{query_buffer_size}
+The initial allocation of the query buffer. If most of your queries are
+long (like when inserting blobs), you should increase this!
+
+@item @code{safe_show_databases}
+Don't show databases for which the user doesn't have any database or
+table privileges. This can improve security if you're concerned about
+people being able to see what databases other users have. See also
+@code{skip_show_databases}.
+
+@item @code{server_id}
+The value of the @code{--server-id} option.
+
+@item @code{skip_locking}
+Is OFF if @code{mysqld} uses external locking.
+
+@item @code{skip_networking}
+Is ON if we only allow local (socket) connections.
+
+@item @code{skip_show_databases}
+This prevents people from doing @code{SHOW DATABASES} if they don't have
+the @code{PROCESS_PRIV} privilege. This can improve security if you're
+concerned about people being able to see what databases other users
+have. See also @code{safe_show_databases}.
+
+@item @code{slave_read_timeout}
+Number of seconds to wait for more data from a master/slave connection
+before aborting the read.
+
+@item @code{slow_launch_time}
+If creating the thread takes longer than this value (in seconds), the
+@code{Slow_launch_threads} counter will be incremented.
+
+@item @code{socket}
+The Unix socket used by the server.
+
+@item @code{sort_buffer}
+Each thread that needs to do a sort allocates a buffer of this
+size. Increase this value for faster @code{ORDER BY} or @code{GROUP BY}
+operations.
+@xref{Temporary files}.
+
+@item @code{table_cache}
+The number of open tables for all threads. Increasing this value
+increases the number of file descriptors that @code{mysqld} requires.
+@strong{MySQL} needs two file descriptors for each unique open table.
+See below for comments on file descriptor limits. You can check if you
+need to increase the table cache by checking the @code{Opened_tables}
+variable. @xref{SHOW}. If this variable is big and you don't do
+@code{FLUSH TABLES} a lot (which just forces all tables to be closed and
+reopened), then you should increase the value of this variable.
+
+Make sure that your operating system can handle the number of open file
+descriptors implied by the @code{table_cache} setting. If @code{table_cache}
+is set too high, @strong{MySQL} may run out of file descriptors and refuse
+connections, fail to perform queries, and be very unreliable.
+
+For information about how the table cache works, see @ref{Table cache}.
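+
+For example, a quick way to watch @code{Opened_tables} from the shell:
+
+@example
+shell> mysqladmin extended-status | grep -i opened_tables
+@end example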
+
+@item @code{table_type}
+The default table type.
+
+@item @code{thread_cache_size}
+How many threads we should keep in a cache for reuse. When a
+client disconnects, the client's threads are put in the cache if there
+aren't more than @code{thread_cache_size} threads from before. All new
+threads are first taken from the cache, and only when the cache is empty
+is a new thread created. This variable can be increased to improve
+performance if you have a lot of new connections. (Normally this doesn't
+give a notable performance improvement if you have a good
+thread implementation.) By examining the difference between
+the @code{Connections} and @code{Threads_created} status variables, you
+can see how efficient the current thread cache is for you.
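+
+For example, one way to compare the two counters from the shell:
+
+@example
+shell> mysqladmin extended-status | egrep 'Connections|Threads_created'
+@end example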
+
+@item @code{thread_concurrency}
+On Solaris, @code{mysqld} will call @code{thr_setconcurrency()} with
+this value. @code{thr_setconcurrency()} permits the application to give
+the threads system a hint for the desired number of threads that should
+be run at the same time.
+
+@item @code{thread_stack}
+The stack size for each thread. Many of the limits detected by the
+@code{crash-me} test are dependent on this value. The default is
+large enough for normal operation. @xref{MySQL Benchmarks}.
+
+@item @code{timezone}
+The timezone for the server.
+
+@item @code{tmp_table_size}
+If an in-memory temporary table exceeds this size, @strong{MySQL}
+will automatically convert it to an on-disk @code{MyISAM} table.
+Increase the value of @code{tmp_table_size} if you do many advanced
+@code{GROUP BY} queries and you have lots of memory.
+
+@item @code{tmpdir}
+The directory used for temporary files and temporary tables.
+
+@item @code{version}
+The version number for the server.
+
+@item @code{wait_timeout}
+The number of seconds the server waits for activity on a connection before
+closing it. See also @code{interactive_timeout}.
+@end table
+
+The manual section that describes tuning @strong{MySQL} contains some
+information of how to tune the above variables. @xref{Server parameters}.
+
+
+@node SHOW LOGS, SHOW PROCESSLIST, SHOW VARIABLES, SHOW
+@subsubsection @code{SHOW LOGS}
+
+@code{SHOW LOGS} shows you status information about existing log
+files. It currently only displays information about Berkeley DB log
+files.
+
+@itemize @bullet
+@item @code{File} shows the full path to the log file
+@item @code{Type} shows the type of the log file (@code{BDB} for Berkeley
+DB log files)
+@item @code{Status} shows the status of the log file (@code{FREE} if the
+file can be removed, or @code{IN USE} if the file is needed by the transaction
+subsystem)
+@end itemize
+
+
+@node SHOW PROCESSLIST, SHOW GRANTS, SHOW LOGS, SHOW
+@subsubsection @code{SHOW PROCESSLIST}
+
+@findex threads
+@findex PROCESSLIST
+
+@cindex threads, display
+@cindex processes, display
+
+@code{SHOW PROCESSLIST} shows you which threads are running. You can
+also get this information using the @code{mysqladmin processlist}
+command. If you have the @strong{process} privilege, you can see all
+threads. Otherwise, you can see only your own threads. @xref{KILL, ,
+@code{KILL}}. If you don't use the @code{FULL} option, then only
+the first 100 characters of each query will be shown.
+
+This command is very useful if you get the 'too many connections' error
+message and want to find out what's going on. @strong{MySQL} reserves
+one extra connection for a client with the @code{Process_priv} privilege
+to ensure that you are always able to log in and check the system
+(assuming you are not giving this privilege to all your users).
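+
+For example, either of the following shows the process list from the
+shell; the second form includes the full query text:
+
+@example
+shell> mysqladmin processlist
+shell> mysql -e "SHOW FULL PROCESSLIST"
+@end example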
+
+
+@node SHOW GRANTS, SHOW CREATE TABLE, SHOW PROCESSLIST, SHOW
+@subsubsection @code{SHOW GRANTS}
+
+@cindex privileges, display
+
+@code{SHOW GRANTS FOR user} lists the grant commands that must be issued to
+duplicate the grants for a user.
+
+@example
+mysql> SHOW GRANTS FOR root@@localhost;
++---------------------------------------------------------------------+
+| Grants for root@@localhost |
++---------------------------------------------------------------------+
+| GRANT ALL PRIVILEGES ON *.* TO 'root'@@'localhost' WITH GRANT OPTION |
++---------------------------------------------------------------------+
+@end example
+
+
+@node SHOW CREATE TABLE, , SHOW GRANTS, SHOW
+@subsubsection @code{SHOW CREATE TABLE}
+
+Shows a @code{CREATE TABLE} statement that will create the given table:
+
+@example
+mysql> show create table t\G
+*************************** 1. row ***************************
+ Table: t
+Create Table: CREATE TABLE t (
+ id int(11) default NULL auto_increment,
+ s char(60) default NULL,
+ PRIMARY KEY (id)
+) TYPE=MyISAM
+
+@end example
+
+@code{SHOW CREATE TABLE} will quote table and column names according to
+the @code{SQL_QUOTE_SHOW_CREATE} option.
+@ref{SET OPTION, , @code{SET OPTION SQL_QUOTE_SHOW_CREATE}}.
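+
+For example, a small sketch of a batch run with identifier quoting
+turned on (@code{db_name} and @code{t} are placeholders for your own
+database and table):
+
+@example
+shell> mysql db_name -e "SET OPTION SQL_QUOTE_SHOW_CREATE=1; SHOW CREATE TABLE t"
+@end example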
+
+
+@node Localization, Server-Side Scripts, Database Administration, MySQL Database Administration
+@section MySQL Localization and International Usage
+
+@menu
+* Character sets::
+* Languages::
+* Adding character set::
+* Character arrays::
+* String collating::
+* Multi-byte characters::
+@end menu
+
+
+@node Character sets, Languages, Localization, Localization
+@subsection The Character Set Used for Data and Sorting
+
+@cindex character sets
+@cindex data, character sets
+@cindex sorting, character sets
+
+By default, @strong{MySQL} uses the ISO-8859-1 (Latin1) character set
+with sorting according to Swedish/Finnish rules. This character set is
+suitable for the USA and western Europe.
+
+All standard @strong{MySQL} binaries are compiled with
+@code{--with-extra-charsets=complex}. This will add code to all
+standard programs to be able to handle @code{latin1} and all multi-byte
+character sets within the binary. Other character sets will be
+loaded from a character-set definition file when needed.
+
+The character set determines what characters are allowed in names and how
+things are sorted by the @code{ORDER BY} and @code{GROUP BY} clauses of
+the @code{SELECT} statement.
+
+You can change the character set with the @code{--default-character-set}
+option when you start the server. The character sets available depend
+on the @code{--with-charset=charset} and
+@code{--with-extra-charsets=list-of-charsets | complex | all} options
+to @code{configure}, and the character set configuration files listed in
+@file{SHAREDIR/charsets/Index}. @xref{configure options}.
+
+If you change the character set when running @strong{MySQL} (which may
+also change the sort order), you must run @code{myisamchk -r -q} on all
+tables. Otherwise your indexes may not be ordered correctly.
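+
+For example, with the server shut down and @file{/usr/local/mysql/var}
+standing in for your own data directory:
+
+@example
+shell> myisamchk -r -q /usr/local/mysql/var/*/*.MYI
+@end example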
+
+When a client connects to a @strong{MySQL} server, the server sends the
+default character set in use to the client. The client will switch to
+use this character set for this connection.
+
+One should use @code{mysql_real_escape_string()} when escaping strings
+for a SQL query. @code{mysql_real_escape_string()} is identical to the
+old @code{mysql_escape_string()} function, except that it takes the MYSQL
+connection handle as the first parameter.
+
+If the client is compiled with different paths than where the server is
+installed and the user who configured @strong{MySQL} didn't include all
+character sets in the @strong{MySQL} binary, one must tell the client
+where it can find the additional character sets it will need if the
+server runs with a different character set than the client.
+
+One can specify this by putting in a @strong{MySQL} option file:
+
+@example
+[client]
+character-sets-dir=/usr/local/mysql/share/mysql/charsets
+@end example
+
+where the path points to where the dynamic @strong{MySQL} character sets
+are stored.
+
+One can force the client to use a specific character set by specifying:
+
+@example
+[client]
+default-character-set=character-set-name
+@end example
+
+but normally this is never needed.
+
+
+@node Languages, Adding character set, Character sets, Localization
+@subsection Non-English Error Messages
+
+@cindex error messages, languages
+@cindex messages, languages
+@cindex files, error messages
+@cindex language support
+
+@code{mysqld} can issue error messages in the following languages:
+Czech, Danish, Dutch, English (the default), Estonian, French, German, Greek,
+Hungarian, Italian, Japanese, Korean, Norwegian, Norwegian-ny, Polish,
+Portuguese, Romanian, Russian, Slovak, Spanish, and Swedish.
+
+To start @code{mysqld} with a particular language, use either the
+@code{--language=lang} or @code{-L lang} options. For example:
+
+@example
+shell> mysqld --language=swedish
+@end example
+
+or:
+
+@example
+shell> mysqld --language=/usr/local/share/swedish
+@end example
+
+Note that all language names are specified in lowercase.
+
+The language files are located (by default) in
+@file{@var{mysql_base_dir}/share/@var{LANGUAGE}/}.
+
+To update the error message file, you should edit the @file{errmsg.txt} file
+and execute the following command to generate the @file{errmsg.sys} file:
+
+@example
+shell> comp_err errmsg.txt errmsg.sys
+@end example
+
+If you upgrade to a newer version of @strong{MySQL}, remember to repeat
+your changes with the new @file{errmsg.txt} file.
+
+
+@node Adding character set, Character arrays, Languages, Localization
+@subsection Adding a New Character Set
+
+@cindex character sets, adding
+@cindex adding, character sets
+
+To add another character set to @strong{MySQL}, use the following procedure.
+
+Decide if the set is simple or complex. If the character set
+does not need to use special string collating routines for
+sorting and does not need multi-byte character support, it is
+simple. If it needs either of those features, it is complex.
+
+For example, @code{latin1} and @code{danish} are simple character sets, while
+@code{big5} or @code{czech} are complex character sets.
+
+In the following section, we have assumed that you name your character
+set @code{MYSET}.
+
+For a simple character set do the following:
+
+@enumerate
@item
-If @code{mysql -u user_name test} works but @code{mysql -u user_name
-other_db_name} doesn't work, you don't have an entry for @code{other_db_name}
-listed in the @code{db} table.
+Add MYSET to the end of the @file{sql/share/charsets/Index} file and
+assign a unique number to it.
@item
-If @code{mysql -u user_name db_name} works when executed on the server
-machine, but @code{mysql -u host_name -u user_name db_name} doesn't work when
-executed on another client machine, you don't have the client machine listed
-in the @code{user} table or the @code{db} table.
+Create the file @file{sql/share/charsets/MYSET.conf}.
+(You can use @file{sql/share/charsets/latin1.conf} as a base for this).
+
+The syntax for the file is very simple:
+@itemize @bullet
@item
-If you can't figure out why you get @code{Access denied}, remove from the
-@code{user} table all entries that have @code{Host} values containing
-wild cards (entries that contain @samp{%} or @samp{_}). A very common error
-is to insert a new entry with @code{Host}=@code{'%'} and
-@code{User}=@code{'some user'}, thinking that this will allow you to specify
-@code{localhost} to connect from the same machine. The reason that this
-doesn't work is that the default privileges include an entry with
-@code{Host}=@code{'localhost'} and @code{User}=@code{''}. Because that entry
-has a @code{Host} value @code{'localhost'} that is more specific than
-@code{'%'}, it is used in preference to the new entry when connecting from
-@code{localhost}! The correct procedure is to insert a second entry with
-@code{Host}=@code{'localhost'} and @code{User}=@code{'some_user'}, or to
-remove the entry with @code{Host}=@code{'localhost'} and
-@code{User}=@code{''}.
+Comments start with a '#' character and proceed to the end of the line.
+@item
+Words are separated by arbitrary amounts of whitespace.
+@item
+When defining the character set, every word must be a number in
+hexadecimal format.
+@item
+The @code{ctype} array takes up the first 257 words. The
+@code{to_lower}, @code{to_upper} and @code{sort_order} arrays take up
+256 words each after that.
+@end itemize
+
+@xref{Character arrays}.
@item
-If you get the following error, you may have a problem with the @code{db} or
-@code{host} table:
+Add the character set name to the @code{CHARSETS_AVAILABLE} and
+@code{COMPILED_CHARSETS} lists in @code{configure.in}.
+
+@item
+Reconfigure, recompile, and test.
+
+@end enumerate
+
+For a complex character set do the following:
+
+@enumerate
+@item
+Create the file @file{strings/ctype-MYSET.c} in the @strong{MySQL} source
+distribution.
+
+@item
+Add MYSET to the end of the @file{sql/share/charsets/Index} file.
+Assign a unique number to it.
+
+@item
+Look at one of the existing @file{ctype-*.c} files to see what needs to
+be defined, for example @file{strings/ctype-big5.c}. Note that the
+arrays in your file must have names like @code{ctype_MYSET},
+@code{to_lower_MYSET}, and so on. These correspond to the arrays
+for a simple character set. @xref{Character arrays}.
+
+@item
+Near the top of the file, place a special comment like this:
@example
-Access to database denied
+/*
+ * This comment is parsed by configure to create ctype.c,
+ * so don't change it unless you know what you are doing.
+ *
+ * .configure. number_MYSET=MYNUMBER
+ * .configure. strxfrm_multiply_MYSET=N
+ * .configure. mbmaxlen_MYSET=N
+ */
@end example
-If the entry selected from the @code{db} table has an empty value in the
-@code{Host} column, make sure there are one or more corresponding entries in
-the @code{host} table specifying which hosts the @code{db} table entry
-applies to.
+The @code{configure} program uses this comment to include
+the character set into the @strong{MySQL} library automatically.
-If you get the error when using the SQL commands @code{SELECT ...
-INTO OUTFILE} or @code{LOAD DATA INFILE}, your entry in the @code{user} table
-probably doesn't have the @strong{file} privilege enabled.
+The @code{strxfrm_multiply} and @code{mbmaxlen} lines will be explained
+in the following sections. Only include them if you need the string
+collating functions or the multi-byte character set functions,
+respectively.
@item
-@cindex configuration files
+You should then create some of the following functions:
+
+@itemize @bullet
+@item @code{my_strncoll_MYSET()}
+@item @code{my_strcoll_MYSET()}
+@item @code{my_strxfrm_MYSET()}
+@item @code{my_like_range_MYSET()}
+@end itemize
+
+@xref{String collating}.
+
+@item
+Add the character set name to the @code{CHARSETS_AVAILABLE} and
+@code{COMPILED_CHARSETS} lists in @code{configure.in}.
+
+@item
+Reconfigure, recompile, and test.
+@end enumerate
+
+The file @file{sql/share/charsets/README} includes some more instructions.
+
+If you want to have the character set included in the @strong{MySQL}
+distribution, mail a patch to @email{internals@@lists.mysql.com}.
+
+
+@node Character arrays, String collating, Adding character set, Localization
+@subsection The Character Definition Arrays
+
+@code{to_lower[]} and @code{to_upper[]} are simple arrays that hold the
+lowercase and uppercase characters corresponding to each member of the
+character set. For example:
+
+@example
+to_lower['A'] should contain 'a'
+to_upper['a'] should contain 'A'
+@end example
+
+@code{sort_order[]} is a map indicating how characters should be ordered for
+comparison and sorting purposes. For many character sets, this is the same as
+@code{to_upper[]} (which means sorting will be case insensitive).
+@strong{MySQL} will sort characters based on the value of
+@code{sort_order[character]}. For more complicated sorting rules, see
+the discussion of string collating below. @xref{String collating}.
+
+@code{ctype[]} is an array of bit values, with one element for one character.
+(Note that @code{to_lower[]}, @code{to_upper[]}, and @code{sort_order[]}
+are indexed by character value, but @code{ctype[]} is indexed by character
+value + 1. This is an old convention that makes it possible to handle EOF.)
+
+You can find the following bitmask definitions in @file{m_ctype.h}:
+
+@example
+#define _U 01 /* Uppercase */
+#define _L 02 /* Lowercase */
+#define _N 04 /* Numeral (digit) */
+#define _S 010 /* Spacing character */
+#define _P 020 /* Punctuation */
+#define _C 040 /* Control character */
+#define _B 0100 /* Blank */
+#define _X 0200 /* heXadecimal digit */
+@end example
+
+The @code{ctype[]} entry for each character should be the union of the
+applicable bitmask values that describe the character. For example,
+@code{'A'} is an uppercase character (@code{_U}) as well as a
+hexadecimal digit (@code{_X}), so @code{ctype['A'+1]} should contain the
+value:
+
+@example
+_U + _X = 01 + 0200 = 0201
+@end example
+
+
+@node String collating, Multi-byte characters, Character arrays, Localization
+@subsection String Collating Support
+
+@cindex collating, strings
+@cindex string collating
+
+If the sorting rules for your language are too complex to be handled
+with the simple @code{sort_order[]} table, you need to use the string
+collating functions.
+
+Right now the best documentation on this is the character sets that are
+already implemented. Look at the big5, czech, gbk, sjis, and tis620
+character sets for examples.
+
+You must specify the @code{strxfrm_multiply_MYSET=N} value in the
+special comment at the top of the file. @code{N} should be set to
+the maximum ratio the strings may grow during @code{my_strxfrm_MYSET} (it
+must be a positive integer).
+
+
+@node Multi-byte characters, , String collating, Localization
+@subsection Multi-byte Character Support
+
+@cindex characters, multi-byte
+@cindex multi-byte characters
+
+If you want to add support for a new character set that includes
+multi-byte characters, you need to use the multi-byte character
+functions.
+
+Right now the best documentation on this is the character sets that are
+already implemented. Look at the euc_kr, gb2312, gbk, sjis and ujis
+character sets for examples. These are implemented in the
+@file{ctype-'charset'.c} files in the @file{strings} directory.
+
+You must specify the @code{mbmaxlen_MYSET=N} value in the special
+comment at the top of the source file. @code{N} should be set to the
+size in bytes of the largest character in the set.
+
+
+@node Server-Side Scripts, Client-Side Scripts, Localization, MySQL Database Administration
+@section MySQL Server-Side Scripts and Utilities
+
+@menu
+* Server-Side Overview::
+* safe_mysqld::
+* mysqld_multi::
+* myisampack::
+* mysqld-max::
+@end menu
+
+
+@node Server-Side Overview, safe_mysqld, Server-Side Scripts, Server-Side Scripts
+@subsection Overview of the Server-Side Scripts and Utilities
+
@cindex environment variables
-@tindex .my.cnf file
-Remember that client programs will use connection parameters specified
-in configuration files or environment variables. @xref{Environment
-variables}. If a client seems to be sending the wrong default
-connection parameters when you don't specify them on the command line,
-check your environment and the @file{.my.cnf} file in your home
-directory. You might also check the system-wide @strong{MySQL}
-configuration files, though it is far less likely that client connection
-parameters will be specified there. @xref{Option files}. If you get
-@code{Access denied} when you run a client without any options, make
-sure you haven't specified an old password in any of your option files!
+@cindex programs, list of
+
+All @strong{MySQL} clients that communicate with the server using the
+@code{mysqlclient} library use the following environment variables:
+
+@tindex MYSQL_UNIX_PORT environment variable
+@tindex Environment variable, MYSQL_UNIX_PORT
+@tindex MYSQL_TCP_PORT environment variable
+@tindex Environment variable, MYSQL_TCP_PORT
+@tindex MYSQL_PWD environment variable
+@tindex Environment variable, MYSQL_PWD
+@tindex MYSQL_DEBUG environment variable
+@tindex Environment variable, MYSQL_DEBUG
+@multitable @columnfractions .25 .75
+@item @strong{Name} @tab @strong{Description}
+@item @code{MYSQL_UNIX_PORT} @tab The default socket; used for connections to @code{localhost}
+@item @code{MYSQL_TCP_PORT} @tab The default TCP/IP port
+@item @code{MYSQL_PWD} @tab The default password
+@item @code{MYSQL_DEBUG} @tab Debug-trace options when debugging
+@item @code{TMPDIR} @tab The directory where temporary tables/files are created
+@end multitable
+
+Use of @code{MYSQL_PWD} is insecure.
+@xref{Connecting}.
+
+@tindex MYSQL_HISTFILE environment variable
+@tindex Environment variable, MYSQL_HISTFILE
+@tindex HOME environment variable
+@tindex Environment variable, HOME
+@cindex history file
+@cindex command line history
+@tindex .mysql_history file
+The @file{mysql} client uses the file named in the @code{MYSQL_HISTFILE}
+environment variable to save the command-line history. The default value for
+the history file is @file{$HOME/.mysql_history}, where @code{$HOME} is the
+value of the @code{HOME} environment variable. @xref{Environment variables}.
+
+All @strong{MySQL} programs take many different options. However, every
+@strong{MySQL} program provides a @code{--help} option that you can use
+to get a full description of the program's different options. For example, try
+@code{mysql --help}.
+
+You can override default options for all standard client programs with an
+option file. @ref{Option files}.
+
+The list below briefly describes the @strong{MySQL} programs:
+
+@table @code
+
+@cindex @code{myisamchk}
+@item myisamchk
+Utility to describe, check, optimize, and repair @strong{MySQL} tables.
+Because @code{myisamchk} has many functions, it is described in its own
+chapter. @xref{Maintenance}.
+
+@cindex @code{make_binary_distribution}
+@item make_binary_distribution
+Makes a binary release of a compiled @strong{MySQL}. This could be sent
+by FTP to @file{/pub/mysql/Incoming} on @code{support.mysql.com} for the
+convenience of other @strong{MySQL} users.
+
+@cindex @code{msql2mysql}
+@item msql2mysql
+A shell script that converts @code{mSQL} programs to @strong{MySQL}. It doesn't
+handle all cases, but it gives a good start when converting.
+
+@cindex @code{mysqlaccess}
+@item mysqlaccess
+A script that checks the access privileges for a host, user, and database
+combination.
+
+@cindex @code{mysqladmin}
+@item mysqladmin
+Utility for performing administrative operations, such as creating or
+dropping databases, reloading the grant tables, flushing tables to disk, and
+reopening log files. @code{mysqladmin} can also be used to retrieve version,
+process, and status information from the server.
+@xref{mysqladmin, , @code{mysqladmin}}.
+
+@cindex @code{mysqlbug}
+@item mysqlbug
+The @strong{MySQL} bug report script. This script should always be used when
+filing a bug report to the @strong{MySQL} list.
+
+@cindex @code{mysqld}
+@item mysqld
+The SQL daemon. This should always be running.
+
+@cindex @code{mysqldump}
+@item mysqldump
+Dumps a @strong{MySQL} database into a file as SQL statements or
+as tab-separated text files. Enhanced freeware originally by Igor Romanenko.
+@xref{mysqldump, , @code{mysqldump}}.
+
+@cindex @code{mysqlimport}
+@item mysqlimport
+Imports text files into their respective tables using @code{LOAD DATA
+INFILE}. @xref{mysqlimport, , @code{mysqlimport}}.
+
+@cindex @code{mysqlshow}
+@item mysqlshow
+Displays information about databases, tables, columns, and indexes.
+
+@cindex @code{mysql_install_db}
+@item mysql_install_db
+Creates the @strong{MySQL} grant tables with default privileges. This is
+usually executed only once, when first installing @strong{MySQL}
+on a system.
+
+@cindex @code{replace}
+@item replace
+A utility program that is used by @code{msql2mysql}, but that has more
+general applicability as well. @code{replace} changes strings in place in
+files or on the standard input. Uses a finite state machine to match longer
+strings first. Can be used to swap strings. For example, this command
+swaps @code{a} and @code{b} in the given files:
+
+@example
+shell> replace a b b a -- file1 file2 ...
+@end example
+@end table
+
+
+@node safe_mysqld, mysqld_multi, Server-Side Overview, Server-Side Scripts
+@subsection safe_mysqld, the wrapper around mysqld
+
+@cindex tools, safe_mysqld
+@cindex scripts
+@cindex @code{safe_mysqld}
+
+@code{safe_mysqld} is the recommended way to start a @code{mysqld}
+daemon on Unix. @code{safe_mysqld} adds some safety features such as
+restarting the server when an error occurs and logging run-time
+information to a log file.
+
+If you don't use @code{--mysqld=#} or @code{--mysqld-version=#},
+@code{safe_mysqld} will use an executable named @code{mysqld-max} if it
+exists. If not, @code{safe_mysqld} will start @code{mysqld}.
+This makes it very easy to test using @code{mysqld-max} instead of
+@code{mysqld}; just copy @code{mysqld-max} to the directory where you have
+@code{mysqld} and it will be used.
+
+Normally one should never edit the @code{safe_mysqld} script, but
+instead put the options to @code{safe_mysqld} in the
+@code{[safe_mysqld]} section of the @file{my.cnf}
+file. @code{safe_mysqld} will read all options from the @code{[mysqld]},
+@code{[server]}, and @code{[safe_mysqld]} sections of the option files.
@xref{Option files}.
+Note that all options on the command line to @code{safe_mysqld} are passed
+to @code{mysqld}. If you want to use any options to @code{safe_mysqld} that
+@code{mysqld} doesn't support, you must specify these in the option file.
+
+Most of the options to @code{safe_mysqld} are the same as the options to
+@code{mysqld}. @xref{Command-line options}.
+
+@code{safe_mysqld} supports the following options:
+
+@table @code
+@item --basedir=path
+@item --core-file-size=#
+Size of the core file @code{mysqld} should be able to create. Passed to @code{ulimit -c}.
+@item --datadir=path
+@item --defaults-extra-file=path
+@item --defaults-file=path
+@item --err-log=path
+@item --ledir=path
+Path to @code{mysqld}
+@item --log=path
+@item --mysqld=mysqld-version
+Name of the @code{mysqld} version in the @code{ledir} directory you want to start.
+@item --mysqld-version=version
+Similar to @code{--mysqld=} but here you only give the suffix for @code{mysqld}.
+For example if you use @code{--mysqld-version=max}, @code{safe_mysqld} will
+start the @code{ledir/mysqld-max} version. If the argument to
+@code{--mysqld-version} is empty, @code{ledir/mysqld} will be used.
+@item --no-defaults
+@item --open-files-limit=#
+Number of files @code{mysqld} should be able to open. Passed to @code{ulimit -n}. Note that you need to start @code{safe_mysqld} as root for this to work properly!
+@item --pid-file=path
+@item --port=#
+@item --socket=path
+@item --timezone=#
+Set the timezone (the @code{TZ}) variable to the value of this parameter.
+@item --user=#
+@end table
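+
+For example, an illustrative invocation combining a few of the options
+above (the path is a placeholder, and @code{--open-files-limit} requires
+that you run @code{safe_mysqld} as root, as noted above):
+
+@example
+shell> safe_mysqld --err-log=/usr/local/mysql/data/mysqld.err \
+           --open-files-limit=2048 &
+@end example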
+
+The @code{safe_mysqld} script is written so that it normally is able to start
+a server that was installed from either a source or a binary version of
+@strong{MySQL}, even if these install the server in slightly different
+locations. @code{safe_mysqld} expects one of these conditions to be true:
+
+@itemize @bullet
@item
-If you make changes to the grant tables directly (using an @code{INSERT} or
-@code{UPDATE} statement) and your changes seem to be ignored, remember
-that you must issue a @code{FLUSH PRIVILEGES} statement or execute a
-@code{mysqladmin flush-privileges} command to cause the server to re-read
-the privilege tables. Otherwise your changes have no effect until the
-next time the server is restarted. Remember that after you set the
-@code{root} password with an @code{UPDATE} command, you won't need to
-specify it until after you flush the privileges, because the server
-won't know you've changed the password yet!
+The server and databases can be found relative to the directory from which
+@code{safe_mysqld} is invoked. @code{safe_mysqld} looks under its working
+directory for @file{bin} and @file{data} directories (for binary
+distributions) or for @file{libexec} and @file{var} directories (for source
+distributions). This condition should be met if you execute
+@code{safe_mysqld} from your @strong{MySQL} installation directory (for
+example, @file{/usr/local/mysql} for a binary distribution).
@item
-If you have access problems with a Perl, PHP, Python, or ODBC program, try to
-connect to the server with @code{mysql -u user_name db_name} or @code{mysql
--u user_name -pyour_pass db_name}. If you are able to connect using the
-@code{mysql} client, there is a problem with your program and not with the
-access privileges. (Note that there is no space between @code{-p} and the
-password; you can also use the @code{--password=your_pass} syntax to specify
-the password. If you use the @code{-p} option alone, @strong{MySQL} will
-prompt you for the password.)
+If the server and databases cannot be found relative to the working directory,
+@code{safe_mysqld} attempts to locate them by absolute pathnames. Typical
+locations are @file{/usr/local/libexec} and @file{/usr/local/var}.
+The actual locations are determined when the distribution from which
+@code{safe_mysqld} comes was built. They should be correct if
+@strong{MySQL} was installed in a standard location.
+@end itemize
+
+Because @code{safe_mysqld} will try to find the server and databases relative
+to its own working directory, you can install a binary distribution of
+@strong{MySQL} anywhere, as long as you start @code{safe_mysqld} from the
+@strong{MySQL} installation directory:
+
+@example
+shell> cd mysql_installation_directory
+shell> bin/safe_mysqld &
+@end example
+
+If @code{safe_mysqld} fails, even when invoked from the @strong{MySQL}
+installation directory, you can modify it to use the path to @code{mysqld}
+and the pathname options that are correct for your system. Note that if you
+upgrade @strong{MySQL} in the future, your modified version of
+@code{safe_mysqld} will be overwritten, so you should make a copy of your
+edited version that you can reinstall.
+
+
+@node mysqld_multi, myisampack, safe_mysqld, Server-Side Scripts
+@subsection mysqld_multi, program for managing multiple @strong{MySQL} servers
+
+@cindex tools, mysqld_multi
+@cindex scripts
+@cindex multi mysqld
+@cindex @code{mysqld_multi}
+
+@code{mysqld_multi} is meant for managing several @code{mysqld}
+processes that listen on different Unix sockets and TCP/IP ports.
+
+The program will search for group(s) named @code{[mysqld#]} in @file{my.cnf}
+(or the file given with @code{--config-file=...}), where # can be any
+positive number starting from 1. These groups should be the same as the
+usual @code{[mysqld]} group (e.g. options to @code{mysqld}; see the
+@strong{MySQL} manual for detailed information about this group), but with
+the port, socket, etc. options that are wanted for each separate
+@code{mysqld} process. The number in the group name has another function;
+it can be used for starting, stopping, or reporting some specific
+@code{mysqld} servers with this program. See the usage and options below
+for more information.
+
+@example
+Usage: mysqld_multi [OPTIONS] @{start|stop|report@} [GNR,GNR,GNR...]
+or mysqld_multi [OPTIONS] @{start|stop|report@} [GNR-GNR,GNR,GNR-GNR,...]
+@end example
+
+GNR above stands for the group number. You can start, stop, or report
+any GNR, or several of them at the same time. (See @code{--example}.) The
+GNR list can be comma separated or combined with dashes, where the latter
+means that all the GNRs between GNR1 and GNR2 will be affected. Without a
+GNR argument, all groups found will be started, stopped, or reported.
+Note that you must not have any whitespace in the GNR list; anything
+after a whitespace character is ignored.
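+
+For example (assuming groups @code{[mysqld2]}, @code{[mysqld3]}, and
+@code{[mysqld4]} exist in your config file, as in the example below):
+
+@example
+shell> mysqld_multi start 2,3-4
+shell> mysqld_multi report
+@end example
+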
+@code{mysqld_multi} supports the following options:
+
+@table @code
+@cindex config-file option
+@item --config-file=...
+Alternative config file. NOTE: This will not affect this program's own
+options (group @code{[mysqld_multi]}), but only the groups
+@code{[mysqld#]}. Without this option everything will be searched from
+the ordinary @file{my.cnf} file.
+@cindex example option
+@item --example
+Give an example of a config file.
+@cindex help option
+@item --help
+Print this help and exit.
+@cindex log option
+@item --log=...
+Log file. Full path to and the name for the log file. NOTE: If the file
+exists, everything will be appended.
+@cindex mysqladmin option
+@item --mysqladmin=...
+@code{mysqladmin} binary to be used for a server shutdown.
+@cindex mysqld option
+@item --mysqld=...
+@code{mysqld} binary to be used. Note that you can give
+@code{safe_mysqld} to this option also. The options are passed to
+@code{mysqld}. Just make sure you have @code{mysqld} in your environment
+variable @code{PATH} or fix @code{safe_mysqld}.
+@cindex no-log option
+@item --no-log
+Print to stdout instead of the log file. By default the log file is
+turned on.
+@cindex password option
+@item --password=...
+Password for user for @code{mysqladmin}.
+@cindex tcp-ip option
+@item --tcp-ip
+Connect to the @strong{MySQL} server(s) via the TCP/IP port instead of
+the UNIX socket. This affects stopping and reporting. If a socket file
+is missing, the server may still be running, but can be accessed only
+via the TCP/IP port. By default connecting is done via the UNIX socket.
+@cindex user option
+@item --user=...
+@strong{MySQL} user for @code{mysqladmin}.
+@cindex version option
+@item --version
+Print the version number and exit.
+@end table
+
+Some notes about @code{mysqld_multi}:
+
+@itemize @bullet
@item
-For testing, start the @code{mysqld} daemon with the
-@code{--skip-grant-tables} option. Then you can change the @strong{MySQL}
-grant tables and use the @code{mysqlaccess} script to check whether or not
-your modifications have the desired effect. When you are satisfied with your
-changes, execute @code{mysqladmin flush-privileges} to tell the @code{mysqld}
-server to start using the new grant tables. @strong{Note:} Reloading the
-grant tables overrides the @code{--skip-grant-tables} option. This allows
-you to tell the server to begin using the grant tables again without bringing
-it down and restarting it.
+Make sure that the @strong{MySQL} user who is stopping the
+@code{mysqld} services (e.g. using @code{mysqladmin}) has the same
+username and password for all the data directories accessed (in the
+'mysql' database), and make sure that the user has the 'Shutdown_priv'
+privilege! If you have many data directories and many different 'mysql'
+databases with different passwords for the @strong{MySQL} 'root' user,
+you may want to create a common 'multi_admin' user for each, using the
+same password (see below). Example of how to do it:
+@example
+shell> mysql -u root -S /tmp/mysql.sock -proot_password -e \
+"GRANT SHUTDOWN ON *.* TO multi_admin@@localhost IDENTIFIED BY 'multipass'"
+@end example
+@xref{Privileges}.
+You will have to do the above for each @code{mysqld} running in each
+data directory that you have (just change the socket, @code{-S=...}).
+@item
+@code{pid-file} is very important if you are using @code{safe_mysqld}
+to start @code{mysqld} (e.g. @code{--mysqld=safe_mysqld}). Every
+@code{mysqld} should have its own @code{pid-file}. The advantage of using
+@code{safe_mysqld} instead of @code{mysqld} directly here is that
+@code{safe_mysqld} 'guards' every @code{mysqld} process and will restart
+it if a @code{mysqld} process fails due to a signal like @code{kill -9} or
+something similar (like a segmentation fault, which @strong{MySQL} should
+never do, of course ;). Please note that the @code{safe_mysqld} script may
+require that you start it from a certain place. This means that you may
+have to change directory to a certain location before you start
+@code{mysqld_multi}. If you have problems starting, please see the
+@code{safe_mysqld} script. Check especially the lines:
+@example
+--------------------------------------------------------------------------
+MY_PWD=`pwd`
+# Check if we are starting this relative (for the binary release)
+if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys \
+        -a -x ./bin/mysqld
+--------------------------------------------------------------------------
+@end example
+@xref{safe_mysqld, , @code{safe_mysqld}}.
+The above test should be successful, or you may encounter problems.
+@item
+Beware of the dangers of starting multiple @code{mysqlds} in the same data
+directory. Use separate data directories, unless you @strong{KNOW} what
+you are doing!
+@item
+The socket file and the TCP/IP port must be different for every @code{mysqld}.
+@item
+The first and fifth @code{mysqld} groups were intentionally left out of
+the example. You may have 'gaps' in the config file. This gives you
+more flexibility. The order in which the @code{mysqlds} are started or
+stopped depends on the order in which they appear in the config file.
+@item
+When you want to refer to a certain group using GNR with this program,
+just use the number at the end of the group name ( [mysqld# <== ).
+@item
+You may want to use the @code{--user} option for @code{mysqld}, but in
+order to do this you need to be root when you start the
+@code{mysqld_multi} script. Having the option in the config file doesn't
+matter; you will just get a warning if you are not the superuser and the
+@code{mysqlds} are started under @strong{YOUR} UNIX account.
+@strong{IMPORTANT}: Make sure that the @code{pid-file} and the data
+directory are readable and writable (and executable for the latter)
+for @strong{THAT} UNIX user who the specific @code{mysqld} process is
+started as. @strong{DON'T} use the UNIX root account for this, unless you
+@strong{KNOW} what you are doing!
+@item
+@strong{MOST IMPORTANT}: Make sure that you understand the meanings of
+the options that are passed to the @code{mysqlds} and why you
+@strong{WOULD WANT} to have separate @code{mysqld} processes. Starting multiple
+@code{mysqlds} in one data directory @strong{WILL NOT} give you extra
+performance in a threaded system!
+@end itemize
+
+@xref{Multiple servers}.
+
+This is an example of a config file for use with @code{mysqld_multi}.
+
+@example
+# This file should probably be in your home dir (~/.my.cnf) or /etc/my.cnf
+# Version 2.1 by Jani Tolonen
+
+[mysqld_multi]
+mysqld = /usr/local/bin/safe_mysqld
+mysqladmin = /usr/local/bin/mysqladmin
+user = multi_admin
+password = multipass
+
+[mysqld2]
+socket = /tmp/mysql.sock2
+port = 3307
+pid-file = /usr/local/mysql/var2/hostname.pid2
+datadir = /usr/local/mysql/var2
+language = /usr/local/share/mysql/english
+user = john
+
+[mysqld3]
+socket = /tmp/mysql.sock3
+port = 3308
+pid-file = /usr/local/mysql/var3/hostname.pid3
+datadir = /usr/local/mysql/var3
+language = /usr/local/share/mysql/swedish
+user = monty
+
+[mysqld4]
+socket = /tmp/mysql.sock4
+port = 3309
+pid-file = /usr/local/mysql/var4/hostname.pid4
+datadir = /usr/local/mysql/var4
+language = /usr/local/share/mysql/estonian
+user = tonu
+
+[mysqld6]
+socket = /tmp/mysql.sock6
+port = 3311
+pid-file = /usr/local/mysql/var6/hostname.pid6
+datadir = /usr/local/mysql/var6
+language = /usr/local/share/mysql/japanese
+user = jani
+@end example
+
+@xref{Option files}.
+
+
+@node myisampack, mysqld-max, mysqld_multi, Server-Side Scripts
+@subsection myisampack, The MySQL Compressed Read-only Table Generator
+
+@cindex compressed tables
+@cindex tables, compressed
+@cindex MyISAM, compressed tables
+@cindex @code{myisampack}
+@cindex @code{pack_isam}
+
+@code{myisampack} is used to compress MyISAM tables, and @code{pack_isam}
+is used to compress ISAM tables. Because ISAM tables are deprecated, we
+will only discuss @code{myisampack} here, but everything said about
+@code{myisampack} should also be true for @code{pack_isam}.
+@code{myisampack} works by compressing each column in the table separately.
+The information needed to decompress columns is read into memory when the
+table is opened. This results in much better performance when accessing
+individual records, because you only have to uncompress exactly one record, not
+a much larger disk block as when using Stacker on MS-DOS.
+Usually, @code{myisampack} packs the data file by 40%-70%.
+
+@strong{MySQL} uses memory mapping (@code{mmap()}) on compressed tables and
+falls back to normal read/write file usage if @code{mmap()} doesn't work.
+
+There are currently two limitations with @code{myisampack}:
+@itemize @bullet
@item
-If everything else fails, start the @code{mysqld} daemon with a debugging
-option (for example, @code{--debug=d,general,query}). This will print host and
-user information about attempted connections, as well as information about
-each command issued. @xref{Making trace files}.
+After packing, the table is read-only.
+@item
+@code{myisampack} can also pack @code{BLOB} or @code{TEXT} columns. The
+older @code{pack_isam} could not do this.
+@end itemize
+
+Fixing these limitations is on our TODO list but with low priority.
+
+@code{myisampack} is invoked like this:
+
+@example
+shell> myisampack [options] filename ...
+@end example
+
+Each filename should be the name of an index (@file{.MYI}) file. If you
+are not in the database directory, you should specify the pathname to the
+file. It is permissible to omit the @file{.MYI} extension.
+
+@code{myisampack} supports the following options:
+
+@table @code
+@item -b, --backup
+Make a backup of the table as @code{tbl_name.OLD}.
+
+@item -#, --debug=debug_options
+Output debug log. The @code{debug_options} string often is
+@code{'d:t:o,filename'}.
+
+@item -f, --force
+Force packing of the table even if it becomes bigger or if the temporary file
+exists. @code{myisampack} creates a temporary file named @file{tbl_name.TMD}
+while it compresses the table. If you kill @code{myisampack}, the @file{.TMD}
+file may not be deleted. Normally, @code{myisampack} exits with an error if
+it finds that @file{tbl_name.TMD} exists. With @code{--force},
+@code{myisampack} packs the table anyway.
+
+@item -?, --help
+Display a help message and exit.
+
+@item -j big_tbl_name, --join=big_tbl_name
+Join all tables named on the command line into a single table
+@code{big_tbl_name}. All tables that are to be combined
+MUST be identical (same column names and types, same indexes, etc.).
+
+@item -p #, --packlength=#
+Specify the record length storage size, in bytes. The value should be 1, 2,
+or 3. (@code{myisampack} stores all rows with length pointers of 1, 2, or 3
+bytes. In most normal cases, @code{myisampack} can determine the right length
+value before it begins packing the file, but it may notice during the packing
+process that it could have used a shorter length. In this case,
+@code{myisampack} will print a note that the next time you pack the same file,
+you could use a shorter record length.)
+
+@item -s, --silent
+Silent mode. Write output only when errors occur.
+
+@item -t, --test
+Don't actually pack the table, just test packing it.
+
+@item -T dir_name, --tmp_dir=dir_name
+Use the named directory as the location in which to write the temporary table.
+
+@item -v, --verbose
+Verbose mode. Write information about progress and packing result.
+
+@item -V, --version
+Display version information and exit.
+
+@item -w, --wait
+
+Wait and retry if table is in use. If the @code{mysqld} server was
+invoked with the @code{--skip-locking} option, it is not a good idea to
+invoke @code{myisampack} if the table might be updated during the
+packing process.
+@end table
+
+@cindex examples, compressed tables
+The sequence of commands shown below illustrates a typical table compression
+session:
+
+@example
+shell> ls -l station.*
+-rw-rw-r-- 1 monty my 994128 Apr 17 19:00 station.MYD
+-rw-rw-r-- 1 monty my 53248 Apr 17 19:00 station.MYI
+-rw-rw-r-- 1 monty my 5767 Apr 17 19:00 station.frm
+
+shell> myisamchk -dvv station
+
+MyISAM file: station
+Isam-version: 2
+Creation time: 1996-03-13 10:08:58
+Recover time: 1997-02-02 3:06:43
+Data records: 1192 Deleted blocks: 0
+Datafile: Parts: 1192 Deleted data: 0
+Datafile pointer (bytes): 2 Keyfile pointer (bytes): 2
+Max datafile length: 54657023 Max keyfile length: 33554431
+Recordlength: 834
+Record format: Fixed length
+
+table description:
+Key Start Len Index Type Root Blocksize Rec/key
+1 2 4 unique unsigned long 1024 1024 1
+2 32 30 multip. text 10240 1024 1
+
+Field Start Length Type
+1 1 1
+2 2 4
+3 6 4
+4 10 1
+5 11 20
+6 31 1
+7 32 30
+8 62 35
+9 97 35
+10 132 35
+11 167 4
+12 171 16
+13 187 35
+14 222 4
+15 226 16
+16 242 20
+17 262 20
+18 282 20
+19 302 30
+20 332 4
+21 336 4
+22 340 1
+23 341 8
+24 349 8
+25 357 8
+26 365 2
+27 367 2
+28 369 4
+29 373 4
+30 377 1
+31 378 2
+32 380 8
+33 388 4
+34 392 4
+35 396 4
+36 400 4
+37 404 1
+38 405 4
+39 409 4
+40 413 4
+41 417 4
+42 421 4
+43 425 4
+44 429 20
+45 449 30
+46 479 1
+47 480 1
+48 481 79
+49 560 79
+50 639 79
+51 718 79
+52 797 8
+53 805 1
+54 806 1
+55 807 20
+56 827 4
+57 831 4
+
+shell> myisampack station.MYI
+Compressing station.MYI: (1192 records)
+- Calculating statistics
+
+normal: 20 empty-space: 16 empty-zero: 12 empty-fill: 11
+pre-space: 0 end-space: 12 table-lookups: 5 zero: 7
+Original trees: 57 After join: 17
+- Compressing file
+87.14%
+
+shell> ls -l station.*
+-rw-rw-r-- 1 monty my 127874 Apr 17 19:00 station.MYD
+-rw-rw-r-- 1 monty my 55296 Apr 17 19:04 station.MYI
+-rw-rw-r-- 1 monty my 5767 Apr 17 19:00 station.frm
+
+shell> myisamchk -dvv station
+
+MyISAM file: station
+Isam-version: 2
+Creation time: 1996-03-13 10:08:58
+Recover time: 1997-04-17 19:04:26
+Data records: 1192 Deleted blocks: 0
+Datafile: Parts: 1192 Deleted data: 0
+Datafilepointer (bytes): 3 Keyfile pointer (bytes): 1
+Max datafile length: 16777215 Max keyfile length: 131071
+Recordlength: 834
+Record format: Compressed
+
+table description:
+Key Start Len Index Type Root Blocksize Rec/key
+1 2 4 unique unsigned long 10240 1024 1
+2 32 30 multip. text 54272 1024 1
+
+Field Start Length Type Huff tree Bits
+1 1 1 constant 1 0
+2 2 4 zerofill(1) 2 9
+3 6 4 no zeros, zerofill(1) 2 9
+4 10 1 3 9
+5 11 20 table-lookup 4 0
+6 31 1 3 9
+7 32 30 no endspace, not_always 5 9
+8 62 35 no endspace, not_always, no empty 6 9
+9 97 35 no empty 7 9
+10 132 35 no endspace, not_always, no empty 6 9
+11 167 4 zerofill(1) 2 9
+12 171 16 no endspace, not_always, no empty 5 9
+13 187 35 no endspace, not_always, no empty 6 9
+14 222 4 zerofill(1) 2 9
+15 226 16 no endspace, not_always, no empty 5 9
+16 242 20 no endspace, not_always 8 9
+17 262 20 no endspace, no empty 8 9
+18 282 20 no endspace, no empty 5 9
+19 302 30 no endspace, no empty 6 9
+20 332 4 always zero 2 9
+21 336 4 always zero 2 9
+22 340 1 3 9
+23 341 8 table-lookup 9 0
+24 349 8 table-lookup 10 0
+25 357 8 always zero 2 9
+26 365 2 2 9
+27 367 2 no zeros, zerofill(1) 2 9
+28 369 4 no zeros, zerofill(1) 2 9
+29 373 4 table-lookup 11 0
+30 377 1 3 9
+31 378 2 no zeros, zerofill(1) 2 9
+32 380 8 no zeros 2 9
+33 388 4 always zero 2 9
+34 392 4 table-lookup 12 0
+35 396 4 no zeros, zerofill(1) 13 9
+36 400 4 no zeros, zerofill(1) 2 9
+37 404 1 2 9
+38 405 4 no zeros 2 9
+39 409 4 always zero 2 9
+40 413 4 no zeros 2 9
+41 417 4 always zero 2 9
+42 421 4 no zeros 2 9
+43 425 4 always zero 2 9
+44 429 20 no empty 3 9
+45 449 30 no empty 3 9
+46 479 1 14 4
+47 480 1 14 4
+48 481 79 no endspace, no empty 15 9
+49 560 79 no empty 2 9
+50 639 79 no empty 2 9
+51 718 79 no endspace 16 9
+52 797 8 no empty 2 9
+53 805 1 17 1
+54 806 1 3 9
+55 807 20 no empty 3 9
+56 827 4 no zeros, zerofill(2) 2 9
+57 831 4 no zeros, zerofill(1) 2 9
+@end example
+
+The information printed by @code{myisampack} is described below:
+
+@table @code
+@item normal
+The number of columns for which no extra packing is used.
+
+@item empty-space
+The number of columns containing
+values that are only spaces; these will occupy 1 bit.
+
+@item empty-zero
+The number of columns containing
+values that are only binary 0's; these will occupy 1 bit.
+
+@item empty-fill
+The number of integer columns that don't occupy the full byte range of their
+type; these are changed to a smaller type (for example, an @code{INTEGER}
+column may be changed to @code{MEDIUMINT}).
+
+@item pre-space
+The number of decimal columns that are stored with leading spaces. In this
+case, each value will contain a count for the number of leading spaces.
+
+@item end-space
+The number of columns that have a lot of trailing spaces. In this case, each
+value will contain a count for the number of trailing spaces.
+
+@item table-lookup
+The number of columns that had only a small number of distinct values;
+these were converted to an @code{ENUM} before Huffman compression.
+
+@item zero
+The number of columns for which all values are zero.
+
+@item Original trees
+The initial number of Huffman trees.
+
+@item After join
+The number of distinct Huffman trees left after joining
+trees to save some header space.
+@end table
+
+After a table has been compressed, @code{myisamchk -dvv} prints additional
+information about each field:
+
+@table @code
+@item Type
+The field type may contain the following descriptors:
+
+@table @code
+@item constant
+All rows have the same value.
+
+@item no endspace
+Don't store endspace.
+
+@item no endspace, not_always
+Don't store endspace and don't do end space compression for all values.
+
+@item no endspace, no empty
+Don't store endspace. Don't store empty values.
+
+@item table-lookup
+The column was converted to an @code{ENUM}.
+
+@item zerofill(n)
+The most significant @code{n} bytes in the value are always 0 and are not
+stored.
+
+@item no zeros
+Don't store zeros.
+
+@item always zero
+0 values are stored in 1 bit.
+@end table
+
+@item Huff tree
+The Huffman tree associated with the field.
+
+@item Bits
+The number of bits used in the Huffman tree.
+@end table
+
+After you have run @code{pack_isam}/@code{myisampack}, you must run
+@code{isamchk}/@code{myisamchk} to re-create the index. At this time you
+can also sort the index blocks and create the statistics needed for
+the @strong{MySQL} optimizer to work more efficiently:
+
+@example
+myisamchk -rq --analyze --sort-index table_name.MYI
+isamchk -rq --analyze --sort-index table_name.ISM
+@end example
+
+After you have installed the packed table into the @strong{MySQL} database
+directory you should do @code{mysqladmin flush-tables} to force @code{mysqld}
+to start using the new table.
+
+If you want to unpack a packed table, you can do this with the
+@code{--unpack} option to @code{isamchk} or @code{myisamchk}.
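+
+For example, a sketch of unpacking the @file{station} table used in the
+example above and rebuilding its index afterwards:
+
+@example
+shell> myisamchk --unpack station.MYI
+shell> myisamchk -rq --analyze --sort-index station.MYI
+@end example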
+
+
+@node mysqld-max, , myisampack, Server-Side Scripts
+@subsection mysqld-max, An Extended mysqld Server
+
+@cindex @code{mysqld-max}
+
+@code{mysqld-max} is the MySQL server (@code{mysqld}) configured with
+the following configure options:
+
+@multitable @columnfractions .3 .7
+@item @strong{Option} @tab @strong{Comment}
+@item --with-server-suffix=-max @tab Add a suffix to the @code{mysqld} version string.
+@item --with-bdb @tab Support for Berkeley DB (BDB) tables.
+@item --with-innodb @tab Support for InnoDB tables.
+@item CFLAGS=-DUSE_SYMDIR @tab Support for symbolic links on Windows.
+@end multitable
+
+You can find the @strong{MySQL}-max binaries at
+@uref{http://www.mysql.com/downloads/mysql-max-3.23.html}.
+
+The Windows @strong{MySQL} 3.23 binary distribution includes both the
+standard @code{mysqld.exe} binary and the @code{mysqld-max.exe} binary,
+available from @uref{http://www.mysql.com/downloads/mysql-3.23.html}.
+@xref{Windows installation}.
+
+Note that as Berkeley DB and InnoDB are not available for all platforms,
+some of the @code{Max} binaries may not have support for both of these.
+You can check which table types are supported by doing the following
+query:
+
+@example
+mysql> show variables like "have_%";
++---------------+-------+
+| Variable_name | Value |
++---------------+-------+
+| have_bdb | YES |
+| have_innodb | NO |
+| have_isam | YES |
+| have_raid | YES |
+| have_ssl | NO |
++---------------+-------+
+@end example
+
+The meanings of the values are:
+
+@multitable @columnfractions .3 .7
+@item @strong{Value} @tab @strong{Meaning}
+@item YES @tab The option is activated and usable.
+@item NO @tab @strong{MySQL} is not compiled with support for this option.
+@item DISABLED @tab The xxxx option is disabled because @code{mysqld} was started with @code{--skip-xxxx}, or because @code{mysqld} was not started with all the options needed to enable it. In this case the @code{hostname.err} file should contain the reason why the option is disabled.
+@end multitable
+
+@strong{NOTE}: To be able to create InnoDB tables you @strong{MUST} edit
+your startup options to include at least the @code{innodb_data_file_path}
+option. @xref{InnoDB start}.
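+
+As a minimal sketch only (the directory and data file sizes below are
+placeholders that you must adapt to your system), the relevant lines in
+@file{my.cnf} might look like this:
+
+@example
+[mysqld]
+innodb_data_home_dir = /usr/local/mysql/data
+innodb_data_file_path = ibdata1:30M;ibdata2:30M
+@end example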
+
+To get better performance for BDB tables, you should add some configuration
+options for these too. @xref{BDB start}.
+
+@code{safe_mysqld} will automatically try to start any @code{mysqld} binary
+with the @code{-max} suffix. This makes it very easy to test
+another @code{mysqld} binary in an existing installation. Just
+run @code{configure} with the options you want and then install the
+new @code{mysqld} binary as @code{mysqld-max} in the same directory
+as your old @code{mysqld} binary. @xref{safe_mysqld, , @code{safe_mysqld}}.
+
+The @code{mysqld-max} RPM uses the above-mentioned @code{safe_mysqld}
+feature. It just installs the @code{mysqld-max} executable, and
+@code{safe_mysqld} will automatically use this executable when
+@code{safe_mysqld} is restarted.
+
+The following table shows which table types our standard @strong{MySQL-Max}
+binaries include:
+
+@multitable @columnfractions .4 .3 .3
+@item @strong{System} @tab @strong{BDB} @tab @strong{InnoDB}
+@item AIX 4.3 @tab N @tab Y
+@item HP-UX 11.0 @tab N @tab Y
+@item Linux-Alpha @tab N @tab Y
+@item Linux-Intel @tab Y @tab Y
+@item Linux-Ia64 @tab N @tab Y
+@item Solaris-intel @tab N @tab Y
+@item Solaris-sparc @tab Y @tab Y
+@item SCO OSR5 @tab Y @tab Y
+@item UnixWare @tab Y @tab Y
+@item Windows/NT @tab Y @tab Y
+@end multitable
+
+
+@node Client-Side Scripts, Log Files, Server-Side Scripts, MySQL Database Administration
+@section MySQL Client-Side Scripts and Utilities
+
+@menu
+* Client-Side Overview::
+* mysql::
+* mysqladmin::
+* mysqldump::
+* mysqlhotcopy::
+* mysqlimport::
+* mysqlshow::
+* perror::
+* Batch Commands::
+@end menu
+
+
+@node Client-Side Overview, mysql, Client-Side Scripts, Client-Side Scripts
+@subsection Overview of the Client-Side Scripts and Utilities
+
+@cindex environment variables
+@cindex programs, list of
+
+All @strong{MySQL} clients that communicate with the server using the
+@code{mysqlclient} library use the following environment variables:
+
+@tindex MYSQL_UNIX_PORT environment variable
+@tindex Environment variable, MYSQL_UNIX_PORT
+@tindex MYSQL_TCP_PORT environment variable
+@tindex Environment variable, MYSQL_TCP_PORT
+@tindex MYSQL_PWD environment variable
+@tindex Environment variable, MYSQL_PWD
+@tindex MYSQL_DEBUG environment variable
+@tindex Environment variable, MYSQL_DEBUG
+@multitable @columnfractions .25 .75
+@item @strong{Name} @tab @strong{Description}
+@item @code{MYSQL_UNIX_PORT} @tab The default socket; used for connections to @code{localhost}
+@item @code{MYSQL_TCP_PORT} @tab The default TCP/IP port
+@item @code{MYSQL_PWD} @tab The default password
+@item @code{MYSQL_DEBUG} @tab Debug-trace options when debugging
+@item @code{TMPDIR} @tab The directory where temporary tables/files are created
+@end multitable
+
+Use of @code{MYSQL_PWD} is insecure.
+@xref{Connecting}.
+
+@tindex MYSQL_HISTFILE environment variable
+@tindex Environment variable, MYSQL_HISTFILE
+@tindex HOME environment variable
+@tindex Environment variable, HOME
+@cindex history file
+@cindex command line history
+@tindex .mysql_history file
+The @code{mysql} client uses the file named in the @code{MYSQL_HISTFILE}
+environment variable to save the command-line history. The default value for
+the history file is @file{$HOME/.mysql_history}, where @code{$HOME} is the
+value of the @code{HOME} environment variable. @xref{Environment variables}.
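+
+For example, to keep the history in a different file (the path below is
+only a placeholder), you could set the variable in a Bourne-style shell
+before starting @code{mysql}:
+
+@example
+shell> MYSQL_HISTFILE=/home/monty/.mysql_history_test
+shell> export MYSQL_HISTFILE
+shell> mysql
+@end example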
+
+All @strong{MySQL} programs take many different options. However, every
+@strong{MySQL} program provides a @code{--help} option that you can use
+to get a full description of the program's different options. For example, try
+@code{mysql --help}.
+
+You can override the default options for all standard client programs with
+an option file, as sketched below. @xref{Option files}.
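+
+As a sketch, a @file{~/.my.cnf} file with a @code{[client]} section like
+the following (the values are placeholders) would apply to all standard
+client programs:
+
+@example
+[client]
+user=monty
+password=some_pass
+socket=/tmp/mysql.sock
+@end example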
+
+The list below briefly describes the @strong{MySQL} programs:
+
+@table @code
+
+@cindex @code{myisamchk}
+@item myisamchk
+Utility to describe, check, optimize, and repair @strong{MySQL} tables.
+Because @code{myisamchk} has many functions, it is described in its own
+chapter. @xref{Maintenance}.
+
+@cindex @code{make_binary_distribution}
+@item make_binary_distribution
+Makes a binary release of a compiled @strong{MySQL}. This could be sent
+by FTP to @file{/pub/mysql/Incoming} on @code{support.mysql.com} for the
+convenience of other @strong{MySQL} users.
+
+@cindex @code{msql2mysql}
+@item msql2mysql
+A shell script that converts @code{mSQL} programs to @strong{MySQL}. It doesn't
+handle all cases, but it gives a good start when converting.
+
+@cindex @code{mysqlaccess}
+@item mysqlaccess
+A script that checks the access privileges for a host, user, and database
+combination.
+
+@cindex @code{mysqladmin}
+@item mysqladmin
+Utility for performing administrative operations, such as creating or
+dropping databases, reloading the grant tables, flushing tables to disk, and
+reopening log files. @code{mysqladmin} can also be used to retrieve version,
+process, and status information from the server.
+@xref{mysqladmin, , @code{mysqladmin}}.
+
+@cindex @code{mysqlbug}
+@item mysqlbug
+The @strong{MySQL} bug report script. This script should always be used when
+filing a bug report to the @strong{MySQL} list.
+
+@cindex @code{mysqld}
+@item mysqld
+The SQL daemon. This should always be running.
+
+@cindex @code{mysqldump}
+@item mysqldump
+Dumps a @strong{MySQL} database into a file as SQL statements or
+as tab-separated text files. Enhanced freeware originally by Igor Romanenko.
+@xref{mysqldump, , @code{mysqldump}}.
+
+@cindex @code{mysqlimport}
+@item mysqlimport
+Imports text files into their respective tables using @code{LOAD DATA
+INFILE}. @xref{mysqlimport, , @code{mysqlimport}}.
+
+@cindex @code{mysqlshow}
+@item mysqlshow
+Displays information about databases, tables, columns, and indexes.
+
+@cindex @code{mysql_install_db}
+@item mysql_install_db
+Creates the @strong{MySQL} grant tables with default privileges. This is
+usually executed only once, when first installing @strong{MySQL}
+on a system.
+
+@cindex @code{replace}
+@item replace
+A utility program that is used by @code{msql2mysql}, but that has more
+general applicability as well. @code{replace} changes strings in place in
+files or on the standard input. Uses a finite state machine to match longer
+strings first. Can be used to swap strings. For example, this command
+swaps @code{a} and @code{b} in the given files:
+
+@example
+shell> replace a b b a -- file1 file2 ...
+@end example
+@end table
+
+
+@node mysql, mysqladmin, Client-Side Overview, Client-Side Scripts
+@subsection The Command-line Tool
+
+@cindex command line tool
+@cindex tools, command line
+@cindex scripts
+@cindex @code{mysql}
+
+@code{mysql} is a simple SQL shell (with GNU @code{readline} capabilities).
+It supports interactive and non-interactive use. When used interactively,
+query results are presented in an ASCII-table format. When used
+non-interactively (for example, as a filter), the result is presented in
+tab-separated format. (The output format can be changed using command-line
+options.) You can run scripts simply like this:
+
+@example
+shell> mysql database < script.sql > output.tab
+@end example
+
+If you have problems due to insufficient memory in the client, use the
+@code{--quick} option! This forces @code{mysql} to use
+@code{mysql_use_result()} rather than @code{mysql_store_result()} to
+retrieve the result set.
+
+Using @code{mysql} is very easy. Just start it as follows:
+@code{mysql database} or @code{mysql --user=user_name --password=your_password database}. Type a SQL statement, end it with @samp{;}, @samp{\g}, or @samp{\G}
+and press RETURN/ENTER.
+
+@cindex command line options
+@cindex options, command line
+@cindex startup parameters
+@code{mysql} supports the following options:
+
+@table @code
+@cindex help option
+@item -?, --help
+Display this help and exit.
+@cindex automatic rehash option
+@item -A, --no-auto-rehash
+No automatic rehashing. You have to use 'rehash' to get table and field
+completion. This gives a quicker start of @code{mysql}.
+@cindex batch option
+@item -B, --batch
+Print results with a tab as separator, each row on a new line. Doesn't use
+history file.
+@cindex character sets option
+@item --character-sets-dir=...
+Directory where character sets are located.
+@cindex compress option.
+@item -C, --compress
+Use compression in server/client protocol.
+@cindex debug option
+@item -#, --debug[=...]
+Debug log. Default is 'd:t:o,/tmp/mysql.trace'.
+@cindex database option
+@item -D, --database=...
+Database to use. This is mainly useful in the @code{my.cnf} file.
+@cindex default character set option
+@item --default-character-set=...
+Set the default character set.
+@cindex execute option
+@item -e, --execute=...
+Execute command and quit. (Output like with --batch)
+@cindex vertical option
+@item -E, --vertical
+Print the output of a query (rows) vertically. Without this option you
+can also force this output by ending your statements with @code{\G}.
+@cindex force option
+@item -f, --force
+Continue even if we get a SQL error.
+@cindex no-named-commands option
+@item -g, --no-named-commands
+Named commands are disabled. Use the \* form only, or use named commands
+only at the beginning of a line ending with a semicolon (;). Since
+Version 10.9, the client starts with this option ENABLED by default!
+With the -g option, long-format commands will still work from the first
+line, however.
+@cindex enable-named-commands option
+@item -G, --enable-named-commands
+Named commands are @strong{enabled}. Long format commands are allowed as
+well as shortened \* commands.
+@cindex ignore space option.
+@item -i, --ignore-space
+Ignore space after function names.
+@cindex host option
+@item -h, --host=...
+Connect to the given host.
+@cindex html option
+@item -H, --html
+Produce HTML output.
+@cindex skip line numbers option
+@item -L, --skip-line-numbers
+Don't write line numbers for errors. Useful when you want to compare result
+files that include error messages.
+@cindex no pager option
+@item --no-pager
+Disable pager and print to stdout. See interactive help (\h) also.
+@cindex no tee option
+@item --no-tee
+Disable outfile. See interactive help (\h) also.
+@cindex unbuffered option.
+@item -n, --unbuffered
+Flush buffer after each query.
+@cindex skip column names option
+@item -N, --skip-column-names
+Don't write column names in results.
+@cindex set variable option
+@item -O, --set-variable var=option
+Give a variable a value. @code{--help} lists variables.
+@cindex one database option
+@item -o, --one-database
+Only update the default database. This is useful for skipping updates to
+other databases in the update log.
+@cindex pager option
+@item @code{--pager[=...]}
+Output type. Default is your @code{ENV} variable @code{PAGER}. Valid
+pagers are less, more, cat [> filename], etc. See interactive help (\h)
+also. This option does not work in batch mode. Pager works only in UNIX.
+@cindex password option
+@item -p[password], --password[=...]
+Password to use when connecting to server. If a password is not given on
+the command line, you will be prompted for it. Note that if you use the
+short form @code{-p} you can't have a space between the option and the
+password.
+@item -P, --port=...
+TCP/IP port number to use for connection.
+@cindex quick option
+@item -q, --quick
+Don't cache result, print it row-by-row. This may slow down the server
+if the output is suspended. Doesn't use history file.
+@cindex raw option
+@item -r, --raw
+Write column values without escape conversion. Used with @code{--batch}.
+@cindex silent option
+@item -s, --silent
+Be more silent.
+@item -S, --socket=...
+Socket file to use for connection.
+@cindex table option
+@item -t, --table
+Output in table format. This is default in non-batch mode.
+@item -T, --debug-info
+Print some debug information at exit.
+@cindex tee option
+@item --tee=...
+Append everything into outfile. See interactive help (\h) also. Does not
+work in batch mode.
+@cindex user option
+@item -u, --user=#
+User for login if not current user.
+@cindex safe updates option
+@item -U, --safe-updates[=#], --i-am-a-dummy[=#]
+Only allow @code{UPDATE} and @code{DELETE} statements that use keys. See below for
+more information about this option. You can reset this option if you have
+it in your @code{my.cnf} file by using @code{--safe-updates=0}.
+@cindex verbose option
+@item -v, --verbose
+More verbose output (-v -v -v gives the table output format).
+@cindex version option
+@item -V, --version
+Output version information and exit.
+@cindex wait option
+@item -w, --wait
+Wait and retry if connection is down instead of aborting.
+@end table
+
+You can also set the following variables with @code{-O} or
+@code{--set-variable}:
+
+@cindex timeout
+@multitable @columnfractions .3 .2 .5
+@item Variable name @tab Default @tab Description
+@item connect_timeout @tab 0 @tab Number of seconds before connection timeout.
+@item max_allowed_packet @tab 16777216 @tab Maximum packet length to send to or receive from the server.
+@item net_buffer_length @tab 16384 @tab Buffer size for TCP/IP and socket communication.
+@item select_limit @tab 1000 @tab Automatic limit for @code{SELECT} when using --i-am-a-dummy.
+@item max_join_size @tab 1000000 @tab Automatic limit for rows in a join when using --i-am-a-dummy.
+@end multitable
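+
+For example, the following sketch (with placeholder values) starts
+@code{mysql} with stricter automatic limits for @code{--safe-updates}:
+
+@example
+shell> mysql --safe-updates -O select_limit=100 -O max_join_size=10000 database
+@end example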
+
+If you type 'help' on the command line, @code{mysql} will print out the
+commands that it supports:
+
+@cindex commands, list of
+@example
+mysql> help
+
+MySQL commands:
+help (\h) Display this text.
+? (\h) Synonym for `help'.
+clear (\c) Clear command.
+connect (\r) Reconnect to the server. Optional arguments are db and host.
+edit (\e) Edit command with $EDITOR.
+ego (\G) Send command to mysql server, display result vertically.
+exit (\q) Exit mysql. Same as quit.
+go (\g) Send command to mysql server.
+nopager (\n) Disable pager, print to stdout.
+notee (\t) Don't write into outfile.
+pager (\P) Set PAGER [to_pager]. Print the query results via PAGER.
+print (\p) Print current command.
+quit (\q) Quit mysql.
+rehash (\#) Rebuild completion hash.
+source (\.) Execute a SQL script file. Takes a file name as an argument.
+status (\s) Get status information from the server.
+tee (\T) Set outfile [to_outfile]. Append everything into given outfile.
+use (\u) Use another database. Takes database name as argument.
+@end example
+
+As noted above, the @code{pager} command works only in UNIX.
+
+@cindex status command
+The @code{status} command gives you some information about the
+connection and the server you are using. If you are running in the
+@code{--safe-updates} mode, @code{status} will also print the values for
+the @code{mysql} variables that affect your queries.
+
+@cindex @code{safe-mode} command
+A useful startup option for beginners (introduced in @strong{MySQL}
+Version 3.23.11) is @code{--safe-updates} (or @code{--i-am-a-dummy} for
+users who have at some point done a @code{DELETE FROM table_name} but
+forgotten the @code{WHERE} clause). When using this option, @code{mysql}
+sends the following command to the @strong{MySQL} server when opening
+the connection:
+
+@example
+SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=#select_limit#,
+    SQL_MAX_JOIN_SIZE=#max_join_size#
+@end example
+
+where @code{#select_limit#} and @code{#max_join_size#} are variables that
+can be set from the @code{mysql} command line. @xref{SET OPTION, @code{SET}}.
+
+The effect of the above is:
+
+@itemize @bullet
@item
-If you have any other problems with the @strong{MySQL} grant tables and
-feel you must post the problem to the mailing list, always provide a
-dump of the @strong{MySQL} grant tables. You can dump the tables with
-the @code{mysqldump mysql} command. As always, post your problem using
-the @code{mysqlbug} script. @xref{Bug reports}. In some cases you may need
-to restart @code{mysqld} with @code{--skip-grant-tables} to run
-@code{mysqldump}.
+You are not allowed to do an @code{UPDATE} or @code{DELETE} statement
+if you don't have a key constraint in the @code{WHERE} part. You can,
+however, force an @code{UPDATE/DELETE} by using @code{LIMIT}:
+@example
+UPDATE table_name SET not_key_column=# WHERE not_key_column=# LIMIT 1;
+@end example
+@item
+All big results are automatically limited to @code{#select_limit#} rows.
+@item
+@code{SELECT} statements that would probably need to examine more than
+@code{#max_join_size#} row combinations will be aborted.
+@end itemize
+
+Some useful hints about the @code{mysql} client:
+
+Some data is much more readable when displayed vertically instead of in
+the usual horizontal box-type output. For example, longer text that
+includes newlines is often much easier to read with vertical
+output.
+
+@example
+mysql> select * from mails where length(txt) < 300 limit 300,1\G
+*************************** 1. row ***************************
+ msg_nro: 3068
+ date: 2000-03-01 23:29:50
+time_zone: +0200
+mail_from: Monty
+ reply: monty@@no.spam.com
+ mail_to: "Thimble Smith" <tim@@no.spam.com>
+ sbj: UTF-8
+ txt: >>>>> "Thimble" == Thimble Smith writes:
+
+Thimble> Hi. I think this is a good idea. Is anyone familiar with UTF-8
+Thimble> or Unicode? Otherwise I'll put this on my TODO list and see what
+Thimble> happens.
+
+Yes, please do that.
+
+Regards,
+Monty
+ file: inbox-jani-1
+ hash: 190402944
+1 row in set (0.09 sec)
+@end example
+
+@itemize @bullet
+@item
+For logging, you can use the @code{tee} option. @code{tee} can be
+enabled with the @code{--tee=...} option, or interactively from the
+command line with the @code{tee} command. All the data displayed on the
+screen will also be appended to the given file. This can be very useful
+for debugging purposes as well. @code{tee} can be disabled from the
+command line with the @code{notee} command; executing @code{tee} again
+starts logging again. Without a parameter the previous file will be
+used. Note that @code{tee} flushes the results to the file after
+each command, just before the prompt appears again waiting for the
+next command.
+
+@item
+Browsing or searching the results in interactive mode with UNIX
+@code{less}, @code{more}, or any other similar program is possible with
+the @code{--pager[=...]} option. Without an argument, the @code{mysql}
+client will look for the @code{PAGER} environment variable and set
+@code{pager} to that. @code{pager} can be enabled from the interactive
+command line with the @code{pager} command and disabled with the
+@code{nopager} command. The command optionally takes an argument, and
+@code{pager} will be set to that. The @code{pager} command can be called
+without an argument, but this requires that the @code{--pager} option
+was used; otherwise @code{pager} defaults to stdout. @code{pager} works
+only in UNIX, since it uses the popen() function, which doesn't exist in
+Windows. In Windows, the @code{tee} option can be used instead, although
+it may not be as handy as @code{pager} can be in some situations.
+
+@item
+A few tips about @code{pager}: You can use it to write to a file:
+@example
+mysql> pager cat > /tmp/log.txt
+@end example
+and the results will only go to a file. You can also pass any options
+for the programs that you want to use with the @code{pager}:
+@example
+mysql> pager less -n -i -S
+@end example
+From the above, do note the option '-S'. You may find it very useful when
+browsing the results; try the option with horizontal output (end
+commands with '\g' or ';') and with vertical output (end commands with
+'\G'). Sometimes a very wide result set is hard to read on the screen;
+with option -S to less you can browse the results within the interactive
+less from left to right, preventing lines longer than your screen from
+being wrapped onto the next line. This can make the result set much more
+readable. You can switch the mode on and off within interactive
+less with '-S'. Type 'h' within less for more help about it.
+
+@item
+Last (unless you already understood this from the above examples ;) you
+can combine these in very complex ways to handle the results. For example, the
+following would send the results to two files in two different
+directories, on two different hard disks mounted on /dr1 and /dr2, yet
+still let the results be seen on the screen via less:
+@example
+mysql> pager cat | tee /dr1/tmp/res.txt | tee /dr2/tmp/res2.txt | less -n -i -S
+@end example
+
+@item
+You can also combine the two functions above: have @code{tee}
+enabled and @code{pager} set to 'less', and you will be able to browse the
+results in UNIX 'less' and still have everything appended to a file
+at the same time. The difference between @code{UNIX tee} used with the
+@code{pager} and the @code{mysql} client's built-in @code{tee} is that
+the built-in @code{tee} works even if you don't have @code{UNIX tee}
+available. The built-in @code{tee} also logs everything that is printed
+on the screen, whereas @code{UNIX tee} used with @code{pager} doesn't
+log quite that much. Last, but not least, the interactive @code{tee} is
+handier to switch on and off when you want to log something into a
+file but want to be able to turn the feature off sometimes.
+@end itemize
+
+
+@node mysqladmin, mysqldump, mysql, Client-Side Scripts
+@subsection mysqladmin, Administering a MySQL Server
+
+@cindex administration, server
+@cindex server administration
+@cindex @code{mysqladmin}
+
+A utility for performing administrative operations. The syntax is:
+
+@example
+shell> mysqladmin [OPTIONS] command [command-option] command ...
+@end example
+
+You can get a list of the options your version of @code{mysqladmin} supports
+by executing @code{mysqladmin --help}.
+
+The current @code{mysqladmin} supports the following commands:
+
+@table @code
+@item create databasename
+Create a new database.
+
+@item drop databasename
+Delete a database and all its tables.
+
+@item extended-status
+Gives an extended status message from the server.
+
+@item flush-hosts
+Flush all cached hosts.
+
+@item flush-logs
+Flush all logs.
+
+@item flush-tables
+Flush all tables.
+
+@item flush-privileges
+Reload grant tables (same as reload).
+
+@item kill id,id,...
+Kill mysql threads.
+
+@item password new-password
+Set a new password. Changes the old password to @code{new-password}.
+
+@item ping
+Check if mysqld is alive.
+
+@item processlist
+Show list of active threads in server.
+
+@item reload
+Reload grant tables.
+
+@item refresh
+Flush all tables and close and open logfiles.
+
+@item shutdown
+Take server down.
+
+@item slave-start
+Start slave replication thread.
+
+@item slave-stop
+Stop slave replication thread.
+
+@item status
+Gives a short status message from the server.
+
+@item variables
+Prints variables available.
+
+@item version
+Get version info from server.
+@end table
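+
+For example, a sketch of using the @code{password} command to change the
+password of the account you connect as (@code{new_password} is a
+placeholder):
+
+@example
+shell> mysqladmin -u root -p password 'new_password'
+@end example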
+
+All commands can be shortened to their unique prefix. For example:
+
+@example
+shell> mysqladmin proc stat
++----+-------+-----------+----+-------------+------+-------+------+
+| Id | User | Host | db | Command | Time | State | Info |
++----+-------+-----------+----+-------------+------+-------+------+
+| 6 | monty | localhost | | Processlist | 0 | | |
++----+-------+-----------+----+-------------+------+-------+------+
+Uptime: 10077 Threads: 1 Questions: 9 Slow queries: 0 Opens: 6 Flush tables: 1 Open tables: 2 Memory in use: 1092K Max memory used: 1116K
+@end example
+
+@cindex status command, results
+The @code{mysqladmin status} command result has the following columns:
+
+@cindex uptime
+@multitable @columnfractions .3 .7
+@item Uptime @tab Number of seconds the @strong{MySQL} server has been up.
+@cindex threads
+@item Threads @tab Number of active threads (clients).
+@cindex questions
+@item Questions @tab Number of questions from clients since @code{mysqld} was started.
+@cindex slow queries
+@item Slow queries @tab Queries that have taken more than @code{long_query_time} seconds. @xref{Slow query log}.
+@cindex opens
+@item Opens @tab How many tables @code{mysqld} has opened.
+@cindex flush tables
+@cindex tables, flush
+@item Flush tables @tab Number of @code{flush ...}, @code{refresh}, and @code{reload} commands.
+@cindex open tables
+@item Open tables @tab Number of tables that are open now.
+@cindex memory use
+@item Memory in use @tab Memory allocated directly by the @code{mysqld} code (only available when @strong{MySQL} is compiled with --with-debug=full).
+@cindex max memory used
+@item Max memory used @tab Maximum memory allocated directly by the @code{mysqld} code (only available when @strong{MySQL} is compiled with --with-debug=full).
+@end multitable
+
+If you do @code{mysqladmin shutdown} on a socket (in other words, on
+the computer where @code{mysqld} is running), @code{mysqladmin} will
+wait until the @strong{MySQL} @code{pid-file} is removed to ensure that
+the @code{mysqld} server has stopped properly.
+
+
+@node mysqldump, mysqlhotcopy, mysqladmin, Client-Side Scripts
+@subsection mysqldump, Dumping Table Structure and Data
+
+@cindex dumping, databases
+@cindex databases, dumping
+@cindex tables, dumping
+@cindex backing up, databases
+
+@cindex @code{mysqldump}
+Utility to dump a database or a collection of databases for backup or for
+transferring the data to another SQL server (not necessarily a @strong{MySQL}
+server). The dump will contain SQL statements to create the table
+and/or populate the table.
+
+If you are doing a backup on the server, you should consider using
+@code{mysqlhotcopy} instead. @xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
+
+@example
+shell> mysqldump [OPTIONS] database [tables]
+OR mysqldump [OPTIONS] --databases [OPTIONS] DB1 [DB2 DB3...]
+OR mysqldump [OPTIONS] --all-databases [OPTIONS]
+@end example
+
+If you don't give any tables, or if you use the @code{--databases} or
+@code{--all-databases} option, entire databases will be dumped.
+
+You can get a list of the options your version of @code{mysqldump} supports
+by executing @code{mysqldump --help}.
+
+Note that if you run @code{mysqldump} without @code{--quick} or
+@code{--opt}, @code{mysqldump} will load the whole result set into
+memory before dumping the result. This will probably be a problem if
+you are dumping a big database.
+
+Note that if you are using a new copy of the @code{mysqldump} program
+and you are going to do a dump that will be read into a very old @strong{MySQL}
+server, you should not use the @code{--opt} or @code{-e} options.
+
+@code{mysqldump} supports the following options:
+
+@table @code
+@item --add-locks
+Add @code{LOCK TABLES} before and @code{UNLOCK TABLES} after each table dump.
+(To get faster inserts into @strong{MySQL}.)
+@item --add-drop-table
+Add a @code{drop table} before each create statement.
+@item -A, --all-databases
+Dump all the databases. This will be the same as @code{--databases} with all
+databases selected.
+@item -a, --all
+Include all @strong{MySQL}-specific create options.
+@item --allow-keywords
+Allow creation of column names that are keywords. This works by
+prefixing each column name with the table name.
+@item -c, --complete-insert
+Use complete insert statements (with column names).
+@item -C, --compress
+Compress all information between the client and the server if both support
+compression.
+@item -B, --databases
+To dump several databases. Note the difference in usage. In this case
+no tables are given. All name arguments are regarded as database names.
+@code{USE db_name;} will be included in the output before each new database.
+@item --delayed
+Insert rows with the @code{INSERT DELAYED} command.
+@item -e, --extended-insert
+Use the new multiline @code{INSERT} syntax. (Gives more compact and
+faster insert statements.)
+@item -#, --debug[=option_string]
+Trace usage of the program (for debugging).
+@item --help
+Display a help message and exit.
+@item --fields-terminated-by=...
+@itemx --fields-enclosed-by=...
+@itemx --fields-optionally-enclosed-by=...
+@itemx --fields-escaped-by=...
+@itemx --lines-terminated-by=...
+These options are used with the @code{-T} option and have the same
+meaning as the corresponding clauses for @code{LOAD DATA INFILE}.
+@xref{LOAD DATA, , @code{LOAD DATA}}.
+@item -F, --flush-logs
+Flush log file in the @strong{MySQL} server before starting the dump.
+@item -f, --force
+Continue even if we get a SQL error during a table dump.
+@item -h, --host=...
+Dump data from the @strong{MySQL} server on the named host. The default host
+is @code{localhost}.
+@item -l, --lock-tables
+Lock all tables before starting the dump. The tables are locked with
+@code{READ LOCAL} to allow concurrent inserts in the case of @code{MyISAM}
+tables.
+@item -n, --no-create-db
+'CREATE DATABASE /*!32312 IF NOT EXISTS*/ db_name;' will not be put in the
+output. Otherwise this line is added if the --databases or
+--all-databases option was given.
+@item -t, --no-create-info
+Don't write table creation information (the @code{CREATE TABLE} statement).
+@item -d, --no-data
+Don't write any row information for the table. This is very useful if you
+just want to get a dump of the structure for a table!
+@item --opt
+Same as @code{--quick --add-drop-table --add-locks --extended-insert
+--lock-tables}. Should give you the fastest possible dump for reading
+into a @strong{MySQL} server.
+@item -pyour_pass, --password[=your_pass]
+The password to use when connecting to the server. If you specify
+no @samp{=your_pass} part,
+you will be prompted for a password.
+@item -P port_num, --port=port_num
+The TCP/IP port number to use for connecting to a host. (This is used for
+connections to hosts other than @code{localhost}, for which Unix sockets are
+used.)
+@item -q, --quick
+Don't buffer query, dump directly to stdout. Uses @code{mysql_use_result()}
+to do this.
+@item -r, --result-file=...
+Direct output to the given file. This option should be used in MSDOS,
+because it prevents the newline '\n' from being converted to '\r\n'
+(carriage return + newline).
+@item -S /path/to/socket, --socket=/path/to/socket
+The socket file to use when connecting to @code{localhost} (which is the
+default host).
+@item --tables
+Overrides option --databases (-B).
+@item -T, --tab=path-to-some-directory
+Creates a @code{table_name.sql} file, that contains the SQL CREATE commands,
+and a @code{table_name.txt} file, that contains the data, for each given table.
+@strong{NOTE}: This only works if @code{mysqldump} is run on the same
+machine as the @code{mysqld} daemon. The format of the @code{.txt} file
+is made according to the @code{--fields-xxx} and @code{--lines-xxx} options.
+@item -u user_name, --user=user_name
+The @strong{MySQL} user name to use when connecting to the server. The
+default value is your Unix login name.
+@item -O var=option, --set-variable var=option
+Set the value of a variable. The possible variables are listed below.
+@item -v, --verbose
+Verbose mode. Print out more information on what the program does.
+@item -V, --version
+Print version information and exit.
+@item -w, --where='where-condition'
+Dump only selected records. Note that QUOTES are mandatory:
+
+@example
+"--where=user='jimf'" "-wuserid>1" "-wuserid<1"
+@end example
+@item -O net_buffer_length=#, where # < 16M
+When creating multi-row-insert statements (as with option
+@code{--extended-insert} or @code{--opt}), @code{mysqldump} will create
+rows up to @code{net_buffer_length} length. If you increase this
+variable, you should also ensure that the @code{max_allowed_packet}
+variable in the @strong{MySQL} server is bigger than the
+@code{net_buffer_length}.
+@end table
+
+The most common use of @code{mysqldump} is probably for making a backup of
+whole databases. @xref{Backup}.
+
+@example
+mysqldump --opt database > backup-file.sql
+@end example
+
+You can read this back into @strong{MySQL} with:
+
+@example
+mysql database < backup-file.sql
+@end example
+
+or
+
+@example
+mysql -e "source /path-to-backup/backup-file.sql" database
+@end example
+
+However, it's also very useful to populate another @strong{MySQL} server with
+information from a database:
+
+@example
+mysqldump --opt database | mysql --host=remote-host -C database
+@end example
+
+It is possible to dump several databases with one command:
+
+@example
+mysqldump --databases database1 [database2 database3...] > my_databases.sql
+@end example
+
+If all the databases are wanted, one can use:
+
+@example
+mysqldump --all-databases > all_databases.sql
+@end example
+
+
+@node mysqlhotcopy, mysqlimport, mysqldump, Client-Side Scripts
+@subsection mysqlhotcopy, Copying MySQL Databases and Tables
+
+@cindex dumping, databases
+@cindex databases, dumping
+@cindex tables, dumping
+@cindex backing up, databases
+
+@code{mysqlhotcopy} is a Perl script that uses @code{LOCK TABLES},
+@code{FLUSH TABLES} and @code{cp} or @code{scp} to quickly make a backup
+of a database. It's the fastest way to make a backup of a database or
+of single tables, but it can only be run on the same machine where the
+database directories are located.
+
+@example
+mysqlhotcopy db_name [/path/to/new_directory]
+
+mysqlhotcopy db_name_1 ... db_name_n /path/to/new_directory
+
+mysqlhotcopy db_name./regex/
+@end example
+
+@code{mysqlhotcopy} supports the following options:
+
+@table @code
+@item -?, --help
+Display a help screen and exit
+@item -u, --user=#
+User for database login
+@item -p, --password=#
+Password to use when connecting to server
+@item -P, --port=#
+Port to use when connecting to local server
+@item -S, --socket=#
+Socket to use when connecting to local server
+@item --allowold
+Don't abort if target already exists (rename it _old)
+@item --keepold
+Don't delete previous (now renamed) target when done
+@item --noindices
+Don't include full index files in the copy, to make the backup smaller and
+faster. The indexes can later be reconstructed with @code{myisamchk -rq}.
+@item --method=#
+Method for copy (@code{cp} or @code{scp}).
+@item -q, --quiet
+Be silent except for errors
+@item --debug
+Enable debug
+@item -n, --dryrun
+Report actions without doing them
+@item --regexp=#
+Copy all databases with names matching regexp
+@item --suffix=#
+Suffix for names of copied databases
+@item --checkpoint=#
+Insert checkpoint entry into specified db.table
+@item --flushlog
+Flush logs once all tables are locked.
+@item --tmpdir=#
+Temporary directory (instead of /tmp).
+@end table
+
+You can use @code{perldoc mysqlhotcopy} to get more complete
+documentation for @code{mysqlhotcopy}.
+
+@code{mysqlhotcopy} reads the groups @code{[client]} and @code{[mysqlhotcopy]}
+from the option files.
+
+To be able to execute @code{mysqlhotcopy} you need write access to the
+backup directory, the @code{SELECT} privilege on the tables you are about to
+copy, and the @strong{MySQL} @code{Reload} privilege (to be able to
+execute @code{FLUSH TABLES}).
+
+
+@node mysqlimport, mysqlshow, mysqlhotcopy, Client-Side Scripts
+@subsection mysqlimport, Importing Data from Text Files
+
+@cindex importing, data
+@cindex data, importing
+@cindex files, text
+@cindex text files, importing
+@cindex @code{mysqlimport}
+
+@code{mysqlimport} provides a command-line interface to the @code{LOAD DATA
+INFILE} SQL statement. Most options to @code{mysqlimport} correspond
+directly to the same options to @code{LOAD DATA INFILE}.
+@xref{LOAD DATA, , @code{LOAD DATA}}.
+
+@code{mysqlimport} is invoked like this:
+
+@example
+shell> mysqlimport [options] database textfile1 [textfile2....]
+@end example
+
+For each text file named on the command line,
+@code{mysqlimport} strips any extension from the filename and uses the result
+to determine which table to import the file's contents into. For example,
+files named @file{patient.txt}, @file{patient.text}, and @file{patient} would
+all be imported into a table named @code{patient}.
+
+@code{mysqlimport} supports the following options:
+
+@table @code
+@item -c, --columns=...
+This option takes a comma-separated list of field names as an argument.
+The field list is used to create a proper @code{LOAD DATA INFILE} command,
+which is then passed to @strong{MySQL}. @xref{LOAD DATA, , @code{LOAD DATA}}.
+
+@item -C, --compress
+Compress all information between the client and the server if both support
+compression.
+
+@item -#, --debug[=option_string]
+Trace usage of the program (for debugging).
+
+@item -d, --delete
+Empty the table before importing the text file.
+
+@item --fields-terminated-by=...
+@itemx --fields-enclosed-by=...
+@itemx --fields-optionally-enclosed-by=...
+@itemx --fields-escaped-by=...
+@itemx --lines-terminated-by=...
+These options have the same meaning as the corresponding clauses for
+@code{LOAD DATA INFILE}. @xref{LOAD DATA, , @code{LOAD DATA}}.
+
+@item -f, --force
+Ignore errors. For example, if a table for a text file doesn't exist,
+continue processing any remaining files. Without @code{--force},
+@code{mysqlimport} exits if a table doesn't exist.
+
+@item --help
+Display a help message and exit.
+
+@item -h host_name, --host=host_name
+Import data to the @strong{MySQL} server on the named host. The default host
+is @code{localhost}.
+
+@item -i, --ignore
+See the description for the @code{--replace} option.
+
+@item -l, --lock-tables
+Lock @strong{ALL} tables for writing before processing any text files. This
+ensures that all tables are synchronized on the server.
+
+@item -L, --local
+Read input files from the client. By default, text files are assumed to be on
+the server if you connect to @code{localhost} (which is the default host).
+
+@item -pyour_pass, --password[=your_pass]
+The password to use when connecting to the server. If you specify
+no @samp{=your_pass} part,
+you will be prompted for a password.
+
+@item -P port_num, --port=port_num
+The TCP/IP port number to use for connecting to a host. (This is used for
+connections to hosts other than @code{localhost}, for which Unix sockets are
+used.)
+
+@item -r, --replace
+The @code{--replace} and @code{--ignore} options control handling of input
+records that duplicate existing records on unique key values. If you specify
+@code{--replace}, new rows replace existing rows that have the same unique key
+value. If you specify @code{--ignore}, input rows that duplicate an existing
+row on a unique key value are skipped. If you don't specify either option, an
+error occurs when a duplicate key value is found, and the rest of the text
+file is ignored.
+
+@item -s, --silent
+Silent mode. Write output only when errors occur.
+
+@item -S /path/to/socket, --socket=/path/to/socket
+The socket file to use when connecting to @code{localhost} (which is the
+default host).
+
+@item -u user_name, --user=user_name
+The @strong{MySQL} user name to use when connecting to the server. The
+default value is your Unix login name.
+
+@item -v, --verbose
+Verbose mode. Print out more information about what the program does.
+
+@item -V, --version
+Print version information and exit.
+@end table
+
+Here is a sample run using @code{mysqlimport}:
+
+@example
+$ mysql --version
+mysql Ver 9.33 Distrib 3.22.25, for pc-linux-gnu (i686)
+$ uname -a
+Linux xxx.com 2.2.5-15 #1 Mon Apr 19 22:21:09 EDT 1999 i586 unknown
+$ mysql -e 'CREATE TABLE imptest(id INT, n VARCHAR(30))' test
+$ ed
+a
+100 Max Sydow
+101 Count Dracula
+.
+w imptest.txt
+32
+q
+$ od -c imptest.txt
+0000000 1 0 0 \t M a x S y d o w \n 1 0
+0000020 1 \t C o u n t D r a c u l a \n
+0000040
+$ mysqlimport --local test imptest.txt
+test.imptest: Records: 2 Deleted: 0 Skipped: 0 Warnings: 0
+$ mysql -e 'SELECT * FROM imptest' test
++------+---------------+
+| id | n |
++------+---------------+
+| 100 | Max Sydow |
+| 101 | Count Dracula |
++------+---------------+
+@end example
+
+
+@node mysqlshow, perror, mysqlimport, Client-Side Scripts
+@subsection Showing Databases, Tables, and Columns
+
+@cindex databases, displaying
+@cindex displaying, database information
+@cindex tables, displaying
+@cindex columns, displaying
+@cindex showing, database information
+
+@code{mysqlshow} can be used to quickly look at which databases exist,
+their tables, and the tables' columns.
+
+With the @code{mysql} program you can get the same information with the
+@code{SHOW} commands. @xref{SHOW}.
+
+@code{mysqlshow} is invoked like this:
+
+@example
+shell> mysqlshow [OPTIONS] [database [table [column]]]
+@end example
+
+@itemize @bullet
+@item
+If no database is given, all matching databases are shown.
+@item
+If no table is given, all matching tables in the database are shown.
+@item
+If no column is given, all matching columns and column types in the table
+are shown.
@end itemize
-@node Reference, Table types, Privilege system, Top
+Note that in newer @strong{MySQL} versions, you only see those
+databases/tables/columns for which you have some privileges.
+
+If the last argument contains a shell or SQL wild card (@code{*}, @code{?},
+@code{%} or @code{_}), then only what's matched by the wild card is shown.
+This may cause some confusion when you try to display the columns for a
+table with a @code{_} in its name, as in that case @code{mysqlshow} only
+shows you the table names that match the pattern. This is easily fixed by
+adding an extra @code{%} last on the command line (as a separate
+argument), as in the sketch below.
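+
+For example, assuming a database @code{test} containing a table named
+@code{my_table} (hypothetical names), the first command below only lists
+the table names matching the pattern, while the second, with the extra
+@code{%} argument, lists the columns of that table:
+
+@example
+shell> mysqlshow test my_table
+shell> mysqlshow test my_table %
+@end example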
+
+
+@node perror, Batch Commands, mysqlshow, Client-Side Scripts
+@subsection perror, Explaining Error Codes
+
+@cindex error messages, displaying
+@cindex perror
+
+@code{perror} can be used to print error message(s). It can be
+invoked like this:
+
+@example
+shell> perror [OPTIONS] [ERRORCODE [ERRORCODE...]]
+
+For example:
+
+shell> perror 64 79
+Error code 64: Machine is not on the network
+Error code 79: Can not access a needed shared library
+@end example
+
+@code{perror} can be used to display a description for a system error
+code or a MyISAM/ISAM table handler error code. The error messages
+are mostly system dependent.
+
+
+@node Batch Commands, , perror, Client-Side Scripts
+@subsection How to Run SQL Commands from a Text File
+
+@c FIX add the 'source' command
+
+The @code{mysql} client typically is used interactively, like this:
+
+@example
+shell> mysql database
+@end example
+
+However, it's also possible to put your SQL commands in a file and tell
+@code{mysql} to read its input from that file. To do so, create a text
+file @file{text_file} that contains the commands you wish to execute.
+Then invoke @code{mysql} as shown below:
+
+@example
+shell> mysql database < text_file
+@end example
+
+You can also start your text file with a @code{USE db_name} statement. In
+this case, it is unnecessary to specify the database name on the command
+line:
+
+@example
+shell> mysql < text_file
+@end example
+
+@xref{Client-Side Scripts}.
+
+
+@node Log Files, Replication, Client-Side Scripts, MySQL Database Administration
+@section The MySQL Log Files
+
+@cindex Log files
+
+@strong{MySQL} has several different log files that can help you find
+out what's going on inside @code{mysqld}:
+
+@multitable @columnfractions .3 .7
+@item The error log @tab Problems encountered when starting, running, or stopping @code{mysqld}.
+@item The isam log @tab Logs all changes to the ISAM tables. Used only for debugging the isam code.
+@item The query log @tab Established connections and executed queries.
+@item The update log @tab Deprecated: Stores all statements that change data.
+@item The binary log @tab Stores all statements that change something. Also used for replication.
+@item The slow log @tab Stores all queries that took more than @code{long_query_time} to execute or didn't use indexes.
+@end multitable
+
+All logs can be found in the @code{mysqld} data directory. You can
+force @code{mysqld} to reopen the log files (or in some cases
+switch to a new log) by executing @code{FLUSH LOGS}. @xref{FLUSH}.
+
+@menu
+* Error log::
+* Query log::
+* Update log::
+* Binary log::
+* Slow query log::
+* Log file maintenance::
+@end menu
+
+
+@node Error log, Query log, Log Files, Log Files
+@subsection The Error Log
+
+@code{mysqld} writes all errors to stderr, which the
+@code{safe_mysqld} script redirects to a file called
+@code{'hostname'.err}. (On Windows, @code{mysqld} writes this directly
+to @file{\mysql\data\mysql.err}).
+
+This log contains information indicating when @code{mysqld} was started and
+stopped, and also any critical errors found while running. If @code{mysqld}
+dies unexpectedly and @code{safe_mysqld} needs to restart @code{mysqld},
+@code{safe_mysqld} will write a @code{restarted mysqld} row in this
+file. This log also holds a warning if @code{mysqld} notices a table
+that needs to be automatically checked or repaired.
+
+On some operating systems, the error log will contain a stack trace
+if @code{mysqld} dies. This can be used to find out where
+@code{mysqld} died. @xref{Using stack trace}.
+
+
+@node Query log, Update log, Error log, Log Files
+@subsection The General Query Log
+
+@cindex query log
+@cindex files, query log
+
+If you want to know what happens within @code{mysqld}, you should start
+it with @code{--log[=file]}. This will log all connections and queries
+to the log file (by default named @file{'hostname'.log}). This log can
+be very useful when you suspect an error in a client and want to know
+exactly what @code{mysqld} thought the client sent to it.
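+
+A sketch of enabling the query log from an option file (the file name is
+a placeholder; if you give no name, the default @file{'hostname'.log} is
+used):
+
+@example
+[mysqld]
+log=/usr/local/mysql/data/query.log
+@end example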
+
+By default, the @code{mysql.server} script starts the @strong{MySQL}
+server with the @code{-l} option. If you need better performance when
+you start using @strong{MySQL} in a production environment, you can
+remove the @code{-l} option from @code{mysql.server} or change it to
+@code{--log-bin}.
+
+The entries in this log are written as @code{mysqld} receives the questions.
+This may be different from the order in which the statements are executed.
+This is in contrast to the update log and the binary log, which are written
+after the query is executed but before any locks are released.
+
+
+@node Update log, Binary log, Query log, Log Files
+@subsection The Update Log
+
+@cindex update log
+@cindex files, update log
+
+@strong{NOTE}: The update log is replaced by the binary
+log. @xref{Binary log}. With the binary log you can do anything that you
+can do with the update log.
+
+When started with the @code{--log-update[=file_name]} option,
+@code{mysqld} writes a log file containing all SQL commands that update
+data. If no filename is given, it defaults to the name of the host
+machine. If a filename is given, but it doesn't contain a path, the file
+is written in the data directory. If @file{file_name} doesn't have an
+extension, @code{mysqld} will create log file names like so:
+@file{file_name.###}, where @code{###} is a number that is incremented each
+time you execute @code{mysqladmin refresh}, execute @code{mysqladmin
+flush-logs}, execute the @code{FLUSH LOGS} statement, or restart the server.
+
+@strong{NOTE:} For the above scheme to work, you should NOT create
+your own files in the directory used by the update log with the same
+name as the update log plus an extension that may be regarded as a number!
+
+If you use the @code{--log} or @code{-l} options, @code{mysqld} writes a
+general log with a filename of @file{hostname.log}, and restarts and
+refreshes do not cause a new log file to be generated (although it is closed
+and reopened). In this case you can copy it (on Unix) by doing:
+
+@example
+mv hostname.log hostname-old.log
+mysqladmin flush-logs
+cp hostname-old.log to-backup-directory
+rm hostname-old.log
+@end example
+
+Update logging is smart because it logs only statements that really update
+data. So an @code{UPDATE} or a @code{DELETE} with a @code{WHERE} that finds no
+rows is not written to the log. It even skips @code{UPDATE} statements that
+set a column to the value it already has.
+
+The update logging is done immediately after a query completes but before
+any locks are released or any commit is done. This ensures that the queries
+are logged in execution order.
+
+If you want to update a database from update log files, you could do the
+following (assuming your update logs have names of the form
+@file{file_name.###}):
+
+@example
+shell> ls -1 -t -r file_name.[0-9]* | xargs cat | mysql
+@end example
+
+@code{ls} is used to get all the log files in the right order.
+
+This can be useful if you have to revert to backup files after a crash
+and you want to redo the updates that occurred between the time of the backup
+and the crash.
+
+
+@node Binary log, Slow query log, Update log, Log Files
+@subsection The Binary Update Log
+
+@cindex binary log
+@cindex files, binary log
+
+In the future the binary log will replace the update log, so we
+recommend that you switch to this log format as soon as possible!
+
+The binary log contains all information that is available in the update
+log in a more efficient format. It also contains information about how long
+every query that updated the database took.
+
+The binary log is also used when you are replicating a slave from a master.
+@xref{Replication}.
+
+When started with the @code{--log-bin[=file_name]} option, @code{mysqld}
+writes a log file containing all SQL commands that update data. If no
+file name is given, it defaults to the name of the host machine followed
+by @code{-bin}. If file name is given, but it doesn't contain a path, the
+file is written in the data directory.
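+
+As a minimal sketch (the path below is only illustrative, and we assume
+you start the server through @code{safe_mysqld}), either of the
+following enables binary logging:
+
+@example
+shell> safe_mysqld --log-bin &
+shell> safe_mysqld --log-bin=/var/log/mysql/master-bin &
+@end example
+
+The first form writes @file{`hostname`-bin.001} and so on in the data
+directory; the second writes the numbered log files under
+@file{/var/log/mysql} instead.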
+
+You can use the following options to @code{mysqld} to affect what is logged
+to the binary log:
+
+@multitable @columnfractions .4 .6
+@item @code{binlog-do-db=database_name} @tab
+Tells the master it should log updates for the specified database, and
+exclude all others not explicitly mentioned.
+(Example: @code{binlog-do-db=some_database})
+
+@item @code{binlog-ignore-db=database_name} @tab
+Tells the master that updates to the given database should not be logged
+to the binary log (Example: @code{binlog-ignore-db=some_database})
+@end multitable
+
+@code{mysqld} appends a numeric extension to the binary log filename. The
+number is incremented each time you execute @code{mysqladmin refresh},
+execute @code{mysqladmin flush-logs}, execute the @code{FLUSH LOGS}
+statement, or restart the server.
+
+To keep track of which binary log files have been used, @code{mysqld}
+also creates a binary log index file that contains the names of all used
+binary log files. By default this has the same name as the binary log
+file, with the extension @file{.index}.
+You can change the name of the binary log index file with the
+@code{--log-bin-index=[filename]} option.
+
+If you are using replication, you should not delete old binary log
+files until you are sure that no slave will ever need to use them.
+One way to do this is to do @code{mysqladmin flush-logs} once a day and then
+remove any logs that are more than 3 days old.
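+
+A minimal sketch of such a cleanup (the path and the three-day limit are
+only an example; first verify with @code{SHOW SLAVE STATUS} on each
+slave that none of them still needs the files):
+
+@example
+shell> mysqladmin flush-logs
+shell> find /path/to/data-dir -name '*-bin.[0-9]*' -mtime +3
+@end example
+
+The @code{find} command only lists the candidate files; remove them once
+you are sure no slave needs them.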
+
+You can examine the binary log file with the @code{mysqlbinlog} command.
+For example, you can update a @strong{MySQL} server from the binary log
+as follows:
+
+@example
+mysqlbinlog log-file | mysql -h server_name
+@end example
+
+You can also use the @code{mysqlbinlog} program to read the binary log
+directly from a remote @strong{MySQL} server!
+
+@code{mysqlbinlog --help} will give you more information about how to use
+this program!
+
+If you are using @code{BEGIN [WORK]} or @code{SET AUTOCOMMIT=0}, you must
+use the @strong{MySQL} binary log for backups instead of the old update log.
+
+The binary logging is done immediately after a query completes but before
+any locks are released or any commit is done. This ensures that the
+queries are logged in execution order.
+
+All updates (@code{UPDATE}, @code{DELETE} or @code{INSERT}) that change
+a transactional table (like BDB tables) are cached until a @code{COMMIT}.
+Any updates to a non-transactional table are stored in the binary log at
+once. Every thread will, on start, allocate a buffer of
+@code{binlog_cache_size} to buffer queries. If a query is bigger than
+this, the thread will open a temporary file to handle the bigger cache.
+The temporary file will be deleted when the thread ends.
+
+The @code{max_binlog_cache_size} can be used to restrict the total size used
+to cache a multi-transaction query.
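+
+As a sketch, both sizes could be set from @code{my.cnf}, assuming the
+usual @code{set-variable} syntax for server variables (the values below
+are only illustrative):
+
+@example
+[mysqld]
+set-variable = binlog_cache_size=32K
+set-variable = max_binlog_cache_size=4M
+@end example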
+
+If you are using the update or binary log, concurrent inserts will not
+work together with @code{CREATE ... SELECT} and @code{INSERT ... SELECT}.
+This is to ensure that you can recreate an exact copy of your tables by
+applying the log on a backup.
+
+
+@node Slow query log, Log file maintenance, Binary log, Log Files
+@subsection The Slow Query Log
+
+@cindex slow query log
+@cindex files, slow query log
+
+When started with the @code{--log-slow-queries[=file_name]} option,
+@code{mysqld} writes a log file containing all SQL commands that took
+more than @code{long_query_time} to execute. The time to acquire the
+initial table locks is not counted as execution time.
+
+A query is written to the slow query log after it has been executed and
+after all locks have been released. This may be different from the order
+in which the statements are executed.
+
+If no file name is given, it defaults to the name of the host machine
+suffixed with @code{-slow.log}. If a filename is given, but doesn't
+contain a path, the file is written in the data directory.
+
+The slow query log can be used to find queries that take a long time to
+execute and are thus candidates for optimization. With a large log, that
+can become a difficult task. You can pipe the slow query log through the
+@code{mysqldumpslow} command to get a summary of the queries which
+appear in the log.
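+
+For example, assuming the default slow log name on a host called
+@code{pluto} (a purely illustrative host name), something like the
+following prints such a summary:
+
+@example
+shell> mysqldumpslow pluto-slow.log
+@end example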
+
+If you are using @code{--log-long-format}, queries that are not using
+indexes are also logged. @xref{Command-line options}.
+
+
+@node Log file maintenance, , Slow query log, Log Files
+@subsection Log File Maintenance
+
+@cindex files, log
+@cindex maintaining, log files
+@cindex log files, maintaining
+
+@strong{MySQL} has a number of log files which make it easy to see what
+is going on. @xref{Log Files}. From time to time, however, you must clean
+up after @strong{MySQL} to ensure that the logs don't take up too much
+disk space.
+
+When using @strong{MySQL} with log files, you will, from time to time,
+want to remove/backup old log files and tell @strong{MySQL} to start
+logging on new files. @xref{Backup}.
+
+On a Linux (@code{Redhat}) installation, you can use the
+@code{mysql-log-rotate} script for this. If you installed @strong{MySQL}
+from an RPM distribution, the script should have been installed
+automatically. Note that you should be careful with this if you are using
+the log for replication!
+
+On other systems you must install a short script yourself that you
+start from @code{cron} to handle log files.
+
+You can force @strong{MySQL} to start using new log files by using
+@code{mysqladmin flush-logs} or by using the SQL command @code{FLUSH LOGS}.
+If you are using @strong{MySQL} Version 3.21 you must use @code{mysqladmin refresh}.
+
+The above command does the following:
+
+@itemize @bullet
+@item
+If standard logging (@code{--log}) or slow query logging
+(@code{--log-slow-queries}) is used, closes and reopens the log file.
+(@file{mysql.log} and @file{`hostname`-slow.log} by default).
+@item
+If update logging (@code{--log-update}) is used, closes the update log and
+opens a new log file with a higher sequence number.
+@end itemize
+
+If you are using only an update log, you only have to flush the logs and
+then move the old update log files to a backup location.
+If you are using the normal logging, you can do something like this:
+
+@example
+shell> cd mysql-data-directory
+shell> mv mysql.log mysql.old
+shell> mysqladmin flush-logs
+@end example
+
+and then take a backup and remove @file{mysql.old}.
+
+
+@node Replication, , Log Files, MySQL Database Administration
+@section Replication in MySQL
+
+@cindex replication
+@cindex increasing, speed
+@cindex speed, increasing
+@cindex databases, replicating
+
+@menu
+* Replication Intro::
+* Replication Implementation::
+* Replication HOWTO::
+* Replication Features::
+* Replication Options::
+* Replication SQL::
+* Replication FAQ::
+* Replication Problems::
+@end menu
+
+This chapter describes the various replication features in @strong{MySQL}.
+It serves as a reference to the options available with replication.
+You will be introduced to replication and learn how to implement it.
+Towards the end, there are some frequently asked questions and descriptions
+of problems and how to solve them.
+
+
+@node Replication Intro, Replication Implementation, Replication, Replication
+@subsection Introduction
+
+One way replication can be used is to increase both robustness and
+speed. For robustness you can have two systems and can switch to the backup if
+you have problems with the master. The extra speed is achieved by
+sending a part of the non-updating queries to the replica server. Of
+course this only works if non-updating queries dominate, but that is the
+normal case.
+
+Starting in Version 3.23.15, @strong{MySQL} supports one-way replication
+internally. One server acts as the master, while the other acts as the
+slave. Note that one server could play the roles of master in one pair
+and slave in the other. The master server keeps a binary log of updates
+(@xref{Binary log}.) and an index file to binary logs to keep track of
+log rotation. The slave, upon connecting, informs the master where it
+left off since the last successfully propagated update, catches up on
+the updates, and then blocks and waits for the master to notify it of
+the new updates.
+
+Note that if you are replicating a database, all updates to this
+database should be done through the master!
+
+On older servers one can use the update log to do simple replication.
+@xref{Log Replication}.
+
+Another benefit of using replication is that one can get live backups of
+the system by doing a backup on a slave instead of doing it on the
+master. @xref{Backup}.
+
+
+@node Replication Implementation, Replication HOWTO, Replication Intro, Replication
+@subsection Replication Implementation Overview
+
+@cindex master-slave setup
+
+@strong{MySQL} replication is based on the master server keeping track
+of all changes to your databases (updates, deletes, and so on) in the
+binary log. @xref{Binary log}. The slave server(s) read the saved
+queries from the master server's binary log so that they can
+execute the same queries on their copies of the data.
+
+It is @strong{very important} to realize that the binary log is simply a
+record starting from a fixed point in time (the moment you enable binary
+logging). Any slaves which you set up will need copies of all the data
+from your master as it existed the moment that you enabled binary
+logging on the master. If you start your slaves with data that doesn't
+agree with what was on the master @strong{when the binary log was
+started}, your slaves may fail.
+
+A future version (4.0) of @strong{MySQL} will provide live backup
+functionality with no locking required, which will remove the need to
+keep a (possibly large) snapshot of data around for new slaves that you
+might wish to set up. However, at this time, it is necessary to block
+all writes either with a global read lock or by shutting down the
+master while taking a snapshot.
+
+Once a slave is properly configured and running, it will simply connect
+to the master and wait for updates to process. If the master goes away
+or the slave loses connectivity with your master, it will keep trying to
+connect every @code{master-connect-retry} seconds until it is able to
+reconnect and resume listening for updates.
+
+Each slave keeps track of where it left off. The master server has no
+knowledge of how many slaves there are or which ones are up-to-date at
+any given time.
+
+The next section explains the master/slave setup process in more detail.
+
+
+@node Replication HOWTO, Replication Features, Replication Implementation, Replication
+@subsection How To Set Up Replication
+
+Below is a quick description of how to set up complete replication on
+your current @strong{MySQL} server. It assumes you want to replicate all
+your databases and have not configured replication before. You will need
+to shut down your master server briefly to complete the steps outlined
+below.
+
+@enumerate
+@item
+Make sure you have a recent version of @strong{MySQL} installed on the master
+and slave(s).
+
+Use Version 3.23.29 or higher. Previous releases used a different binary
+log format and had bugs which have been fixed in newer releases. Please
+do not report bugs until you have verified that the problem is present
+in the latest release.
+
+@item
+Set up a special replication user on the master with the @code{FILE}
+privilege and permission to connect from all the slaves. If the user is
+only doing replication (which is recommended), you don't need to grant any
+additional privileges.
+
+For example, to create a user named @code{repl} which can access your
+master from any host, you might use this command:
+
+@example
+GRANT FILE ON *.* TO repl@@"%" IDENTIFIED BY '<password>';
+@end example
+
+@item
+Shut down @strong{MySQL} on the master.
+
+@example
+mysqladmin -u root -p<password> shutdown
+@end example
+
+@item
+Snapshot all the data on your master server.
+
+The easiest way to do this (on Unix) is to simply use @strong{tar} to
+produce an archive of your entire data directory. The exact data
+directory location depends on your installation.
+
+@example
+tar -cvf /tmp/mysql-snapshot.tar /path/to/data-dir
+@end example
+
+Windows users can use WinZip or similar software to create an archive of
+the data directory.
+
+@item
+In @code{my.cnf} on the master add @code{log-bin} and
+@code{server-id=unique number} to the @code{[mysqld]} section. It is
+very important that the id of the master is different from the id of
+each slave. Think of @code{server-id} as something similar to an IP
+address - it uniquely identifies the server instance in the community
+of replication partners.
+
+@example
+[mysqld]
+log-bin
+server-id=1
+@end example
+
+@item
+Restart @strong{MySQL} on the master.
+
+@item
+Add the following to @code{my.cnf} on the slave(s):
+
+@example
+master-host=<hostname of the master>
+master-user=<replication user name>
+master-password=<replication user password>
+master-port=<TCP/IP port for master>
+server-id=<some unique number between 2 and 2^32-1>
+@end example
+
+replacing the values in <> with what is relevant to your system.
+
+@code{server-id} must be different for each server participating in
+replication. If you don't specify a @code{server-id}, it will be set to 1
+if you have not defined @code{master-host}, else it will be set to 2.
+Note that if you omit @code{server-id}, the master will refuse
+connections from all slaves, and a slave will refuse to connect to the
+master. Thus, omitting @code{server-id} is only good for backup with a
+binary log.
+
+
+@item
+Copy the snapshot data into your data directory on your slave(s). Make
+sure that the privileges on the files and directories are correct. The
+user which @strong{MySQL} runs as needs to be able to read and write to
+them, just as on the master.
+
+@item Restart the slave(s).
+
+@end enumerate
+
+After you have done the above, the slave(s) should connect to the master
+and catch up on any updates which happened since the snapshot was taken.
+
+If you have forgotten to set @code{server-id} for the slave you will get
+the following error in the error log file:
+
+@example
+Warning: one should set server_id to a non-0 value if master_host is set.
+The server will not act as a slave.
+@end example
+
+If you have forgotten to do this for the master, the slaves will not be
+able to connect to the master.
+
+If a slave is not able to replicate for any reason, you will find error
+messages in the error log on the slave.
+
+Once a slave is replicating, you will find a file called
+@file{master.info} in the same directory as your error log. The
+@file{master.info} file is used by the slave to keep track of how much
+of the master's binary log it has processed. @strong{Do not} remove or
+edit the file unless you really know what you are doing. Even in that
+case, it is preferable to use the @code{CHANGE MASTER TO} command.
+
+
+@menu
+* Replication Features::
+* Replication Options::
+* Replication SQL::
+* Replication FAQ::
+* Replication Problems::
+@end menu
+
+@node Replication Features, Replication Options, Replication HOWTO, Replication
+@subsection Replication Features and Known Problems
+
+@cindex options, replication
+@cindex @code{my.cnf} file
+@cindex files,@code{my.cnf}
+
+Below is an explanation of what is supported and what is not:
+
+@itemize @bullet
+@item
+Replication will be done correctly with @code{AUTO_INCREMENT},
+@code{LAST_INSERT_ID}, and @code{TIMESTAMP} values.
+@item
+@code{RAND()} in updates does not replicate properly. Use
+@code{RAND(some_non_rand_expr)} if you are replicating updates with
+@code{RAND()}. You can, for example, use @code{UNIX_TIMESTAMP()} for the
+argument to @code{RAND()}.
+@item
+You have to use the same character set (@code{--default-character-set})
+on the master and the slave. If not, you may get duplicate key errors on
+the slave, because a key that is regarded as unique on the master may
+not be unique in the other character set.
+@item
+@code{LOAD DATA INFILE} will be handled properly as long as the file
+still resides on the master server at the time of update
+propagation. @code{LOAD LOCAL DATA INFILE} will be skipped.
+@item
+Update queries that use user variables are not replication-safe (yet).
+@item
+@code{FLUSH} commands are not stored in the binary log and are therefore
+not replicated to the slaves. This is not normally a problem, as
+@code{FLUSH} doesn't change any data. It does, however, mean that if you
+update the @code{mysql} privilege tables directly without using the
+@code{GRANT} statement and you replicate the @code{mysql} privilege
+database, you must do a @code{FLUSH PRIVILEGES} on your slaves to put
+the new privileges into effect.
+@item
+Starting in 3.23.29, temporary tables are replicated properly, except for
+the case when you shut down the slave server (not just the slave thread)
+while you have some temporary tables open and they are used in subsequent
+updates. To deal with this problem, shut down the slave as follows: do
+@code{SLAVE STOP}, then check the @code{Slave_open_temp_tables} variable
+to see if it is 0, and then issue @code{mysqladmin shutdown}. If the
+number is not 0, restart the slave thread with @code{SLAVE START} and see
+if you have better luck next time. There will be a cleaner solution, but
+it has to wait until Version 4.0. In earlier versions temporary tables
+are not replicated properly - we recommend that you either upgrade, or
+execute @code{SET SQL_LOG_BIN=0} on your clients before all queries that
+use temporary tables.
+@item
+@strong{MySQL} only supports one master and many slaves. We will in 4.x
+add a voting algorithm to automatically change master if something goes
+wrong with the current master. We will also introduce 'agent' processes
+to help with load balancing by sending select queries to different
+slaves.
+@item
+Starting in Version 3.23.26, it is safe to connect servers in a circular
+master-slave relationship with @code{log-slave-updates} enabled.
+Note, however, that many queries will not work right in this kind of
+setup unless your client code is written to take care of the potential
+problems that can happen from updates that occur in different sequence
+on different servers.
+
+This means that you can do a setup like the following:
+
+@example
+A -> B -> C -> A
+@end example
+
+This setup will only work if you only do non-conflicting updates between
+the tables. In other words, if you insert data in A and C, you should
+never insert a row in A that may have a conflicting key with a row
+inserted in C. You should also not update the same rows on two servers
+if the order in which the updates are applied matters.
+
+Note that the log format has changed in Version 3.23.26 so that
+pre-3.23.26 slaves will not be able to read it.
+@item
+If a query on the slave gets an error, the slave thread will
+terminate, and a message will appear in the @code{.err} file. You should
+then connect to the slave manually, fix the cause of the error (for
+example, a non-existent table), and then run the @code{SLAVE START} SQL
+command (available starting in Version 3.23.16). In Version 3.23.15, you
+will have to restart the server.
+@item
+If the connection to the master is lost, the slave will retry immediately,
+and then, in case of failure, every @code{master-connect-retry} (default
+60) seconds. Because of this, it is safe to shut down the master, and
+then restart it after a while. The slave will also be able to deal with
+network connectivity outages.
+@item
+Shutting down the slave (cleanly) is also safe, as it keeps track of
+where it left off. Unclean shutdowns might produce problems, especially
+if disk cache was not synced before the system died. Your system fault
+tolerance will be greatly increased if you have a good UPS.
+@item
+If the master is listening on a non-standard port, you will also need to
+specify this with the @code{master-port} parameter in @code{my.cnf}.
+@item
+In Version 3.23.15, all of the tables and databases will be
+replicated. Starting in Version 3.23.16, you can restrict replication to
+a set of databases with @code{replicate-do-db} directives in
+@code{my.cnf} or just exclude a set of databases with
+@code{replicate-ignore-db}. Note that up until Version 3.23.23, there was a bug
+that did not properly deal with @code{LOAD DATA INFILE} if you did it in
+a database that was excluded from replication.
+@item
+Starting in Version 3.23.16, @code{SET SQL_LOG_BIN = 0} will turn off
+replication (binary) logging on the master, and @code{SET SQL_LOG_BIN =
+1} will turn it back on - you must have the process privilege to do
+this.
+@item
+Starting in Version 3.23.19, you can clean up stale replication leftovers
+when something goes wrong and you want a clean start, using the
+@code{FLUSH MASTER} and @code{FLUSH SLAVE} commands. In Version 3.23.26
+we renamed them to
+@code{RESET MASTER} and @code{RESET SLAVE} respectively to clarify
+what they do. The old @code{FLUSH} variants still work, though, for
+compatibility.
+
+@item
+Starting in Version 3.23.21, you can use @code{LOAD TABLE FROM MASTER} for
+network backup and to set up replication initially. We have recently
+received a number of bug reports concerning it that we are investigating, so
+we recommend that you use it only in testing until we make it more stable.
+@item
+Starting in Version 3.23.23, you can change masters and adjust log position
+with @code{CHANGE MASTER TO}.
+@item
+Starting in Version 3.23.23, you can tell the master that updates to
+certain databases should not be logged to the binary log, using
+@code{binlog-ignore-db}.
+@item
+Starting in Version 3.23.26, you can use @code{replicate-rewrite-db} to tell
+the slave to apply updates from one database on the master to the one
+with a different name on the slave.
+@item
+Starting in Version 3.23.28, you can use @code{PURGE MASTER LOGS TO 'log-name'}
+to get rid of old logs while the slave is running.
+@end itemize
+
+
+@node Replication Options, Replication SQL, Replication Features, Replication
+@subsection Replication Options in my.cnf
+
+If you are using replication, we recommend that you use @strong{MySQL}
+Version 3.23.30 or later. Older versions work, but they do have some bugs
+and are missing some features.
+
+On both the master and the slave you need to use the @code{server-id}
+option. This sets a unique replication id. You should pick a unique value
+in the range between 1 and 2^32-1 for each master and slave.
+Example: @code{server-id=3}
+
+The following table has the options you can use for the @strong{MASTER}:
+
+@multitable @columnfractions .3 .7
+
+@item @strong{Option} @tab @strong{Description}
+@item @code{log-bin=filename} @tab
+Write to a binary update log to the specified location. Note that if you
+give it a parameter with an extension (for example,
+@code{log-bin=/mysql/logs/replication.log} ) versions up to 3.23.24 will
+not work right during replication if you do @code{FLUSH LOGS} . The
+problem is fixed in Version 3.23.25. If you are using this kind of log
+name, @code{FLUSH LOGS} will be ignored on binlog. To clear the log, run
+@code{FLUSH MASTER}, and do not forget to run @code{FLUSH SLAVE} on all
+slaves. In Version 3.23.26 and in later versions you should use
+@code{RESET MASTER} and @code{RESET SLAVE}
+
+@item @code{log-bin-index=filename} @tab
+Because the user could issue the @code{FLUSH LOGS} command, we need to
+know which log is currently active and which ones have been rotated out
+and in what sequence. This information is stored in the binary log index file.
+The default is `hostname`.index. You can use this option if you want to
+be a rebel. (Example: @code{log-bin-index=db.index})
+
+@item @code{sql-bin-update-same} @tab
+If set, setting @code{SQL_LOG_BIN} to a value will automatically set
+@code{SQL_LOG_UPDATE} to the same value and vice versa.
+
+@item @code{binlog-do-db=database_name} @tab
+Tells the master it should log updates for the specified database, and
+exclude all others not explicitly mentioned.
+(Example: @code{binlog-do-db=some_database})
+
+@item @code{binlog-ignore-db=database_name} @tab
+Tells the master that updates to the given database should not be logged
+to the binary log (Example: @code{binlog-ignore-db=some_database})
+@end multitable
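+
+As a sketch, a master configuration using some of the options above
+might look like this (the database name is only an example):
+
+@example
+[mysqld]
+log-bin
+server-id=1
+binlog-ignore-db=test
+@end example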
+
+The following table has the options you can use for the @strong{SLAVE}:
+
+@multitable @columnfractions .3 .7
+
+@item @strong{Option} @tab @strong{Description}
+@item @code{master-host=host} @tab
+Master hostname or IP address for replication. If not set, the slave
+thread will not be started.
+(Example: @code{master-host=db-master.mycompany.com})
+
+@item @code{master-user=username} @tab
+The user the slave thread will us for authentication when connecting to
+the master. The user must have @code{FILE} privilege. If the master user
+is not set, user @code{test} is assumed. (Example:
+@code{master-user=scott})
+
+@item @code{master-password=password} @tab
+The password the slave thread will authenticate with when connecting to
+the master. If not set, an empty password is assumed. (Example:
+@code{master-password=tiger})
+
+@item @code{master-port=portnumber} @tab
+The port the master is listening on. If not set, the compiled setting of
+@code{MYSQL_PORT} is assumed. If you have not tinkered with
+@code{configure} options, this should be 3306. (Example:
+@code{master-port=3306})
+
+@item @code{master-connect-retry=seconds} @tab
+The number of seconds the slave thread will sleep before retrying to
+connect to the master in case the master goes down or the connection is
+lost. Default is 60. (Example: @code{master-connect-retry=60})
+
+@item @code{master-info-file=filename} @tab
+The location of the file that remembers where we left off on the master
+during the replication process. The default is @file{master.info} in the
+data directory. There is little reason to ever change the default, short
+of the desire to be rebellious. (Example:
+@code{master-info-file=master.info})
+
+@item @code{replicate-do-table=db_name.table_name} @tab
+Tells the slave thread to restrict replication to the specified table.
+To specify more than one table, use the directive multiple times,
+once for each table.
+(Example: @code{replicate-do-table=some_db.some_table})
+
+@item @code{replicate-ignore-table=db_name.table_name} @tab
+Tells the slave thread to not replicate the specified table. To
+specify more than one table to ignore, use the directive multiple
+times, once for each table. (Example:
+@code{replicate-ignore-table=db_name.some_table})
+
+@item @code{replicate-wild-do-table=db_name.table_name} @tab
+Tells the slave thread to restrict replication to the tables that match
+the specified wildcard pattern. To specify more than one pattern, use
+the directive multiple times, once for each pattern.
+(Example: @code{replicate-wild-do-table=foo%.bar%} will replicate only
+updates to tables in all databases whose names start with foo and whose
+table names start with bar)
+
+@item @code{replicate-wild-ignore-table=db_name.table_name} @tab
+Tells the slave thread to not replicate the tables that match the given
+wildcard pattern. To specify more than one pattern to ignore, use the
+directive multiple times, once for each pattern.
+(Example: @code{replicate-wild-ignore-table=foo%.bar%} will not
+replicate updates to tables in all databases whose names start with foo
+and whose table names start with bar)
+
+@item @code{replicate-ignore-db=database_name} @tab
+Tells the slave thread to not replicate the specified database. To
+specify more than one database to ignore, use the directive multiple
+times, once for each database. This option will not work if you use
+cross-database updates. If you need cross-database updates to work,
+make sure you have 3.23.28 or later, and use
+@code{replicate-wild-ignore-table=db_name.%}.
+(Example: @code{replicate-ignore-db=some_db})
+
+@item @code{replicate-do-db=database_name} @tab
+Tells the slave thread to restrict replication to the specified database.
+To specify more than one database, use the directive multiple times,
+once for each database. Note that this will only work if you do not use
+cross-database queries such as @code{UPDATE some_db.some_table SET
+foo='bar'} while having selected a different or no database. If you need
+cross-database updates to work, make sure you have 3.23.28 or later, and
+use @code{replicate-wild-do-table=db_name.%}.
+(Example: @code{replicate-do-db=some_db})
+
+@item @code{log-slave-updates} @tab
+Tells the slave to log the updates from the slave thread to the binary
+log. Off by default. You will need to turn it on if you plan to
+daisy-chain the slaves.
+
+@item @code{replicate-rewrite-db=from_name->to_name} @tab
+Tells the slave to apply updates from the named database on the master
+to a database with a different name on the slave. (Example:
+@code{replicate-rewrite-db=master_db_name->slave_db_name})
+
+@item @code{skip-slave-start} @tab
+Tells the slave server not to start the slave thread on startup. The user
+can start it later with @code{SLAVE START}.
+
+@item @code{slave_read_timeout=#} @tab
+Number of seconds to wait for more data from the master before aborting
+the read.
+@end multitable
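+
+As a sketch, a slave configuration combining some of the options above
+might look like this (the host name, credentials, and database name are
+only examples):
+
+@example
+[mysqld]
+server-id=2
+master-host=db-master.mycompany.com
+master-user=repl
+master-password=tiger
+master-port=3306
+master-connect-retry=60
+replicate-do-db=some_db
+@end example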
+
+
+@node Replication SQL, Replication FAQ, Replication Options, Replication
+@subsection SQL Commands Related to Replication
+
+@cindex SQL commands, replication
+@cindex commands, replication
+@cindex replication, commands
+
+Replication can be controlled through the SQL interface. Below is the
+summary of commands:
+
+@multitable @columnfractions .30 .70
+@item @strong{Command} @tab @strong{Description}
+
+@item @code{SLAVE START}
+ @tab Starts the slave thread. (Slave)
+
+@item @code{SLAVE STOP}
+ @tab Stops the slave thread. (Slave)
+
+@item @code{SET SQL_LOG_BIN=0}
+ @tab Disables update logging if the user has process privilege.
+ Ignored otherwise. (Master)
+
+@item @code{SET SQL_LOG_BIN=1}
+ @tab Re-enables update logging if the user has process privilege.
+ Ignored otherwise. (Master)
+
+@item @code{SET SQL_SLAVE_SKIP_COUNTER=n}
+ @tab Skips the next @code{n} events from the master. Only valid when
+the slave thread is not running; otherwise it gives an error. Useful for
+recovering from replication glitches.
+
+@item @code{RESET MASTER}
+ @tab Deletes all binary logs listed in the index file, resetting the binlog
+index file to be empty. In pre-3.23.26 versions the command was called
+@code{FLUSH MASTER}. (Master)
+
+@item @code{RESET SLAVE}
+ @tab Makes the slave forget its replication position in the master
+logs. In pre-3.23.26 versions the command was called
+@code{FLUSH SLAVE}. (Slave)
+
+@item @code{LOAD TABLE tblname FROM MASTER}
+ @tab Downloads a copy of the table from the master to the slave. (Slave)
+
+@item @code{CHANGE MASTER TO master_def_list}
+ @tab Changes the master parameters to the values specified in
+@code{master_def_list} and restarts the slave thread. @code{master_def_list}
+is a comma-separated list of @code{master_def} where @code{master_def} is
+one of the following: @code{MASTER_HOST}, @code{MASTER_USER},
+@code{MASTER_PASSWORD}, @code{MASTER_PORT}, @code{MASTER_CONNECT_RETRY},
+@code{MASTER_LOG_FILE}, @code{MASTER_LOG_POS}. Example:
+
+@example
+
+CHANGE MASTER TO
+ MASTER_HOST='master2.mycompany.com',
+ MASTER_USER='replication',
+ MASTER_PASSWORD='bigs3cret',
+ MASTER_PORT=3306,
+ MASTER_LOG_FILE='master2-bin.001',
+ MASTER_LOG_POS=4;
+
+@end example
+
+You only need to specify the values that need to be changed. The values that
+you omit will stay the same with the exception of when you change the host or
+the port. In that case, the slave will assume that since you are connecting to
+a different host or a different port, the master is different. Therefore, the
+old values of log and position are not applicable anymore, and will
+automatically be reset to an empty string and 0, respectively (the start
+values). Note that if you restart the slave, it will remember its last master.
+If this is not desirable, you should delete the @file{master.info} file before
+restarting, and the slave will read its master from @code{my.cnf} or the
+command line. (Slave)
+
+@item @code{SHOW MASTER STATUS} @tab Provides status information on the binlog of the master. (Master)
+
+@item @code{SHOW SLAVE STATUS} @tab Provides status information on essential parameters of the slave thread. (Slave)
+@item @code{SHOW MASTER LOGS} @tab Only available starting in Version 3.23.28. Lists the binary logs on the master. You should use this command prior to @code{PURGE MASTER LOGS TO} to find out how far you should go.
+
+@item @code{PURGE MASTER LOGS TO 'logname'}
+ @tab Available starting in Version 3.23.28. Deletes all the
+replication logs that are listed in the log
+index as being prior to the specified log, and removes them from the
+log index, so that the given log becomes the first one. Example:
+
+@example
+PURGE MASTER LOGS TO 'mysql-bin.010'
+@end example
+
+This command will do nothing and fail with an error if you have an
+active slave that is currently reading one of the logs you are trying to
+delete. However, if you have a dormant slave, and happen to purge one of
+the logs it wants to read, the slave will be unable to replicate once it
+comes up. The command is safe to run while slaves are replicating - you
+do not need to stop them.
+
+You must first check all the slaves with @code{SHOW SLAVE STATUS} to
+see which log they are on, then do a listing of the logs on the
+master with @code{SHOW MASTER LOGS}, find the earliest log among all
+the slaves (if all the slaves are up to date, this will be the
+last log on the list), optionally back up all the logs you are about to
+delete, and purge up to the target log, as sketched in the example after
+this table.
+
+@end multitable
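+
+As a sketch of the purge procedure described in the table above (the log
+name below is only an example):
+
+@example
+# On each slave, note which log it is reading:
+SHOW SLAVE STATUS;
+# On the master, list the logs it knows about:
+SHOW MASTER LOGS;
+# Purge everything before the earliest log still needed by a slave:
+PURGE MASTER LOGS TO 'mysql-bin.007';
+@end example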
+
+
+@node Replication FAQ, Replication Problems, Replication SQL, Replication
+@subsection Replication FAQ
+
+@cindex @code{Binlog_Dump}
+@strong{Q}: Why do I sometimes see more than one @code{Binlog_Dump} thread on
+the master after I have restarted the slave?
+
+@strong{A}: @code{Binlog_Dump} is a continuous process that is handled by the
+server in the following way:
+
+@itemize @bullet
+@item
+Catch up on the updates.
+@item
+Once there are no more updates left, go into @code{pthread_cond_wait()},
+from which we can be awakened either by an update or a kill.
+@item
+On wake up, check the reason. If we are not supposed to die, continue
+the @code{Binlog_dump} loop.
+@item
+If there is some fatal error, such as detecting a dead client,
+terminate the loop.
+@end itemize
+
+So if the slave thread stops on the slave, the corresponding
+@code{Binlog_Dump} thread on the master will not notice it until after
+at least one update to the master (or a kill), which is needed to wake
+it up from @code{pthread_cond_wait()}. In the meantime, the slave
+could have opened another connection, which resulted in another
+@code{Binlog_Dump} thread.
+
+The above problem should not be present in Version 3.23.26 and later
+versions. In Version 3.23.26 we added @code{server-id} to each
+replication server, and now all the old zombie threads are killed on the
+master when a new replication thread connects from the same slave.
+
+@strong{Q}: How do I rotate replication logs?
+
+@strong{A}: In Version 3.23.28 and later you should use the
+@code{PURGE MASTER LOGS TO} command after determining which logs can be
+deleted, and optionally backing them up first. In earlier versions the
+process is much more
+painful, and cannot be safely done without stopping all the slaves in
+the case that you plan to re-use log names. You will need to stop the
+slave threads, edit the binary log index file, delete all the old logs,
+restart the master, start slave threads, and then remove the old log files.
+
+
+@strong{Q}: How do I upgrade on a hot replication setup?
+
+@strong{A}: If you are upgrading pre-3.23.26 versions, you should just
+lock the master tables, let the slave catch up, then run @code{FLUSH
+MASTER} on the master, and @code{FLUSH SLAVE} on the slave to reset the
+logs, then restart new versions of the master and the slave. Note that
+the slave can stay down for some time - since the master is logging
+all the updates, the slave will be able to catch up once it is up and
+can connect.
+
+After 3.23.26, we have locked the replication protocol for modifications, so
+you can upgrade masters and slaves on the fly to a newer 3.23 version and you
+can have different versions of @strong{MySQL} running on the slave and the
+master, as long as they are both newer than 3.23.26.
+
+@cindex replication, two-way
+@strong{Q}: What issues should I be aware of when setting up two-way
+replication?
+
+@strong{A}: @strong{MySQL} replication currently does not support any
+locking protocol between master and slave to guarantee the atomicity of
+a distributed (cross-server) update. In other words, it is possible
+for client A to make an update to co-master 1, and in the meantime,
+before it propagates to co-master 2, client B could make an update to
+co-master 2 that makes the update of client A work differently than
+it did on co-master 1. Thus, when the update of client A makes it
+to co-master 2, it will produce tables that are different from
+what you have on co-master 1, even after all the updates from co-master
+2 have also propagated. So you should not chain two servers in a
+two-way replication relationship, unless you are sure that your updates
+can safely happen in any order, or unless you take care of mis-ordered
+updates somehow in the client code.
+
+
+You must also realize that two-way replication actually does not improve
+performance very much, if at all, as far as updates are concerned. Each
+server must do the same number of updates as a single server would have
+to do. The only difference is that there will be a little less
+lock contention, because the updates originating on another server will
+be serialized in one slave thread. This benefit, though, might be
+offset by network delays.
+
+@cindex performance, improving
+@cindex increasing, performance
+@strong{Q}: How can I use replication to improve performance of my system?
+
+@strong{A}: You should set up one server as the master, and direct all
+writes to it, and configure as many slaves as you have the money and
+rackspace for, distributing the reads among the master and the slaves.
+You can also start the slaves with @code{--skip-bdb},
+@code{--low-priority-updates} and @code{--delay-key-write-for-all-tables}
+to get speed improvements for the slave. In this case the slave will
+use non-transactional @code{MyISAM} tables instead of @code{BDB} tables
+to get more speed.
+
+@strong{Q}: What should I do to prepare my client code to use
+performance-enhancing replication?
+
+@strong{A}:
+If the part of your code that is responsible for database access has
+been properly abstracted/modularized, converting it to run with the
+replicated setup should be very smooth and easy - just change the
+implementation of your database access to read from some slave or the
+master, and to always write to the master. If your code does not have
+this level of abstraction, setting up a replicated system will give you
+an opportunity and motivation to clean it up. You should start by
+creating a wrapper library/module with the following functions:
+
+@itemize
+@item
+@code{safe_writer_connect()}
+@item
+@code{safe_reader_connect()}
+@item
+@code{safe_reader_query()}
+@item
+@code{safe_writer_query()}
+@end itemize
+
+@code{safe_} means that the function will take care of handling all
+the error conditions.
+
+You should then convert your client code to use the wrapper library.
+It may be a painful and scary process at first, but it will pay off in
+the long run. All applications that follow the above pattern will be
+able to take advantage of a one-master/many-slaves solution. The
+code will be a lot easier to maintain, and adding troubleshooting
+options will be trivial. You will just need to modify one or two
+functions, for example, to log how long each query took, or which
+query, among your many thousands, gave you an error. If you have
+written a lot of code already,
+you may want to automate the conversion task by using Monty's
+@code{replace} utility, which comes with the standard distribution of
+@strong{MySQL}, or just write your own Perl script. Hopefully, your
+code follows some recognizable pattern. If not, then you are probably
+better off re-writing it anyway, or at least going through and manually
+beating it into a pattern.
+
+Note that, of course, you can use different names for the
+functions. What is important is having a unified interface for connecting
+for reads, connecting for writes, doing a read, and doing a write.
+
+
+@strong{Q}: When and how much can @strong{MySQL} replication improve the performance
+of my system?
+
+@strong{A}: @strong{MySQL} replication is most beneficial for a system
+with frequent reads and not so frequent writes. In theory, by using a
+one master/many slaves setup you can scale by adding more slaves until
+you either run out of network bandwidth, or your update
+load grows to the point
+that the master cannot handle it.
+
+In order to determine how many slaves you can get before the added
+benefits begin to level out, and how much you can improve performance
+of your site, you need to know your query patterns, and empirically
+(by benchmarking) determine the relationship between the throughput
+on reads (reads per second, or @code{max_reads}) and on writes
+(@code{max_writes}) on a typical master and a typical slave. The
+example below will show you a rather simplified calculation of what you
+can get with replication for our imagined system.
+
+Let's say our system load consists of 10% writes and 90% reads, and we
+have determined that @code{max_reads} = 1200 - 2 * @code{max_writes},
+or in other words, our system can do 1200 reads per second with no
+writes, our average write is twice as slow as average read,
+and the relationship is
+linear. Let us suppose that our master and slave are of the same
+capacity, and we have N slaves and 1 master. Then we have for each
+server (master or slave):
+
+@code{reads = 1200 - 2 * writes} (from benchmarks)
+
+@code{reads = 9 * writes / (N + 1)} (reads are split, but writes go
+to all servers)
+
+@code{9 * writes / (N + 1) + 2 * writes = 1200}
+
+@code{writes = 1200 / (2 + 9 / (N + 1))}
+
+So if N = 0, which means we have no replication, our system can handle
+1200/11, about 109 writes per second (which means we will have 9 times
+as many reads due to the nature of our application).
+
+If N = 1, we can get up to 184 writes per second.
+
+If N = 8, we get up to 400.
+
+If N = 17, 480 writes.
+
+Eventually as N approaches infinity (and our budget negative infinity),
+we can get very close to 600 writes per second, increasing system
+throughput about 5.5 times. However, with only 8 servers, we increased
+it almost 4 times already.
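+
+As a quick check of these figures, you can evaluate the last formula for
+a few values of N with any calculator, for example with @code{bc}:
+
+@example
+shell> for N in 0 1 8 17; do echo "scale=1; 1200/(2 + 9/($N + 1))" | bc; done
+109.0
+184.6
+400.0
+480.0
+@end example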
+
+Note that our computations assumed infinite network bandwidth, and
+neglected several other factors that could turn out to be significant on
+your system. In many cases, you may not be able to make a computation
+similar to the one above that will accurately predict what will happen
+on your system if you add N replication slaves. However, answering the
+following questions should help you decide whether and how much, if at
+all, replication will improve the performance of your system:
+
+@itemize @bullet
+@item
+What is the read/write ratio on your system?
+@item
+How much more write load can one server handle if you reduce the reads?
+@item
+How many slaves do you have bandwidth for on your network?
+@end itemize
+
+@strong{Q}: How can I use replication to provide redundancy/high
+availability?
+
+@strong{A}: With the currently available features, you would have to
+set up a master and a slave (or several slaves), and write a script
+that will monitor the master to see if it is up, and instruct your
+applications and the slaves to change masters in case of failure. Some
+suggestions:
+
+@itemize @bullet
+@item
+To tell a slave to change the master use the @code{CHANGE MASTER TO} command.
+@item
+A good way to keep your applications informed about the location of the
+master is to have a dynamic DNS entry for the master. With @strong{bind}
+you can use @code{nsupdate} to dynamically update your DNS.
+@item
+You should run your slaves with the @code{log-bin} option and without
+@code{log-slave-updates}. This way the slave will be ready to become a
+master as soon as you issue @code{STOP SLAVE}; @code{RESET MASTER}, and
+@code{CHANGE MASTER TO} on the other slaves. It will also help you catch
+spurious updates that may happen because of misconfiguration of the
+slave (ideally, you want to configure access rights so that no client
+can update the slave, except for the slave thread) combined with the
+bugs in your client programs (they should never update the slave
+directly).
+
+@end itemize
+
+We are currently working on integrating an automatic master election
+system into @strong{MySQL}, but until it is ready, you will have to
+create your own monitoring tools.
+
+
+@node Replication Problems, , Replication FAQ, Replication
+@subsection Troubleshooting Replication
+
+If you have followed the instructions, and your replication setup is not
+working, first eliminate the user error factor by checking the following:
+
+@itemize @bullet
+@item
+Is the master logging to the binary log? Check with @code{SHOW MASTER STATUS}.
+If it is, @code{Position} will be non-zero. If not, verify that you have
+given the master @code{log-bin} option and have set @code{server-id}.
+@item
+Is the slave running? Check with @code{SHOW SLAVE STATUS}. The answer is found
+in @code{Slave_running} column. If not, verify slave options and check the
+error log for messages.
+@item
+If the slave is running, did it establish connection with the master? Do
+@code{SHOW PROCESSLIST}, find the thread with @code{system user} value in
+@code{User} column and @code{none} in the @code{Host} column, and check the
+@code{State} column. If it says @code{connecting to master}, verify the
+privileges for the replication user on the master, master host name, your
+DNS setup, whether the master is actually running, whether it is reachable
+from the slave, and if all that seems ok, read the error logs.
+@item
+If the slave was running, but then stopped, look at the output of
+@code{SHOW SLAVE STATUS} and check the error logs. It usually
+happens when some query that succeeded on the master fails on the slave. This
+should never happen if you have taken a proper snapshot of the master, and
+never modify the data on the slave outside of the slave thread. If it does,
+it is a bug, read below on how to report it.
+@item
+If a query that succeeded on the master refuses to run on the slave, and
+a full database resync (the proper thing to do) does not seem feasible,
+try the following:
+@itemize @minus
+@item
+First see if there is some stray record in the way. Understand how it got
+there, then delete it and run @code{SLAVE START}.
+@item
+If the above does not work or does not apply, try to understand if it would
+be safe to make the update manually (if needed) and then ignore the next
+query from the master.
+@item
+If you have decided you can skip the next query, do
+@code{SET SQL_SLAVE_SKIP_COUNTER=1; SLAVE START;} to skip a query that
+does not use @code{AUTO_INCREMENT} or @code{LAST_INSERT_ID}, or
+@code{SET SQL_SLAVE_SKIP_COUNTER=2; SLAVE START;} otherwise. The reason
+@code{AUTO_INCREMENT}/@code{LAST_INSERT_ID} queries are different is that they take
+two events in the binary log of the master.
+
+@item
+If you are sure the slave started out perfectly in sync with the master,
+and no one has updated the tables involved outside of the slave thread,
+report the bug, so you will not have to do the above tricks again.
+@end itemize
+@item
+Make sure you are not running into an old bug by upgrading to the most recent
+version.
+@item
+If all else fails, read the error logs. If they are big,
+@code{grep -i slave /path/to/your-log.err} on the slave. There is no
+generic pattern to search for on the master, as the only errors it logs
+are general system errors - if it can, it will send the error to the slave
+when things go wrong.
+@end itemize
+
+When you have determined that there is no user error involved, and replication
+still either does not work at all or is unstable, it is time to start working
+on a bug report. We need to get as much info as possible from you to be able
+to track down the bug. Please do spend some time and effort preparing a good
+bug report. Ideally, we would like to have a test case in the format found
+in the @file{mysql-test/t/rpl*} directory of the source tree. If you submit
+a test case like that, you can expect a patch within a day or two in most
+cases, although, of course, your mileage may vary depending on a number of
+factors.
+
+The second best option is just a program with easily configurable
+connection arguments for the master and the slave that will demonstrate
+the problem on our systems. You can write one in Perl or in C, depending
+on which language you
+know better.
+
+If you have one of the above ways to demonstrate the bug, use
+@code{mysqlbug} to prepare a bug report and send it to
+@email{bugs@@lists.mysql.com}. If you have a phantom - a problem that
+does occur but you cannot duplicate "at will":
+
+@itemize @bullet
+@item
+Verify that there is no user error involved. For example, if you update the
+slave outside of the slave thread, the data will be out of sync, and you can
+have unique key violations on updates, in which case the slave thread will
+stop and wait for you to clean up the tables manually to bring them in sync.
+@item
+Run slave with @code{log-slave-updates} and @code{log-bin} - this will keep
+a log of all updates on the slave.
+@item
+Save all evidence before resetting the replication. If we have no or only
+sketchy information, it would take us a while to track down the problem. The
+evidence you should collect is:
+@itemize @minus
+@item
+All binary logs on the master
+@item
+All binary logs on the slave
+@item
+The output of @code{SHOW MASTER STATUS} on the master at the time
+you have discovered the problem
+@item
+The output of @code{SHOW SLAVE STATUS} on the slave at the time
+you have discovered the problem
+@item
+Error logs on the master and on the slave
+@end itemize
+@item
+Use @code{mysqlbinlog} to examine the binary logs. For example, the
+following should be helpful in finding the query that causes trouble:
+@example
+mysqlbinlog -j pos_from_slave_status /path/to/log_from_slave_status | head
+@end example
+@end itemize
+
+Once you have collected the evidence on the phantom problem, try hard to
+isolate it into a separate test case first. Then report the problem to
+@email{bugs@@lists.mysql.com} with as much info as possible.
+
+
+@node MySQL Optimization, Reference, MySQL Database Administration, Top
+@chapter MySQL Optimization
+
+@menu
+* Optimize Overview::
+* Query Speed::
+* Locking Issues::
+* Optimizing Database Structure::
+* Optimizing the Server::
+* Disk issues::
+@end menu
+
+
+Optimization is a complicated task because it ultimately requires
+understanding of the whole system. While it may be possible to do some
+local optimizations with little knowledge of your system or application,
+the more optimal you want your system to become, the more you will have
+to know about it.
+
+This chapter will try to explain and give some examples of different
+ways to optimize @strong{MySQL}. Remember, however, that there are
+always some (increasingly harder) additional ways to make the system
+even faster.
+
+
+@node Optimize Overview, Query Speed, MySQL Optimization, MySQL Optimization
+@section Optimization Overview
+
+The most important part for getting a system fast is of course the basic
+design. You also need to know what kinds of things your system will be
+doing, and what your bottlenecks are.
+
+The most common bottlenecks are:
+@itemize @bullet
+@item Disk seeks.
+It takes time for the disk to find a piece of data. With modern disks in
+1999, the mean time for this is usually lower than 10ms, so we can in
+theory do about 100 seeks a second. This time improves slowly with new
+disks and is very hard to optimize for a single table. The way to
+optimize this is to spread the data on more than one disk.
+
+@item Disk reading/writing.
+When the disk is at the correct position we need to read the data. With
+modern disks in 1999, one disk delivers something like 10-20Mb/s. This
+is easier to optimize than seeks because you can read in parallel from
+multiple disks.
+
+@item CPU cycles.
+When we have the data in main memory (or if it was already
+there) we need to process it to get to our result. Having small
+tables compared to the memory is the most common limiting
+factor. But then, with small tables speed is usually not the problem.
+
+@item Memory bandwidth.
+When the CPU needs more data than can fit in the CPU cache the main
+memory bandwidth becomes a bottleneck. This is an uncommon bottleneck
+for most systems, but one should be aware of it.
+@end itemize
+
+
+@menu
+* Design Limitations::
+* Portability::
+* Internal use::
+* MySQL Benchmarks::
+* Custom Benchmarks::
+@end menu
+
+@node Design Limitations, Portability, Optimize Overview, Optimize Overview
+@subsection MySQL Design Limitations/Tradeoffs
+
+@cindex design, limitations
+@cindex limitations, design
+
+Because @strong{MySQL} uses extremely fast table locking (multiple readers /
+single writers) the biggest remaining problem is a mix of a steady stream of
+inserts and slow selects on the same table.
+
+We believe that for a huge number of systems the extremely fast
+performance in other cases makes this choice a win. This case is usually
+also possible to solve by having multiple copies of the table, but it
+takes more effort and hardware.
+
+We are also working on some extensions to solve this problem for some
+common application niches.
+
+
+@node Portability, Internal use, Design Limitations, Optimize Overview
+@subsection Portability
+
+@cindex portability
+@cindex crash-me program
+@cindex programs, crash-me
+
+Because all SQL servers implement different parts of SQL, it takes work to
+write portable SQL applications. For very simple selects/inserts it is
+very easy, but the more features you need, the harder it gets. If you
+want an application that is fast with many databases, it becomes even
+harder!
+
+To make a complex application portable you need to choose a number of
+SQL servers that it should work with.
+
+You can use the @strong{MySQL} crash-me program/web-page
+@uref{http://www.mysql.com/information/crash-me.php} to find functions,
+types, and limits you can use with a selection of database
+servers. Crash-me is far from testing everything possible, but it
+is still comprehensive, with about 450 things tested.
+
+For example, you shouldn't have column names longer than 18 characters
+if you want to be able to use Informix or DB2.
+
+Both the @strong{MySQL} benchmarks and crash-me programs are very
+database-independent. By taking a look at how we have handled this, you
+can get a feeling for what you have to do to make your own applications
+database-independent. The benchmarks themselves can be found in the
+@file{sql-bench} directory in the @strong{MySQL} source
+distribution. They are written in Perl with the DBI database interface
+(which solves the access part of the problem).
+
+See @uref{http://www.mysql.com/information/benchmarks.html} for the results
+from this benchmark.
+
+As you can see in these results, all databases have some weak points. That
+is, they have different design compromises that lead to different
+behavior.
+
+If you strive for database independence, you need to get a good feeling
+for each SQL server's bottlenecks. @strong{MySQL} is VERY fast in
+retrieving and updating things, but will have a problem in mixing slow
+readers/writers on the same table. Oracle, on the other hand, has a big
+problem when you try to access rows that you have recently updated
+(until they are flushed to disk). Transaction databases in general are
+not very good at generating summary tables from log tables, as in this
+case row locking is almost useless.
+
+To get your application @emph{really} database-independent, you need to
+define an easily extendable interface through which you manipulate your
+data. As C++ is available on most systems, it makes sense to use a C++
+class interface to the databases.
+
+If you use some specific feature for some database (like the
+@code{REPLACE} command in @strong{MySQL}), you should code a method for
+the other SQL servers to implement the same feature (but slower). With
+@strong{MySQL} you can use the @code{/*! */} syntax to add
+@strong{MySQL}-specific keywords to a query. The code inside
+@code{/**/} will be treated as a comment (ignored) by most other SQL
+servers.
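+
+For example (the table and column names here are made up), the following
+statements use @strong{MySQL}-specific keywords inside the comment
+syntax; @strong{MySQL} acts on them, while other SQL servers simply
+ignore them:
+
+@example
+INSERT /*! DELAYED */ INTO log_table (message) VALUES ('A new message');
+CREATE /*! TEMPORARY */ TABLE tmp_result (id INT NOT NULL);
+@end example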
+
+If REALLY high performance is more important than exactness, as in some
+Web applications, one possibility is to create an application layer that
+caches all results to give you even higher performance. By letting
+old results 'expire' after a while, you can keep the cache reasonably
+fresh. This provides a way to handle extremely high load, because
+you can dynamically increase the cache and set the expire timeout higher
+until things get back to normal.
+
+In this case the table creation information should contain information
+about the initial size of the cache and how often the table should
+normally be refreshed.
+
+@node Internal use, MySQL Benchmarks, Portability, Optimize Overview
+@subsection What Have We Used MySQL For?
+
+@cindex uses, of MySQL
+@cindex customers, of MySQL
+
+During @strong{MySQL}'s initial development, the features of @strong{MySQL}
+were made to fit our largest customer, which handles data warehousing
+for a couple of the biggest retailers in Sweden.
+
+From all stores, we get weekly summaries of all bonus card transactions,
+and we are expected to provide useful information for the store owners
+to help them find how their advertisement campaigns are affecting their
+customers.
+
+The amount of data is quite huge (about 7 million summary transactions
+per month), and we have data for 4-10 years that we need to present to
+the users. We get weekly requests from the customers, who want
+'instant' access to new reports from this data.
+
+We solved this by storing all information per month in compressed
+'transaction' tables. We have a set of simple macros (scripts) that
+generate summary tables grouped by different criteria (product group,
+customer id, store ...) from the transaction tables. The reports are
+Web pages that are dynamically generated by a small Perl script that
+parses a Web page, executes the SQL statements in it, and inserts the
+results. We would have used PHP or mod_perl instead but they were
+not available at that time.
+
+For graphical data we wrote a simple tool in @code{C} that can produce
+GIFs based on the result of a SQL query (with some processing of the
+result). This is also dynamically executed from the Perl script that
+parses the @code{HTML} files.
+
+In most cases a new report can simply be done by copying an existing
+script and modifying the SQL query in it. In some cases, we need to
+add more fields to an existing summary table or generate a new one, but
+this is also quite simple, as we keep all transaction tables on disk.
+(Currently we have at least 50G of transaction tables and 200G of other
+customer data.)
+
+We also let our customers access the summary tables directly with ODBC
+so that the advanced users can themselves experiment with the data.
+
+We haven't had any problems handling this with a quite modest Sun Ultra
+SPARCstation (2 x 200 MHz). We recently upgraded one of our servers to a
+2-CPU 400 MHz UltraSPARC, and we are now planning to start handling
+transactions on the product level, which would mean a ten-fold increase
+in data. We think we can keep up with this by just adding more disks to
+our systems.
+
+We are also experimenting with Intel-Linux to be able to get more CPU
+power more cheaply. Now that we have the binary portable database format
+(new in Version 3.23), we will start to use this for some parts of the
+application.
+
+Our initial feelings are that Linux will perform much better on
+low-to-medium load, while Solaris will perform better when you start to
+get a high load because of extreme disk IO, but we don't yet have
+anything conclusive about this. After some discussion with a Linux
+kernel developer, this might be a side effect of Linux giving so many
+resources to the batch job that the interactive performance gets very
+low. This makes the machine feel very slow and unresponsive while big
+batch jobs are running. Hopefully this will be handled better in future
+Linux kernels.
+
+
+@node MySQL Benchmarks, Custom Benchmarks, Internal use, Optimize Overview
+@subsection The MySQL Benchmark Suite
+
+@cindex benchmark suite
+@cindex crash-me program
+
+This should contain a technical description of the @strong{MySQL}
+benchmark suite (and @code{crash-me}), but that description is not
+written yet. Currently, you can get a good idea of the benchmark by
+looking at the code and results in the @file{sql-bench} directory in any
+@strong{MySQL} source distribution.
+
+This benchmark suite is meant to tell any user what operations a given
+SQL implementation performs well or poorly at.
+
+Note that this benchmark is single-threaded, so it measures the minimum
+time for the operations. We plan to add a lot of multi-threaded tests to
+the benchmark suite in the future.
+
+For example (all tests run on the same NT 4.0 machine):
+
+@multitable @columnfractions .6 .2 .2
+@strong{Reading 2000000 rows by index} @tab @strong{Seconds} @tab @strong{Seconds}
+@item mysql @tab 367 @tab 249
+@item mysql_odbc @tab 464
+@item db2_odbc @tab 1206
+@item informix_odbc @tab 121126
+@item ms-sql_odbc @tab 1634
+@item oracle_odbc @tab 20800
+@item solid_odbc @tab 877
+@item sybase_odbc @tab 17614
+@end multitable
+
+@multitable @columnfractions .6 .2 .2
+@strong{Inserting (350768) rows} @tab @strong{Seconds} @tab @strong{Seconds}
+@item mysql @tab 381 @tab 206
+@item mysql_odbc @tab 619
+@item db2_odbc @tab 3460
+@item informix_odbc @tab 2692
+@item ms-sql_odbc @tab 4012
+@item oracle_odbc @tab 11291
+@item solid_odbc @tab 1801
+@item sybase_odbc @tab 4802
+@end multitable
+
+In the above test @strong{MySQL} was run with an 8M index cache.
+
+We have gathered some more benchmark results at
+@uref{http://www.mysql.com/information/benchmarks.html}.
+
+Note that Oracle is not included because they asked to be removed. All
+Oracle benchmarks have to be approved by Oracle! We believe that makes
+Oracle benchmarks @strong{VERY} biased, because the above benchmarks are
+supposed to show what a standard installation can do for a single
+client.
+
+To run the benchmark suite, you have to download a @strong{MySQL} source
+distribution, install the Perl DBI module and the Perl DBD driver for the
+database you want to test, and then do:
+
+@example
+cd sql-bench
+perl run-all-tests --server=#
+@end example
+
+where # is one of the supported servers. You can get a list of all options
+and supported servers by doing @code{run-all-tests --help}.
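+
+For example, to run the whole suite against a local @strong{MySQL}
+server (assuming the @strong{MySQL} DBD driver is installed), you would
+use:
+
+@example
+perl run-all-tests --server=mysql
+@end example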
+
+@cindex crash-me
+@code{crash-me} tries to determine what features a database supports and
+what its capabilities and limitations are by actually running
+queries. For example, it determines:
+
+@itemize @bullet
+@item
+What column types are supported
+@item
+How many indexes are supported
+@item
+What functions are supported
+@item
+How big a query can be
+@item
+How big a @code{VARCHAR} column can be
+@end itemize
+
+You can find the results from crash-me for a lot of different databases at
+@uref{http://www.mysql.com/information/crash-me.php}.
+
+
+@node Custom Benchmarks, , MySQL Benchmarks, Optimize Overview
+@subsection Using Your Own Benchmarks
+
+@cindex benchmarks
+@cindex performance, benchmarks
+
+You should definitely benchmark your application and database to find
+out where the bottlenecks are. After fixing one bottleneck (or by
+replacing it with a 'dummy module') you can then easily identify the next
+bottleneck (and so on). Even if the overall performance for your
+application is sufficient, you should at least make a plan for each
+bottleneck, and decide how to solve it if someday you really need the
+extra performance.
+
+For an example of portable benchmark programs, look at the @strong{MySQL}
+benchmark suite. @xref{MySQL Benchmarks, , @strong{MySQL} Benchmarks}. You
+can take any program from this suite and modify it for your needs. By doing
+this, you can try different solutions to your problem and test which is really
+the fastest solution for you.
+
+It is very common that some problems only occur when the system is very
+heavily loaded. We have had many customers who contact us when they
+have a (tested) system in production and have encountered load problems. In
+every one of these cases so far, the problems have been with basic design
+(table scans are NOT good at high load) or OS/library issues. Most of
+this would be a @strong{LOT} easier to fix if the systems were not
+already in production.
+
+To avoid problems like this, you should put some effort into benchmarking
+your whole application under the worst possible load. You can use
+Super Smack for this; it is available at:
+@uref{http://www.mysql.com/Downloads/super-smack/super-smack-1.0.tar.gz}.
+As the name suggests, it can bring your system to its knees if you ask it to,
+so make sure to use it only on your development systems.
+
+
+@node Query Speed, Locking Issues, Optimize Overview, MySQL Optimization
+@section Optimizing @code{SELECT}s and Other Queries
+
+@cindex queries, speed of
+@cindex permission checks, effect on speed
+@cindex speed, of queries
+
+First, one thing that affects all queries: the more complex your
+permission system setup is, the more overhead you get.
+
+If you have not issued any @code{GRANT} statements, @strong{MySQL} will
+optimize the permission checking somewhat. So if you have a very
+high-volume installation, it may be worth the time to avoid grants.
+Otherwise, more permission checks result in more overhead.
+
+If your problem is with some specific @strong{MySQL} function, you can
+always time it in the @strong{MySQL} client:
+
+@example
+mysql> select benchmark(1000000,1+1);
++------------------------+
+| benchmark(1000000,1+1) |
++------------------------+
+| 0 |
++------------------------+
+1 row in set (0.32 sec)
+@end example
+
+The above shows that @strong{MySQL} can execute 1,000,000 @code{+}
+expressions in 0.32 seconds on a @code{PentiumII 400MHz}.
+
+All @strong{MySQL} functions should be highly optimized, but there may be
+some exceptions, and @code{benchmark(loop_count,expression)} is a
+great tool to find out whether a particular function is the problem in
+your query.
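+
+For example, to check whether a specific function such as @code{MD5()}
+is what makes a query slow, you can time it the same way (the string
+and loop count here are arbitrary):
+
+@example
+mysql> select benchmark(1000000,MD5('some string'));
+@end example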
+
+@menu
+* EXPLAIN::
+* Estimating performance:: Estimating query performance
+* SELECT speed:: Speed of @code{SELECT} queries
+* Where optimizations:: How MySQL optimizes @code{WHERE} clauses
+* DISTINCT optimization:: How MySQL Optimizes @code{DISTINCT}
+* LEFT JOIN optimization:: How MySQL optimizes @code{LEFT JOIN}
+* LIMIT optimization:: How MySQL optimizes @code{LIMIT}
+* Insert speed:: Speed of @code{INSERT} queries
+* Update speed:: Speed of @code{UPDATE} queries
+* Delete speed:: Speed of @code{DELETE} queries
+* Tips::
+@end menu
+
+
+@node EXPLAIN, Estimating performance, Query Speed, Query Speed
+@subsection @code{EXPLAIN} Syntax (Get Information About a @code{SELECT})
+
+@findex EXPLAIN
+@findex SELECT, optimizing
+
+@example
+ EXPLAIN tbl_name
+or EXPLAIN SELECT select_options
+@end example
+
+@code{EXPLAIN tbl_name} is a synonym for @code{DESCRIBE tbl_name} or
+@code{SHOW COLUMNS FROM tbl_name}.
+
+When you precede a @code{SELECT} statement with the keyword @code{EXPLAIN},
+@strong{MySQL} explains how it would process the @code{SELECT}, providing
+information about how tables are joined and in which order.
+
+With the help of @code{EXPLAIN}, you can see when you must add indexes
+to tables to get a faster @code{SELECT} that uses indexes to find the
+records. You can also see if the optimizer joins the tables in an optimal
+order. To force the optimizer to use a specific join order for a
+@code{SELECT} statement, add a @code{STRAIGHT_JOIN} clause.
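+
+For example (with hypothetical tables @code{t1} and @code{t2}), the
+following forces the optimizer to read the tables in the order they are
+listed:
+
+@example
+EXPLAIN SELECT STRAIGHT_JOIN * FROM t1,t2 WHERE t1.a=t2.a;
+@end example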
+
+For non-simple joins, @code{EXPLAIN} returns a row of information for each
+table used in the @code{SELECT} statement. The tables are listed in the order
+they would be read. @strong{MySQL} resolves all joins using a single-sweep
+multi-join method. This means that @strong{MySQL} reads a row from the first
+table, then finds a matching row in the second table, then in the third table
+and so on. When all tables are processed, it outputs the selected columns and
+backtracks through the table list until a table is found for which there are
+more matching rows. The next row is read from this table and the process
+continues with the next table.
+
+Output from @code{EXPLAIN} includes the following columns:
+
+@table @code
+@item table
+The table to which the row of output refers.
+
+@item type
+The join type. Information about the various types is given below.
+
+@item possible_keys
+The @code{possible_keys} column indicates which indexes @strong{MySQL}
+could use to find the rows in this table. Note that this column is
+totally independent of the order of the tables. That means that some of
+the keys in @code{possible_keys} may not be usable in practice with the
+generated table order.
+
+If this column is empty, there are no relevant indexes. In this case,
+you may be able to improve the performance of your query by examining
+the @code{WHERE} clause to see if it refers to some column or columns
+that would be suitable for indexing. If so, create an appropriate index
+and check the query with @code{EXPLAIN} again. @xref{ALTER TABLE}.
+
+To see what indexes a table has, use @code{SHOW INDEX FROM tbl_name}.
+
+@item key
+The @code{key} column indicates the key that @strong{MySQL} actually
+decided to use. The key is @code{NULL} if no index was chosen. If
+@strong{MySQL} chooses the wrong index, you can probably force
+@strong{MySQL} to use another index by using @code{myisamchk --analyze},
+@xref{myisamchk syntax}, or by using @code{USE INDEX/IGNORE INDEX}.
+@xref{JOIN}.
+
+@item key_len
+The @code{key_len} column indicates the length of the key that
+@strong{MySQL} decided to use. The length is @code{NULL} if the
+@code{key} is @code{NULL}. Note that this tells us how many parts of a
+multi-part key @strong{MySQL} will actually use.
+
+@item ref
+The @code{ref} column shows which columns or constants are used with the
+@code{key} to select rows from the table.
+
+@item rows
+The @code{rows} column indicates the number of rows @strong{MySQL}
+believes it must examine to execute the query.
+
+@item Extra
+This column contains additional information about how @strong{MySQL} will
+resolve the query. Here is an explanation of the different text
+strings that can be found in this column:
+
+@table @code
+@item Distinct
+@strong{MySQL} will not continue searching for more rows for the current row
+combination after it has found the first matching row.
+
+@item Not exists
+@strong{MySQL} was able to do a @code{LEFT JOIN} optimization on the
+query and will not examine more rows in this table for the previous row
+combination after it finds one row that matches the @code{LEFT JOIN} criteria.
+
+Here is an example for this:
+
+@example
+SELECT * FROM t1 LEFT JOIN t2 ON t1.id=t2.id WHERE t2.id IS NULL;
+@end example
+
+Assume that @code{t2.id} is defined with @code{NOT NULL}. In this case
+@strong{MySQL} will scan @code{t1} and look up the rows in @code{t2}
+through @code{t1.id}. If @strong{MySQL} finds a matching row in
+@code{t2}, it knows that @code{t2.id} can never be @code{NULL}, and will
+not scan through the rest of the rows in @code{t2} that have the same
+@code{id}. In other words, for each row in @code{t1}, @strong{MySQL}
+only needs to do a single lookup in @code{t2}, independent of how many
+matching rows there are in @code{t2}.
+
+@item @code{range checked for each record (index map: #)}
+@strong{MySQL} didn't find a really good index to use. It will, instead, for
+each row combination in the preceding tables, do a check on which index to
+use (if any), and use this index to retrieve the rows from the table. This
+isn't very fast but is faster than having to do a join without
+an index.
+
+@item Using filesort
+@strong{MySQL} will need to do an extra pass to find out how to retrieve
+the rows in sorted order. The sort is done by going through all rows
+according to the @code{join type} and storing the sort key + pointer to
+the row for all rows that match the @code{WHERE}. Then the keys are
+sorted. Finally the rows are retrieved in sorted order.
+
+@item Using index
+The column information is retrieved from the table using only
+information in the index tree without having to do an additional seek to
+read the actual row. This can be done when all the used columns for
+the table are part of the same index.
+
+@item Using temporary
+To resolve the query @strong{MySQL} will need to create a
+temporary table to hold the result. This typically happens if you do an
+@code{ORDER BY} on a different column set than you did a @code{GROUP
+BY} on.
+
+@item Where used
+A @code{WHERE} clause will be used to restrict which rows will be
+matched against the next table or sent to the client. If this
+information is missing and the table is of type @code{ALL} or @code{index},
+you may have something wrong in your query (if you don't intend to
+fetch/examine all rows from the table).
+@end table
+
+If you want to get your queries as fast as possible, you should look out for
+@code{Using filesort} and @code{Using temporary}.
+@end table
+
+The different join types are listed below, ordered from best to worst type:
+
+@cindex system table
+@cindex tables, system
+@table @code
+@item system
+The table has only one row (= system table). This is a special case of
+the @code{const} join type.
+
+@cindex constant table
+@cindex tables, constant
+@item const
+The table has at most one matching row, which will be read at the start
+of the query. Because there is only one row, values from the columns in
+this row can be regarded as constants by the rest of the
+optimizer. @code{const} tables are very fast, as they are read only once!
+
+@item eq_ref
+One row will be read from this table for each combination of rows from
+the previous tables. This is the best possible join type, other than the
+@code{const} types. It is used when all parts of an index are used by
+the join and the index is @code{UNIQUE} or a @code{PRIMARY KEY}.
+
+@item ref
+All rows with matching index values will be read from this table for each
+combination of rows from the previous tables. @code{ref} is used if the join
+uses only a leftmost prefix of the key, or if the key is not @code{UNIQUE}
+or a @code{PRIMARY KEY} (in other words, if the join cannot select a single
+row based on the key value). If the key that is used matches only a few rows,
+this join type is good.
+
+@item range
+Only rows that are in a given range will be retrieved, using an index to
+select the rows. The @code{key} column indicates which index is used.
+The @code{key_len} contains the longest key part that was used.
+The @code{ref} column will be @code{NULL} for this type.
+
+@item index
+This is the same as @code{ALL}, except that only the index tree is
+scanned. This is usually faster than @code{ALL}, as the index file is usually
+smaller than the data file.
+
+@item ALL
+A full table scan will be done for each combination of rows from the
+previous tables. This is normally not good if the table is the first
+table not marked @code{const}, and usually @strong{very} bad in all other
+cases. You normally can avoid @code{ALL} by adding more indexes, so that
+the row can be retrieved based on constant values or column values from
+earlier tables.
+@end table
+
+You can get a good indication of how good a join is by multiplying all values
+in the @code{rows} column of the @code{EXPLAIN} output. This should tell you
+roughly how many rows @strong{MySQL} must examine to execute the query. This
+number is also used when you restrict queries with the @code{max_join_size}
+variable.
+@xref{Server parameters}.
+
+The following example shows how a @code{JOIN} can be optimized progressively
+using the information provided by @code{EXPLAIN}.
+
+Suppose you have the @code{SELECT} statement shown below, that you examine
+using @code{EXPLAIN}:
+
+@example
+EXPLAIN SELECT tt.TicketNumber, tt.TimeIn,
+ tt.ProjectReference, tt.EstimatedShipDate,
+ tt.ActualShipDate, tt.ClientID,
+ tt.ServiceCodes, tt.RepetitiveID,
+ tt.CurrentProcess, tt.CurrentDPPerson,
+ tt.RecordVolume, tt.DPPrinted, et.COUNTRY,
+ et_1.COUNTRY, do.CUSTNAME
+ FROM tt, et, et AS et_1, do
+ WHERE tt.SubmitTime IS NULL
+ AND tt.ActualPC = et.EMPLOYID
+ AND tt.AssignedPC = et_1.EMPLOYID
+ AND tt.ClientID = do.CUSTNMBR;
+@end example
+
+For this example, assume that:
+
+@itemize @bullet
+@item
+The columns being compared have been declared as follows:
+
+@multitable @columnfractions .1 .2 .7
+@item @strong{Table} @tab @strong{Column} @tab @strong{Column type}
+@item @code{tt} @tab @code{ActualPC} @tab @code{CHAR(10)}
+@item @code{tt} @tab @code{AssignedPC} @tab @code{CHAR(10)}
+@item @code{tt} @tab @code{ClientID} @tab @code{CHAR(10)}
+@item @code{et} @tab @code{EMPLOYID} @tab @code{CHAR(15)}
+@item @code{do} @tab @code{CUSTNMBR} @tab @code{CHAR(15)}
+@end multitable
+
+@item
+The tables have the indexes shown below:
+
+@multitable @columnfractions .1 .9
+@item @strong{Table} @tab @strong{Index}
+@item @code{tt} @tab @code{ActualPC}
+@item @code{tt} @tab @code{AssignedPC}
+@item @code{tt} @tab @code{ClientID}
+@item @code{et} @tab @code{EMPLOYID} (primary key)
+@item @code{do} @tab @code{CUSTNMBR} (primary key)
+@end multitable
+
+@item
+The @code{tt.ActualPC} values aren't evenly distributed.
+@end itemize
+
+Initially, before any optimizations have been performed, the @code{EXPLAIN}
+statement produces the following information:
+
+@example
+table type possible_keys key key_len ref rows Extra
+et ALL PRIMARY NULL NULL NULL 74
+do ALL PRIMARY NULL NULL NULL 2135
+et_1 ALL PRIMARY NULL NULL NULL 74
+tt ALL AssignedPC,ClientID,ActualPC NULL NULL NULL 3872
+ range checked for each record (key map: 35)
+@end example
+
+Because @code{type} is @code{ALL} for each table, this output indicates that
+@strong{MySQL} is doing a full join for all tables! This will take quite a
+long time, as the product of the number of rows in each table must be
+examined! For the case at hand, this is @code{74 * 2135 * 74 * 3872 =
+45,268,558,720} rows. If the tables were bigger, you can only imagine how
+long it would take.
+
+One problem here is that @strong{MySQL} can't (yet) use indexes on columns
+efficiently if they are declared differently. In this context,
+@code{VARCHAR} and @code{CHAR} are the same unless they are declared as
+different lengths. Because @code{tt.ActualPC} is declared as @code{CHAR(10)}
+and @code{et.EMPLOYID} is declared as @code{CHAR(15)}, there is a length
+mismatch.
+
+To fix this disparity between column lengths, use @code{ALTER TABLE} to
+lengthen @code{ActualPC} from 10 characters to 15 characters:
+
+@example
+mysql> ALTER TABLE tt MODIFY ActualPC VARCHAR(15);
+@end example
+
+Now @code{tt.ActualPC} and @code{et.EMPLOYID} are both @code{VARCHAR(15)}.
+Executing the @code{EXPLAIN} statement again produces this result:
+
+@example
+table type possible_keys key key_len ref rows Extra
+tt ALL AssignedPC,ClientID,ActualPC NULL NULL NULL 3872 where used
+do ALL PRIMARY NULL NULL NULL 2135
+ range checked for each record (key map: 1)
+et_1 ALL PRIMARY NULL NULL NULL 74
+ range checked for each record (key map: 1)
+et eq_ref PRIMARY PRIMARY 15 tt.ActualPC 1
+@end example
+
+This is not perfect, but is much better (the product of the @code{rows}
+values is now less by a factor of 74). This version is executed in a couple
+of seconds.
+
+A second alteration can be made to eliminate the column length mismatches
+for the @code{tt.AssignedPC = et_1.EMPLOYID} and @code{tt.ClientID =
+do.CUSTNMBR} comparisons:
+
+@example
+mysql> ALTER TABLE tt MODIFY AssignedPC VARCHAR(15),
+ MODIFY ClientID VARCHAR(15);
+@end example
+
+Now @code{EXPLAIN} produces the output shown below:
+
+@example
+table type possible_keys key key_len ref rows Extra
+et ALL PRIMARY NULL NULL NULL 74
+tt ref AssignedPC,ClientID,ActualPC ActualPC 15 et.EMPLOYID 52 where used
+et_1 eq_ref PRIMARY PRIMARY 15 tt.AssignedPC 1
+do eq_ref PRIMARY PRIMARY 15 tt.ClientID 1
+@end example
+
+This is almost as good as it can get.
+
+The remaining problem is that, by default, @strong{MySQL} assumes that values
+in the @code{tt.ActualPC} column are evenly distributed, and that isn't the
+case for the @code{tt} table. Fortunately, it is easy to tell @strong{MySQL}
+about this:
+
+@example
+shell> myisamchk --analyze PATH_TO_MYSQL_DATABASE/tt
+shell> mysqladmin refresh
+@end example
+
+Now the join is perfect, and @code{EXPLAIN} produces this result:
+
+@example
+table type possible_keys key key_len ref rows Extra
+tt ALL AssignedPC,ClientID,ActualPC NULL NULL NULL 3872 where used
+et eq_ref PRIMARY PRIMARY 15 tt.ActualPC 1
+et_1 eq_ref PRIMARY PRIMARY 15 tt.AssignedPC 1
+do eq_ref PRIMARY PRIMARY 15 tt.ClientID 1
+@end example
+
+Note that the @code{rows} column in the output from @code{EXPLAIN} is an
+educated guess from the @strong{MySQL} join optimizer. To optimize a
+query, you should check if the numbers are even close to the truth. If not,
+you may get better performance by using @code{STRAIGHT_JOIN} in your
+@code{SELECT} statement and trying to list the tables in a different order in
+the @code{FROM} clause.
+
+
+@node Estimating performance, SELECT speed, EXPLAIN, Query Speed
+@subsection Estimating Query Performance
+
+@cindex estimating, query performance
+@cindex queries, estimating performance
+@cindex performance, estimating
+
+In most cases you can estimate the performance by counting disk seeks.
+For small tables, you can usually find the row in 1 disk seek (as the
+index is probably cached). For bigger tables, you can estimate that
+(using B++ tree indexes) you will need: @code{log(row_count) /
+log(index_block_length / 3 * 2 / (index_length + data_pointer_length)) +
+1} seeks to find a row.
+
+In @strong{MySQL} an index block is usually 1024 bytes and the data
+pointer is usually 4 bytes. A 500,000 row table with an
+index length of 3 (medium integer) gives you:
+@code{log(500,000)/log(1024/3*2/(3+4)) + 1} = 4 seeks.
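+
+To see how slowly this estimate grows, the same formula for a table
+1,000 times as big (500,000,000 rows, with the same index and pointer
+sizes) gives @code{log(500,000,000)/log(1024/3*2/(3+4)) + 1} = about 5
+seeks, so even a huge increase in table size adds only a few extra
+seeks per lookup.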
+
+As the above index would require about 500,000 * 7 * 3/2 = 5.2M
+(assuming that the index buffers are filled to 2/3, which is typical),
+you will probably have much of the index in memory, so you will probably
+only need one or two calls to read data from the OS to find the row.
+
+For writes, however, you will need 4 seek requests (as above) to find
+where to place the new index and normally 2 seeks to update the index
+and write the row.
+
+Note that the above doesn't mean that your application will slowly
+degenerate by N log N! As long as everything is cached by the OS or SQL
+server things will only go marginally slower while the table gets
+bigger. After the data gets too big to be cached, things will start to
+go much slower until your application is only bound by disk seeks
+(which increase by N log N). To avoid this, increase the index cache as
+the data grows. @xref{Server parameters}.
+
+
+@node SELECT speed, Where optimizations, Estimating performance, Query Speed
+@subsection Speed of @code{SELECT} Queries
+
+@findex SELECT speed
+
+@cindex speed, of queries
+
+In general, when you want to make a slow @code{SELECT ... WHERE} faster, the
+first thing to check is whether or not you can add an index. @xref{MySQL
+indexes, , @strong{MySQL} indexes}. All references between different tables
+should usually be done with indexes. You can use the @code{EXPLAIN} command
+to determine which indexes are used for a @code{SELECT}.
+@xref{EXPLAIN, , @code{EXPLAIN}}.
+
+Some general tips:
+
+@itemize @bullet
+@item
+To help @strong{MySQL} optimize queries better, run @code{myisamchk
+--analyze} on a table after it has been loaded with relevant data. This
+updates a value for each index part that indicates the average number of
+rows that have the same value. (For unique indexes, this is always 1,
+of course.) @strong{MySQL} will use this to decide which index to
+choose when you connect two tables with 'a non-constant expression'.
+You can check the result from the @code{analyze} run by doing @code{SHOW
+INDEX FROM table_name} and examining the @code{Cardinality} column. See
+the example after this list.
+
+@item
+To sort an index and data according to an index, use @code{myisamchk
+--sort-index --sort-records=1} (if you want to sort on index 1). If you
+have a unique index from which you want to read all records in order
+according to that index, this is a good way to make that faster. Note,
+however, that this sorting isn't written optimally and will take a long
+time for a large table!
+@end itemize
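+
+A minimal sketch of these two maintenance steps from the shell (the
+path and table name are hypothetical):
+
+@example
+shell> myisamchk --analyze /path/to/db/tbl_name
+shell> myisamchk --sort-index --sort-records=1 /path/to/db/tbl_name
+@end example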
+
+
+@node Where optimizations, DISTINCT optimization, SELECT speed, Query Speed
+@subsection How MySQL Optimizes @code{WHERE} Clauses
+
+@findex WHERE
+
+@cindex optimizations
+
+The @code{WHERE} optimizations are put in the @code{SELECT} part here because
+they are mostly used with @code{SELECT}, but the same optimizations apply for
+@code{WHERE} in @code{DELETE} and @code{UPDATE} statements.
+
+Also note that this section is incomplete. @strong{MySQL} does many
+optimizations, and we have not had time to document them all.
+
+Some of the optimizations performed by @strong{MySQL} are listed below:
+
+@itemize @bullet
+@item
+Removal of unnecessary parentheses:
+@example
+ ((a AND b) AND c OR (((a AND b) AND (c AND d))))
+-> (a AND b AND c) OR (a AND b AND c AND d)
+@end example
+@item
+Constant folding:
+@example
+ (a<b AND b=c) AND a=5
+-> b>5 AND b=c AND a=5
+@end example
+@item
+Constant condition removal (needed because of constant folding):
+@example
+ (B>=5 AND B=5) OR (B=6 AND 5=5) OR (B=7 AND 5=6)
+-> B=5 OR B=6
+@end example
+@item
+Constant expressions used by indexes are evaluated only once.
+@item
+@code{COUNT(*)} on a single table without a @code{WHERE} is retrieved
+directly from the table information. This is also done for any @code{NOT NULL}
+expression when used with only one table.
+@item
+Early detection of invalid constant expressions. @strong{MySQL} quickly
+detects that some @code{SELECT} statements are impossible and returns no rows.
+@item
+@code{HAVING} is merged with @code{WHERE} if you don't use @code{GROUP BY}
+or group functions (@code{COUNT()}, @code{MIN()}...).
+@item
+For each sub-join, a simpler @code{WHERE} is constructed to get a fast
+@code{WHERE} evaluation for each sub-join and also to skip records as
+soon as possible.
+@cindex constant table
+@cindex tables, constant
+@item
+All constant tables are read first, before any other tables in the query.
+A constant table is:
+@itemize @minus
+@item
+An empty table or a table with 1 row.
+@item
+A table that is used with a @code{WHERE} clause on a @code{UNIQUE}
+index, or a @code{PRIMARY KEY}, where all index parts are used with constant
+expressions and the index parts are defined as @code{NOT NULL}.
+@end itemize
+All the following tables are used as constant tables:
+@example
+mysql> SELECT * FROM t WHERE primary_key=1;
+mysql> SELECT * FROM t1,t2
+ WHERE t1.primary_key=1 AND t2.primary_key=t1.id;
+@end example
+
+@item
+The best join combination to join the tables is found by trying all
+possibilities. If all columns in @code{ORDER BY} and in @code{GROUP
+BY} come from the same table, then this table is preferred first when
+joining.
+@item
+If there is an @code{ORDER BY} clause and a different @code{GROUP BY}
+clause, or if the @code{ORDER BY} or @code{GROUP BY} contains columns
+from tables other than the first table in the join queue, a temporary
+table is created.
+@item
+If you use @code{SQL_SMALL_RESULT}, @strong{MySQL} will use an in-memory
+temporary table.
+@item
+Each table index is queried, and the best index that spans fewer than 30% of
+the rows is used. If no such index can be found, a quick table scan is used.
+@item
+In some cases, @strong{MySQL} can read rows from the index without even
+consulting the data file. If all columns used from the index are numeric,
+then only the index tree is used to resolve the query.
+@item
+Before each record is output, those that do not match the @code{HAVING} clause
+are skipped.
+@end itemize
+
+Some examples of queries that are very fast:
+
+@example
+mysql> SELECT COUNT(*) FROM tbl_name;
+mysql> SELECT MIN(key_part1),MAX(key_part1) FROM tbl_name;
+mysql> SELECT MAX(key_part2) FROM tbl_name
+ WHERE key_part_1=constant;
+mysql> SELECT ... FROM tbl_name
+ ORDER BY key_part1,key_part2,... LIMIT 10;
+mysql> SELECT ... FROM tbl_name
+ ORDER BY key_part1 DESC,key_part2 DESC,... LIMIT 10;
+@end example
+
+The following queries are resolved using only the index tree (assuming
+the indexed columns are numeric):
+
+@example
+mysql> SELECT key_part1,key_part2 FROM tbl_name WHERE key_part1=val;
+mysql> SELECT COUNT(*) FROM tbl_name
+ WHERE key_part1=val1 AND key_part2=val2;
+mysql> SELECT key_part2 FROM tbl_name GROUP BY key_part1;
+@end example
+
+The following queries use indexing to retrieve the rows in sorted
+order without a separate sorting pass:
+
+@example
+mysql> SELECT ... FROM tbl_name ORDER BY key_part1,key_part2,... ;
+mysql> SELECT ... FROM tbl_name ORDER BY key_part1 DESC,key_part2 DESC,... ;
+@end example
+
+@node DISTINCT optimization, LEFT JOIN optimization, Where optimizations, Query Speed
+@subsection How MySQL Optimizes @code{DISTINCT}
+
+@findex DISTINCT
+
+@cindex optimizing, DISTINCT
+
+@code{DISTINCT} is converted to a @code{GROUP BY} on all columns.
+@code{DISTINCT} combined with @code{ORDER BY} will in many cases also
+need a temporary table.
+
+When combining @code{LIMIT #} with @code{DISTINCT}, @strong{MySQL} will stop
+as soon as it finds @code{#} unique rows.
+
+If you don't use columns from all used tables, @strong{MySQL} will stop
+scanning the unused tables as soon as it has found the first match.
+
+@example
+SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.a=t2.a;
+@end example
+
+In this case, assuming @code{t1} is used before @code{t2} (check with
+@code{EXPLAIN}), @strong{MySQL} will stop reading from @code{t2} (for that
+particular row in @code{t1}) when the first row in @code{t2} is found.
+
+
+@node LEFT JOIN optimization, LIMIT optimization, DISTINCT optimization, Query Speed
+@subsection How MySQL Optimizes @code{LEFT JOIN} and @code{RIGHT JOIN}
+
+@findex LEFT JOIN
+
+@cindex optimizing, LEFT JOIN
+
+@code{A LEFT JOIN B} in @strong{MySQL} is implemented as follows:
+
+@itemize @bullet
+@item
+The table @code{B} is set to be dependent on table @code{A} and all tables
+that @code{A} is dependent on.
+@item
+The table @code{A} is set to be dependent on all tables (except @code{B})
+that are used in the @code{LEFT JOIN} condition.
+@item
+All @code{LEFT JOIN} conditions are moved to the @code{WHERE} clause.
+@item
+All standard join optimizations are done, with the exception that a table is
+always read after all tables it is dependent on. If there is a circular
+dependency, @strong{MySQL} will issue an error.
+@item
+All standard @code{WHERE} optimizations are done.
+@item
+If there is a row in @code{A} that matches the @code{WHERE} clause, but there
+wasn't any row in @code{B} that matched the @code{LEFT JOIN} condition,
+then an extra @code{B} row is generated with all columns set to @code{NULL}.
+@item
+If you use @code{LEFT JOIN} to find rows that don't exist in some
+table and you have the test @code{column_name IS NULL} in the
+@code{WHERE} part, where @code{column_name} is a column that is declared as
+@code{NOT NULL}, then @strong{MySQL} will stop searching for more rows
+(for a particular key combination) after it has found one row that
+matches the @code{LEFT JOIN} condition.
+@end itemize
+
+@code{RIGHT JOIN} is implemented analogously to @code{LEFT JOIN}.
+
+The table read order forced by @code{LEFT JOIN} and @code{STRAIGHT_JOIN}
+will help the join optimizer (which calculates in which order tables
+should be joined) to do its work much more quickly, as there are fewer
+table permutations to check.
+
+Note that the above means that if you do a query of type:
+
+@example
+SELECT * FROM a,b LEFT JOIN c ON (c.key=a.key) LEFT JOIN d ON (d.key=a.key) WHERE b.key=d.key
+@end example
+
+@strong{MySQL} will do a full scan on @code{b} as the @code{LEFT
+JOIN} will force it to be read before @code{d}.
+
+The fix in this case is to change the query to:
+
+@example
+SELECT * FROM b,a LEFT JOIN c ON (c.key=a.key) LEFT JOIN d ON (d.key=a.key) WHERE b.key=d.key
+@end example
+
+
+@node LIMIT optimization, Insert speed, LEFT JOIN optimization, Query Speed
+@subsection How MySQL Optimizes @code{LIMIT}
+
+@findex LIMIT
+
+@cindex optimizing, LIMIT
+
+In some cases @strong{MySQL} will handle the query differently when you are
+using @code{LIMIT #} and not using @code{HAVING} (two of these cases are
+illustrated after the list):
+
+@itemize @bullet
+@item
+If you are selecting only a few rows with @code{LIMIT}, @strong{MySQL}
+will use indexes in some cases when it normally would prefer to do a
+full table scan.
+@item
+If you use @code{LIMIT #} with @code{ORDER BY}, @strong{MySQL} will end the
+sorting as soon as it has found the first @code{#} lines instead of sorting
+the whole table.
+@item
+When combining @code{LIMIT #} with @code{DISTINCT}, @strong{MySQL} will stop
+as soon as it finds @code{#} unique rows.
+@item
+In some cases a @code{GROUP BY} can be resolved by reading the key in order
+(or doing a sort on the key) and then calculating summaries until the
+key value changes. In this case @code{LIMIT #} will not calculate any
+unnecessary @code{GROUP BY} values.
+@item
+As soon as @strong{MySQL} has sent the first @code{#} rows to the client, it
+will abort the query.
+@item
+@code{LIMIT 0} will always quickly return an empty set. This is useful
+to check the query and to get the column types of the result columns.
+@item
+@strong{MySQL} uses @code{LIMIT #} to calculate how much space is needed
+for any temporary tables used to resolve the query.
+@end itemize
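+
+Two minimal illustrations of the cases above (the table and key names
+are hypothetical):
+
+@example
+mysql> SELECT * FROM tbl_name ORDER BY key_part1 LIMIT 10;
+mysql> SELECT * FROM tbl_name LIMIT 0;
+@end example
+
+The first query stops sorting as soon as the first 10 matching rows have
+been found; the second quickly returns an empty set, which is useful for
+checking the query and getting the column types of the result.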
+
+@node Insert speed, Update speed, LIMIT optimization, Query Speed
+@subsection Speed of @code{INSERT} Queries
+
+@findex INSERT
+
+@cindex speed, inserting
+@cindex inserting, speed of
+
+The time to insert a record consists approximately of:
+
+@itemize @bullet
+@item
+Connect: (3)
+@item
+Sending query to server: (2)
+@item
+Parsing query: (2)
+@item
+Inserting record: (1 x size of record)
+@item
+Inserting indexes: (1 x number of indexes)
+@item
+Close: (1)
+@end itemize
+
+where the numbers are somewhat proportional to the overall time. This
+does not take into consideration the initial overhead to open tables
+(which is done once for each concurrently running query).
+
+The size of the table slows down the insertion of indexes by N log N
+(B-trees).
+
+Some ways to speed up inserts:
+
+@itemize @bullet
+@item
+If you are inserting many rows from the same client at the same time,
+use @code{INSERT} statements with multiple value lists (see the example
+after this list). This is much faster (many times in some cases) than
+using separate @code{INSERT} statements.
+@item
+If you are inserting a lot of rows from different clients, you can get
+higher speed by using the @code{INSERT DELAYED} statement. @xref{INSERT,
+, @code{INSERT}}.
+@item
+Note that with @code{MyISAM} you can insert rows at the same time
+@code{SELECT}s are running if there are no deleted rows in the tables.
+@item
+When loading a table from a text file, use @code{LOAD DATA INFILE}. This
+is usually 20 times faster than using a lot of @code{INSERT} statements.
+@xref{LOAD DATA, , @code{LOAD DATA}}.
+@item
+It is possible with some extra work to make @code{LOAD DATA INFILE} run even
+faster when the table has many indexes. Use the following procedure:
+
+@enumerate
+@item
+Optionally create the table with @code{CREATE TABLE}. For example, using
+@code{mysql} or Perl-DBI.
+
+@item
+Execute a @code{FLUSH TABLES} statement or the shell command @code{mysqladmin
+flush-tables}.
+
+@item
+Use @code{myisamchk --keys-used=0 -rq /path/to/db/tbl_name}. This will
+remove all usage of all indexes from the table.
+
+@item
+Insert data into the table with @code{LOAD DATA INFILE}. This will not
+update any indexes and will therefore be very fast.
+
+@item
+If you are going to only read the table in the future, run @code{myisampack}
+on it to make it smaller. @xref{Compressed format}.
+
+@item
+Re-create the indexes with @code{myisamchk -r -q
+/path/to/db/tbl_name}. This will create the index tree in memory before
+writing it to disk, which is much faster because it avoids lots of disk
+seeks. The resulting index tree is also perfectly balanced.
+
+@item
+Execute a @code{FLUSH TABLES} statement or the shell command @code{mysqladmin
+flush-tables}.
+@end enumerate
+
+This procedure will be built into @code{LOAD DATA INFILE} in some future
+version of @strong{MySQL}.
+@item
+You can speed up insertions by locking your tables:
+
+@example
+mysql> LOCK TABLES a WRITE;
+mysql> INSERT INTO a VALUES (1,23),(2,34),(4,33);
+mysql> INSERT INTO a VALUES (8,26),(6,29);
+mysql> UNLOCK TABLES;
+@end example
+
+The main speed difference is that the index buffer is flushed to disk only
+once, after all @code{INSERT} statements have completed. Normally there would
+be as many index buffer flushes as there are different @code{INSERT}
+statements. Locking is not needed if you can insert all rows with a single
+statement.
+
+Locking will also lower the total time of multi-connection tests, but the
+maximum wait time for some threads will go up (because they wait for
+locks). For example:
+
+@example
+thread 1 does 1000 inserts
+threads 2, 3, and 4 do 1 insert each
+thread 5 does 1000 inserts
+@end example
+
+If you don't use locking, 2, 3, and 4 will finish before 1 and 5. If you
+use locking, 2, 3, and 4 probably will not finish before 1 or 5, but the
+total time should be about 40% faster.
+
+As @code{INSERT}, @code{UPDATE}, and @code{DELETE} operations are very
+fast in @strong{MySQL}, you will obtain better overall performance by
+adding locks around everything that does more than about 5 inserts or
+updates in a row. If you do very many inserts in a row, you could do a
+@code{LOCK TABLES} followed by an @code{UNLOCK TABLES} once in a while
+(about every 1000 rows) to allow other threads access to the table. This
+would still result in a nice performance gain.
+
+Of course, @code{LOAD DATA INFILE} is much faster for loading data.
+@end itemize
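+
+As mentioned in the first tip above, a single @code{INSERT} with
+multiple value lists (hypothetical table and columns) looks like this:
+
+@example
+mysql> INSERT INTO tbl_name (a,b) VALUES (1,'x'),(2,'y'),(3,'z');
+@end example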
+
+To get some more speed for both @code{LOAD DATA INFILE} and
+@code{INSERT}, enlarge the key buffer. @xref{Server parameters}.
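+
+One way to do this is in an option file (a minimal sketch; the 64M value
+is just an example, so pick a size that fits the memory of your machine):
+
+@example
+[mysqld]
+set-variable = key_buffer=64M
+@end example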
+
+
+@node Update speed, Delete speed, Insert speed, Query Speed
+@subsection Speed of @code{UPDATE} Queries
+
+Update queries are optimized as a @code{SELECT} query with the additional
+overhead of a write. The speed of the write is dependent on the size of
+the data that is being updated and the number of indexes that are
+updated. Indexes that are not changed will not be updated.
+
+Another way to get fast updates is to delay updates and then do
+many updates in a row later. Doing many updates in a row is much quicker
+than doing one at a time if you lock the table.
+
+Note that, with dynamic record format, updating a record to
+a longer total length may split the record. So if you do this often,
+it is very important to run @code{OPTIMIZE TABLE} occasionally.
+@xref{OPTIMIZE TABLE, , @code{OPTIMIZE TABLE}}.
+
+
+@node Delete speed, Tips, Update speed, Query Speed
+@subsection Speed of @code{DELETE} Queries
+
+If you want to delete all rows in the table, you should use
+@code{TRUNCATE TABLE table_name}. @xref{TRUNCATE}.
+
+The time to delete a record is exactly proportional to the number of
+indexes. To delete records more quickly, you can increase the size of
+the index cache. @xref{Server parameters}.
+
+
+@node Tips, , Delete speed, Query Speed
+@subsection Other Optimization Tips
+
+@cindex optimization, tips
+@cindex tips, optimization
+
+Unsorted tips for faster systems:
+
+@itemize @bullet
+@item
+Use persistent connections to the database to avoid the connection
+overhead. If you can't use persistent connections and you are doing a
+lot of new connections to the database, you may want to change the value
+of the @code{thread_cache_size} variable. @xref{Server parameters}.
+@item
+Always check that all your queries really use the indexes you have created
+in the tables. In @strong{MySQL} you can do this with the @code{EXPLAIN}
+command. @xref{EXPLAIN, Explain, Explain, manual}.
+@item
+Try to avoid complex @code{SELECT} queries on tables that are updated a
+lot. This is to avoid problems with table locking.
+@item
+With the new @code{MyISAM} tables you can insert rows into a table without deleted
+rows at the same time another thread is reading from it. If this is important
+for you, you should consider methods where you don't have to delete rows
+or run @code{OPTIMIZE TABLE} after you have deleted a lot of rows.
+@item
+Use @code{ALTER TABLE ... ORDER BY expr1,expr2...} if you mostly
+retrieve rows in expr1,expr2.. order. By using this option after big
+changes to the table, you may be able to get higher performance.
+@item
+In some cases it may make sense to introduce a column that is 'hashed'
+based on information from other columns. If this column is short and
+reasonably unique it may be much faster than a big index on many
+columns. In @strong{MySQL} it's very easy to use this extra column:
+@code{SELECT * FROM table_name WHERE hash=MD5(CONCAT(col_1,col_2))
+AND col_1='constant' AND col_2='constant'}
+@item
+For tables that change a lot you should try to avoid all @code{VARCHAR}
+or @code{BLOB} columns. You will get dynamic row length as soon as you
+are using a single @code{VARCHAR} or @code{BLOB} column. @xref{Table
+types}.
+@item
+It's not normally useful to split a table into different tables just
+because the rows get 'big'. To access a row, the biggest performance
+hit is the disk seek to find the first byte of the row. After finding
+the data, most new disks can read the whole row fast enough for most
+applications. The only cases where it really matters to split up a table are if
+it's a dynamic row size table (see above) that you can change to a fixed
+row size, or if you very often need to scan the table and don't need
+most of the columns. @xref{Table types}.
+@item
+If you very often need to calculate things based on information from a
+lot of rows (like counts of things), it's probably much better to
+introduce a new table and update the counter in real time. An update of
+type @code{UPDATE table SET count=count+1 WHERE index_column=constant}
+is very fast!
+
+This is really important when you use databases like @strong{MySQL} that
+only have table locking (multiple readers / single writers). This will
+also give better performance with most databases, as the row locking
+manager in this case will have less to do.
+@item
+If you need to collect statistics from big log tables, use summary tables
+instead of scanning the whole table. Maintaining the summaries should be
+much faster than trying to do statistics 'live'. It's much faster to
+regenerate new summary tables from the logs when things change
+(depending on business decisions) than to have to change the running
+application!
+@item
+If possible, one should classify reports as 'live' or 'statistical',
+where data needed for statistical reports are only generated based on
+summary tables that are generated from the actual data.
+@item
+Take advantage of the fact that columns have default values. Insert
+values explicitly only when the value to be inserted differs from the
+default. This reduces the parsing that @strong{MySQL} needs to do and
+improves the insert speed.
+@item
+In some cases it's convenient to pack and store data into a blob. In this
+case you have to add some extra code in your application to pack/unpack
+things in the blob, but this may save a lot of accesses at some stage.
+This is practical when you have data that doesn't conform to a static
+table structure.
+@item
+Normally you should try to keep all data non-redundant (what
+is called 3rd normal form in database theory), but you should not be
+afraid of duplicating things or creating summary tables if you need these
+to gain more speed.
+@item
+Stored procedures or UDF (user-defined functions) may be a good way to
+get more performance. In this case you should, however, always have a way
+to do the same thing some other (slower) way if you use a database that
+doesn't support them.
+@item
+You can always gain something by caching queries/answers in your
+application and trying to do many inserts/updates at the same time. If
+your database supports lock tables (like @strong{MySQL} and Oracle),
+this should help to ensure that the index cache is only flushed once
+after all updates.
+@item
+Use @code{INSERT /*! DELAYED */} when you do not need to know when your
+data is written. This speeds things up because many records can be written
+with a single disk write.
+@item
+Use @code{INSERT /*! LOW_PRIORITY */} when you want your selects to be
+more important.
+@item
+Use @code{SELECT /*! HIGH_PRIORITY */} to get selects that jump the
+queue. That is, the select is done even if there is somebody waiting to
+do a write.
+@item
+Use the multi-line @code{INSERT} statement to store many rows with one
+SQL command (many SQL servers support this).
+@item
+Use @code{LOAD DATA INFILE} to load bigger amounts of data. This is
+faster than normal inserts and will be even faster when @code{myisamchk}
+is integrated in @code{mysqld}.
+@item
+Use @code{AUTO_INCREMENT} columns to make unique values.
+@item
+Use @code{OPTIMIZE TABLE} once in a while to avoid fragmentation when
+using dynamic table format. @xref{OPTIMIZE TABLE, , @code{OPTIMIZE TABLE}}.
+
+@item
+Use @code{HEAP} tables to get more speed when possible. @xref{Table
+types}.
+@item
+When using a normal Web server setup, images should be stored as
+files. That is, store only a file reference in the database. The main
+reason for this is that a normal Web server is much better at caching
+files than database contents, so it's much easier to get a fast
+system if you are using files.
+@item
+Use in-memory tables for non-critical data that are accessed often (like
+information about the last shown banner for users that don't have
+cookies).
+@item
+Columns with identical information in different tables should be
+declared identical and have identical names. Before Version 3.23 you
+got slow joins otherwise.
+
+Try to keep the names simple (use @code{name} instead of
+@code{customer_name} in the customer table). To make your names portable
+to other SQL servers you should keep them shorter than 18 characters.
+@item
+If you need REALLY high speed, you should take a look at the low-level
+interfaces for data storage that the different SQL servers support! For
+example, by accessing the @strong{MySQL} @code{MyISAM} tables directly, you
+could get a speed increase of 2-5 times compared to using the SQL interface.
+To be able to do this the data must be on the same server as
+the application, and usually it should only be accessed by one process
+(because external file locking is really slow). One could eliminate the
+above problems by introducing low-level @code{MyISAM} commands in the
+@strong{MySQL} server (this could be one easy way to get more
+performance if needed). By carefully designing the database interface,
+it should be quite easy to support this type of optimization.
+@item
+In many cases it's faster to access data from a database (using a live
+connection) than accessing a text file, just because the database is
+likely to be more compact than the text file (if you are using numerical
+data), and this will involve fewer disk accesses. You will also save
+code because you don't have to parse your text files to find line and
+column boundaries.
+@item
+You can also use replication to speed things up. @xref{Replication}.
+@item
+Declaring a table with @code{DELAY_KEY_WRITE=1} will make the updating of
+indexes faster, as these are not logged to disk until the file is closed.
+The downside is that you should run @code{myisamchk} on these tables before
+you start @code{mysqld} to ensure that they are okay if something killed
+@code{mysqld} in the middle. As the key information can always be generated
+from the data, you should not lose anything by using @code{DELAY_KEY_WRITE}.
+(A creation example follows this list.)
+@end itemize
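+
+A minimal sketch of the @code{DELAY_KEY_WRITE} tip from the last item
+above (the table and columns are hypothetical):
+
+@example
+mysql> CREATE TABLE log_hits (id INT NOT NULL, hits INT, KEY (id))
+       DELAY_KEY_WRITE=1;
+@end example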
+
+
+@node Locking Issues, Optimizing Database Structure, Query Speed, MySQL Optimization
+@section Locking Issues
+
+
+@menu
+* Internal locking::
+* Table locking::
+@end menu
+
+@node Internal locking, Table locking, Locking Issues, Locking Issues
+@subsection How MySQL Locks Tables
+
+@cindex internal locking
+@cindex locking, tables
+@cindex tables, locking
+
+You can find a discussion about different locking methods in the appendix.
+@xref{Locking methods}.
+
+All locking in @strong{MySQL} is deadlock-free. This is managed by always
+requesting all needed locks at once at the beginning of a query and always
+locking the tables in the same order.
+
+The locking method @strong{MySQL} uses for @code{WRITE} locks works as follows:
+
+@itemize @bullet
+@item
+If there are no locks on the table, put a write lock on it.
+@item
+Otherwise, put the lock request in the write lock queue.
+@end itemize
+
+The locking method @strong{MySQL} uses for @code{READ} locks works as follows:
+
+@itemize @bullet
+@item
+If there are no write locks on the table, put a read lock on it.
+@item
+Otherwise, put the lock request in the read lock queue.
+@end itemize
+
+When a lock is released, the lock is made available to the threads
+in the write lock queue, then to the threads in the read lock queue.
+
+This means that if you have many updates on a table, @code{SELECT}
+statements will wait until there are no more updates.
+
+To work around this for the case where you want to do many @code{INSERT} and
+@code{SELECT} operations on a table, you can insert rows in a temporary
+table and update the real table with the records from the temporary table
+once in a while.
+
+This can be done with the following code:
+@example
+mysql> LOCK TABLES real_table WRITE, insert_table WRITE;
+mysql> INSERT INTO real_table SELECT * FROM insert_table;
+mysql> TRUNCATE TABLE insert_table;
+mysql> UNLOCK TABLES;
+@end example
+
+You can use the @code{LOW_PRIORITY} option with @code{INSERT},
+@code{UPDATE}, or @code{DELETE}, or @code{HIGH_PRIORITY} with
+@code{SELECT}, if you want to prioritize retrieval in some specific
+cases. You can also start @code{mysqld} with @code{--low-priority-updates}
+to get the same behavior.
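+
+For example (with a hypothetical table), to give a write low priority and a
+read high priority:
+
+@example
+mysql> INSERT LOW_PRIORITY INTO log_table (msg) VALUES ('hello');
+mysql> SELECT HIGH_PRIORITY * FROM log_table;
+@end example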
+
+Using @code{SQL_BUFFER_RESULT} can also help make table locks shorter.
+@xref{SELECT}.
+
+You could also change the locking code in @file{mysys/thr_lock.c} to use a
+single queue. In this case, write locks and read locks would have the same
+priority, which might help some applications.
+
+
+@node Table locking, , Internal locking, Locking Issues
+@subsection Table Locking Issues
+
+@cindex problems, table locking
+
+The table locking code in @strong{MySQL} is deadlock free.
+
+@strong{MySQL} uses table locking (instead of row locking or column
+locking) on all table types, except @code{BDB} tables, to achieve a very
+high lock speed. For large tables, table locking is MUCH better than
+row locking for most applications, but there are, of course, some
+pitfalls.
+
+For @code{BDB} and @code{InnoDB} tables, @strong{MySQL} only uses table
+locking if you explicitly lock the table with @code{LOCK TABLES} or
+execute a command that will modify every row in the table, like
+@code{ALTER TABLE}. For these table types we recommend that you not use
+@code{LOCK TABLES} at all.
+
+In @strong{MySQL} Version 3.23.7 and above, you can insert rows into
+@code{MyISAM} tables at the same time other threads are reading from the
+table. Note that currently this only works if there are no holes left by
+deleted rows in the table at the time the insert is made. When all holes
+have been filled with new data, concurrent inserts will automatically be
+enabled again.
+
+Table locking enables many threads to read from a table at the same
+time, but if a thread wants to write to a table, it must first get
+exclusive access. During the update, all other threads that want to
+access this particular table will wait until the update is done.
+
+As updates on tables normally are considered to be more important than
+@code{SELECT} statements, all statements that update a table have higher
+priority than statements that retrieve information from a table. This should
+ensure that updates are not 'starved' just because someone issues a lot of
+heavy queries against a specific table. (You can change this by using
+@code{LOW_PRIORITY} with the statement that does the update or
+@code{HIGH_PRIORITY} with the @code{SELECT} statement.)
+
+Starting from @strong{MySQL} Version 3.23.7, you can use the
+@code{max_write_lock_count} variable to force @strong{MySQL} to
+temporarily give all @code{SELECT} statements that are waiting for a table a
+higher priority after a specific number of inserts on that table.
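+
+For example, you could start the server like this (a sketch; the value
+of @code{10} is only an illustration):
+
+@example
+shell> safe_mysqld -O max_write_lock_count=10 &
+@end example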
+
+Table locking is, however, not very good under the following scenario:
+
+@itemize @bullet
+@item
+A client issues a @code{SELECT} that takes a long time to run.
+@item
+Another client then issues an @code{UPDATE} on a used table. This client
+will wait until the @code{SELECT} is finished.
+@item
+Another client issues another @code{SELECT} statement on the same table. As
+@code{UPDATE} has higher priority than @code{SELECT}, this @code{SELECT}
+will wait for the @code{UPDATE} to finish. It will also wait for the first
+@code{SELECT} to finish!
+@item
+A thread is waiting for something like @code{full disk}, in which case all
+threads that want to access the problem table will also be put in a waiting
+state until more disk space is made available.
+@end itemize
+
+Some possible solutions to this problem are:
+
+@itemize @bullet
+@item
+Try to get the @code{SELECT} statements to run faster. You may have to create
+some summary tables to do this.
+
+@item
+Start @code{mysqld} with @code{--low-priority-updates}. This will give
+all statements that update (modify) a table lower priority than a @code{SELECT}
+statement. In this case the last @code{SELECT} statement in the previous
+scenario would execute before the @code{UPDATE} statement.
+
+@item
+You can give a specific @code{INSERT}, @code{UPDATE}, or @code{DELETE}
+statement lower priority with the @code{LOW_PRIORITY} attribute.
+
+@item
+Start @code{mysqld} with a low value for @code{max_write_lock_count} to allow
+@code{READ} locks to be granted after a certain number of @code{WRITE} locks.
+
+@item
+You can specify that all updates from a specific thread should be done with
+low priority by using the SQL command: @code{SET SQL_LOW_PRIORITY_UPDATES=1}.
+@xref{SET OPTION, , @code{SET OPTION}}.
+
+@item
+You can specify that a specific @code{SELECT} is very important with the
+@code{HIGH_PRIORITY} attribute. @xref{SELECT, , @code{SELECT}}.
+
+@item
+If you have problems with @code{INSERT} combined with @code{SELECT},
+switch to using the new @code{MyISAM} tables, as these support concurrent
+@code{SELECT}s and @code{INSERT}s.
+
+@item
+If you mainly mix @code{INSERT} and @code{SELECT} statements, the
+@code{DELAYED} attribute to @code{INSERT} will probably solve your problems.
+@xref{INSERT, , @code{INSERT}}.
+
+@item
+If you have problems with @code{SELECT} and @code{DELETE}, the @code{LIMIT}
+option to @code{DELETE} may help. @xref{DELETE, , @code{DELETE}}.
+@end itemize
+
+
+@node Optimizing Database Structure, Optimizing the Server, Locking Issues, MySQL Optimization
+@section Optimizing Database Structure
+
+
+@menu
+* Design::
+* Data size::
+* MySQL indexes::
+* Indexes::
+* Multiple-column indexes::
+* Table cache::
+* Creating many tables::
+* Open tables::
+@end menu
+
+@node Design, Data size, Optimizing Database Structure, Optimizing Database Structure
+@subsection Design Choices
+
+@cindex design, choices
+@cindex database design
+@cindex storage of data
+
+@strong{MySQL} keeps row data and index data in separate files. Many (almost
+all) other databases mix row and index data in the same file. We believe that
+the @strong{MySQL} choice is better for a very wide range of modern systems.
+
+Another way to store the row data is to keep the information for each
+column in a separate area (examples are SDBM and Focus). This will cause a
+performance hit for every query that accesses more than one column. Because
+this degenerates so quickly when more than one column is accessed,
+we believe that this model is not good for general purpose databases.
+
+The more common case is that the index and data are stored together
+(like in Oracle/Sybase et al). In this case you will find the row
+information at the leaf page of the index. The good thing with this
+layout is that it, in many cases, depending on how well the index is
+cached, saves a disk read. The bad things with this layout are:
+
+@itemize @bullet
+@item
+Table scanning is much slower because you have to read through the indexes
+to get at the data.
+@item
+You can't use only the index table to retrieve data for a query.
+@item
+You lose a lot of space, as you must duplicate indexes from the nodes
+(as you can't store the row in the nodes).
+@item
+Deletes will degenerate the table over time (as indexes in nodes are
+usually not updated on delete).
+@item
+It's harder to cache ONLY the index data.
+@end itemize
+
+
+@node Data size, MySQL indexes, Design, Optimizing Database Structure
+@subsection Get Your Data as Small as Possible
+
+@cindex data, size
+@cindex reducing, data size
+@cindex storage space, minimizing
+@cindex tables, improving performance
+@cindex performance, improving
+
+One of the most basic optimizations is to get your data (and indexes) to
+take as little space on the disk (and in memory) as possible. This can
+give huge improvements because disk reads are faster and normally less
+main memory will be used. Indexing also takes less resources if
+done on smaller columns.
+
+@strong{MySQL} supports a lot of different table types and row formats.
+Choosing the right table format may give you a big performance gain.
+@xref{Table types}.
+
+You can get better performance on a table and minimize storage space by
+using the techniques listed below (a combined example follows the list):
+
+@itemize @bullet
+@item
+Use the most efficient (smallest) types possible. @strong{MySQL} has
+many specialized types that save disk space and memory.
+
+@item
+Use the smaller integer types if possible to get smaller tables. For
+example, @code{MEDIUMINT} is often better than @code{INT}.
+
+@item
+Declare columns to be @code{NOT NULL} if possible. It makes everything
+faster and you save one bit per column. Note that if you really need
+@code{NULL} in your application you should definitely use it. Just avoid
+having it on all columns by default.
+
+@item
+If you don't have any variable-length columns (@code{VARCHAR},
+@code{TEXT}, or @code{BLOB} columns), a fixed-size record format is
+used. This is faster but unfortunately may waste some space.
+@xref{MyISAM table formats}.
+
+@item
+The primary index of a table should be as short as possible. This makes
+identification of one row easy and efficient.
+
+@item
+For each table, you have to decide which storage/index method to
+use. @xref{Table types}.
+
+@item
+Only create the indexes that you really need. Indexes are good for
+retrieval but bad when you need to store things fast. If you mostly
+access a table by searching on a combination of columns, make an index
+on them. The first index part should be the most used column. If you are
+ALWAYS using many columns, you should use the column with more duplicates
+first to get better compression of the index.
+
+@item
+If it's very likely that a column has a unique prefix in its first few
+characters, it's better to only index this prefix. @strong{MySQL}
+supports an index on a part of a character column. Shorter indexes are
+faster not only because they take less disk space but also because they
+will give you more hits in the index cache and thus fewer disk
+seeks. @xref{Server parameters}.
+
+@item
+In some circumstances it can be beneficial to split a table that is
+scanned very often into two. This is especially true if it is a
+dynamic-format table and it is possible to use a smaller static-format
+table to find the relevant rows when scanning the table.
+@end itemize
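+
+The following @code{CREATE TABLE} statement is a small sketch that
+combines several of the techniques above (the table and column names are
+only examples): it uses small integer types, declares the columns
+@code{NOT NULL}, and indexes only a prefix of the long character column:
+
+@example
+mysql> CREATE TABLE customer (
+           id MEDIUMINT UNSIGNED NOT NULL,
+           age TINYINT UNSIGNED NOT NULL,
+           name CHAR(60) NOT NULL,
+           PRIMARY KEY (id),
+           KEY name_prefix (name(10)));
+@end example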
+
+
+@node MySQL indexes, Indexes, Data size, Optimizing Database Structure
+@subsection How MySQL Uses Indexes
+
+@cindex indexes, use of
+
+Indexes are used to quickly find rows with a specific value in one
+column. Without an index @strong{MySQL} has to start with the first record
+and then read through the whole table until it finds the relevant
+rows. The bigger the table, the more this costs. If the table has an index
+for the columns in question, @strong{MySQL} can quickly get a position to
+seek to in the middle of the data file without having to look at all the
+data. If a table has 1000 rows, this is at least 100 times faster than
+reading sequentially. Note that if you need to access almost all 1000
+rows it is faster to read sequentially because we then avoid disk seeks.
+
+All @strong{MySQL} indexes (@code{PRIMARY}, @code{UNIQUE}, and
+@code{INDEX}) are stored in B-trees. Strings are automatically prefix-
+and end-space compressed. @xref{CREATE INDEX, , @code{CREATE INDEX}}.
+
+Indexes are used to:
+@itemize @bullet
+@item
+Quickly find the rows that match a @code{WHERE} clause.
+
+@item
+Retrieve rows from other tables when performing joins.
+
+@item
+Find the @code{MAX()} or @code{MIN()} value for a specific indexed
+column. This is optimized by a preprocessor that checks whether you are
+using @code{WHERE key_part_# = constant} on all key parts < N. In this case
+@strong{MySQL} will do a single key lookup and replace the @code{MIN()}
+expression with a constant. If all expressions are replaced with
+constants, the query will return at once:
+
+@example
+SELECT MIN(key_part2),MAX(key_part2) FROM table_name where key_part1=10
+@end example
+
+@item
+Sort or group a table if the sorting or grouping is done on a leftmost
+prefix of a usable key (for example, @code{ORDER BY key_part_1,key_part_2}). The
+key is read in reverse order if all key parts are followed by @code{DESC}.
+
+The index can also be used even if the @code{ORDER BY} doesn't match the index
+exactly, as long as all the unused index parts and all the extra
+@code{ORDER BY} columns are constants in the @code{WHERE} clause. The
+following queries will use the index to resolve the @code{ORDER BY} part:
+
+@example
+SELECT * FROM foo ORDER BY key_part1,key_part2,key_part3;
+SELECT * FROM foo WHERE column=constant ORDER BY column, key_part1;
+SELECT * FROM foo WHERE key_part1=const GROUP BY key_part2;
+@end example
+
+@item
+In some cases a query can be optimized to retrieve values without
+consulting the data file. If all used columns for some table are numeric
+and form a leftmost prefix for some key, the values may be retrieved
+from the index tree for greater speed:
+
+@example
+SELECT key_part3 FROM table_name WHERE key_part1=1
+@end example
+
+@end itemize
+
+Suppose you issue the following @code{SELECT} statement:
+
+@example
+mysql> SELECT * FROM tbl_name WHERE col1=val1 AND col2=val2;
+@end example
+
+If a multiple-column index exists on @code{col1} and @code{col2}, the
+appropriate rows can be fetched directly. If separate single-column
+indexes exist on @code{col1} and @code{col2}, the optimizer tries to
+find the most restrictive index by deciding which index will find fewer
+rows and using that index to fetch the rows.
+
+@cindex indexes, leftmost prefix of
+@cindex leftmost prefix of indexes
+If the table has a multiple-column index, any leftmost prefix of the
+index can be used by the optimizer to find rows. For example, if you
+have a three-column index on @code{(col1,col2,col3)}, you have indexed
+search capabilities on @code{(col1)}, @code{(col1,col2)}, and
+@code{(col1,col2,col3)}.
+
+@strong{MySQL} can't use a partial index if the columns don't form a
+leftmost prefix of the index. Suppose you have the @code{SELECT}
+statements shown below:
+
+@example
+mysql> SELECT * FROM tbl_name WHERE col1=val1;
+mysql> SELECT * FROM tbl_name WHERE col2=val2;
+mysql> SELECT * FROM tbl_name WHERE col2=val2 AND col3=val3;
+@end example
+
+If an index exists on @code{(col1,col2,col3)}, only the first query
+shown above uses the index. The second and third queries do involve
+indexed columns, but @code{(col2)} and @code{(col2,col3)} are not
+leftmost prefixes of @code{(col1,col2,col3)}.
+
+@findex LIKE, and indexes
+@findex LIKE, and wildcards
+@cindex indexes, and @code{LIKE}
+@cindex wildcards, and @code{LIKE}
+@strong{MySQL} also uses indexes for @code{LIKE} comparisons if the argument
+to @code{LIKE} is a constant string that doesn't start with a wild-card
+character. For example, the following @code{SELECT} statements use indexes:
+
+@example
+mysql> select * from tbl_name where key_col LIKE "Patrick%";
+mysql> select * from tbl_name where key_col LIKE "Pat%_ck%";
+@end example
+
+In the first statement, only rows with @code{"Patrick" <= key_col <
+"Patricl"} are considered. In the second statement, only rows with
+@code{"Pat" <= key_col < "Pau"} are considered.
+
+The following @code{SELECT} statements will not use indexes:
+@example
+mysql> select * from tbl_name where key_col LIKE "%Patrick%";
+mysql> select * from tbl_name where key_col LIKE other_col;
+@end example
+
+In the first statement, the @code{LIKE} value begins with a wild-card
+character. In the second statement, the @code{LIKE} value is not a
+constant.
+
+@findex IS NULL, and indexes
+@cindex indexes, and @code{IS NULL}
+Searching using @code{column_name IS NULL} will use indexes if
+@code{column_name} is indexed.
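+
+For example, the following query can use the index on @code{key_col}
+(a sketch using the same placeholder names as above):
+
+@example
+mysql> SELECT * FROM tbl_name WHERE key_col IS NULL;
+@end example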
+
+@strong{MySQL} normally uses the index that finds the least number of rows. An
+index is used for columns that you compare with the following operators:
+@code{=}, @code{>}, @code{>=}, @code{<}, @code{<=}, @code{BETWEEN}, and a
+@code{LIKE} with a non-wild-card prefix like @code{'something%'}.
+
+Any index that doesn't span all @code{AND} levels in the @code{WHERE} clause
+is not used to optimize the query. In other words: To be able to use an
+index, a prefix of the index must be used in every @code{AND} group.
+
+The following @code{WHERE} clauses use indexes:
+@example
+... WHERE index_part1=1 AND index_part2=2 AND other_column=3
+... WHERE index=1 OR A=10 AND index=2 /* index = 1 OR index = 2 */
+... WHERE index_part1='hello' AND index_part_3=5
+ /* optimized like "index_part1='hello'" */
+... WHERE index1=1 and index2=2 or index1=3 and index3=3;
+ /* Can use index on index1 but not on index2 or index 3 */
+@end example
+
+These @code{WHERE} clauses do @strong{NOT} use indexes:
+@example
+... WHERE index_part2=1 AND index_part3=2 /* index_part_1 is not used */
+... WHERE index=1 OR A=10 /* Index is not used in both AND parts */
+... WHERE index_part1=1 OR index_part2=10 /* No index spans all rows */
+@end example
+
+Note that in some cases @strong{MySQL} will not use an index, even if one
+would be available. Some of the cases where this happens are:
+
+@itemize @bullet
+@item
+If the use of the index would require @strong{MySQL} to access more
+than 30 % of the rows in the table. (In this case a table scan is
+probably much faster, as it will require far fewer seeks.)
+Note that if such a query uses @code{LIMIT} to only retrieve
+part of the rows, @strong{MySQL} will use an index anyway, as it can
+much more quickly find the few rows to return in the result
+(see the example following this list).
+@end itemize
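+
+For example, a query like the following (a sketch using the same
+placeholder names as above) may use the index on @code{key_col} even if
+the condition matches a large part of the table, because only a few rows
+have to be found and returned:
+
+@example
+mysql> SELECT * FROM tbl_name WHERE key_col > 1 ORDER BY key_col LIMIT 10;
+@end example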
+
+
+@menu
+* Indexes::
+* Multiple-column indexes::
+* Table cache::
+* Creating many tables::
+* Open tables::
+@end menu
+
+@node Indexes, Multiple-column indexes, MySQL indexes, Optimizing Database Structure
+@subsection Column Indexes
+
+@cindex indexes, columns
+@cindex columns, indexes
+@cindex keys
+
+All @strong{MySQL} column types can be indexed. Use of indexes on the
+relevant columns is the best way to improve the performance of @code{SELECT}
+operations.
+
+The maximum number of keys and the maximum index length are defined per
+table handler. @xref{Table types}. With all table handlers you can have
+at least 16 keys and a total index length of at least 256 bytes.
+
+For @code{CHAR} and @code{VARCHAR} columns, you can index a prefix of a
+column. This is much faster and requires less disk space than indexing the
+whole column. The syntax to use in the @code{CREATE TABLE} statement to
+index a column prefix looks like this:
+
+@example
+KEY index_name (col_name(length))
+@end example
+
+The example below creates an index for the first 10 characters of the
+@code{name} column:
+
+@example
+mysql> CREATE TABLE test (
+ name CHAR(200) NOT NULL,
+ KEY index_name (name(10)));
+@end example
+
+For @code{BLOB} and @code{TEXT} columns, you must index a prefix of the
+column. You cannot index the entire column.
+
+In @strong{MySQL} Version 3.23.23 or later, you can also create special
+@strong{FULLTEXT} indexes. They are used for full-text search. Only the
+@code{MyISAM} table type supports @code{FULLTEXT} indexes. They can be
+created only from @code{VARCHAR} and @code{TEXT} columns.
+Indexing always happens over the entire column and partial indexing is not
+supported. See @ref{Fulltext Search} for details.
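+
+As a sketch (the table and column names are only examples), a
+@code{FULLTEXT} index is created and used like this:
+
+@example
+mysql> CREATE TABLE articles (
+           title VARCHAR(200),
+           body TEXT,
+           FULLTEXT (title,body));
+
+mysql> SELECT * FROM articles
+           WHERE MATCH (title,body) AGAINST ('database');
+@end example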
+
+@node Multiple-column indexes, Table cache, Indexes, Optimizing Database Structure
+@subsection Multiple-Column Indexes
+
+@cindex multi-column indexes
+@cindex indexes, multi-column
+@cindex keys, multi-column
+
+@strong{MySQL} can create indexes on multiple columns. An index may
+consist of up to 15 columns. (On @code{CHAR} and @code{VARCHAR} columns you
+can also use a prefix of the column as a part of an index).
+
+A multiple-column index can be considered a sorted array containing values
+that are created by concatenating the values of the indexed columns.
+
+@strong{MySQL} uses multiple-column indexes in such a way that queries are
+fast when you specify a known quantity for the first column of the index in a
+@code{WHERE} clause, even if you don't specify values for the other columns.
+
+Suppose a table is created using the following specification:
+
+@example
+mysql> CREATE TABLE test (
+ id INT NOT NULL,
+ last_name CHAR(30) NOT NULL,
+ first_name CHAR(30) NOT NULL,
+ PRIMARY KEY (id),
+ INDEX name (last_name,first_name));
+@end example
+
+Then the index @code{name} is an index over @code{last_name} and
+@code{first_name}. The index will be used for queries that specify
+values in a known range for @code{last_name}, or for both @code{last_name}
+and @code{first_name}.
+Therefore, the @code{name} index will be used in the following queries:
+
+@example
+mysql> SELECT * FROM test WHERE last_name="Widenius";
+
+mysql> SELECT * FROM test WHERE last_name="Widenius"
+ AND first_name="Michael";
+
+mysql> SELECT * FROM test WHERE last_name="Widenius"
+ AND (first_name="Michael" OR first_name="Monty");
+
+mysql> SELECT * FROM test WHERE last_name="Widenius"
+ AND first_name >="M" AND first_name < "N";
+@end example
+
+However, the @code{name} index will NOT be used in the following queries:
+
+@example
+mysql> SELECT * FROM test WHERE first_name="Michael";
+
+mysql> SELECT * FROM test WHERE last_name="Widenius"
+ OR first_name="Michael";
+@end example
+
+For more information on the manner in which @strong{MySQL} uses indexes to
+improve query performance, see @ref{MySQL indexes, , @strong{MySQL}
+indexes}.
+
+
+@node Table cache, Creating many tables, Multiple-column indexes, Optimizing Database Structure
+@subsection How MySQL Opens and Closes Tables
+
+@findex table_cache
+
+@cindex tables, opening
+@cindex tables, closing
+@cindex opening, tables
+@cindex closing, tables
+@cindex table cache
+
+@code{table_cache}, @code{max_connections}, and @code{max_tmp_tables}
+affect the maximum number of files the server keeps open. If you
+increase any of these values, you may run up against a limit
+imposed by your operating system on the per-process number of open file
+descriptors. However, you can increase the limit on many systems.
+Consult your OS documentation to find out how to do this, because the
+method for changing the limit varies widely from system to system.
+
+@code{table_cache} is related to @code{max_connections}. For example,
+for 200 concurrent running connections, you should have a table cache of
+at least @code{200 * n}, where @code{n} is the maximum number of tables
+in a join. You also need to reserve some extra file descriptors for
+temporary tables and files.
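+
+For example, if you expect up to 200 concurrent connections and your
+largest join involves 4 tables, a reasonable starting point (only a
+sketch; tune the values for your own workload) would be:
+
+@example
+shell> safe_mysqld -O max_connections=200 -O table_cache=800 &
+@end example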
+
+The cache of open tables can grow to a maximum of @code{table_cache}
+(default 64; this can be changed with the @code{-O table_cache=#}
+option to @code{mysqld}). A table is never closed, except when the
+cache is full and another thread tries to open a table or if you use
+@code{mysqladmin refresh} or @code{mysqladmin flush-tables}.
+
+When the table cache fills up, the server uses the following procedure
+to locate a cache entry to use:
+
+@itemize @bullet
+@item
+Tables that are not currently in use are released, in least-recently-used
+order.
+
+@item
+If the cache is full and no tables can be released, but a new table needs to
+be opened, the cache is temporarily extended as necessary.
+
+@item
+If the cache is in a temporarily-extended state and a table goes from in-use
+to not-in-use state, the table is closed and released from the cache.
+@end itemize
+
+A table is opened for each concurrent access. This means that
+if you have two threads accessing the same table or access the table
+twice in the same query (with @code{AS}) the table needs to be opened twice.
+The first open of any table takes two file descriptors; each additional
+use of the table takes only one file descriptor. The extra descriptor
+for the first open is used for the index file; this descriptor is shared
+among all threads.
+
+You can check if your table cache is too small by checking the @code{mysqld}
+status variable @code{Opened_tables}. If this is quite big, even if you
+haven't done a lot of @code{FLUSH TABLES}, you should increase your table
+cache. @xref{SHOW STATUS}.
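+
+For example, you can check the value from the shell like this (a sketch;
+the exact output format may vary between versions):
+
+@example
+shell> mysqladmin extended-status | grep -i opened_tables
+@end example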
+
+
+@node Creating many tables, Open tables, Table cache, Optimizing Database Structure
+@subsection Drawbacks to Creating Large Numbers of Tables in the Same Database
+
+@cindex tables, too many
+
+If you have many files in a directory, open, close, and create operations will
+be slow. If you execute @code{SELECT} statements on many different tables,
+there will be a little overhead when the table cache is full, because for
+every table that has to be opened, another must be closed. You can reduce
+this overhead by making the table cache larger.
+
+
+@node Open tables, , Creating many tables, Optimizing Database Structure
+@subsection Why So Many Open Tables?
+
+@cindex tables, open
+@cindex open tables
+
+When you run @code{mysqladmin status}, you'll see something like this:
+
+@example
+Uptime: 426 Running threads: 1 Questions: 11082 Reloads: 1 Open tables: 12
+@end example
+
+This can be somewhat perplexing if you only have 6 tables.
+
+@strong{MySQL} is multithreaded, so it may have many queries on the same
+table simultaneously. To minimize the problem with two threads having
+different states on the same file, the table is opened independently by
+each concurrent thread. This takes some memory and one extra file
+descriptor for the data file. The index file descriptor is shared
+between all threads.
+
+
+@node Optimizing the Server, Disk issues, Optimizing Database Structure, MySQL Optimization
+@section Optimizing the MySQL Server
+
+
+@menu
+* System::
+* Server parameters::
+* Compile and link options::
+* Memory use::
+* DNS::
+* SET OPTION::
+@end menu
+
+@node System, Server parameters, Optimizing the Server, Optimizing the Server
+@subsection System/Compile Time and Startup Parameter Tuning
+
+@cindex compiling, optimizing
+@cindex system optimization
+@cindex startup parameters, tuning
+
+We start with the system level things since some of these decisions have
+to be made very early. In other cases a fast look at this part may
+suffice, because it is not that important for the big gains. However, it is always
+nice to have a feeling about how much one could gain by changing things
+at this level.
+
+The default OS to use is really important! To get the most use of
+multiple-CPU machines, one should use Solaris (because its thread
+implementation works really well) or Linux (because the 2.2 kernel has
+really good SMP support). Also, on 32-bit machines Linux has a 2G
+file-size limit by default. Hopefully this will be fixed soon when new
+file systems are released (XFS/ReiserFS). If you have a desperate need
+for files bigger than 2G on Linux-Intel 32-bit, you should get the LFS
+patch for the ext2 file system.
+
+Because we have not run @strong{MySQL} in production on that many platforms, we
+advise you to test your intended platform before choosing it, if possible.
+
+@cindex locking
+Other tips:
+@itemize @bullet
+@item
+If you have enough RAM, you could remove all swap devices. Some
+operating systems will use a swap device in some contexts even if you
+have free memory.
+@item
+Use the @code{--skip-locking} @strong{MySQL} option to avoid external
+locking. Note that this will not impact @strong{MySQL}'s functionality as
+long as you only run one server. Just remember to take down the server (or
+lock relevant parts) before you run @code{myisamchk}; see the example
+following this list. On some systems this option is mandatory, because
+external locking does not work in any case.
+
+The @code{--skip-locking} option is on by default when compiling with
+MIT-pthreads, because @code{flock()} isn't fully supported by
+MIT-pthreads on all platforms. It's also on by default for Linux,
+as Linux file locking is not yet safe.
+
+The only case when you can't use @code{--skip-locking} is if you run
+multiple @strong{MySQL} @emph{servers} (not clients) on the same data,
+or if you run @code{myisamchk} on a table without first flushing and
+locking the @code{mysqld} server tables.
+
+You can still use @code{LOCK TABLES}/@code{UNLOCK TABLES} even if you
+are using @code{--skip-locking}.
+@end itemize
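+
+For example, when running with @code{--skip-locking} you could flush the
+tables and then check them, assuming no clients write to the tables
+while @code{myisamchk} runs (a sketch; the data directory path is
+hypothetical):
+
+@example
+shell> mysqladmin flush-tables
+shell> myisamchk --fast --silent /usr/local/mysql/data/*/*.MYI
+@end example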
+
+
+@node Server parameters, Compile and link options, System, Optimizing the Server
+@subsection Tuning Server Parameters
+
+@cindex parameters, server
+@cindex @code{mysqld} server, buffer sizes
+@cindex buffer sizes, @code{mysqld} server
+@cindex startup parameters
+
+You can get the default buffer sizes used by the @code{mysqld} server
+with this command:
+
+@example
+shell> mysqld --help
+@end example
+
+@cindex @code{mysqld} options
+@cindex variables, @code{mysqld}
+This command produces a list of all @code{mysqld} options and configurable
+variables. The output includes the default values and looks something
+like this:
+
+@example
+Possible variables for option --set-variable (-O) are:
+back_log current value: 5
+bdb_cache_size current value: 1048540
+binlog_cache_size current_value: 32768
+connect_timeout current value: 5
+delayed_insert_timeout current value: 300
+delayed_insert_limit current value: 100
+delayed_queue_size current value: 1000
+flush_time current value: 0
+interactive_timeout current value: 28800
+join_buffer_size current value: 131072
+key_buffer_size current value: 1048540
+lower_case_table_names current value: 0
+long_query_time current value: 10
+max_allowed_packet current value: 1048576
+max_binlog_cache_size current_value: 4294967295
+max_connections current value: 100
+max_connect_errors current value: 10
+max_delayed_threads current value: 20
+max_heap_table_size current value: 16777216
+max_join_size current value: 4294967295
+max_sort_length current value: 1024
+max_tmp_tables current value: 32
+max_write_lock_count current value: 4294967295
+myisam_sort_buffer_size current value: 8388608
+net_buffer_length current value: 16384
+net_retry_count current value: 10
+net_read_timeout current value: 30
+net_write_timeout current value: 60
+query_buffer_size current value: 0
+record_buffer current value: 131072
+slow_launch_time current value: 2
+sort_buffer current value: 2097116
+table_cache current value: 64
+thread_concurrency current value: 10
+tmp_table_size current value: 1048576
+thread_stack current value: 131072
+wait_timeout current value: 28800
+@end example
+
+If there is a @code{mysqld} server currently running, you can see what
+values it actually is using for the variables by executing this command:
+
+@example
+shell> mysqladmin variables
+@end example
+
+You can find a full description for all variables in the @code{SHOW VARIABLES}
+section in this manual. @xref{SHOW VARIABLES}.
+
+You can also see some statistics from a running server by issuing the command
+@code{SHOW STATUS}. @xref{SHOW STATUS}.
+
+@strong{MySQL} uses algorithms that are very scalable, so you can usually
+run with very little memory. If you, however, give @strong{MySQL} more
+memory, you will normally also get better performance.
+
+When tuning a @strong{MySQL} server, the two most important variables to use
+are @code{key_buffer_size} and @code{table_cache}. You should first feel
+confident that you have these right before trying to change any of the
+other variables.
+
+If you have much memory (>=256M) and many tables and want maximum performance
+with a moderate number of clients, you should use something like this:
+
+@example
+shell> safe_mysqld -O key_buffer=64M -O table_cache=256 \
+ -O sort_buffer=4M -O record_buffer=1M &
+@end example
+
+If you have only 128M and only a few tables, but you still do a lot of
+sorting, you can use something like:
+
+@example
+shell> safe_mysqld -O key_buffer=16M -O sort_buffer=1M
+@end example
+
+If you have little memory and lots of connections, use something like this:
+
+@example
+shell> safe_mysqld -O key_buffer=512k -O sort_buffer=100k \
+ -O record_buffer=100k &
+@end example
+
+or even:
+
+@example
+shell> safe_mysqld -O key_buffer=512k -O sort_buffer=16k \
+ -O table_cache=32 -O record_buffer=8k -O net_buffer=1K &
+@end example
+
+When you have installed @strong{MySQL}, the @file{support-files} directory will
+contain some different @code{my.cnf} example files (@file{my-huge.cnf},
+@file{my-large.cnf}, @file{my-medium.cnf}, and @file{my-small.cnf}) that you
+can use as a base to optimize your system.
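+
+A minimal @file{my.cnf} along the same lines could look like this (the
+values are only placeholders; pick ones that match your memory and
+workload):
+
+@example
+[mysqld]
+set-variable = key_buffer=16M
+set-variable = table_cache=128
+set-variable = sort_buffer=1M
+set-variable = record_buffer=1M
+@end example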
+
+If there are very many connections, ``swapping problems'' may occur unless
+@code{mysqld} has been configured to use very little memory for each
+connection. @code{mysqld} performs better if you have enough memory for all
+connections, of course.
+
+Note that if you change an option to @code{mysqld}, it remains in effect only
+for that instance of the server.
+
+To see the effects of a parameter change, do something like this:
+
+@example
+shell> mysqld -O key_buffer=32m --help
+@end example
+
+Make sure that the @code{--help} option is last; otherwise, the effect of any
+options listed after it on the command line will not be reflected in the
+output.
+
+
+@node Compile and link options, Memory use, Server parameters, Optimizing the Server
+@subsection How Compiling and Linking Affects the Speed of MySQL
+
+@cindex linking, speed
+@cindex compiling, speed
+@cindex speed, compiling
+@cindex speed, linking
+
+Most of the following tests are done on Linux with the
+@strong{MySQL} benchmarks, but they should give some indication for
+other operating systems and workloads.
+
+You get the fastest executable when you link with @code{-static}.
+
+On Linux, you will get the fastest code when compiling with @code{pgcc}
+and @code{-O3}. To compile @file{sql_yacc.cc} with these options, you
+need about 200M memory because @code{gcc/pgcc} needs a lot of memory to
+make all functions inline. You should also set @code{CXX=gcc} when
+configuring @strong{MySQL} to avoid inclusion of the @code{libstdc++}
+library (it is not needed). Note that with some versions of @code{pgcc},
+the resulting code will only run on true Pentium processors, even if you
+use the compiler option specifying that you want the resulting code to
+work on all x586-type processors (like AMD).
+
+By just using a better compiler and/or better compiler options you can
+get a 10-30 % speed increase in your application. This is particularly
+important if you compile the SQL server yourself!
+
+We have tested both the Cygnus CodeFusion and Fujitsu compilers, but
+when we tested them, neither was sufficiently bug free to allow
+@strong{MySQL} to be compiled with optimizations on.
+
+When you compile @strong{MySQL} you should only include support for the
+character sets that you are going to use. (Option @code{--with-charset=xxx}).
+The standard @strong{MySQL} binary distributions are compiled with support
+for all character sets.
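+
+A configure line that combines the suggestions above might look something
+like this (a sketch; the installation prefix and character set are only
+examples):
+
+@example
+shell> CC=gcc CFLAGS="-O3" CXX=gcc \
+       CXXFLAGS="-O3 -felide-constructors -fno-exceptions -fno-rtti" \
+       ./configure --prefix=/usr/local/mysql --enable-assembler \
+       --with-mysqld-ldflags=-all-static --with-charset=latin1
+@end example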
+
+Here is a list of some measurements that we have done:
+@itemize @bullet
+@item
+If you use @code{pgcc} and compile everything with @code{-O6}, the
+@code{mysqld} server is 1% faster than with @code{gcc} 2.95.2.
+
+@item
+If you link dynamically (without @code{-static}), the result is 13%
+slower on Linux. Note that you still can use a dynamic linked
+@strong{MySQL} library. It is only the server that is critical for
+performance.
+
+@item
+If you strip your @code{mysqld} binary with @code{strip libexec/mysqld},
+the resulting binary can be up to 4 % faster.
+
+@item
+If you connect using TCP/IP rather than Unix sockets, the result is 7.5%
+slower on the same computer. (If you are connecting to @code{localhost},
+@strong{MySQL} will, by default, use sockets.)
+
+@item
+If you connect using TCP/IP from another computer over a 100M Ethernet,
+things will be 8-11 % slower.
+
+@item
+If you compile with @code{--with-debug=full}, then you will lose 20 %
+for most queries, but some queries may take substantially longer (the
+@strong{MySQL} benchmarks ran 35 % slower).
+If you use @code{--with-debug}, then you will only lose 15 %.
+By starting a @code{mysqld} version compiled with @code{--with-debug=full}
+with @code{--skip-safemalloc} the end result should be close to when
+configuring with @code{--with-debug}.
+
+@item
+On a Sun SPARCstation 20, SunPro C++ 4.2 is 5 % faster than @code{gcc} 2.95.2.
+
+@item
+Compiling with @code{gcc} 2.95.2 for ultrasparc with the option
+@code{-mcpu=v8 -Wa,-xarch=v8plusa} gives 4 % more performance.
+
+@item
+On Solaris 2.5.1, MIT-pthreads is 8-12% slower than Solaris native
+threads on a single processor. With more load/CPUs the difference should
+get bigger.
+
+@item
+Running with @code{--log-bin} makes @strong{MySQL} 1 % slower.
+
+@item
+Compiling on Linux-x86 using @code{gcc} without frame pointers
+(@code{-fomit-frame-pointer} or @code{-fomit-frame-pointer -ffixed-ebp})
+makes @code{mysqld} 1-4% faster.
+@end itemize
+
+The @strong{MySQL}-Linux distribution provided by @strong{MySQL AB} used
+to be compiled with @code{pgcc}, but we had to go back to regular gcc
+because of a bug in @code{pgcc} that would generate code that does
+not run on AMD. We will continue using gcc until that bug is resolved.
+In the meantime, if you have a non-AMD machine, you can get a faster
+binary by compiling with @code{pgcc}. The standard @strong{MySQL}
+Linux binary is linked statically to make it faster and more portable.
+
+
+@node Memory use, DNS, Compile and link options, Optimizing the Server
+@subsection How MySQL Uses Memory
+
+@cindex memory use
+
+The list below indicates some of the ways that the @code{mysqld} server
+uses memory. Where applicable, the name of the server variable relevant
+to the memory use is given:
+
+@itemize @bullet
+@item
+The key buffer (variable @code{key_buffer_size}) is shared by all
+threads; other buffers used by the server are allocated as
+needed. @xref{Server parameters}.
+
+@item
+Each connection uses some thread-specific space: A stack (default 64K,
+variable @code{thread_stack}), a connection buffer (variable
+@code{net_buffer_length}), and a result buffer (variable
+@code{net_buffer_length}). The connection buffer and result buffer are
+dynamically enlarged up to @code{max_allowed_packet} when needed. When
+a query is running, a copy of the current query string is also allocated.
+
+@item
+All threads share the same base memory.
+
+@item
+Only the compressed ISAM / MyISAM tables are memory mapped. This is
+because the 32-bit memory space of 4GB is not large enough for most
+big tables. When systems with a 64-bit address space become more
+common we may add general support for memory mapping.
+
+@item
+Each request doing a sequential scan over a table allocates a read buffer
+(variable @code{record_buffer}).
+
+@item
+All joins are done in one pass, and most joins can be done without even
+using a temporary table. Most temporary tables are memory-based (HEAP)
+tables. Temporary tables with a big record length (calculated as the
+sum of all column lengths) or that contain @code{BLOB} columns are
+stored on disk.
+
+One problem in @strong{MySQL} versions before Version 3.23.2 is that if a HEAP table
+exceeds the size of @code{tmp_table_size}, you get the error @code{The
+table tbl_name is full}. In newer versions this is handled by
+automatically changing the in-memory (HEAP) table to a disk-based
+(MyISAM) table as necessary. To work around this problem, you can
+increase the temporary table size by setting the @code{tmp_table_size}
+option to @code{mysqld}, or by setting the SQL option
+@code{SQL_BIG_TABLES} in the client program. @xref{SET OPTION, ,
+@code{SET OPTION}}. In @strong{MySQL} Version 3.20, the maximum size of the
+temporary table was @code{record_buffer*16}, so if you are using this
+version, you have to increase the value of @code{record_buffer}. You can
+also start @code{mysqld} with the @code{--big-tables} option to always
+store temporary tables on disk. However, this will affect the speed of
+many complicated queries.
+
+@item
+Most requests doing a sort allocate a sort buffer and 0-2 temporary
+files, depending on the result set size. @xref{Temporary files}.
+
+@item
+Almost all parsing and calculating is done in a local memory store. No
+memory overhead is needed for small items and the normal slow memory
+allocation and freeing is avoided. Memory is allocated only for
+unexpectedly large strings (this is done with @code{malloc()} and
+@code{free()}).
+
+@item
+Each index file is opened once and the data file is opened once for each
+concurrently running thread. For each concurrent thread, a table structure,
+column structures for each column, and a buffer of size @code{3 * n} is
+allocated (where @code{n} is the maximum row length, not counting @code{BLOB}
+columns). A @code{BLOB} uses 5 to 8 bytes plus the length of the @code{BLOB}
+data. The @code{ISAM}/@code{MyISAM} table handlers will use one extra row
+buffer for internal usage.
+
+@item
+For each table having @code{BLOB} columns, a buffer is enlarged dynamically
+to read in larger @code{BLOB} values. If you scan a table, a buffer as large
+as the largest @code{BLOB} value is allocated.
+
+@item
+Table handlers for all in-use tables are saved in a cache and managed as a
+FIFO. Normally the cache has 64 entries. If a table has been used by two
+running threads at the same time, the cache contains two entries for the
+table. @xref{Table cache}.
+
+@item
+A @code{mysqladmin flush-tables} command closes all tables that are not in
+use and marks all in-use tables to be closed when the currently executing
+thread finishes. This will effectively free most in-use memory.
+@end itemize
+
+@code{ps} and other system status programs may report that @code{mysqld}
+uses a lot of memory. This may be caused by thread-stacks on different
+memory addresses. For example, the Solaris version of @code{ps} counts
+the unused memory between stacks as used memory. You can verify this by
+checking available swap with @code{swap -s}. We have tested
+@code{mysqld} with commercial memory-leakage detectors, so there should
+be no memory leaks.
+
+
+@node DNS, SET OPTION, Memory use, Optimizing the Server
+@subsection How MySQL uses DNS
+
+@cindex DNS
+@cindex hostname caching
+
+When a new client connects to @code{mysqld}, @code{mysqld} will spawn a
+new thread to handle the request. This thread will first check if the
+hostname is in the hostname cache. If not, the thread will call
+@code{gethostbyaddr_r()} and @code{gethostbyname_r()} to resolve the
+hostname.
+
+If the operating system doesn't support the above thread-safe calls, the
+thread will lock a mutex and call @code{gethostbyaddr()} and
+@code{gethostbyname()} instead. Note that in this case no other thread
+can resolve hostnames that are not in the hostname cache until the
+first thread is finished.
+
+You can disable DNS host lookups by starting @code{mysqld} with
+@code{--skip-name-resolve}. In this case, however, you can only use IP
+numbers in the @strong{MySQL} privilege tables.
+
+If you have a very slow DNS and many hosts, you can get more performance by
+either disabling DNS lookups with @code{--skip-name-resolve} or by
+increasing the @code{HOST_CACHE_SIZE} define (default: 128) and recompiling
+@code{mysqld}.
+
+You can disable the hostname cache with @code{--skip-host-cache}. You
+can clear the hostname cache with @code{FLUSH HOSTS} or @code{mysqladmin
+flush-hosts}.
+
+If you don't want to allow connections over @code{TCP/IP}, you can do this
+by starting @code{mysqld} with @code{--skip-networking}.
+
+
+@node SET OPTION, , DNS, Optimizing the Server
+@subsection @code{SET} Syntax
+
+@findex SET OPTION
+
+@example
+SET [OPTION] SQL_VALUE_OPTION= value, ...
+@end example
+
+@code{SET OPTION} sets various options that affect the operation of the
+server or your client. Any option you set remains in effect until the
+current session ends, or until you set the option to a different value.
+
+@table @code
+@item CHARACTER SET character_set_name | DEFAULT
+This maps all strings from and to the client with the given mapping.
+Currently the only option for @code{character_set_name} is
+@code{cp1251_koi8}, but you can easily add new mappings by editing the
+@file{sql/convert.cc} file in the @strong{MySQL} source distribution. The
+default mapping can be restored by using a @code{character_set_name} value of
+@code{DEFAULT}.
+
+Note that the syntax for setting the @code{CHARACTER SET} option differs
+from the syntax for setting the other options.
+
+@item PASSWORD = PASSWORD('some password')
+@cindex passwords, setting
+Set the password for the current user. Any non-anonymous user can change his
+own password!
+
+@item PASSWORD FOR user = PASSWORD('some password')
+Set the password for a specific user on the current server host. Only a user
+with access to the @code{mysql} database can do this. The user should be
+given in @code{user@@hostname} format, where @code{user} and @code{hostname}
+are exactly as they are listed in the @code{User} and @code{Host} columns of
+the @code{mysql.user} table entry. For example, if you had an entry with
+@code{User} and @code{Host} fields of @code{'bob'} and @code{'%.loc.gov'},
+you would write:
+
+@example
+mysql> SET PASSWORD FOR bob@@"%.loc.gov" = PASSWORD("newpass");
+
+or
+
+mysql> UPDATE mysql.user SET password=PASSWORD("newpass") WHERE user="bob" AND host="%.loc.gov";
+@end example
+
+@item SQL_AUTO_IS_NULL = 0 | 1
+If set to @code{1} (the default), one can find the last inserted row
+for a table with an @code{AUTO_INCREMENT} column with the following construct:
+@code{WHERE auto_increment_column IS NULL}. This is used by some
+ODBC programs like Access.
+
+@item AUTOCOMMIT= 0 | 1
+If set to @code{1} all changes to a table will be done at once. To start
+a multi-command transaction, you have to use the @code{BEGIN}
+statement. @xref{COMMIT}. If set to @code{0} you have to use @code{COMMIT} /
+@code{ROLLBACK} to accept/revoke that transaction. @xref{COMMIT}. Note
+that when you change from not @code{AUTOCOMMIT} mode to
+@code{AUTOCOMMIT} mode, @strong{MySQL} will do an automatic
+@code{COMMIT} on any open transactions.
+
+@item SQL_BIG_TABLES = 0 | 1
+@cindex table is full
+If set to @code{1}, all temporary tables are stored on disk rather than in
+memory. This will be a little slower, but you will not get the error
+@code{The table tbl_name is full} for big @code{SELECT} operations that
+require a large temporary table. The default value for a new connection is
+@code{0} (that is, use in-memory temporary tables).
+
+@item SQL_BIG_SELECTS = 0 | 1
+If set to @code{0}, @strong{MySQL} will abort if a @code{SELECT} is attempted
+that probably will take a very long time. This is useful when an inadvisable
+@code{WHERE} statement has been issued. A big query is defined as a
+@code{SELECT} that probably will have to examine more than
+@code{max_join_size} rows. The default value for a new connection is
+@code{1} (which will allow all @code{SELECT} statements).
+
+@item SQL_BUFFER_RESULT = 0 | 1
+@code{SQL_BUFFER_RESULT} will force the result from @code{SELECT}'s
+to be put into a temporary table. This will help @strong{MySQL} free the
+table locks early and will help in cases where it takes a long time to
+send the result set to the client.
+
+@item SQL_LOW_PRIORITY_UPDATES = 0 | 1
+If set to @code{1}, all @code{INSERT}, @code{UPDATE}, @code{DELETE}, and
+@code{LOCK TABLE WRITE} statements wait until there is no pending
+@code{SELECT} or @code{LOCK TABLE READ} on the affected table.
+
+@item SQL_MAX_JOIN_SIZE = value | DEFAULT
+Don't allow @code{SELECT}s that will probably need to examine more than
+@code{value} row combinations. By setting this value, you can catch
+@code{SELECT}s where keys are not used properly and that would probably
+take a long time. Setting this to a value other than @code{DEFAULT} will reset
+the @code{SQL_BIG_SELECTS} flag. If you set the @code{SQL_BIG_SELECTS}
+flag again, the @code{SQL_MAX_JOIN_SIZE} variable will be ignored.
+You can set a default value for this variable by starting @code{mysqld} with
+@code{-O max_join_size=#}.
+
+@item SQL_SAFE_MODE = 0 | 1
+If set to @code{1}, @strong{MySQL} will abort if an @code{UPDATE} or
+@code{DELETE} is attempted that doesn't use a key or @code{LIMIT} in the
+@code{WHERE} clause. This makes it possible to catch wrong updates
+when creating SQL commands by hand.
+
+@item SQL_SELECT_LIMIT = value | DEFAULT
+The maximum number of records to return from @code{SELECT} statements. If
+a @code{SELECT} has a @code{LIMIT} clause, the @code{LIMIT} takes precedence
+over the value of @code{SQL_SELECT_LIMIT}. The default value for a new
+connection is ``unlimited.'' If you have changed the limit, the default value
+can be restored by using a @code{SQL_SELECT_LIMIT} value of @code{DEFAULT}.
+
+@item SQL_LOG_OFF = 0 | 1
+If set to @code{1}, no logging will be done to the standard log for this
+client, if the client has the @strong{process} privilege. This does not
+affect the update log!
+
+@item SQL_LOG_UPDATE = 0 | 1
+If set to @code{0}, no logging will be done to the update log for the client,
+if the client has the @strong{process} privilege. This does not affect the
+standard log!
+
+@item SQL_QUOTE_SHOW_CREATE = 0 | 1
+If set to @code{1}, @code{SHOW CREATE TABLE} will quote
+table and column names. This is @strong{on} by default,
+for replication of tables with fancy column names to work.
+@ref{SHOW CREATE TABLE, , @code{SHOW CREATE TABLE}}.
+
+@item TIMESTAMP = timestamp_value | DEFAULT
+Set the time for this client. This is used to get the original timestamp if
+you use the update log to restore rows. @code{timestamp_value} should be a
+UNIX Epoch timestamp, not a @strong{MySQL} timestamp.
+
+@item LAST_INSERT_ID = #
+Set the value to be returned from @code{LAST_INSERT_ID()}. This is stored in
+the update log when you use @code{LAST_INSERT_ID()} in a command that updates
+a table.
+
+@item INSERT_ID = #
+Set the value to be used by the following @code{INSERT} or @code{ALTER TABLE}
+command when inserting an @code{AUTO_INCREMENT} value. This is mainly used
+with the update log.
+@end table
+
+
+@menu
+* SET TRANSACTION::
+@end menu
+
+@node SET TRANSACTION, , SET OPTION, SET OPTION
+@subsubsection @code{SET TRANSACTION} Syntax
+
+@findex ISOLATION LEVEL
+
+@example
+SET [GLOBAL | SESSION] TRANSACTION ISOLATION LEVEL
+[READ UNCOMMITTED | READ COMMITTED | REPEATABLE READ | SERIALIZABLE]
+@end example
+
+Sets the transaction isolation level globally, for the whole session, or
+for the next transaction.
+
+The default behavior is to set the isolation level for the next (not started)
+transaction.
+
+If you use the @code{GLOBAL} keyword it will affect all newly created
+threads. You will need the @code{PROCESS} privilege to do this.
+
+Using the @code{SESSION} keyword will affect the following and all
+future transactions.
+
+You can set the default isolation level for @code{mysqld} with
+@code{--transaction-isolation=...}. @xref{Command-line options}.
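+
+For example (a sketch):
+
+@example
+mysql> SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+mysql> SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+@end example
+
+or, to set the server-wide default at startup:
+
+@example
+shell> safe_mysqld --transaction-isolation=READ-COMMITTED &
+@end example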
+
+
+@node Disk issues, , Optimizing the Server, MySQL Optimization
+@section Disk Issues
+
+@cindex disk issues
+@cindex performance, disk issues
+
+@itemize @bullet
+@item
+As mentioned before, disk seeks are a big performance bottleneck. This
+problem gets more and more apparent when the data starts to grow so
+large that effective caching becomes impossible. For large databases,
+where you access data more or less randomly, you can be sure that you
+will need at least one disk seek to read and a couple of disk seeks to
+write things. To minimize this problem, use disks with low seek times.
+
+@item
+Increase the number of available disk spindles (and thereby reduce
+the seek overhead) by either symlinking files to different disks or striping
+the disks.
+
+@table @strong
+@item Using symbolic links
+This means that you symlink the index and/or data file(s) from the
+normal data directory to another disk (that may also be striped). This
+makes both the seek and read times better (if the disks are not used for
+other things). @xref{Symbolic links}.
+
+@cindex striping, defined
+@item Striping
+Striping means that you have many disks and put the first block on the
+first disk, the second block on the second disk, and the Nth on the
+(N mod number_of_disks) disk, and so on. This means if your normal data
+size is less than the stripe size (or perfectly aligned) you will get
+much better performance. Note that striping is very dependent on the OS
+and stripe-size. So benchmark your application with different
+stripe-sizes. @xref{Custom Benchmarks}.
+
+Note that the speed difference for striping is @strong{very} dependent
+on the parameters. Depending on how you set the striping parameters and
+number of disks you may get a difference in orders of magnitude. Note that
+you have to choose to optimize for random or sequential access.
+@end table
+
+@item
+For reliability you may want to use RAID 0+1 (striping + mirroring), but
+in this case you will need 2*N drives to hold N drives of data. This is
+probably the best option if you have the money for it! You may, however,
+also have to invest in some volume-management software to handle it
+efficiently.
+
+@item
+A good option is to have semi-important data (that can be regenerated)
+on a RAID 0 disk while storing really important data (like host information
+and logs) on a RAID 0+1 or RAID N disk. RAID N can be a problem if you
+have many writes because of the time to update the parity bits.
+
+@item
+You may also set the parameters for the file system that the database
+uses. One easy change is to mount the file system with the noatime
+option. That makes it skip the updating of the last access time in the
+inode and by this will avoid some disk seeks.
+
+@item
+On Linux, you can get much more performance (up to 100 % under load is
+not uncommon) by using @code{hdparm} to configure your disk's interface! The
+following should be quite good hdparm options for @strong{MySQL} (and
+probably many other applications):
+
+@example
+hdparm -m 16 -d 1
+@end example
+
+Note that the performance/reliability when using the above depends on
+your hardware, so we strongly suggest that you test your system
+thoroughly after using @code{hdparm}! Please consult the @code{hdparm}
+man page for more information! If @code{hdparm} is not used wisely,
+filesystem corruption may result. Back up everything before experimenting!
+
+@item
+On many operating systems you can mount the disks with the 'async' flag to
+set the file system to be updated asynchronously. If your computer is
+reasonably stable, this should give you more performance without sacrificing
+too much reliability. (This flag is on by default on Linux.)
+
+@item
+If you don't need to know when a file was last accessed (which is not
+really useful on a database server), you can mount your file systems
+with the noatime flag.
+@end itemize
+
+@menu
+* Symbolic links::
+@end menu
+
+@node Symbolic links, , Disk issues, Disk issues
+@subsection Using Symbolic Links
+
+@cindex symbolic links
+@cindex links, symbolic
+
+You can move tables and databases from the database directory to other
+locations and replace them with symbolic links to the new locations.
+You might want to do this, for example, to move a database to a file
+system with more free space or to increase the speed of your system by
+spreading your tables over different disks.
+
+The recommended way to do this is to just symlink databases to a different
+disk and only symlink tables as a last resort.
+
+@cindex databases, symbolic links
+@menu
+* Symbolic links to databases::
+* Symbolic links to tables::
+@end menu
+
+
+@node Symbolic links to databases, Symbolic links to tables, Symbolic links, Symbolic links
+@subsubsection Using Symbolic Links for Databases
+
+The way to symlink a database is to first create a directory on some
+disk where you have free space and then create a symlink to it from
+the @strong{MySQL} database directory.
+
+@example
+shell> mkdir /dr1/databases/test
+shell> ln -s /dr1/databases/test mysqld-datadir
+@end example
+
+@strong{MySQL} doesn't support linking one directory to multiple
+databases. Replacing a database directory with a symbolic link will
+work fine as long as you don't make a symbolic link between databases.
+Suppose you have a database @code{db1} under the @strong{MySQL} data
+directory, and then make a symlink @code{db2} that points to @code{db1}:
+
+@example
+shell> cd /path/to/datadir
+shell> ln -s db1 db2
+@end example
+
+Now, for any table @code{tbl_a} in @code{db1}, there also appears to be
+a table @code{tbl_a} in @code{db2}. If one thread updates @code{db1.tbl_a}
+and another thread updates @code{db2.tbl_a}, there will be problems.
+
+If you really need this, you must change the following code in
+@file{mysys/mf_format.c}:
+
+@example
+if (flag & 32 || (!lstat(to,&stat_buff) && S_ISLNK(stat_buff.st_mode)))
+@end example
+
+to
+
+@example
+if (1)
+@end example
+
+On Windows you can use internal symbolic links to directories by compiling
+@strong{MySQL} with @code{-DUSE_SYMDIR}. This allows you to put different
+databases on different disks. @xref{Windows symbolic links}.
+
+
+@node Symbolic links to tables, , Symbolic links to databases, Symbolic links
+@subsubsection Using Symbolic Links for Tables
+
+@cindex databases, symbolic links
+
+Before @strong{MySQL} 4.0 you should not symlink tables unless you are
+very careful with them. The problem is that if you run @code{ALTER
+TABLE}, @code{REPAIR TABLE}, or @code{OPTIMIZE TABLE} on a symlinked
+table, the symlinks will be removed and replaced by the original
+files. This happens because these commands work by creating a
+temporary file in the database directory and, when the command is
+complete, replacing the original file with the temporary file.
+
+You should not symlink tables on systems that don't have a fully
+working @code{realpath()} call. (At least Linux and Solaris support
+@code{realpath()}.)
+
+In @strong{MySQL} 4.0 symlinks are only fully supported for @code{MyISAM}
+tables. For other table types you will probably get strange problems
+when doing any of the above-mentioned commands.
+
+The handling of symbolic links in @strong{MySQL} 4.0 works as follows
+(this is mostly relevant only for @code{MyISAM} tables):
+
+@itemize @bullet
+@item
+In the data directory you will always have the table definition file
+and the data/index files.
+
+@item
+You can symlink the index file and the data file to different directories,
+independently of each other.
+
+@item
+The symlinking can be done from the operating system (if @code{mysqld} is
+not running) or with the @code{INDEX/DATA DIRECTORY="path-to-dir"} options
+in @code{CREATE TABLE}; see the example following this list.
+@xref{CREATE TABLE}.
+
+@item
+@code{myisamchk} will not replace a symlink with the index/data file, but
+will work directly on the files the symlinks point to. Any temporary files
+will be created in the same directory where the data/index file is.
+
+@item
+When you drop a table that is using symlinks, both the symlink and the
+file the symlink points to are dropped. This is a good reason why you
+should NOT run @code{mysqld} as root or allow anyone to have write
+access to the @strong{MySQL} database directories.
+
+@item
+If you rename a table with @code{ALTER TABLE RENAME} and you don't change
+databases, the symlink in the database directory will be renamed to the new
+name and the data/index file will be renamed accordingly.
+
+@item
+If you use @code{ALTER TABLE RENAME} to move a table to another database,
+then the table will be moved to the other database directory and the old
+symlinks and the files they pointed to will be deleted.
+
+@item
+If you are not using symlinks, you should use the @code{--skip-symlink}
+option to @code{mysqld} to ensure that no one can use @code{mysqld} to
+drop or rename a file outside of its data directory.
+@end itemize
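+
+As a sketch of the @code{CREATE TABLE} variant mentioned in the list
+above (the table name and directory paths are only placeholders), a
+@code{MyISAM} table with its data file and index file on different
+disks could be created like this:
+
+@example
+mysql> CREATE TABLE t1 (i INT NOT NULL, PRIMARY KEY (i))
+           TYPE=MYISAM
+           DATA DIRECTORY="/disk2/mysql-data"
+           INDEX DIRECTORY="/disk3/mysql-index";
+@end example
+
+The server then creates the @file{.MYD} and @file{.MYI} files in the
+given directories and puts symlinks to them in the database directory,
+as described above.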
+
+Things that are not yet supported:
+
+@cindex TODO, symlinks
+@itemize @bullet
+@item
+@code{ALTER TABLE} ignores all @code{DATA DIRECTORY} and @code{INDEX DIRECTORY} options.
+@item
+@code{CREATE TABLE} doesn't report if the table has symbolic links.
+@item
+@code{mysqldump} doesn't include the symbolic links information in the output.
+@item
+@code{BACKUP TABLE} and @code{RESTORE TABLE} don't use symbolic links.
+@end itemize
+
+
+
+
+
+@node Reference, Table types, MySQL Optimization, Top
@chapter MySQL Language Reference
@menu
@@ -15954,12 +26703,6 @@ to restart @code{mysqld} with @code{--skip-grant-tables} to run
* ALTER TABLE:: @code{ALTER TABLE} syntax
* RENAME TABLE:: @code{RENAME TABLE} syntax
* DROP TABLE:: @code{DROP TABLE} syntax
-* OPTIMIZE TABLE:: @code{OPTIMIZE TABLE} syntax
-* CHECK TABLE:: @code{CHECK TABLE} syntax
-* BACKUP TABLE:: @code{BACKUP TABLE} syntax
-* RESTORE TABLE:: @code{RESTORE TABLE} syntax
-* ANALYZE TABLE:: @code{ANALYZE TABLE} syntax
-* REPAIR TABLE:: @code{REPAIR TABLE} syntax
* DELETE:: @code{DELETE} syntax
* TRUNCATE:: @code{TRUNCATE} syntax
* SELECT:: @code{SELECT} syntax
@@ -15969,16 +26712,9 @@ to restart @code{mysqld} with @code{--skip-grant-tables} to run
* LOAD DATA:: @code{LOAD DATA INFILE} syntax
* UPDATE:: @code{UPDATE} syntax
* USE:: @code{USE} syntax
-* FLUSH:: @code{FLUSH} syntax (clearing caches)
-* KILL:: @code{KILL} syntax
-* SHOW:: @code{SHOW} syntax (Get information about tables, columns, ...)
-* EXPLAIN:: @code{EXPLAIN} syntax (Get information about a @code{SELECT})
* DESCRIBE:: @code{DESCRIBE} syntax (Get information about names of columns)
* COMMIT:: @code{BEGIN/COMMIT/ROLLBACK} syntax
* LOCK TABLES:: @code{LOCK TABLES/UNLOCK TABLES} syntax
-* SET OPTION:: @code{SET OPTION} syntax
-* SET TRANSACTION:: @code{SET TRANSACTION} syntax
-* GRANT:: @code{GRANT} and @code{REVOKE} syntax
* CREATE INDEX:: @code{CREATE INDEX} syntax
* DROP INDEX:: @code{DROP INDEX} syntax
* Comments:: Comment syntax
@@ -15993,13 +26729,15 @@ effectively. This chapter also serves as a reference to all functionality
included in @strong{MySQL}. In order to use this chapter effectively, you
may find it useful to refer to the various indexes.
+
+@node Literals, Variables, Reference, Reference
+@section Literals: How to Write Strings and Numbers
+
@cindex strings, defined
@cindex strings, escaping characters
@cindex literals
@cindex escape characters
@cindex backslash, escape character
-@node Literals, Variables, Reference, Reference
-@section Literals: How to Write Strings and Numbers
@menu
* String syntax:: Strings
@@ -16773,8 +27511,6 @@ be chosen from the list of values @code{'value1'}, @code{'value2'},
* Date and time types:: Date and time types
* String types:: String types
* Choosing types:: Choosing the right type for a column
-* Indexes:: Column indexes
-* Multiple-column indexes:: Multiple-column indexes
* Other-vendor column types:: Using column types from other database engines
@end menu
@@ -17831,7 +28567,7 @@ the @code{SET} definition in the second column.
@cindex types, columns
@cindex choosing types
-@node Choosing types, Indexes, String types, Column types
+@node Choosing types, Other-vendor column types, String types, Column types
@subsection Choosing the Right Type for a Column
For the most efficient use of storage, try to use the most precise type in
@@ -17848,113 +28584,13 @@ For high precision, you can always convert to a fixed-point type stored
in a @code{BIGINT}. This allows you to do all calculations with integers
and convert results back to floating-point values only when necessary.
-@cindex indexes, columns
-@cindex columns, indexes
-@cindex keys
-@node Indexes, Multiple-column indexes, Choosing types, Column types
-@subsection Column Indexes
-
-All @strong{MySQL} column types can be indexed. Use of indexes on the
-relevant columns is the best way to improve the performance of @code{SELECT}
-operations.
-
-The maximum number of keys and the maximum index length is defined per
-table handler. @xref{Table types}. You can with all table handlers have
-at least 16 keys and a total index length of at least 256 bytes.
-
-For @code{CHAR} and @code{VARCHAR} columns, you can index a prefix of a
-column. This is much faster and requires less disk space than indexing the
-whole column. The syntax to use in the @code{CREATE TABLE} statement to
-index a column prefix looks like this:
-@example
-KEY index_name (col_name(length))
-@end example
-
-The example below creates an index for the first 10 characters of the
-@code{name} column:
-
-@example
-mysql> CREATE TABLE test (
- name CHAR(200) NOT NULL,
- KEY index_name (name(10)));
-@end example
-
-For @code{BLOB} and @code{TEXT} columns, you must index a prefix of the
-column. You cannot index the entire column.
-
-In @strong{MySQL} Version 3.23.23 or later, you can also create special
-@strong{FULLTEXT} indexes. They are used for full-text search. Only the
-@code{MyISAM} table type supports @code{FULLTEXT} indexes. They can be
-created only from @code{VARCHAR} and @code{TEXT} columns.
-Indexing always happens over the entire column and partial indexing is not
-supported. See @ref{Fulltext Search} for details.
-
-@cindex multi-column indexes
-@cindex indexes, multi-column
-@cindex keys, multi-column
-@node Multiple-column indexes, Other-vendor column types, Indexes, Column types
-@subsection Multiple-column Indexes
-
-@strong{MySQL} can create indexes on multiple columns. An index may
-consist of up to 15 columns. (On @code{CHAR} and @code{VARCHAR} columns you
-can also use a prefix of the column as a part of an index).
-
-A multiple-column index can be considered a sorted array containing values
-that are created by concatenating the values of the indexed columns.
-
-@strong{MySQL} uses multiple-column indexes in such a way that queries are
-fast when you specify a known quantity for the first column of the index in a
-@code{WHERE} clause, even if you don't specify values for the other columns.
-
-Suppose a table is created using the following specification:
-
-@example
-mysql> CREATE TABLE test (
- id INT NOT NULL,
- last_name CHAR(30) NOT NULL,
- first_name CHAR(30) NOT NULL,
- PRIMARY KEY (id),
- INDEX name (last_name,first_name));
-@end example
-
-Then the index @code{name} is an index over @code{last_name} and
-@code{first_name}. The index will be used for queries that specify
-values in a known range for @code{last_name}, or for both @code{last_name}
-and @code{first_name}.
-Therefore, the @code{name} index will be used in the following queries:
-
-@example
-mysql> SELECT * FROM test WHERE last_name="Widenius";
-
-mysql> SELECT * FROM test WHERE last_name="Widenius"
- AND first_name="Michael";
-
-mysql> SELECT * FROM test WHERE last_name="Widenius"
- AND (first_name="Michael" OR first_name="Monty");
-
-mysql> SELECT * FROM test WHERE last_name="Widenius"
- AND first_name >="M" AND first_name < "N";
-@end example
-
-However, the @code{name} index will NOT be used in the following queries:
-
-@example
-mysql> SELECT * FROM test WHERE first_name="Michael";
-
-mysql> SELECT * FROM test WHERE last_name="Widenius"
- OR first_name="Michael";
-@end example
-
-For more information on the manner in which @strong{MySQL} uses indexes to
-improve query performance, see @ref{MySQL indexes, , @strong{MySQL}
-indexes}.
+@node Other-vendor column types, , Choosing types, Column types
+@subsection Using Column Types from Other Database Engines
@cindex types, portability
@cindex portability, types
@cindex columns, other types
-@node Other-vendor column types, , Multiple-column indexes, Column types
-@subsection Using Column Types from Other Database Engines
To make it easier to use code written for SQL implementations from other
vendors, @strong{MySQL} maps column types as shown in the table below. These
@@ -19935,7 +30571,7 @@ mysql> select PERIOD_DIFF(9802,199703);
@findex DATE_SUB()
@findex ADDDATE()
@findex SUBDATE()
-@findex EXTRACT(type FROM date)
+@findex EXTRACT()
@item DATE_ADD(date,INTERVAL expr type)
@itemx DATE_SUB(date,INTERVAL expr type)
@itemx ADDDATE(date,INTERVAL expr type)
@@ -19955,7 +30591,7 @@ or subtracted from the starting date. @code{expr} is a string; it may start
with a @samp{-} for negative intervals. @code{type} is a keyword indicating
how the expression should be interpreted.
-The @code{EXTRACT(type FROM date)} function returns the 'type'
+The related function @code{EXTRACT(type FROM date)} returns the 'type'
interval from the date.
The following table shows how the @code{type} and @code{expr} arguments
@@ -20009,12 +30645,6 @@ mysql> SELECT DATE_ADD("1998-01-01 00:00:00",
-> 1997-12-30 14:00:00
mysql> SELECT DATE_SUB("1998-01-02", INTERVAL 31 DAY);
-> 1997-12-02
-mysql> SELECT EXTRACT(YEAR FROM "1999-07-02");
- -> 1999
-mysql> SELECT EXTRACT(YEAR_MONTH FROM "1999-07-02 01:02:03");
- -> 199907
-mysql> SELECT EXTRACT(DAY_MINUTE FROM "1999-07-02 01:02:03");
- -> 20102
@end example
If you specify an interval value that is too short (does not include all the
@@ -20053,6 +30683,22 @@ mysql> select DATE_ADD('1998-01-30', Interval 1 month);
Note from the preceding example that the word @code{INTERVAL} and the
@code{type} keyword are not case sensitive.
+@findex EXTRACT()
+@item EXTRACT(type FROM date)
+
+The @code{EXTRACT()} function uses the same kinds of interval type
+specifiers as @code{DATE_ADD()} or @code{DATE_SUB()}, but extracts parts
+from the date rather than performing date arithmetic.
+
+@example
+mysql> SELECT EXTRACT(YEAR FROM "1999-07-02");
+ -> 1999
+mysql> SELECT EXTRACT(YEAR_MONTH FROM "1999-07-02 01:02:03");
+ -> 199907
+mysql> SELECT EXTRACT(DAY_MINUTE FROM "1999-07-02 01:02:03");
+ -> 20102
+@end example
+
@findex TO_DAYS()
@item TO_DAYS(date)
Given a date @code{date}, returns a daynumber (the number of days since year
@@ -20724,7 +31370,7 @@ only creates a directory under the @strong{MySQL} data directory.
@cindex @code{mysqladmin}
You can also create databases with @code{mysqladmin}.
-@xref{Programs}.
+@xref{Client-Side Scripts}.
@findex DROP DATABASE
@node DROP DATABASE, CREATE TABLE, CREATE DATABASE, Reference
@@ -20761,7 +31407,7 @@ In @strong{MySQL} Version 3.22 or later, you can use the keywords
exist.
@cindex @code{mysqladmin}
-You can also drop databases with @code{mysqladmin}. @xref{Programs}.
+You can also drop databases with @code{mysqladmin}. @xref{Client-Side Scripts}.
@findex CREATE TABLE
@node CREATE TABLE, ALTER TABLE, DROP DATABASE, Reference
@@ -21167,6 +31813,15 @@ mysql> select * from bar;
For each row in table @code{foo}, a row is inserted in @code{bar} with
the values from @code{foo} and default values for the new columns.
+@code{CREATE TABLE ... SELECT} will not automatically create any indexes
+for you. This is done intentionally to make the command as flexible as
+possible. If you want to have indexes in the created table, you should
+specify these before the @code{SELECT} statement:
+
+@example
+mysql> create table bar (unique (n)) select n from foo;
+@end example
+
If any errors occur while copying the data to the table, it will
automatically be deleted.
@@ -21567,7 +32222,7 @@ will do a reverse rename for all renamed tables to get everything back
to the original state.
@findex DROP TABLE
-@node DROP TABLE, OPTIMIZE TABLE, RENAME TABLE, Reference
+@node DROP TABLE, DELETE, RENAME TABLE, Reference
@section @code{DROP TABLE} Syntax
@example
@@ -21587,295 +32242,11 @@ For the moment they don't do anything.
@strong{NOTE}: @code{DROP TABLE} is not transaction-safe and will
automatically commit any active transactions.
-@cindex tables, defragment
-@cindex tables, fragmentation
-@findex OPTIMIZE TABLE
-@node OPTIMIZE TABLE, CHECK TABLE, DROP TABLE, Reference
-@section @code{OPTIMIZE TABLE} Syntax
-
-@example
-OPTIMIZE TABLE tbl_name[,tbl_name]...
-@end example
-
-@code{OPTIMIZE TABLE} should be used if you have deleted a large part of a
-table or if you have made many changes to a table with variable-length rows
-(tables that have @code{VARCHAR}, @code{BLOB}, or @code{TEXT} columns).
-Deleted records are maintained in a linked list and subsequent @code{INSERT}
-operations reuse old record positions. You can use @code{OPTIMIZE TABLE} to
-reclaim the unused space and to defragment the data file.
-
-For the moment @code{OPTIMIZE TABLE} only works on @strong{MyISAM} and
-@code{BDB} tables. For @code{BDB} tables, @code{OPTIMIZE TABLE} is
-currently mapped to @code{ANALYZE TABLE}. @xref{ANALYZE TABLE}.
-
-You can get optimize table to work on other table types by starting
-@code{mysqld} with @code{--skip-new} or @code{--safe-mode}, but in this
-case @code{OPTIMIZE TABLE} is just mapped to @code{ALTER TABLE}.
-
-@code{OPTIMIZE TABLE} works the following way:
-@itemize @bullet
-@item
-If the table has deleted or split rows, repair the table.
-@item
-If the index pages are not sorted, sort them.
-@item
-If the statistics are not up to date (and the repair couldn't be done
-by sorting the index), update them.
-@end itemize
-
-@code{OPTIMIZE TABLE} for @code{MyISAM} tables is equvialent of running
-@code{myisamchk --quick --check-changed-tables --sort-index --analyze}
-on the table.
-
-Note that the table is locked during the time @code{OPTIMIZE TABLE} is
-running!
-
-@findex CHECK TABLE
-@node CHECK TABLE, BACKUP TABLE, OPTIMIZE TABLE, Reference
-@section @code{CHECK TABLE} Syntax
-
-@example
-CHECK TABLE tbl_name[,tbl_name...] [option [option...]]
-
-option = QUICK | FAST | MEDIUM | EXTEND | CHANGED
-@end example
-
-@code{CHECK TABLE} only works on @code{MyISAM} tables. On
-@code{MyISAM} tables it's the same thing as running @code{myisamchk -m
-table_name} on the table.
-
-If you don't specify any option @code{MEDIUM} is used.
-
-Checks the table(s) for errors. For @code{MyISAM} tables the key statistics
-is updated. The command returns a table with the following columns:
-
-@multitable @columnfractions .35 .65
-@item @strong{Column} @tab @strong{Value}
-@item Table @tab Table name.
-@item Op @tab Always ``check''.
-@item Msg_type @tab One of @code{status}, @code{error}, @code{info}, or @code{warning}.
-@item Msg_text @tab The message.
-@end multitable
-
-Note that you can get many rows of information for each checked
-table. The last row will be of @code{Msg_type status} and should
-normally be @code{OK}. If you don't get @code{OK}, or @code{Not
-checked} you should normally run a repair of the table. @xref{Table
-maintenance}. @code{Not checked} means that the table the given @code{TYPE}
-told @strong{MySQL} that there wasn't any need to check the table.
-
-The different check types stand for the following:
-
-@multitable @columnfractions .20 .80
-@item @strong{Type} @tab @strong{Meaning}
-@item @code{QUICK} @tab Don't scan the rows to check for wrong links.
-@item @code{FAST} @tab Only check tables which haven't been closed properly.
-@item @code{CHANGED} @tab Only check tables which have been changed since last check or haven't been closed properly.
-@item @code{MEDIUM} @tab Scan rows to verify that deleted links are ok. This also calculates a key checksum for the rows and verifies this with a calcualted checksum for the keys.
-@item @code{EXTENDED} @tab Do a full key lookup for all keys for each row. This ensures that the table is 100 % consistent, but will take a long time!
-@end multitable
-
-For dynamic sized @code{MyISAM} tables a started check will always
-do a @code{MEDIUM} check. For static size rows we skip the row scan
-for @code{QUICK} and @code{FAST} as the rows are very seldom corrupted.
-
-You can combine check options as in:
-
-@example
-CHECK TABLE test_table FAST QUICK;
-@end example
-
-Which only would do a quick check on the table if it wasn't closed properly.
-
-@strong{NOTE:} that in some case @code{CHECK TABLE} will change the
-table! This happens if the table is marked as 'corrupted' or 'not
-closed properly' but @code{CHECK TABLE} didn't find any problems in the
-table. In this case @code{CHECK TABLE} will mark the table as ok.
-
-If a table is corrupted, then it's most likely that the problem is in
-the indexes and not in the data part. All of the above check types
-checks the indexes throughly and should thus find most errors.
-
-If you just want to check a table that you assume is ok, you should use
-no check options or the @code{QUICK} option. The later should be used
-when you are in a hurry and can take the very small risk that
-@code{QUICK} didn't find an error in the data file (In most cases
-@strong{MySQL} should find, under normal usage, any error in the data
-file. If this happens then the table will be marked as 'corrupted',
-in which case the table can't be used until it's repaired).
-
-@code{FAST} and @code{CHANGED} are mostly intended to be used from a
-script (for example to be executed from cron) if you want to check your
-table from time to time. In most cases you @code{FAST} is to be prefered
-over @code{CHANGED}. (The only case when it isn't is when you suspect a
-bug you have found a bug in the @code{MyISAM} code.).
-
-@code{EXTENDED} is only to be used after you have run a normal check but
-still get strange errors from a table when @strong{MySQL} tries to
-update a row or find a row by key (this is VERY unlikely to happen if a
-normal check has succeeded!).
-
-Some things reported by check table, can't be corrected automatically:
-
-@itemize @bullet
-@item
-@code{Found row where the auto_increment column has the value 0}.
-
-This means that you have in the table a row where the
-@code{auto_increment} index column contains the value 0.
-(It's possible to create a row where the auto_increment column is 0 by
-explicitely setting the column to 0 with an @code{UPDATE} statement)
-
-This isn't an error in itself, but could cause trouble if you decide to
-dump the table and restore it or do an @code{ALTER TABLE} on the
-table. In this case the auto_increment column will change value,
-according to the rules of auto_increment columns, which could cause
-problems like a duplicate key error.
-
-To get rid of the warning, just execute an @code{UPDATE} statement
-to set the column to some other value than 0.
-@end itemize
-
-
-@findex BACKUP TABLE
-@node BACKUP TABLE, RESTORE TABLE, CHECK TABLE, Reference
-@section @code{BACKUP TABLE} Syntax
-
-@example
-BACKUP TABLE tbl_name[,tbl_name...] TO '/path/to/backup/directory'
-@end example
-
-Make a copy of all the table files to the backup directory that are the
-minimum needed to restore it. Currenlty only works for @code{MyISAM}
-tables. For @code{MyISAM} table, copies @code{.frm} (definition) and
-@code{.MYD} (data) files. The index file can be rebuilt from those two.
-
-Before using this command, please see @xref{Backup}.
-
-During the backup, read lock will be held for each table, one at time,
-as they are being backed up. If you want to backup several tables as
-a snapshot, you must first issue @code{LOCK TABLES} obtaining a read
-lock for each table in the group.
-
-The command returns a table with the following columns:
-
-@multitable @columnfractions .35 .65
-@item @strong{Column} @tab @strong{Value}
-@item Table @tab Table name
-@item Op @tab Always ``backup''
-@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
-@item Msg_text @tab The message.
-@end multitable
-
-Note that @code{BACKUP TABLE} is only available in @strong{MySQL}
-version 3.23.25 and later.
-
-@findex RESTORE TABLE
-@node RESTORE TABLE, ANALYZE TABLE, BACKUP TABLE, Reference
-@section @code{RESTORE TABLE} Syntax
-
-@example
-RESTORE TABLE tbl_name[,tbl_name...] FROM '/path/to/backup/directory'
-@end example
-
-Restores the table(s) from the backup that was made with
-@code{BACKUP TABLE}. Existing tables will not be overwritten - if you
-try to restore over an existing table, you will get an error. Restore
-will take longer than BACKUP due to the need to rebuilt the index. The
-more keys you have, the longer it is going to take. Just as
-@code{BACKUP TABLE}, currently only works of @code{MyISAM} tables.
-
-
-The command returns a table with the following columns:
-
-@multitable @columnfractions .35 .65
-@item @strong{Column} @tab @strong{Value}
-@item Table @tab Table name
-@item Op @tab Always ``restore''
-@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
-@item Msg_text @tab The message.
-@end multitable
-
-
-@findex ANALYZE TABLE
-@node ANALYZE TABLE, REPAIR TABLE, RESTORE TABLE, Reference
-@section @code{ANALYZE TABLE} Syntax
-
-@example
-ANALYZE TABLE tbl_name[,tbl_name...]
-@end example
-
-Analyze and store the key distribution for the table. During the
-analyze the table is locked with a read lock. This works on
-@code{MyISAM} and @code{BDB} tables.
-
-This is equivalent to running @code{myisamchk -a} on the table.
-
-@strong{MySQL} uses the stored key distribution to decide in which order
-tables should be joined when one does a join on something else than a
-constant.
-
-The command returns a table with the following columns:
-
-@multitable @columnfractions .35 .65
-@item @strong{Column} @tab @strong{Value}
-@item Table @tab Table name
-@item Op @tab Always ``analyze''
-@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
-@item Msg_text @tab The message.
-@end multitable
-
-You can check the stored key distribution with the @code{SHOW INDEX} command.
-@xref{SHOW DATABASE INFO}.
-
-If the table hasn't changed since the last @code{ANALYZE TABLE} command,
-the table will not be analyzed again.
-
-@findex REPAIR TABLE
-@node REPAIR TABLE, DELETE, ANALYZE TABLE, Reference
-@section @code{REPAIR TABLE} Syntax
-
-@example
-REPAIR TABLE tbl_name[,tbl_name...] [QUICK] [EXTENDED]
-@end example
-
-@code{REPAIR TABLE} only works on @code{MyISAM} tables and is the same
-as running @code{myisamchk -r table_name} on the table.
-
-Normally you should never have to run this command, but if disaster strikes
-you are very likely to get back all your data from a MyISAM table with
-@code{REPAIR TABLE}. If your tables get corrupted a lot you should
-try to find the reason for this! @xref{Crashing}. @xref{MyISAM table problems}.
-
-@code{REPAIR TABLE} repairs a possible corrupted table. The command returns a
-table with the following columns:
-@multitable @columnfractions .35 .65
-@item @strong{Column} @tab @strong{Value}
-@item Table @tab Table name
-@item Op @tab Always ``repair''
-@item Msg_type @tab One of @code{status}, @code{error}, @code{info} or @code{warning}.
-@item Msg_text @tab The message.
-@end multitable
-
-Note that you can get many rows of information for each repaired
-table. The last one row will be of @code{Msg_type status} and should
-normally be @code{OK}. If you don't get @code{OK}, you should try
-repairing the table with @code{myisamchk -o}, as @code{REPAIR TABLE}
-does not yet implement all the options of @code{myisamchk}. In the near
-future, we will make it more flexible.
-
-If @code{QUICK} is given then @strong{MySQL} will try to do a
-@code{REPAIR} of only the index tree.
-
-If you use @code{EXTENDED} then @strong{MySQL} will create the index row
-by row instead of creating one index at a time with sorting; This may be
-better than sorting on fixed-length keys if you have long @code{char()}
-keys that compress very good.
+@node DELETE, TRUNCATE, DROP TABLE, Reference
+@section @code{DELETE} Syntax
@findex DELETE
-@node DELETE, TRUNCATE, REPAIR TABLE, Reference
-@section @code{DELETE} Syntax
@example
DELETE [LOW_PRIORITY] FROM tbl_name
@@ -23269,7 +33640,7 @@ In @strong{MySQL} Version 3.23, you can use @code{LIMIT #} to ensure that
only a given number of rows are changed.
@findex USE
-@node USE, FLUSH, UPDATE, Reference
+@node USE, DESCRIBE, UPDATE, Reference
@section @code{USE} Syntax
@example
@@ -23303,1409 +33674,11 @@ mysql> SELECT author_name,editor_name FROM author,db2.editor
@cindex compatibility, with Sybase
The @code{USE} statement is provided for Sybase compatibility.
-@cindex @code{mysqladmin}
-@cindex clearing, caches
-@cindex caches, clearing
-@findex FLUSH
-@node FLUSH, KILL, USE, Reference
-@section @code{FLUSH} Syntax
-
-@example
-FLUSH flush_option [,flush_option]
-@end example
-
-You should use the @code{FLUSH} command if you want to clear some of the
-internal caches @strong{MySQL} uses. To execute @code{FLUSH}, you must have
-the @strong{RELOAD} privilege.
-
-@code{flush_option} can be any of the following:
-
-@multitable @columnfractions .15 .85
-@item @code{HOSTS} @tab Empties the host cache tables. You should flush the
-host tables if some of your hosts change IP number or if you get the
-error message @code{Host ... is blocked}. When more than
-@code{max_connect_errors} errors occur in a row for a given host while
-connection to the @strong{MySQL} server, @strong{MySQL} assumes
-something is wrong and blocks the host from further connection requests.
-Flushing the host tables allows the host to attempt to connect
-again. @xref{Blocked host}.) You can start @code{mysqld} with
-@code{-O max_connection_errors=999999999} to avoid this error message.
-
-@item @code{LOGS} @tab Closes and reopens all log files.
-If you have specified the update log file or a binary log file without
-an extension, the extension number of the log file will be incremented
-by one relative to the previous file. If you have used an extension in
-the file name, @strong{MySQL} will close and reopen the update log file.
-@xref{Update log}. This is the same thing as sending the @code{SIGHUP}
-signal to the @code{mysqld} server.
-
-@item @code{PRIVILEGES} @tab Reloads the privileges from the grant tables in
-the @code{mysql} database.
-
-@item @code{TABLES} @tab Closes all open tables and force all tables in use to be closed.
-
-@item @code{[TABLE | TABLES] table_name [,table_name...]} @tab Flushes only the given tables.
-
-@item @code{TABLES WITH READ LOCK} @tab Closes all open tables and locks all tables for all databases with a read until one executes @code{UNLOCK TABLES}. This is very convenient way to get backups if you have a file system, like Veritas,that can take snapshots in time.
-
-@item @code{STATUS} @tab Resets most status variables to zero. This is something one should only use when debugging a query.
-@end multitable
-
-You can also access each of the commands shown above with the @code{mysqladmin}
-utility, using the @code{flush-hosts}, @code{flush-logs}, @code{reload},
-or @code{flush-tables} commands.
-
-Take also a look at the @code{RESET} command used with
-replication. @xref{Replication SQL}.
-
-@cindex @code{mysqladmin}
-@findex KILL
-@node KILL, SHOW, FLUSH, Reference
-@section @code{KILL} Syntax
-@example
-KILL thread_id
-@end example
-
-Each connection to @code{mysqld} runs in a separate thread. You can see
-which threads are running with the @code{SHOW PROCESSLIST} command and kill
-a thread with the @code{KILL thread_id} command.
-
-If you have the @strong{process} privilege, you can see and kill all threads.
-Otherwise, you can see and kill only your own threads.
-
-You can also use the @code{mysqladmin processlist} and @code{mysqladmin kill}
-commands to examine and kill threads.
-
-When you do a @code{KILL}, a thread specific @code{kill flag} is set for
-the thread.
-
-In most cases it may take some time for the thread to die as the kill
-flag is only checked at specific intervals.
-
-@itemize @bullet
-@item
-In @code{SELECT}, @code{ORDER BY} and @code{GROUP BY} loops, the flag is
-checked after reading a block of rows. If the kill flag is set the
-statement is aborted
-@item
-When doing an @code{ALTER TABLE} the kill flag is checked before each block of
-rows are read from the original table. If the kill flag was set the command
-is aborted and the temporary table is deleted.
-@item
-When doing an @code{UPDATE TABLE} and @code{DELETE TABLE}, the kill flag
-is checked after each block read and after each updated or delete
-row. If the kill flag is set the statement is aborted. Note that if you
-are not using transactions, the changes will not be rolled back!
-@item
-@code{GET_LOCK()} will abort with @code{NULL}.
-@item
-An @code{INSERT DELAYED} thread will quickly flush all rows it has in
-memory and die.
-@item
-If the thread is in the table lock handler (state: @code{Locked}),
-the table lock will be quickly aborted.
-@item
-If the thread is waiting for free disk space in a @code{write} call, the
-write is aborted with an disk full error message.
-@end itemize
-
-@findex SHOW DATABASES
-@findex SHOW TABLES
-@findex SHOW COLUMNS
-@findex SHOW FIELDS
-@findex SHOW INDEX
-@findex SHOW KEYS
-@findex SHOW STATUS
-@findex SHOW VARIABLES
-@findex SHOW PROCESSLIST
-@findex SHOW TABLE STATUS
-@findex SHOW GRANTS
-@findex SHOW CREATE TABLE
-@findex SHOW MASTER STATUS
-@findex SHOW MASTER LOGS
-@findex SHOW SLAVE STATUS
-@node SHOW, EXPLAIN, KILL, Reference
-@section @code{SHOW} Syntax
-
-@example
- SHOW DATABASES [LIKE wild]
-or SHOW [OPEN] TABLES [FROM db_name] [LIKE wild]
-or SHOW [FULL] COLUMNS FROM tbl_name [FROM db_name] [LIKE wild]
-or SHOW INDEX FROM tbl_name [FROM db_name]
-or SHOW TABLE STATUS [FROM db_name] [LIKE wild]
-or SHOW STATUS [LIKE wild]
-or SHOW VARIABLES [LIKE wild]
-or SHOW LOGS
-or SHOW [FULL] PROCESSLIST
-or SHOW GRANTS FOR user
-or SHOW CREATE TABLE table_name
-or SHOW MASTER STATUS
-or SHOW MASTER LOGS
-or SHOW SLAVE STATUS
-@end example
-
-@code{SHOW} provides information about databases, tables, columns, or
-status information about the server. If the @code{LIKE wild} part is
-used, the @code{wild} string can be a string that uses the SQL @samp{%}
-and @samp{_} wild-card characters.
-
-@findex SHOW DATABASES
-@findex SHOW TABLES
-@findex SHOW COLUMNS
-@findex SHOW FIELDS
-@findex SHOW INDEX
-@findex SHOW KEYS
-@menu
-* SHOW DATABASE INFO::
-* SHOW TABLE STATUS::
-* SHOW STATUS::
-* SHOW VARIABLES::
-* SHOW LOGS::
-* SHOW PROCESSLIST::
-* SHOW GRANTS::
-* SHOW CREATE TABLE::
-@end menu
-
-@cindex displaying, information, @code{SHOW}
-@node SHOW DATABASE INFO, SHOW TABLE STATUS, SHOW, SHOW
-@subsection @code{SHOW} Information About Databases, Tables, Columns, and Indexes
-
-You can use @code{db_name.tbl_name} as an alternative to the @code{tbl_name
-FROM db_name} syntax. These two statements are equivalent:
-
-@example
-mysql> SHOW INDEX FROM mytable FROM mydb;
-mysql> SHOW INDEX FROM mydb.mytable;
-@end example
-
-@code{SHOW DATABASES} lists the databases on the @strong{MySQL} server
-host. You can also get this list using the @code{mysqlshow} command.
-
-@code{SHOW TABLES} lists the tables in a given database. You can also
-get this list using the @code{mysqlshow db_name} command.
-
-@strong{NOTE:} If a user doesn't have any privileges for a table, the table
-will not show up in the output from @code{SHOW TABLES} or @code{mysqlshow
-db_name}.
-
-@code{SHOW OPEN TABLES} lists the tables that are currently open in
-the table cache. @xref{Table cache}. The @code{Comment} field tells
-how many times the table is @code{cached} and @code{in_use}.
-
-@code{SHOW COLUMNS} lists the columns in a given table. If you specify
-the @code{FULL} option, you will also get the privileges you have for
-each column. If the column types are different than you expect them to
-be based on a @code{CREATE TABLE} statement, note that @strong{MySQL}
-sometimes changes column types. @xref{Silent column changes}.
-
-The @code{DESCRIBE} statement provides information similar to
-@code{SHOW COLUMNS}.
-@xref{DESCRIBE, , @code{DESCRIBE}}.
-
-@code{SHOW FIELDS} is a synonym for @code{SHOW COLUMNS}, and
-@code{SHOW KEYS} is a synonym for @code{SHOW INDEX}. You can also
-list a table's columns or indexes with @code{mysqlshow db_name tbl_name}
-or @code{mysqlshow -k db_name tbl_name}.
-
-@code{SHOW INDEX} returns the index information in a format that closely
-resembles the @code{SQLStatistics} call in ODBC. The following columns
-are returned:
-
-@multitable @columnfractions .35 .65
-@item @strong{Column} @tab @strong{Meaning}
-@item @code{Table} @tab Name of the table.
-@item @code{Non_unique} @tab 0 if the index can't contain duplicates.
-@item @code{Key_name} @tab Name of the index.
-@item @code{Seq_in_index} @tab Column sequence number in index,
- starting with 1.
-@item @code{Column_name} @tab Column name.
-@item @code{Collation} @tab How the column is sorted in the index.
- In @strong{MySQL}, this can have values
- @samp{A} (Ascending) or @code{NULL} (Not
- sorted).
-@item @code{Cardinality} @tab Number of unique values in the index.
- This is updated by running
- @code{isamchk -a}.
-@item @code{Sub_part} @tab Number of indexed characters if the
- column is only partly indexed.
- @code{NULL} if the entire key is indexed.
-@item @code{Comment} @tab Various remarks. For now, it tells
- whether index is FULLTEXT or not.
-@end multitable
-
-Note that as the @code{Cardinality} is counted based on statistics
-stored as integers, it's not necessarily accurate for small tables.
-
-@cindex displaying, table status
-@cindex tables, displaying status
-@cindex status, tables
-@node SHOW TABLE STATUS, SHOW STATUS, SHOW DATABASE INFO, SHOW
-@subsection @code{SHOW TABLE STATUS}
-
-@example
-SHOW TABLE STATUS [FROM db_name] [LIKE wild]
-@end example
-
-@code{SHOW TABLE STATUS} (new in Version 3.23) works likes @code{SHOW
-STATUS}, but provides a lot of information about each table. You can
-also get this list using the @code{mysqlshow --status db_name} command.
-The following columns are returned:
-
-@multitable @columnfractions .30 .70
-@item @strong{Column} @tab @strong{Meaning}
-@item @code{Name} @tab Name of the table.
-@item @code{Type} @tab Type of table. @xref{Table types}.
-@item @code{Row_format} @tab The row storage format (Fixed, Dynamic, or Compressed).
-@item @code{Rows} @tab Number of rows.
-@item @code{Avg_row_length} @tab Average row length.
-@item @code{Data_length} @tab Length of the data file.
-@item @code{Max_data_length} @tab Max length of the data file.
-@item @code{Index_length} @tab Length of the index file.
-@item @code{Data_free} @tab Number of allocated but not used bytes.
-@item @code{Auto_increment} @tab Next autoincrement value.
-@item @code{Create_time} @tab When the table was created.
-@item @code{Update_time} @tab When the data file was last updated.
-@item @code{Check_time} @tab When the table was last checked.
-@item @code{Create_options} @tab Extra options used with @code{CREATE TABLE}.
-@item @code{Comment} @tab The comment used when creating the table (or some information why @strong{MySQL} couldn't access the table information).
-@end multitable
-
-@code{InnoDB} tables will report the free space in the tablespace
-in the table comment.
-
-@node SHOW STATUS, SHOW VARIABLES, SHOW TABLE STATUS, SHOW
-@subsection @code{SHOW STATUS}
-
-@cindex @code{mysqladmin}
-@code{SHOW STATUS} provides server status information
-(like @code{mysqladmin extended-status}). The output resembles that shown
-below, though the format and numbers probably differ:
-
-@example
-+--------------------------+------------+
-| Variable_name | Value |
-+--------------------------+------------+
-| Aborted_clients | 0 |
-| Aborted_connects | 0 |
-| Bytes_received | 155372598 |
-| Bytes_sent | 1176560426 |
-| Connections | 30023 |
-| Created_tmp_disk_tables | 0 |
-| Created_tmp_tables | 8340 |
-| Created_tmp_files | 60 |
-| Delayed_insert_threads | 0 |
-| Delayed_writes | 0 |
-| Delayed_errors | 0 |
-| Flush_commands | 1 |
-| Handler_delete | 462604 |
-| Handler_read_first | 105881 |
-| Handler_read_key | 27820558 |
-| Handler_read_next | 390681754 |
-| Handler_read_prev | 6022500 |
-| Handler_read_rnd | 30546748 |
-| Handler_read_rnd_next | 246216530 |
-| Handler_update | 16945404 |
-| Handler_write | 60356676 |
-| Key_blocks_used | 14955 |
-| Key_read_requests | 96854827 |
-| Key_reads | 162040 |
-| Key_write_requests | 7589728 |
-| Key_writes | 3813196 |
-| Max_used_connections | 0 |
-| Not_flushed_key_blocks | 0 |
-| Not_flushed_delayed_rows | 0 |
-| Open_tables | 1 |
-| Open_files | 2 |
-| Open_streams | 0 |
-| Opened_tables | 44600 |
-| Questions | 2026873 |
-| Select_full_join | 0 |
-| Select_full_range_join | 0 |
-| Select_range | 99646 |
-| Select_range_check | 0 |
-| Select_scan | 30802 |
-| Slave_running | OFF |
-| Slave_open_temp_tables | 0 |
-| Slow_launch_threads | 0 |
-| Slow_queries | 0 |
-| Sort_merge_passes | 30 |
-| Sort_range | 500 |
-| Sort_rows | 30296250 |
-| Sort_scan | 4650 |
-| Table_locks_immediate | 1920382 |
-| Table_locks_waited | 0 |
-| Threads_cached | 0 |
-| Threads_created | 30022 |
-| Threads_connected | 1 |
-| Threads_running | 1 |
-| Uptime | 80380 |
-+--------------------------+------------+
-@end example
-
-@cindex variables, status
-The status variables listed above have the following meaning:
-
-@multitable @columnfractions .35 .65
-@item @strong{Variable} @tab @strong{Meaning}
-@item @code{Aborted_clients} @tab Number of connections aborted because the client died without closing the connection properly. @xref{Communication errors}.
-@item @code{Aborted_connects} @tab Number of tries to connect to the @strong{MySQL} server that failed. @xref{Communication errors}.
-@item @code{Bytes_received} @tab Number of bytes received from all clients.
-@item @code{Bytes_sent} @tab Number of bytes sent to all clients.
-@item @code{Connections} @tab Number of connection attempts to the @strong{MySQL} server.
-@item @code{Created_tmp_disk_tables} @tab Number of implicit temporary tables on disk created while executing statements.
-@item @code{Created_tmp_tables} @tab Number of implicit temporary tables in memory created while executing statements.
-@item @code{Created_tmp_files} @tab How many temporary files @code{mysqld} have created.
-@item @code{Delayed_insert_threads} @tab Number of delayed insert handler threads in use.
-@item @code{Delayed_writes} @tab Number of rows written with @code{INSERT DELAYED}.
-@item @code{Delayed_errors} @tab Number of rows written with @code{INSERT DELAYED} for which some error occurred (probably @code{duplicate key}).
-@item @code{Flush_commands} @tab Number of executed @code{FLUSH} commands.
-@item @code{Handler_delete} @tab Number of times a row was deleted from a table.
-@item @code{Handler_read_first} @tab Number of times the first entry was read from an index.
-If this is high, it suggests that the server is doing a lot of full index scans, for example,
-@code{SELECT col1 FROM foo}, assuming that col1 is indexed.
-@item @code{Handler_read_key} @tab Number of requests to read a row based on a key. If this
-is high, it is a good indication that your queries and tables are properly indexed.
-@item @code{Handler_read_next} @tab Number of requests to read next row in key order. This
-will be incremented if you are querying an index column with a range constraint. This also
-will be incremented if you are doing an index scan.
-@item @code{Handler_read_rnd} @tab Number of requests to read a row based on a fixed position.
-This will be high if you are doing a lot of queries that require sorting of the result.
-@item @code{Handler_read_rnd_next} @tab Number of requests to read the next row in the datafile.
-This will be high if you are doing a lot of table scans. Generally this suggests that your tables
-are not properly indexed or that your queries are not written to take advantage of the indexes you
-have.
-@item @code{Handler_update} @tab Number of requests to update a row in a table.
-@item @code{Handler_write} @tab Number of requests to insert a row in a table.
-@item @code{Key_blocks_used} @tab The number of used blocks in the key cache.
-@item @code{Key_read_requests} @tab The number of requests to read a key block from the cache.
-@item @code{Key_reads} @tab The number of physical reads of a key block from disk.
-@item @code{Key_write_requests} @tab The number of requests to write a key block to the cache.
-@item @code{Key_writes} @tab The number of physical writes of a key block to disk.
-@item @code{Max_used_connections} @tab The maximum number of connections in use simultaneously.
-@item @code{Not_flushed_key_blocks} @tab Keys blocks in the key cache that has changed but hasn't yet been flushed to disk.
-@item @code{Not_flushed_delayed_rows} @tab Number of rows waiting to be written in @code{INSERT DELAY} queues.
-@item @code{Open_tables} @tab Number of tables that are open.
-@item @code{Open_files} @tab Number of files that are open.
-@item @code{Open_streams} @tab Number of streams that are open (used mainly for logging).
-@item @code{Opened_tables} @tab Number of tables that have been opened.
-@item @code{Select_full_join} @tab Number of joins without keys (Should be 0).
-@item @code{Select_full_range_join} @tab Number of joins where we used a range search on reference table.
-@item @code{Select_range} @tab Number of joins where we used ranges on the first table. (It's normally not critical even if this is big.)
-@item @code{Select_scan} @tab Number of joins where we scanned the first table.
-@item @code{Select_range_check} @tab Number of joins without keys where we check for key usage after each row (Should be 0).
-@item @code{Questions} @tab Number of queries sent to the server.
-@item @code{Slave_open_temp_tables} @tab Number of temporary tables currently
-open by the slave thread
-@item @code{Slow_launch_threads} @tab Number of threads that have taken more than @code{slow_launch_time} to connect.
-@item @code{Slow_queries} @tab Number of queries that have taken more than @code{long_query_time}. @xref{Slow query log}.
-@item @code{Sort_merge_passes} @tab Number of merges the sort has to do. If this value is large you should consider increasing @code{sort_buffer}.
-@item @code{Sort_range} @tab Number of sorts that where done with ranges.
-@item @code{Sort_rows} @tab Number of sorted rows.
-@item @code{Sort_scan} @tab Number of sorts that where done by scanning the table.
-@item @code{Table_locks_immediate} @tab Number of times a table lock was
-acquired immediately. Available after 3.23.33.
-@item @code{Table_locks_waited} @tab Number of times a table lock could not
-be acquired immediately and a wait was needed. If this is high, and you
-have performance problems, you should first optimize your queries, and then
-either split your table(s) or use replication. Available after 3.23.33.
-@item @code{Threads_cached} @tab Number of threads in the thread cache.
-@item @code{Threads_connected} @tab Number of currently open connections.
-@item @code{Threads_created} @tab Number of threads created to handle connections.
-@item @code{Threads_running} @tab Number of threads that are not sleeping.
-@item @code{Uptime} @tab How many seconds the server has been up.
-@end multitable
-
-Some comments about the above:
-
-@itemize @bullet
-@item
-If @code{Opened_tables} is big, then your @code{table_cache}
-variable is probably too small.
-@item
-If @code{key_reads} is big, then your @code{key_cache} is probably too
-small. The cache hit rate can be calculated with
-@code{key_reads}/@code{key_read_requests}.
-@item
-If @code{Handler_read_rnd} is big, then you probably have a lot of
-queries that require @strong{MySQL} to scan whole tables or you have
-joins that don't use keys properly.
-@item
-If @code{Threads_created} is big, you may want to increase the
-@code{thread_cache_size} variable.
-@end itemize
-
-@node SHOW VARIABLES, SHOW LOGS, SHOW STATUS, SHOW
-@subsection @code{SHOW VARIABLES}
-
-@example
-SHOW VARIABLES [LIKE wild]
-@end example
-
-@code{SHOW VARIABLES} shows the values of some @strong{MySQL} system
-variables. You can also get this information using the @code{mysqladmin
-variables} command. If the default values are unsuitable, you can set most
-of these variables using command-line options when @code{mysqld} starts up.
-@xref{Command-line options}.
-
-The output resembles that shown below, though the format and numbers may
-differ somewhat:
-
-@example
-+-------------------------+---------------------------+
-| Variable_name | Value |
-+-------------------------+---------------------------+
-| ansi_mode | OFF |
-| back_log | 50 |
-| basedir | /my/monty/ |
-| bdb_cache_size | 16777216 |
-| bdb_log_buffer_size | 32768 |
-| bdb_home | /my/monty/data/ |
-| bdb_max_lock | 10000 |
-| bdb_logdir | |
-| bdb_shared_data | OFF |
-| bdb_tmpdir | /tmp/ |
-| binlog_cache_size | 32768 |
-| concurrent_insert | ON |
-| connect_timeout | 5 |
-| datadir | /my/monty/data/ |
-| delay_key_write | ON |
-| delayed_insert_limit | 100 |
-| delayed_insert_timeout | 300 |
-| delayed_queue_size | 1000 |
-| flush | OFF |
-| flush_time | 0 |
-| have_bdb | YES |
-| have_innodb | YES |
-| have_raid | YES |
-| have_ssl | NO |
-| init_file | |
-| interactive_timeout | 28800 |
-| join_buffer_size | 131072 |
-| key_buffer_size | 16776192 |
-| language | /my/monty/share/english/ |
-| large_files_support | ON |
-| log | OFF |
-| log_update | OFF |
-| log_bin | OFF |
-| log_slave_updates | OFF |
-| long_query_time | 10 |
-| low_priority_updates | OFF |
-| lower_case_table_names | 0 |
-| max_allowed_packet | 1048576 |
-| max_binlog_cache_size | 4294967295 |
-| max_connections | 100 |
-| max_connect_errors | 10 |
-| max_delayed_threads | 20 |
-| max_heap_table_size | 16777216 |
-| max_join_size | 4294967295 |
-| max_sort_length | 1024 |
-| max_tmp_tables | 32 |
-| max_write_lock_count | 4294967295 |
-| myisam_recover_options | DEFAULT |
-| myisam_sort_buffer_size | 8388608 |
-| net_buffer_length | 16384 |
-| net_read_timeout | 30 |
-| net_retry_count | 10 |
-| net_write_timeout | 60 |
-| open_files_limit | 0 |
-| pid_file | /my/monty/data/donna.pid |
-| port | 3306 |
-| protocol_version | 10 |
-| record_buffer | 131072 |
-| query_buffer_size | 0 |
-| safe_show_database | OFF |
-| server_id | 0 |
-| skip_locking | ON |
-| skip_networking | OFF |
-| skip_show_database | OFF |
-| slow_launch_time | 2 |
-| socket | /tmp/mysql.sock |
-| sort_buffer | 2097116 |
-| table_cache | 64 |
-| table_type | MYISAM |
-| thread_cache_size | 4 |
-| thread_stack | 65536 |
-| tmp_table_size | 1048576 |
-| tmpdir | /tmp/ |
-| version | 3.23.29a-gamma-debug |
-| wait_timeout | 28800 |
-+-------------------------+---------------------------+
-@end example
-
-Each option is described below. Values for buffer sizes, lengths, and stack
-sizes are given in bytes. You can specify values with a suffix of @samp{K}
-or @samp{M} to indicate kilobytes or megabytes. For example, @code{16M}
-indicates 16 megabytes. The case of suffix letters does not matter;
-@code{16M} and @code{16m} are equivalent:
-
-@cindex variables, values
-@table @code
-@item @code{ansi_mode}.
-Is @code{ON} if @code{mysqld} was started with @code{--ansi}.
-@xref{ANSI mode}.
-
-@item @code{back_log}
-The number of outstanding connection requests @strong{MySQL} can have. This
-comes into play when the main @strong{MySQL} thread gets @strong{VERY}
-many connection requests in a very short time. It then takes some time
-(although very little) for the main thread to check the connection and start
-a new thread. The @code{back_log} value indicates how many requests can be
-stacked during this short time before @strong{MySQL} momentarily stops
-answering new requests. You need to increase this only if you expect a large
-number of connections in a short period of time.
-
-In other words, this value is the size of the listen queue for incoming
-TCP/IP connections. Your operating system has its own limit on the size
-of this queue. The manual page for the Unix @code{listen(2)} system
-call should have more details. Check your OS documentation for the
-maximum value for this variable. Attempting to set @code{back_log}
-higher than your operating system limit will be ineffective.
-
-@item @code{basedir}
-The value of the @code{--basedir} option.
-
-@item @code{bdb_cache_size}
-The buffer that is allocated to cache index and rows for @code{BDB}
-tables. If you don't use @code{BDB} tables, you should start
-@code{mysqld} with @code{--skip-bdb} to not waste memory for this
-cache.
-
-@item @code{bdb_log_buffer_size}
-The buffer that is allocated to cache index and rows for @code{BDB}
-tables. If you don't use @code{BDB} tables, you should set this to 0 or
-start @code{mysqld} with @code{--skip-bdb} to not waste memory for this
-cache.
-
-@item @code{bdb_home}
-The value of the @code{--bdb-home} option.
-
-@item @code{bdb_max_lock}
-The maximum number of locks (1000 by default) you can have active on a
-BDB table. You should increase this if you get errors of type @code{bdb:
-Lock table is out of available locks} or @code{Got error 12 from ...}
-when you have do long transactions or when @code{mysqld} has to examine
-a lot of rows to calculate the query.
-
-@item @code{bdb_logdir}
-The value of the @code{--bdb-logdir} option.
-
-@item @code{bdb_shared_data}
-Is @code{ON} if you are using @code{--bdb-shared-data}.
-
-@item @code{bdb_tmpdir}
-The value of the @code{--bdb-tmpdir} option.
-
-@item @code{binlog_cache_size}. The size of the cache to hold the SQL
-statements for the binary log during a transaction. If you often use
-big, multi-statement transactions you can increase this to get more
-performance. @xref{COMMIT}.
-
-@item @code{character_set}
-The default character set.
-
-@item @code{character_sets}
-The supported character sets.
-
-@item @code{concurrent_inserts}
-If @code{ON} (the default), @strong{MySQL} will allow you to use @code{INSERT}
-on @code{MyISAM} tables at the same time as you run @code{SELECT} queries
-on them. You can turn this option off by starting @code{mysqld} with @code{--safe}
-or @code{--skip-new}.
-
-@cindex timeout
-@item @code{connect_timeout}
-The number of seconds the @code{mysqld} server is waiting for a connect
-packet before responding with @code{Bad handshake}.
-
-@item @code{datadir}
-The value of the @code{--datadir} option.
-
-@item @code{delay_key_write}
-If enabled (is on by default), @strong{MySQL} will honor the
-@code{delay_key_write} option @code{CREATE TABLE}. This means that the
-key buffer for tables with this option will not get flushed on every
-index update, but only when a table is closed. This will speed up
-writes on keys a lot, but you should add automatic checking of all tables
-with @code{myisamchk --fast --force} if you use this. Note that if you
-start @code{mysqld} with the @code{--delay-key-write-for-all-tables}
-option this means that all tables will be treated as if they were
-created with the @code{delay_key_write} option. You can clear this flag
-by starting @code{mysqld} with @code{--skip-new} or @code{--safe-mode}.
-
-@item @code{delayed_insert_limit}
-After inserting @code{delayed_insert_limit} rows, the @code{INSERT
-DELAYED} handler will check if there are any @code{SELECT} statements
-pending. If so, it allows these to execute before continuing.
-
-@item @code{delayed_insert_timeout}
-How long a @code{INSERT DELAYED} thread should wait for @code{INSERT}
-statements before terminating.
-
-@item @code{delayed_queue_size}
-What size queue (in rows) should be allocated for handling @code{INSERT
-DELAYED}. If the queue becomes full, any client that does @code{INSERT
-DELAYED} will wait until there is room in the queue again.
-
-@item @code{flush}
-This is @code{ON} if you have started @strong{MySQL} with the @code{--flush}
-option.
-
-@item @code{flush_time}
-If this is set to a non-zero value, then every @code{flush_time} seconds all
-tables will be closed (to free up resources and sync things to disk). We
-only recommend this option on Win95, Win98, or on systems where you have
-very little resources.
-
-@item @code{have_bdb}
-@code{YES} if @code{mysqld} supports Berkeley DB tables. @code{DISABLED}
-if @code{--skip-bdb} is used.
-@item @code{have_innodb}
-@code{YES} if @code{mysqld} supports InnoDB tables. @code{DISABLED}
-if @code{--skip-innodb} is used.
-@item @code{have_raid}
-@code{YES} if @code{mysqld} supports the @code{RAID} option.
-@item @code{have_ssl}
-@code{YES} if @code{mysqld} supports SSL (encryption) on the client/server
-protocol.
-
-@item @code{init_file}
-The name of the file specified with the @code{--init-file} option when
-you start the server. This is a file of SQL statements you want the
-server to execute when it starts.
-
-@item @code{interactive_timeout}
-The number of seconds the server waits for activity on an interactive
-connection before closing it. An interactive client is defined as a
-client that uses the @code{CLIENT_INTERACTIVE} option to
-@code{mysql_real_connect()}. See also @code{wait_timeout}.
-
-@item @code{join_buffer_size}
-The size of the buffer that is used for full joins (joins that do not
-use indexes). The buffer is allocated one time for each full join
-between two tables. Increase this value to get a faster full join when
-adding indexes is not possible. (Normally the best way to get fast joins
-is to add indexes.)
-
-@c Make texi2html support index @anchor{Index cache size}. Then change
-@c some xrefs to point here
-@cindex indexes, block size
-@item @code{key_buffer_size}
-Index blocks are buffered and are shared by all threads.
-@code{key_buffer_size} is the size of the buffer used for index blocks.
-
-Increase this to get better index handling (for all reads and multiple
-writes) to as much as you can afford; 64M on a 256M machine that mainly
-runs @strong{MySQL} is quite common. If you, however, make this too big
-(more than 50% of your total memory?) your system may start to page and
-become REALLY slow. Remember that because @strong{MySQL} does not cache
-data read, that you will have to leave some room for the OS filesystem
-cache.
-
-You can check the performance of the key buffer by doing @code{show
-status} and examine the variables @code{Key_read_requests},
-@code{Key_reads}, @code{Key_write_requests}, and @code{Key_writes}. The
-@code{Key_reads/Key_read_request} ratio should normally be < 0.01.
-The @code{Key_write/Key_write_requests} is usually near 1 if you are
-using mostly updates/deletes but may be much smaller if you tend to
-do updates that affect many at the same time or if you are
-using @code{delay_key_write}. @xref{SHOW}.
-
-To get even more speed when writing many rows at the same time, use
-@code{LOCK TABLES}. @xref{LOCK TABLES, , @code{LOCK TABLES}}.
-
-@item @code{language}
-The language used for error messages.
-
-@item @code{large_file_support}
-If @code{mysqld} was compiled with options for big file support.
-
-@item @code{locked_in_memory}
-If @code{mysqld} was locked in memory with @code{--memlock}
-
-@item @code{log}
-If logging of all queries is enabled.
-
-@item @code{log_update}
-If the update log is enabled.
-
-@item @code{log_bin}
-If the binary log is enabled.
-
-@item @code{log_slave_updates}
-If the updates from the slave should be logged.
-
-@item @code{long_query_time}
-If a query takes longer than this (in seconds), the @code{Slow_queries} counter
-will be incremented. If you are using @code{--log-slow-queries}, the query
-will be logged to the slow query logfile. @xref{Slow query log}.
-
-@item @code{lower_case_table_names}
-If set to 1 table names are stored in lowercase on disk. This will enable
-you to access the table names case-insensitive also on Unix.
-@xref{Name case sensitivity}.
-
-@item @code{max_allowed_packet}
-The maximum size of one packet. The message buffer is initialized to
-@code{net_buffer_length} bytes, but can grow up to @code{max_allowed_packet}
-bytes when needed. This value by default is small, to catch big (possibly
-wrong) packets. You must increase this value if you are using big
-@code{BLOB} columns. It should be as big as the biggest @code{BLOB} you want
-to use. The current protocol limits @code{max_allowed_packet} to 16M.
-
-@item @code{max_binlog_cache_size}
-If a multi-statement transaction requires more than this amount of memory,
-one will get the error "Multi-statement transaction required more than
-'max_binlog_cache_size' bytes of storage".
-
-@item @code{max_binlog_size}
-Available after 3.23.33. If a write to the binary (replication) log exceeds
-the given value, rotate the logs. You cannot set it to less than 1024 bytes,
-or more than 1 GB. Default is 1 GB.
-
-@item @code{max_connections}
-The number of simultaneous clients allowed. Increasing this value increases
-the number of file descriptors that @code{mysqld} requires. See below for
-comments on file descriptor limits. @xref{Too many connections}.
-
-@item @code{max_connect_errors}
-If there is more than this number of interrupted connections from a host
-this host will be blocked from further connections. You can unblock a host
-with the command @code{FLUSH HOSTS}.
-
-@item @code{max_delayed_threads}
-Don't start more than this number of threads to handle @code{INSERT DELAYED}
-statements. If you try to insert data into a new table after all @code{INSERT
-DELAYED} threads are in use, the row will be inserted as if the
-@code{DELAYED} attribute wasn't specified.
-
-@item @code{max_heap_table_size}
-Don't allow creation of heap tables bigger than this.
-
-@item @code{max_join_size}
-Joins that are probably going to read more than @code{max_join_size}
-records return an error. Set this value if your users tend to perform joins
-that lack a @code{WHERE} clause, that take a long time, and that return
-millions of rows.
-
-@item @code{max_sort_length}
-The number of bytes to use when sorting @code{BLOB} or @code{TEXT}
-values (only the first @code{max_sort_length} bytes of each value
-are used; the rest are ignored).
-
-@item @code{max_user_connections}
-The maximum number of active connections for a single user (0 = no limit).
-
-@item @code{max_tmp_tables}
-(This option doesn't yet do anything.)
-Maximum number of temporary tables a client can keep open at the same time.
-
-@item @code{max_write_lock_count}
-After this many write locks, allow some read locks to run in between.
-
-@item @code{myisam_recover_options}
-The value of the @code{--myisam-recover} option.
-
-@item @code{myisam_sort_buffer_size}
-The buffer that is allocated when sorting the index when doing a
-@code{REPAIR} or when creating indexes with @code{CREATE INDEX} or
-@code{ALTER TABLE}.
-
-@item @code{myisam_max_extra_sort_file_size}
-If the temporary file used for fast index creation would be this much
-bigger than using the key cache, then prefer the key cache
-method. This is mainly used to force long character keys in large
-tables to use the slower key cache method to create the index.
-@strong{NOTE} that this parameter is given in megabytes!
-
-@item @code{myisam_max_sort_file_size}
-The maximum size of the temporary file @strong{MySQL} is allowed to use
-while recreating the index (during @code{REPAIR}, @code{ALTER TABLE},
-or @code{LOAD DATA INFILE}). If the file size would be bigger than this,
-the index will be created through the key cache (which is slower).
-@strong{NOTE} that this parameter is given in megabytes!
-
-@item @code{net_buffer_length}
-The communication buffer is reset to this size between queries. This
-should not normally be changed, but if you have very little memory, you
-can set it to the expected size of a query. (That is, the expected length of
-SQL statements sent by clients. If statements exceed this length, the buffer
-is automatically enlarged, up to @code{max_allowed_packet} bytes.)
-
-@item @code{net_read_timeout}
-Number of seconds to wait for more data from a connection before aborting
-the read. Note that when we don't expect data from a connection, the timeout
-is defined by @code{write_timeout}. See also @code{slave_read_timeout}.
-
-@item @code{net_retry_count}
-If a read on a communication port is interrupted, retry this many times
-before giving up. This value should be quite high on @code{FreeBSD} as
-internal interrupts are sent to all threads.
-
-@item @code{net_write_timeout}
-Number of seconds to wait for a block to be written to a connection before
-aborting the write.
-
-@item @code{open_files_limit}
-If this is not 0, then @code{mysqld} will use this value to reserve file
-descriptors to use with @code{setrlimit()}. If this value is 0, then
-@code{mysqld} will reserve @code{max_connections*5} or
-@code{max_connections + table_cache*2} (whichever is larger) file
-descriptors. You should try increasing this if @code{mysqld} gives you the
-error 'Too many open files'.
-
-@item @code{pid_file}
-The value of the @code{--pid-file} option.
-
-@item @code{port}
-The value of the @code{--port} option.
-
-@item @code{protocol_version}
-The protocol version used by the @strong{MySQL} server.
-
-@item @code{record_buffer}
-Each thread that does a sequential scan allocates a buffer of this
-size for each table it scans. If you do many sequential scans, you may
-want to increase this value.
-
-@item @code{query_buffer_size}
-The initial allocation of the query buffer. If most of your queries are
-long (like when inserting blobs), you should increase this!
-
-@item @code{safe_show_databases}
-Don't show databases for which the user doesn't have any database or
-table privileges. This can improve security if you're concerned about
-people being able to see what databases other users have. See also
-@code{skip_show_databases}.
-
-@item @code{server_id}
-The value of the @code{--server-id} option.
-
-@item @code{skip_locking}
-Is OFF if @code{mysqld} uses external locking.
-
-@item @code{skip_networking}
-Is ON if we only allow local (socket) connections.
-
-@item @code{skip_show_databases}
-This prevents people from doing @code{SHOW DATABASES} if they don't have
-the @code{PROCESS_PRIV} privilege. This can improve security if you're
-concerned about people being able to see what databases other users
-have. See also @code{safe_show_databases}.
-
-@item @code{slave_read_timeout}
-Number of seconds to wait for more data from a master/slave connection
-before aborting the read.
-
-@item @code{slow_launch_time}
-If creating the thread takes longer than this value (in seconds), the
-@code{Slow_launch_threads} counter will be incremented.
-
-@item @code{socket}
-The Unix socket used by the server.
-
-@item @code{sort_buffer}
-Each thread that needs to do a sort allocates a buffer of this
-size. Increase this value for faster @code{ORDER BY} or @code{GROUP BY}
-operations.
-@xref{Temporary files}.
-
-@item @code{table_cache}
-The number of open tables for all threads. Increasing this value
-increases the number of file descriptors that @code{mysqld} requires.
-@strong{MySQL} needs two file descriptors for each unique open table.
-See below for comments on file descriptor limits. You can check if you
-need to increase the table cache by checking the @code{Opened_tables}
-variable. @xref{SHOW}. If this variable is big and you don't do
-@code{FLUSH TABLES} a lot (which just forces all tables to be closed and
-reopened), then you should increase the value of this variable.
-
-Make sure that your operating system can handle the number of open file
-descriptors implied by the @code{table_cache} setting. If @code{table_cache}
-is set too high, @strong{MySQL} may run out of file descriptors and refuse
-connections, fail to perform queries, and be very unreliable.
-
-For information about how the table cache works, see @ref{Table cache}.
-
-@item @code{table_type}
-The default table type.
-
-@item @code{thread_cache_size}
-How many threads we should keep in a cache for reuse. When a
-client disconnects, the client's threads are put in the cache if there
-aren't more than @code{thread_cache_size} threads from before. All new
-threads are first taken from the cache, and only when the cache is empty
-is a new thread created. This variable can be increased to improve
-performance if you have a lot of new connections. (Normally this doesn't
-give a notable performance improvement if you have a good
-thread implementation.) By examining the difference between
-@code{Connections} and @code{Threads_created}, you can see how efficient
-the current thread cache is for you.
-
-@item @code{thread_concurrency}
-On Solaris, @code{mysqld} will call @code{thr_setconcurrency()} with
-this value. @code{thr_setconcurrency()} permits the application to give
-the threads system a hint for the desired number of threads that should
-be run at the same time.
-
-@item @code{thread_stack}
-The stack size for each thread. Many of the limits detected by the
-@code{crash-me} test are dependent on this value. The default is
-large enough for normal operation. @xref{Benchmarks}.
-
-@item @code{timezone}
-The timezone for the server.
-
-@item @code{tmp_table_size}
-If an in-memory temporary table exceeds this size, @strong{MySQL}
-will automatically convert it to an on-disk @code{MyISAM} table.
-Increase the value of @code{tmp_table_size} if you do many advanced
-@code{GROUP BY} queries and you have lots of memory.
-
-@item @code{tmpdir}
-The directory used for temporary files and temporary tables.
-
-@item @code{version}
-The version number for the server.
-
-@item @code{wait_timeout}
-The number of seconds the server waits for activity on a connection before
-closing it. See also @code{interactive_timeout}.
-@end table
-
-The manual section that describes tuning @strong{MySQL} contains some
-information about how to tune the above variables. @xref{Server parameters}.
-
-@node SHOW LOGS, SHOW PROCESSLIST, SHOW VARIABLES, SHOW
-@subsection @code{SHOW LOGS}
-@code{SHOW LOGS} shows you status information about existing log
-files. It currently only displays information about Berkeley DB log
-files.
-
-@itemize @bullet
-@item @code{File} shows the full path to the log file
-@item @code{Type} shows the type of the log file (@code{BDB} for Berkeley
-DB log files)
-@item @code{Status} shows the status of the log file (@code{FREE} if the
-file can be removed, or @code{IN USE} if the file is needed by the transaction
-subsystem)
-@end itemize
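-
-For example:
-
-@example
-mysql> SHOW LOGS;
-@end example
-
-The output contains one row for each Berkeley DB log file, with the
-columns described above.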
-
-@cindex threads, display
-@cindex processes, display
-@findex threads
-@findex PROCESSLIST
-@node SHOW PROCESSLIST, SHOW GRANTS, SHOW LOGS, SHOW
-@subsection @code{SHOW PROCESSLIST}
-
-@code{SHOW PROCESSLIST} shows you which threads are running. You can
-also get this information using the @code{mysqladmin processlist}
-command. If you have the @strong{process} privilege, you can see all
-threads. Otherwise, you can see only your own threads. @xref{KILL, ,
-@code{KILL}}. If you don't use the @code{FULL} option, then only
-the first 100 characters of each query will be shown.
-
-This command is very useful if you get the 'too many connections' error
-message and want to find out what's going on. @strong{MySQL} reserves
-one extra connection for a client with the @code{Process_priv} privilege
-to ensure that you are always able to log in and check the system
-(assuming you are not giving this privilege to all your users).
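-
-For example, to see the full query text for each thread you are allowed
-to view:
-
-@example
-mysql> SHOW FULL PROCESSLIST;
-@end example
-
-or, from the shell:
-
-@example
-shell> mysqladmin processlist
-@end example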
-
-@cindex privileges, display
-@node SHOW GRANTS, SHOW CREATE TABLE, SHOW PROCESSLIST, SHOW
-@subsection @code{SHOW GRANTS}
-
-@code{SHOW GRANTS FOR user} lists the grant commands that must be issued to
-duplicate the grants for a user.
-
-@example
-mysql> SHOW GRANTS FOR root@@localhost;
-+---------------------------------------------------------------------+
-| Grants for root@@localhost |
-+---------------------------------------------------------------------+
-| GRANT ALL PRIVILEGES ON *.* TO 'root'@@'localhost' WITH GRANT OPTION |
-+---------------------------------------------------------------------+
-@end example
-
-@node SHOW CREATE TABLE, , SHOW GRANTS, SHOW
-@subsection @code{SHOW CREATE TABLE}
-
-Shows a @code{CREATE TABLE} statement that will create the given table:
-
-@example
-mysql> show create table t\G
-*************************** 1. row ***************************
- Table: t
-Create Table: CREATE TABLE t (
- id int(11) default NULL auto_increment,
- s char(60) default NULL,
- PRIMARY KEY (id)
-) TYPE=MyISAM
-
-@end example
-
-@code{SHOW CREATE TABLE} will quote table and column names according to
-the @code{SQL_QUOTE_SHOW_CREATE} option.
-@ref{SET OPTION, , @code{SET OPTION SQL_QUOTE_SHOW_CREATE}}.
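-
-For example, to turn quoting off for the current connection:
-
-@example
-mysql> SET SQL_QUOTE_SHOW_CREATE=0;
-@end example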
-
-@findex EXPLAIN
-@findex SELECT, optimizing
-@node EXPLAIN, DESCRIBE, SHOW, Reference
-@section @code{EXPLAIN} Syntax (Get Information About a @code{SELECT})
-
-@example
- EXPLAIN tbl_name
-or EXPLAIN SELECT select_options
-@end example
-
-@code{EXPLAIN tbl_name} is a synonym for @code{DESCRIBE tbl_name} or
-@code{SHOW COLUMNS FROM tbl_name}.
-
-When you precede a @code{SELECT} statement with the keyword @code{EXPLAIN},
-@strong{MySQL} explains how it would process the @code{SELECT}, providing
-information about how tables are joined and in which order.
-
-With the help of @code{EXPLAIN}, you can see when you must add indexes
-to tables to get a faster @code{SELECT} that uses indexes to find the
-records. You can also see if the optimizer joins the tables in an optimal
-order. To force the optimizer to use a specific join order for a
-@code{SELECT} statement, add a @code{STRAIGHT_JOIN} clause.
-
-For non-simple joins, @code{EXPLAIN} returns a row of information for each
-table used in the @code{SELECT} statement. The tables are listed in the order
-they would be read. @strong{MySQL} resolves all joins using a single-sweep
-multi-join method. This means that @strong{MySQL} reads a row from the first
-table, then finds a matching row in the second table, then in the third table
-and so on. When all tables are processed, it outputs the selected columns and
-backtracks through the table list until a table is found for which there are
-more matching rows. The next row is read from this table and the process
-continues with the next table.
-
-Output from @code{EXPLAIN} includes the following columns:
-
-@table @code
-@item table
-The table to which the row of output refers.
-
-@item type
-The join type. Information about the various types is given below.
-
-@item possible_keys
-The @code{possible_keys} column indicates which indexes @strong{MySQL}
-could use to find the rows in this table. Note that this column is
-totally independent of the order of the tables. That means that some of
-the keys in possible_keys may not be usable in practice with the
-generated table order.
-
-If this column is empty, there are no relevant indexes. In this case,
-you may be able to improve the performance of your query by examining
-the @code{WHERE} clause to see if it refers to some column or columns
-that would be suitable for indexing. If so, create an appropriate index
-and check the query with @code{EXPLAIN} again. @xref{ALTER TABLE}.
-
-To see what indexes a table has, use @code{SHOW INDEX FROM tbl_name}.
-
-@item key
-The @code{key} column indicates the key that @strong{MySQL} actually
-decided to use. The key is @code{NULL} if no index was chosen. If
-@strong{MySQL} chooses the wrong index, you can probably force
-@strong{MySQL} to use another index by running @code{myisamchk --analyze}
-or by using @code{USE INDEX/IGNORE INDEX}. @xref{myisamchk syntax}.
-@xref{JOIN}.
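-
-For example (a sketch; the table, column, and index names are just
-illustrations):
-
-@example
-mysql> SELECT * FROM t1 USE INDEX (idx_a) WHERE a=1 AND b=2;
-@end example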
-
-@item key_len
-The @code{key_len} column indicates the length of the key that
-@strong{MySQL} decided to use. The length is @code{NULL} if the
-@code{key} is @code{NULL}. Note that this tells us how many parts of a
-multi-part key @strong{MySQL} will actually use.
-
-@item ref
-The @code{ref} column shows which columns or constants are used with the
-@code{key} to select rows from the table.
-
-@item rows
-The @code{rows} column indicates the number of rows @strong{MySQL}
-believes it must examine to execute the query.
-
-@item Extra
-This column contains additional information about how @strong{MySQL} will
-resolve the query. Here is an explanation of the different text
-strings that can be found in this column:
-
-@table @code
-@item Distinct
-@strong{MySQL} will not continue searching for more rows for the current row
-combination after it has found the first matching row.
-
-@item Not exists
-@strong{MySQL} was able to do a @code{LEFT JOIN} optimization on the
-query and will not examine more rows in this table for the previous row
-combination after it finds one row that matches the @code{LEFT JOIN} criteria.
-
-Here is an example of this:
-
-@example
-SELECT * FROM t1 LEFT JOIN t2 ON t1.id=t2.id WHERE t2.id IS NULL;
-@end example
-
-Assume that @code{t2.id} is defined with @code{NOT NULL}. In this case
-@strong{MySQL} will scan @code{t1} and look up the rows in @code{t2}
-through @code{t1.id}. If @strong{MySQL} finds a matching row in
-@code{t2}, it knows that @code{t2.id} can never be @code{NULL}, and will
-not scan through the rest of the rows in @code{t2} that have the same
-@code{id}. In other words, for each row in @code{t1}, @strong{MySQL}
-only needs to do a single lookup in @code{t2}, independent of how many
-matching rows there are in @code{t2}.
-
-@item @code{range checked for each record (index map: #)}
-@strong{MySQL} did not find a good index to use. It will, instead, for
-each row combination in the preceding tables, do a check on which index to
-use (if any), and use this index to retrieve the rows from the table. This
-isn't very fast but is faster than having to do a join without
-an index.
-
-@item Using filesort
-@strong{MySQL} will need to do an extra pass to find out how to retrieve
-the rows in sorted order. The sort is done by going through all rows
-according to the @code{join type} and storing the sort key + pointer to
-the row for all rows that match the @code{WHERE}. Then the keys are
-sorted. Finally the rows are retrieved in sorted order.
-
-@item Using index
-The column information is retrieved from the table using only
-information in the index tree without having to do an additional seek to
-read the actual row. This can be done when all the used columns for
-the table are part of the same index.
-
-@item Using temporary
-To resolve the query @strong{MySQL} will need to create a
-temporary table to hold the result. This typically happens if you do an
-@code{ORDER BY} on a different column set than you did a @code{GROUP
-BY} on.
-
-@item Where used
-A @code{WHERE} clause will be used to restrict which rows will be
-matched against the next table or sent to the client. If you don't have
-this information and the table is of type @code{ALL} or @code{index},
-you may have something wrong in your query (if you don't intend to
-fetch/examine all rows from the table).
-@end table
-
-If you want to get your queries as fast as possible, you should look out for
-@code{Using filesort} and @code{Using temporary}.
-@end table
-
-The different join types are listed below, ordered from best to worst type:
-
-@cindex system table
-@cindex tables, system
-@table @code
-@item system
-The table has only one row (= system table). This is a special case of
-the @code{const} join type.
-
-@cindex constant table
-@cindex tables, constant
-@item const
-The table has at most one matching row, which will be read at the start
-of the query. Because there is only one row, values from the column in
-this row can be regarded as constants by the rest of the
-optimizer. @code{const} tables are very fast as they are read only once!
-
-@item eq_ref
-One row will be read from this table for each combination of rows from
-the previous tables. This is the best possible join type, other than the
-@code{const} types. It is used when all parts of an index are used by
-the join and the index is @code{UNIQUE} or a @code{PRIMARY KEY}.
-
-@item ref
-All rows with matching index values will be read from this table for each
-combination of rows from the previous tables. @code{ref} is used if the join
-uses only a leftmost prefix of the key, or if the key is not @code{UNIQUE}
-or a @code{PRIMARY KEY} (in other words, if the join cannot select a single
-row based on the key value). If the key that is used matches only a few rows,
-this join type is good.
-
-@item range
-Only rows that are in a given range will be retrieved, using an index to
-select the rows. The @code{key} column indicates which index is used.
-The @code{key_len} contains the longest key part that was used.
-The @code{ref} column will be NULL for this type.
-
-@item index
-This is the same as @code{ALL}, except that only the index tree is
-scanned. This is usually faster than @code{ALL}, as the index file is usually
-smaller than the data file.
-
-@item ALL
-A full table scan will be done for each combination of rows from the
-previous tables. This is normally not good if the table is the first
-table not marked @code{const}, and usually @strong{very} bad in all other
-cases. You normally can avoid @code{ALL} by adding more indexes, so that
-the row can be retrieved based on constant values or column values from
-earlier tables.
-@end table
-
-You can get a good indication of how good a join is by multiplying all values
-in the @code{rows} column of the @code{EXPLAIN} output. This should tell you
-roughly how many rows @strong{MySQL} must examine to execute the query. This
-number is also used when you restrict queries with the @code{max_join_size}
-variable.
-@xref{Server parameters}.
-
-The following example shows how a @code{JOIN} can be optimized progressively
-using the information provided by @code{EXPLAIN}.
-
-Suppose you have the @code{SELECT} statement shown below, that you examine
-using @code{EXPLAIN}:
-
-@example
-EXPLAIN SELECT tt.TicketNumber, tt.TimeIn,
- tt.ProjectReference, tt.EstimatedShipDate,
- tt.ActualShipDate, tt.ClientID,
- tt.ServiceCodes, tt.RepetitiveID,
- tt.CurrentProcess, tt.CurrentDPPerson,
- tt.RecordVolume, tt.DPPrinted, et.COUNTRY,
- et_1.COUNTRY, do.CUSTNAME
- FROM tt, et, et AS et_1, do
- WHERE tt.SubmitTime IS NULL
- AND tt.ActualPC = et.EMPLOYID
- AND tt.AssignedPC = et_1.EMPLOYID
- AND tt.ClientID = do.CUSTNMBR;
-@end example
-
-For this example, assume that:
-
-@itemize @bullet
-@item
-The columns being compared have been declared as follows:
-
-@multitable @columnfractions .1 .2 .7
-@item @strong{Table} @tab @strong{Column} @tab @strong{Column type}
-@item @code{tt} @tab @code{ActualPC} @tab @code{CHAR(10)}
-@item @code{tt} @tab @code{AssignedPC} @tab @code{CHAR(10)}
-@item @code{tt} @tab @code{ClientID} @tab @code{CHAR(10)}
-@item @code{et} @tab @code{EMPLOYID} @tab @code{CHAR(15)}
-@item @code{do} @tab @code{CUSTNMBR} @tab @code{CHAR(15)}
-@end multitable
-
-@item
-The tables have the indexes shown below:
-
-@multitable @columnfractions .1 .9
-@item @strong{Table} @tab @strong{Index}
-@item @code{tt} @tab @code{ActualPC}
-@item @code{tt} @tab @code{AssignedPC}
-@item @code{tt} @tab @code{ClientID}
-@item @code{et} @tab @code{EMPLOYID} (primary key)
-@item @code{do} @tab @code{CUSTNMBR} (primary key)
-@end multitable
-
-@item
-The @code{tt.ActualPC} values aren't evenly distributed.
-@end itemize
-
-Initially, before any optimizations have been performed, the @code{EXPLAIN}
-statement produces the following information:
-
-@example
-table type possible_keys key key_len ref rows Extra
-et ALL PRIMARY NULL NULL NULL 74
-do ALL PRIMARY NULL NULL NULL 2135
-et_1 ALL PRIMARY NULL NULL NULL 74
-tt ALL AssignedPC,ClientID,ActualPC NULL NULL NULL 3872
- range checked for each record (key map: 35)
-@end example
-
-Because @code{type} is @code{ALL} for each table, this output indicates that
-@strong{MySQL} is doing a full join for all tables! This will take quite a
-long time, as the product of the number of rows in each table must be
-examined! For the case at hand, this is @code{74 * 2135 * 74 * 3872 =
-45,268,558,720} rows. If the tables were bigger, you can only imagine how
-long it would take.
-
-One problem here is that @strong{MySQL} can't (yet) use indexes on columns
-efficiently if they are declared differently. In this context,
-@code{VARCHAR} and @code{CHAR} are the same unless they are declared as
-different lengths. Because @code{tt.ActualPC} is declared as @code{CHAR(10)}
-and @code{et.EMPLOYID} is declared as @code{CHAR(15)}, there is a length
-mismatch.
-
-To fix this disparity between column lengths, use @code{ALTER TABLE} to
-lengthen @code{ActualPC} from 10 characters to 15 characters:
-
-@example
-mysql> ALTER TABLE tt MODIFY ActualPC VARCHAR(15);
-@end example
-
-Now @code{tt.ActualPC} and @code{et.EMPLOYID} are both @code{VARCHAR(15)}.
-Executing the @code{EXPLAIN} statement again produces this result:
-
-@example
-table type possible_keys key key_len ref rows Extra
-tt ALL AssignedPC,ClientID,ActualPC NULL NULL NULL 3872 where used
-do ALL PRIMARY NULL NULL NULL 2135
- range checked for each record (key map: 1)
-et_1 ALL PRIMARY NULL NULL NULL 74
- range checked for each record (key map: 1)
-et eq_ref PRIMARY PRIMARY 15 tt.ActualPC 1
-@end example
-
-This is not perfect, but is much better (the product of the @code{rows}
-values is now less by a factor of 74). This version is executed in a couple
-of seconds.
-
-A second alteration can be made to eliminate the column length mismatches
-for the @code{tt.AssignedPC = et_1.EMPLOYID} and @code{tt.ClientID =
-do.CUSTNMBR} comparisons:
-
-@example
-mysql> ALTER TABLE tt MODIFY AssignedPC VARCHAR(15),
- MODIFY ClientID VARCHAR(15);
-@end example
-
-Now @code{EXPLAIN} produces the output shown below:
-
-@example
-table type possible_keys key key_len ref rows Extra
-et ALL PRIMARY NULL NULL NULL 74
-tt ref AssignedPC,ClientID,ActualPC ActualPC 15 et.EMPLOYID 52 where used
-et_1 eq_ref PRIMARY PRIMARY 15 tt.AssignedPC 1
-do eq_ref PRIMARY PRIMARY 15 tt.ClientID 1
-@end example
-
-This is almost as good as it can get.
-
-The remaining problem is that, by default, @strong{MySQL} assumes that values
-in the @code{tt.ActualPC} column are evenly distributed, and that isn't the
-case for the @code{tt} table. Fortunately, it is easy to tell @strong{MySQL}
-about this:
-
-@example
-shell> myisamchk --analyze PATH_TO_MYSQL_DATABASE/tt
-shell> mysqladmin refresh
-@end example
-
-Now the join is perfect, and @code{EXPLAIN} produces this result:
-
-@example
-table type possible_keys key key_len ref rows Extra
-tt ALL AssignedPC,ClientID,ActualPC NULL NULL NULL 3872 where used
-et eq_ref PRIMARY PRIMARY 15 tt.ActualPC 1
-et_1 eq_ref PRIMARY PRIMARY 15 tt.AssignedPC 1
-do eq_ref PRIMARY PRIMARY 15 tt.ClientID 1
-@end example
-
-Note that the @code{rows} column in the output from @code{EXPLAIN} is an
-educated guess from the @strong{MySQL} join optimizer. To optimize a
-query, you should check if the numbers are even close to the truth. If not,
-you may get better performance by using @code{STRAIGHT_JOIN} in your
-@code{SELECT} statement and trying to list the tables in a different order in
-the @code{FROM} clause.
@findex DESC
@findex DESCRIBE
-@node DESCRIBE, COMMIT, EXPLAIN, Reference
+@node DESCRIBE, COMMIT, USE, Reference
@section @code{DESCRIBE} Syntax (Get Information About Columns)
@example
@@ -24790,7 +33763,7 @@ You can change the isolation level for transactions with
@findex LOCK TABLES
@findex UNLOCK TABLES
-@node LOCK TABLES, SET OPTION, COMMIT, Reference
+@node LOCK TABLES, CREATE INDEX, COMMIT, Reference
@section @code{LOCK TABLES/UNLOCK TABLES} Syntax
@example
@@ -24806,17 +33779,9 @@ are locked by the current thread are automatically unlocked when the
thread issues another @code{LOCK TABLES}, or when the connection to the
server is closed.
-The main reasons to use @code{LOCK TABLES} are:
-
-@itemize @bullet
-@item
-To emulate transactions with tables that are not transaction-safe.
-@item
-To get more speed with @code{MyISAM} tables when inserting/updating data
-over many statements. The main reason this gives more speed is that
-@strong{MySQL} will not flush the key cache for the locked tables until
-@code{UNLOCK TABLES} is called.
-@end itemize
+The main reasons to use @code{LOCK TABLES} are for emulating transactions
+or getting more speed when updating tables. This is explained in more
+detail later.
If a thread obtains a @code{READ} lock on a table, that thread (and all other
threads) can only read from the table. If a thread obtains a @code{WRITE}
@@ -24829,8 +33794,10 @@ execute while the lock is held. This can't however be used if you are
going to manipulate the database files outside @strong{MySQL} while you
hold the lock.
-Each thread waits (without timing out) until it obtains all the locks it has
-requested.
+When you use @code{LOCK TABLES}, you must lock all tables that you are
+going to use and you must use the same alias that you are going to use
+in your queries! If you are using a table multiple times in a query
+(with aliases), you must get a lock for each alias!
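+
+For example, if you refer to a table both by its own name and through an
+alias, you must obtain a lock for each of them (a minimal sketch; the
+table @code{t} is just an illustration):
+
+@example
+mysql> LOCK TABLES t WRITE, t AS t1 WRITE;
+mysql> INSERT INTO t SELECT * FROM t AS t1;
+mysql> UNLOCK TABLES;
+@end example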
@code{WRITE} locks normally have higher priority than @code{READ} locks, to
ensure that updates are processed as soon as possible. This means that if one
@@ -24842,15 +33809,32 @@ locks while the thread is waiting for the @code{WRITE} lock. You should only
use @code{LOW_PRIORITY WRITE} locks if you are sure that there will
eventually be a time when no threads will have a @code{READ} lock.
-@code{LOCK TABLES} and @code{UNLOCK TABLES} both commits any active
-transactions.
+@code{LOCK TABLES} works as follows:
+@enumerate
+@item
+Sort all tables to be locked in an internally defined order (from the
+user standpoint the order is undefined).
+@item
+If a table is locked with a read and a write lock, put the write lock
+before the read lock.
+@item
+Lock one table at a time until the thread gets all locks.
+@end enumerate
-When you use @code{LOCK TABLES}, you must lock all tables that you are
-going to use and you must use the same alias that you are going to use
-in your queries! If you are using a table multiple times in a query
-(with aliases), you must get a lock for each alias! This policy ensures
-that table locking is deadlock free and makes the locking code smaller,
-simpler and much faster.
+This policy ensures that table locking is deadlock free. There are,
+however, other things one needs to be aware of with this scheme:
+
+If you are using a @code{LOW_PRIORITY WRITE} lock for a table, this
+means only that @strong{MySQL} will wait for this particular lock until
+there are no threads that want a @code{READ} lock. When the thread has
+got the @code{WRITE} lock and is waiting to get the lock for the next
+table in the lock table list, all other threads will wait for the
+@code{WRITE} lock to be released. If this becomes a serious problem
+with your application, you should consider converting some of your
+tables to transaction-safe tables.
+
+You can safely kill a thread that is waiting for a table lock with
+@code{KILL}. @xref{KILL}.
Note that you should @strong{NOT} lock any tables that you are using with
@code{INSERT DELAYED}. This is because that in this case the @code{INSERT}
@@ -24867,6 +33851,12 @@ If you are going to run many operations on a bunch of tables, it's much
faster to lock the tables you are going to use. The downside is, of course,
that no other thread can update a @code{READ}-locked table and no other
thread can read a @code{WRITE}-locked table.
+
+The reason some things are faster under @code{LOCK TABLES} is that
+@strong{MySQL} will not flush the key cache for the locked tables until
+@code{UNLOCK TABLES} is called (normally the key cache is flushed after
+each SQL statement). This speeds up inserts, updates, and deletes on
+@code{MyISAM} tables.
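+
+For example, a batch of inserts like the following (a sketch; the table
+and values are just illustrations) flushes the key cache only once, when
+@code{UNLOCK TABLES} is executed:
+
+@example
+mysql> LOCK TABLES mytable WRITE;
+mysql> INSERT INTO mytable VALUES (1,'a'),(2,'b');
+mysql> INSERT INTO mytable VALUES (3,'c'),(4,'d');
+mysql> UNLOCK TABLES;
+@end example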
@item
If you are using a table handler in @strong{MySQL} that doesn't support
transactions, you must use @code{LOCK TABLES} if you want to ensure that
@@ -24898,7 +33888,7 @@ table in the server and implemented with @code{pthread_mutex_lock()} and
See @ref{Internal locking}, for more information on locking policy.
-You can also lock all tables in all databases with read locks with the
+You can lock all tables in all databases with read locks with the
@code{FLUSH TABLES WITH READ LOCK} command. @xref{FLUSH}. This is a very
convenient way to get backups if you have a file system, like Veritas,
that can take snapshots in time.
@@ -24907,417 +33897,15 @@ that can take snapshots in time.
automatically commit any active transactions before attempting to lock the
tables.
-@findex SET OPTION
-@node SET OPTION, SET TRANSACTION, LOCK TABLES, Reference
-@section @code{SET} Syntax
-
-@example
-SET [OPTION] SQL_VALUE_OPTION= value, ...
-@end example
-
-@code{SET OPTION} sets various options that affect the operation of the
-server or your client. Any option you set remains in effect until the
-current session ends, or until you set the option to a different value.
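-
-For example, to set one of the options described below for the current
-session:
-
-@example
-mysql> SET SQL_SELECT_LIMIT=1000;
-@end example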
-
-@table @code
-@item CHARACTER SET character_set_name | DEFAULT
-This maps all strings from and to the client with the given mapping.
-Currently the only option for @code{character_set_name} is
-@code{cp1251_koi8}, but you can easily add new mappings by editing the
-@file{sql/convert.cc} file in the @strong{MySQL} source distribution. The
-default mapping can be restored by using a @code{character_set_name} value of
-@code{DEFAULT}.
-
-Note that the syntax for setting the @code{CHARACTER SET} option differs
-from the syntax for setting the other options.
-
-@item PASSWORD = PASSWORD('some password')
-@cindex passwords, setting
-Set the password for the current user. Any non-anonymous user can change his
-own password!
-
-@item PASSWORD FOR user = PASSWORD('some password')
-Set the password for a specific user on the current server host. Only a user
-with access to the @code{mysql} database can do this. The user should be
-given in @code{user@@hostname} format, where @code{user} and @code{hostname}
-are exactly as they are listed in the @code{User} and @code{Host} columns of
-the @code{mysql.user} table entry. For example, if you had an entry with
-@code{User} and @code{Host} fields of @code{'bob'} and @code{'%.loc.gov'},
-you would write:
-
-@example
-mysql> SET PASSWORD FOR bob@@"%.loc.gov" = PASSWORD("newpass");
-
-or
-
-mysql> UPDATE mysql.user SET password=PASSWORD("newpass") WHERE user="bob" AND host="%.loc.gov";
-@end example
-
-@item SQL_AUTO_IS_NULL = 0 | 1
-If set to @code{1} (the default), then one can find the last inserted row
-for a table with an @code{AUTO_INCREMENT} column with the following
-construct: @code{WHERE auto_increment_column IS NULL}. This is used by
-some ODBC programs, like Access.
-
-@item AUTOCOMMIT= 0 | 1
-If set to @code{1} all changes to a table will be done at once. To start
-a multi-command transaction, you have to use the @code{BEGIN}
-statement. @xref{COMMIT}. If set to @code{0} you have to use @code{COMMIT} /
-@code{ROLLBACK} to accept/revoke that transaction. @xref{COMMIT}. Note
-that when you change from non-@code{AUTOCOMMIT} mode to
-@code{AUTOCOMMIT} mode, @strong{MySQL} will do an automatic
-@code{COMMIT} on any open transactions.
-
-@item SQL_BIG_TABLES = 0 | 1
-@cindex table is full
-If set to @code{1}, all temporary tables are stored on disk rather than in
-memory. This will be a little slower, but you will not get the error
-@code{The table tbl_name is full} for big @code{SELECT} operations that
-require a large temporary table. The default value for a new connection is
-@code{0} (that is, use in-memory temporary tables).
-
-@item SQL_BIG_SELECTS = 0 | 1
-If set to @code{0}, @strong{MySQL} will abort if a @code{SELECT} is attempted
-that probably will take a very long time. This is useful when an inadvisable
-@code{WHERE} statement has been issued. A big query is defined as a
-@code{SELECT} that probably will have to examine more than
-@code{max_join_size} rows. The default value for a new connection is
-@code{1} (which will allow all @code{SELECT} statements).
-
-@item SQL_BUFFER_RESULT = 0 | 1
-@code{SQL_BUFFER_RESULT} will force the result from @code{SELECT}'s
-to be put into a temporary table. This will help @strong{MySQL} free the
-table locks early and will help in cases where it takes a long time to
-send the result set to the client.
-
-@item SQL_LOW_PRIORITY_UPDATES = 0 | 1
-If set to @code{1}, all @code{INSERT}, @code{UPDATE}, @code{DELETE}, and
-@code{LOCK TABLE WRITE} statements wait until there is no pending
-@code{SELECT} or @code{LOCK TABLE READ} on the affected table.
-
-@item SQL_MAX_JOIN_SIZE = value | DEFAULT
-Don't allow @code{SELECT}s that will probably need to examine more than
-@code{value} row combinations. By setting this value, you can catch
-@code{SELECT}s where keys are not used properly and that would probably
-take a long time. Setting this to a value other than @code{DEFAULT} will reset
-the @code{SQL_BIG_SELECTS} flag. If you set the @code{SQL_BIG_SELECTS}
-flag again, the @code{SQL_MAX_JOIN_SIZE} variable will be ignored.
-You can set a default value for this variable by starting @code{mysqld} with
-@code{-O max_join_size=#}.
-
-@item SQL_SAFE_MODE = 0 | 1
-If set to @code{1}, @strong{MySQL} will abort if an @code{UPDATE} or
-@code{DELETE} is attempted that doesn't use a key or @code{LIMIT} in the
-@code{WHERE} clause. This makes it possible to catch wrong updates
-when creating SQL commands by hand.
-
-@item SQL_SELECT_LIMIT = value | DEFAULT
-The maximum number of records to return from @code{SELECT} statements. If
-a @code{SELECT} has a @code{LIMIT} clause, the @code{LIMIT} takes precedence
-over the value of @code{SQL_SELECT_LIMIT}. The default value for a new
-connection is ``unlimited.'' If you have changed the limit, the default value
-can be restored by using a @code{SQL_SELECT_LIMIT} value of @code{DEFAULT}.
-
-@item SQL_LOG_OFF = 0 | 1
-If set to @code{1}, no logging will be done to the standard log for this
-client, if the client has the @strong{process} privilege. This does not
-affect the update log!
-
-@item SQL_LOG_UPDATE = 0 | 1
-If set to @code{0}, no logging will be done to the update log for the client,
-if the client has the @strong{process} privilege. This does not affect the
-standard log!
-
-@item SQL_QUOTE_SHOW_CREATE = 0 | 1
-If set to @code{1}, @code{SHOW CREATE TABLE} will quote
-table and column names. This is @strong{on} by default,
-for replication of tables with fancy column names to work.
-@ref{SHOW CREATE TABLE, , @code{SHOW CREATE TABLE}}.
-
-@item TIMESTAMP = timestamp_value | DEFAULT
-Set the time for this client. This is used to get the original timestamp if
-you use the update log to restore rows. @code{timestamp_value} should be a
-UNIX Epoch timestamp, not a @strong{MySQL} timestamp.
-
-@item LAST_INSERT_ID = #
-Set the value to be returned from @code{LAST_INSERT_ID()}. This is stored in
-the update log when you use @code{LAST_INSERT_ID()} in a command that updates
-a table.
-
-@item INSERT_ID = #
-Set the value to be used by the following @code{INSERT} or @code{ALTER TABLE}
-command when inserting an @code{AUTO_INCREMENT} value. This is mainly used
-with the update log.
-@end table
-
-@findex ISOLATION LEVEL
-@node SET TRANSACTION, GRANT, SET OPTION, Reference
-@section @code{SET TRANSACTION} Syntax
-
-@example
-SET [GLOBAL | SESSION] TRANSACTION ISOLATION LEVEL
-[READ UNCOMMITTED | READ COMMITTED | REPEATABLE READ | SERIALIZABLE]
-@end example
-
-Sets the transaction isolation level globally, for the whole session, or
-for the next transaction.
-
-The default behavior is to set the isolation level for the next (not started)
-transaction.
-
-If you use the @code{GLOBAL} keyword, the statement will affect all newly
-created threads. You will need the @code{PROCESS} privilege to do this.
-
-Using the @code{SESSION} keyword will affect the following and all
-future transactions in the current session.
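-
-For example, to make all new transactions in the current session use the
-@code{SERIALIZABLE} level:
-
-@example
-mysql> SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
-@end example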
-
-You can set the default isolation level for @code{mysqld} with
-@code{--transaction-isolation=...}. @xref{Command-line options}.
-
-@cindex privileges, granting
-@cindex privileges, revoking
-@cindex global privileges
-@cindex revoking, privileges
-@cindex granting, privileges
-
-@findex GRANT
-@findex REVOKE
-@node GRANT, CREATE INDEX, SET TRANSACTION, Reference
-@section @code{GRANT} and @code{REVOKE} Syntax
-
-@example
-GRANT priv_type [(column_list)] [, priv_type [(column_list)] ...]
- ON @{tbl_name | * | *.* | db_name.*@}
- TO user_name [IDENTIFIED BY 'password']
- [, user_name [IDENTIFIED BY 'password'] ...]
- [WITH GRANT OPTION]
-
-REVOKE priv_type [(column_list)] [, priv_type [(column_list)] ...]
- ON @{tbl_name | * | *.* | db_name.*@}
- FROM user_name [, user_name ...]
-@end example
-
-@code{GRANT} is implemented in @strong{MySQL} Version 3.22.11 or later. For
-earlier @strong{MySQL} versions, the @code{GRANT} statement does nothing.
-
-The @code{GRANT} and @code{REVOKE} commands allow system administrators
-to create users and grant and revoke rights to @strong{MySQL} users at
-four privilege levels:
-
-@table @strong
-@item Global level
-Global privileges apply to all databases on a given server. These privileges
-are stored in the @code{mysql.user} table.
-
-@item Database level
-Database privileges apply to all tables in a given database. These privileges
-are stored in the @code{mysql.db} and @code{mysql.host} tables.
-
-@item Table level
-Table privileges apply to all columns in a given table. These privileges are
-stored in the @code{mysql.tables_priv} table.
-
-@item Column level
-Column privileges apply to single columns in a given table. These privileges are
-stored in the @code{mysql.columns_priv} table.
-@end table
-
-If you give a grant for a user that doesn't exist, that user is created.
-For examples of how @code{GRANT} works, see @ref{Adding users}.
-
-For the @code{GRANT} and @code{REVOKE} statements, @code{priv_type} may be
-specified as any of the following:
-
-@example
-ALL PRIVILEGES FILE RELOAD
-ALTER INDEX SELECT
-CREATE INSERT SHUTDOWN
-DELETE PROCESS UPDATE
-DROP REFERENCES USAGE
-@end example
-
-@code{ALL} is a synonym for @code{ALL PRIVILEGES}. @code{REFERENCES} is not
-yet implemented. @code{USAGE} is currently a synonym for ``no privileges.''
-It can be used when you want to create a user that has no privileges.
-
-To revoke the @strong{grant} privilege from a user, use a @code{priv_type}
-value of @code{GRANT OPTION}:
-
-@example
-REVOKE GRANT OPTION ON ... FROM ...;
-@end example
-
-The only @code{priv_type} values you can specify for a table are @code{SELECT},
-@code{INSERT}, @code{UPDATE}, @code{DELETE}, @code{CREATE}, @code{DROP},
-@code{GRANT}, @code{INDEX}, and @code{ALTER}.
-
-The only @code{priv_type} values you can specify for a column (that is, when
-you use a @code{column_list} clause) are @code{SELECT}, @code{INSERT}, and
-@code{UPDATE}.
-
-You can set global privileges by using @code{ON *.*} syntax. You can set
-database privileges by using @code{ON db_name.*} syntax. If you specify
-@code{ON *} and you have a current database, you will set the privileges for
-that database. (@strong{WARNING:} If you specify @code{ON *} and you
-@emph{don't} have a current database, you will affect the global privileges!)
-
-In order to accommodate granting rights to users from arbitrary hosts,
-@strong{MySQL} supports specifying the @code{user_name} value in the form
-@code{user@@host}. If you want to specify a @code{user} string
-containing special characters (such as @samp{-}), or a @code{host} string
-containing special characters or wild-card characters (such as @samp{%}), you
-can quote the user or host name (for example, @code{'test-user'@@'test-hostname'}).
-
-You can specify wild cards in the hostname. For example,
-@code{user@@"%.loc.gov"} applies to @code{user} for any host in the
-@code{loc.gov} domain, and @code{user@@"144.155.166.%"} applies to @code{user}
-for any host in the @code{144.155.166} class C subnet.
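-
-For example (a sketch; the database, user, and password names are just
-illustrations):
-
-@example
-mysql> GRANT SELECT,INSERT ON db_name.* TO myuser@@"%.loc.gov"
-    ->        IDENTIFIED BY 'some_pass';
-@end example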
-
-The simple form @code{user} is a synonym for @code{user@@"%"}.
-@strong{NOTE:} If you allow anonymous users to connect to the @strong{MySQL}
-server (which is the default), you should also add all local users as
-@code{user@@localhost} because otherwise the anonymous user entry for the
-local host in the @code{mysql.user} table will be used when the user tries to
-log into the @strong{MySQL} server from the local machine! Anonymous users
-are defined by inserting entries with @code{User=''} into the
-@code{mysql.user} table. You can verify if this applies to you by executing
-this query:
-
-@example
-mysql> SELECT Host,User FROM mysql.user WHERE User='';
-@end example
-
-For the moment, @code{GRANT} only supports host, table, database, and
-column names up to 60 characters long. A user name can be up to 16
-characters.
-
-The privileges for a table or column are formed from the
-logical OR of the privileges at each of the four privilege
-levels. For example, if the @code{mysql.user} table specifies that a
-user has a global @strong{select} privilege, this can't be denied by an
-entry at the database, table, or column level.
-
-The privileges for a column can be calculated as follows:
-
-@example
-global privileges
-OR (database privileges AND host privileges)
-OR table privileges
-OR column privileges
-@end example
-
-In most cases, you grant rights to a user at only one of the privilege
-levels, so life isn't normally as complicated as above. The details of the
-privilege-checking procedure are presented in
-@ref{Privilege system}.
-
-If you grant privileges for a user/hostname combination that does not exist
-in the @code{mysql.user} table, an entry is added and remains there until
-deleted with a @code{DELETE} command. In other words, @code{GRANT} may
-create @code{user} table entries, but @code{REVOKE} will not remove them;
-you must do that explicitly using @code{DELETE}.
-
-@cindex passwords, setting
-In @strong{MySQL} Version 3.22.12 or later,
-if a new user is created or if you have global grant privileges, the user's
-password will be set to the password specified by the @code{IDENTIFIED BY}
-clause, if one is given. If the user already had a password, it is replaced
-by the new one.
-
-@strong{WARNING:} If you create a new user but do not specify an
-@code{IDENTIFIED BY} clause, the user has no password. This is insecure.
-
-Passwords can also be set with the @code{SET PASSWORD} command.
-@xref{SET OPTION, , @code{SET OPTION}}.
-
-If you grant privileges for a database, an entry in the @code{mysql.db}
-table is created if needed. When all privileges for the database have been
-removed with @code{REVOKE}, this entry is deleted.
-
-If a user doesn't have any privileges on a table, the table is not displayed
-when the user requests a list of tables (for example, with a @code{SHOW TABLES}
-statement).
-
-The @code{WITH GRANT OPTION} clause gives the user the ability to give
-to other users any privileges the user has at the specified privilege level.
-You should be careful to whom you give the @strong{grant} privilege, as two
-users with different privileges may be able to join privileges!
-
-You cannot grant another user a privilege you don't have yourself;
-the @strong{grant} privilege allows you to give away only those privileges
-you possess.
-
-Be aware that when you grant a user the @strong{grant} privilege at a
-particular privilege level, any privileges the user already possesses (or
-is given in the future!) at that level are also grantable by that user.
-Suppose you grant a user the @strong{insert} privilege on a database. If
-you then grant the @strong{select} privilege on the database and specify
-@code{WITH GRANT OPTION}, the user can give away not only the @strong{select}
-privilege, but also @strong{insert}. If you then grant the @strong{update}
-privilege to the user on the database, the user can give away the
-@strong{insert}, @strong{select} and @strong{update}.
-
-You should not grant @strong{alter} privileges to a normal user. If you
-do that, the user can try to subvert the privilege system by renaming
-tables!
-
-Note that if you are using table or column privileges for even one user, the
-server examines table and column privileges for all users and this will slow
-down @strong{MySQL} a bit.
-
-When @code{mysqld} starts, all privileges are read into memory.
-Database, table, and column privileges take effect at once, and
-user-level privileges take effect the next time the user connects.
-Modifications to the grant tables that you perform using @code{GRANT} or
-@code{REVOKE} are noticed by the server immediately.
-If you modify the grant tables manually (using @code{INSERT}, @code{UPDATE},
-etc.), you should execute a @code{FLUSH PRIVILEGES} statement or run
-@code{mysqladmin flush-privileges} to tell the server to reload the grant
-tables.
-@xref{Privilege changes}.
-
-@cindex ANSI SQL, differences from
-The biggest differences between the ANSI SQL and @strong{MySQL} versions of
-@code{GRANT} are:
-@itemize @bullet
-@item
-In @strong{MySQL}, privileges are given for a username + hostname combination
-and not only for a username.
-
-@item
-ANSI SQL doesn't have global or database-level privileges, and ANSI SQL
-doesn't support all privilege types that @strong{MySQL} supports.
-@strong{MySQL} doesn't support the ANSI SQL @code{TRIGGER}, @code{EXECUTE} or
-@code{UNDER} privileges.
-
-@item
-ANSI SQL privileges are structured in a hierarchical manner. If you remove
-a user, all privileges the user has granted are revoked. In
-@strong{MySQL} the granted privileges are not automatically revoked; you
-have to revoke these yourself if needed.
-
-@item
-If in @strong{MySQL} you have the @code{INSERT} privilege on only some of the
-columns in a table, you can execute @code{INSERT} statements on the
-table; the columns for which you don't have the @code{INSERT} privilege
-will be set to their default values. ANSI SQL requires you to have the
-@code{INSERT} privilege on all columns.
+@node CREATE INDEX, DROP INDEX, LOCK TABLES, Reference
+@section @code{CREATE INDEX} Syntax
-@item
-When you drop a table in ANSI SQL, all privileges for the table are revoked.
-If you revoke a privilege in ANSI SQL, all privileges that were granted based
-on this privilege are also revoked. In @strong{MySQL}, privileges can be
-dropped only with explicit @code{REVOKE} commands or by manipulating the
-@strong{MySQL} grant tables.
-@end itemize
+@findex CREATE INDEX
@cindex indexes
@cindex indexes, multi-part
@cindex multi-part index
-@findex CREATE INDEX
-@node CREATE INDEX, DROP INDEX, GRANT, Reference
-@section @code{CREATE INDEX} Syntax
@example
CREATE [UNIQUE|FULLTEXT] INDEX index_name ON tbl_name (col_name[(length)],... )
@@ -25590,7 +34178,7 @@ used them.
@cindex MySQL table types
@cindex @code{MyISAM} table type
@cindex types, of tables
-@node Table types, Tutorial, Reference, Top
+@node Table types, Fulltext Search, Reference, Top
@chapter MySQL Table Types
As of @strong{MySQL} Version 3.23.6, you can choose between three basic
@@ -27852,7 +36440,7 @@ For disaster recovery, one should use table backups plus
@strong{MySQL}'s binary log. @xref{Backup}.
@strong{Warning}: If you delete old log files that are in use, BDB will
-not be able to do recovery at all and you may loose data if something
+not be able to do recovery at all and you may lose data if something
goes wrong.
@item
@strong{MySQL} requires a @code{PRIMARY KEY} in each BDB table to be
@@ -28005,3714 +36593,12 @@ not trivial).
@end itemize
-@cindex tutorial
-@cindex terminal monitor, defined
-@cindex monitor, terminal
-@cindex options, provided by MySQL
-@node Tutorial, Server, Table types, Top
-@chapter MySQL Tutorial
-
-@menu
-* Connecting-disconnecting:: Connecting to and disconnecting from the server
-* Entering queries:: Entering queries
-* Database use:: Creating and using a database
-* Getting information:: Getting information about databases and tables
-* Examples:: Examples
-* Batch mode:: Using @code{mysql} in batch mode
-* Twin:: Queries from twin project
-@end menu
-
-This chapter provides a tutorial introduction to @strong{MySQL} by showing
-how to use the @code{mysql} client program to create and use a simple
-database. @code{mysql} (sometimes referred to as the ``terminal monitor'' or
-just ``monitor'') is an interactive program that allows you to connect to a
-@strong{MySQL} server, run queries, and view the results. @code{mysql} may
-also be used in batch mode: you place your queries in a file beforehand, then
-tell @code{mysql} to execute the contents of the file. Both ways of using
-@code{mysql} are covered here.
-
-To see a list of options provided by @code{mysql}, invoke it with
-the @code{--help} option:
-
-@example
-shell> mysql --help
-@end example
-
-This chapter assumes that @code{mysql} is installed on your machine and that
-a @strong{MySQL} server is available to which you can connect. If this is
-not true, contact your @strong{MySQL} administrator. (If @emph{you} are the
-administrator, you will need to consult other sections of this manual.)
-
-This chapter describes the entire process of setting up and using a
-database. If you are interested only in accessing an already-existing
-database, you may want to skip over the sections that describe how to
-create the database and the tables it contains.
-
-Because this chapter is tutorial in nature, many details are necessarily left
-out. Consult the relevant sections of the manual for more
-information on the topics covered here.
-
-@cindex connecting, to the server
-@cindex disconnecting, from the server
-@cindex server, connecting
-@cindex server, disconnecting
-@node Connecting-disconnecting, Entering queries, Tutorial, Tutorial
-@section Connecting to and Disconnecting from the Server
-
-To connect to the server, you'll usually need to provide a @strong{MySQL}
-user name when you invoke @code{mysql} and, most likely, a password. If the
-server runs on a machine other than the one where you log in, you'll also
-need to specify a hostname. Contact your administrator to find out what
-connection parameters you should use to connect (that is, what host, user name,
-and password to use). Once you know the proper parameters, you should be
-able to connect like this:
-
-@example
-shell> mysql -h host -u user -p
-Enter password: ********
-@end example
-
-The @code{********} represents your password; enter it when @code{mysql}
-displays the @code{Enter password:} prompt.
-
-If that works, you should see some introductory information followed by a
-@code{mysql>} prompt:
-
-
-@example
-shell> mysql -h host -u user -p
-Enter password: ********
-Welcome to the MySQL monitor. Commands end with ; or \g.
-Your MySQL connection id is 459 to server version: 3.22.20a-log
-
-Type 'help' for help.
-
-mysql>
-@end example
-
-The prompt tells you that @code{mysql} is ready for you to enter commands.
-
-Some @strong{MySQL} installations allow users to connect as the anonymous
-(unnamed) user to the server running on the local host. If this is the case
-on your machine, you should be able to connect to that server by invoking
-@code{mysql} without any options:
-
-@example
-shell> mysql
-@end example
-
-After you have connected successfully, you can disconnect any time by typing
-@code{QUIT} at the @code{mysql>} prompt:
-
-@example
-mysql> QUIT
-Bye
-@end example
-
-You can also disconnect by pressing Control-D.
-
-Most examples in the following sections assume you are connected to the
-server. They indicate this by the @code{mysql>} prompt.
-
-@cindex running, queries
-@cindex queries, entering
-@cindex entering, queries
-@node Entering queries, Database use, Connecting-disconnecting, Tutorial
-@section Entering Queries
-
-Make sure you are connected to the server, as discussed in the previous
-section. Doing so will not in itself select any database to work with, but
-that's okay. At this point, it's more important to find out a little about
-how to issue queries than to jump right in creating tables, loading data
-into them, and retrieving data from them. This section describes the basic
-principles of entering commands, using several queries you can try out to
-familiarize yourself with how @code{mysql} works.
-
-Here's a simple command that asks the server to tell you its version number
-and the current date. Type it in as shown below following the @code{mysql>}
-prompt and hit the RETURN key:
-
-@example
-mysql> SELECT VERSION(), CURRENT_DATE;
-+--------------+--------------+
-| version() | CURRENT_DATE |
-+--------------+--------------+
-| 3.22.20a-log | 1999-03-19 |
-+--------------+--------------+
-1 row in set (0.01 sec)
-mysql>
-@end example
-
-This query illustrates several things about @code{mysql}:
-
-@itemize @bullet
-@item
-A command normally consists of a SQL statement followed by a semicolon.
-(There are some exceptions where a semicolon is not needed. @code{QUIT},
-mentioned earlier, is one of them. We'll get to others later.)
-
-@item
-When you issue a command, @code{mysql} sends it to the server for execution
-and displays the results, then prints another @code{mysql>} to indicate
-that it is ready for another command.
-
-@item
-@code{mysql} displays query output as a table (rows and columns). The first
-row contains labels for the columns. The rows following are the query
-results. Normally, column labels are the names of the columns you fetch from
-database tables. If you're retrieving the value of an expression rather than
-a table column (as in the example just shown), @code{mysql} labels the column
-using the expression itself.
-
-@item
-@code{mysql} shows how many rows were returned and how long the query took
-to execute, which gives you a rough idea of server performance. These values
-are imprecise because they represent wall clock time (not CPU or machine
-time), and because they are affected by factors such as server load and
-network latency. (For brevity, the ``rows in set'' line is not shown in
-the remaining examples in this chapter.)
-@end itemize
-
-Keywords may be entered in any lettercase. The following queries are
-equivalent:
-
-@example
-mysql> SELECT VERSION(), CURRENT_DATE;
-mysql> select version(), current_date;
-mysql> SeLeCt vErSiOn(), current_DATE;
-@end example
-
-Here's another query. It demonstrates that you can use @code{mysql} as a
-simple calculator:
-
-@example
-mysql> SELECT SIN(PI()/4), (4+1)*5;
-+-------------+---------+
-| SIN(PI()/4) | (4+1)*5 |
-+-------------+---------+
-|    0.707107 |      25 |
-+-------------+---------+
-@end example
-
-The commands shown thus far have been relatively short, single-line
-statements. You can even enter multiple statements on a single line.
-Just end each one with a semicolon:
-
-@example
-mysql> SELECT VERSION(); SELECT NOW();
-+--------------+
-| version()    |
-+--------------+
-| 3.22.20a-log |
-+--------------+
-
-+---------------------+
-| NOW()               |
-+---------------------+
-| 1999-03-19 00:15:33 |
-+---------------------+
-@end example
-
-A command need not be given all on a single line, so lengthy commands that
-require several lines are not a problem. @code{mysql} determines where your
-statement ends by looking for the terminating semicolon, not by looking for
-the end of the input line. (In other words, @code{mysql}
-accepts free-format input: it collects input lines but does not execute them
-until it sees the semicolon.)
-
-Here's a simple multiple-line statement:
-
-@example
-mysql> SELECT
-    -> USER()
-    -> ,
-    -> CURRENT_DATE;
-+--------------------+--------------+
-| USER()             | CURRENT_DATE |
-+--------------------+--------------+
-| joesmith@@localhost | 1999-03-18   |
-+--------------------+--------------+
-@end example
-
-In this example, notice how the prompt changes from @code{mysql>} to
-@code{->} after you enter the first line of a multiple-line query. This is
-how @code{mysql} indicates that it hasn't seen a complete statement and is
-waiting for the rest. The prompt is your friend, because it provides
-valuable feedback. If you use that feedback, you will always be aware of
-what @code{mysql} is waiting for.
-
-If you decide you don't want to execute a command that you are in the
-process of entering, cancel it by typing @code{\c}:
-
-@example
-mysql> SELECT
-    -> USER()
-    -> \c
-mysql>
-@end example
-
-Here, too, notice the prompt. It switches back to @code{mysql>} after you
-type @code{\c}, providing feedback to indicate that @code{mysql} is ready
-for a new command.
-
-The following table shows each of the prompts you may see and summarizes what
-they mean about the state that @code{mysql} is in:
-
-@cindex prompts, meanings
-@multitable @columnfractions .10 .9
-@item @strong{Prompt} @tab @strong{Meaning}
-@item @code{mysql>} @tab Ready for new command.
-@item @code{@ @ @ @ ->} @tab Waiting for next line of multiple-line command.
-@item @code{@ @ @ @ '>} @tab Waiting for next line, collecting a string that begins
-with a single quote (@samp{'}).
-@item @code{@ @ @ @ ">} @tab Waiting for next line, collecting a string that begins
-with a double quote (@samp{"}).
-@end multitable
-
-Multiple-line statements commonly occur by accident when you intend to
-issue a command on a single line, but forget the terminating semicolon. In
-this case, @code{mysql} waits for more input:
-
-@example
-mysql> SELECT USER()
-    ->
-@end example
-
-If this happens to you (you think you've entered a statement but the only
-response is a @code{->} prompt), most likely @code{mysql} is waiting for the
-semicolon. If you don't notice what the prompt is telling you, you might sit
-there for a while before realizing what you need to do. Enter a semicolon to
-complete the statement, and @code{mysql} will execute it:
-
-@example
-mysql> SELECT USER()
-    -> ;
-+--------------------+
-| USER()             |
-+--------------------+
-| joesmith@@localhost |
-+--------------------+
-@end example
-
-The @code{'>} and @code{">} prompts occur during string collection.
-In @strong{MySQL}, you can write strings surrounded by either @samp{'}
-or @samp{"} characters (for example, @code{'hello'} or @code{"goodbye"}),
-and @code{mysql} lets you enter strings that span multiple lines. When you
-see a @code{'>} or @code{">} prompt, it means that you've entered a line
-containing a string that begins with a @samp{'} or @samp{"} quote character,
-but have not yet entered the matching quote that terminates the string.
-That's fine if you really are entering a multiple-line string, but how likely
-is that? Not very. More often, the @code{'>} and @code{">} prompts indicate
-that you've inadvertently left out a quote character. For example:
-
-@example
-mysql> SELECT * FROM my_table WHERE name = "Smith AND age < 30;
- ">
-@end example
-
-If you enter this @code{SELECT} statement, then hit RETURN and wait for the
-result, nothing will happen. Instead of wondering why this
-query takes so long, notice the clue provided by the @code{">} prompt. It
-tells you that @code{mysql} expects to see the rest of an unterminated
-string. (Do you see the error in the statement? The string @code{"Smith} is
-missing the second quote.)
-
-At this point, what do you do? The simplest thing is to cancel the command.
-However, you cannot just type @code{\c} in this case, because @code{mysql}
-interprets it as part of the string that it is collecting! Instead, enter
-the closing quote character (so @code{mysql} knows you've finished the
-string), then type @code{\c}:
-
-@example
-mysql> SELECT * FROM my_table WHERE name = "Smith AND age < 30;
- "> "\c
-mysql>
-@end example
-
-The prompt changes back to @code{mysql>}, indicating that @code{mysql}
-is ready for a new command.
-
-It's important to know what the @code{'>} and @code{">} prompts signify,
-because if you mistakenly enter an unterminated string, any further lines you
-type will appear to be ignored by @code{mysql} --- including a line
-containing @code{QUIT}! This can be quite confusing, especially if you
-don't know that you need to supply the terminating quote before you can
-cancel the current command.
-
-@cindex databases, creating
-@cindex databases, using
-@cindex creating, databases
-@node Database use, Getting information, Entering queries, Tutorial
-@section Creating and Using a Database
-
-@menu
-* Creating database:: Creating a database
-* Creating tables:: Creating a table
-* Loading tables:: Loading data into a table
-* Retrieving data:: Retrieving information from a table
-@end menu
-
-Now that you know how to enter commands, it's time to access a database.
-
-Suppose you have several pets in your home (your menagerie) and you'd
-like to keep track of various types of information about them. You can do so
-by creating tables to hold your data and loading them with the desired
-information. Then you can answer different sorts of questions about your
-animals by retrieving data from the tables. This section shows you how to:
-
-@itemize @bullet
-@item
-Create a database
-@item
-Create a table
-@item
-Load data into the table
-@item
-Retrieve data from the table in various ways
-@item
-Use multiple tables
-@end itemize
-
-The menagerie database will be simple (deliberately), but it is not difficult
-to think of real-world situations in which a similar type of database might
-be used. For example, a database like this could be used by a farmer to keep
-track of livestock, or by a veterinarian to keep track of patient records.
-A menagerie distribution containing some of the queries and sample data used
-in the following sections can be obtained from the @strong{MySQL} Web site.
-It's available in either
-@uref{http://www.mysql.com/Downloads/Contrib/Examples/menagerie.tar.gz,compressed @code{tar} format}
-or
-@uref{http://www.mysql.com/Downloads/Contrib/Examples/menagerie.zip,Zip format}.
-
-Use the @code{SHOW} statement to find out what databases currently exist
-on the server:
-
-@example
-mysql> SHOW DATABASES;
-+----------+
-| Database |
-+----------+
-| mysql    |
-| test     |
-| tmp      |
-+----------+
-@end example
-
-The list of databases is probably different on your machine, but the
-@code{mysql} and @code{test} databases are likely to be among them. The
-@code{mysql} database is required because it describes user access
-privileges. The @code{test} database is often provided as a workspace for
-users to try things out.
-
-If the @code{test} database exists, try to access it:
-
-@example
-mysql> USE test
-Database changed
-@end example
-
-Note that @code{USE}, like @code{QUIT}, does not require a semicolon. (You
-can terminate such statements with a semicolon if you like; it does no harm.)
-The @code{USE} statement is special in another way, too: it must be given on
-a single line.
-
-You can use the @code{test} database (if you have access to it) for the
-examples that follow, but anything you create in that database can be
-removed by anyone else with access to it. For this reason, you should
-probably ask your @strong{MySQL} administrator for permission to use a
-database of your own. Suppose you want to call yours @code{menagerie}. The
-administrator needs to execute a command like this:
-
-@example
-mysql> GRANT ALL ON menagerie.* TO your_mysql_name;
-@end example
-
-where @code{your_mysql_name} is the @strong{MySQL} user name assigned to
-you.
-
-@cindex selecting, databases
-@cindex databases, selecting
-@node Creating database, Creating tables, Database use, Database use
-@subsection Creating and Selecting a Database
-
-If the administrator creates your database for you when setting up your
-permissions, you can begin using it. Otherwise, you need to create it
-yourself:
-
-@example
-mysql> CREATE DATABASE menagerie;
-@end example
-
-Under Unix, database names are case sensitive (unlike SQL keywords), so you
-must always refer to your database as @code{menagerie}, not as
-@code{Menagerie}, @code{MENAGERIE}, or some other variant. This is also true
-for table names. (Under Windows, this restriction does not apply, although
-you must refer to databases and tables using the same lettercase throughout a
-given query.)
-
-Creating a database does not select it for use; you must do that explicitly.
-To make @code{menagerie} the current database, use this command:
-
-@example
-mysql> USE menagerie
-Database changed
-@end example
-
-Your database needs to be created only once, but you must select it for use
-each time you begin a @code{mysql} session. You can do this by issuing a
-@code{USE} statement as shown above. Alternatively, you can select the
-database on the command line when you invoke @code{mysql}. Just specify its
-name after any connection parameters that you might need to provide. For
-example:
-
-@example
-shell> mysql -h host -u user -p menagerie
-Enter password: ********
-@end example
-
-Note that @code{menagerie} is not your password on the command just shown.
-If you want to supply your password on the command line after the @code{-p}
-option, you must do so with no intervening space (for example, as
-@code{-pmypassword}, not as @code{-p mypassword}). However, putting your
-password on the command line is not recommended, because doing so exposes it
-to snooping by other users logged in on your machine.
-
-@cindex tables, creating
-@cindex creating, tables
-@node Creating tables, Loading tables, Creating database, Database use
-@subsection Creating a Table
-
-Creating the database is the easy part, but at this point it's empty, as
-@code{SHOW TABLES} will tell you:
-
-@example
-mysql> SHOW TABLES;
-Empty set (0.00 sec)
-@end example
-
-The harder part is deciding what the structure of your database should be:
-what tables you will need and what columns will be in each of them.
-
-You'll want a table that contains a record for each of your pets. This can
-be called the @code{pet} table, and it should contain, as a bare minimum,
-each animal's name. Because the name by itself is not very interesting, the
-table should contain other information. For example, if more than one person
-in your family keeps pets, you might want to list each animal's owner. You
-might also want to record some basic descriptive information such as species
-and sex.
-
-How about age? That might be of interest, but it's not a good thing to store
-in a database. Age changes as time passes, which means you'd have to update
-your records often. Instead, it's better to store a fixed value such as
-date of birth. Then, whenever you need age, you can calculate it as the
-difference between the current date and the birth date. @strong{MySQL}
-provides functions for doing date arithmetic, so this is not difficult.
-Storing birth date rather than age has other advantages, too:
-
-@itemize @bullet
-@item
-You can use the database for tasks such as generating reminders for upcoming
-pet birthdays. (If you think this type of query is somewhat silly, note that
-it is the same question you might ask in the context of a business database
-to identify clients to whom you'll soon need to send out birthday greetings,
-for that computer-assisted personal touch.)
-
-@item
-You can calculate age in relation to dates other than the current date. For
-example, if you store death date in the database, you can easily calculate
-how old a pet was when it died.
-@end itemize
-
-You can probably think of other types of information that would be useful in
-the @code{pet} table, but the ones identified so far are sufficient for now:
-name, owner, species, sex, birth, and death.
-
-Use a @code{CREATE TABLE} statement to specify the layout of your table:
-
-@example
-mysql> CREATE TABLE pet (name VARCHAR(20), owner VARCHAR(20),
- -> species VARCHAR(20), sex CHAR(1), birth DATE, death DATE);
-@end example
-
-@code{VARCHAR} is a good choice for the @code{name}, @code{owner}, and
-@code{species} columns because the column values will vary in length. The
-lengths of those columns need not all be the same, and need not be
-@code{20}. You can pick any length from @code{1} to @code{255}, whatever
-seems most reasonable to you. (If you make a poor choice and it turns
-out later that you need a longer field, @strong{MySQL} provides an
-@code{ALTER TABLE} statement.)
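-
-For example, if you decide later that 20 characters is too short for pet
-names, you could widen the column with a statement like this (the new
-length of 50 is only an illustration):
-
-@example
-mysql> ALTER TABLE pet MODIFY name VARCHAR(50);
-@end example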
-
-Animal sex can be represented in a variety of ways, for example, @code{"m"}
-and @code{"f"}, or perhaps @code{"male"} and @code{"female"}. It's simplest
-to use the single characters @code{"m"} and @code{"f"}.
-
-The use of the @code{DATE} data type for the @code{birth} and @code{death}
-columns is a fairly obvious choice.
-
-Now that you have created a table, @code{SHOW TABLES} should produce some
-output:
-
-@example
-mysql> SHOW TABLES;
-+---------------------+
-| Tables in menagerie |
-+---------------------+
-| pet                 |
-+---------------------+
-@end example
-
-To verify that your table was created the way you expected, use
-a @code{DESCRIBE} statement:
-
-@example
-mysql> DESCRIBE pet;
-+---------+-------------+------+-----+---------+-------+
-| Field   | Type        | Null | Key | Default | Extra |
-+---------+-------------+------+-----+---------+-------+
-| name    | varchar(20) | YES  |     | NULL    |       |
-| owner   | varchar(20) | YES  |     | NULL    |       |
-| species | varchar(20) | YES  |     | NULL    |       |
-| sex     | char(1)     | YES  |     | NULL    |       |
-| birth   | date        | YES  |     | NULL    |       |
-| death   | date        | YES  |     | NULL    |       |
-+---------+-------------+------+-----+---------+-------+
-@end example
-
-You can use @code{DESCRIBE} any time, for example, if you forget the names of
-the columns in your table or what types they are.
-
-@cindex loading, tables
-@cindex tables, loading data
-@cindex data, loading into tables
-@node Loading tables, Retrieving data, Creating tables, Database use
-@subsection Loading Data into a Table
-
-After creating your table, you need to populate it. The @code{LOAD DATA} and
-@code{INSERT} statements are useful for this.
-
-Suppose your pet records can be described as shown below.
-(Observe that @strong{MySQL} expects dates in @code{YYYY-MM-DD} format;
-this may be different than what you are used to.)
-
-@multitable @columnfractions .16 .16 .16 .16 .16 .16
-@item @strong{name} @tab @strong{owner} @tab @strong{species} @tab @strong{sex} @tab @strong{birth} @tab @strong{death}
-@item Fluffy @tab Harold @tab cat @tab f @tab 1993-02-04 @tab
-@item Claws @tab Gwen @tab cat @tab m @tab 1994-03-17 @tab
-@item Buffy @tab Harold @tab dog @tab f @tab 1989-05-13 @tab
-@item Fang @tab Benny @tab dog @tab m @tab 1990-08-27 @tab
-@item Bowser @tab Diane @tab dog @tab m @tab 1989-08-31 @tab 1995-07-29
-@item Chirpy @tab Gwen @tab bird @tab f @tab 1998-09-11 @tab
-@item Whistler @tab Gwen @tab bird @tab @tab 1997-12-09 @tab
-@item Slim @tab Benny @tab snake @tab m @tab 1996-04-29 @tab
-@end multitable
-
-Because you are beginning with an empty table, an easy way to populate it is to
-create a text file containing a row for each of your animals, then load the
-contents of the file into the table with a single statement.
-
-You could create a text file @file{pet.txt} containing one record per line,
-with values separated by tabs, and given in the order in which the columns
-were listed in the @code{CREATE TABLE} statement. For missing values (such
-as unknown sexes or death dates for animals that are still living), you can
-use @code{NULL} values. To represent these in your text file, use
-@code{\N}. For example, the record for Whistler the bird would look like
-this (where the whitespace between values is a single tab character):
-
-@multitable @columnfractions .15 .15 .15 .15 .25 .15
-@item @code{Whistler} @tab @code{Gwen} @tab @code{bird} @tab @code{\N} @tab @code{1997-12-09} @tab @code{\N}
-@end multitable
-
-To load the text file @file{pet.txt} into the @code{pet} table, use this
-command:
-
-@example
-mysql> LOAD DATA LOCAL INFILE "pet.txt" INTO TABLE pet;
-@end example
-
-You can specify the column value separator and end of line marker explicitly
-in the @code{LOAD DATA} statement if you wish, but the defaults are tab and
-linefeed. These are sufficient for the statement to read the file
-@file{pet.txt} properly.
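-
-If you wanted to spell out those defaults explicitly, the statement could
-also be written like this:
-
-@example
-mysql> LOAD DATA LOCAL INFILE "pet.txt" INTO TABLE pet
-    -> FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n';
-@end example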
-
-When you want to add new records one at a time, the @code{INSERT} statement
-is useful. In its simplest form, you supply values for each column, in the
-order in which the columns were listed in the @code{CREATE TABLE} statement.
-Suppose Diane gets a new hamster named Puffball. You could add a new record
-using an @code{INSERT} statement like this:
-
-@example
-mysql> INSERT INTO pet
- -> VALUES ('Puffball','Diane','hamster','f','1999-03-30',NULL);
-@end example
-
-Note that string and date values are specified as quoted strings here. Also,
-with @code{INSERT}, you can insert @code{NULL} directly to represent a
-missing value. You do not use @code{\N} like you do with @code{LOAD DATA}.
-
-From this example, you should be able to see that there would be a lot more
-typing involved to load
-your records initially using several @code{INSERT} statements rather
-than a single @code{LOAD DATA} statement.
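-
-@strong{MySQL} does allow a single @code{INSERT} statement to supply
-several value lists at once, which cuts down on the typing somewhat.
-For example, the first two records could have been added together like
-this:
-
-@example
-mysql> INSERT INTO pet VALUES
-    -> ('Fluffy','Harold','cat','f','1993-02-04',NULL),
-    -> ('Claws','Gwen','cat','m','1994-03-17',NULL);
-@end example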
-
-@cindex data, retrieving
-@cindex tables, retrieving data
-@cindex retrieving, data from tables
-@cindex unloading, tables
-@node Retrieving data, , Loading tables, Database use
-@subsection Retrieving Information from a Table
-
-@menu
-* Selecting all:: Selecting all data
-* Selecting rows:: Selecting particular rows
-* Selecting columns:: Selecting particular columns
-* Sorting rows:: Sorting rows
-* Date calculations:: Date calculations
-* Working with NULL:: Working with @code{NULL} values
-* Pattern matching:: Pattern matching
-* Counting rows:: Counting rows
-* Multiple tables:: Using more than one table
-@end menu
-
-The @code{SELECT} statement is used to pull information from a table.
-The general form of the statement is:
-
-@example
-SELECT what_to_select
-FROM which_table
-WHERE conditions_to_satisfy
-@end example
-
-@code{what_to_select} indicates what you want to see. This can be a list of
-columns, or @code{*} to indicate ``all columns.'' @code{which_table}
-indicates the table from which you want to retrieve data. The @code{WHERE}
-clause is optional. If it's present, @code{conditions_to_satisfy} specifies
-conditions that rows must satisfy to qualify for retrieval.
-
-@node Selecting all, Selecting rows, Retrieving data, Retrieving data
-@subsubsection Selecting All Data
-
-The simplest form of @code{SELECT} retrieves everything from a table:
-
-@example
-mysql> SELECT * FROM pet;
-+----------+--------+---------+------+------------+------------+
-| name     | owner  | species | sex  | birth      | death      |
-+----------+--------+---------+------+------------+------------+
-| Fluffy   | Harold | cat     | f    | 1993-02-04 | NULL       |
-| Claws    | Gwen   | cat     | m    | 1994-03-17 | NULL       |
-| Buffy    | Harold | dog     | f    | 1989-05-13 | NULL       |
-| Fang     | Benny  | dog     | m    | 1990-08-27 | NULL       |
-| Bowser   | Diane  | dog     | m    | 1998-08-31 | 1995-07-29 |
-| Chirpy   | Gwen   | bird    | f    | 1998-09-11 | NULL       |
-| Whistler | Gwen   | bird    | NULL | 1997-12-09 | NULL       |
-| Slim     | Benny  | snake   | m    | 1996-04-29 | NULL       |
-| Puffball | Diane  | hamster | f    | 1999-03-30 | NULL       |
-+----------+--------+---------+------+------------+------------+
-@end example
-
-This form of @code{SELECT} is useful if you want to review your entire table,
-for instance, after you've just loaded it with your initial dataset. As it
-happens, the output just shown reveals an error in your data file: Bowser
-appears to have been born after he died! Consulting your original pedigree
-papers, you find that the correct birth year is 1989, not 1998.
-
-There are at least a couple of ways to fix this:
-
-@itemize @bullet
-@item
-Edit the file @file{pet.txt} to correct the error, then empty the table
-and reload it using @code{DELETE} and @code{LOAD DATA}:
-
-@example
-mysql> SET AUTOCOMMIT=1; # Used for quick re-create of the table
-mysql> DELETE FROM pet;
-mysql> LOAD DATA LOCAL INFILE "pet.txt" INTO TABLE pet;
-@end example
-
-However, if you do this, you must also re-enter the record for Puffball.
-
-@item
-Fix only the erroneous record with an @code{UPDATE} statement:
-
-@example
-mysql> UPDATE pet SET birth = "1989-08-31" WHERE name = "Bowser";
-@end example
-@end itemize
-
-As shown above, it is easy to retrieve an entire table. But typically you
-don't want to do that, particularly when the table becomes large. Instead,
-you're usually more interested in answering a particular question, in which
-case you specify some constraints on the information you want. Let's look at
-some selection queries in terms of questions about your pets that they
-answer.
-
-@cindex rows, selecting
-@cindex tables, selecting rows
-@node Selecting rows, Selecting columns, Selecting all, Retrieving data
-@subsubsection Selecting Particular Rows
-
-You can select only particular rows from your table. For example, if you want
-to verify the change that you made to Bowser's birth date, select Bowser's
-record like this:
-
-@example
-mysql> SELECT * FROM pet WHERE name = "Bowser";
-+--------+-------+---------+------+------------+------------+
-| name | owner | species | sex | birth | death |
-+--------+-------+---------+------+------------+------------+
-| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
-+--------+-------+---------+------+------------+------------+
-@end example
-
-The output confirms that the year is correctly recorded now as 1989, not 1998.
-
-String comparisons are normally case insensitive, so you can specify the
-name as @code{"bowser"}, @code{"BOWSER"}, etc. The query result will be
-the same.
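-
-For example, this query returns the same row as the one just shown:
-
-@example
-mysql> SELECT * FROM pet WHERE name = "BOWSER";
-@end example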
-
-You can specify conditions on any column, not just @code{name}. For example,
-if you want to know which animals were born after 1998, test the @code{birth}
-column:
-
-@example
-mysql> SELECT * FROM pet WHERE birth >= "1998-1-1";
-+----------+-------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+----------+-------+---------+------+------------+-------+
-| Chirpy | Gwen | bird | f | 1998-09-11 | NULL |
-| Puffball | Diane | hamster | f | 1999-03-30 | NULL |
-+----------+-------+---------+------+------------+-------+
-@end example
-
-You can combine conditions, for example, to locate female dogs:
-
-@example
-mysql> SELECT * FROM pet WHERE species = "dog" AND sex = "f";
-+-------+--------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+-------+--------+---------+------+------------+-------+
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-+-------+--------+---------+------+------------+-------+
-@end example
-
-The preceding query uses the @code{AND} logical operator. There is also an
-@code{OR} operator:
-
-@example
-mysql> SELECT * FROM pet WHERE species = "snake" OR species = "bird";
-+----------+-------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+----------+-------+---------+------+------------+-------+
-| Chirpy | Gwen | bird | f | 1998-09-11 | NULL |
-| Whistler | Gwen | bird | NULL | 1997-12-09 | NULL |
-| Slim | Benny | snake | m | 1996-04-29 | NULL |
-+----------+-------+---------+------+------------+-------+
-@end example
-
-@code{AND} and @code{OR} may be intermixed. If you do that, it's a good idea
-to use parentheses to indicate how conditions should be grouped:
-
-@example
-mysql> SELECT * FROM pet WHERE (species = "cat" AND sex = "m")
- -> OR (species = "dog" AND sex = "f");
-+-------+--------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+-------+--------+---------+------+------------+-------+
-| Claws | Gwen | cat | m | 1994-03-17 | NULL |
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-+-------+--------+---------+------+------------+-------+
-@end example
-
-@cindex columns, selecting
-@cindex tables, selecting columns
-@node Selecting columns, Sorting rows, Selecting rows, Retrieving data
-@subsubsection Selecting Particular Columns
-
-
-If you don't want to see entire rows from your table, just name the columns
-in which you're interested, separated by commas. For example, if you want to
-know when your animals were born, select the @code{name} and @code{birth}
-columns:
-
-@example
-mysql> SELECT name, birth FROM pet;
-+----------+------------+
-| name | birth |
-+----------+------------+
-| Fluffy | 1993-02-04 |
-| Claws | 1994-03-17 |
-| Buffy | 1989-05-13 |
-| Fang | 1990-08-27 |
-| Bowser | 1989-08-31 |
-| Chirpy | 1998-09-11 |
-| Whistler | 1997-12-09 |
-| Slim | 1996-04-29 |
-| Puffball | 1999-03-30 |
-+----------+------------+
-@end example
-
-To find out who owns pets, use this query:
-
-@example
-mysql> SELECT owner FROM pet;
-+--------+
-| owner |
-+--------+
-| Harold |
-| Gwen |
-| Harold |
-| Benny |
-| Diane |
-| Gwen |
-| Gwen |
-| Benny |
-| Diane |
-+--------+
-@end example
-
-@findex DISTINCT
-However, notice that the query simply retrieves the @code{owner} field from
-each record, and some of them appear more than once. To minimize the output,
-retrieve each unique output record just once by adding the keyword
-@code{DISTINCT}:
-
-@example
-mysql> SELECT DISTINCT owner FROM pet;
-+--------+
-| owner |
-+--------+
-| Benny |
-| Diane |
-| Gwen |
-| Harold |
-+--------+
-@end example
-
-You can use a @code{WHERE} clause to combine row selection with column
-selection. For example, to get birth dates for dogs and cats only,
-use this query:
-
-@example
-mysql> SELECT name, species, birth FROM pet
- -> WHERE species = "dog" OR species = "cat";
-+--------+---------+------------+
-| name | species | birth |
-+--------+---------+------------+
-| Fluffy | cat | 1993-02-04 |
-| Claws | cat | 1994-03-17 |
-| Buffy | dog | 1989-05-13 |
-| Fang | dog | 1990-08-27 |
-| Bowser | dog | 1989-08-31 |
-+--------+---------+------------+
-@end example
-
-@cindex rows, sorting
-@cindex sorting, table rows
-@cindex sorting, data
-@cindex tables, sorting rows
-@node Sorting rows, Date calculations, Selecting columns, Retrieving data
-@subsubsection Sorting Rows
-
-You may have noticed in the preceding examples that the result rows are
-displayed in no particular order. However, it's often easier to examine
-query output when the rows are sorted in some meaningful way. To sort a
-result, use an @code{ORDER BY} clause.
-
-Here are animal birthdays, sorted by date:
-
-@example
-mysql> SELECT name, birth FROM pet ORDER BY birth;
-+----------+------------+
-| name | birth |
-+----------+------------+
-| Buffy | 1989-05-13 |
-| Bowser | 1989-08-31 |
-| Fang | 1990-08-27 |
-| Fluffy | 1993-02-04 |
-| Claws | 1994-03-17 |
-| Slim | 1996-04-29 |
-| Whistler | 1997-12-09 |
-| Chirpy | 1998-09-11 |
-| Puffball | 1999-03-30 |
-+----------+------------+
-@end example
-
-To sort in reverse order, add the @code{DESC} (descending) keyword to the
-name of the column you are sorting by:
-
-@example
-mysql> SELECT name, birth FROM pet ORDER BY birth DESC;
-+----------+------------+
-| name | birth |
-+----------+------------+
-| Puffball | 1999-03-30 |
-| Chirpy | 1998-09-11 |
-| Whistler | 1997-12-09 |
-| Slim | 1996-04-29 |
-| Claws | 1994-03-17 |
-| Fluffy | 1993-02-04 |
-| Fang | 1990-08-27 |
-| Bowser | 1989-08-31 |
-| Buffy | 1989-05-13 |
-+----------+------------+
-@end example
-
-You can sort on multiple columns. For example, to sort by type of
-animal, then by birth date within animal type with youngest animals first,
-use the following query:
-
-@example
-mysql> SELECT name, species, birth FROM pet ORDER BY species, birth DESC;
-+----------+---------+------------+
-| name | species | birth |
-+----------+---------+------------+
-| Chirpy | bird | 1998-09-11 |
-| Whistler | bird | 1997-12-09 |
-| Claws | cat | 1994-03-17 |
-| Fluffy | cat | 1993-02-04 |
-| Fang | dog | 1990-08-27 |
-| Bowser | dog | 1989-08-31 |
-| Buffy | dog | 1989-05-13 |
-| Puffball | hamster | 1999-03-30 |
-| Slim | snake | 1996-04-29 |
-+----------+---------+------------+
-@end example
-
-Note that the @code{DESC} keyword applies only to the column name immediately
-preceding it (@code{birth}); @code{species} values are still sorted in
-ascending order.
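-
-To sort both columns in descending order, add @code{DESC} to each of
-them:
-
-@example
-mysql> SELECT name, species, birth FROM pet
-    -> ORDER BY species DESC, birth DESC;
-@end example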
-
-@cindex date calculations
-@cindex calculating, dates
-@cindex extracting, dates
-@cindex age, calculating
-@node Date calculations, Working with NULL, Sorting rows, Retrieving data
-@subsubsection Date Calculations
-
-@strong{MySQL} provides several functions that you can use to perform
-calculations on dates, for example, to calculate ages or extract
-parts of dates.
-
-To determine how many years old each of your pets is, compute age as the
-difference between the birth date and the current date. Do this by
-converting the two dates to days, taking the difference, and dividing by
-365 (the number of days in a year):
-
-@example
-mysql> SELECT name, (TO_DAYS(NOW())-TO_DAYS(birth))/365 FROM pet;
-+----------+-------------------------------------+
-| name | (TO_DAYS(NOW())-TO_DAYS(birth))/365 |
-+----------+-------------------------------------+
-| Fluffy | 6.15 |
-| Claws | 5.04 |
-| Buffy | 9.88 |
-| Fang | 8.59 |
-| Bowser | 9.58 |
-| Chirpy | 0.55 |
-| Whistler | 1.30 |
-| Slim | 2.92 |
-| Puffball | 0.00 |
-+----------+-------------------------------------+
-@end example
-
-Although the query works, there are some things about it that could be
-improved. First, the result could be scanned more easily if the rows were
-presented in some order. Second, the heading for the age column isn't very
-meaningful.
-
-The first problem can be handled by adding an @code{ORDER BY name} clause to
-sort the output by name. To deal with the column heading, provide a name for
-the column so that a different label appears in the output (this is called a
-column alias):
-
-@example
-mysql> SELECT name, (TO_DAYS(NOW())-TO_DAYS(birth))/365 AS age
- -> FROM pet ORDER BY name;
-+----------+------+
-| name | age |
-+----------+------+
-| Bowser | 9.58 |
-| Buffy | 9.88 |
-| Chirpy | 0.55 |
-| Claws | 5.04 |
-| Fang | 8.59 |
-| Fluffy | 6.15 |
-| Puffball | 0.00 |
-| Slim | 2.92 |
-| Whistler | 1.30 |
-+----------+------+
-@end example
-
-To sort the output by @code{age} rather than @code{name}, just use a
-different @code{ORDER BY} clause:
-
-@example
-mysql> SELECT name, (TO_DAYS(NOW())-TO_DAYS(birth))/365 AS age
- -> FROM pet ORDER BY age;
-+----------+------+
-| name | age |
-+----------+------+
-| Puffball | 0.00 |
-| Chirpy | 0.55 |
-| Whistler | 1.30 |
-| Slim | 2.92 |
-| Claws | 5.04 |
-| Fluffy | 6.15 |
-| Fang | 8.59 |
-| Bowser | 9.58 |
-| Buffy | 9.88 |
-+----------+------+
-@end example
-
-A similar query can be used to determine age at death for animals that have
-died. You determine which animals these are by checking whether or not the
-@code{death} value is @code{NULL}. Then, for those with non-@code{NULL}
-values, compute the difference between the @code{death} and @code{birth}
-values:
-
-@example
-mysql> SELECT name, birth, death, (TO_DAYS(death)-TO_DAYS(birth))/365 AS age
- -> FROM pet WHERE death IS NOT NULL ORDER BY age;
-+--------+------------+------------+------+
-| name | birth | death | age |
-+--------+------------+------------+------+
-| Bowser | 1989-08-31 | 1995-07-29 | 5.91 |
-+--------+------------+------------+------+
-@end example
-
-The query uses @code{death IS NOT NULL} rather than @code{death != NULL}
-because @code{NULL} is a special value. This is explained later.
-@xref{Working with NULL, , Working with @code{NULL}}.
-
-What if you want to know which animals have birthdays next month? For this
-type of calculation, year and day are irrelevant; you simply want to extract
-the month part of the @code{birth} column. @strong{MySQL} provides several
-date-part extraction functions, such as @code{YEAR()}, @code{MONTH()}, and
-@code{DAYOFMONTH()}. @code{MONTH()} is the appropriate function here. To
-see how it works, run a simple query that displays the value of both
-@code{birth} and @code{MONTH(birth)}:
-
-@example
-mysql> SELECT name, birth, MONTH(birth) FROM pet;
-+----------+------------+--------------+
-| name | birth | MONTH(birth) |
-+----------+------------+--------------+
-| Fluffy | 1993-02-04 | 2 |
-| Claws | 1994-03-17 | 3 |
-| Buffy | 1989-05-13 | 5 |
-| Fang | 1990-08-27 | 8 |
-| Bowser | 1989-08-31 | 8 |
-| Chirpy | 1998-09-11 | 9 |
-| Whistler | 1997-12-09 | 12 |
-| Slim | 1996-04-29 | 4 |
-| Puffball | 1999-03-30 | 3 |
-+----------+------------+--------------+
-@end example
-
-Finding animals with birthdays in the upcoming month is easy, too. Suppose
-the current month is April. Then the month value is @code{4} and you look
-for animals born in May (month 5) like this:
-
-@example
-mysql> SELECT name, birth FROM pet WHERE MONTH(birth) = 5;
-+-------+------------+
-| name | birth |
-+-------+------------+
-| Buffy | 1989-05-13 |
-+-------+------------+
-@end example
-
-There is a small complication if the current month is December, of course.
-You don't just add one to the month number (@code{12}) and look for animals
-born in month 13, because there is no such month. Instead, you look for
-animals born in January (month 1).
-
-You can even write the query so that it works no matter what the current
-month is. That way you don't have to use a particular month number
-in the query. @code{DATE_ADD()} allows you to add a time interval to a
-given date. If you add a month to the value of @code{NOW()}, then extract
-the month part with @code{MONTH()}, the result produces the month in which to
-look for birthdays:
-
-@example
-mysql> SELECT name, birth FROM pet
- -> WHERE MONTH(birth) = MONTH(DATE_ADD(NOW(), INTERVAL 1 MONTH));
-@end example
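-
-You can check that this handles the December case correctly by seeing
-what the expression produces for an arbitrary date late in the year:
-
-@example
-mysql> SELECT MONTH(DATE_ADD("1998-12-15", INTERVAL 1 MONTH));
-@end example
-
-The result is @code{1}, so the query above would look for January
-birthdays, as desired.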
-
-A different way to accomplish the same task is to add @code{1} to get the
-next month after the current one, after first using the modulo function
-@code{MOD()} to wrap the month value around to @code{0} if it is
-currently @code{12}:
-
-@example
-mysql> SELECT name, birth FROM pet
- -> WHERE MONTH(birth) = MOD(MONTH(NOW()), 12) + 1;
-@end example
-
-Note that @code{MONTH()} returns a number between 1 and 12, whereas
-@code{MOD(something,12)} returns a number between 0 and 11. The addition
-therefore has to come after the @code{MOD()}; otherwise November (month 11)
-would wrap around to @code{0}, which is not a valid month number.
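-
-You can verify the wrap-around behavior directly; the first expression
-below corresponds to December, the second to April:
-
-@example
-mysql> SELECT MOD(12,12)+1, MOD(4,12)+1;
-@end example
-
-The results are @code{1} (January) and @code{5} (May), as expected.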
-
-@findex NULL
-@cindex NULL value
-@node Working with NULL, Pattern matching, Date calculations, Retrieving data
-@subsubsection Working with @code{NULL} Values
-
-The @code{NULL} value can be surprising until you get used to it.
-Conceptually, @code{NULL} means missing value or unknown value and it
-is treated somewhat differently than other values. To test for @code{NULL},
-you cannot use the arithmetic comparison operators such as @code{=}, @code{<},
-or @code{!=}. To demonstrate this for yourself, try the following query:
-
-@example
-mysql> SELECT 1 = NULL, 1 != NULL, 1 < NULL, 1 > NULL;
-+----------+-----------+----------+----------+
-| 1 = NULL | 1 != NULL | 1 < NULL | 1 > NULL |
-+----------+-----------+----------+----------+
-| NULL | NULL | NULL | NULL |
-+----------+-----------+----------+----------+
-@end example
-
-Clearly you get no meaningful results from these comparisons. Use
-the @code{IS NULL} and @code{IS NOT NULL} operators instead:
-
-@example
-mysql> SELECT 1 IS NULL, 1 IS NOT NULL;
-+-----------+---------------+
-| 1 IS NULL | 1 IS NOT NULL |
-+-----------+---------------+
-| 0 | 1 |
-+-----------+---------------+
-@end example
-
-In @strong{MySQL}, 0 or @code{NULL} means false and anything else means true.
-The default truth value from a boolean operation is 1.
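-
-For example, a comparison that succeeds returns @code{1}, one that fails
-returns @code{0}, and a comparison with @code{NULL} returns @code{NULL}:
-
-@example
-mysql> SELECT (1 = 1), (0 = 1), (1 = NULL);
-@end example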
-
-This special treatment of @code{NULL} is why, in the previous section, it
-was necessary to determine which animals are no longer alive using
-@code{death IS NOT NULL} instead of @code{death != NULL}.
-
-@cindex pattern matching
-@cindex matching, patterns
-@cindex expressions, extended
-@node Pattern matching, Counting rows, Working with NULL, Retrieving data
-@subsubsection Pattern Matching
-
-@strong{MySQL} provides standard SQL pattern matching as well as a form of
-pattern matching based on extended regular expressions similar to those used
-by Unix utilities such as @code{vi}, @code{grep}, and @code{sed}.
-
-SQL pattern matching allows you to use @samp{_} to match any single
-character and @samp{%} to match an arbitrary number of characters (including
-zero characters). In @strong{MySQL}, SQL patterns are case insensitive by
-default. Some examples are shown below. Note that you do not use @code{=}
-or @code{!=} when you use SQL patterns; use the @code{LIKE} or @code{NOT
-LIKE} comparison operators instead.
-
-To find names beginning with @samp{b}:
-
-@example
-mysql> SELECT * FROM pet WHERE name LIKE "b%";
-+--------+--------+---------+------+------------+------------+
-| name | owner | species | sex | birth | death |
-+--------+--------+---------+------+------------+------------+
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
-+--------+--------+---------+------+------------+------------+
-@end example
-
-To find names ending with @samp{fy}:
-
-@example
-mysql> SELECT * FROM pet WHERE name LIKE "%fy";
-+--------+--------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+--------+--------+---------+------+------------+-------+
-| Fluffy | Harold | cat | f | 1993-02-04 | NULL |
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-+--------+--------+---------+------+------------+-------+
-@end example
-
-To find names containing a @samp{w}:
-
-@example
-mysql> SELECT * FROM pet WHERE name LIKE "%w%";
-+----------+-------+---------+------+------------+------------+
-| name | owner | species | sex | birth | death |
-+----------+-------+---------+------+------------+------------+
-| Claws | Gwen | cat | m | 1994-03-17 | NULL |
-| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
-| Whistler | Gwen | bird | NULL | 1997-12-09 | NULL |
-+----------+-------+---------+------+------------+------------+
-@end example
-
-To find names containing exactly five characters, use the @samp{_} pattern
-character:
-
-@example
-mysql> SELECT * FROM pet WHERE name LIKE "_____";
-+-------+--------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+-------+--------+---------+------+------------+-------+
-| Claws | Gwen | cat | m | 1994-03-17 | NULL |
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-+-------+--------+---------+------+------------+-------+
-@end example
-
-The other type of pattern matching provided by @strong{MySQL} uses extended
-regular expressions. When you test for a match for this type of pattern, use
-the @code{REGEXP} and @code{NOT REGEXP} operators (or @code{RLIKE} and
-@code{NOT RLIKE}, which are synonyms).
-
-Some characteristics of extended regular expressions are:
-
-@itemize @bullet
-@item
-@samp{.} matches any single character.
-
-@item
-A character class @samp{[...]} matches any character within the brackets.
-For example, @samp{[abc]} matches @samp{a}, @samp{b}, or @samp{c}. To name a
-range of characters, use a dash. @samp{[a-z]} matches any lowercase letter,
-whereas @samp{[0-9]} matches any digit.
-
-@item
-@samp{*} matches zero or more instances of the thing preceding it. For
-example, @samp{x*} matches any number of @samp{x} characters,
-@samp{[0-9]*} matches any number of digits, and @samp{.*} matches any
-number of anything.
-
-@item
-Regular expressions are case sensitive, but you can use a character class to
-match both lettercases if you wish. For example, @samp{[aA]} matches
-lowercase or uppercase @samp{a} and @samp{[a-zA-Z]} matches any letter in
-either case.
-
-@item
-The pattern matches if it occurs anywhere in the value being tested.
-(SQL patterns match only if they match the entire value.)
-
-@item
-To anchor a pattern so that it must match the beginning or end of the value
-being tested, use @samp{^} at the beginning or @samp{$} at the end of the
-pattern.
-@end itemize
-
-To demonstrate how extended regular expressions work, the @code{LIKE} queries
-shown above are rewritten below to use @code{REGEXP}.
-
-To find names beginning with @samp{b}, use @samp{^} to match the beginning of
-the name:
-
-@example
-mysql> SELECT * FROM pet WHERE name REGEXP "^b";
-+--------+--------+---------+------+------------+------------+
-| name | owner | species | sex | birth | death |
-+--------+--------+---------+------+------------+------------+
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
-+--------+--------+---------+------+------------+------------+
-@end example
-
-Prior to @strong{MySQL} Version 3.23.4, @code{REGEXP} comparisons were
-case sensitive, so on those versions the previous query returns no rows
-(no name begins with a lowercase @samp{b}). To match either lowercase or
-uppercase @samp{b}, use this query instead:
-
-@example
-mysql> SELECT * FROM pet WHERE name REGEXP "^[bB]";
-@end example
-
-From @strong{MySQL} 3.23.4 on, to force a @code{REGEXP} comparison to
-be case sensitive, use the @code{BINARY} keyword to make one of the
-strings a binary string. This query will match only lowercase @samp{b}
-at the beginning of a name:
-
-@example
-mysql> SELECT * FROM pet WHERE name REGEXP BINARY "^b";
-@end example
-
-To find names ending with @samp{fy}, use @samp{$} to match the end of the
-name:
-
-@example
-mysql> SELECT * FROM pet WHERE name REGEXP "fy$";
-+--------+--------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+--------+--------+---------+------+------------+-------+
-| Fluffy | Harold | cat | f | 1993-02-04 | NULL |
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-+--------+--------+---------+------+------------+-------+
-@end example
-
-To find names containing a lowercase or uppercase @samp{w}, use this query:
-
-@example
-mysql> SELECT * FROM pet WHERE name REGEXP "w";
-+----------+-------+---------+------+------------+------------+
-| name | owner | species | sex | birth | death |
-+----------+-------+---------+------+------------+------------+
-| Claws | Gwen | cat | m | 1994-03-17 | NULL |
-| Bowser | Diane | dog | m | 1989-08-31 | 1995-07-29 |
-| Whistler | Gwen | bird | NULL | 1997-12-09 | NULL |
-+----------+-------+---------+------+------------+------------+
-@end example
-
-Because a regular expression pattern matches if it occurs anywhere in the
-value, it is not necessary in the previous query to put a wildcard on
-either side of the pattern, as would be required with a SQL pattern to
-make it match the entire value.
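-
-You can see the difference by running the corresponding @code{LIKE} query
-without wildcards; it matches only names consisting of nothing but the
-single character @samp{w}, so it returns no rows:
-
-@example
-mysql> SELECT * FROM pet WHERE name LIKE "w";
-@end example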
-
-To find names containing exactly five characters, use @samp{^} and @samp{$}
-to match the beginning and end of the name, and five instances of @samp{.}
-in between:
-
-@example
-mysql> SELECT * FROM pet WHERE name REGEXP "^.....$";
-+-------+--------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+-------+--------+---------+------+------------+-------+
-| Claws | Gwen | cat | m | 1994-03-17 | NULL |
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-+-------+--------+---------+------+------------+-------+
-@end example
-
-You could also write the previous query using the @samp{@{n@}}
-``repeat-@code{n}-times'' operator:
-
-@example
-mysql> SELECT * FROM pet WHERE name REGEXP "^.@{5@}$";
-+-------+--------+---------+------+------------+-------+
-| name | owner | species | sex | birth | death |
-+-------+--------+---------+------+------------+-------+
-| Claws | Gwen | cat | m | 1994-03-17 | NULL |
-| Buffy | Harold | dog | f | 1989-05-13 | NULL |
-+-------+--------+---------+------+------------+-------+
-@end example
-
-@cindex rows, counting
-@cindex tables, counting rows
-@cindex counting, table rows
-@node Counting rows, Multiple tables, Pattern matching, Retrieving data
-@subsubsection Counting Rows
-
-Databases are often used to answer the question, ``How often does a certain
-type of data occur in a table?'' For example, you might want to know how
-many pets you have, or how many pets each owner has, or you might want to
-perform various kinds of censuses on your animals.
-
-Counting the total number of animals you have is the same question as ``How
-many rows are in the @code{pet} table?'' because there is one record per pet.
-@code{COUNT(*)} counts the number of rows, so the query to count your
-animals looks like this:
-
-@example
-mysql> SELECT COUNT(*) FROM pet;
-+----------+
-| COUNT(*) |
-+----------+
-| 9 |
-+----------+
-@end example
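-
-If you count a particular column instead, @code{COUNT()} skips
-@code{NULL} values. With the data loaded earlier, counting the
-@code{sex} column gives @code{8} rather than @code{9}, because
-Whistler's sex is unknown:
-
-@example
-mysql> SELECT COUNT(*), COUNT(sex) FROM pet;
-@end example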
-
-Earlier, you retrieved the names of the people who owned pets. You can
-use @code{COUNT()} if you want to find out how many pets each owner has:
-
-@example
-mysql> SELECT owner, COUNT(*) FROM pet GROUP BY owner;
-+--------+----------+
-| owner | COUNT(*) |
-+--------+----------+
-| Benny | 2 |
-| Diane | 2 |
-| Gwen | 3 |
-| Harold | 2 |
-+--------+----------+
-@end example
-
-Note the use of @code{GROUP BY} to group together all records for each
-@code{owner}. Without it, all you get is an error message:
-
-@example
-mysql> SELECT owner, COUNT(owner) FROM pet;
-ERROR 1140 at line 1: Mixing of GROUP columns (MIN(),MAX(),COUNT()...)
-with no GROUP columns is illegal if there is no GROUP BY clause
-@end example
-
-@code{COUNT()} and @code{GROUP BY} are useful for characterizing your
-data in various ways. The following examples show different ways to
-perform animal census operations.
-
-Number of animals per species:
-
-@example
-mysql> SELECT species, COUNT(*) FROM pet GROUP BY species;
-+---------+----------+
-| species | COUNT(*) |
-+---------+----------+
-| bird | 2 |
-| cat | 2 |
-| dog | 3 |
-| hamster | 1 |
-| snake | 1 |
-+---------+----------+
-@end example
-
-Number of animals per sex:
-
-@example
-mysql> SELECT sex, COUNT(*) FROM pet GROUP BY sex;
-+------+----------+
-| sex | COUNT(*) |
-+------+----------+
-| NULL | 1 |
-| f | 4 |
-| m | 4 |
-+------+----------+
-@end example
-
-(In this output, @code{NULL} indicates sex unknown.)
-
-Number of animals per combination of species and sex:
-
-@example
-mysql> SELECT species, sex, COUNT(*) FROM pet GROUP BY species, sex;
-+---------+------+----------+
-| species | sex | COUNT(*) |
-+---------+------+----------+
-| bird | NULL | 1 |
-| bird | f | 1 |
-| cat | f | 1 |
-| cat | m | 1 |
-| dog | f | 1 |
-| dog | m | 2 |
-| hamster | f | 1 |
-| snake | m | 1 |
-+---------+------+----------+
-@end example
-
-You need not retrieve an entire table when you use @code{COUNT()}. For
-example, the previous query, when performed just on dogs and cats, looks like
-this:
-
-@example
-mysql> SELECT species, sex, COUNT(*) FROM pet
- -> WHERE species = "dog" OR species = "cat"
- -> GROUP BY species, sex;
-+---------+------+----------+
-| species | sex | COUNT(*) |
-+---------+------+----------+
-| cat | f | 1 |
-| cat | m | 1 |
-| dog | f | 1 |
-| dog | m | 2 |
-+---------+------+----------+
-@end example
-
-Or, if you wanted the number of animals per sex only for known-sex animals:
-
-@example
-mysql> SELECT species, sex, COUNT(*) FROM pet
- -> WHERE sex IS NOT NULL
- -> GROUP BY species, sex;
-+---------+------+----------+
-| species | sex | COUNT(*) |
-+---------+------+----------+
-| bird | f | 1 |
-| cat | f | 1 |
-| cat | m | 1 |
-| dog | f | 1 |
-| dog | m | 2 |
-| hamster | f | 1 |
-| snake | m | 1 |
-+---------+------+----------+
-@end example
-
-@cindex tables, multiple
-@node Multiple tables, , Counting rows, Retrieving data
-@subsubsection Using More Than One Table
-
-The @code{pet} table keeps track of which pets you have. If you want to
-record other information about them, such as events in their lives like
-visits to the vet or when litters are born, you need another table. What
-should this table look like? It needs:
-
-@itemize @bullet
-@item
-To contain the pet name so you know which animal each event pertains
-to.
-
-@item
-A date so you know when the event occurred.
-
-@item
-A field to describe the event.
-
-@item
-An event type field, if you want to be able to categorize events.
-@end itemize
-
-Given these considerations, the @code{CREATE TABLE} statement for the
-@code{event} table might look like this:
-
-@example
-mysql> CREATE TABLE event (name VARCHAR(20), date DATE,
- -> type VARCHAR(15), remark VARCHAR(255));
-@end example
-
-As with the @code{pet} table, it's easiest to load the initial records
-by creating a tab-delimited text file containing the information:
-
-@multitable @columnfractions .15 .15 .15 .55
-@item Fluffy @tab 1995-05-15 @tab litter @tab 4 kittens, 3 female, 1 male
-@item Buffy @tab 1993-06-23 @tab litter @tab 5 puppies, 2 female, 3 male
-@item Buffy @tab 1994-06-19 @tab litter @tab 3 puppies, 3 female
-@item Chirpy @tab 1999-03-21 @tab vet @tab needed beak straightened
-@item Slim @tab 1997-08-03 @tab vet @tab broken rib
-@item Bowser @tab 1991-10-12 @tab kennel
-@item Fang @tab 1991-10-12 @tab kennel
-@item Fang @tab 1998-08-28 @tab birthday @tab Gave him a new chew toy
-@item Claws @tab 1998-03-17 @tab birthday @tab Gave him a new flea collar
-@item Whistler @tab 1998-12-09 @tab birthday @tab First birthday
-@end multitable
-
-Load the records like this:
-
-@example
-mysql> LOAD DATA LOCAL INFILE "event.txt" INTO TABLE event;
-@end example
-
-Based on what you've learned from the queries you've run on the @code{pet}
-table, you should be able to perform retrievals on the records in the
-@code{event} table; the principles are the same. But when is the
-@code{event} table by itself insufficient to answer questions you might ask?
-
-Suppose you want to find out the ages of each pet when they had their
-litters. The @code{event} table indicates when this occurred, but to
-calculate the age of the mother, you need her birth date. Because that is
-stored in the @code{pet} table, you need both tables for the query:
-
-@example
-mysql> SELECT pet.name, (TO_DAYS(date) - TO_DAYS(birth))/365 AS age, remark
- -> FROM pet, event
- -> WHERE pet.name = event.name AND type = "litter";
-+--------+------+-----------------------------+
-| name | age | remark |
-+--------+------+-----------------------------+
-| Fluffy | 2.27 | 4 kittens, 3 female, 1 male |
-| Buffy | 4.12 | 5 puppies, 2 female, 3 male |
-| Buffy | 5.10 | 3 puppies, 3 female |
-+--------+------+-----------------------------+
-@end example
-
-There are several things to note about this query:
-
-@itemize @bullet
-@item
-The @code{FROM} clause lists two tables because the query needs to pull
-information from both of them.
-
-@item
-When combining (joining) information from multiple tables, you need to
-specify how records in one table can be matched to records in the other.
-This is easy because they both have a @code{name} column. The query uses a
-@code{WHERE} clause to match up records in the two tables based on the
-@code{name} values.
-
-@item
-Because the @code{name} column occurs in both tables, you must be specific
-about which table you mean when referring to the column. This is done
-by prepending the table name to the column name.
-@end itemize
-
-You need not have two different tables to perform a join. Sometimes it is
-useful to join a table to itself, if you want to compare records in a table
-to other records in that same table. For example, to find breeding pairs
-among your pets, you can join the @code{pet} table with itself to pair up
-males and females of like species:
-
-@example
-mysql> SELECT p1.name, p1.sex, p2.name, p2.sex, p1.species
- -> FROM pet AS p1, pet AS p2
- -> WHERE p1.species = p2.species AND p1.sex = "f" AND p2.sex = "m";
-+--------+------+--------+------+---------+
-| name | sex | name | sex | species |
-+--------+------+--------+------+---------+
-| Fluffy | f | Claws | m | cat |
-| Buffy | f | Fang | m | dog |
-| Buffy | f | Bowser | m | dog |
-+--------+------+--------+------+---------+
-@end example
-
-In this query, we specify aliases for the table name so that each column
-reference makes clear which instance of the table it is associated with.
-
-@cindex databases, information about
-@cindex tables, information about
-@findex DESCRIBE
-@node Getting information, Examples, Database use, Tutorial
-@section Getting Information About Databases and Tables
-
-What if you forget the name of a database or table, or what the structure of
-a given table is (for example, what its columns are called)? @strong{MySQL}
-addresses this problem through several statements that provide information
-about the databases and tables it supports.
-
-You have already seen @code{SHOW DATABASES}, which lists the databases
-managed by the server. To find out which database is currently selected,
-use the @code{DATABASE()} function:
-
-@example
-mysql> SELECT DATABASE();
-+------------+
-| DATABASE() |
-+------------+
-| menagerie  |
-+------------+
-@end example
-
-If you haven't selected any database yet, the result is blank.
-
-To find out what tables the current database contains (for example, when
-you're not sure about the name of a table), use this command:
-
-@example
-mysql> SHOW TABLES;
-+---------------------+
-| Tables in menagerie |
-+---------------------+
-| event               |
-| pet                 |
-+---------------------+
-@end example
-
-If you want to find out about the structure of a table, the @code{DESCRIBE}
-command is useful; it displays information about each of a table's columns:
-
-@example
-mysql> DESCRIBE pet;
-+---------+-------------+------+-----+---------+-------+
-| Field   | Type        | Null | Key | Default | Extra |
-+---------+-------------+------+-----+---------+-------+
-| name    | varchar(20) | YES  |     | NULL    |       |
-| owner   | varchar(20) | YES  |     | NULL    |       |
-| species | varchar(20) | YES  |     | NULL    |       |
-| sex     | char(1)     | YES  |     | NULL    |       |
-| birth   | date        | YES  |     | NULL    |       |
-| death   | date        | YES  |     | NULL    |       |
-+---------+-------------+------+-----+---------+-------+
-@end example
-
-@code{Field} indicates the column name, @code{Type} is the data type for
-the column, @code{Null} indicates whether or not the column can contain
-@code{NULL} values, @code{Key} indicates whether or not the column is
-indexed, and @code{Default} specifies the column's default value.
-
-If you have indexes on a table,
-@code{SHOW INDEX FROM tbl_name} produces information about them.
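-
-For example, because the @code{pet} table was created without any
-indexes, this command simply returns an empty result:
-
-@example
-mysql> SHOW INDEX FROM pet;
-@end example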
-
-@cindex queries, examples
-@cindex examples, queries
-@node Examples, Batch mode, Getting information, Tutorial
-@section Examples of Common Queries
-
-Here are examples of how to solve some common problems with
-@strong{MySQL}.
-
-Some of the examples use the table @code{shop} to hold the price of each
-article (item number) for certain traders (dealers). Supposing that each
-trader has a single fixed price per article, then (@code{item},
-@code{trader}) is a primary key for the records.
-
-Start the command line tool @code{mysql} and select a database:
-
-@example
-mysql your-database-name
-@end example
-
-(In most @strong{MySQL} installations, you can use the database name @code{test}.)
-
-You can create the example table as:
-
-@example
-CREATE TABLE shop (
- article INT(4) UNSIGNED ZEROFILL DEFAULT '0000' NOT NULL,
- dealer CHAR(20) DEFAULT '' NOT NULL,
- price DOUBLE(16,2) DEFAULT '0.00' NOT NULL,
- PRIMARY KEY(article, dealer));
-
-INSERT INTO shop VALUES
-(1,'A',3.45),(1,'B',3.99),(2,'A',10.99),(3,'B',1.45),(3,'C',1.69),
-(3,'D',1.25),(4,'D',19.95);
-@end example
-
-Okay, so the example data is:
-
-@example
-mysql> SELECT * FROM shop;
-
-+---------+--------+-------+
-| article | dealer | price |
-+---------+--------+-------+
-|    0001 | A      |  3.45 |
-|    0001 | B      |  3.99 |
-|    0002 | A      | 10.99 |
-|    0003 | B      |  1.45 |
-|    0003 | C      |  1.69 |
-|    0003 | D      |  1.25 |
-|    0004 | D      | 19.95 |
-+---------+--------+-------+
-@end example
-
-@menu
-* example-Maximum-column:: The maximum value for a column
-* example-Maximum-row:: The row holding the maximum of a certain column
-* example-Maximum-column-group:: Maximum of column per group
-* example-Maximum-column-group-row:: The rows holding the group-wise maximum of a certain field
-* example-user-variables:: Using user variables
-* example-Foreign keys:: Using foreign keys
-* Searching on two keys::
-* Calculating days::
-@end menu
-
-@node example-Maximum-column, example-Maximum-row, Examples, Examples
-@subsection The Maximum Value for a Column
-
-``What's the highest item number?''
-
-@example
-SELECT MAX(article) AS article FROM shop
-
-+---------+
-| article |
-+---------+
-|       4 |
-+---------+
-@end example
-
-@node example-Maximum-row, example-Maximum-column-group, example-Maximum-column, Examples
-@subsection The Row Holding the Maximum of a Certain Column
-
-``Find number, dealer, and price of the most expensive article.''
-
-In ANSI SQL this is easily done with a sub-query:
-
-@example
-SELECT article, dealer, price
-FROM shop
-WHERE price=(SELECT MAX(price) FROM shop)
-@end example
-
-In @strong{MySQL} (which does not yet have sub-selects), just do it in
-two steps:
-
-@enumerate
-@item
-Get the maximum price value from the table with a @code{SELECT} statement
-(see the example after this list).
-@item
-Using this value compile the actual query:
-@example
-SELECT article, dealer, price
-FROM shop
-WHERE price=19.95
-@end example
-@end enumerate
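-
-The first step might look like this (with the sample data shown earlier,
-the result is 19.95, which is the value used in the second step):
-
-@example
-SELECT MAX(price) FROM shop;
-@end example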
-
-Another solution is to sort all rows descending by price and only
-get the first row using the @strong{MySQL} specific @code{LIMIT} clause:
-
-@example
-SELECT article, dealer, price
-FROM shop
-ORDER BY price DESC
-LIMIT 1
-@end example
-
-@strong{NOTE}: If there are several most expensive articles (for example, each 19.95)
-the @code{LIMIT} solution shows only one of them!
-
-@node example-Maximum-column-group, example-Maximum-column-group-row, example-Maximum-row, Examples
-@subsection Maximum of Column per Group
-
-``What's the highest price per article?''
-
-@example
-SELECT article, MAX(price) AS price
-FROM shop
-GROUP BY article
-
-+---------+-------+
-| article | price |
-+---------+-------+
-|    0001 |  3.99 |
-|    0002 | 10.99 |
-|    0003 |  1.69 |
-|    0004 | 19.95 |
-+---------+-------+
-@end example
-
-@node example-Maximum-column-group-row, example-user-variables, example-Maximum-column-group, Examples
-@subsection The Rows Holding the Group-wise Maximum of a Certain Field
-
-``For each article, find the dealer(s) with the most expensive price.''
-
-In ANSI SQL, I'd do it with a sub-query like this:
-
-@example
-SELECT article, dealer, price
-FROM shop s1
-WHERE price=(SELECT MAX(s2.price)
- FROM shop s2
- WHERE s1.article = s2.article);
-@end example
-
-In @strong{MySQL}, it's best to do it in several steps:
-
-@enumerate
-@item
-Get the list of (article,maxprice).
-@item
-For each article get the corresponding rows that have the stored maximum
-price.
-@end enumerate
-
-This can easily be done with a temporary table:
-
-@example
-CREATE TEMPORARY TABLE tmp (
- article INT(4) UNSIGNED ZEROFILL DEFAULT '0000' NOT NULL,
- price DOUBLE(16,2) DEFAULT '0.00' NOT NULL);
-
-LOCK TABLES shop read;
-
-INSERT INTO tmp SELECT article, MAX(price) FROM shop GROUP BY article;
-
-SELECT shop.article, dealer, shop.price FROM shop, tmp
-WHERE shop.article=tmp.article AND shop.price=tmp.price;
-
-UNLOCK TABLES;
-
-DROP TABLE tmp;
-@end example
-
-If you don't use a @code{TEMPORARY} table, you must also lock the 'tmp' table.
-
-``Can it be done with a single query?''
-
-Yes, but only by using a quite inefficient trick that I call the
-``MAX-CONCAT trick'':
-
-@example
-SELECT article,
- SUBSTRING( MAX( CONCAT(LPAD(price,6,'0'),dealer) ), 7) AS dealer,
- 0.00+LEFT( MAX( CONCAT(LPAD(price,6,'0'),dealer) ), 6) AS price
-FROM shop
-GROUP BY article;
-
-+---------+--------+-------+
-| article | dealer | price |
-+---------+--------+-------+
-|    0001 | B      |  3.99 |
-|    0002 | A      | 10.99 |
-|    0003 | C      |  1.69 |
-|    0004 | D      | 19.95 |
-+---------+--------+-------+
-@end example
-
-The last example can, of course, be made a bit more efficient by doing the
-splitting of the concatenated column in the client.
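-
-A minimal sketch of that approach is to let the server return only the
-concatenated column and then split off the fixed-width (6 character)
-price part and the trailing dealer part in the client:
-
-@example
-SELECT article, MAX(CONCAT(LPAD(price,6,'0'),dealer)) AS price_and_dealer
-FROM shop
-GROUP BY article;
-@end example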
-
-@node example-user-variables, example-Foreign keys, example-Maximum-column-group-row, Examples
-@subsection Using user variables
-
-You can use @strong{MySQL} user variables to remember results without
-having to store them in temporary variables in the client.
-@xref{Variables}.
-
-For example, to find the articles with the highest and lowest price you
-can do:
-
-@example
-select @@min_price:=min(price),@@max_price:=max(price) from shop;
-select * from shop where price=@@min_price or price=@@max_price;
-
-+---------+--------+-------+
-| article | dealer | price |
-+---------+--------+-------+
-|    0003 | D      |  1.25 |
-|    0004 | D      | 19.95 |
-+---------+--------+-------+
-@end example
-
-@cindex foreign keys
-@cindex keys, foreign
-@node example-Foreign keys, Searching on two keys, example-user-variables, Examples
-@subsection Using Foreign Keys
-
-You don't need foreign keys to join two tables.
-
-The only things @strong{MySQL} doesn't do are @code{CHECK} to make sure that
-the keys you use really exist in the table(s) you're referencing, and
-automatically delete rows from a table with a foreign key definition. If
-you use your keys as you normally would, it'll work just fine:
-
-
-@example
-CREATE TABLE persons (
- id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
- name CHAR(60) NOT NULL,
- PRIMARY KEY (id)
-);
-
-CREATE TABLE shirts (
- id SMALLINT UNSIGNED NOT NULL AUTO_INCREMENT,
- style ENUM('t-shirt', 'polo', 'dress') NOT NULL,
- color ENUM('red', 'blue', 'orange', 'white', 'black') NOT NULL,
- owner SMALLINT UNSIGNED NOT NULL REFERENCES persons,
- PRIMARY KEY (id)
-);
-
-
-INSERT INTO persons VALUES (NULL, 'Antonio Paz');
-
-INSERT INTO shirts VALUES
-(NULL, 'polo', 'blue', LAST_INSERT_ID()),
-(NULL, 'dress', 'white', LAST_INSERT_ID()),
-(NULL, 't-shirt', 'blue', LAST_INSERT_ID());
-
-
-INSERT INTO persons VALUES (NULL, 'Lilliana Angelovska');
-
-INSERT INTO shirts VALUES
-(NULL, 'dress', 'orange', LAST_INSERT_ID()),
-(NULL, 'polo', 'red', LAST_INSERT_ID()),
-(NULL, 'dress', 'blue', LAST_INSERT_ID()),
-(NULL, 't-shirt', 'white', LAST_INSERT_ID());
-
-
-SELECT * FROM persons;
-+----+---------------------+
-| id | name                |
-+----+---------------------+
-|  1 | Antonio Paz         |
-|  2 | Lilliana Angelovska |
-+----+---------------------+
-
-SELECT * FROM shirts;
-+----+---------+--------+-------+
-| id | style   | color  | owner |
-+----+---------+--------+-------+
-|  1 | polo    | blue   |     1 |
-|  2 | dress   | white  |     1 |
-|  3 | t-shirt | blue   |     1 |
-|  4 | dress   | orange |     2 |
-|  5 | polo    | red    |     2 |
-|  6 | dress   | blue   |     2 |
-|  7 | t-shirt | white  |     2 |
-+----+---------+--------+-------+
-
-
-SELECT s.* FROM persons p, shirts s
- WHERE p.name LIKE 'Lilliana%'
- AND s.owner = p.id
- AND s.color <> 'white';
-
-+----+-------+--------+-------+
-| id | style | color  | owner |
-+----+-------+--------+-------+
-|  4 | dress | orange |     2 |
-|  5 | polo  | red    |     2 |
-|  6 | dress | blue   |     2 |
-+----+-------+--------+-------+
-@end example
-
-@findex UNION
-@cindex searching, two keys
-@cindex keys, searching on two
-@node Searching on two keys, Calculating days, example-Foreign keys, Examples
-@subsection Searching on Two Keys
-
-@strong{MySQL} doesn't yet optimize when you search on two different
-keys combined with @code{OR} (searching on one key with different @code{OR}
-parts is optimized quite well):
-
-@example
-SELECT field1_index, field2_index FROM test_table WHERE field1_index = '1'
-OR field2_index = '1'
-@end example
-
-The reason is that we haven't yet had time to come up with an efficient
-way to handle this in the general case. (The @code{AND} handling is,
-in comparison, now completely general and works very well).
-
-For the moment you can solve this very efficiently by using a
-@code{TEMPORARY} table. This type of optimization is also very good if
-you are using very complicated queries where the SQL server does the
-optimizations in the wrong order.
-
-@example
-CREATE TEMPORARY TABLE tmp
-SELECT field1_index, field2_index FROM test_table WHERE field1_index = '1';
-INSERT INTO tmp
-SELECT field1_index, field2_index FROM test_table WHERE field2_index = '1';
-SELECT * from tmp;
-DROP TABLE tmp;
-@end example
-
-The above way to solve this query is in effect a @code{UNION} of two queries.
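-
-For reference, in ANSI SQL the same result could be expressed directly as
-a @code{UNION} of the two @code{SELECT} statements:
-
-@example
-SELECT field1_index, field2_index FROM test_table WHERE field1_index = '1'
-UNION
-SELECT field1_index, field2_index FROM test_table WHERE field2_index = '1';
-@end example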
-
-@cindex bit_functions, example
-@findex BIT_OR
-@findex BIT_COUNT
-@findex <<
-@node Calculating days, , Searching on two keys, Examples
-@subsection Calculating visits per day
-
-The following shows an idea of how you can use the bit group functions
-to calculate the number of days per month a user has visited a web page.
-
-@example
-CREATE TABLE t1 (year YEAR(4), month INT(2) UNSIGNED ZEROFILL, day INT(2) UNSIGNED ZEROFILL);
-INSERT INTO t1 VALUES(2000,1,1),(2000,1,20),(2000,1,30),(2000,2,2),(2000,2,23),(2000,2,23);
-
-SELECT year,month,BIT_COUNT(BIT_OR(1<<day)) AS days FROM t1 GROUP BY year,month;
-
-Which returns:
-
-+------+-------+------+
-| year | month | days |
-+------+-------+------+
-| 2000 |    01 |    3 |
-| 2000 |    02 |    2 |
-+------+-------+------+
-@end example
-
-The above calculates how many different days were used for a given
-year/month combination, with automatic removal of duplicate entries.
-
-@cindex modes, batch
-@cindex batch mode
-@cindex running, batch mode
-@cindex script files
-@cindex files, script
-@node Batch mode, Twin, Examples, Tutorial
-@section Using @code{mysql} in Batch Mode
-
-In the previous sections, you used @code{mysql} interactively to enter
-queries and view the results. You can also run @code{mysql} in batch
-mode. To do this, put the commands you want to run in a file, then
-tell @code{mysql} to read its input from the file:
-
-@example
-shell> mysql < batch-file
-@end example
-
-If you need to specify connection parameters on the command line, the
-command might look like this:
-
-@example
-shell> mysql -h host -u user -p < batch-file
-Enter password: ********
-@end example
-
-When you use @code{mysql} this way, you are creating a script file, then
-executing the script.
-
-Why use a script? Here are a few reasons:
-
-@itemize @bullet
-@item
-If you run a query repeatedly (say, every day or every week), making it a
-script allows you to avoid retyping it each time you execute it.
-
-@item
-You can generate new queries from existing ones that are similar by copying
-and editing script files.
-
-@item
-Batch mode can also be useful while you're developing a query, particularly
-for multiple-line commands or multiple-statement sequences of commands. If
-you make a mistake, you don't have to retype everything. Just edit your
-script to correct the error, then tell @code{mysql} to execute it again.
-
-@item
-If you have a query that produces a lot of output, you can run the output
-through a pager rather than watching it scroll off the top of your screen:
-
-@example
-shell> mysql < batch-file | more
-@end example
-
-@item
-You can catch the output in a file for further processing:
-
-@example
-shell> mysql < batch-file > mysql.out
-@end example
-
-@item
-You can distribute your script to other people so they can run the commands,
-too.
-
-@item
-Some situations do not allow for interactive use, for example, when you run
-a query from a @code{cron} job. In this case, you must use batch mode.
-@end itemize
-
-The default output format is different (more concise) when you run
-@code{mysql} in batch mode than when you use it interactively. For
-example, the output of @code{SELECT DISTINCT species FROM pet} looks like
-this when run interactively:
-
-@example
-+---------+
-| species |
-+---------+
-| bird    |
-| cat     |
-| dog     |
-| hamster |
-| snake   |
-+---------+
-@end example
-
-But like this when run in batch mode:
-
-@example
-species
-bird
-cat
-dog
-hamster
-snake
-@end example
-
-If you want to get the interactive output format in batch mode, use
-@code{mysql -t}. To echo the executed commands to the output, use
-@code{mysql -vvv}.
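-
-For example, to get the tabular output format and also echo the executed
-commands while running a script, you could combine the two options:
-
-@example
-shell> mysql -t -vvv < batch-file
-@end example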
-
-@cindex Twin Studies, queries
-@cindex queries, Twin Studies project
-@node Twin, , Batch mode, Tutorial
-@section Queries from Twin Project
-
-At Analytikerna and Lentus, we have been doing the systems and field work
-for a big research project. This project is a collaboration between the
-Institute of Environmental Medicine at Karolinska Institutet Stockholm
-and the Section on Clinical Research in Aging and Psychology at the
-University of Southern California.
-
-The project involves a screening part where all twins in Sweden older
-than 65 years are interviewed by telephone. Twins who meet certain
-criteria are passed on to the next stage. In this latter stage, twins who
-want to participate are visited by a doctor/nurse team. Some of the
-examinations include physical and neuropsychological examination,
-laboratory testing, neuroimaging, psychological status assessment, and family
-history collection. In addition, data are collected on medical and
-environmental risk factors.
-
-More information about Twin studies can be found at:
-
-@example
-@url{http://www.imm.ki.se/TWIN/TWINUKW.HTM}
-@end example
-
-The latter part of the project is administered with a Web interface
-written using Perl and @strong{MySQL}.
-
-Each night all data from the interviews are moved into a @strong{MySQL}
-database.
-
-@menu
-* Twin pool:: Find all non-distributed twins
-* Twin event:: Show a table on twin pair status
-@end menu
-
-@node Twin pool, Twin event, Twin, Twin
-@subsection Find all Non-distributed Twins
-
-The following query is used to determine who goes into the second part of the
-project:
-
-@example
-select
- concat(p1.id, p1.tvab) + 0 as tvid,
- concat(p1.christian_name, " ", p1.surname) as Name,
- p1.postal_code as Code,
- p1.city as City,
- pg.abrev as Area,
- if(td.participation = "Aborted", "A", " ") as A,
- p1.dead as dead1,
- l.event as event1,
- td.suspect as tsuspect1,
- id.suspect as isuspect1,
- td.severe as tsevere1,
- id.severe as isevere1,
- p2.dead as dead2,
- l2.event as event2,
- h2.nurse as nurse2,
- h2.doctor as doctor2,
- td2.suspect as tsuspect2,
- id2.suspect as isuspect2,
- td2.severe as tsevere2,
- id2.severe as isevere2,
- l.finish_date
-from
- twin_project as tp
- /* For Twin 1 */
- left join twin_data as td on tp.id = td.id and tp.tvab = td.tvab
- left join informant_data as id on tp.id = id.id and tp.tvab = id.tvab
- left join harmony as h on tp.id = h.id and tp.tvab = h.tvab
- left join lentus as l on tp.id = l.id and tp.tvab = l.tvab
- /* For Twin 2 */
- left join twin_data as td2 on p2.id = td2.id and p2.tvab = td2.tvab
- left join informant_data as id2 on p2.id = id2.id and p2.tvab = id2.tvab
- left join harmony as h2 on p2.id = h2.id and p2.tvab = h2.tvab
- left join lentus as l2 on p2.id = l2.id and p2.tvab = l2.tvab,
- person_data as p1,
- person_data as p2,
- postal_groups as pg
-where
- /* p1 gets main twin and p2 gets his/her twin. */
- /* ptvab is a field inverted from tvab */
- p1.id = tp.id and p1.tvab = tp.tvab and
- p2.id = p1.id and p2.ptvab = p1.tvab and
-  /* Just the screening survey */
- tp.survey_no = 5 and
- /* Skip if partner died before 65 but allow emigration (dead=9) */
- (p2.dead = 0 or p2.dead = 9 or
- (p2.dead = 1 and
- (p2.death_date = 0 or
- (((to_days(p2.death_date) - to_days(p2.birthday)) / 365)
- >= 65))))
- and
- (
- /* Twin is suspect */
- (td.future_contact = 'Yes' and td.suspect = 2) or
- /* Twin is suspect - Informant is Blessed */
- (td.future_contact = 'Yes' and td.suspect = 1 and id.suspect = 1) or
- /* No twin - Informant is Blessed */
- (ISNULL(td.suspect) and id.suspect = 1 and id.future_contact = 'Yes') or
- /* Twin broken off - Informant is Blessed */
- (td.participation = 'Aborted'
- and id.suspect = 1 and id.future_contact = 'Yes') or
- /* Twin broken off - No inform - Have partner */
- (td.participation = 'Aborted' and ISNULL(id.suspect) and p2.dead = 0))
- and
- l.event = 'Finished'
- /* Get at area code */
- and substring(p1.postal_code, 1, 2) = pg.code
- /* Not already distributed */
- and (h.nurse is NULL or h.nurse=00 or h.doctor=00)
- /* Has not refused or been aborted */
- and not (h.status = 'Refused' or h.status = 'Aborted'
- or h.status = 'Died' or h.status = 'Other')
-order by
- tvid;
-@end example
-
-Some explanations:
-@table @asis
-@item @code{concat(p1.id, p1.tvab) + 0 as tvid}
-We want to sort on the concatenated @code{id} and @code{tvab} in
-numerical order. Adding @code{0} to the result causes @strong{MySQL} to
-treat the result as a number (see the small example after this table).
-@item column @code{id}
-This identifies a pair of twins. It is a key in all tables.
-@item column @code{tvab}
-This identifies a twin in a pair. It has a value of @code{1} or @code{2}.
-@item column @code{ptvab}
-This is an inverse of @code{tvab}. When @code{tvab} is @code{1} this is
-@code{2}, and vice versa. It exists to save typing and to make it easier for
-@strong{MySQL} to optimize the query.
-@end table
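-
-As a small example of the @code{+ 0} conversion (the exact column header
-in the output may differ):
-
-@example
-mysql> SELECT CONCAT(15, 2) + 0;
-        -> 152
-@end example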
-
-This query demonstrates, among other things, how to join a table with
-itself (@code{person_data} aliased as @code{p1} and @code{p2}). In the
-example, this is used to check whether a twin's partner died before the
-age of 65. If so, the row is not returned.
-
-All of the above exist in all tables with twin-related information. We
-have a key on both @code{id,tvab} (all tables), and @code{id,ptvab}
-(@code{person_data}) to make queries faster.
-
-On our production machine (a 200MHz UltraSPARC), this query returns
-about 150-200 rows and takes less than one second.
-
-The current number of records in the tables used above:
-@multitable @columnfractions .3 .5
-@item @strong{Table} @tab @strong{Rows}
-@item @code{person_data} @tab 71074
-@item @code{lentus} @tab 5291
-@item @code{twin_project} @tab 5286
-@item @code{twin_data} @tab 2012
-@item @code{informant_data} @tab 663
-@item @code{harmony} @tab 381
-@item @code{postal_groups} @tab 100
-@end multitable
-
-@node Twin event, , Twin pool, Twin
-@subsection Show a Table on Twin Pair Status
-
-Each interview ends with a status code called @code{event}. The query
-shown below is used to display a table of all twin pairs combined by
-event. This indicates in how many pairs both twins are finished, in how many
-pairs one twin is finished and the other refused, and so on.
-
-@example
-select
- t1.event,
- t2.event,
- count(*)
-from
- lentus as t1,
- lentus as t2,
- twin_project as tp
-where
- /* We are looking at one pair at a time */
- t1.id = tp.id
- and t1.tvab=tp.tvab
- and t1.id = t2.id
-  /* Just the screening survey */
- and tp.survey_no = 5
- /* This makes each pair only appear once */
- and t1.tvab='1' and t2.tvab='2'
-group by
- t1.event, t2.event;
-
-@end example
-
-@cindex functions, server
-@cindex server functions
-@node Server, Replication, Tutorial, Top
-@chapter MySQL Server Functions
-
-@menu
-* Languages:: What languages are supported by @strong{MySQL}?
-@end menu
-
-This chapter describes the languages @strong{MySQL} supports, how sorting
-works in @strong{MySQL}, and how to add new character sets to @strong{MySQL}.
-You will also find information about maximum table sizes in this chapter.
-
-@cindex error messages, languages
-@cindex messages, languages
-@cindex files, error messages
-@cindex language support
-@node Languages, , Server, Server
-@section What Languages Are Supported by MySQL?
-
-@code{mysqld} can issue error messages in the following languages:
-Czech, Danish, Dutch, English (the default), Estonian, French, German, Greek,
-Hungarian, Italian, Japanese, Korean, Norwegian, Norwegian-ny, Polish,
-Portuguese, Romanian, Russian, Slovak, Spanish, and Swedish.
-
-To start @code{mysqld} with a particular language, use either the
-@code{--language=lang} or @code{-L lang} options. For example:
-
-@example
-shell> mysqld --language=swedish
-@end example
-
-or:
-
-@example
-shell> mysqld --language=/usr/local/share/swedish
-@end example
-
-Note that all language names are specified in lowercase.
-
-The language files are located (by default) in
-@file{@var{mysql_base_dir}/share/@var{LANGUAGE}/}.
-
-To update the error message file, you should edit the @file{errmsg.txt} file
-and execute the following command to generate the @file{errmsg.sys} file:
-
-@example
-shell> comp_err errmsg.txt errmsg.sys
-@end example
-
-If you upgrade to a newer version of @strong{MySQL}, remember to repeat
-your changes with the new @file{errmsg.txt} file.
-
-@menu
-* Character sets:: The character set used for data and sorting
-* Adding character set:: Adding a new character set
-* Character arrays:: The character definition arrays
-* String collating:: String collating support
-* Multi-byte characters:: Multi-byte character support
-@end menu
-
-@cindex character sets
-@cindex data, character sets
-@cindex sorting, character sets
-@node Character sets, Adding character set, Languages, Languages
-@subsection The Character Set Used for Data and Sorting
-
-By default, @strong{MySQL} uses the ISO-8859-1 (Latin1) character set
-with sorting according to Swedish/Finnish rules. This character set is
-suitable for the USA and western Europe.
-
-All standard @strong{MySQL} binaries are compiled with
-@code{--with-extra-charsets=complex}. This will add code to all
-standard programs to be able to handle @code{latin1} and all multi-byte
-character sets within the binary. Other character sets will be
-loaded from a character-set definition file when needed.
-
-The character set determines what characters are allowed in names and how
-things are sorted by the @code{ORDER BY} and @code{GROUP BY} clauses of
-the @code{SELECT} statement.
-
-You can change the character set with the @code{--default-character-set}
-option when you start the server. The character sets available depend
-on the @code{--with-charset=charset} and @code{--with-extra-charsets=
-list-of-charsets | complex | all} options to @code{configure}, and the
-character set configuration files listed in
-@file{SHAREDIR/charsets/Index}. @xref{configure options}.
-
-If you change the character set when running @strong{MySQL} (which may
-also change the sort order), you must run @code{myisamchk -r -q} on all
-tables. Otherwise, your indexes may not be ordered correctly.
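-
-For example, assuming a default data directory of
-@file{/usr/local/mysql/data} and that the tables are not in use while you
-do it, the rebuild could look like this:
-
-@example
-shell> myisamchk -r -q /usr/local/mysql/data/*/*.MYI
-@end example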
-
-When a client connects to a @strong{MySQL} server, the server sends the
-default character set in use to the client. The client will switch to
-use this character set for this connection.
-
-One should use @code{mysql_real_escape_string()} when escaping strings
-for a SQL query. @code{mysql_real_escape_string()} is identical to the
-old @code{mysql_escape_string()} function, except that it takes the MYSQL
-connection handle as the first parameter.
-
-If the client is compiled with different paths than where the server is
-installed, and the user who configured @strong{MySQL} didn't include all
-character sets in the @strong{MySQL} binary, one must tell the client
-where it can find the additional character sets it will need if the
-server runs with a different character set than the client.
-
-One can specify this by putting in a @strong{MySQL} option file:
-
-@example
-[client]
-character-sets-dir=/usr/local/mysql/share/mysql/charsets
-@end example
-
-where the path points to where the dynamic @strong{MySQL} character sets
-are stored.
-
-One can force the client to use a specific character set by specifying:
-
-@example
-[client]
-default-character-set=character-set-name
-@end example
-
-but normally this is never needed.
-
-@cindex character sets, adding
-@cindex adding, character sets
-@node Adding character set, Character arrays, Character sets, Languages
-@subsection Adding a New Character Set
-
-To add another character set to @strong{MySQL}, use the following procedure.
-
-Decide if the set is simple or complex. If the character set
-does not need to use special string collating routines for
-sorting and does not need multi-byte character support, it is
-simple. If it needs either of those features, it is complex.
-
-For example, @code{latin1} and @code{danish} are simple character sets,
-while @code{big5} or @code{czech} are complex character sets.
-
-In the following section, we have assumed that you name your character
-set @code{MYSET}.
-
-For a simple character set do the following:
-
-@enumerate
-@item
-Add MYSET to the end of the @file{sql/share/charsets/Index} file and
-assign a unique number to it (see the sketch after this list).
-
-@item
-Create the file @file{sql/share/charsets/MYSET.conf}.
-(You can use @file{sql/share/charsets/latin1.conf} as a base for this).
-
-The syntax for the file is very simple:
-
-@itemize @bullet
-@item
-Comments start with a '#' character and proceed to the end of the line.
-@item
-Words are separated by arbitrary amounts of whitespace.
-@item
-When defining the character set, every word must be a number in hexadecimal
-format.
-@item
-The @code{ctype} array takes up the first 257 words. The
-@code{to_lower}, @code{to_upper} and @code{sort_order} arrays take up
-256 words each after that.
-@end itemize
-
-@xref{Character arrays}.
-
-@item
-Add the character set name to the @code{CHARSETS_AVAILABLE} and
-@code{COMPILED_CHARSETS} lists in @code{configure.in}.
-
-@item
-Reconfigure, recompile, and test.
-
-@end enumerate
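-
-A sketch of what the end of @file{sql/share/charsets/Index} might then
-look like (the existing entries are only an illustrative excerpt; replace
-@code{<number>} with a value that is not already taken):
-
-@example
-latin1 8
-latin2 9
-...
-MYSET <number>
-@end example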
-
-For a complex character set do the following:
-
-@enumerate
-@item
-Create the file @file{strings/ctype-MYSET.c} in the @strong{MySQL} source
-distribution.
-
-@item
-Add MYSET to the end of the @file{sql/share/charsets/Index} file.
-Assign a unique number to it.
-
-@item
-Look at one of the existing @file{ctype-*.c} files to see what needs to
-be defined, for example @file{strings/ctype-big5.c}. Note that the
-arrays in your file must have names like @code{ctype_MYSET},
-@code{to_lower_MYSET}, and so on. These correspond to the arrays
-used for a simple character set. @xref{Character arrays}.
-
-@item
-Near the top of the file, place a special comment like this:
-
-@example
-/*
- * This comment is parsed by configure to create ctype.c,
- * so don't change it unless you know what you are doing.
- *
- * .configure. number_MYSET=MYNUMBER
- * .configure. strxfrm_multiply_MYSET=N
- * .configure. mbmaxlen_MYSET=N
- */
-@end example
-
-The @code{configure} program uses this comment to include
-the character set into the @strong{MySQL} library automatically.
-
-The @code{strxfrm_multiply} and @code{mbmaxlen} lines will be explained in
-the following sections. Only include them if you need the string
-collating functions or the multi-byte character set functions,
-respectively.
-
-@item
-You should then create some of the following functions:
-
-@itemize @bullet
-@item @code{my_strncoll_MYSET()}
-@item @code{my_strcoll_MYSET()}
-@item @code{my_strxfrm_MYSET()}
-@item @code{my_like_range_MYSET()}
-@end itemize
-
-@xref{String collating}.
-
-@item
-Add the character set name to the @code{CHARSETS_AVAILABLE} and
-@code{COMPILED_CHARSETS} lists in @code{configure.in}.
-
-@item
-Reconfigure, recompile, and test.
-@end enumerate
-
-The file @file{sql/share/charsets/README} includes some more instructions.
-
-If you want to have the character set included in the @strong{MySQL}
-distribution, mail a patch to @email{internals@@lists.mysql.com}.
-
-@node Character arrays, String collating, Adding character set, Languages
-@subsection The character definition arrays
-
-@code{to_lower[]} and @code{to_upper[]} are simple arrays that hold the
-lowercase and uppercase characters corresponding to each member of the
-character set. For example:
-
-@example
-to_lower['A'] should contain 'a'
-to_upper['a'] should contain 'A'
-@end example
-
-@code{sort_order[]} is a map indicating how characters should be ordered for
-comparison and sorting purposes. For many character sets, this is the same as
-@code{to_upper[]} (which means sorting will be case insensitive).
-@strong{MySQL} will sort characters based on the value of
-@code{sort_order[character]}. For more complicated sorting rules, see
-the discussion of string collating below. @xref{String collating}.
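-
-For a case-insensitive character set, for example, both cases of a letter
-should map to the same value:
-
-@example
-sort_order['a'] should contain the same value as sort_order['A']
-@end example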
-
-@code{ctype[]} is an array of bit values, with one element for one character.
-(Note that @code{to_lower[]}, @code{to_upper[]}, and @code{sort_order[]}
-are indexed by character value, but @code{ctype[]} is indexed by character
-value + 1. This is an old legacy convention, kept to be able to handle
-@code{EOF}.)
-
-You can find the following bitmask definitions in @file{m_ctype.h}:
-
-@example
-#define _U 01 /* Uppercase */
-#define _L 02 /* Lowercase */
-#define _N 04 /* Numeral (digit) */
-#define _S 010 /* Spacing character */
-#define _P 020 /* Punctuation */
-#define _C 040 /* Control character */
-#define _B 0100 /* Blank */
-#define _X 0200 /* heXadecimal digit */
-@end example
-
-The @code{ctype[]} entry for each character should be the union of the
-applicable bitmask values that describe the character. For example,
-@code{'A'} is an uppercase character (@code{_U}) as well as a
-hexadecimal digit (@code{_X}), so @code{ctype['A'+1]} should contain the
-value:
-
-@example
-_U + _X = 01 + 0200 = 0201
-@end example
-
-@cindex collating, strings
-@cindex string collating
-@node String collating, Multi-byte characters, Character arrays, Languages
-@subsection String Collating Support
-
-If the sorting rules for your language are too complex to be handled
-with the simple @code{sort_order[]} table, you need to use the string
-collating functions.
-
-Right now the best documentation on this is the character sets that are
-already implemented. Look at the big5, czech, gbk, sjis, and tis620
-character sets for examples.
-
-You must specify the @code{strxfrm_multiply_MYSET=N} value in the
-special comment at the top of the file. @code{N} should be set to
-the maximum ratio the strings may grow during @code{my_strxfrm_MYSET} (it
-must be a positive integer).
-
-@cindex characters, multi-byte
-@cindex multi-byte characters
-@node Multi-byte characters, , String collating, Languages
-@subsection Multi-byte Character Support
-
-If you want to add support for a new character set that includes
-multi-byte characters, you need to use the multi-byte character
-functions.
-
-Right now the best documentation on this is the character sets that are
-already implemented. Look at the euc_kr, gb2312, gbk, sjis and ujis
-character sets for examples. These are implemented in the
-@code{ctype-'charset'.c} files in the @file{strings} directory.
-
-You must specify the @code{mbmaxlen_MYSET=N} value in the special
-comment at the top of the source file. @code{N} should be set to the
-size in bytes of the largest character in the set.
-
-
-@cindex replication
-@cindex increasing, speed
-@cindex speed, increasing
-@cindex databases, replicating
-@node Replication, Fulltext Search, Server, Top
-@chapter Replication in MySQL
-
-@menu
-* Replication Intro:: Introduction
-* Replication Implementation:: Replication Implementation Overview
-* Replication HOWTO:: HOWTO
-* Replication Features:: Replication Features
-* Replication Options:: Replication Options in my.cnf
-* Replication SQL:: SQL Commands related to replication
-* Replication FAQ:: Frequently Asked Questions about replication
-* Replication Problems:: Troubleshooting Replication.
-@end menu
-
-This chapter describes the various replication features in @strong{MySQL}.
-It serves as a reference to the options available with replication.
-You will be introduced to replication and learn how to implement it.
-Towards the end, there are some frequently asked questions and descriptions
-of problems and how to solve them.
-
-@node Replication Intro, Replication Implementation, Replication, Replication
-@section Introduction
-
-One way replication can be used is to increase both robustness and
-speed. For robustness you can have two systems and can switch to the backup if
-you have problems with the master. The extra speed is achieved by
-sending a part of the non-updating queries to the replica server. Of
-course this only works if non-updating queries dominate, but that is the
-normal case.
-
-Starting in Version 3.23.15, @strong{MySQL} supports one-way replication
-internally. One server acts as the master, while the other acts as the
-slave. Note that one server could play the role of master in one pair
-and slave in the other. The master server keeps a binary log of updates
-(@xref{Binary log}.) and an index file of binary logs to keep track of
-log rotation. The slave, upon connecting, informs the master where it
-left off since the last successfully propagated update, catches up on
-the updates, and then blocks and waits for the master to notify it of
-the new updates.
-
-Note that if you are replicating a database, all updates to this
-database should be done through the master!
-
-On older servers one can use the update log to do simple replication.
-@xref{Log Replication}.
-
-Another benefit of using replication is that one can get live backups of
-the system by doing a backup on a slave instead of doing it on the
-master. @xref{Backup}.
-
-@cindex master-slave setup
-@node Replication Implementation, Replication HOWTO, Replication Intro, Replication
-@section Replication Implementation Overview
-
-@strong{MySQL} replication is based on the master server keeping track of
-all changes to your database (updates, deletes, etc.) in the binary log
-(@xref{Binary log}.), and on the slave server(s) reading the saved
-queries from the master server's binary log so that the slave can
-execute the same queries on its copy of the data.
-
-It is @strong{very important} to realize that the binary log is simply a
-record starting from a fixed point in time (the moment you enable binary
-logging). Any slaves which you set up will need copies of all the data
-from your master as it existed the moment that you enabled binary
-logging on the master. If you start your slaves with data that doesn't
-agree with what was on the master @strong{when the binary log was
-started}, your slaves may fail.
-
-A future version (4.0) of @strong{MySQL} will remove the need to keep a
-(possibly large) snapshot of data for new slaves that you might wish to
-set up through the live backup functionality with no locking required.
-However, at this time, it is necessary to block all writes either with a
-global read lock or by shutting down the master while taking a snapshot.
-
-Once a slave is properly configured and running, it will simply connect
-to the master and wait for updates to process. If the master goes away
-or the slave loses connectivity with your master, it will keep trying to
-connect every @code{master-connect-retry} seconds until it is able to
-reconnect and resume listening for updates.
-
-Each slave keeps track of where it left off. The master server has no
-knowledge of how many slaves there are or which ones are up-to-date at
-any given time.
-
-The next section explains the master/slave setup process in more detail.
-
-@node Replication HOWTO, Replication Features, Replication Implementation, Replication
-@section HOWTO
-
-Below is a quick description of how to set up complete replication on
-your current @strong{MySQL} server. It assumes you want to replicate all
-your databases and have not configured replication before. You will need
-to shut down your master server briefly to complete the steps outlined
-below.
-
-@enumerate
-@item
-Make sure you have a recent version of @strong{MySQL} installed on the master
-and slave(s).
-
-Use Version 3.23.29 or higher. Previous releases used a different binary
-log format and had bugs which have been fixed in newer releases. Please,
-do not report bugs until you have verified that the problem is present
-in the latest release.
-@item
-Set up a special replication user on the master with the @code{FILE}
-privilege and permission to connect from all the slaves. If the user is
-only doing replication (which is recommended), you don't need to grant any
-additional privileges.
-
-For example, to create a user named @code{repl} which can access your
-master from any host, you might use this command:
-
-@example
-GRANT FILE ON *.* TO repl@@"%" IDENTIFIED BY '<password>';
-@end example
-
-@item
-Shut down @strong{MySQL} on the master.
-
-@example
-mysqladmin -u root -p<password> shutdown
-@end example
-
-@item
-Snapshot all the data on your master server.
-
-The easiest way to do this (on Unix) is to simply use @strong{tar} to
-produce an archive of your entire data directory. The exact data
-directory location depends on your installation.
-
-@example
-tar -cvf /tmp/mysql-snapshot.tar /path/to/data-dir
-@end example
-
-Windows users can use WinZip or similar software to create an archive of
-the data directory.
-
-@item
-In @code{my.cnf} on the master add @code{log-bin} and
-@code{server-id=unique number} to the @code{[mysqld]} section and
-restart it. It is very important that the id of the slave is different from
-the id of the master. Think of @code{server-id} as something similar
-to the IP address - it uniquely identifies the server instance in the
-community of replication partners.
-
-@example
-[mysqld]
-log-bin
-server-id=1
-@end example
-
-@item
-Restart @strong{MySQL} on the master.
-
-@item
-Add the following to @code{my.cnf} on the slave(s):
-
-@example
-master-host=<hostname of the master>
-master-user=<replication user name>
-master-password=<replication user password>
-master-port=<TCP/IP port for master>
-server-id=<some unique number between 2 and 2^32-1>
-@end example
-
-replacing the values in <> with what is relevant to your system.
-
-@code{server-id} must be different for each server participating in
-replication. If you don't specify a server-id, it will be set to 1 if
-you have not defined @code{master-host}, else it will be set to 2. Note
-that in the case of @code{server-id} omission the master will refuse
-connections from all slaves, and the slave will refuse to connect to a
-master. Thus, omitting @code{server-id} is only good for backup with a
-binary log.
-
-
-@item
-Copy the snapshot data into your data directory on your slave(s). Make
-sure that the privileges on the files and directories are correct. The
-user which @strong{MySQL} runs as needs to be able to read and write to
-them, just as on the master.
-
-@item Restart the slave(s).
-
-@end enumerate
-
-After you have done the above, the slave(s) should connect to the master
-and catch up on any updates which happened since the snapshot was taken.
-
-If you have forgotten to set @code{server-id} for the slave you will get
-the following error in the error log file:
-
-@example
-Warning: one should set server_id to a non-0 value if master_host is set.
-The server will not act as a slave.
-@end example
-
-If you have forgotten to do this for the master, the slaves will not be
-able to connect to the master.
-
-If a slave is not able to replicate for any reason, you will find error
-messages in the error log on the slave.
-
-Once a slave is replicating, you will find a file called
-@code{master.info} in the same directory as your error log. The
-@code{master.info} file is used by the slave to keep track of how much
-of the master's binary log it has processed. @strong{Do not} remove or
-edit the file unless you really know what you are doing. Even in that case,
-it is preferred that you use the @code{CHANGE MASTER TO} command.
-
-@cindex options, replication
-@cindex @code{my.cnf} file
-@cindex files,@code{my.cnf}
-@node Replication Features, Replication Options, Replication HOWTO, Replication
-@section Replication Features and known problems
-
-Below is an explanation of what is supported and what is not:
-
-@itemize @bullet
-@item
-Replication will be done correctly with @code{AUTO_INCREMENT},
-@code{LAST_INSERT_ID}, and @code{TIMESTAMP} values.
-@item
-@code{RAND()} in updates does not replicate properly. Use
-@code{RAND(some_non_rand_expr)} if you are replicating updates with
-@code{RAND()}. You can, for example, use @code{UNIX_TIMESTAMP()} for the
-argument to @code{RAND()}.
-@item
-You have to use the same character set (@code{--default-character-set})
-on the master and the slave. If not, you may get duplicate key errors on
-the slave, because a key that is regarded as unique on the master may
-not be unique in the other character set.
-@item
-@code{LOAD DATA INFILE} will be handled properly as long as the file
-still resides on the master server at the time of update
-propagation. @code{LOAD LOCAL DATA INFILE} will be skipped.
-@item
-Update queries that use user variables are not replication-safe (yet).
-@item
-@code{FLUSH} commands are not stored in the binary log and because of
-this are not replicated to the slaves. This is not normally a problem, as
-@code{FLUSH} doesn't change anything. This does, however, mean that if you
-update the @code{MySQL} privilege tables directly without using the
-@code{GRANT} statement and you replicate the @code{MySQL} privilege
-database, you must do a @code{FLUSH PRIVILEGES} on your slaves to put
-the new privileges into effect.
-@item
-Starting in 3.23.29, temporary tables are replicated properly, with the
-exception of the case when you shut down the slave server (not just the
-slave thread) while you have some temporary tables open and they are used
-in subsequent updates. To deal with this problem, shut down the slave by
-doing @code{SLAVE STOP}, then check the @code{Slave_open_temp_tables}
-variable to see if it is 0, then issue @code{mysqladmin shutdown}. If the
-number is not 0, restart the slave thread with @code{SLAVE START} and see
-if you have better luck next time. There will be a cleaner solution, but it
-has to wait until version 4.0.
-In earlier versions temporary tables are not replicated properly - we
-recommend that you either upgrade, or execute @code{SET SQL_LOG_BIN=0} on
-your clients before all queries with temporary tables.
-@item
-@strong{MySQL} only supports one master and many slaves. We will in 4.x
-add a voting algorithm to automatically change master if something goes
-wrong with the current master. We will also introduce 'agent' processes
-to help with load balancing by sending select queries to different
-slaves.
-@item
-Starting in Version 3.23.26, it is safe to connect servers in a circular
-master-slave relationship with @code{log-slave-updates} enabled.
-Note, however, that many queries will not work right in this kind of
-setup unless your client code is written to take care of the potential
-problems that can happen from updates that occur in different sequence
-on different servers.
-
-This means that you can do a setup like the following:
-
-@example
-A -> B -> C -> A
-@end example
-
-This setup will only work if you only do non-conflicting updates
-between the tables. In other words, if you insert data in A and C, you
-should never insert a row in A that may have a conflicting key with a
-row inserted in C. You should also not update the same rows on two servers
-if the order in which the updates are applied matters.
-
-Note that the log format has changed in Version 3.23.26 so that
-pre-3.23.26 slaves will not be able to read it.
-@item
-If the query on the slave gets an error, the slave thread will
-terminate, and a message will appear in the @code{.err} file. You should
-then connect to the slave manually, fix the cause of the error (for
-example, non-existent table), and then run @code{SLAVE START} sql
-command (available starting in Version 3.23.16). In Version 3.23.15, you
-will have to restart the server.
-@item
-If connection to the master is lost, the slave will retry immediately,
-and then in case of failure every @code{master-connect-retry} (default
-60) seconds. Because of this, it is safe to shut down the master, and
-then restart it after a while. The slave will also be able to deal with
-network connectivity outages.
-@item
-Shutting down the slave (cleanly) is also safe, as it keeps track of
-where it left off. Unclean shutdowns might produce problems, especially
-if disk cache was not synced before the system died. Your system fault
-tolerance will be greatly increased if you have a good UPS.
-@item
-If the master is listening on a non-standard port, you will also need to
-specify this with @code{master-port} parameter in @code{my.cnf} .
-@item
-In Version 3.23.15, all of the tables and databases will be
-replicated. Starting in Version 3.23.16, you can restrict replication to
-a set of databases with @code{replicate-do-db} directives in
-@code{my.cnf} or just exclude a set of databases with
-@code{replicate-ignore-db}. Note that up until Version 3.23.23, there was a bug
-that did not properly deal with @code{LOAD DATA INFILE} if you did it in
-a database that was excluded from replication.
-@item
-Starting in Version 3.23.16, @code{SET SQL_LOG_BIN = 0} will turn off
-replication (binary) logging on the master, and @code{SET SQL_LOG_BIN =
-1} will turn it back on - you must have the process privilege to do
-this.
-@item
-Starting in Version 3.23.19, you can clean up stale replication leftovers when
-something goes wrong and you want a clean start with @code{FLUSH MASTER}
-and @code{FLUSH SLAVE} commands. In Version 3.23.26 we have renamed them to
-@code{RESET MASTER} and @code{RESET SLAVE} respectively to clarify
-what they do. The old @code{FLUSH} variants still work, though, for
-compatibility.
-
-@item
-Starting in Version 3.23.21, you can use @code{LOAD TABLE FROM MASTER} for
-network backup and to set up replication initially. We have recently
-received a number of bug reports concerning it that we are investigating, so
-we recommend that you use it only in testing until we make it more stable.
-@item
-Starting in Version 3.23.23, you can change masters and adjust log position
-with @code{CHANGE MASTER TO}.
-@item
-Starting in Version 3.23.23, you tell the master that updates in certain
-databases should not be logged to the binary log with @code{binlog-ignore-db}.
-@item
-Starting in Version 3.23.26, you can use @code{replicate-rewrite-db} to tell
-the slave to apply updates from one database on the master to the one
-with a different name on the slave.
-@item
-Starting in Version 3.23.28, you can use @code{PURGE MASTER LOGS TO 'log-name'}
-to get rid of old logs while the slave is running.
-@end itemize
-
-@node Replication Options, Replication SQL, Replication Features, Replication
-@section Replication Options in my.cnf
-
-If you are using replication, we recommend you to use @strong{MySQL} Version
-3.23.30 or later. Older versions work, but they do have some bugs and are
-missing some features.
-
-On both master and slave you need to use the @code{server-id} option.
-This sets a unique replication id. You should pick a unique value in the
-range from 1 to 2^32-1 for each master and slave.
-Example: @code{server-id=3}
-
-The following table has the options you can use for the @strong{MASTER}:
-
-@multitable @columnfractions .3 .7
-
-@item @strong{Option} @tab @strong{Description}
-@item @code{log-bin=filename} @tab
-Write a binary update log to the specified location. Note that if you
-give it a parameter with an extension (for example,
-@code{log-bin=/mysql/logs/replication.log} ) versions up to 3.23.24 will
-not work right during replication if you do @code{FLUSH LOGS} . The
-problem is fixed in Version 3.23.25. If you are using this kind of log
-name, @code{FLUSH LOGS} will be ignored on binlog. To clear the log, run
-@code{FLUSH MASTER}, and do not forget to run @code{FLUSH SLAVE} on all
-slaves. In Version 3.23.26 and in later versions you should use
-@code{RESET MASTER} and @code{RESET SLAVE}.
-
-@item @code{log-bin-index=filename} @tab
-Because the user could issue the @code{FLUSH LOGS} command, we need to
-know which log is currently active and which ones have been rotated out
-and in what sequence. This information is stored in the binary log index file.
-The default is `hostname`.index. You can use this option if you want to
-be a rebel. (Example: @code{log-bin-index=db.index})
-
-@item @code{sql-bin-update-same} @tab
-If set, setting @code{SQL_LOG_BIN} to a value will automatically set
-@code{SQL_LOG_UPDATE} to the same value and vice versa.
-
-@item @code{binlog-do-db=database_name} @tab
-Tells the master it should log updates for the specified database, and
-exclude all others not explicitly mentioned.
-(Example: @code{binlog-do-db=some_database})
-
-@item @code{binlog-ignore-db=database_name} @tab
-Tells the master that updates to the given database should not be logged
-to the binary log (Example: @code{binlog-ignore-db=some_database})
-@end multitable
-
-The following table has the options you can use for the @strong{SLAVE}:
-
-@multitable @columnfractions .3 .7
-
-@item @strong{Option} @tab @strong{Description}
-@item @code{master-host=host} @tab
-Master hostname or IP address for replication. If not set, the slave
-thread will not be started.
-(Example: @code{master-host=db-master.mycompany.com})
-
-@item @code{master-user=username} @tab
-The user the slave thread will use for authentication when connecting to
-the master. The user must have @code{FILE} privilege. If the master user
-is not set, user @code{test} is assumed. (Example:
-@code{master-user=scott})
-
-@item @code{master-password=password} @tab
-The password the slave thread will authenticate with when connecting to
-the master. If not set, an empty password is assumed. (Example:
-@code{master-password=tiger})
-
-@item @code{master-port=portnumber} @tab
-The port the master is listening on. If not set, the compiled setting of
-@code{MYSQL_PORT} is assumed. If you have not tinkered with
-@code{configure} options, this should be 3306. (Example:
-@code{master-port=3306})
-
-@item @code{master-connect-retry=seconds} @tab
-The number of seconds the slave thread will sleep before retrying to
-connect to the master in case the master goes down or the connection is
-lost. Default is 60. (Example: @code{master-connect-retry=60})
-
-@item @code{master-info-file=filename} @tab
-The location of the file that remembers where we left off on the master
-during the replication process. The default is master.info in the data
-directory. Sasha: The only reason I see for ever changing the default
-is the desire to be rebellious. (Example:
-@code{master-info-file=master.info})
-
-@item @code{replicate-do-table=db_name.table_name} @tab
-Tells the slave thread to restrict replication to the specified table.
-To specify more than one table, use the directive multiple times,
-once for each table.
-(Example: @code{replicate-do-table=some_db.some_table})
-
-@item @code{replicate-ignore-table=db_name.table_name} @tab
-Tells the slave thread to not replicate to the specified table. To
-specify more than one table to ignore, use the directive multiple
-times, once for each table. (Example:
-@code{replicate-ignore-table=db_name.some_table})
-
-@item @code{replicate-wild-do-table=db_name.table_name} @tab
-Tells the slave thread to restrict replication to the tables that match the
-specified wildcard pattern.
-To specify more than one pattern, use the directive multiple times,
-once for each pattern.
-(Example: @code{replicate-wild-do-table=foo%.bar%} will replicate only
-updates to tables in all databases that start with foo and whose table
-names start with bar)
-
-@item @code{replicate-wild-ignore-table=db_name.table_name} @tab
-Tells the slave thread to not replicate to the tables that match the given
-wildcard pattern. To specify more than one pattern to ignore, use the
-directive multiple times, once for each pattern. (Example:
-@code{replicate-wild-ignore-table=foo%.bar%} will not replicate updates to
-tables in all databases that start with foo and whose table names
-start with bar)
-
-@item @code{replicate-ignore-db=database_name} @tab
-Tells the slave thread to not replicate to the specified database. To
-specify more than one database to ignore, use the directive multiple
-times, once for each database. This option will not work if you use cross
-database updates. If you need cross database updates to work, make sure
-you have 3.23.28 or later, and use
-@code{replicate-wild-ignore-table=db_name.%}. (Example:
-@code{replicate-ignore-db=some_db})
-
-@item @code{replicate-do-db=database_name} @tab
-Tells the slave thread to restrict replication to the specified database.
-To specify more than one database, use the directive multiple times,
-once for each database. Note that this will only work if you do not use
-cross-database queries such as @code{UPDATE some_db.some_table SET
-foo='bar'} while having selected a different or no database. If you need
-cross database updates to work, make sure
-you have 3.23.28 or later, and use
-@code{replicate-wild-do-table=db_name.%}
-(Example: @code{replicate-do-db=some_db})
-
-@item @code{log-slave-updates} @tab
-Tells the slave to log the updates from the slave thread to the binary
-log. Off by default. You will need to turn it on if you plan to
-daisy-chain the slaves.
-
-@item @code{replicate-rewrite-db=from_name->to_name} @tab
-Tells the slave to apply updates to a database with a different name than
-the original. (Example:
-@code{replicate-rewrite-db=master_db_name->slave_db_name})
-
-@item @code{skip-slave-start} @tab
-Tells the slave server not to start the slave thread on startup. The user
-can start it later with @code{SLAVE START}.
-
-@item @code{slave_read_timeout=#} @tab
-Number of seconds to wait for more data from the master before aborting
-the read.
-@end multitable
-
-@cindex SQL commands, replication
-@cindex commands, replication
-@cindex replication, commands
-@node Replication SQL, Replication FAQ, Replication Options, Replication
-@section SQL Commands Related to Replication
-
-Replication can be controlled through the SQL interface. Below is the
-summary of commands:
-
-@multitable @columnfractions .30 .70
-@item @strong{Command} @tab @strong{Description}
-
-@item @code{SLAVE START}
- @tab Starts the slave thread. (Slave)
-
-@item @code{SLAVE STOP}
- @tab Stops the slave thread. (Slave)
-
-@item @code{SET SQL_LOG_BIN=0}
- @tab Disables update logging if the user has process privilege.
- Ignored otherwise. (Master)
-
-@item @code{SET SQL_LOG_BIN=1}
- @tab Re-enables update logging if the user has process privilege.
- Ignored otherwise. (Master)
-
-@item @code{SET SQL_SLAVE_SKIP_COUNTER=n}
- @tab Skip the next @code{n} events from the master. Only valid when
-the slave thread is not running, otherwise, gives an error. Useful for
-recovering from replication glitches.
-
-@item @code{RESET MASTER}
- @tab Deletes all binary logs listed in the index file, resetting the binlog
-index file to be empty. In pre-3.23.26 versions, the command was called
-@code{FLUSH MASTER}. (Master)
-
-@item @code{RESET SLAVE}
- @tab Makes the slave forget its replication position in the master
-logs. In pre-3.23.26 versions the command was called
-@code{FLUSH SLAVE}. (Slave)
-
-@item @code{LOAD TABLE tblname FROM MASTER}
- @tab Downloads a copy of the table from master to the slave. (Slave)
-
-@item @code{CHANGE MASTER TO master_def_list}
- @tab Changes the master parameters to the values specified in
-@code{master_def_list} and restarts the slave thread. @code{master_def_list}
-is a comma-separated list of @code{master_def} where @code{master_def} is
-one of the following: @code{MASTER_HOST}, @code{MASTER_USER},
-@code{MASTER_PASSWORD}, @code{MASTER_PORT}, @code{MASTER_CONNECT_RETRY},
-@code{MASTER_LOG_FILE}, @code{MASTER_LOG_POS}. Example:
-
-@example
-
-CHANGE MASTER TO
- MASTER_HOST='master2.mycompany.com',
- MASTER_USER='replication',
- MASTER_PASSWORD='bigs3cret',
- MASTER_PORT=3306,
- MASTER_LOG_FILE='master2-bin.001',
- MASTER_LOG_POS=4;
-
-@end example
-
-You only need to specify the values that need to be changed. The values that
-you omit will stay the same with the exception of when you change the host or
-the port. In that case, the slave will assume that since you are connecting to
-a different host or a different port, the master is different. Therefore, the
-old values of log and position are not applicable anymore, and will
-automatically be reset to an empty string and 0, respectively (the start
-values). Note that if you restart the slave, it will remember its last master.
-If this is not desirable, you should delete the @file{master.info} file before
-restarting, and the slave will read its master from @code{my.cnf} or the
-command line. (Slave)
-
-@item @code{SHOW MASTER STATUS} @tab Provides status information on the binlog of the master. (Master)
-
-@item @code{SHOW SLAVE STATUS} @tab Provides status information on essential parameters of the slave thread. (Slave)
-@item @code{SHOW MASTER LOGS} @tab Only available starting in Version 3.23.28. Lists the binary logs on the master. You should use this command prior to @code{PURGE MASTER LOGS TO} to find out how far you should go.
-
-@item @code{PURGE MASTER LOGS TO 'logname'}
- @tab Available starting in Version 3.23.28. Deletes all the
-replication logs that are listed in the log
-index as being prior to the specified log, and removes them from the
-log index, so that the given log becomes the first one. Example:
-
-@example
-PURGE MASTER LOGS TO 'mysql-bin.010'
-@end example
-
-This command will do nothing and fail with an error if you have an
-active slave that is currently reading one of the logs you are trying to
-delete. However, if you have a dormant slave, and happen to purge one of
-the logs it wants to read, the slave will be unable to replicate once it
-comes up. The command is safe to run while slaves are replicating - you
-do not need to stop them.
-
-You must first check all the slaves with @code{SHOW SLAVE STATUS} to
-see which log they are on, then do a listing of the logs on the
-master with @code{SHOW MASTER LOGS}, find the earliest log among all
-the slaves (if all the slaves are up to date, this will be the
-last log on the list), back up all the logs you are about to delete
-(optional), and purge up to the target log. A sketch of this procedure
-is shown below.
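-
-For example, assuming every slave reports via @code{SHOW SLAVE STATUS}
-that it has already moved past @file{mysql-bin.010} (a hypothetical log
-name), the session on the master might look like this:
-
-@example
-mysql> SHOW MASTER LOGS;
-mysql> PURGE MASTER LOGS TO 'mysql-bin.010';
-@end example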
-
-@end multitable
-
-@node Replication FAQ, Replication Problems, Replication SQL, Replication
-@section Replication FAQ
-
-@cindex @code{Binlog_Dump}
-@strong{Q}: Why do I sometimes see more than one @code{Binlog_Dump} thread on
-the master after I have restarted the slave?
-
-@strong{A}: @code{Binlog_Dump} is a continuous process that is handled by the
-server in the following way:
-
-@itemize @bullet
-@item
-Catch up on the updates.
-@item
-Once there are no more updates left, go into @code{pthread_cond_wait()},
-from which we can be awakened either by an update or a kill.
-@item
-On wake up, check the reason. If we are not supposed to die, continue
-the @code{Binlog_dump} loop.
-@item
-If there is some fatal error, such as detecting a dead client,
-terminate the loop.
-@end itemize
-
-So if the slave thread stops on the slave, the corresponding
-@code{Binlog_Dump} thread on the master will not notice it until after
-at least one update to the master (or a kill), which is needed to wake
-it up from @code{pthread_cond_wait()}. In the meantime, the slave
-could have opened another connection, resulting in another
-@code{Binlog_Dump} thread.
-
-The above problem should not be present in Version 3.23.26 and later
-versions. In Version 3.23.26 we added @code{server-id} to each
-replication server, and now all the old zombie threads are killed on the
-master when a new replication thread connects from the same slave.
-
-@strong{Q}: How do I rotate replication logs?
-
-@strong{A}: In Version 3.23.28 you should use the @code{PURGE MASTER LOGS
-TO} command after determining which logs can be deleted, and optionally
-backing them up first. In earlier versions the process is much more
-painful, and cannot be done safely without stopping all the slaves if
-you plan to re-use log names. You will need to stop the
-slave threads, edit the binary log index file, delete all the old logs,
-restart the master, start slave threads, and then remove the old log files.
-
-
-@strong{Q}: How do I upgrade on a hot replication setup?
-
-@strong{A}: If you are upgrading pre-3.23.26 versions, you should just
-lock the master tables, let the slave catch up, then run @code{FLUSH
-MASTER} on the master, and @code{FLUSH SLAVE} on the slave to reset the
-logs, then restart new versions of the master and the slave. Note that
-the slave can stay down for some time - since the master is logging
-all the updates, the slave will be able to catch up once it is up and
-can connect.
-
-After 3.23.26, we have locked the replication protocol for modifications, so
-you can upgrade masters and slaves on the fly to a newer 3.23 version and you
-can have different versions of @strong{MySQL} running on the slave and the
-master, as long as they are both newer than 3.23.26.
-
-@cindex replication, two-way
-@strong{Q}: What issues should I be aware of when setting up two-way
-replication?
-
-@strong{A}: @strong{MySQL} replication currently does not support any
-locking protocol between master and slave to guarantee the atomicity of
-a distributed (cross-server) update. In other words, it is possible
-for client A to make an update to co-master 1, and in the meantime,
-before it propagates to co-master 2, client B could make an update to
-co-master 2 that will make the update of client A work differently than
-it did on co-master 1. Thus, when the update of client A makes it
-to co-master 2, it will produce tables that are different from
-what you have on co-master 1, even after all the updates from co-master
-2 have also propagated. So you should not co-chain two servers in a
-two-way replication relationship, unless you are sure that your updates
-can safely happen in any order, or unless you take care of mis-ordered
-updates somehow in the client code.
-
-
-You must also realize that two-way replication actually does not improve
-performance very much, if at all, as far as updates are concerned. Both
-servers need to do the same number of updates each, as you would have
-one server do. The only difference is that there will be a little less
-lock contention, because the updates originating on another server will
-be serialized in one slave thread. This benefit, though, might be
-offset by network delays.
-
-@cindex performance, improving
-@cindex increasing, performance
-@strong{Q}: How can I use replication to improve performance of my system?
-
-@strong{A}: You should set up one server as the master, and direct all
-writes to it, and configure as many slaves as you have the money and
-rackspace for, distributing the reads among the master and the slaves.
-You can also start the slaves with @code{--skip-bdb},
-@code{--low-priority-updates} and @code{--delay-key-write-for-all-tables}
-to get speed improvements for the slave. In this case the slave will
-use non-transactional @code{MyISAM} tables instead of @code{BDB} tables
-to get more speed.
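-
-For example, such a slave might be started with something like the
-following (a minimal sketch; any other options you normally give the
-slave go on the same command line):
-
-@example
-shell> safe_mysqld --skip-bdb --low-priority-updates \
-           --delay-key-write-for-all-tables &
-@end example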
-
-@strong{Q}: What should I do to prepare my client code to use
-performance-enhancing replication?
-
-@strong{A}:
-If the part of your code that is responsible for database access has
-been properly abstracted/modularized, converting it to run with the
-replicated setup should be very smooth and easy - just change the
-implementation of your database access to read from some slave or the
-master, and to always write to the master. If your code does not have
-this level of abstraction, setting up a replicated system will give you
-an opportunity/motivation to clean it up. You should start by creating
-a wrapper library/module with the following functions:
-
-@itemize
-@item
-@code{safe_writer_connect()}
-@item
-@code{safe_reader_connect()}
-@item
-@code{safe_reader_query()}
-@item
-@code{safe_writer_query()}
-@end itemize
-
-@code{safe_} means that the function will take care of handling all
-the error conditions.
-
-You should then convert your client code to use the wrapper library.
-It may be a painful and scary process at first, but it will pay off in
-the long run. All applications that follow the above pattern will be
-able to take advantage of a one-master/many-slaves solution. The
-code will be a lot easier to maintain, and adding troubleshooting
-options will be trivial. You will just need to modify one or two
-functions, for example, to log how long each query took, or which
-query, among your many thousands, gave you an error. If you have written a lot of code already,
-you may want to automate the conversion task by using Monty's
-@code{replace} utility, which comes with the standard distribution of
-@strong{MySQL}, or just write your own Perl script. Hopefully, your
-code follows some recognizable pattern. If not, then you are probably
-better off re-writing it anyway, or at least going through and manually
-beating it into a pattern.
-
-Note that, of course, you can use different names for the
-functions. What is important is having a unified interface for connecting
-for reads, connecting for writes, doing a read, and doing a write.
-
-
-@strong{Q}: When and how much can @strong{MySQL} replication improve the performance
-of my system?
-
-@strong{A}: @strong{MySQL} replication is most beneficial for a system
-with frequent reads and not so frequent writes. In theory, by using a
-one master/many slaves setup you can scale by adding more slaves until
-you either run out of network bandwidth, or your update
-load grows to the point
-that the master cannot handle it.
-
-In order to determine how many slaves you can get before the added
-benefits begin to level out, and how much you can improve performance
-of your site, you need to know your query patterns, and empirically
- (by benchmarking) determine the relationship between the throughput
-on reads (reads per second, or @code{max_reads}) and on writes
-(@code{max_writes}) on a typical master and a typical slave. The
-example below will show you a rather simplified calculation of what you
-can get with replication for our imagined system.
-
-Let's say our system load consists of 10% writes and 90% reads, and we
-have determined that @code{max_reads} = 1200 - 2 * @code{max_writes},
-or in other words, our system can do 1200 reads per second with no
-writes, our average write is twice as slow as average read,
-and the relationship is
-linear. Let us suppose that our master and slave are of the same
-capacity, and we have N slaves and 1 master. Then we have for each
-server (master or slave):
-
-@code{reads = 1200 - 2 * writes} (from benchmarks)
-
-@code{reads = 9 * writes / (N + 1)} (reads split, but writes go
-to all servers)
-
-@code{9*writes/(N+1) + 2 * writes = 1200}
-
-@code{writes = 1200/(2 + 9/(N+1))}
-
-So if N = 0, which means we have no replication, our system can handle
-1200/11, about 109 writes per second (which means we will have 9 times
-as many reads due to the nature of our application).
-
-If N = 1, we can get up to 184 writes per second.
-
-If N = 8, we get up to 400.
-
-If N = 17, 480 writes.
-
-Eventually as N approaches infinity (and our budget negative infinity),
-we can get very close to 600 writes per second, increasing system
-throughput about 5.5 times. However, with only 8 servers, we increased
-it almost 4 times already.
-
-Note that our computations assumed infinite network bandwidth, and
-neglected several other factors that could turn out to be significant on
-your system. In many cases, you may not be able to make a computation
-similar to the one above that will accurately predict what will happen
-on your system if you add N replication slaves. However, answering the
-following questions should help you decide whether and how much, if at
-all, the replication will improve the performance of your system:
-
-@itemize @bullet
-@item
-What is the read/write ratio on your system?
-@item
-How much more write load can one server handle if you reduce the reads?
-@item
-How many slaves do you have bandwidth for on your network?
-@end itemize
-
-@strong{Q}: How can I use replication to provide redundancy/high
-availability?
-
-@strong{A}: With the currently available features, you would have to
-set up a master and a slave (or several slaves), and write a script
-that will monitor the master to see if it is up, and instruct your
-applications and the slaves to change masters in case of failure.
-Some suggestions:
-
-@itemize @bullet
-@item
-To tell a slave to change the master use the @code{CHANGE MASTER TO} command.
-@item
-A good way to keep your applications informed as to where the master is,
-is to have a dynamic DNS entry for the master. With @strong{bind} you can
-use @code{nsupdate} to dynamically update your DNS.
-@item
-You should run your slaves with the @code{log-bin} option and without
-@code{log-slave-updates}. This way the slave will be ready to become a
-master as soon as you issue @code{STOP SLAVE} and @code{RESET MASTER} on
-it, and @code{CHANGE MASTER TO} on the other slaves (see the sketch after
-this list). It will also help you catch
-spurious updates that may happen because of misconfiguration of the
-slave (ideally, you want to configure access rights so that no client
-can update the slave, except for the slave thread) combined with
-bugs in your client programs (they should never update the slave
-directly).
-
-@end itemize
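-
-A minimal sketch of the promotion sequence, under the assumption that
-@code{slave1.mycompany.com} (a hypothetical host name) is the slave being
-promoted:
-
-@example
-# On the slave that is to become the new master:
-mysql> STOP SLAVE;
-mysql> RESET MASTER;
-
-# On each of the remaining slaves:
-mysql> CHANGE MASTER TO MASTER_HOST='slave1.mycompany.com';
-@end example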
-
-We are currently working on integrating an automatic master election
-system into @strong{MySQL}, but until it is ready, you will have to
-create your own monitoring tools.
-
-@node Replication Problems, , Replication FAQ, Replication
-@section Troubleshooting Replication
-
-If you have followed the instructions, and your replication setup is not
-working, first eliminate the user error factor by checking the following:
-
-@itemize @bullet
-@item
-Is the master logging to the binary log? Check with @code{SHOW MASTER STATUS}.
-If it is, @code{Position} will be non-zero. If not, verify that you have
-given the master @code{log-bin} option and have set @code{server-id}.
-@item
-Is the slave running? Check with @code{SHOW SLAVE STATUS}. The answer is found
-in the @code{Slave_running} column. If not, verify the slave options and check the
-error log for messages.
-@item
-If the slave is running, did it establish a connection with the master? Do
-@code{SHOW PROCESSLIST}, find the thread with the @code{system user} value in
-the @code{User} column and @code{none} in the @code{Host} column, and check the
-@code{State} column. If it says @code{connecting to master}, verify the
-privileges for the replication user on the master, master host name, your
-DNS setup, whether the master is actually running, whether it is reachable
-from the slave, and if all that seems ok, read the error logs.
-@item
-If the slave was running, but then stopped, look at the @code{SHOW SLAVE STATUS}
-output and check the error logs. This usually
-happens when some query that succeeded on the master fails on the slave. It
-should never happen if you have taken a proper snapshot of the master, and
-never modify the data on the slave outside of the slave thread. If it does,
-it is a bug; read below on how to report it.
-@item
-If a query that succeeded on the master refuses to run on the slave, and
-a full database resync (the proper thing to do) does not seem feasible,
-try the following:
-@itemize @bullet
-@item
-First see if there is some stray record in the way. Understand how it got
-there, then delete it and run @code{SLAVE START}.
-@item
-If the above does not work or does not apply, try to understand if it would
-be safe to make the update manually (if needed) and then ignore the next
-query from the master.
-@item
-If you have decided you can skip the next query, do
-@code{SET SQL_SLAVE_SKIP_COUNTER=1; SLAVE START;} to skip a query that
-does not use @code{auto_increment} or @code{last_insert_id}, or
-@code{SET SQL_SLAVE_SKIP_COUNTER=2; SLAVE START;} otherwise. The reason
-@code{auto_increment}/@code{last_insert_id} queries are different is that they take
-two events in the binary log of the master.
-
-@item
-If you are sure the slave started out perfectly in sync with the master,
-and no one has updated the tables involved outside of the slave thread,
-report the bug, so
-you will not have to do the above tricks again.
-@end itemize
-@item
-Make sure you are not running into an old bug by upgrading to the most recent
-version.
-@item
-If all else fails, read the error logs. If they are big,
-@code{grep -i slave /path/to/your-log.err} on the slave. There is no
-generic pattern to search for on the master, as the only errors it logs
-are general system errors - if it can, it will send the error to the slave
-when things go wrong.
-@end itemize
-
-When you have determined that there is no user error involved, and replication
-still either does not work at all or is unstable, it is time to start working
-on a bug report. We need to get as much info as possible from you to be able
-to track down the bug. Please do spend some time and effort preparing a good
-bug report. Ideally, we would like to have a test case in the format found in
-@code{mysql-test/t/rpl*} directory of the source tree. If you submit a test
-case like that, you can expect a patch within a day or two in most cases,
-although, of course, your mileage may vary depending on a number of factors.
-
-The second best option is just a program with easily configurable connection
-arguments for the master and the slave that will demonstrate the problem on our
-systems. You can write one in Perl or in C, depending on which language you
-know better.
-
-If you have one of the above ways to demonstrate the bug, use
-@code{mysqlbug} to prepare a bug report and send it to
-@email{bugs@@lists.mysql.com}. If you have a phantom - a problem that
-does occur but you cannot duplicate "at will":
-
-@itemize @bullet
-@item
-Verify that there is no user error involved. For example, if you update the
-slave outside of the slave thread, the data will be out of sync, and you can
-have unique key violations on updates, in which case the slave thread will
-stop and wait for you to clean up the tables manually to bring them in sync.
-@item
-Run the slave with @code{log-slave-updates} and @code{log-bin} - this will keep
-a log of all updates on the slave.
-@item
-Save all evidence before resetting the replication. If we have no or only
-sketchy information, it would take us a while to track down the problem. The
-evidence you should collect is:
-@itemize @bullet
-@item
-All binary logs on the master
-@item
-All binary logs on the slave
-@item
-The output of @code{SHOW MASTER STATUS} on the master at the time
-you have discovered the problem
-@item
-The output of @code{SHOW SLAVE STATUS} on the slave at the time
-you have discovered the problem
-@item
-Error logs on the master and on the slave
-@end itemize
-@item
-Use @code{mysqlbinlog} to examine the binary logs. The following should
-be helpful in finding the trouble query, for example:
-@example
-mysqlbinlog -j pos_from_slave_status /path/to/log_from_slave_status | head
-@end example
-@end itemize
-
-Once you have collected the evidence on the phantom problem, try hard to
-isolate it into a separate test case first. Then report the problem to
-@email{bugs@@lists.mysql.com} with as much info as possible.
+@node Fulltext Search, Maintenance, Table types, Top
+@chapter MySQL Full-text Search
@cindex searching, full-text
@cindex full-text search
@cindex FULLTEXT
-@node Fulltext Search, Performance, Replication, Top
-@chapter MySQL Full-text Search
Since Version 3.23.23, @strong{MySQL} has support for full-text indexing
and searching. Full-text indexes in @strong{MySQL} are an index of type
@@ -31843,12 +36729,23 @@ the Internet with a search engine. It is with this reasoning that such rows
have been assigned a low semantical value in @strong{this particular dataset}.
@menu
+* Fulltext restrictions::
* Fulltext Fine-tuning::
* Fulltext Features to Appear in MySQL 4.0::
* Fulltext TODO::
@end menu
-@node Fulltext Fine-tuning, Fulltext Features to Appear in MySQL 4.0, Fulltext Search, Fulltext Search
+@node Fulltext restrictions, Fulltext Fine-tuning, Fulltext Search, Fulltext Search
+@section Fulltext restrictions
+@itemize @bullet
+@item
+All parameters to the @code{MATCH} function must be columns from the
+same table, and they must be part of the same fulltext index
+(see the example below).
+@item
+The argument to @code{AGAINST} must be a constant string.
+@end itemize
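+
+For example, with a hypothetical table that has a fulltext index on the
+columns @code{(a,b)}, both restrictions are satisfied by a query like this:
+
+@example
+CREATE TABLE t (a VARCHAR(200), b TEXT, FULLTEXT (a,b));
+
+SELECT * FROM t WHERE MATCH (a,b) AGAINST ('some words');
+@end example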
+
+@node Fulltext Fine-tuning, Fulltext Features to Appear in MySQL 4.0, Fulltext restrictions, Fulltext Search
@section Fine-tuning MySQL Full-text Search
Unfortunately, full-text search has no user-tunable parameters yet,
@@ -31905,7 +36802,7 @@ implemented in the 4.0 tree. It explains
@code{OPTIMIZE TABLE} with @code{FULLTEXT} indexes are now
up to 100 times faster.
-@item @code{MATCH ... AGAINST} now supports the following
+@item @code{MATCH ... AGAINST} is going to support the following
@strong{boolean operators}:
@itemize @bullet
@@ -31950,4036 +36847,9 @@ the user wants to treat as words, examples are "C++", "AS/400", "TCP/IP", etc.
parameters to @code{FULLTEXT} in @code{CREATE/ALTER TABLE}).
@end itemize
-@cindex performance, maximizing
-@cindex optimization
-@node Performance, MySQL Benchmarks, Fulltext Search, Top
-@chapter Getting Maximum Performance from MySQL
-
-Optimization is a complicated task because it ultimately requires
-understanding of the whole system. While it may be possible to do some
-local optimizations with small knowledge of your system/application, the
-more optimal you want your system to become the more you will have to
-know about it.
-
-So this chapter will try to explain and give some examples of different
-ways to optimize @strong{MySQL}. But remember that there are always some
-(increasingly harder) additional ways to make the system even faster.
-
-@menu
-* Optimize Basics:: Optimization overview
-* System:: System/Compile time and startup parameter tuning
-* Data size:: Get your data as small as possible
-* MySQL indexes:: How @strong{MySQL} uses indexes
-* Query Speed:: Speed of queries that access or update data
-* Tips:: Other optimization tips
-* Benchmarks:: Using your own benchmarks
-* Design:: Design choices
-* Design Limitations:: MySQL design limitations/tradeoffs
-* Portability:: Portability
-* Internal use:: What have we used MySQL for?
-@end menu
-
-@node Optimize Basics, System, Performance, Performance
-@section Optimization Overview
-
-The most important part for getting a system fast is of course the basic
-design. You also need to know what kinds of things your system will be
-doing, and what your bottlenecks are.
-
-The most common bottlenecks are:
-@itemize @bullet
-@item Disk seeks.
-It takes time for the disk to find a piece of data. With modern disks in
-1999, the mean time for this is usually lower than 10ms, so we can in
-theory do about 1000 seeks a second. This time improves slowly with new
-disks and is very hard to optimize for a single table. The way to
-optimize this is to spread the data on more than one disk.
-@item Disk reading/writing.
-When the disk is at the correct position we need to read the data. With
-modern disks in 1999, one disk delivers something like 10-20Mb/s. This
-is easier to optimize than seeks because you can read in parallel from
-multiple disks.
-@item CPU cycles.
-When we have the data in main memory (or if it was already
-there) we need to process it to get to our result. Having tables that
-are small compared to the available memory is the most common case in
-which this is the limiting factor. But then, with small tables, speed
-is usually not the problem.
-@item Memory bandwidth.
-When the CPU needs more data than can fit in the CPU cache the main
-memory bandwidth becomes a bottleneck. This is an uncommon bottleneck
-for most systems, but one should be aware of it.
-@end itemize
-
-@cindex compiling, optimizing
-@cindex system optimization
-@cindex startup parameters, tuning
-@node System, Data size, Optimize Basics, Performance
-@section System/Compile Time and Startup Parameter Tuning
-
-We start with the system level things since some of these decisions have
-to be made very early. In other cases a fast look at this part may
-suffice because it is not that important for the big gains. However, it is always
-nice to have a feeling about how much one could gain by changing things
-at this level.
-
-The default OS to use is really important! To get the most use of
-multiple-CPU machines one should use Solaris (because its thread
-implementation works really well) or Linux (because the 2.2 kernel has
-really good SMP support). Also, on 32-bit machines Linux has a 2G file
-size limit by default. Hopefully this will be fixed soon when new filesystems are
-released (XFS/Reiserfs). If you have a desperate need for files bigger
-than 2G on Linux-intel 32 bit, you should get the LFS patch for the ext2
-file system.
-
-Because we have not run @strong{MySQL} in production on that many platforms, we
-advise you to test your intended platform before choosing it, if possible.
-
-@cindex locking
-Other tips:
-@itemize @bullet
-@item
-If you have enough RAM, you could remove all swap devices. Some
-operating systems will use a swap device in some contexts even if you
-have free memory.
-@item
-Use the @code{--skip-locking} @strong{MySQL} option to avoid external
-locking. Note that this will not impact @strong{MySQL}'s functionality as
-long as you only run one server. Just remember to take down the server (or
-lock the relevant parts) before you run @code{myisamchk}. On some systems
-this switch is mandatory because external locking does not work in any
-case.
-
-The @code{--skip-locking} option is on by default when compiling with
-MIT-pthreads, because @code{flock()} isn't fully supported by
-MIT-pthreads on all platforms. It's also on by default for Linux,
-as Linux file locking is not yet safe.
-
-The only case when you can't use @code{--skip-locking} is if you run
-multiple @strong{MySQL} @emph{servers} (not clients) on the same data,
-or if you run @code{myisamchk} on a table without first flushing and
-locking the @code{mysqld} server tables.
-
-You can still use @code{LOCK TABLES}/@code{UNLOCK TABLES} even if you
-are using @code{--skip-locking}.
-@end itemize
-
-@menu
-* Compile and link options:: How compiling and linking affects the speed of MySQL
-* Disk issues:: Disk issues
-* Symbolic links:: Using Symbolic Links
-* Server parameters:: Tuning server parameters
-* Table cache:: How MySQL opens and closes tables
-* Creating many tables:: Drawbacks of creating large numbers of tables in the same database
-* Open tables:: Why so many open tables?
-* Memory use:: How MySQL uses memory
-* Internal locking:: How MySQL locks tables
-* Table locking:: Table locking issues
-* DNS::
-@end menu
-
-@node Compile and link options, Disk issues, System, System
-@subsection How Compiling and Linking Affects the Speed of MySQL
-
-Most of the following tests are done on Linux with the
-@strong{MySQL} benchmarks, but they should give some indication for
-other operating systems and workloads.
-
-@cindex linking, speed
-@cindex compiling, speed
-@cindex speed, compiling
-@cindex speed, linking
-
-You get the fastest executable when you link with @code{-static}.
-
-On Linux, you will get the fastest code when compiling with @code{pgcc}
-and @code{-O3}. To compile @file{sql_yacc.cc} with these options, you
-need about 200M memory because @code{gcc/pgcc} needs a lot of memory to
-make all functions inline. You should also set @code{CXX=gcc} when
-configuring @strong{MySQL} to avoid inclusion of the @code{libstdc++}
-library (it is not needed). Note that with some versions of @code{pgcc},
-the resulting code will only run on true Pentium processors, even if you
-use the compiler option saying that you want the resulting code to work on
-all x586-type processors (like AMD).
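-
-For example, a configure invocation along these lines could be used (a
-sketch only; the exact flags depend on your compiler and system):
-
-@example
-shell> CC=pgcc CFLAGS="-O3" CXX=gcc CXXFLAGS="-O3" \
-           ./configure --prefix=/usr/local/mysql
-@end example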
-
-By just using a better compiler and/or better compiler options you can
-get a 10-30 % speed increase in your application. This is particularly
-important if you compile the SQL server yourself!
-
-We have tested both the Cygnus CodeFusion and Fujitsu compilers, but
-when we tested them, neither was sufficiently bug free to allow
-@strong{MySQL} to be compiled with optimizations on.
-
-When you compile @strong{MySQL} you should only include support for the
-character sets that you are going to use. (Option @code{--with-charset=xxx}).
-The standard @strong{MySQL} binary distributions are compiled with support
-for all character sets.
-
-Here is a list of some measurements that we have done:
-@itemize @bullet
-@item
-If you use @code{pgcc} and compile everything with @code{-O6}, the
-@code{mysqld} server is 1% faster than with @code{gcc} 2.95.2.
-
-@item
-If you link dynamically (without @code{-static}), the result is 13%
-slower on Linux. Note that you can still use a dynamically linked
-@strong{MySQL} library. It is only the server that is critical for
-performance.
-
-@item
-If you strip your @code{mysqld} binary with @code{strip libexec/mysqld},
-the resulting binary can be up to 4 % faster.
-
-@item
-If you connect using TCP/IP rather than Unix sockets, the result is 7.5%
-slower on the same computer. (If you are connecting to @code{localhost},
-@strong{MySQL} will, by default, use sockets).
-
-@item
-If you connect using TCP/IP from another computer over a 100M Ethernet,
-things will be 8-11 % slower.
-
-@item
-If you compile with @code{--with-debug=full}, then you will lose 20 %
-for most queries, but some queries may take substantially longer (the
-@strong{MySQL} benchmarks ran 35 % slower).
-If you use @code{--with-debug}, then you will only lose 15 %.
-By starting a @code{mysqld} version compiled with @code{--with-debug=full}
-with @code{--skip-safemalloc} the end result should be close to when
-configuring with @code{--with-debug}.
-
-@item
-On a Sun SPARCstation 20, SunPro C++ 4.2 is 5 % faster than @code{gcc} 2.95.2.
-
-@item
-Compiling with @code{gcc} 2.95.2 for ultrasparc with the option
-@code{-mcpu=v8 -Wa,-xarch=v8plusa} gives 4 % more performance.
-
-@item
-On Solaris 2.5.1, MIT-pthreads is 8-12% slower than Solaris native
-threads on a single processor. With more load/CPUs the difference should
-get bigger.
-
-@item
-Running with @code{--log-bin} makes @strong{MySQL} 1 % slower.
-
-@item
-Compiling on Linux-x86 using gcc without frame pointers
-(@code{-fomit-frame-pointer} or @code{-fomit-frame-pointer -ffixed-ebp})
-makes @code{mysqld} 1-4% faster.
-@end itemize
-
-The @strong{MySQL}-Linux distribution provided by @strong{MySQL AB} used
-to be compiled with @code{pgcc}, but we had to go back to regular gcc
-because of a bug in @code{pgcc} that would generate code that does
-not run on AMD. We will continue using gcc until that bug is resolved.
-In the meantime, if you have a non-AMD machine, you can get a faster
-binary by compiling with @code{pgcc}. The standard @strong{MySQL}
-Linux binary is linked statically to make it faster and more portable.
-
-@cindex disk issues
-@cindex performance, disk issues
-@node Disk issues, Symbolic links, Compile and link options, System
-@subsection Disk Issues
-
-@itemize @bullet
-@item
-As mentioned before, disk seeks are a big performance bottleneck. This
-problem gets more and more apparent when the data starts to grow so
-large that effective caching becomes impossible. For large databases,
-where you access data more or less randomly, you can be sure that you
-will need at least one disk seek to read and a couple of disk seeks to
-write things. To minimize this problem, use disks with low seek times.
-@item
-Increase the number of available disk spindles (and thereby reduce
-the seek overhead) by either symlinking files to different disks or striping
-the disks.
-@table @strong
-@item Using symbolic links
-This means that you symlink the index and/or data file(s) from the
-normal data directory to another disk (that may also be striped). This
-makes both the seek and read times better (if the disks are not used for
-other things). @xref{Symbolic links}.
-@cindex striping, defined
-@item Striping
-Striping means that you have many disks and put the first block on the
-first disk, the second block on the second disk, and the Nth on the
-(N mod number_of_disks) disk, and so on. This means if your normal data
-size is less than the stripe size (or perfectly aligned) you will get
-much better performance. Note that striping is very dependent on the OS
-and stripe-size. So benchmark your application with different
-stripe-sizes. @xref{Benchmarks}.
-
-Note that the speed difference for striping is @strong{very} dependent
-on the parameters. Depending on how you set the striping parameters and
-number of disks you may get a difference in orders of magnitude. Note that
-you have to choose to optimize for random or sequential access.
-@end table
-@item
-For reliability you may want to use RAID 0+1 (striping + mirroring), but
-in this case you will need 2*N drives to hold N drives of data. This is
-probably the best option if you have the money for it! You may, however,
-also have to invest in some volume-management software to handle it
-efficiently.
-@item
-A good option is to have semi-important data (that can be regenerated)
-on a RAID 0 disk while storing really important data (like host information
-and logs) on a RAID 0+1 or RAID N disk. RAID N can be a problem if you
-have many writes because of the time to update the parity bits.
-@item
-You may also set the parameters for the file system that the database
-uses. One easy change is to mount the file system with the noatime
-option. That makes it skip updating the last access time in the
-inode, which avoids some disk seeks.
-@item
-On Linux, you can get much more performance (up to 100 % under load is
-not uncommon) by using @code{hdparm} to configure your disk's interface! The
-following should be quite good @code{hdparm} options for @strong{MySQL} (and
-probably many other applications):
-@example
-hdparm -m 16 -d 1
-@end example
-
-Note that the performance/reliability when using the above depends on
-your hardware, so we strongly suggest that you test your system
-thoroughly after using @code{hdparm}! Please consult the @code{hdparm}
-man page for more information! If @code{hdparm} is not used wisely,
-filesystem corruption may result. Back up everything before experimenting!
-@item
-On many operating systems you can mount the disks with the 'async' flag to set
-the file system to be updated asynchronously. If your computer is reasonably
-stable, this should give you more performance without sacrificing too much
-reliability.
-(This flag is on by default on Linux.)
-@item
-If you don't need to know when a file was last accessed (which is not
-really useful on a database server), you can mount your file systems
-with the noatime flag.
-@end itemize
-
-@cindex symbolic links
-@cindex links, symbolic
-@node Symbolic links, Server parameters, Disk issues, System
-@subsection Using Symbolic Links
-
-You can move tables and databases from the database directory to other
-locations and replace them with symbolic links to the new locations.
-You might want to do this, for example, to move a database to a file
-system with more free space or increase the speed of your system by
-spreading your tables to different disks.
-
-The recommended way to do this is to just symlink databases to a different
-disk and only symlink tables as a last resort.
-
-@cindex databases, symbolic links
-@menu
-* Symbolic links to database::
-* Symbolic links to tables::
-@end menu
-
-@node Symbolic links to database, Symbolic links to tables, Symbolic links, Symbolic links
-@subsubsection Using Symbolic Links for Databases
-
-The way to symlink a database is to first create a directory on some
-disk where you have free space and then create a symlink to it from
-the @strong{MySQL} database directory.
-
-@example
-shell> mkdir /dr1/databases/test
-shell> ln -s /dr1/databases/test mysqld-datadir
-@end example
-
-@strong{MySQL} doesn't support linking one directory to multiple
-databases. Replacing a database directory with a symbolic link will
-work fine as long as you don't make a symbolic link between databases.
-Suppose you have a database @code{db1} under the @strong{MySQL} data
-directory, and then make a symlink @code{db2} that points to @code{db1}:
-
-@example
-shell> cd /path/to/datadir
-shell> ln -s db1 db2
-@end example
-
-Now, for any table @code{tbl_a} in @code{db1}, there also appears to be
-a table @code{tbl_a} in @code{db2}. If one thread updates @code{db1.tbl_a}
-and another thread updates @code{db2.tbl_a}, there will be problems.
-
-If you really need this, you must change the following code in
-@file{mysys/mf_format.c}:
-
-@example
-if (flag & 32 || (!lstat(to,&stat_buff) && S_ISLNK(stat_buff.st_mode)))
-@end example
-
-to
-
-@example
-if (1)
-@end example
-
-On Windows you can use internal symbolic links to directories by compiling
-@strong{MySQL} with @code{-DUSE_SYMDIR}. This allows you to put different
-databases on different disks. @xref{Windows symbolic links}.
-
-@cindex databases, symbolic links
-@node Symbolic links to tables, , Symbolic links to database, Symbolic links
-@subsubsection Using Symbolic Links for Tables
-
-Before @strong{MySQL} 4.0 you should not symlink tables unless you are
-very careful with them. The problem is that if you run @code{ALTER
-TABLE}, @code{REPAIR TABLE}, or @code{OPTIMIZE TABLE} on a symlinked
-table, the symlinks will be removed and replaced by the original
-files. This happens because the above commands work by creating a
-temporary file in the database directory and, when the command is
-complete, replacing the original file with the temporary file.
-
-You should not symlink tables on systems that don't have a fully
-working @code{realpath()} call. (At least Linux and Solaris support
-@code{realpath()}.)
-
-In @strong{MySQL} 4.0 symlinks are only fully supported for @code{MyISAM}
-tables. For other table types you will probably get strange problems
-when doing any of the above-mentioned commands.
-
-The handling of symbolic links in @strong{MySQL} 4.0 works the following
-way (this is mostly relevant only for @code{MyISAM} tables):
-
-@itemize @bullet
-@item
-In the data directory you will always have the table definition file
-and the data/index files.
-@item
-You can symlink the index file and the data file to different directories
-independently of each other.
-@item
-The symlinking can be done from the operating system (if @code{mysqld} is
-not running) or with the @code{INDEX/DATA DIRECTORY="path-to-dir"} options
-in @code{CREATE TABLE} (see the sketch after this list). @xref{CREATE TABLE}.
-@item
-@code{myisamchk} will not replace a symlink with the index/data file, but
-will work directly on the files the symlinks point to. Any temporary files
-will be created in the same directory where the data/index file is.
-@item
-When you drop a table that is using symlinks, both the symlink and the
-file the symlink points to are dropped. This is a good reason why you
-should NOT run @code{mysqld} as root or allow anyone to have write
-access to the @strong{MySQL} database directories.
-@item
-If you rename a table with @code{ALTER TABLE RENAME} and you don't change
-databases, the symlink in the database directory will be renamed to the new
-name and the data/index file will be renamed accordingly.
-@item
-If you use @code{ALTER TABLE RENAME} to move a table to another database,
-then the table will be moved to the other database directory and the old
-symlinks and the files they pointed to will be deleted.
-@item
-If you are not using symlinks you should use the @code{--skip-symlink}
-option to @code{mysqld} to ensure that no one can drop or rename a file
-outside of the @code{mysqld} data directory.
-@end itemize
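-
-A minimal sketch of the @code{CREATE TABLE} form (the table name and
-directories are hypothetical; this requires @strong{MySQL} 4.0 and a
-@code{MyISAM} table):
-
-@example
-CREATE TABLE t (i INT)
-    DATA DIRECTORY="/disk2/data"
-    INDEX DIRECTORY="/disk3/index";
-@end example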
-
-Things that are not yet supported:
-
-@cindex TODO, symlinks
-@itemize @bullet
-@item
-@code{ALTER TABLE} ignores all @code{INDEX/DATA DIRECTORY="path"} options.
-@item
-@code{CREATE TABLE} doesn't report if the table has symbolic links.
-@item
-@code{mysqldump} doesn't include the symbolic links information in the output.
-@item
-@code{BACKUP TABLE} and @code{RESTORE TABLE} don't use symbolic links.
-@end itemize
-
-@cindex parameters, server
-@cindex @code{mysqld} server, buffer sizes
-@cindex buffer sizes, @code{mysqld} server
-@cindex startup parameters
-@node Server parameters, Table cache, Symbolic links, System
-@subsection Tuning Server Parameters
-
-You can get the default buffer sizes used by the @code{mysqld} server
-with this command:
-
-@example
-shell> mysqld --help
-@end example
-
-@cindex @code{mysqld} options
-@cindex variables, @code{mysqld}
-This command produces a list of all @code{mysqld} options and configurable
-variables. The output includes the default values and looks something
-like this:
-
-@example
-Possible variables for option --set-variable (-O) are:
-back_log current value: 5
-bdb_cache_size current value: 1048540
-binlog_cache_size current_value: 32768
-connect_timeout current value: 5
-delayed_insert_timeout current value: 300
-delayed_insert_limit current value: 100
-delayed_queue_size current value: 1000
-flush_time current value: 0
-interactive_timeout current value: 28800
-join_buffer_size current value: 131072
-key_buffer_size current value: 1048540
-lower_case_table_names current value: 0
-long_query_time current value: 10
-max_allowed_packet current value: 1048576
-max_binlog_cache_size current_value: 4294967295
-max_connections current value: 100
-max_connect_errors current value: 10
-max_delayed_threads current value: 20
-max_heap_table_size current value: 16777216
-max_join_size current value: 4294967295
-max_sort_length current value: 1024
-max_tmp_tables current value: 32
-max_write_lock_count current value: 4294967295
-myisam_sort_buffer_size current value: 8388608
-net_buffer_length current value: 16384
-net_retry_count current value: 10
-net_read_timeout current value: 30
-net_write_timeout current value: 60
-query_buffer_size current value: 0
-record_buffer current value: 131072
-slow_launch_time current value: 2
-sort_buffer current value: 2097116
-table_cache current value: 64
-thread_concurrency current value: 10
-tmp_table_size current value: 1048576
-thread_stack current value: 131072
-wait_timeout current value: 28800
-@end example
-
-If there is a @code{mysqld} server currently running, you can see what
-values it actually is using for the variables by executing this command:
-
-@example
-shell> mysqladmin variables
-@end example
-
-You can find a full description for all variables in the @code{SHOW VARIABLES}
-section in this manual. @xref{SHOW VARIABLES}.
-
-You can also see some statistics from a running server by issuing the command
-@code{SHOW STATUS}. @xref{SHOW STATUS}.
-
-@strong{MySQL} uses algorithms that are very scalable, so you can usually
-run with very little memory. If you, however, give @strong{MySQL} more
-memory, you will normally also get better performance.
-
-When tuning a @strong{MySQL} server, the two most important variables to use
-are @code{key_buffer_size} and @code{table_cache}. You should first feel
-confident that you have these right before trying to change any of the
-other variables.
-
-If you have much memory (>=256M) and many tables and want maximum performance
-with a moderate number of clients, you should use something like this:
-
-@example
-shell> safe_mysqld -O key_buffer=64M -O table_cache=256 \
- -O sort_buffer=4M -O record_buffer=1M &
-@end example
-
-If you have only 128M and only a few tables, but you still do a lot of
-sorting, you can use something like:
-
-@example
-shell> safe_mysqld -O key_buffer=16M -O sort_buffer=1M
-@end example
-
-If you have little memory and lots of connections, use something like this:
-
-@example
-shell> safe_mysqld -O key_buffer=512k -O sort_buffer=100k \
- -O record_buffer=100k &
-@end example
-
-or even:
-
-@example
-shell> safe_mysqld -O key_buffer=512k -O sort_buffer=16k \
- -O table_cache=32 -O record_buffer=8k -O net_buffer=1K &
-@end example
-
-When you have installed @strong{MySQL}, the @file{support-files} directory will
-contain some different @code{my.cnf} example files, @file{my-huge.cnf},
-@file{my-large.cnf}, @file{my-medium.cnf}, and @file{my-small.cnf}, that you
-can use as a base to optimize your system.
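-
-For example, one of them could be copied into place and then adjusted (a
-sketch; the target location depends on where you want the options to be
-read from):
-
-@example
-shell> cp support-files/my-medium.cnf /etc/my.cnf
-@end example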
-
-If there are very many connections, ``swapping problems'' may occur unless
-@code{mysqld} has been configured to use very little memory for each
-connection. @code{mysqld} performs better if you have enough memory for all
-connections, of course.
-
-Note that if you change an option to @code{mysqld}, it remains in effect only
-for that instance of the server.
-
-To see the effects of a parameter change, do something like this:
-
-@example
-shell> mysqld -O key_buffer=32m --help
-@end example
-
-Make sure that the @code{--help} option is last; otherwise, the effect of any
-options listed after it on the command line will not be reflected in the
-output.
-
-@cindex tables, opening
-@cindex tables, closing
-@cindex opening, tables
-@cindex closing, tables
-@cindex table cache
-@findex table_cache
-@node Table cache, Creating many tables, Server parameters, System
-@subsection How MySQL Opens and Closes Tables
-
-@code{table_cache}, @code{max_connections}, and @code{max_tmp_tables}
-affect the maximum number of files the server keeps open. If you
-increase one or both of these values, you may run up against a limit
-imposed by your operating system on the per-process number of open file
-descriptors. However, you can increase the limit on many systems.
-Consult your OS documentation to find out how to do this, because the
-method for changing the limit varies widely from system to system.
-
-@code{table_cache} is related to @code{max_connections}. For example,
-for 200 concurrent running connections, you should have a table cache of
-at least @code{200 * n}, where @code{n} is the maximum number of tables
-in a join. You also need to reserve some extra file descriptors for
-temporary tables and files.
-
-The cache of open tables can grow to a maximum of @code{table_cache}
-(default 64; this can be changed with the @code{-O table_cache=#}
-option to @code{mysqld}). A table is never closed, except when the
-cache is full and another thread tries to open a table or if you use
-@code{mysqladmin refresh} or @code{mysqladmin flush-tables}.
-
-When the table cache fills up, the server uses the following procedure
-to locate a cache entry to use:
-
-@itemize @bullet
-@item
-Tables that are not currently in use are released, in least-recently-used
-order.
-
-@item
-If the cache is full and no tables can be released, but a new table needs to
-be opened, the cache is temporarily extended as necessary.
-
-@item
-If the cache is in a temporarily-extended state and a table goes from in-use
-to not-in-use state, the table is closed and released from the cache.
-@end itemize
-
-A table is opened for each concurrent access. This means that
-if you have two threads accessing the same table or access the table
-twice in the same query (with @code{AS}) the table needs to be opened twice.
-The first open of any table takes two file descriptors; each additional
-use of the table takes only one file descriptor. The extra descriptor
-for the first open is used for the index file; this descriptor is shared
-among all threads.
-
-You can check if your table cache is too small by checking the @code{mysqld}
-status variable @code{Opened_tables}. If this is quite big, even if you
-haven't done a lot of @code{FLUSH TABLES}, you should increase your table
-cache. @xref{SHOW STATUS}.
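-
-For example, assuming the server is running, the counter could be
-checked with something like this (a sketch):
-
-@example
-shell> mysqladmin extended-status | grep -i opened_tables
-@end example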
-
-@cindex tables, too many
-@node Creating many tables, Open tables, Table cache, System
-@subsection Drawbacks to Creating Large Numbers of Tables in the Same Database
-
-If you have many files in a directory, open, close, and create operations will
-be slow. If you execute @code{SELECT} statements on many different tables,
-there will be a little overhead when the table cache is full, because for
-every table that has to be opened, another must be closed. You can reduce
-this overhead by making the table cache larger.
-
-@cindex tables, open
-@cindex open tables
-@node Open tables, Memory use, Creating many tables, System
-@subsection Why So Many Open tables?
-
-When you run @code{mysqladmin status}, you'll see something like this:
-
-@example
-Uptime: 426 Running threads: 1 Questions: 11082 Reloads: 1 Open tables: 12
-@end example
-
-This can be somewhat perplexing if you only have 6 tables.
-
-@strong{MySQL} is multithreaded, so it may have many queries on the same
-table simultaneously. To minimize the problem with two threads having
-different states on the same file, the table is opened independently by
-each concurrent thread. This takes some memory and one extra file
-descriptor for the data file. The index file descriptor is shared
-between all threads.
-
-@cindex memory use
-@node Memory use, Internal locking, Open tables, System
-@subsection How MySQL Uses Memory
-
-The list below indicates some of the ways that the @code{mysqld} server
-uses memory. Where applicable, the name of the server variable relevant
-to the memory use is given:
-
-@itemize @bullet
-@item
-The key buffer (variable @code{key_buffer_size}) is shared by all
-threads; other buffers used by the server are allocated as
-needed. @xref{Server parameters}.
-
-@item
-Each connection uses some thread-specific space: A stack (default 64K,
-variable @code{thread_stack}), a connection buffer (variable
-@code{net_buffer_length}), and a result buffer (variable
-@code{net_buffer_length}). The connection buffer and result buffer are
-dynamically enlarged up to @code{max_allowed_packet} when needed. When
-a query is running, a copy of the current query string is also allocated.
-
-@item
-All threads share the same base memory.
-
-@item
-Only the compressed ISAM / MyISAM tables are memory mapped. This is
-because the 32-bit memory space of 4GB is not large enough for most
-big tables. When systems with a 64-bit address space become more
-common we may add general support for memory mapping.
-
-@item
-Each request doing a sequential scan over a table allocates a read buffer
-(variable @code{record_buffer}).
-
-@item
-All joins are done in one pass, and most joins can be done without even
-using a temporary table. Most temporary tables are memory-based (HEAP)
-tables. Temporary tables with a big record length (calculated as the
-sum of all column lengths) or that contain @code{BLOB} columns are
-stored on disk.
-
-One problem in @strong{MySQL} versions before Version 3.23.2 is that if a HEAP table
-exceeds the size of @code{tmp_table_size}, you get the error @code{The
-table tbl_name is full}. In newer versions this is handled by
-automatically changing the in-memory (HEAP) table to a disk-based
-(MyISAM) table as necessary. To work around this problem, you can
-increase the temporary table size by setting the @code{tmp_table_size}
-option to @code{mysqld}, or by setting the SQL option
-@code{SQL_BIG_TABLES} in the client program. @xref{SET OPTION, ,
-@code{SET OPTION}}. In @strong{MySQL} Version 3.20, the maximum size of the
-temporary table was @code{record_buffer*16}, so if you are using this
-version, you have to increase the value of @code{record_buffer}. You can
-also start @code{mysqld} with the @code{--big-tables} option to always
-store temporary tables on disk. However, this will affect the speed of
-many complicated queries.
-
-@item
-Most requests doing a sort allocate a sort buffer and 0-2 temporary
-files depending on the result set size. @xref{Temporary files}.
-
-@item
-Almost all parsing and calculating is done in a local memory store. No
-memory overhead is needed for small items and the normal slow memory
-allocation and freeing is avoided. Memory is allocated only for
-unexpectedly large strings (this is done with @code{malloc()} and
-@code{free()}).
-
-@item
-Each index file is opened once and the data file is opened once for each
-concurrently running thread. For each concurrent thread, a table structure,
-column structures for each column, and a buffer of size @code{3 * n} is
-allocated (where @code{n} is the maximum row length, not counting @code{BLOB}
-columns). A @code{BLOB} uses 5 to 8 bytes plus the length of the @code{BLOB}
-data. The @code{ISAM}/@code{MyISAM} table handlers will use one extra row
-buffer for internal usage.
-
-@item
-For each table having @code{BLOB} columns, a buffer is enlarged dynamically
-to read in larger @code{BLOB} values. If you scan a table, a buffer as large
-as the largest @code{BLOB} value is allocated.
-
-@item
-Table handlers for all in-use tables are saved in a cache and managed as a
-FIFO. Normally the cache has 64 entries. If a table has been used by two
-running threads at the same time, the cache contains two entries for the
-table. @xref{Table cache}.
-
-@item
-A @code{mysqladmin flush-tables} command closes all tables that are not in
-use and marks all in-use tables to be closed when the currently executing
-thread finishes. This will effectively free most in-use memory.
-@end itemize
-
-@code{ps} and other system status programs may report that @code{mysqld}
-uses a lot of memory. This may be caused by thread-stacks on different
-memory addresses. For example, the Solaris version of @code{ps} counts
-the unused memory between stacks as used memory. You can verify this by
-checking available swap with @code{swap -s}. We have tested
-@code{mysqld} with commercial memory-leakage detectors, so there should
-be no memory leaks.
-
-@cindex internal locking
-@cindex locking, tables
-@cindex tables, locking
-@node Internal locking, Table locking, Memory use, System
-@subsection How MySQL Locks Tables
-
-You can find a discussion about different locking methods in the appendix.
-@xref{Locking methods}.
-
-All locking in @strong{MySQL} is deadlock-free. This is managed by always
-requesting all needed locks at once at the beginning of a query and always
-locking the tables in the same order.
-
-The locking method @strong{MySQL} uses for @code{WRITE} locks works as follows:
-
-@itemize @bullet
-@item
-If there are no locks on the table, put a write lock on it.
-@item
-Otherwise, put the lock request in the write lock queue.
-@end itemize
-
-The locking method @strong{MySQL} uses for @code{READ} locks works as follows:
-
-@itemize @bullet
-@item
-If there are no write locks on the table, put a read lock on it.
-@item
-Otherwise, put the lock request in the read lock queue.
-@end itemize
-
-When a lock is released, the lock is made available to the threads
-in the write lock queue, then to the threads in the read lock queue.
-
-This means that if you have many updates on a table, @code{SELECT}
-statements will wait until there are no more updates.
-
-To work around this for the case where you want to do many @code{INSERT} and
-@code{SELECT} operations on a table, you can insert rows in a temporary
-table and update the real table with the records from the temporary table
-once in a while.
-
-This can be done with the following code:
-@example
-mysql> LOCK TABLES real_table WRITE, insert_table WRITE;
-mysql> INSERT INTO real_table SELECT * FROM insert_table;
-mysql> TRUNCATE TABLE insert_table;
-mysql> UNLOCK TABLES;
-@end example
-
-You can use the @code{LOW_PRIORITY} option with @code{INSERT},
-@code{UPDATE}, or @code{DELETE}, or @code{HIGH_PRIORITY} with
-@code{SELECT}, if you want to prioritize retrieval in some specific
-cases. You can also start @code{mysqld} with @code{--low-priority-updates}
-to get the same behavior.
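-
-For example (a sketch with a hypothetical table @code{t}):
-
-@example
-mysql> INSERT LOW_PRIORITY INTO t VALUES (1);
-mysql> SELECT HIGH_PRIORITY * FROM t;
-@end example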
-
-Using @code{SQL_BUFFER_RESULT} can also help make table locks shorter.
-@xref{SELECT}.
-
-You could also change the locking code in @file{mysys/thr_lock.c} to use a
-single queue. In this case, write locks and read locks would have the same
-priority, which might help some applications.
-
-@cindex problems, table locking
-@node Table locking, DNS, Internal locking, System
-@subsection Table Locking Issues
-
-The table locking code in @strong{MySQL} is deadlock free.
-
-@strong{MySQL} uses table locking (instead of row locking or column
-locking) on all table types, except @code{BDB} tables, to achieve a very
-high lock speed. For large tables, table locking is MUCH better than
-row locking for most applications, but there are, of course, some
-pitfalls.
-
-For @code{BDB} and @code{InnoDB} tables, @strong{MySQL} only uses table
-locking if you explicitly lock the table with @code{LOCK TABLES} or
-execute a command that will modify every row in the table, like
-@code{ALTER TABLE}. For these table types we recommend that you not use
-@code{LOCK TABLES} at all.
-
-In @strong{MySQL} Version 3.23.7 and above, you can insert rows into
-@code{MyISAM} tables at the same time other threads are reading from the
-table. Note that currently this only works if there are no holes after
-deleted rows in the table at the time the insert is made. When all holes
-have been filled with new data, concurrent inserts will automatically be
-enabled again.
-
-Table locking enables many threads to read from a table at the same
-time, but if a thread wants to write to a table, it must first get
-exclusive access. During the update, all other threads that want to
-access this particular table will wait until the update is ready.
-
-As updates on tables normally are considered to be more important than
-@code{SELECT}, all statements that update a table have higher priority
-than statements that retrieve information from a table. This should
-ensure that updates are not 'starved' because someone issues a lot of heavy
-queries against a specific table. (You can change this by using
-@code{LOW_PRIORITY} with the statement that does the update or
-@code{HIGH_PRIORITY} with the @code{SELECT} statement.)
-
-Starting from @strong{MySQL} Version 3.23.7 one can use the
-@code{max_write_lock_count} variable to force @strong{MySQL} to
-temporarily give all @code{SELECT} statements that are waiting for a table a
-higher priority after a specific number of inserts to a table.
-
-Table locking is, however, not very good under the following scenario:
-
-@itemize @bullet
-@item
-A client issues a @code{SELECT} that takes a long time to run.
-@item
-Another client then issues an @code{UPDATE} on the same table. This client
-will wait until the @code{SELECT} is finished.
-@item
-Another client issues another @code{SELECT} statement on the same table. As
-@code{UPDATE} has higher priority than @code{SELECT}, this @code{SELECT}
-will wait for the @code{UPDATE} to finish. It will also wait for the first
-@code{SELECT} to finish!
-@item
-A thread is waiting for something like @code{full disk}, in which case all
-threads that want to access the problem table will also be put in a waiting
-state until more disk space is made available.
-@end itemize
-
-Some possible solutions to this problem are:
-
-@itemize @bullet
-@item
-Try to get the @code{SELECT} statements to run faster. You may have to create
-some summary tables to do this.
-
-@item
-Start @code{mysqld} with @code{--low-priority-updates}. This will give
-all statements that update (modify) a table lower priority than a @code{SELECT}
-statement. In this case the last @code{SELECT} statement in the previous
-scenario would execute before the @code{UPDATE} statement.
-
-@item
-You can give a specific @code{INSERT}, @code{UPDATE}, or @code{DELETE}
-statement lower priority with the @code{LOW_PRIORITY} attribute.
-
-@item
-Start @code{mysqld} with a low value for @code{max_write_lock_count} to give
-@code{READ} locks after a certain number of @code{WRITE} locks.
-
-@item
-You can specify that all updates from a specific thread should be done with
-low priority by using the SQL command: @code{SET SQL_LOW_PRIORITY_UPDATES=1}.
-@xref{SET OPTION, , @code{SET OPTION}}.
-
-@item
-You can specify that a specific @code{SELECT} is very important with the
-@code{HIGH_PRIORITY} attribute. @xref{SELECT, , @code{SELECT}}.
-
-@item
-If you have problems with @code{INSERT} combined with @code{SELECT},
-switch to the new @code{MyISAM} tables, as these support concurrent
-@code{SELECT} and @code{INSERT} statements.
-
-@item
-If you mainly mix @code{INSERT} and @code{SELECT} statements, the
-@code{DELAYED} attribute to @code{INSERT} will probably solve your problems.
-@xref{INSERT, , @code{INSERT}}.
-
-@item
-If you have problems with @code{SELECT} and @code{DELETE}, the @code{LIMIT}
-option to @code{DELETE} may help (see the example after this list).
-@xref{DELETE, , @code{DELETE}}.
-@end itemize
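-
-For example, deleting rows in smaller chunks as mentioned in the last
-item above could look like this (@code{big_table} and the @code{expired}
-column are only illustrative):
-
-@example
-mysql> DELETE FROM big_table WHERE expired=1 LIMIT 1000;
-@end example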
-
-@cindex DNS
-@cindex hostname caching
-@node DNS, , Table locking, System
-@subsection How MySQL uses DNS
-
-When a new client connects to @code{mysqld}, @code{mysqld} will spawn a
-new thread to handle the request. This thread will first check if the
-hostname is in the hostname cache. If not, the thread will call
-@code{gethostbyaddr_r()} and @code{gethostbyname_r()} to resolve the
-hostname.
-
-If the operating system doesn't support the above thread-safe calls, the
-thread will lock a mutex and call @code{gethostbyaddr()} and
-@code{gethostbyname()} instead. Note that in this case no other thread
-can resolve hostnames that are not in the hostname cache until the
-first thread has finished.
-
-You can disable DNS host lookup by starting @code{mysqld} with
-@code{--skip-name-resolve}. In this case, however, you can only use IP
-numbers in the @strong{MySQL} privilege tables.
-
-If you have a very slow DNS and many hosts, you can get more performance by
-either disabling DNS lookup with @code{--skip-name-resolve} or by
-increasing the @code{HOST_CACHE_SIZE} define (default: 128) and recompiling
-@code{mysqld}.
-
-You can disable the hostname cache with @code{--skip-host-cache}. You
-can clear the hostname cache with @code{FLUSH HOSTS} or @code{mysqladmin
-flush-hosts}.
-
-If you don't want to allow connections over @code{TCP/IP}, you can do this
-by starting @code{mysqld} with @code{--skip-networking}.
-
-@cindex data, size
-@cindex reducing, data size
-@cindex storage space, minimizing
-@cindex tables, improving performance
-@cindex performance, improving
-@node Data size, MySQL indexes, System, Performance
-@section Get Your Data as Small as Possible
-
-One of the most basic optimizations is to get your data (and indexes) to
-take as little space on the disk (and in memory) as possible. This can
-give huge improvements because disk reads are faster and normally less
-main memory will be used. Indexing also takes fewer resources if
-done on smaller columns.
-
-@strong{MySQL} supports a lot of different table types and row formats.
-Choosing the right table format may give you a big performance gain.
-@xref{Table types}.
-
-You can get better performance on a table and minimize storage space
-using the techniques listed below:
-
-@itemize @bullet
-@item
-Use the most efficient (smallest) types possible. @strong{MySQL} has
-many specialized types that save disk space and memory.
-@item
-Use the smaller integer types if possible to get smaller tables. For
-example, @code{MEDIUMINT} is often better than @code{INT}.
-@item
-Declare columns to be @code{NOT NULL} if possible. It makes everything
-faster and you save one bit per column. Note that if you really need
-@code{NULL} in your application you should definitely use it. Just avoid
-having it on all columns by default.
-@item
-If you don't have any variable-length columns (@code{VARCHAR},
-@code{TEXT}, or @code{BLOB} columns), a fixed-size record format is
-used. This is faster but unfortunately may waste some space.
-@xref{MyISAM table formats}.
-@item
-The primary index of a table should be as short as possible. This makes
-identification of one row easy and efficient.
-@item
-For each table, you have to decide which storage/index method to
-use. @xref{Table types}.
-@item
-Only create the indexes that you really need. Indexes are good for
-retrieval but bad when you need to store things fast. If you mostly
-access a table by searching on a combination of columns, make an index
-on them. The first index part should be the most used column. If you
-always use many columns, you should use the column with the most
-duplicates first to get better compression of the index.
-@item
-If it's very likely that a column has a unique prefix on the first few
-characters, it's better to only index this prefix (see the example after
-this list). @strong{MySQL}
-supports an index on a part of a character column. Shorter indexes are
-faster not only because they take less disk space but also because they
-will give you more hits in the index cache and thus fewer disk
-seeks. @xref{Server parameters}.
-@item
-In some circumstances it can be beneficial to split a table that is
-scanned very often into two. This is especially true if it is a dynamic
-format table and it is possible to use a smaller static format table
-to find the relevant rows when scanning the table.
-@end itemize
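-
-The following is a minimal sketch (the table and column names are only
-illustrative) that combines several of the points above: a small integer
-type, @code{NOT NULL} columns, a short primary key, and a prefix index on
-a character column:
-
-@example
-mysql> CREATE TABLE customer (
-         id MEDIUMINT UNSIGNED NOT NULL AUTO_INCREMENT,
-         name CHAR(40) NOT NULL,
-         PRIMARY KEY (id),
-         INDEX (name(10)));
-@end example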
-
-@cindex indexes, uses for
-@node MySQL indexes, Query Speed, Data size, Performance
-@section How MySQL Uses Indexes
-
-Indexes are used to quickly find rows with a specific value in a
-column. Without an index @strong{MySQL} has to start with the first record
-and then read through the whole table until it finds the relevant
-rows. The bigger the table, the more this costs. If the table has an index
-for the columns in question, @strong{MySQL} can quickly get a position to
-seek to in the middle of the data file without having to look at all the
-data. If a table has 1000 rows, this is at least 100 times faster than
-reading sequentially. Note that if you need to access almost all 1000
-rows it is faster to read sequentially because we then avoid disk seeks.
-
-All @strong{MySQL} indexes (@code{PRIMARY}, @code{UNIQUE}, and
-@code{INDEX}) are stored in B-trees. Strings are automatically prefix-
-and end-space compressed. @xref{CREATE INDEX, , @code{CREATE INDEX}}.
-
-Indexes are used to:
-@itemize @bullet
-@item
-Quickly find the rows that match a @code{WHERE} clause.
-
-@item
-Retrieve rows from other tables when performing joins.
-
-@item
-Find the @code{MAX()} or @code{MIN()} value for a specific indexed
-column. This is optimized by a preprocessor that checks if you are
-using @code{WHERE key_part_N = constant} on all key parts that occur before
-the @code{MIN()} or @code{MAX()} column in the index. In this case
-@strong{MySQL} will do a single key lookup and replace the @code{MIN()}
-expression with a constant. If all expressions are replaced with
-constants, the query will return at once:
-
-@example
-SELECT MIN(key_part2),MAX(key_part2) FROM table_name WHERE key_part1=10
-@end example
-
-@item
-Sort or group a table if the sorting or grouping is done on a leftmost
-prefix of a usable key (for example, @code{ORDER BY key_part1,key_part2}). The
-key is read in reverse order if all key parts are followed by @code{DESC}.
-
-The index can also be used even if the @code{ORDER BY} doesn't match the index
-exactly, as long as all the unused index parts and all the extra
-@code{ORDER BY} columns are constants in the @code{WHERE} clause. The
-following queries will use the index to resolve the @code{ORDER BY} part:
-
-@example
-SELECT * FROM foo ORDER BY key_part1,key_part2,key_part3;
-SELECT * FROM foo WHERE column=constant ORDER BY column, key_part1;
-SELECT * FROM foo WHERE key_part1=const GROUP BY key_part2;
-@end example
-
-@item
-In some cases a query can be optimized to retrieve values without
-consulting the data file. If all used columns for some table are numeric
-and form a leftmost prefix for some key, the values may be retrieved
-from the index tree for greater speed:
-
-@example
-SELECT key_part3 FROM table_name WHERE key_part1=1
-@end example
-
-@end itemize
-
-Suppose you issue the following @code{SELECT} statement:
-
-@example
-mysql> SELECT * FROM tbl_name WHERE col1=val1 AND col2=val2;
-@end example
-
-If a multiple-column index exists on @code{col1} and @code{col2}, the
-appropriate rows can be fetched directly. If separate single-column
-indexes exist on @code{col1} and @code{col2}, the optimizer tries to
-find the most restrictive index by deciding which index will find fewer
-rows and using that index to fetch the rows.
-
-@cindex indexes, leftmost prefix of
-@cindex leftmost prefix of indexes
-If the table has a multiple-column index, any leftmost prefix of the
-index can be used by the optimizer to find rows. For example, if you
-have a three-column index on @code{(col1,col2,col3)}, you have indexed
-search capabilities on @code{(col1)}, @code{(col1,col2)}, and
-@code{(col1,col2,col3)}.
-
-@strong{MySQL} can't use a partial index if the columns don't form a
-leftmost prefix of the index. Suppose you have the @code{SELECT}
-statements shown below:
-
-@example
-mysql> SELECT * FROM tbl_name WHERE col1=val1;
-mysql> SELECT * FROM tbl_name WHERE col2=val2;
-mysql> SELECT * FROM tbl_name WHERE col2=val2 AND col3=val3;
-@end example
-
-If an index exists on @code{(col1,col2,col3)}, only the first query
-shown above uses the index. The second and third queries do involve
-indexed columns, but @code{(col2)} and @code{(col2,col3)} are not
-leftmost prefixes of @code{(col1,col2,col3)}.
-
-@findex LIKE, and indexes
-@findex LIKE, and wildcards
-@cindex indexes, and @code{LIKE}
-@cindex wildcards, and @code{LIKE}
-@strong{MySQL} also uses indexes for @code{LIKE} comparisons if the argument
-to @code{LIKE} is a constant string that doesn't start with a wild-card
-character. For example, the following @code{SELECT} statements use indexes:
-
-@example
-mysql> select * from tbl_name where key_col LIKE "Patrick%";
-mysql> select * from tbl_name where key_col LIKE "Pat%_ck%";
-@end example
-
-In the first statement, only rows with @code{"Patrick" <= key_col <
-"Patricl"} are considered. In the second statement, only rows with
-@code{"Pat" <= key_col < "Pau"} are considered.
-
-The following @code{SELECT} statements will not use indexes:
-@example
-mysql> select * from tbl_name where key_col LIKE "%Patrick%";
-mysql> select * from tbl_name where key_col LIKE other_col;
-@end example
-
-In the first statement, the @code{LIKE} value begins with a wild-card
-character. In the second statement, the @code{LIKE} value is not a
-constant.
-
-@findex IS NULL, and indexes
-@cindex indexes, and @code{IS NULL}
-Searching using @code{column_name IS NULL} will use indexes if
-@code{column_name} is indexed.
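-
-For example, the following query can use an index on @code{key_col}:
-
-@example
-mysql> SELECT * FROM tbl_name WHERE key_col IS NULL;
-@end example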
-
-@strong{MySQL} normally uses the index that finds the least number of rows. An
-index is used for columns that you compare with the following operators:
-@code{=}, @code{>}, @code{>=}, @code{<}, @code{<=}, @code{BETWEEN}, and a
-@code{LIKE} with a non-wild-card prefix like @code{'something%'}.
-
-Any index that doesn't span all @code{AND} levels in the @code{WHERE} clause
-is not used to optimize the query. In other words: To be able to use an
-index, a prefix of the index must be used in every @code{AND} group.
-
-The following @code{WHERE} clauses use indexes:
-@example
-... WHERE index_part1=1 AND index_part2=2 AND other_column=3
-... WHERE index=1 OR A=10 AND index=2 /* index = 1 OR index = 2 */
-... WHERE index_part1='hello' AND index_part3=5
- /* optimized like "index_part1='hello'" */
-... WHERE index1=1 and index2=2 or index1=3 and index3=3;
- /* Can use index on index1 but not on index2 or index 3 */
-@end example
-
-These @code{WHERE} clauses do @strong{NOT} use indexes:
-@example
-... WHERE index_part2=1 AND index_part3=2  /* index_part1 is not used */
-... WHERE index=1 OR A=10 /* Index is not used in both AND parts */
-... WHERE index_part1=1 OR index_part2=10 /* No index spans all rows */
-@end example
-
-Note that in some cases @strong{MySQL} will not use an index, even if one
-would be available. Some of the cases where this happens are:
-
-@itemize @bullet
-@item
-If the use of the index would require @strong{MySQL} to access more
-than 30% of the rows in the table. (In this case a table scan is
-probably much faster, as it will require far fewer seeks.)
-Note that if such a query uses @code{LIMIT} to only retrieve
-part of the rows, @strong{MySQL} will use an index anyway, as it can
-much more quickly find the few rows to return in the result.
-@end itemize
-
-@cindex queries, speed of
-@cindex permission checks, effect on speed
-@cindex speed, of queries
-@node Query Speed, Tips, MySQL indexes, Performance
-@section Speed of Queries that Access or Update Data
-
-First, one thing that affects all queries: The more complex your permission
-system setup is, the more overhead you get.
-
-If you do not have any @code{GRANT} statements done, @strong{MySQL} will
-optimize the permission checking somewhat. So if you have a very high
-volume it may be worth the time to avoid grants. Otherwise, more
-permission checking results in larger overhead.
-
-If your problem is with some explicit @strong{MySQL} function, you can
-always time this in the @strong{MySQL} client:
-
-@example
-mysql> select benchmark(1000000,1+1);
-+------------------------+
-| benchmark(1000000,1+1) |
-+------------------------+
-| 0 |
-+------------------------+
-1 row in set (0.32 sec)
-@end example
-
-The above shows that @strong{MySQL} can execute 1,000,000 @code{+}
-expressions in 0.32 seconds on a @code{PentiumII 400MHz}.
-
-All @strong{MySQL} functions should be highly optimized, but there may be
-some exceptions, and @code{benchmark(loop_count,expression)} is a
-great tool to find out if this is a problem with your query.
-
-@menu
-* Estimating performance:: Estimating query performance
-* SELECT speed:: Speed of @code{SELECT} queries
-* Where optimizations:: How MySQL optimizes @code{WHERE} clauses
-* DISTINCT optimization:: How MySQL Optimizes @code{DISTINCT}
-* LEFT JOIN optimization:: How MySQL optimizes @code{LEFT JOIN}
-* LIMIT optimization:: How MySQL optimizes @code{LIMIT}
-* Insert speed:: Speed of @code{INSERT} queries
-* Update speed:: Speed of @code{UPDATE} queries
-* Delete speed:: Speed of @code{DELETE} queries
-@end menu
-
-@cindex estimating, query performance
-@cindex queries, estimating performance
-@cindex performance, estimating
-@node Estimating performance, SELECT speed, Query Speed, Query Speed
-@subsection Estimating Query Performance
-
-In most cases you can estimate the performance by counting disk seeks.
-For small tables, you can usually find the row in 1 disk seek (as the
-index is probably cached). For bigger tables, you can estimate that
-(using B-tree indexes) you will need: @code{log(row_count) /
-log(index_block_length / 3 * 2 / (index_length + data_pointer_length)) +
-1} seeks to find a row.
-
-In @strong{MySQL} an index block is usually 1024 bytes and the data
-pointer is usually 4 bytes. A 500,000 row table with an
-index length of 3 (medium integer) gives you:
-@code{log(500,000)/log(1024/3*2/(3+4)) + 1} = 4 seeks.
-
-As the above index would require about 500,000 * 7 * 3/2 = 5.2M of storage
-(assuming that the index buffers are filled to 2/3, which is typical),
-you will probably have much of the index in memory and you will probably
-only need 1-2 calls to read data from the OS to find the row.
-
-For writes, however, you will need 4 seek requests (as above) to find
-where to place the new index and normally 2 seeks to update the index
-and write the row.
-
-Note that the above doesn't mean that your application will slowly
-degenerate by N log N! As long as everything is cached by the OS or SQL
-server, things will only go marginally slower while the table gets
-bigger. After the data gets too big to be cached, things will start to
-go much slower until your application is only bound by disk seeks
-(which increase by N log N). To avoid this, increase the index cache as
-the data grows. @xref{Server parameters}.
-
-@cindex speed, of queries
-@findex SELECT speed
-
-@node SELECT speed, Where optimizations, Estimating performance, Query Speed
-@subsection Speed of @code{SELECT} Queries
-
-In general, when you want to make a slow @code{SELECT ... WHERE} faster, the
-first thing to check is whether or not you can add an index. @xref{MySQL
-indexes, , @strong{MySQL} indexes}. All references between different tables
-should usually be done with indexes. You can use the @code{EXPLAIN} command
-to determine which indexes are used for a @code{SELECT}.
-@xref{EXPLAIN, , @code{EXPLAIN}}.
-
-Some general tips:
-
-@itemize @bullet
-@item
-To help @strong{MySQL} optimize queries better, run @code{myisamchk
---analyze} on a table after it has been loaded with relevant data. This
-updates a value for each index part that indicates the average number of
-rows that have the same value. (For unique indexes, this is always 1,
-of course.) @strong{MySQL} will use this to decide which index to
-choose when you join two tables on a non-constant expression.
-You can check the result from the @code{analyze} run by doing @code{SHOW
-INDEX FROM table_name} and examining the @code{Cardinality} column.
-
-@item
-To sort an index and data according to an index, use @code{myisamchk
---sort-index --sort-records=1} (if you want to sort on index 1). If you
-have a unique index from which you want to read all records in order
-according to that index, this is a good way to make that faster. Note,
-however, that this sorting isn't written optimally and will take a long
-time for a large table!
-@end itemize
-
-@cindex optimizations
-@findex WHERE
-@node Where optimizations, DISTINCT optimization, SELECT speed, Query Speed
-@subsection How MySQL Optimizes @code{WHERE} Clauses
-
-The @code{WHERE} optimizations are put in the @code{SELECT} part here because
-they are mostly used with @code{SELECT}, but the same optimizations apply for
-@code{WHERE} in @code{DELETE} and @code{UPDATE} statements.
-
-Also note that this section is incomplete. @strong{MySQL} does many
-optimizations, and we have not had time to document them all.
-
-Some of the optimizations performed by @strong{MySQL} are listed below:
-
-@itemize @bullet
-@item
-Removal of unnecessary parentheses:
-@example
- ((a AND b) AND c OR (((a AND b) AND (c AND d))))
--> (a AND b AND c) OR (a AND b AND c AND d)
-@end example
-@item
-Constant folding:
-@example
- (a<b AND b=c) AND a=5
--> b>5 AND b=c AND a=5
-@end example
-@item
-Constant condition removal (needed because of constant folding):
-@example
- (B>=5 AND B=5) OR (B=6 AND 5=5) OR (B=7 AND 5=6)
--> B=5 OR B=6
-@end example
-@item
-Constant expressions used by indexes are evaluated only once.
-@item
-@code{COUNT(*)} on a single table without a @code{WHERE} is retrieved
-directly from the table information. This is also done for any @code{NOT NULL}
-expression when used with only one table.
-@item
-Early detection of invalid constant expressions. @strong{MySQL} quickly
-detects that some @code{SELECT} statements are impossible and returns no rows.
-@item
-@code{HAVING} is merged with @code{WHERE} if you don't use @code{GROUP BY}
-or group functions (@code{COUNT()}, @code{MIN()}...).
-@item
-For each sub-join, a simpler @code{WHERE} is constructed to get a fast
-@code{WHERE} evaluation for each sub-join and also to skip records as
-soon as possible.
-@cindex constant table
-@cindex tables, constant
-@item
-All constant tables are read first, before any other tables in the query.
-A constant table is:
-@itemize @minus
-@item
-An empty table or a table with 1 row.
-@item
-A table that is used with a @code{WHERE} clause on a @code{UNIQUE}
-index, or a @code{PRIMARY KEY}, where all index parts are used with constant
-expressions and the index parts are defined as @code{NOT NULL}.
-@end itemize
-All the following tables are used as constant tables:
-@example
-mysql> SELECT * FROM t WHERE primary_key=1;
-mysql> SELECT * FROM t1,t2
- WHERE t1.primary_key=1 AND t2.primary_key=t1.id;
-@end example
-
-@item
-The best join combination to join the tables is found by trying all
-possibilities. If all columns in @code{ORDER BY} and in @code{GROUP
-BY} come from the same table, then this table is preferred first when
-joining.
-@item
-If there is an @code{ORDER BY} clause and a different @code{GROUP BY}
-clause, or if the @code{ORDER BY} or @code{GROUP BY} contains columns
-from tables other than the first table in the join queue, a temporary
-table is created.
-@item
-If you use @code{SQL_SMALL_RESULT}, @strong{MySQL} will use an in-memory
-temporary table.
-@item
-Each table index is queried, and the best index that spans fewer than 30% of
-the rows is used. If no such index can be found, a quick table scan is used.
-@item
-In some cases, @strong{MySQL} can read rows from the index without even
-consulting the data file. If all columns used from the index are numeric,
-then only the index tree is used to resolve the query.
-@item
-Before each record is output, those that do not match the @code{HAVING} clause
-are skipped.
-@end itemize
-
-Some examples of queries that are very fast:
-
-@example
-mysql> SELECT COUNT(*) FROM tbl_name;
-mysql> SELECT MIN(key_part1),MAX(key_part1) FROM tbl_name;
-mysql> SELECT MAX(key_part2) FROM tbl_name
- WHERE key_part_1=constant;
-mysql> SELECT ... FROM tbl_name
- ORDER BY key_part1,key_part2,... LIMIT 10;
-mysql> SELECT ... FROM tbl_name
- ORDER BY key_part1 DESC,key_part2 DESC,... LIMIT 10;
-@end example
-
-The following queries are resolved using only the index tree (assuming
-the indexed columns are numeric):
-
-@example
-mysql> SELECT key_part1,key_part2 FROM tbl_name WHERE key_part1=val;
-mysql> SELECT COUNT(*) FROM tbl_name
- WHERE key_part1=val1 AND key_part2=val2;
-mysql> SELECT key_part2 FROM tbl_name GROUP BY key_part1;
-@end example
-
-The following queries use indexing to retrieve the rows in sorted
-order without a separate sorting pass:
-
-@example
-mysql> SELECT ... FROM tbl_name ORDER BY key_part1,key_part2,... ;
-mysql> SELECT ... FROM tbl_name ORDER BY key_part1 DESC,key_part2 DESC,... ;
-@end example
-
-@findex DISTINCT
-@cindex optimizing, DISTINCT
-@node DISTINCT optimization, LEFT JOIN optimization, Where optimizations, Query Speed
-@subsection How MySQL Optimizes @code{DISTINCT}
-
-@code{DISTINCT} is converted to a @code{GROUP BY} on all columns.
-@code{DISTINCT} combined with @code{ORDER BY} will in many cases also
-need a temporary table.
-
-When combining @code{LIMIT #} with @code{DISTINCT}, @strong{MySQL} will stop
-as soon as it finds @code{#} unique rows.
-
-If you don't use columns from all used tables, @strong{MySQL} will stop
-scanning the unused tables as soon as it has found the first match.
-
-@example
-SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.a=t2.a;
-@end example
-
-In this case, assuming t1 is used before t2 (check with @code{EXPLAIN}),
-@strong{MySQL} will stop reading from t2 (for that particular row in t1)
-when the first matching row in t2 is found.
-
-@findex LEFT JOIN
-@cindex optimizing, LEFT JOIN
-@node LEFT JOIN optimization, LIMIT optimization, DISTINCT optimization, Query Speed
-@subsection How MySQL Optimizes @code{LEFT JOIN} and @code{RIGHT JOIN}
-
-@code{A LEFT JOIN B} in @strong{MySQL} is implemented as follows:
-
-@itemize @bullet
-@item
-The table @code{B} is set to be dependent on table @code{A} and all tables
-that @code{A} is dependent on.
-@item
-The table @code{A} is set to be dependent on all tables (except @code{B})
-that are used in the @code{LEFT JOIN} condition.
-@item
-All @code{LEFT JOIN} conditions are moved to the @code{WHERE} clause.
-@item
-All standard join optimizations are done, with the exception that a table is
-always read after all tables it is dependent on. If there is a circular
-dependency, then @strong{MySQL} will issue an error.
-@item
-All standard @code{WHERE} optimizations are done.
-@item
-If there is a row in @code{A} that matches the @code{WHERE} clause, but there
-wasn't any row in @code{B} that matched the @code{LEFT JOIN} condition,
-then an extra @code{B} row is generated with all columns set to @code{NULL}.
-@item
-If you use @code{LEFT JOIN} to find rows that don't exist in some
-table and you have the following test: @code{column_name IS NULL} in the
-@code{WHERE} part, where @code{column_name} is a column that is declared as
-@code{NOT NULL}, then @strong{MySQL} will stop searching for more rows
-(for a particular key combination) after it has found one row that
-matches the @code{LEFT JOIN} condition (see the example after this list).
-@end itemize
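-
-For example (@code{a} and @code{b} are hypothetical tables where
-@code{b.a_id} is declared @code{NOT NULL}), the following query finds all
-rows in @code{a} that have no matching row in @code{b}:
-
-@example
-mysql> SELECT a.* FROM a LEFT JOIN b ON a.id=b.a_id WHERE b.a_id IS NULL;
-@end example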
-
-@code{RIGHT JOIN} is implemented analogously to @code{LEFT JOIN}.
-
-The table read order forced by @code{LEFT JOIN} and @code{STRAIGHT JOIN}
-will help the join optimizer (which calculates in which order tables
-should be joined) to do its work much more quickly, as there are fewer
-table permutations to check.
-
-Note that the above means that if you do a query of type:
-
-@example
-SELECT * FROM a,b LEFT JOIN c ON (c.key=a.key) LEFT JOIN d ON (d.key=a.key) WHERE b.key=d.key
-@end example
-
-@strong{MySQL} will do a full scan on @code{b} as the @code{LEFT
-JOIN} will force it to be read before @code{d}.
-
-The fix in this case is to change the query to:
-
-@example
-SELECT * FROM b,a LEFT JOIN c ON (c.key=a.key) LEFT JOIN d ON (d.key=a.key) WHERE b.key=d.key
-@end example
-
-@cindex optimizing, LIMIT
-@findex LIMIT
-@node LIMIT optimization, Insert speed, LEFT JOIN optimization, Query Speed
-@subsection How MySQL Optimizes @code{LIMIT}
-
-In some cases @strong{MySQL} will handle the query differently when you are
-using @code{LIMIT #} and not using @code{HAVING}:
-
-@itemize @bullet
-@item
-If you are selecting only a few rows with @code{LIMIT}, @strong{MySQL}
-will use indexes in some cases when it normally would prefer to do a
-full table scan.
-@item
-If you use @code{LIMIT #} with @code{ORDER BY}, @strong{MySQL} will end the
-sorting as soon as it has found the first @code{#} lines instead of sorting
-the whole table.
-@item
-When combining @code{LIMIT #} with @code{DISTINCT}, @strong{MySQL} will stop
-as soon as it finds @code{#} unique rows.
-@item
-In some cases a @code{GROUP BY} can be resolved by reading the key in order
-(or doing a sort on the key) and then calculating summaries until the
-key value changes. In this case @code{LIMIT #} will not calculate any
-unnecessary @code{GROUP BY} values.
-@item
-As soon as @strong{MySQL} has sent the first @code{#} rows to the client, it
-will abort the query.
-@item
-@code{LIMIT 0} will always quickly return an empty set. This is useful
-to check the query and to get the column types of the result columns
-(see the examples after this list).
-@item
-@strong{MySQL} uses @code{LIMIT #} to calculate how much space is needed
-for any temporary table used to resolve the query.
-@end itemize
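-
-For example (@code{tbl_name} is a hypothetical table), the first query
-below returns the result column types without reading any rows, and the
-second stops sorting as soon as the first 10 rows have been found:
-
-@example
-mysql> SELECT * FROM tbl_name LIMIT 0;
-mysql> SELECT * FROM tbl_name ORDER BY key_part1 LIMIT 10;
-@end example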
-
-@cindex speed, inserting
-@cindex inserting, speed of
-@node Insert speed, Update speed, LIMIT optimization, Query Speed
-@subsection Speed of @code{INSERT} Queries
-
-The time to insert a record consists approximately of:
-
-@itemize @bullet
-@item
-Connect: (3)
-@item
-Sending query to server: (2)
-@item
-Parsing query: (2)
-@item
-Inserting record: (1 x size of record)
-@item
-Inserting indexes: (1 x number of indexes)
-@item
-Close: (1)
-@end itemize
-
-where the numbers are somewhat proportional to the overall time. This
-does not take into consideration the initial overhead to open tables
-(which is done once for each concurrently running query).
-
-The size of the table slows down the insertion of indexes by N log N
-(B-trees).
-
-Some ways to speed up inserts:
-
-@itemize @bullet
-@item
-If you are inserting many rows from the same client at the same time, use
-@code{INSERT} statements with multiple value lists (see the example after
-this list). This is much faster (many times faster in some cases) than
-using separate @code{INSERT} statements.
-@item
-If you are inserting a lot of rows from different clients, you can get
-higher speed by using the @code{INSERT DELAYED} statement. @xref{INSERT,
-, @code{INSERT}}.
-@item
-Note that with @code{MyISAM} you can insert rows at the same time
-@code{SELECT}s are running if there are no deleted rows in the tables.
-@item
-When loading a table from a text file, use @code{LOAD DATA INFILE}. This
-is usually 20 times faster than using a lot of @code{INSERT} statements.
-@xref{LOAD DATA, , @code{LOAD DATA}}.
-@item
-It is possible with some extra work to make @code{LOAD DATA INFILE} run even
-faster when the table has many indexes. Use the following procedure:
-
-@enumerate
-@item
-Optionally create the table with @code{CREATE TABLE}. For example, using
-@code{mysql} or Perl-DBI.
-
-@item
-Execute a @code{FLUSH TABLES} statement or the shell command @code{mysqladmin
-flush-tables}.
-
-@item
-Use @code{myisamchk --keys-used=0 -rq /path/to/db/tbl_name}. This will
-remove all use of indexes for the table.
-
-@item
-Insert data into the table with @code{LOAD DATA INFILE}. This will not
-update any indexes and will therefore be very fast.
-
-@item
-If you are going to only read the table in the future, run @code{myisampack}
-on it to make it smaller. @xref{Compressed format}.
-
-@item
-Re-create the indexes with @code{myisamchk -r -q
-/path/to/db/tbl_name}. This will create the index tree in memory before
-writing it to disk, which is much faster because it avoids lots of disk
-seeks. The resulting index tree is also perfectly balanced.
-
-@item
-Execute a @code{FLUSH TABLES} statement or the shell command @code{mysqladmin
-flush-tables}.
-@end enumerate
-
-This procedure will be built into @code{LOAD DATA INFILE} in some future
-version of @strong{MySQL}.
-@item
-You can speed up insertions by locking your tables:
-
-@example
-mysql> LOCK TABLES a WRITE;
-mysql> INSERT INTO a VALUES (1,23),(2,34),(4,33);
-mysql> INSERT INTO a VALUES (8,26),(6,29);
-mysql> UNLOCK TABLES;
-@end example
-
-The main speed difference is that the index buffer is flushed to disk only
-once, after all @code{INSERT} statements have completed. Normally there would
-be as many index buffer flushes as there are different @code{INSERT}
-statements. Locking is not needed if you can insert all rows with a single
-statement.
-
-Locking will also lower the total time of multi-connection tests, but the
-maximum wait time for some threads will go up (because they wait for
-locks). For example:
-
-@example
-thread 1 does 1000 inserts
-threads 2, 3, and 4 do 1 insert each
-thread 5 does 1000 inserts
-@end example
-
-If you don't use locking, 2, 3, and 4 will finish before 1 and 5. If you
-use locking, 2, 3, and 4 probably will not finish before 1 or 5, but the
-total time should be about 40% faster.
-
-As @code{INSERT}, @code{UPDATE}, and @code{DELETE} operations are very
-fast in @strong{MySQL}, you will obtain better overall performance by
-adding locks around everything that does more than about 5 inserts or
-updates in a row. If you do very many inserts in a row, you could do a
-@code{LOCK TABLES} followed by an @code{UNLOCK TABLES} once in a while
-(about every 1,000 rows) to allow other threads access to the table. This
-would still result in a nice performance gain.
-
-Of course, @code{LOAD DATA INFILE} is much faster for loading data.
-@end itemize
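-
-For example (@code{tbl_name} and its columns are only illustrative), an
-@code{INSERT} statement with multiple value lists, as mentioned in the
-first item above, looks like this:
-
-@example
-mysql> INSERT INTO tbl_name (a,b) VALUES (1,'x'),(2,'y'),(3,'z');
-@end example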
-
-To get some more speed for both @code{LOAD DATA INFILE} and
-@code{INSERT}, enlarge the key buffer. @xref{Server parameters}.
-
-@node Update speed, Delete speed, Insert speed, Query Speed
-@subsection Speed of @code{UPDATE} Queries
-
-An update query is optimized like a @code{SELECT} query with the additional
-overhead of a write. The speed of the write is dependent on the size of
-the data that is being updated and the number of indexes that are
-updated. Indexes that are not changed will not be updated.
-
-Another way to get fast updates is to delay updates and then do
-many updates in a row later. Doing many updates in a row is much quicker
-than doing one at a time if you lock the table.
-
-Note that, with the dynamic record format, updating a record to
-a longer total length may split the record. So if you do this often,
-it is very important to run @code{OPTIMIZE TABLE} from time to time.
-@xref{OPTIMIZE TABLE, , @code{OPTIMIZE TABLE}}.
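-
-For example (@code{tbl_name} is a hypothetical dynamic-format table that
-is updated often), defragmenting it is as simple as:
-
-@example
-mysql> OPTIMIZE TABLE tbl_name;
-@end example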
-
-@node Delete speed, , Update speed, Query Speed
-@subsection Speed of @code{DELETE} Queries
-
-If you want to delete all rows in the table, you should use
-@code{TRUNCATE TABLE table_name}. @xref{TRUNCATE}.
-
-The time to delete a record is exactly proportional to the number of
-indexes. To delete records more quickly, you can increase the size of
-the index cache. @xref{Server parameters}.
-
-@cindex optimization, tips
-@cindex tips, optimization
-@node Tips, Benchmarks, Query Speed, Performance
-@section Other Optimization Tips
-
-Unsorted tips for faster systems:
-
-@itemize @bullet
-@item
-Use persistent connections to the database to avoid the connection
-overhead. If you can't use persistent connections and you are doing a
-lot of new connections to the database, you may want to change the value
-of the @code{thread_cache_size} variable. @xref{Server parameters}.
-@item
-Always check that all your queries really use the indexes you have created
-in the tables. In @strong{MySQL} you can do this with the @code{EXPLAIN}
-command. @xref{EXPLAIN, Explain, Explain, manual}.
-@item
-Try to avoid complex @code{SELECT} queries on tables that are updated a
-lot. This is to avoid problems with table locking.
-@item
-The new @code{MyISAM} tables can insert rows into a table without deleted
-rows at the same time another thread is reading from it. If this is important
-for you, you should consider methods where you don't have to delete rows
-or run @code{OPTIMIZE TABLE} after you have deleted a lot of rows.
-@item
-Use @code{ALTER TABLE ... ORDER BY expr1,expr2...} if you mostly
-retrieve rows in expr1,expr2.. order. By using this option after big
-changes to the table, you may be able to get higher performance.
-@item
-In some cases it may make sense to introduce a column that is 'hashed'
-based on information from other columns. If this column is short and
-reasonably unique it may be much faster than a big index on many
-columns. In @strong{MySQL} it's very easy to use this extra column:
-@code{SELECT * FROM table_name WHERE hash=MD5(CONCAT(col1,col2))
-AND col1='constant' AND col2='constant'}.
-@item
-For tables that change a lot you should try to avoid all @code{VARCHAR}
-or @code{BLOB} columns. You will get dynamic row length as soon as you
-are using a single @code{VARCHAR} or @code{BLOB} column. @xref{Table
-types}.
-@item
-It's not normally useful to split a table into different tables just
-because the rows get 'big'. To access a row, the biggest performance
-hit is the disk seek to find the first byte of the row. After finding
-the data, most new disks can read the whole row fast enough for most
-applications. The only cases where it really matters to split up a table are if
-it's a dynamic row size table (see above) that you can change to a fixed
-row size, or if you very often need to scan the table and don't need
-most of the columns. @xref{Table types}.
-@item
-If you very often need to calculate things based on information from a
-lot of rows (like counts of things), it's probably much better to
-introduce a new table and update the counter in real time. An update of
-the type @code{UPDATE table SET count=count+1 WHERE index_column=constant}
-is very fast!
-
-This is really important when you use databases like @strong{MySQL} that
-only have table locking (multiple readers / single writers). This will
-also give better performance with most databases, as the row locking
-manager in this case will have less to do.
-@item
-If you need to collect statistics from big log tables, use summary tables
-instead of scanning the whole table. Maintaining the summaries should be
-much faster than trying to do statistics 'live'. It's much faster to
-regenerate new summary tables from the logs when things change
-(depending on business decisions) than to have to change the running
-application!
-@item
-If possible, one should classify reports as 'live' or 'statistical',
-where the data needed for statistical reports is only generated based on
-summary tables that are generated from the actual data.
-@item
-Take advantage of the fact that columns have default values. Insert
-values explicitly only when the value to be inserted differs from the
-default. This reduces the parsing that @strong{MySQL} needs to do and
-improves the insert speed.
-@item
-In some cases it's convenient to pack and store data into a blob. In this
-case you have to add some extra code in your application to pack/unpack
-things in the blob, but this may save a lot of accesses at some stage.
-This is practical when you have data that doesn't conform to a static
-table structure.
-@item
-Normally you should try to keep all data non-redundant (what
-is called 3rd normal form in database theory), but you should not be
-afraid of duplicating things or creating summary tables if you need these
-to gain more speed.
-@item
-Stored procedures or UDFs (user-defined functions) may be a good way to
-get more performance. In this case you should, however, always have a way
-to do things some other (slower) way if you use a database that doesn't
-support them.
-@item
-You can always gain something by caching queries/answers in your
-application and trying to do many inserts/updates at the same time. If
-your database supports table locks (like @strong{MySQL} and Oracle),
-this should help to ensure that the index cache is only flushed once
-after all updates.
-@item
-Use @code{INSERT /*! DELAYED */} when you do not need to know when your
-data is written. This speeds things up because many records can be written
-with a single disk write.
-@item
-Use @code{INSERT /*! LOW_PRIORITY */} when you want your selects to be
-more important.
-@item
-Use @code{SELECT /*! HIGH_PRIORITY */} to get selects that jump the
-queue. That is, the select is done even if there is somebody waiting to
-do a write.
-@item
-Use the multi-line @code{INSERT} statement to store many rows with one
-SQL command (many SQL servers support this).
-@item
-Use @code{LOAD DATA INFILE} to load bigger amounts of data. This is
-faster than normal inserts and will be even faster when @code{myisamchk}
-is integrated in @code{mysqld}.
-@item
-Use @code{AUTO_INCREMENT} columns to generate unique values.
-@item
-Use @code{OPTIMIZE TABLE} once in a while to avoid fragmentation when
-using dynamic table format. @xref{OPTIMIZE TABLE, , @code{OPTIMIZE TABLE}}.
-
-@item
-Use @code{HEAP} tables to get more speed when possible. @xref{Table
-types}.
-@item
-When using a normal Web server setup, images should be stored as
-files. That is, store only a file reference in the database. The main
-reason for this is that a normal Web server is much better at caching
-files than database contents. So it's much easier to get a fast
-system if you are using files.
-@item
-Use in-memory tables for non-critical data that is accessed often (like
-information about the last shown banner for users that don't have
-cookies).
-@item
-Columns with identical information in different tables should be
-declared identical and have identical names. Before Version 3.23 you
-got slow joins otherwise.
-
-Try to keep the names simple (use @code{name} instead of
-@code{customer_name} in the customer table). To make your names portable
-to other SQL servers you should keep them shorter than 18 characters.
-@item
-If you need REALLY high speed, you should take a look at the low-level
-interfaces for data storage that the different SQL servers support! For
-example, by accessing the @strong{MySQL} @code{MyISAM} tables directly, you could
-get a speed increase of 2-5 times compared to using the SQL interface.
-To be able to do this the data must be on the same server as
-the application, and usually it should only be accessed by one process
-(because external file locking is really slow). One could eliminate the
-above problems by introducing low-level @code{MyISAM} commands in the
-@strong{MySQL} server (this could be one easy way to get more
-performance if needed). By carefully designing the database interface,
-it should be quite easy to support these types of optimization.
-@item
-In many cases it's faster to access data from a database (using a live
-connection) than to access a text file, just because the database is
-likely to be more compact than the text file (if you are using numerical
-data), and this will involve fewer disk accesses. You will also save
-code because you don't have to parse your text files to find line and
-column boundaries.
-@item
-You can also use replication to speed things up. @xref{Replication}.
-@item
-Declaring a table with @code{DELAY_KEY_WRITE=1} (see the example after this
-list) will make the updating of indexes faster, as these are not flushed to
-disk until the file is closed.
-The downside is that you should run @code{myisamchk} on these tables before
-you start @code{mysqld} to ensure that they are okay if something killed
-@code{mysqld} in the middle. As the key information can always be generated
-from the data, you should not lose anything by using @code{DELAY_KEY_WRITE}.
-@end itemize
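-
-For example (@code{log_table} and its columns are only illustrative), a
-table with delayed key writes, as mentioned in the last item above, could
-be created like this:
-
-@example
-mysql> CREATE TABLE log_table (a INT NOT NULL, b CHAR(20), INDEX (a))
-           TYPE=MyISAM DELAY_KEY_WRITE=1;
-@end example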
-
-@cindex benchmarks
-@cindex performance, benchmarks
-@node Benchmarks, Design, Tips, Performance
-@section Using Your Own Benchmarks
-
-You should definitely benchmark your application and database to find
-out where the bottlenecks are. By fixing a bottleneck (or by replacing it
-with a 'dummy module') you can then easily identify the next
-bottleneck (and so on). Even if the overall performance for your
-application is sufficient, you should at least make a plan for each
-bottleneck, and decide how to solve it if someday you really need the
-extra performance.
-
-For an example of portable benchmark programs, look at the @strong{MySQL}
-benchmark suite. @xref{MySQL Benchmarks, , @strong{MySQL} Benchmarks}. You
-can take any program from this suite and modify it for your needs. By doing this,
-you can try different solutions to your problem and test which is really the
-fastest solution for you.
-
-It is very common that some problems only occur when the system is very
-heavily loaded. We have had many customers who contacted us when they
-had a (tested) system in production and encountered load problems. In
-every one of these cases so far, the problems were with basic design
-(table scans are NOT good at high load) or with OS/library issues. Most of
-this would be a @strong{LOT} easier to fix if the systems were not
-already in production.
-
-To avoid problems like this, you should put some effort into benchmarking
-your whole application under the worst possible load! You can use Sasha's
-recent hack for this -
-@uref{http://www.mysql.com/Downloads/super-smack/super-smack-1.0.tar.gz,
-super-smack}.
-As the name suggests, it can bring your system to its knees if you ask it to,
-so make sure to use it only on your development systems.
-
-@cindex design, choices
-@cindex database design
-@cindex storage of data
-@node Design, Design Limitations, Benchmarks, Performance
-@section Design Choices
-
-@strong{MySQL} keeps row data and index data in separate files. Many (almost
-all) other databases mix row and index data in the same file. We believe that
-the @strong{MySQL} choice is better for a very wide range of modern systems.
-
-Another way to store the row data is to keep the information for each
-column in a separate area (examples are SDBM and Focus). This will cause a
-performance hit for every query that accesses more than one column. Because
-this degenerates so quickly when more than one column is accessed,
-we believe that this model is not good for general purpose databases.
-
-The more common case is that the index and data are stored together
-(like in Oracle/Sybase et al). In this case you will find the row
-information at the leaf page of the index. The good thing about this
-layout is that, in many cases, depending on how well the index is
-cached, it saves a disk read. The bad things about this layout are:
-
-@itemize @bullet
-@item
-Table scanning is much slower because you have to read through the indexes
-to get at the data.
-@item
-You can't use only the index table to retrieve data for a query.
-@item
-You lose a lot of space, as you must duplicate indexes from the nodes
-(as you can't store the row in the nodes).
-@item
-Deletes will degenerate the table over time (as indexes in nodes are
-usually not updated on delete).
-@item
-It's harder to cache ONLY the index data.
-@end itemize
-
-@cindex design, limitations
-@node Design Limitations, Portability, Design, Performance
-@section MySQL Design Limitations/Tradeoffs
-
-Because @strong{MySQL} uses extremely fast table locking (multiple readers /
-single writers) the biggest remaining problem is a mix of a steady stream of
-inserts and slow selects on the same table.
-
-We believe that for a huge number of systems the extremely fast
-performance in other cases makes this choice a win. This case is usually
-also possible to solve by having multiple copies of the table, but it
-takes more effort and hardware.
-
-We are also working on some extensions to solve this problem for some
-common application niches.
-
-@cindex portability
-@cindex crash-me program
-@cindex programs, crash-me
-@node Portability, Internal use, Design Limitations, Performance
-@section Portability
-
-Because all SQL servers implement different parts of SQL, it takes work to
-write portable SQL applications. For very simple selects/inserts it is
-very easy, but the more you need the harder it gets. If you want an
-application that is fast with many databases it becomes even harder!
-
-To make a complex application portable you need to choose a number of
-SQL servers that it should work with.
-
-You can use the @strong{MySQL} crash-me program/web-page
-@uref{http://www.mysql.com/information/crash-me.php} to find functions,
-types, and limits you can use with a selection of database
-servers. Crash-me does not yet test everything possible, but it
-is still comprehensive, with about 450 things tested.
-
-For example, you shouldn't have column names longer than 18 characters
-if you want to be able to use Informix or DB2.
-
-Both the @strong{MySQL} benchmarks and crash-me programs are very
-database-independent. By taking a look at how we have handled this, you
-can get a feeling for what you have to do to make your application
-database-independent. The benchmarks themselves can be found in the
-@file{sql-bench} directory in the @strong{MySQL} source
-distribution. They are written in Perl with the DBI database interface
-(which solves the access part of the problem).
-
-See @uref{http://www.mysql.com/information/benchmarks.html} for the results
-from this benchmark.
-
-As you can see in these results, all databases have some weak points. That
-is, they have different design compromises that lead to different
-behavior.
-
-If you strive for database independence, you need to get a good feeling
-for each SQL server's bottlenecks. @strong{MySQL} is VERY fast in
-retrieving and updating things, but will have a problem in mixing slow
-readers/writers on the same table. Oracle, on the other hand, has a big
-problem when you try to access rows that you have recently updated
-(until they are flushed to disk). Transaction databases in general are
-not very good at generating summary tables from log tables, as in this
-case row locking is almost useless.
-
-To get your application @emph{really} database-independent, you need to define
-an easily extendable interface through which you manipulate your data. As
-C++ is available on most systems, it makes sense to use a C++ class
-interface to the databases.
-
-If you use a feature specific to some database (like the
-@code{REPLACE} command in @strong{MySQL}), you should code a method for
-the other SQL servers to implement the same feature (but slower). With
-@strong{MySQL} you can use the @code{/*! */} syntax to add
-@strong{MySQL}-specific keywords to a query. The code inside
-@code{/**/} will be treated as a comment (ignored) by most other SQL
-servers.
-
-If REALLY high performance is more important than exactness, as in some
-Web applications, a possibility is to create an application layer that
-caches all results to give you even higher performance. By letting
-old results 'expire' after a while, you can keep the cache reasonably
-fresh. This is quite nice in case of extremely high load, in which case
-you can dynamically increase the cache and set the expire timeout higher
-until things get back to normal.
-
-In this case the table creation information should contain information
-about the initial size of the cache and how often the table should normally
-be refreshed.
-
-@cindex uses, of MySQL
-@cindex customers, of MySQL
-@node Internal use, , Portability, Performance
-@section What Have We Used MySQL For?
-
-During @strong{MySQL}'s initial development, the features of @strong{MySQL} were made to fit
-our largest customer, which handles data warehousing for a couple of the
-biggest retailers in Sweden.
-
-From all stores, we get weekly summaries of all bonus card transactions,
-and we are expected to provide useful information for the store owners
-to help them find how their advertisement campaigns are affecting their
-customers.
-
-The data is quite huge (about 7 million summary transactions per month),
-and we have data for 4-10 years that we need to present to the users.
-We get weekly requests from the customers, who want to get
-'instant' access to new reports from this data.
-
-We solved this by storing all information per month in compressed
-'transaction' tables. We have a set of simple macros (scripts) that
-generate summary tables grouped by different criteria (product group,
-customer id, store ...) from the transaction tables. The reports are
-Web pages that are dynamically generated by a small Perl script that
-parses a Web page, executes the SQL statements in it, and inserts the
-results. We would have used PHP or mod_perl instead but they were
-not available at that time.
-
-For graphical data we wrote a simple tool in @code{C} that can produce
-GIFs based on the result of a SQL query (with some processing of the
-result). This is also dynamically executed from the Perl script that
-parses the @code{HTML} files.
-
-In most cases a new report can simply be done by copying an existing
-script and modifying the SQL query in it. In some cases, we will need to
-add more fields to an existing summary table or generate a new one, but
-this is also quite simple, as we keep all transaction tables on disk.
-(Currently we have at least 50G of transaction tables and 200G of other
-customer data.)
-
-We also let our customers access the summary tables directly with ODBC
-so that the advanced users can themselves experiment with the data.
-
-We haven't had any problems handling this with a quite modest Sun Ultra
-SPARCstation (2 x 200 MHz). We recently upgraded one of our servers to a 2
-CPU 400 MHz UltraSPARC, and we are now planning to start handling
-transactions on the product level, which would mean a ten-fold increase
-of data. We think we can keep up with this by just adding more disks to
-our systems.
-
-We are also experimenting with Intel-Linux to be able to get more CPU
-power cheaper. Now that we have the binary portable database format (new
-in Version 3.23), we will start to use this for some parts of the application.
-
-Our initial feelings are that Linux will perform much better on
-low-to-medium load and Solaris will perform better when you start to get a
-high load because of extreme disk IO, but we don't yet have anything
-conclusive about this. After some discussion with a Linux kernel
-developer, this might be a side effect of Linux giving so many resources
-to the batch job that the interactive performance gets very low. This
-makes the machine feel very slow and unresponsive while big batch jobs are
-running. Hopefully this will be handled better in future Linux kernels.
-
-@cindex benchmark suite
-@cindex crash-me program
-@node MySQL Benchmarks, Tools, Performance, Top
-@chapter The MySQL Benchmark Suite
-
-This should contain a technical description of the @strong{MySQL}
-benchmark suite (and @code{crash-me}), but that description is not
-written yet. Currently, you can get a good idea of the benchmark by
-looking at the code and results in the @file{sql-bench} directory in any
-@strong{MySQL} source distribution.
-
-This benchmark suite is meant to be a benchmark that will tell any user
-what things a given SQL implementation performs well or poorly at.
-
-Note that this benchmark is single-threaded, so it measures the minimum
-time for the operations. We plan to add a lot of multi-threaded tests
-to the benchmark suite in the future.
-
-For example (run on the same NT 4.0 machine):
-
-@multitable @columnfractions .6 .2 .2
-@strong{Reading 2000000 rows by index} @tab @strong{Seconds} @tab @strong{Seconds}
-@item mysql @tab 367 @tab 249
-@item mysql_odbc @tab 464
-@item db2_odbc @tab 1206
-@item informix_odbc @tab 121126
-@item ms-sql_odbc @tab 1634
-@item oracle_odbc @tab 20800
-@item solid_odbc @tab 877
-@item sybase_odbc @tab 17614
-@end multitable
-
-@multitable @columnfractions .6 .2 .2
-@strong{Inserting (350768) rows} @tab @strong{Seconds} @tab @strong{Seconds}
-@item mysql @tab 381 @tab 206
-@item mysql_odbc @tab 619
-@item db2_odbc @tab 3460
-@item informix_odbc @tab 2692
-@item ms-sql_odbc @tab 4012
-@item oracle_odbc @tab 11291
-@item solid_odbc @tab 1801
-@item sybase_odbc @tab 4802
-@end multitable
-
-In the above test @strong{MySQL} was run with an 8M index cache.
-
-We have gathered some more benchmark results at
-@uref{http://www.mysql.com/information/benchmarks.html}.
-
-Note that Oracle is not included because they asked to be removed. All
-Oracle benchmarks have to be passed by Oracle! We believe that makes
-Oracle benchmarks @strong{VERY} biased because the above benchmarks are
-supposed to show what a standard installation can do for a single
-client.
-
-To run the benchmark suite, you have to download a @strong{MySQL} source
-distribution, install the Perl DBI module and the Perl DBD driver for the
-database you want to test, and then do:
-
-@example
-cd sql-bench
-perl run-all-tests --server=#
-@end example
-
-where # is one of the supported servers. You can get a list of all options
-and supported servers by doing @code{run-all-tests --help}.
-
-@cindex crash-me
-@code{crash-me} tries to determine what features a database supports and
-what its capabilities and limitations are by actually running
-queries. For example, it determines:
-
-@itemize @bullet
-@item
-What column types are supported
-@item
-How many indexes are supported
-@item
-What functions are supported
-@item
-How big a query can be
-@item
-How big a @code{VARCHAR} column can be
-@end itemize
-
-You can find the results from crash-me on a lot of different databases at
-@uref{http://www.mysql.com/information/crash-me.php}.
-
-@cindex utilities
-@node Tools, Maintenance, MySQL Benchmarks, Top
-@chapter MySQL Utilities
-
-@menu
-* Programs:: What do the executables do?
-* mysqld-max:: mysqld-max, An extended mysqld server
-* safe_mysqld:: safe_mysqld, the wrapper around mysqld
-* mysqld_multi:: Program for managing multiple @strong{MySQL} servers
-* mysql:: The command line tool
-* mysqladmin:: Administering a @strong{MySQL} server
-* mysqldump:: Dumping the structure and data from @strong{MySQL} databases and tables
-* mysqlhotcopy:: Copying @strong{MySQL} Databases and Tables
-* mysqlimport:: Importing data from text files
-* perror:: Displaying error messages
-* mysqlshow:: Showing databases, tables and columns
-* myisampack:: The @strong{MySQL} compressed read-only table generator
-@end menu
-
-In this chapter you will learn about the @strong{MySQL} utilities that
-come with a distribution. You will learn what each of them does, how to
-use it, and what you should use it for.
-
-@cindex environment variables
-@cindex programs, list of
-@node Programs, mysqld-max, Tools, Tools
-@section Overview of the Different MySQL Programs
-
-All @strong{MySQL} clients that communicate with the server using the
-@code{mysqlclient} library use the following environment variables:
-
-@tindex MYSQL_UNIX_PORT environment variable
-@tindex Environment variable, MYSQL_UNIX_PORT
-@tindex MYSQL_TCP_PORT environment variable
-@tindex Environment variable, MYSQL_TCP_PORT
-@tindex MYSQL_PWD environment variable
-@tindex Environment variable, MYSQL_PWD
-@tindex MYSQL_DEBUG environment variable
-@tindex Environment variable, MYSQL_DEBUG
-@multitable @columnfractions .25 .75
-@item @strong{Name} @tab @strong{Description}
-@item @code{MYSQL_UNIX_PORT} @tab The default socket; used for connections to @code{localhost}
-@item @code{MYSQL_TCP_PORT} @tab The default TCP/IP port
-@item @code{MYSQL_PWD} @tab The default password
-@item @code{MYSQL_DEBUG} @tab Debug-trace options when debugging
-@item @code{TMPDIR} @tab The directory where temporary tables/files are created
-@end multitable
-
-Use of @code{MYSQL_PWD} is insecure.
-@xref{Connecting}.
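-
-For example, a client can be pointed at a non-default TCP/IP port and
-temporary-file directory by setting the corresponding environment
-variables before starting it (a sketch; the values shown are only
-illustrations):
-
-@example
-shell> MYSQL_TCP_PORT=3307 TMPDIR=/big_disk/tmp mysql test
-@end example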
-
-@tindex MYSQL_HISTFILE environment variable
-@tindex Environment variable, MYSQL_HISTFILE
-@tindex HOME environment variable
-@tindex Environment variable, HOME
-@cindex history file
-@cindex command line history
-@tindex .mysql_history file
-The @file{mysql} client uses the file named in the @code{MYSQL_HISTFILE}
-environment variable to save the command-line history. The default value for
-the history file is @file{$HOME/.mysql_history}, where @code{$HOME} is the
-value of the @code{HOME} environment variable. @xref{Environment variables}.
-
-All @strong{MySQL} programs take many different options. However, every
-@strong{MySQL} program provides a @code{--help} option that you can use
-to get a full description of the program's different options. For example, try
-@code{mysql --help}.
-
-You can override default options for all standard client programs with an
-option file. @ref{Option files}.
-
-The list below briefly describes the @strong{MySQL} programs:
-
-@table @code
-
-@cindex @code{myisamchk}
-@item myisamchk
-Utility to describe, check, optimize, and repair @strong{MySQL} tables.
-Because @code{myisamchk} has many functions, it is described in its own
-chapter. @xref{Maintenance}.
-
-@cindex @code{make_binary_distribution}
-@item make_binary_distribution
-Makes a binary release of a compiled @strong{MySQL}. This could be sent
-by FTP to @file{/pub/mysql/Incoming} on @code{support.mysql.com} for the
-convenience of other @strong{MySQL} users.
-
-@cindex @code{msql2mysql}
-@item msql2mysql
-A shell script that converts @code{mSQL} programs to @strong{MySQL}. It doesn't
-handle all cases, but it gives a good start when converting.
-
-@cindex @code{mysqlaccess}
-@item mysqlaccess
-A script that checks the access privileges for a host, user, and database
-combination.
-
-@cindex @code{mysqladmin}
-@item mysqladmin
-Utility for performing administrative operations, such as creating or
-dropping databases, reloading the grant tables, flushing tables to disk, and
-reopening log files. @code{mysqladmin} can also be used to retrieve version,
-process, and status information from the server.
-@xref{mysqladmin, , @code{mysqladmin}}.
-
-@cindex @code{mysqlbug}
-@item mysqlbug
-The @strong{MySQL} bug report script. This script should always be used when
-filing a bug report to the @strong{MySQL} list.
-
-@cindex @code{mysqld}
-@item mysqld
-The SQL daemon. This should always be running.
-
-@cindex @code{mysqldump}
-@item mysqldump
-Dumps a @strong{MySQL} database into a file as SQL statements or
-as tab-separated text files. Enhanced freeware originally by Igor Romanenko.
-@xref{mysqldump, , @code{mysqldump}}.
-
-@cindex @code{mysqlimport}
-@item mysqlimport
-Imports text files into their respective tables using @code{LOAD DATA
-INFILE}. @xref{mysqlimport, , @code{mysqlimport}}.
-
-@cindex @code{mysqlshow}
-@item mysqlshow
-Displays information about databases, tables, columns, and indexes.
-
-@cindex @code{mysql_install_db}
-@item mysql_install_db
-Creates the @strong{MySQL} grant tables with default privileges. This is
-usually executed only once, when first installing @strong{MySQL}
-on a system.
-
-@cindex @code{replace}
-@item replace
-A utility program that is used by @code{msql2mysql}, but that has more
-general applicability as well. @code{replace} changes strings in place in
-files or on the standard input. Uses a finite state machine to match longer
-strings first. Can be used to swap strings. For example, this command
-swaps @code{a} and @code{b} in the given files:
-
-@example
-shell> replace a b b a -- file1 file2 ...
-@end example
-@end table
-
-@cindex @code{mysqld-max}
-@node mysqld-max, safe_mysqld, Programs, Tools
-@section mysqld-max, An extended mysqld server
-
-@code{mysqld-max} is the MySQL server (@code{mysqld}) configured with
-the following configure options:
-
-@multitable @columnfractions .3 .7
-@item @strong{Option} @tab @strong{Comment}
-@item --with-server-suffix=-max @tab Add a suffix to the @code{mysqld} version string.
-@item --with-bdb @tab Support for Berkeley DB (BDB) tables
-@item --with-innodb @tab Support for InnoDB tables.
-@item CFLAGS=-DUSE_SYMDIR @tab Symbolic links support for Windows.
-@end multitable
-
-You can find the @strong{MySQL}-max binaries at
-@uref{http://www.mysql.com/downloads/mysql-max-3.23.html}.
-
-The Windows @strong{MySQL} 3.23 binary distribution includes both the
-standard @code{mysqld.exe} binary and the @code{mysqld-max.exe} binary;
-see @uref{http://www.mysql.com/downloads/mysql-3.23.html}.
-@xref{Windows installation}.
-
-Note that as Berkeley DB and InnoDB are not available for all platforms,
-some of the @code{Max} binaries may not have support for both of these.
-You can check which table types are supported by doing the following
-query:
-
-@example
-mysql> show variables like "have_%";
-+---------------+-------+
-| Variable_name | Value |
-+---------------+-------+
-| have_bdb      | YES   |
-| have_innodb   | NO    |
-| have_isam     | YES   |
-| have_raid     | YES   |
-| have_ssl      | NO    |
-+---------------+-------+
-@end example
-
-The meanings of the values are:
-
-@multitable @columnfractions .3 .7
-@item @strong{Value} @tab @strong{Meaning}
-@item YES @tab The option is activated and usable.
-@item NO @tab @strong{MySQL} is not compiled with support for this option.
-@item DISABLED @tab The xxxx option is disabled because @code{mysqld} was started with @code{--skip-xxxx}, or because not all options needed to enable it were given. In this case the @code{hostname.err} file should contain a reason why the option is disabled.
-@end multitable
-
-@strong{NOTE}: To be able to create InnoDB tables you @strong{MUST} edit
-your startup options to include at least the @code{innodb_data_file_path}
-option. @xref{InnoDB start}.
-
-To get better performance for BDB tables, you should add some configuration
-options for these too. @xref{BDB start}.
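-
-As a minimal sketch (the file name and size below are only illustrations,
-and more options are usually needed; see the referenced sections), the
-relevant startup options could be put in @file{my.cnf} like this:
-
-@example
-[mysqld]
-innodb_data_file_path = ibdata1:100M
-@end example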
-
-@code{safe_mysqld} will automatically try to start any @code{mysqld} binary
-with the @code{-max} suffix. This makes it very easy to test out another
-@code{mysqld} binary in an existing installation. Just run
-@code{configure} with the options you want and then install the new
-@code{mysqld} binary as @code{mysqld-max} in the same directory where
-your old @code{mysqld} binary is. @xref{safe_mysqld, , @code{safe_mysqld}}.
-
-The @code{mysqld-max} RPM uses the above-mentioned @code{safe_mysqld}
-feature. It just installs the @code{mysqld-max} executable, and
-@code{safe_mysqld} will automatically use this executable when
-@code{safe_mysqld} is restarted.
-
-The following table shows which table types our standard @strong{MySQL-Max}
-binaries include:
-
-@multitable @columnfractions .4 .3 .3
-@item @strong{System} @tab @strong{BDB} @tab @strong{InnoDB}
-@item AIX 4.3 @tab N @tab Y
-@item HP-UX 11.0 @tab N @tab Y
-@item Linux-Alpha @tab N @tab Y
-@item Linux-Intel @tab Y @tab Y
-@item Linux-Ia64 @tab N @tab Y
-@item Solaris-intel @tab N @tab Y
-@item Solaris-sparc @tab Y @tab Y
-@item SCO OSR5 @tab Y @tab Y
-@item UnixWare @tab Y @tab Y
-@item Windows/NT @tab Y @tab Y
-@end multitable
-
-@cindex tools, safe_mysqld
-@cindex scripts
-@cindex @code{safe_mysqld}
-@node safe_mysqld, mysqld_multi, mysqld-max, Tools
-@section safe_mysqld, the wrapper around mysqld
-
-@code{safe_mysqld} is the recommended way to start a @code{mysqld}
-daemon on Unix. @code{safe_mysqld} adds some safety features such as
-restarting the server when an error occurs and logging run-time
-information to a log file.
-
-If you don't use @code{--mysqld=#} or @code{--mysqld-version=#},
-@code{safe_mysqld} will use an executable named @code{mysqld-max} if it
-exists. If not, @code{safe_mysqld} will start @code{mysqld}.
-This makes it very easy to test using @code{mysqld-max} instead of
-@code{mysqld}: just copy @code{mysqld-max} to where you have
-@code{mysqld} and it will be used.
-
-Normally one should never edit the @code{safe_mysqld} script, but
-instead put the options to @code{safe_mysqld} in the
-@code{[safe_mysqld]} section in the @code{my.cnf}
-file. @code{safe_mysqld} will read all options from the @code{[mysqld]},
-@code{[server]} and @code{[safe_mysqld]} sections from the option files.
-@xref{Option files}.
-
-Note that all options on the command line to @code{safe_mysqld} are passed
-to @code{mysqld}. If you want to use any options in @code{safe_mysqld} that
-@code{mysqld} doesn't support, you must specify these in the option file.
-
-Most of the options to @code{safe_mysqld} are the same as the options to
-@code{mysqld}. @xref{Command-line options}.
-
-@code{safe_mysqld} supports the following options:
-
-@table @code
-@item --basedir=path
-@item --core-file-size=#
-Size of the core file @code{mysqld} should be able to create. Passed to @code{ulimit -c}.
-@item --datadir=path
-@item --defaults-extra-file=path
-@item --defaults-file=path
-@item --err-log=path
-@item --ledir=path
-Path to @code{mysqld}
-@item --log=path
-@item --mysqld=mysqld-version
-Name of the @code{mysqld} version in the @code{ledir} directory you want to start.
-@item --mysqld-version=version
-Similar to @code{--mysqld=} but here you only give the suffix for @code{mysqld}.
-For example if you use @code{--mysqld-version=max}, @code{safe_mysqld} will
-start the @code{ledir/mysqld-max} version. If the argument to
-@code{--mysqld-version} is empty, @code{ledir/mysqld} will be used.
-@item --no-defaults
-@item --open-files-limit=#
-Number of files @code{mysqld} should be able to open. Passed to @code{ulimit -n}. Note that you need to start @code{safe_mysqld} as root for this to work properly!
-@item --pid-file=path
-@item --port=#
-@item --socket=path
-@item --timezone=#
-Set the timezone (the @code{TZ} variable) to the value of this parameter.
-@item --user=#
-@end table
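-
-For example, instead of passing these options on the command line, they
-could be put in the @code{[safe_mysqld]} group of an option file (a
-sketch; the path and value are only illustrations):
-
-@example
-[safe_mysqld]
-err-log=/usr/local/mysql/data/hostname.err
-open-files-limit=8192
-@end example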
-
-The @code{safe_mysqld} script is written so that it normally is able to start
-a server that was installed from either a source or a binary version of
-@strong{MySQL}, even if these install the server in slightly different
-locations. @code{safe_mysqld} expects one of these conditions to be true:
-
-@itemize @bullet
-@item
-The server and databases can be found relative to the directory from which
-@code{safe_mysqld} is invoked. @code{safe_mysqld} looks under its working
-directory for @file{bin} and @file{data} directories (for binary
-distributions) or for @file{libexec} and @file{var} directories (for source
-distributions). This condition should be met if you execute
-@code{safe_mysqld} from your @strong{MySQL} installation directory (for
-example, @file{/usr/local/mysql} for a binary distribution).
-
-@item
-If the server and databases cannot be found relative to the working directory,
-@code{safe_mysqld} attempts to locate them by absolute pathnames. Typical
-locations are @file{/usr/local/libexec} and @file{/usr/local/var}.
-The actual locations were determined when the distribution from which
-@code{safe_mysqld} comes was built. They should be correct if
-@strong{MySQL} was installed in a standard location.
-@end itemize
-
-Because @code{safe_mysqld} will try to find the server and databases relative
-to its own working directory, you can install a binary distribution of
-@strong{MySQL} anywhere, as long as you start @code{safe_mysqld} from the
-@strong{MySQL} installation directory:
-
-@example
-shell> cd mysql_installation_directory
-shell> bin/safe_mysqld &
-@end example
-
-If @code{safe_mysqld} fails, even when invoked from the @strong{MySQL}
-installation directory, you can modify it to use the path to @code{mysqld}
-and the pathname options that are correct for your system. Note that if you
-upgrade @strong{MySQL} in the future, your modified version of
-@code{safe_mysqld} will be overwritten, so you should make a copy of your
-edited version that you can reinstall.
-
-@cindex tools, mysqld_multi
-@cindex scripts
-@cindex multi mysqld
-@cindex @code{mysqld_multi}
-@node mysqld_multi, mysql, safe_mysqld, Tools
-@section mysqld_multi, program for managing multiple @strong{MySQL} servers
-
-@code{mysqld_multi} is meant for managing several @code{mysqld}
-processes that listen on different UNIX sockets and TCP/IP ports.
-
-The program searches for group(s) named [mysqld#] in @file{my.cnf} (or
-the file given with --config-file=...), where # can be any positive
-number starting from 1. These groups should contain the same options as
-the usual @code{[mysqld]} group (e.g. options to @code{mysqld}; see the
-@strong{MySQL} manual for detailed information about this group), but
-with the port, socket, and other options that are wanted for each
-separate @code{mysqld} process. The number in the group name has another
-function: it can be used for starting, stopping, or reporting on
-specific @code{mysqld} servers with this program. See the usage and
-options below for more information.
-
-@example
-Usage: mysqld_multi [OPTIONS] @{start|stop|report@} [GNR,GNR,GNR...]
-or mysqld_multi [OPTIONS] @{start|stop|report@} [GNR-GNR,GNR,GNR-GNR,...]
-@end example
-
-GNR above stands for the group number. You can start, stop, or report on
-any GNR, or several of them at the same time (see --example). The list
-of GNRs can be comma separated or combined with a dash, where the latter
-means that all the GNRs between GNR1 and GNR2 will be affected. Without
-a GNR argument, all the groups found will be either started, stopped, or
-reported. Note that you must not have any white space in the GNR list;
-anything after a white space is ignored.
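-
-For example, the following (a sketch; the group numbers are only
-illustrations) would stop the servers defined in groups 1 and 5 through 7:
-
-@example
-shell> mysqld_multi stop 1,5-7
-@end example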
-
-@code{mysqld_multi} supports the following options:
-
-@table @code
-@cindex config-file option
-@item --config-file=...
-Alternative config file. NOTE: This does not affect this program's own
-options (group @code{[mysqld_multi]}), but only the groups
-[mysqld#]. Without this option, everything is searched for in the
-ordinary my.cnf file.
-@cindex example option
-@item --example
-Give an example of a config file.
-@cindex help option
-@item --help
-Print this help and exit.
-@cindex log option
-@item --log=...
-Log file. Full path to the log file, including its name. NOTE: If the
-file already exists, everything will be appended to it.
-@cindex mysqladmin option
-@item --mysqladmin=...
-@code{mysqladmin} binary to be used for a server shutdown.
-@cindex mysqld option
-@item --mysqld=...
-@code{mysqld} binary to be used. Note that you can also give
-@code{safe_mysqld} to this option. The options are passed to
-@code{mysqld}. Just make sure that you have @code{mysqld} in your
-@code{PATH} environment variable, or fix @code{safe_mysqld}.
-@cindex no-log option
-@item --no-log
-Print to stdout instead of the log file. By default the log file is
-turned on.
-@cindex password option
-@item --password=...
-Password for user for @code{mysqladmin}.
-@cindex tcp-ip option
-@item --tcp-ip
-Connect to the @strong{MySQL} server(s) via the TCP/IP port instead of
-the UNIX socket. This affects stopping and reporting. If a socket file
-is missing, the server may still be running, but can be accessed only
-via the TCP/IP port. By default connecting is done via the UNIX socket.
-@cindex user option
-@item --user=...
-@strong{MySQL} user for @code{mysqladmin}.
-@cindex version option
-@item --version
-Print the version number and exit.
-@end table
-
-Some notes about @code{mysqld_multi}:
-
-@itemize @bullet
-@item
-Make sure that the @strong{MySQL} user who stops the @code{mysqld}
-services (e.g. using @code{mysqladmin}) has the same username and
-password for all the data directories accessed (in the 'mysql'
-database), and make sure that the user has the 'Shutdown_priv'
-privilege! If you have many data directories and many different 'mysql'
-databases with different passwords for the @strong{MySQL} 'root' user,
-you may want to create a common 'multi_admin' user with the same
-password for each of them (see below). Example of how to do it:
-@example
-shell> mysql -u root -S /tmp/mysql.sock -proot_password -e
-"GRANT SHUTDOWN ON *.* TO multi_admin@@localhost IDENTIFIED BY 'multipass'"
-@end example
-@xref{Privileges}.
-You will have to do the above for each @code{mysqld} running in each
-data directory that you have (just change the socket, -S=...).
-@item
-@code{pid-file} is very important if you are using @code{safe_mysqld}
-to start @code{mysqld} (e.g. --mysqld=safe_mysqld). Every @code{mysqld}
-should have its own @code{pid-file}. The advantage of using
-@code{safe_mysqld} instead of @code{mysqld} directly here is that
-@code{safe_mysqld} 'guards' every @code{mysqld} process and will restart
-it if a @code{mysqld} process fails due to a signal such as kill -9 or
-similar (like a segmentation fault, which @strong{MySQL} should never
-do, of course ;). Please note that the @code{safe_mysqld} script may
-require that you start it from a certain place. This means that you may
-have to change to a certain directory before you start
-@code{mysqld_multi}. If you have problems starting, please see the
-@code{safe_mysqld} script. Check especially these lines:
-@example
---------------------------------------------------------------------------
-MY_PWD=`pwd`
-# Check if we are starting this relative (for the binary release)
-if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys \
-   -a -x ./bin/mysqld
---------------------------------------------------------------------------
-@end example
-@xref{safe_mysqld, , @code{safe_mysqld}}.
-The above test should be successful, or you may encounter problems.
-@item
-Beware of the dangers of starting multiple @code{mysqlds} in the same data
-directory. Use separate data directories, unless you @strong{KNOW} what
-you are doing!
-@item
-The socket file and the TCP/IP port must be different for every @code{mysqld}.
-@item
-The first and fifth @code{mysqld} groups were intentionally left out of
-the example. You may have 'gaps' in the config file. This gives you
-more flexibility. The order in which the @code{mysqlds} are started or
-stopped depends on the order in which they appear in the config file.
-@item
-When you want to refer to a certain group using a GNR with this program,
-just use the number at the end of the group name ( [mysqld# <== ).
-@item
-You may want to use the '--user' option for @code{mysqld}, but in order to
-do this you need to be root when you start the @code{mysqld_multi}
-script. Having the option in the config file doesn't matter; you will
-just get a warning if you are not the superuser and the @code{mysqlds}
-are started under @strong{YOUR} UNIX account. @strong{IMPORTANT}: Make
-sure that the @code{pid-file} and the data directory are readable and
-writable (and executable for the latter one) for @strong{THAT} UNIX
-user as which the specific @code{mysqld} process is started.
-@strong{DON'T} use the UNIX root account for this, unless you
-@strong{KNOW} what you are doing!
-@item
-@strong{MOST IMPORTANT}: Make sure that you understand the meanings of
-the options that are passed to the @code{mysqlds} and why you would
-@strong{WANT} to have separate @code{mysqld} processes. Starting multiple
-@code{mysqlds} in one data directory @strong{WILL NOT} give you extra
-performance in a threaded system!
-@end itemize
-
-@xref{Multiple servers}.
-
-This is an example of a config file for use with @code{mysqld_multi}.
-
-@example
-# This file should probably be in your home dir (~/.my.cnf) or /etc/my.cnf
-# Version 2.1 by Jani Tolonen
-
-[mysqld_multi]
-mysqld = /usr/local/bin/safe_mysqld
-mysqladmin = /usr/local/bin/mysqladmin
-user = multi_admin
-password = multipass
-
-[mysqld2]
-socket = /tmp/mysql.sock2
-port = 3307
-pid-file = /usr/local/mysql/var2/hostname.pid2
-datadir = /usr/local/mysql/var2
-language = /usr/local/share/mysql/english
-user = john
-
-[mysqld3]
-socket = /tmp/mysql.sock3
-port = 3308
-pid-file = /usr/local/mysql/var3/hostname.pid3
-datadir = /usr/local/mysql/var3
-language = /usr/local/share/mysql/swedish
-user = monty
-
-[mysqld4]
-socket = /tmp/mysql.sock4
-port = 3309
-pid-file = /usr/local/mysql/var4/hostname.pid4
-datadir = /usr/local/mysql/var4
-language = /usr/local/share/mysql/estonia
-user = tonu
-
-[mysqld6]
-socket = /tmp/mysql.sock6
-port = 3311
-pid-file = /usr/local/mysql/var6/hostname.pid6
-datadir = /usr/local/mysql/var6
-language = /usr/local/share/mysql/japanese
-user = jani
-@end example
-
-@xref{Option files}.
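-
-Given the sample configuration above, the following (a sketch) would
-start the servers defined in groups 2 through 4 and then report the
-status of all groups:
-
-@example
-shell> mysqld_multi start 2-4
-shell> mysqld_multi report
-@end example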
-
-@cindex command line tool
-@cindex tools, command line
-@cindex scripts
-@cindex @code{mysql}
-@node mysql, mysqladmin, mysqld_multi, Tools
-@section The Command-line Tool
-
-@code{mysql} is a simple SQL shell (with GNU @code{readline} capabilities).
-It supports interactive and non-interactive use. When used interactively,
-query results are presented in an ASCII-table format. When used
-non-interactively (for example, as a filter), the result is presented in
-tab-separated format. (The output format can be changed using command-line
-options.) You can run a script simply like this:
-
-@example
-shell> mysql database < script.sql > output.tab
-@end example
-
-If you have problems due to insufficient memory in the client, use the
-@code{--quick} option! This forces @code{mysql} to use
-@code{mysql_use_result()} rather than @code{mysql_store_result()} to
-retrieve the result set.
-
-Using @code{mysql} is very easy. Just start it as follows:
-@code{mysql database} or @code{mysql --user=user_name --password=your_password database}. Type a SQL statement, end it with @samp{;}, @samp{\g}, or @samp{\G}
-and press RETURN/ENTER.
-
-@cindex command line options
-@cindex options, command line
-@cindex startup parameters
-@code{mysql} supports the following options:
-
-@table @code
-@cindex help option
-@item -?, --help
-Display this help and exit.
-@cindex automatic rehash option
-@item -A, --no-auto-rehash
-No automatic rehashing. One has to use 'rehash' to get table and field
-completion. This makes @code{mysql} start more quickly.
-@cindex batch option
-@item -B, --batch
-Print results with a tab as separator, each row on a new line. Doesn't use
-history file.
-@cindex character sets option
-@item --character-sets-dir=...
-Directory where character sets are located.
-@cindex compress option.
-@item -C, --compress
-Use compression in server/client protocol.
-@cindex debug option
-@item -#, --debug[=...]
-Debug log. Default is 'd:t:o,/tmp/mysql.trace'.
-@cindex database option
-@item -D, --database=...
-Database to use. This is mainly useful in the @code{my.cnf} file.
-@cindex default character set option
-@item --default-character-set=...
-Set the default character set.
-@cindex execute option
-@item -e, --execute=...
-Execute command and quit. (Output like with --batch)
-@cindex vertical option
-@item -E, --vertical
-Print the output of a query (rows) vertically. Without this option you
-can also force this output by ending your statements with @code{\G}.
-@cindex force option
-@item -f, --force
-Continue even if we get a SQL error.
-@cindex no-named-commands option
-@item -g, --no-named-commands
-Named commands are disabled. Use \* form only, or use named commands
-only in the beginning of a line ending with a semicolon (;). Since
-Version 10.9, the client now starts with this option ENABLED by default!
-With the -g option, long format commands will still work from the first
-line, however.
-@cindex enable-named-commands option
-@item -G, --enable-named-commands
-Named commands are @strong{enabled}. Long format commands are allowed as
-well as shortened \* commands.
-@cindex ignore space option.
-@item -i, --ignore-space
-Ignore space after function names.
-@cindex host option
-@item -h, --host=...
-Connect to the given host.
-@cindex html option
-@item -H, --html
-Produce HTML output.
-@cindex skip line numbers option
-@item -L, --skip-line-numbers
-Don't write line numbers for errors. Useful when one wants to compare result
-files that include error messages.
-@cindex no pager option
-@item --no-pager
-Disable pager and print to stdout. See interactive help (\h) also.
-@cindex no tee option
-@item --no-tee
-Disable outfile. See interactive help (\h) also.
-@cindex unbuffered option.
-@item -n, --unbuffered
-Flush buffer after each query.
-@cindex skip column names option
-@item -N, --skip-column-names
-Don't write column names in results.
-@cindex set variable option
-@item -O, --set-variable var=option
-Give a variable a value. @code{--help} lists variables.
-@cindex one database option
-@item -o, --one-database
-Only update the default database. This is useful for skipping updates to
-other databases in the update log.
-@cindex pager option
-@item @code{--pager[=...]}
-Set the pager used for output. The default is taken from your @code{PAGER}
-environment variable. Valid pagers are less, more, cat [> filename], etc.
-See interactive help (\h) also. This option does not work in batch mode.
-The pager works only in UNIX.
-@cindex password option
-@item -p[password], --password[=...]
-Password to use when connecting to server. If a password is not given on
-the command line, you will be prompted for it. Note that if you use the
-short form @code{-p} you can't have a space between the option and the
-password.
-@item -P, --port=...
-TCP/IP port number to use for connection.
-@cindex quick option
-@item -q, --quick
-Don't cache result, print it row-by-row. This may slow down the server
-if the output is suspended. Doesn't use history file.
-@cindex raw option
-@item -r, --raw
-Write column values without escape conversion. Used with @code{--batch}.
-@cindex silent option
-@item -s, --silent
-Be more silent.
-@item -S, --socket=...
-Socket file to use for connection.
-@cindex table option
-@item -t, --table
-Output in table format. This is the default in non-batch mode.
-@item -T, --debug-info
-Print some debug information at exit.
-@cindex tee option
-@item --tee=...
-Append everything into outfile. See interactive help (\h) also. Does not
-work in batch mode.
-@cindex user option
-@item -u, --user=#
-User for login if not current user.
-@cindex safe updates option
-@item -U, --safe-updates[=#], --i-am-a-dummy[=#]
-Only allow @code{UPDATE} and @code{DELETE} statements that use keys. See
-below for more information about this option. You can reset this option
-if you have it in your @code{my.cnf} file by using @code{--safe-updates=0}.
-@cindex verbose option
-@item -v, --verbose
-More verbose output (-v -v -v gives the table output format).
-@cindex version option
-@item -V, --version
-Output version information and exit.
-@cindex wait option
-@item -w, --wait
-Wait and retry if connection is down instead of aborting.
-@end table
-
-You can also set the following variables with @code{-O} or
-@code{--set-variable}:
-
-@cindex timeout
-@multitable @columnfractions .3 .2 .5
-@item Variable name @tab Default @tab Description
-@item connect_timeout @tab 0 @tab Number of seconds before a connection timeout.
-@item max_allowed_packet @tab 16777216 @tab Maximum packet length to send to or receive from the server.
-@item net_buffer_length @tab 16384 @tab Buffer size for TCP/IP and socket communication.
-@item select_limit @tab 1000 @tab Automatic limit for @code{SELECT} when using --i-am-a-dummy.
-@item max_join_size @tab 1000000 @tab Automatic limit for rows in a join when using --i-am-a-dummy.
-@end multitable
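-
-For example, to wait up to ten seconds when connecting (a sketch; the
-value and database name are only illustrations):
-
-@example
-shell> mysql -O connect_timeout=10 database
-@end example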
-
-If you type 'help' on the command line, @code{mysql} will print out the
-commands that it supports:
-
-@cindex commands, list of
-@example
-mysql> help
-
-MySQL commands:
-help (\h) Display this text.
-? (\h) Synonym for `help'.
-clear (\c) Clear command.
-connect (\r) Reconnect to the server. Optional arguments are db and host.
-edit (\e) Edit command with $EDITOR.
-ego (\G) Send command to mysql server, display result vertically.
-exit (\q) Exit mysql. Same as quit.
-go (\g) Send command to mysql server.
-nopager (\n) Disable pager, print to stdout.
-notee (\t) Don't write into outfile.
-pager (\P) Set PAGER [to_pager]. Print the query results via PAGER.
-print (\p) Print current command.
-quit (\q) Quit mysql.
-rehash (\#) Rebuild completion hash.
-source (\.) Execute a SQL script file. Takes a file name as an argument.
-status (\s) Get status information from the server.
-tee (\T) Set outfile [to_outfile]. Append everything into given outfile.
-use (\u) Use another database. Takes database name as argument.
-@end example
-
-As noted above, the pager works only in UNIX.
-
-@cindex status command
-The @code{status} command gives you some information about the
-connection and the server you are using. If you are running in the
-@code{--safe-updates} mode, @code{status} will also print the values for
-the @code{mysql} variables that affect your queries.
-
-@cindex @code{safe-mode} command
-A useful startup option for beginners (introduced in @strong{MySQL}
-Version 3.23.11) is @code{--safe-updates} (or @code{--i-am-a-dummy} for
-users that have at some time done a @code{DELETE FROM table_name} but
-forgotten the @code{WHERE} clause). When using this option, @code{mysql}
-sends the following command to the @strong{MySQL} server when opening
-the connection:
-
-@example
-SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=#select_limit#,
-    SQL_MAX_JOIN_SIZE=#max_join_size#
-@end example
-
-where @code{#select_limit#} and @code{#max_join_size#} are variables that
-can be set from the @code{mysql} command line. @xref{SET OPTION, @code{SET}}.
-
-The effect of the above is:
-
-@itemize @bullet
-@item
-You are not allowed to do an @code{UPDATE} or @code{DELETE} statement
-if you don't have a key constraint in the @code{WHERE} part. One can,
-however, force an @code{UPDATE/DELETE} by using @code{LIMIT}:
-@example
-UPDATE table_name SET not_key_column=# WHERE not_key_column=# LIMIT 1;
-@end example
-@item
-All big results are automatically limited to @code{#select_limit#} rows.
-@item
-@code{SELECT}s that will probably need to examine more than
-@code{#max_join_size#} row combinations will be aborted.
-@end itemize
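-
-For example, the limits can be adjusted when starting the client (a
-sketch; the values and database name are only illustrations):
-
-@example
-shell> mysql --safe-updates -O select_limit=500 -O max_join_size=10000 database
-@end example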
-
-Some useful hints about the @code{mysql} client:
-
-Some data is much more readable when displayed vertically instead of
-with the usual horizontal box type output. For example, longer text
-that includes new lines is often much easier to read with vertical
-output.
-
-@example
-mysql> select * from mails where length(txt) < 300 limit 300,1\G
-*************************** 1. row ***************************
- msg_nro: 3068
- date: 2000-03-01 23:29:50
-time_zone: +0200
-mail_from: Monty
- reply: monty@@no.spam.com
- mail_to: "Thimble Smith" <tim@@no.spam.com>
- sbj: UTF-8
- txt: >>>>> "Thimble" == Thimble Smith writes:
-
-Thimble> Hi. I think this is a good idea. Is anyone familiar with UTF-8
-Thimble> or Unicode? Otherwise I'll put this on my TODO list and see what
-Thimble> happens.
-
-Yes, please do that.
-
-Regards,
-Monty
- file: inbox-jani-1
- hash: 190402944
-1 row in set (0.09 sec)
-@end example
-
-@itemize @bullet
-@item
-For logging, you can use the @code{tee} option. @code{tee} can be
-enabled with the option @code{--tee=...}, or interactively with the
-command @code{tee}. All the data displayed on the screen will also be
-appended to the given file. This can also be very useful for debugging
-purposes. @code{tee} can be disabled interactively with the command
-@code{notee}; executing @code{tee} again starts logging again. Without a
-parameter, the previous file will be used. Note that @code{tee} flushes
-the results to the file after each command, just before the command-line
-prompt appears again, waiting for the next command.
-@item
-Browsing or searching the results in interactive mode with UNIX less,
-more, or any other similar program is possible with the option
-@code{--pager[=...]}. Without an argument, the @code{mysql} client will
-look for the environment variable PAGER and set @code{pager} to that.
-@code{pager} can be enabled from the interactive command line with the
-command @code{pager} and disabled with the command @code{nopager}. The
-command optionally takes an argument, and the @code{pager} will be set to
-it. The command @code{pager} can be called without an argument, but this
-requires that the option @code{--pager} was used, or the @code{pager}
-will default to stdout. @code{pager} works only in UNIX, since it uses
-the popen() function, which doesn't exist in Windows. In Windows, the
-@code{tee} option can be used instead, although it may not be as handy
-as @code{pager} can be in some situations.
-@item
-A few tips about @code{pager}: You can use it to write to a file:
-@example
-mysql> pager cat > /tmp/log.txt
-@end example
-and the results will only go to a file. You can also pass any options
-for the programs that you want to use with the @code{pager}:
-@example
-mysql> pager less -n -i -S
-@end example
-Note the option '-S' above. You may find it very useful when browsing
-the results; try the option with horizontal output (end commands with
-'\g' or ';') and with vertical output (end commands with '\G').
-Sometimes a very wide result set is hard to read on the screen; with
-the -S option to less you can browse the results within the interactive
-less from left to right, preventing lines longer than your screen from
-being continued to the next line. This can make the result set much more
-readable. You can switch the mode on and off within the interactive
-less with '-S'. See 'h' in less for more help.
-@item
-Last (unless you already understood this from the above examples ;), you
-can combine the ways of handling results in very complex ways. For
-example, the following would send the results to two files in two
-different directories, on two different hard disks mounted on /dr1 and
-/dr2, yet still let the results be seen on the screen via less:
-@example
-mysql> pager cat | tee /dr1/tmp/res.txt | tee /dr2/tmp/res2.txt | less -n -i -S
-@end example
-@item
-You can also combine the two functions above: have @code{tee} enabled
-and @code{pager} set to 'less', and you will be able to browse the
-results in UNIX 'less' and still have everything appended to a file at
-the same time. The difference between @code{UNIX tee} used with the
-@code{pager} and the @code{mysql} client's built-in @code{tee} is that
-the built-in @code{tee} works even if you don't have @code{UNIX tee}
-available. The built-in @code{tee} also logs everything that is printed
-on the screen, whereas @code{UNIX tee} used with @code{pager} doesn't
-log quite that much. Last, but not least, the interactive @code{tee} is
-handier to switch on and off when you want to log something to a file
-but want to be able to turn the feature off sometimes.
-@end itemize
-
-
-@cindex administration, server
-@cindex server administration
-@cindex @code{mysqladmin}
-@node mysqladmin, mysqldump, mysql, Tools
-@section Administering a MySQL Server
-
-A utility for performing administrative operations. The syntax is:
-
-@example
-shell> mysqladmin [OPTIONS] command [command-option] command ...
-@end example
-
-You can get a list of the options your version of @code{mysqladmin} supports
-by executing @code{mysqladmin --help}.
-
-The current @code{mysqladmin} supports the following commands:
-
-@multitable @columnfractions .3 .7
-@item create databasename @tab Create a new database.
-@item drop databasename @tab Delete a database and all its tables.
-@item extended-status @tab Gives an extended status message from the server.
-@item flush-hosts @tab Flush all cached hosts.
-@item flush-logs @tab Flush all logs.
-@item flush-tables @tab Flush all tables.
-@item flush-privileges @tab Reload grant tables (same as reload).
-@item kill id,id,... @tab Kill mysql threads.
-@item password new-password @tab Change the old password to new-password.
-@item ping @tab Check if @code{mysqld} is alive.
-@item processlist @tab Show list of active threads in server.
-@item reload @tab Reload grant tables.
-@item refresh @tab Flush all tables and close and open logfiles.
-@item shutdown @tab Take server down.
-@item slave-start @tab Start slave replication thread.
-@item slave-stop @tab Stop slave replication thread.
-@item status @tab Gives a short status message from the server.
-@item variables @tab Prints variables available.
-@item version @tab Get version info from server.
-@end multitable
-
-All commands can be shortened to their unique prefix. For example:
-
-@example
-shell> mysqladmin proc stat
-+----+-------+-----------+----+-------------+------+-------+------+
-| Id | User | Host | db | Command | Time | State | Info |
-+----+-------+-----------+----+-------------+------+-------+------+
-| 6 | monty | localhost | | Processlist | 0 | | |
-+----+-------+-----------+----+-------------+------+-------+------+
-Uptime: 10077 Threads: 1 Questions: 9 Slow queries: 0 Opens: 6 Flush tables: 1 Open tables: 2 Memory in use: 1092K Max memory used: 1116K
-@end example
-
-@cindex status command, results
-The @code{mysqladmin status} command result has the following columns:
-
-@cindex uptime
-@multitable @columnfractions .3 .7
-@item Uptime @tab Number of seconds the @strong{MySQL} server has been up.
-@cindex threads
-@item Threads @tab Number of active threads (clients).
-@cindex questions
-@item Questions @tab Number of questions from clients since @code{mysqld} was started.
-@cindex slow queries
-@item Slow queries @tab Queries that have taken more than @code{long_query_time} seconds. @xref{Slow query log}.
-@cindex opens
-@item Opens @tab How many tables @code{mysqld} has opened.
-@cindex flush tables
-@cindex tables, flush
-@item Flush tables @tab Number of @code{flush ...}, @code{refresh}, and @code{reload} commands.
-@cindex open tables
-@item Open tables @tab Number of tables that are open now.
-@cindex memory use
-@item Memory in use @tab Memory allocated directly by the @code{mysqld} code (only available when @strong{MySQL} is compiled with --with-debug=full).
-@cindex max memory used
-@item Max memory used @tab Maximum memory allocated directly by the @code{mysqld} code (only available when @strong{MySQL} is compiled with --with-debug=full).
-@end multitable
-
-If you do @code{mysqladmin shutdown} on a socket (in other words, on
-the computer where @code{mysqld} is running), @code{mysqladmin} will
-wait until the @strong{MySQL} @code{pid-file} is removed to ensure that
-the @code{mysqld} server has stopped properly.
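-
-For example, the password of the account you connect as could be changed
-like this (a sketch; you will be prompted for the current password, and
-the new password shown is only an illustration):
-
-@example
-shell> mysqladmin -u root -p password 'new_password'
-@end example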
-
-@cindex dumping, databases
-@cindex databases, dumping
-@cindex tables, dumping
-@cindex backing up, databases
-@node mysqldump, mysqlhotcopy, mysqladmin, Tools
-@section Dumping the Structure and Data from MySQL Databases and Tables
-
-@cindex @code{mysqldump}
-Utility to dump a database or a collection of databases for backup or for
-transferring the data to another SQL server (not necessarily a @strong{MySQL}
-server). The dump will contain SQL statements to create the table
-and/or populate the table.
-
-If you are doing a backup on the server, you should consider using
-@code{mysqlhotcopy} instead. @xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
-
-@example
-shell> mysqldump [OPTIONS] database [tables]
-OR mysqldump [OPTIONS] --databases [OPTIONS] DB1 [DB2 DB3...]
-OR mysqldump [OPTIONS] --all-databases [OPTIONS]
-@end example
-
-If you don't give any tables or use the @code{--databases} or
-@code{--all-databases} option, entire databases will be dumped.
-
-You can get a list of the options your version of @code{mysqldump} supports
-by executing @code{mysqldump --help}.
-
-Note that if you run @code{mysqldump} without @code{--quick} or
-@code{--opt}, @code{mysqldump} will load the whole result set into
-memory before dumping the result. This will probably be a problem if
-you are dumping a big database.
-
-Note that if you are using a new copy of the @code{mysqldump} program
-and you are going to do a dump that will be read into a very old @strong{MySQL}
-server, you should not use the @code{--opt} or @code{-e} options.
-
-@code{mysqldump} supports the following options:
-
-@table @code
-@item --add-locks
-Add @code{LOCK TABLES} before and @code{UNLOCK TABLES} after each table dump.
-(To get faster inserts into @strong{MySQL}.)
-@item --add-drop-table
-Add a @code{drop table} before each create statement.
-@item -A, --all-databases
-Dump all the databases. This is the same as @code{--databases} with all
-databases selected.
-@item -a, --all
-Include all @strong{MySQL}-specific create options.
-@item --allow-keywords
-Allow creation of column names that are keywords. This works by
-prefixing each column name with the table name.
-@item -c, --complete-insert
-Use complete insert statements (with column names).
-@item -C, --compress
-Compress all information between the client and the server if both support
-compression.
-@item -B, --databases
-To dump several databases. Note the difference in usage. In this case
-no tables are given. All name arguments are regarded as database names.
-@code{USE db_name;} will be included in the output before each new database.
-@item --delayed
-Insert rows with the @code{INSERT DELAYED} command.
-@item -e, --extended-insert
-Use the new multiline @code{INSERT} syntax. (Gives more compact and
-faster insert statements.)
-@item -#, --debug[=option_string]
-Trace usage of the program (for debugging).
-@item --help
-Display a help message and exit.
-@item --fields-terminated-by=...
-@itemx --fields-enclosed-by=...
-@itemx --fields-optionally-enclosed-by=...
-@itemx --fields-escaped-by=...
-@itemx --lines-terminated-by=...
-These options are used with the @code{-T} option and have the same
-meaning as the corresponding clauses for @code{LOAD DATA INFILE}.
-@xref{LOAD DATA, , @code{LOAD DATA}}.
-@item -F, --flush-logs
-Flush log file in the @strong{MySQL} server before starting the dump.
-@item -f, --force
-Continue even if we get a SQL error during a table dump.
-@item -h, --host=...
-Dump data from the @strong{MySQL} server on the named host. The default host
-is @code{localhost}.
-@item -l, --lock-tables
-Lock all tables before starting the dump. The tables are locked with
-@code{READ LOCAL} to allow concurrent inserts in the case of @code{MyISAM}
-tables.
-@item -n, --no-create-db
-'CREATE DATABASE /*!32312 IF NOT EXISTS*/ db_name;' will not be put in the
-output. Otherwise, the above line is added if the --databases or
---all-databases option was given.
-@item -t, --no-create-info
-Don't write table creation information (the @code{CREATE TABLE} statement).
-@item -d, --no-data
-Don't write any row information for the table. This is very useful if you
-just want to get a dump of the structure for a table!
-@item --opt
-Same as @code{--quick --add-drop-table --add-locks --extended-insert
---lock-tables}. Should give you the fastest possible dump for reading
-into a @strong{MySQL} server.
-@item -pyour_pass, --password[=your_pass]
-The password to use when connecting to the server. If you specify
-no @samp{=your_pass} part, @code{mysqldump} will prompt you for a
-password.
-@item -P port_num, --port=port_num
-The TCP/IP port number to use for connecting to a host. (This is used for
-connections to hosts other than @code{localhost}, for which Unix sockets are
-used.)
-@item -q, --quick
-Don't buffer query, dump directly to stdout. Uses @code{mysql_use_result()}
-to do this.
-@item -r, --result-file=...
-Direct output to a given file. This option should be used on MS-DOS,
-because it prevents the newline '\n' from being converted to '\r\n'
-(carriage return + newline).
-@item -S /path/to/socket, --socket=/path/to/socket
-The socket file to use when connecting to @code{localhost} (which is the
-default host).
-@item --tables
-Overrides option --databases (-B).
-@item -T, --tab=path-to-some-directory
-Creates a @code{table_name.sql} file, containing the SQL CREATE commands,
-and a @code{table_name.txt} file, containing the data, for each given table.
-@strong{NOTE}: This only works if @code{mysqldump} is run on the same
-machine as the @code{mysqld} daemon. The format of the @code{.txt} file
-is made according to the @code{--fields-xxx} and @code{--lines-xxx} options.
-@item -u user_name, --user=user_name
-The @strong{MySQL} user name to use when connecting to the server. The
-default value is your Unix login name.
-@item -O var=option, --set-variable var=option
-Set the value of a variable. The possible variables are listed below.
-@item -v, --verbose
-Verbose mode. Print out more information on what the program does.
-@item -V, --version
-Print version information and exit.
-@item -w, --where='where-condition'
-Dump only selected records. Note that QUOTES are mandatory:
-
-@example
-"--where=user='jimf'" "-wuserid>1" "-wuserid<1"
-@end example
-@item -O net_buffer_length=#, where # < 16M
-When creating multi-row-insert statements (as with option
-@code{--extended-insert} or @code{--opt}), @code{mysqldump} will create
-rows up to @code{net_buffer_length} length. If you increase this
-variable, you should also ensure that the @code{max_allowed_packet}
-variable in the @strong{MySQL} server is bigger than the
-@code{net_buffer_length}.
-@end table
-
-The most common use of @code{mysqldump} is probably making a backup of
-whole databases. @xref{Backup}.
-
-@example
-mysqldump --opt database > backup-file.sql
-@end example
-
-You can read this back into @strong{MySQL} with:
-
-@example
-mysql database < backup-file.sql
-@end example
-
-or
-
-@example
-mysql -e "source /patch-to-backup/backup-file.sql" database
-@end example
-
-However, it's also very useful to populate another @strong{MySQL} server with
-information from a database:
-
-@example
-mysqldump --opt database | mysql --host=remote-host -C database
-@end example
-
-It is possible to dump several databases with one command:
-
-@example
-mysqldump --databases database1 [database2 database3...] > my_databases.sql
-@end example
-
-If all the databases are wanted, one can use:
-
-@example
-mysqldump --all-databases > all_databases.sql
-@end example
-
-@cindex dumping, databases
-@cindex databases, dumping
-@cindex tables, dumping
-@cindex backing up, databases
-@node mysqlhotcopy, mysqlimport, mysqldump, Tools
-@section Copying MySQL Databases and Tables
-
-@code{mysqlhotcopy} is a Perl script that uses @code{LOCK TABLES},
-@code{FLUSH TABLES}, and @code{cp} or @code{scp} to quickly make a backup
-of a database. It's the fastest way to make a backup of a database or of
-single tables, but it can only be run on the same machine where the
-database directories are located.
-
-@example
-mysqlhotcopy db_name [/path/to/new_directory]
-
-mysqlhotcopy db_name_1 ... db_name_n /path/to/new_directory
-
-mysqlhotcopy db_name./regex/
-@end example
-
-@code{mysqlhotcopy} supports the following options:
-
-@table @code
-@item -?, --help
-Display a help screen and exit
-@item -u, --user=#
-User for database login
-@item -p, --password=#
-Password to use when connecting to server
-@item -P, --port=#
-Port to use when connecting to local server
-@item -S, --socket=#
-Socket to use when connecting to local server
-@item --allowold
-Don't abort if target already exists (rename it _old)
-@item --keepold
-Don't delete previous (now renamed) target when done
-@item --noindices
-Don't include full index files in the copy, to make the backup smaller and
-faster. The indexes can later be reconstructed with @code{myisamchk -rq}.
-@item --method=#
-Method for copy (@code{cp} or @code{scp}).
-@item -q, --quiet
-Be silent except for errors
-@item --debug
-Enable debug
-@item -n, --dryrun
-Report actions without doing them
-@item --regexp=#
-Copy all databases with names matching regexp
-@item --suffix=#
-Suffix for names of copied databases
-@item --checkpoint=#
-Insert checkpoint entry into specified db.table
-@item --flushlog
-Flush logs once all tables are locked.
-@item --tmpdir=#
-Temporary directory (instead of /tmp).
-@end table
-
-You can use @code{perldoc mysqlhotcopy} to get a more complete
-documentation for @code{mysqlhotcopy}.
-
-@code{mysqlhotcopy} reads the groups @code{[client]} and @code{[mysqlhotcopy]}
-from the option files.
-
-To be able to execute @code{mysqlhotcopy} you need write access to the
-backup directory, @code{SELECT} privilege to the tables you are about to
-copy and the @strong{MySQL} @code{Reload} privilege (to be able to
-execute @code{FLUSH TABLES}).
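-
-For example, a single database could be copied into a backup directory
-like this (a sketch; the directory is only an illustration, and
-@code{--allowold} renames any previous copy to @code{_old} instead of
-aborting):
-
-@example
-shell> mysqlhotcopy --allowold db_name /backup/mysql
-@end example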
-
-@cindex importing, data
-@cindex data, importing
-@cindex files, text
-@cindex text files, importing
-@cindex @code{mysqlimport}
-@node mysqlimport, perror, mysqlhotcopy, Tools
-@section Importing Data from Text Files
-
-@code{mysqlimport} provides a command-line interface to the @code{LOAD DATA
-INFILE} SQL statement. Most options to @code{mysqlimport} correspond
-directly to the same options to @code{LOAD DATA INFILE}.
-@xref{LOAD DATA, , @code{LOAD DATA}}.
-
-@code{mysqlimport} is invoked like this:
-
-@example
-shell> mysqlimport [options] database textfile1 [textfile2....]
-@end example
-
-For each text file named on the command line,
-@code{mysqlimport} strips any extension from the filename and uses the result
-to determine which table to import the file's contents into. For example,
-files named @file{patient.txt}, @file{patient.text}, and @file{patient} would
-all be imported into a table named @code{patient}.
-
-@code{mysqlimport} supports the following options:
-
-@table @code
-@item -c, --columns=...
-This option takes a comma-separated list of field names as an argument.
-The field list is used to create a proper @code{LOAD DATA INFILE} command,
-which is then passed to @strong{MySQL}. @xref{LOAD DATA, , @code{LOAD DATA}}.
-
-@item -C, --compress
-Compress all information between the client and the server if both support
-compression.
-
-@item -#, --debug[=option_string]
-Trace usage of the program (for debugging).
-
-@item -d, --delete
-Empty the table before importing the text file.
-
-@item --fields-terminated-by=...
-@itemx --fields-enclosed-by=...
-@itemx --fields-optionally-enclosed-by=...
-@itemx --fields-escaped-by=...
-@itemx --lines-terminated-by=...
-These options have the same meaning as the corresponding clauses for
-@code{LOAD DATA INFILE}. @xref{LOAD DATA, , @code{LOAD DATA}}.
-
-@item -f, --force
-Ignore errors. For example, if a table for a text file doesn't exist,
-continue processing any remaining files. Without @code{--force},
-@code{mysqlimport} exits if a table doesn't exist.
-
-@item --help
-Display a help message and exit.
-
-@item -h host_name, --host=host_name
-Import data to the @strong{MySQL} server on the named host. The default host
-is @code{localhost}.
-
-@item -i, --ignore
-See the description for the @code{--replace} option.
-
-@item -l, --lock-tables
-Lock @strong{ALL} tables for writing before processing any text files. This
-ensures that all tables are synchronized on the server.
-
-@item -L, --local
-Read input files from the client. By default, text files are assumed to be on
-the server if you connect to @code{localhost} (which is the default host).
-
-@item -pyour_pass, --password[=your_pass]
-The password to use when connecting to the server. If you specify
-no @samp{=your_pass} part, @code{mysqlimport} will prompt you for a
-password.
-
-@item -P port_num, --port=port_num
-The TCP/IP port number to use for connecting to a host. (This is used for
-connections to hosts other than @code{localhost}, for which Unix sockets are
-used.)
-
-@item -r, --replace
-The @code{--replace} and @code{--ignore} options control handling of input
-records that duplicate existing records on unique key values. If you specify
-@code{--replace}, new rows replace existing rows that have the same unique key
-value. If you specify @code{--ignore}, input rows that duplicate an existing
-row on a unique key value are skipped. If you don't specify either option, an
-error occurs when a duplicate key value is found, and the rest of the text
-file is ignored.
-
-@item -s, --silent
-Silent mode. Write output only when errors occur.
-
-@item -S /path/to/socket, --socket=/path/to/socket
-The socket file to use when connecting to @code{localhost} (which is the
-default host).
-
-@item -u user_name, --user=user_name
-The @strong{MySQL} user name to use when connecting to the server. The
-default value is your Unix login name.
-
-@item -v, --verbose
-Verbose mode. Print out more information about what the program does.
-
-@item -V, --version
-Print version information and exit.
-@end table
-
-Here is a sample run using @code{mysqlimport}:
-
-@example
-$ mysql --version
-mysql Ver 9.33 Distrib 3.22.25, for pc-linux-gnu (i686)
-$ uname -a
-Linux xxx.com 2.2.5-15 #1 Mon Apr 19 22:21:09 EDT 1999 i586 unknown
-$ mysql -e 'CREATE TABLE imptest(id INT, n VARCHAR(30))' test
-$ ed
-a
-100 Max Sydow
-101 Count Dracula
-.
-w imptest.txt
-32
-q
-$ od -c imptest.txt
-0000000 1 0 0 \t M a x S y d o w \n 1 0
-0000020 1 \t C o u n t D r a c u l a \n
-0000040
-$ mysqlimport --local test imptest.txt
-test.imptest: Records: 2 Deleted: 0 Skipped: 0 Warnings: 0
-$ mysql -e 'SELECT * FROM imptest' test
-+------+---------------+
-| id | n |
-+------+---------------+
-| 100 | Max Sydow |
-| 101 | Count Dracula |
-+------+---------------+
-@end example
-
-@cindex error messages, displaying
-@cindex perror
-@node perror, mysqlshow, mysqlimport, Tools
-@section Converting an Error Code to the Corresponding Error Message
-
-@code{perror} can be used to print error message(s). @code{perror} can
-be invoked like this:
-
-@example
-shell> perror [OPTIONS] [ERRORCODE [ERRORCODE...]]
-
-For example:
-
-shell> perror 64 79
-Error code 64: Machine is not on the network
-Error code 79: Can not access a needed shared library
-@end example
-
-@code{perror} can be used to display a description for a system error
-code or a MyISAM/ISAM table handler error code. The error messages
-are mostly system dependent.
-
-@cindex databases, displaying
-@cindex displaying, database information
-@cindex tables, displaying
-@cindex columns, displaying
-@cindex showing, database information
-@node mysqlshow, myisampack, perror, Tools
-@section Showing Databases, Tables, and Columns
-
-@code{mysqlshow} can be used to quickly look at which databases exist,
-their tables, and a table's columns.
-
-With the @code{mysql} program you can get the same information with the
-@code{SHOW} commands. @xref{SHOW}.
-
-@code{mysqlshow} is invoked like this:
-
-@example
-shell> mysqlshow [OPTIONS] [database [table [column]]]
-@end example
-
-@itemize @bullet
-@item
-If no database is given, all matching databases are shown.
-@item
-If no table is given, all matching tables in the database are shown.
-@item
-If no column is given, all matching columns and column types in the table
-are shown.
-@end itemize
-
-Note that in newer @strong{MySQL} versions, you only see those
-databases, tables, and columns for which you have some privileges.
-
-If the last argument contains a shell or SQL wild-card (@code{*}, @code{?},
-@code{%}, or @code{_}), then only what's matched by the wild card is shown.
-This may cause some confusion when you try to display the columns for a
-table with a @code{_} in its name, because in this case @code{mysqlshow}
-only shows you the table names that match the pattern. This is easily
-fixed by adding an extra @code{%} last on the command line (as a separate
-argument).
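-
-For example, to see the columns of a table whose name contains an
-underscore (a sketch; the database and table names are only illustrations):
-
-@example
-shell> mysqlshow test my_table "%"
-@end example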
-
-@cindex compressed tables
-@cindex tables, compressed
-@cindex MyISAM, compressed tables
-@cindex @code{myisampack}
-@cindex @code{pack_isam}
-@node myisampack, , mysqlshow, Tools
-@section The MySQL Compressed Read-only Table Generator
-
-@code{myisampack} is used to compress MyISAM tables, and @code{pack_isam}
-is used to compress ISAM tables. Because ISAM tables are deprecated, we
-will only discuss @code{myisampack} here, but everything said about
-@code{myisampack} should also be true for @code{pack_isam}.
-
-@code{myisampack} works by compressing each column in the table separately.
-The information needed to decompress columns is read into memory when the
-table is opened. This results in much better performance when accessing
-individual records, because you only have to uncompress exactly one record, not
-a much larger disk block as when using Stacker on MS-DOS.
-Usually, @code{myisampack} packs the data file 40%-70%.
-
-@strong{MySQL} uses memory mapping (@code{mmap()}) on compressed tables and
-falls back to normal read/write file usage if @code{mmap()} doesn't work.
-
-The main limitation of @code{myisampack} is that after packing, the table
-is read-only. Note that, unlike the older @code{pack_isam},
-@code{myisampack} can also pack @code{BLOB} and @code{TEXT} columns.
-
-Removing this limitation is on our TODO list, but with low priority.
-
-@code{myisampack} is invoked like this:
-
-@example
-shell> myisampack [options] filename ...
-@end example
-
-Each filename should be the name of an index (@file{.MYI}) file. If you
-are not in the database directory, you should specify the pathname to the
-file. It is permissible to omit the @file{.MYI} extension.
-
-@code{myisampack} supports the following options:
-
-@table @code
-@item -b, --backup
-Make a backup of the table as @code{tbl_name.OLD}.
-
-@item -#, --debug=debug_options
-Output debug log. The @code{debug_options} string often is
-@code{'d:t:o,filename'}.
-
-@item -f, --force
-Force packing of the table even if it becomes bigger or if the temporary file
-exists. @code{myisampack} creates a temporary file named @file{tbl_name.TMD}
-while it compresses the table. If you kill @code{myisampack}, the @file{.TMD}
-file may not be deleted. Normally, @code{myisampack} exits with an error if
-it finds that @file{tbl_name.TMD} exists. With @code{--force},
-@code{myisampack} packs the table anyway.
-
-@item -?, --help
-Display a help message and exit.
-
-@item -j big_tbl_name, --join=big_tbl_name
-Join all tables named on the command line into a single table
-@code{big_tbl_name}. All tables that are to be combined
-MUST be identical (same column names and types, same indexes, etc.). (An
-example follows this option list.)
-
-@item -p #, --packlength=#
-Specify the record length storage size, in bytes. The value should be 1, 2,
-or 3. (@code{myisampack} stores all rows with length pointers of 1, 2, or 3
-bytes. In most normal cases, @code{myisampack} can determine the right length
-value before it begins packing the file, but it may notice during the packing
-process that it could have used a shorter length. In this case,
-@code{myisampack} will print a note that the next time you pack the same file,
-you could use a shorter record length.)
-
-@item -s, --silent
-Silent mode. Write output only when errors occur.
-
-@item -t, --test
-Don't actually pack table, just test packing it.
-
-@item -T dir_name, --tmp_dir=dir_name
-Use the named directory as the location in which to write the temporary table.
-
-@item -v, --verbose
-Verbose mode. Write information about progress and packing result.
-
-@item -V, --version
-Display version information and exit.
-
-@item -w, --wait
-Wait and retry if table is in use. If the @code{mysqld} server was
-invoked with the @code{--skip-locking} option, it is not a good idea to
-invoke @code{myisampack} if the table might be updated during the
-packing process.
-@end table
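-
-As an example of the @code{--join} option described above, the following
-hypothetical command combines three identically structured tables
-@code{t1}, @code{t2}, and @code{t3} into a single packed table
-@code{big_tbl} (the table names are only placeholders):
-
-@example
-shell> myisampack --join=big_tbl t1.MYI t2.MYI t3.MYI
-@end example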
-
-@cindex examples, compressed tables
-The sequence of commands shown below illustrates a typical table compression
-session:
-
-@example
-shell> ls -l station.*
--rw-rw-r-- 1 monty my 994128 Apr 17 19:00 station.MYD
--rw-rw-r-- 1 monty my 53248 Apr 17 19:00 station.MYI
--rw-rw-r-- 1 monty my 5767 Apr 17 19:00 station.frm
-
-shell> myisamchk -dvv station
-
-MyISAM file: station
-Isam-version: 2
-Creation time: 1996-03-13 10:08:58
-Recover time: 1997-02-02 3:06:43
-Data records: 1192 Deleted blocks: 0
-Datafile: Parts: 1192 Deleted data: 0
-Datafile pointer (bytes): 2 Keyfile pointer (bytes): 2
-Max datafile length: 54657023 Max keyfile length: 33554431
-Recordlength: 834
-Record format: Fixed length
-
-table description:
-Key Start Len Index Type Root Blocksize Rec/key
-1 2 4 unique unsigned long 1024 1024 1
-2 32 30 multip. text 10240 1024 1
-
-Field Start Length Type
-1 1 1
-2 2 4
-3 6 4
-4 10 1
-5 11 20
-6 31 1
-7 32 30
-8 62 35
-9 97 35
-10 132 35
-11 167 4
-12 171 16
-13 187 35
-14 222 4
-15 226 16
-16 242 20
-17 262 20
-18 282 20
-19 302 30
-20 332 4
-21 336 4
-22 340 1
-23 341 8
-24 349 8
-25 357 8
-26 365 2
-27 367 2
-28 369 4
-29 373 4
-30 377 1
-31 378 2
-32 380 8
-33 388 4
-34 392 4
-35 396 4
-36 400 4
-37 404 1
-38 405 4
-39 409 4
-40 413 4
-41 417 4
-42 421 4
-43 425 4
-44 429 20
-45 449 30
-46 479 1
-47 480 1
-48 481 79
-49 560 79
-50 639 79
-51 718 79
-52 797 8
-53 805 1
-54 806 1
-55 807 20
-56 827 4
-57 831 4
-
-shell> myisampack station.MYI
-Compressing station.MYI: (1192 records)
-- Calculating statistics
-
-normal: 20 empty-space: 16 empty-zero: 12 empty-fill: 11
-pre-space: 0 end-space: 12 table-lookups: 5 zero: 7
-Original trees: 57 After join: 17
-- Compressing file
-87.14%
-
-shell> ls -l station.*
--rw-rw-r-- 1 monty my 127874 Apr 17 19:00 station.MYD
--rw-rw-r-- 1 monty my 55296 Apr 17 19:04 station.MYI
--rw-rw-r-- 1 monty my 5767 Apr 17 19:00 station.frm
-
-shell> myisamchk -dvv station
-
-MyISAM file: station
-Isam-version: 2
-Creation time: 1996-03-13 10:08:58
-Recover time: 1997-04-17 19:04:26
-Data records: 1192 Deleted blocks: 0
-Datafile: Parts: 1192 Deleted data: 0
-Datafilepointer (bytes): 3 Keyfile pointer (bytes): 1
-Max datafile length: 16777215 Max keyfile length: 131071
-Recordlength: 834
-Record format: Compressed
-
-table description:
-Key Start Len Index Type Root Blocksize Rec/key
-1 2 4 unique unsigned long 10240 1024 1
-2 32 30 multip. text 54272 1024 1
-
-Field Start Length Type Huff tree Bits
-1 1 1 constant 1 0
-2 2 4 zerofill(1) 2 9
-3 6 4 no zeros, zerofill(1) 2 9
-4 10 1 3 9
-5 11 20 table-lookup 4 0
-6 31 1 3 9
-7 32 30 no endspace, not_always 5 9
-8 62 35 no endspace, not_always, no empty 6 9
-9 97 35 no empty 7 9
-10 132 35 no endspace, not_always, no empty 6 9
-11 167 4 zerofill(1) 2 9
-12 171 16 no endspace, not_always, no empty 5 9
-13 187 35 no endspace, not_always, no empty 6 9
-14 222 4 zerofill(1) 2 9
-15 226 16 no endspace, not_always, no empty 5 9
-16 242 20 no endspace, not_always 8 9
-17 262 20 no endspace, no empty 8 9
-18 282 20 no endspace, no empty 5 9
-19 302 30 no endspace, no empty 6 9
-20 332 4 always zero 2 9
-21 336 4 always zero 2 9
-22 340 1 3 9
-23 341 8 table-lookup 9 0
-24 349 8 table-lookup 10 0
-25 357 8 always zero 2 9
-26 365 2 2 9
-27 367 2 no zeros, zerofill(1) 2 9
-28 369 4 no zeros, zerofill(1) 2 9
-29 373 4 table-lookup 11 0
-30 377 1 3 9
-31 378 2 no zeros, zerofill(1) 2 9
-32 380 8 no zeros 2 9
-33 388 4 always zero 2 9
-34 392 4 table-lookup 12 0
-35 396 4 no zeros, zerofill(1) 13 9
-36 400 4 no zeros, zerofill(1) 2 9
-37 404 1 2 9
-38 405 4 no zeros 2 9
-39 409 4 always zero 2 9
-40 413 4 no zeros 2 9
-41 417 4 always zero 2 9
-42 421 4 no zeros 2 9
-43 425 4 always zero 2 9
-44 429 20 no empty 3 9
-45 449 30 no empty 3 9
-46 479 1 14 4
-47 480 1 14 4
-48 481 79 no endspace, no empty 15 9
-49 560 79 no empty 2 9
-50 639 79 no empty 2 9
-51 718 79 no endspace 16 9
-52 797 8 no empty 2 9
-53 805 1 17 1
-54 806 1 3 9
-55 807 20 no empty 3 9
-56 827 4 no zeros, zerofill(2) 2 9
-57 831 4 no zeros, zerofill(1) 2 9
-@end example
-
-The information printed by @code{myisampack} is described below:
-
-@table @code
-@item normal
-The number of columns for which no extra packing is used.
-
-@item empty-space
-The number of columns containing
-values that are only spaces; these will occupy 1 bit.
-
-@item empty-zero
-The number of columns containing
-values that are only binary 0's; these will occupy 1 bit.
-
-@item empty-fill
-The number of integer columns that don't occupy the full byte range of their
-type; these are changed to a smaller type (for example, an @code{INTEGER}
-column may be changed to @code{MEDIUMINT}).
-
-@item pre-space
-The number of decimal columns that are stored with leading spaces. In this
-case, each value will contain a count for the number of leading spaces.
-
-@item end-space
-The number of columns that have a lot of trailing spaces. In this case, each
-value will contain a count for the number of trailing spaces.
-
-@item table-lookup
-The column had only a small number of different values, which were
-converted to an @code{ENUM} before Huffman compression.
-
-@item zero
-The number of columns for which all values are zero.
-
-@item Original trees
-The initial number of Huffman trees.
-
-@item After join
-The number of distinct Huffman trees left after joining
-trees to save some header space.
-@end table
-
-After a table has been compressed, @code{myisamchk -dvv} prints additional
-information about each field:
-
-@table @code
-@item Type
-The field type may contain the following descriptors:
-
-@table @code
-@item constant
-All rows have the same value.
-
-@item no endspace
-Don't store endspace.
-
-@item no endspace, not_always
-Don't store endspace and don't do end space compression for all values.
-
-@item no endspace, no empty
-Don't store endspace. Don't store empty values.
-
-@item table-lookup
-The column was converted to an @code{ENUM}.
-
-@item zerofill(n)
-The most significant @code{n} bytes in the value are always 0 and are not
-stored.
-
-@item no zeros
-Don't store zeros.
-
-@item always zero
-0 values are stored in 1 bit.
-@end table
-
-@item Huff tree
-The Huffman tree associated with the field.
-
-@item Bits
-The number of bits used in the Huffman tree.
-@end table
-
-After you have run @code{pack_isam}/@code{myisampack} you must run
-@code{isamchk}/@code{myisamchk} to re-create the index. At this time you
-can also sort the index blocks and create statistics needed for
-the @strong{MySQL} optimizer to work more efficiently:
-
-@example
-myisamchk -rq --analyze --sort-index table_name.MYI
-isamchk -rq --analyze --sort-index table_name.ISM
-@end example
-
-After you have installed the packed table into the @strong{MySQL} database
-directory you should do @code{mysqladmin flush-tables} to force @code{mysqld}
-to start using the new table.
-
-If you want to unpack a packed table, you can do this with the
-@code{--unpack} option to @code{isamchk} or @code{myisamchk}.
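-
-For example, to unpack the @code{station} table that was packed in the
-session shown above, you could run something like:
-
-@example
-shell> myisamchk --unpack station
-@end example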
+@node Maintenance, Adding functions, Fulltext Search, Top
+@chapter Maintaining a MySQL Installation
@cindex installation maintenance
@cindex maintaining, tables
@@ -35989,406 +36859,17 @@ If you want to unpack a packed table, you can do this with the
@cindex @code{mysqlcheck}
@cindex crash, recovery
@cindex recovery, from crash
-@node Maintenance, Adding functions, Tools, Top
-@chapter Maintaining a MySQL Installation
+
@menu
-* Table maintenance:: Table maintenance and crash recovery
* Using mysqlcheck:: Using mysqlcheck for maintenance and recovery
-* Maintenance regimen:: Setting up a table maintenance regimen
-* Table-info:: Getting information about a table
-* Crash recovery:: Using @code{myisamchk} for crash recovery
-* Log file maintenance:: Log file maintenance
@end menu
This chapter covers what you should know about maintaining a @strong{MySQL}
distribution. You will learn how to care for your tables on a regular
basis, and what to do when disaster strikes.
-@node Table maintenance, Using mysqlcheck, Maintenance, Maintenance
-@section Using @code{myisamchk} for Table Maintenance and Crash Recovery
-
-Starting with @strong{MySQL} Version 3.23.13, you can check MyISAM
-tables with the @code{CHECK TABLE} command. @xref{CHECK TABLE}. You can
-repair tables with the @code{REPAIR TABLE} command. @xref{REPAIR TABLE}.
-
-To check/repair MyISAM tables (@code{.MYI} and @code{.MYD}) you should
-use the @code{myisamchk} utility. To check/repair ISAM tables
-(@code{.ISM} and @code{.ISD}) you should use the @code{isamchk}
-utility. @xref{Table types}.
-
-In the following text we will talk about @code{myisamchk}, but everything
-also applies to the old @code{isamchk}.
-
-You can use the @code{myisamchk} utility to get information about your
-database tables, check and repair them, or optimize them. The following
-sections describe how to invoke @code{myisamchk} (including a
-description of its options), how to set up a table maintenance schedule,
-and how to use @code{myisamchk} to perform its various functions.
-
-You can, in most cases, also use the @code{OPTIMIZE TABLE} command to
-optimize and repair tables, but this is not as fast or reliable (in the
-case of real fatal errors) as @code{myisamchk}. On the other hand,
-@code{OPTIMIZE TABLE} is easier to use and you don't have to worry about
-flushing tables.
-@xref{OPTIMIZE TABLE, , @code{OPTIMIZE TABLE}}.
-
-Even though the repair in @code{myisamchk} is quite safe, it's always a
-good idea to make a backup BEFORE doing a repair (or anything that could
-make a lot of changes to a table).
-
-@menu
-* myisamchk syntax:: @code{myisamchk} invocation syntax
-* myisamchk memory:: @code{myisamchk} memory usage
-@end menu
-
-@node myisamchk syntax, myisamchk memory, Table maintenance, Table maintenance
-@subsection @code{myisamchk} Invocation Syntax
-
-@code{myisamchk} is invoked like this:
-
-@example
-shell> myisamchk [options] tbl_name
-@end example
-
-The @code{options} specify what you want @code{myisamchk} to do. They are
-described below. (You can also get a list of options by invoking
-@code{myisamchk --help}.) With no options, @code{myisamchk} simply checks your
-table. To get more information or to tell @code{myisamchk} to take corrective
-action, specify options as described below and in the following sections.
-
-@code{tbl_name} is the database table you want to check/repair. If you run
-@code{myisamchk} somewhere other than in the database directory, you must
-specify the path to the file, because @code{myisamchk} has no idea where your
-database is located. Actually, @code{myisamchk} doesn't care whether or not
-the files you are working on are located in a database directory; you can
-copy the files that correspond to a database table into another location and
-perform recovery operations on them there.
-
-You can name several tables on the @code{myisamchk} command line if you
-wish. You can also specify a name as an index file
-name (with the @file{.MYI} suffix), which allows you to specify all
-tables in a directory by using the pattern @file{*.MYI}.
-For example, if you are in a database directory, you can check all the
-tables in the directory like this:
-
-@example
-shell> myisamchk *.MYI
-@end example
-
-If you are not in the database directory, you can check all the tables there
-by specifying the path to the directory:
-
-@example
-shell> myisamchk /path/to/database_dir/*.MYI
-@end example
-
-You can even check all tables in all databases by specifying a wild card
-with the path to the @strong{MySQL} data directory:
-
-@example
-shell> myisamchk /path/to/datadir/*/*.MYI
-@end example
-
-The recommended way to quickly check all tables is:
-
-@example
-myisamchk --silent --fast /path/to/datadir/*/*.MYI
-isamchk --silent /path/to/datadir/*/*.ISM
-@end example
-
-If you want to check all tables and repair all tables that are corrupted,
-you can use the following line:
-
-@example
-myisamchk --silent --force --fast --update-state -O key_buffer=64M -O sort_buffer=64M -O read_buffer=1M -O write_buffer=1M /path/to/datadir/*/*.MYI
-isamchk --silent --force -O key_buffer=64M -O sort_buffer=64M -O read_buffer=1M -O write_buffer=1M /path/to/datadir/*/*.ISM
-@end example
-
-The above assumes that you have more than 64M of memory free.
-
-Note that if you get an error like:
-
-@example
-myisamchk: warning: 1 clients is using or hasn't closed the table properly
-@end example
-
-This means that you are trying to check a table that has been updated by
-another program (such as the @code{mysqld} server) that hasn't yet closed
-the file or that died without closing the file properly.
-
-If @code{mysqld} is running, you must force a sync/close of all
-tables with @code{FLUSH TABLES} and ensure that no one is using the
-tables while you are running @code{myisamchk}. In @strong{MySQL} Version 3.23
-the easiest way to avoid this problem is to use @code{CHECK TABLE}
-instead of @code{myisamchk} to check tables.
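-
-For example, if you are sure no clients will update the table while you
-check it, a session on a hypothetical table @code{tbl_name} could look
-like this:
-
-@example
-shell> mysqladmin flush-tables
-shell> myisamchk tbl_name
-@end example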
-
-@menu
-* myisamchk general options::
-* myisamchk check options::
-* myisamchk repair options::
-* myisamchk other options::
-@end menu
-
-@cindex options, @code{myisamchk}
-@cindex @code{myisamchk}, options
-@node myisamchk general options, myisamchk check options, myisamchk syntax, myisamchk syntax
-@subsubsection General Options for @code{myisamchk}
-
-@code{myisamchk} supports the following options.
-
-@table @code
-@item -# or --debug=debug_options
-Output debug log. The @code{debug_options} string often is
-@code{'d:t:o,filename'}.
-@item -? or --help
-Display a help message and exit.
-@item -O var=option, --set-variable var=option
-Set the value of a variable. The possible variables and their default values
-for myisamchk can be examined with @code{myisamchk --help}:
-@multitable @columnfractions .3 .7
-@item key_buffer_size @tab 523264
-@item read_buffer_size @tab 262136
-@item write_buffer_size @tab 262136
-@item sort_buffer_size @tab 2097144
-@item sort_key_blocks @tab 16
-@item decode_bits @tab 9
-@end multitable
-
-@code{sort_buffer_size} is used when the keys are repaired by sorting
-keys, which is the normal case when you use @code{--recover}.
-
-@code{key_buffer_size} is used when you are checking the table with
-@code{--extend-check} or when the keys are repaired by inserting keys
-row by row into the table (as when doing normal inserts). Repairing
-through the key buffer is used in the following cases:
-
-@itemize @bullet
-@item
-If you use @code{--safe-recover}.
-@item
-If you are using a @code{FULLTEXT} index.
-@item
-If the temporary files needed to sort the keys would be more than twice
-as big as when creating the key file directly. This is often the case
-when you have big @code{CHAR}, @code{VARCHAR} or @code{TEXT} keys, as the
-sort needs to store the whole keys during sorting. If you have lots of
-temporary space, you can force @code{myisamchk} to repair by sorting by
-using the @code{--sort-recover} option.
-@end itemize
-
-Repairing through the key buffer takes much less disk space than using
-sorting, but is also much slower.
-
-If you want a faster repair, set the above variables to about 1/4 of your
-available memory. You can set both variables to big values, as only one
-of the above buffers will be used at a time.
-
-@item -s or --silent
-Silent mode. Write output only when errors occur. You can use @code{-s}
-twice (@code{-ss}) to make @code{myisamchk} very silent.
-@item -v or --verbose
-Verbose mode. Print more information. This can be used with @code{-d} and
-@code{-e}. Use @code{-v} multiple times (@code{-vv}, @code{-vvv}) for more
-verbosity!
-@item -V or --version
-Print the @code{myisamchk} version and exit.
-@item -w or --wait
-Instead of giving an error if the table is locked, wait until the table
-is unlocked before continuing. Note that if you are running @code{mysqld}
-on the table with @code{--skip-locking}, the table can only be locked
-by another @code{myisamchk} command.
-@end table
-
-@cindex check options, myisamchk
-@cindex tables, checking
-@node myisamchk check options, myisamchk repair options, myisamchk general options, myisamchk syntax
-@subsubsection Check Options for @code{myisamchk}
-
-@table @code
-@item -c or --check
-Check table for errors. This is the default operation if you are not
-giving @code{myisamchk} any options that override this.
-
-@item -e or --extend-check
-Check the table VERY thoroughly (which is quite slow if you have many
-indexes). This option should only be used in extreme cases. Normally,
-@code{myisamchk} or @code{myisamchk --medium-check} should be able to
-find out whether there are any errors in the table.
-
-If you are using @code{--extend-check} and have plenty of memory, you
-should increase the value of @code{key_buffer_size} a lot!
-
-@item -F or --fast
-Check only tables that haven't been closed properly.
-@item -C or --check-only-changed
-Check only tables that have changed since the last check.
-@item -f or --force
-Restart @code{myisamchk} with @code{-r} (repair) on the table, if
-@code{myisamchk} finds any errors in the table.
-@item -i or --information
-Print informational statistics about the table that is checked.
-@item -m or --medium-check
-Faster than @code{--extend-check}, but it only finds 99.99% of all errors.
-This should, however, be good enough for most cases.
-@item -U or --update-state
-Store in the @file{.MYI} file when the table was checked and whether the
-table crashed. This should be used to get the full benefit of the
-@code{--check-only-changed} option, but you shouldn't use this
-option if the @code{mysqld} server is using the table and you are
-running @code{mysqld} with @code{--skip-locking}.
-@item -T or --read-only
-Don't mark table as checked. This is useful if you use @code{myisamchk}
-to check a table that is in use by some other application that doesn't
-use locking (like @code{mysqld --skip-locking}).
-@end table
-
-@cindex repair options, myisamchk
-@cindex files, repairing
-@node myisamchk repair options, myisamchk other options, myisamchk check options, myisamchk syntax
-@subsubsection Repair Options for myisamchk
-
-The following options are used if you start @code{myisamchk} with
-@code{-r} or @code{-o}:
-
-@table @code
-@item -D # or --data-file-length=#
-Maximum length of the data file (used when re-creating a data file that
-is 'full').
-@item -e or --extend-check
-Try to recover every possible row from the data file.
-Normally this will also find a lot of garbage rows. Don't use this option
-if you are not totally desperate.
-@item -f or --force
-Overwrite old temporary files (@code{table_name.TMD}) instead of aborting.
-@item -k # or --keys-used=#
-If you are using ISAM, tells the ISAM table handler to update only the
-first @code{#} indexes. If you are using @code{MyISAM}, tells which keys
-to use, where each binary bit stands for one key (first key is bit 0).
-This can be used to get faster inserts. Deactivated indexes can be
-reactivated by using @code{myisamchk -r}.
-@item -l or --no-symlinks
-Do not follow symbolic links. Normally @code{myisamchk} repairs the
-table a symlink points at. This option doesn't exist in MySQL 4.0,
-as MySQL 4.0 will not remove symlinks during repair.
-@item -r or --recover
-Can fix almost anything except unique keys that aren't unique
-(which is an extremely unlikely error with ISAM/MyISAM tables).
-If you want to recover a table, this is the option to try first. You
-should try @code{-o} only if @code{myisamchk} reports that the table
-can't be recovered by @code{-r}. (Note that in the unlikely case that
-@code{-r} fails, the data file is still intact.)
-If you have lots of memory, you should increase the size of
-@code{sort_buffer_size}!
-@item -o or --safe-recover
-Uses an old recovery method (reads through all rows in order and updates
-all index trees based on the rows found); this is an order of magnitude
-slower than @code{-r}, but can handle a couple of very unlikely cases that
-@code{-r} cannot handle. This recovery method also uses much less disk
-space than @code{-r}. Normally one should always first repair with
-@code{-r}, and only if this fails use @code{-o}.
-
-If you have lots of memory, you should increase the size of
-@code{key_buffer_size}!
-@item -n or --sort-recover
-Force @code{myisamchk} to use sorting to resolve the keys even if the
-temporary files should be very big. This will not have any effect if you have
-fulltext keys in the table.
-
-@item --character-sets-dir=...
-Directory where character sets are stored.
-@item --set-character-set=name
-Change the character set used by the index.
-@item -t or --tmpdir=path
-Path for storing temporary files. If this is not set, @code{myisamchk} will
-use the environment variable @code{TMPDIR} for this.
-@item -q or --quick
-Faster repair by not modifying the data file. You can give a second
-@code{-q} to force @code{myisamchk} to modify the original data file in
-case of duplicate keys.
-@item -u or --unpack
-Unpack a file that was packed with @code{myisampack}.
-@end table
-
-@node myisamchk other options, , myisamchk repair options, myisamchk syntax
-@subsubsection Other Options for @code{myisamchk}
-
-@code{myisamchk} can perform other actions besides checking and repairing tables:
-
-@table @code
-@item -a or --analyze
-Analyze the distribution of keys. This improves join performance by
-enabling the join optimizer to better choose the order in which to join
-the tables and which keys it should use. You can check the computed
-distribution with @code{myisamchk --describe --verbose table_name} or
-with @code{SHOW KEYS} in @strong{MySQL}.
-@item -d or --description
-Prints some information about the table.
-@item -A or --set-auto-increment[=value]
-Force @code{auto_increment} to start at this or a higher value. If no
-value is given, the next @code{auto_increment} value is set to the
-highest value used for the auto key, plus 1.
-@item -S or --sort-index
-Sort the index tree blocks in high-low order.
-This will optimize seeks and will make table scanning by key faster.
-@item -R or --sort-records=#
-Sorts records according to an index. This makes your data much more localized
-and may speed up ranged @code{SELECT} and @code{ORDER BY} operations on
-this index. (It may be VERY slow to do a sort the first time!)
-To find out a table's index numbers, use @code{SHOW INDEX}, which shows a
-table's indexes in the same order that @code{myisamchk} sees them. Indexes are
-numbered beginning with 1. (An example follows this option list.)
-@end table
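-
-As an example of @code{--sort-records}, the following hypothetical session
-first looks up the index numbers of a table @code{tbl_name} and then sorts
-its rows according to its first index:
-
-@example
-mysql> SHOW INDEX FROM tbl_name;
-shell> myisamchk --sort-records=1 tbl_name
-@end example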
-
-@cindex memory usage, myisamchk
-@node myisamchk memory, , myisamchk syntax, Table maintenance
-@subsection @code{myisamchk} Memory Usage
-
-Memory allocation is important when you run @code{myisamchk}.
-@code{myisamchk} uses no more memory than you specify with the @code{-O}
-options. If you are going to use @code{myisamchk} on very large files,
-you should first decide how much memory you want it to use. The default
-is to use only about 3M to fix things. By using larger values, you can
-get @code{myisamchk} to operate faster. For example, if you have more
-than 32M RAM, you could use options such as these (in addition to any
-other options you might specify):
-
-@example
-shell> myisamchk -O sort=16M -O key=16M -O read=1M -O write=1M ...
-@end example
-
-Using @code{-O sort=16M} should probably be enough for most cases.
-
-Be aware that @code{myisamchk} uses temporary files in @code{TMPDIR}. If
-@code{TMPDIR} points to a memory file system, you may easily get out of
-memory errors. If this happens, set @code{TMPDIR} to point at some directory
-with more space and restart @code{myisamchk}.
-
-When repairing, @code{myisamchk} will also need a lot of disk space:
-
-@itemize @bullet
-@item
-Double the size of the record file (the original one and a copy). This
-space is not needed if one does a repair with @code{--quick}, as in this
-case only the index file will be re-created. This space is needed on the
-same disk as the original record file!
-@item
-Space for the new index file that replaces the old one. The old
-index file is truncated at the start, so one can usually ignore this
-space. This space is needed on the same disk as the original index file!
-@item
-When using @code{--recover} or @code{--sort-recover}
-(but not when using @code{--safe-recover}), you will need space for a
-sort buffer of size:
-@code{(largest_key + row_pointer_length)*number_of_rows * 2}.
-You can check the length of the keys and the row_pointer_length with
-@code{myisamchk -dv table}.
-This space is allocated on the temporary disk (specified by @code{TMPDIR} or
-@code{--tmpdir=#}).
-@end itemize
-
-If you have a problem with disk space during repair, you can try to use
-@code{--safe-recover} instead of @code{--recover}.
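-
-For example, you could point the temporary files at a file system with
-more free space, or fall back to the slower but less space-hungry repair
-method (the path below is only an illustration):
-
-@example
-shell> myisamchk --recover --tmpdir=/path/to/big/disk tbl_name
-shell> myisamchk --safe-recover tbl_name
-@end example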
-
-@node Using mysqlcheck, Maintenance regimen, Table maintenance, Maintenance
+@node Using mysqlcheck, , Maintenance, Maintenance
@section Using @code{mysqlcheck} for Table Maintenance and Crash Recovery
Since @strong{MySQL} version 3.23.38 you will be able to use a new
@@ -36511,780 +36992,7 @@ Print info about the various stages.
Output version information and exit.
@end table
-@cindex maintaining, tables
-@cindex tables, maintenance regimen
-@node Maintenance regimen, Table-info, Using mysqlcheck, Maintenance
-@section Setting Up a Table Maintenance Regimen
-
-Starting with @strong{MySQL} Version 3.23.13, you can check MyISAM
-tables with the @code{CHECK TABLE} command. @xref{CHECK TABLE}. You can
-repair tables with the @code{REPAIR TABLE} command. @xref{REPAIR TABLE}.
-
-It is a good idea to perform table checks on a regular basis rather than
-waiting for problems to occur. For maintenance purposes, you can use
-@code{myisamchk -s} to check tables. The @code{-s} option (short for
-@code{--silent}) causes @code{myisamchk} to run in silent mode, printing
-messages only when errors occur.
-
-@tindex .pid (process ID) file
-It's also a good idea to check tables when the server starts up.
-For example, whenever the machine has done a reboot in the middle of an
-update, you usually need to check all the tables that could have been
-affected. (This is an ``expected crashed table''.) You could add a test to
-@code{safe_mysqld} that runs @code{myisamchk} to check all tables that have
-been modified during the last 24 hours if there is an old @file{.pid}
-(process ID) file left after a reboot. (The @file{.pid} file is created by
-@code{mysqld} when it starts up and removed when it terminates normally. The
-presence of a @file{.pid} file at system startup time indicates that
-@code{mysqld} terminated abnormally.)
-
-An even better test would be to check any table whose last-modified time
-is more recent than that of the @file{.pid} file.
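-
-A minimal sketch of such a test, with the data directory path and the
-@file{.pid} file name as placeholders, could look like this:
-
-@example
-shell> find /path/to/datadir -name '*.MYI' \
-         -newer /path/to/datadir/hostname.pid \
-         -exec myisamchk --silent --fast @{@} \;
-@end example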
-
-You should also check your tables regularly during normal system
-operation. At @strong{MySQL AB}, we run a @code{cron} job to check all
-our important tables once a week, using a line like this in a @file{crontab}
-file:
-
-@example
-35 0 * * 0 /path/to/myisamchk --fast --silent /path/to/datadir/*/*.MYI
-@end example
-
-This prints out information about crashed tables so we can examine and repair
-them when needed.
-
-As we haven't had any unexpectedly crashed tables (tables that become
-corrupted for reasons other than hardware trouble)
-for a couple of years now (this is really true), once a week is
-more than enough for us.
-
-We recommend that to start with, you execute @code{myisamchk -s} each
-night on all tables that have been updated during the last 24 hours,
-until you come to trust @strong{MySQL} as much as we do.
-
-@cindex tables, defragment
-Normally you don't need to maintain @strong{MySQL} tables very much. If
-you are changing tables with dynamic-size rows (tables with @code{VARCHAR},
-@code{BLOB} or @code{TEXT} columns) or have tables with many deleted rows,
-you may want to defragment/reclaim space from the tables from time to
-time (once a month, for example).
-
-You can do this by using @code{OPTIMIZE TABLE} on the tables in question
-or, if you can take the @code{mysqld} server down for a while, by doing:
-
-@example
-isamchk -r --silent --sort-index -O sort_buffer_size=16M */*.ISM
-myisamchk -r --silent --sort-index -O sort_buffer_size=16M */*.MYI
-@end example
-
-@cindex tables, information
-@node Table-info, Crash recovery, Maintenance regimen, Maintenance
-@section Getting Information About a Table
-
-To get a description of a table or statistics about it, use the commands shown
-below. We explain some of the information in more detail later:
-
-@table @code
-@item myisamchk -d tbl_name
-Runs @code{myisamchk} in ``describe mode'' to produce a description of
-your table. If you start the @strong{MySQL} server using the
-@code{--skip-locking} option, @code{myisamchk} may report an error for a
-table that is updated while it runs. However, because @code{myisamchk}
-doesn't change the table in describe mode, there isn't any risk of
-destroying data.
-
-@item myisamchk -d -v tbl_name
-To produce more information about what @code{myisamchk} is doing, add @code{-v}
-to tell it to run in verbose mode.
-
-@item myisamchk -eis tbl_name
-Shows only the most important information from a table. It is slow because it
-must read the whole table.
-
-@item myisamchk -eiv tbl_name
-This is like @code{-eis}, but tells you what is being done.
-@end table
-
-@cindex examples, @code{myisamchk} output
-@cindex @code{myisamchk}, example output
-Example of @code{myisamchk -d} output:
-@example
-MyISAM file: company.MYI
-Record format: Fixed length
-Data records: 1403698 Deleted blocks: 0
-Recordlength: 226
-
-table description:
-Key Start Len Index Type
-1 2 8 unique double
-2 15 10 multip. text packed stripped
-3 219 8 multip. double
-4 63 10 multip. text packed stripped
-5 167 2 multip. unsigned short
-6 177 4 multip. unsigned long
-7 155 4 multip. text
-8 138 4 multip. unsigned long
-9 177 4 multip. unsigned long
- 193 1 text
-@end example
-
-Example of @code{myisamchk -d -v} output:
-@example
-MyISAM file: company
-Record format: Fixed length
-File-version: 1
-Creation time: 1999-10-30 12:12:51
-Recover time: 1999-10-31 19:13:01
-Status: checked
-Data records: 1403698 Deleted blocks: 0
-Datafile parts: 1403698 Deleted data: 0
-Datafilepointer (bytes): 3 Keyfile pointer (bytes): 3
-Max datafile length: 3791650815 Max keyfile length: 4294967294
-Recordlength: 226
-
-table description:
-Key Start Len Index Type Rec/key Root Blocksize
-1 2 8 unique double 1 15845376 1024
-2 15 10 multip. text packed stripped 2 25062400 1024
-3 219 8 multip. double 73 40907776 1024
-4 63 10 multip. text packed stripped 5 48097280 1024
-5 167 2 multip. unsigned short 4840 55200768 1024
-6 177 4 multip. unsigned long 1346 65145856 1024
-7 155 4 multip. text 4995 75090944 1024
-8 138 4 multip. unsigned long 87 85036032 1024
-9 177 4 multip. unsigned long 178 96481280 1024
- 193 1 text
-@end example
-
-Example of @code{myisamchk -eis} output:
-@example
-Checking MyISAM file: company
-Key: 1: Keyblocks used: 97% Packed: 0% Max levels: 4
-Key: 2: Keyblocks used: 98% Packed: 50% Max levels: 4
-Key: 3: Keyblocks used: 97% Packed: 0% Max levels: 4
-Key: 4: Keyblocks used: 99% Packed: 60% Max levels: 3
-Key: 5: Keyblocks used: 99% Packed: 0% Max levels: 3
-Key: 6: Keyblocks used: 99% Packed: 0% Max levels: 3
-Key: 7: Keyblocks used: 99% Packed: 0% Max levels: 3
-Key: 8: Keyblocks used: 99% Packed: 0% Max levels: 3
-Key: 9: Keyblocks used: 98% Packed: 0% Max levels: 4
-Total: Keyblocks used: 98% Packed: 17%
-
-Records: 1403698 M.recordlength: 226 Packed: 0%
-Recordspace used: 100% Empty space: 0% Blocks/Record: 1.00
-Record blocks: 1403698 Delete blocks: 0
-Recorddata: 317235748 Deleted data: 0
-Lost space: 0 Linkdata: 0
-
-User time 1626.51, System time 232.36
-Maximum resident set size 0, Integral resident set size 0
-Non physical pagefaults 0, Physical pagefaults 627, Swaps 0
-Blocks in 0 out 0, Messages in 0 out 0, Signals 0
-Voluntary context switches 639, Involuntary context switches 28966
-@end example
-
-Example of @code{myisamchk -eiv} output:
-@example
-Checking MyISAM file: company
-Data records: 1403698 Deleted blocks: 0
-- check file-size
-- check delete-chain
-block_size 1024:
-index 1:
-index 2:
-index 3:
-index 4:
-index 5:
-index 6:
-index 7:
-index 8:
-index 9:
-No recordlinks
-- check index reference
-- check data record references index: 1
-Key: 1: Keyblocks used: 97% Packed: 0% Max levels: 4
-- check data record references index: 2
-Key: 2: Keyblocks used: 98% Packed: 50% Max levels: 4
-- check data record references index: 3
-Key: 3: Keyblocks used: 97% Packed: 0% Max levels: 4
-- check data record references index: 4
-Key: 4: Keyblocks used: 99% Packed: 60% Max levels: 3
-- check data record references index: 5
-Key: 5: Keyblocks used: 99% Packed: 0% Max levels: 3
-- check data record references index: 6
-Key: 6: Keyblocks used: 99% Packed: 0% Max levels: 3
-- check data record references index: 7
-Key: 7: Keyblocks used: 99% Packed: 0% Max levels: 3
-- check data record references index: 8
-Key: 8: Keyblocks used: 99% Packed: 0% Max levels: 3
-- check data record references index: 9
-Key: 9: Keyblocks used: 98% Packed: 0% Max levels: 4
-Total: Keyblocks used: 9% Packed: 17%
-
-- check records and index references
-[LOTS OF ROW NUMBERS DELETED]
-
-Records: 1403698 M.recordlength: 226 Packed: 0%
-Recordspace used: 100% Empty space: 0% Blocks/Record: 1.00
-Record blocks: 1403698 Delete blocks: 0
-Recorddata: 317235748 Deleted data: 0
-Lost space: 0 Linkdata: 0
-
-User time 1639.63, System time 251.61
-Maximum resident set size 0, Integral resident set size 0
-Non physical pagefaults 0, Physical pagefaults 10580, Swaps 0
-Blocks in 4 out 0, Messages in 0 out 0, Signals 0
-Voluntary context switches 10604, Involuntary context switches 122798
-@end example
-
-Here are the sizes of the data and index files for the table used in the
-preceding examples:
-
-@example
--rw-rw-r-- 1 monty tcx 317235748 Jan 12 17:30 company.MYD
--rw-rw-r-- 1 davida tcx 96482304 Jan 12 18:35 company.MYI
-@end example
-
-Explanations for the types of information @code{myisamchk} produces are
-given below. The ``keyfile'' is the index file. ``Record'' and ``row''
-are synonymous:
-
-@table @code
-@item ISAM file
-Name of the ISAM (index) file.
-
-@item Isam-version
-Version of ISAM format. Currently always 2.
-
-@item Creation time
-When the data file was created.
-
-@item Recover time
-When the index/data file was last reconstructed.
-
-@item Data records
-How many records are in the table.
-
-@item Deleted blocks
-How many deleted blocks still have reserved space.
-You can optimize your table to minimize this space.
-@xref{Optimization}.
-
-@item Datafile: Parts
-For dynamic record format, this indicates how many data blocks there are. For
-an optimized table without fragmented records, this is the same as @code{Data
-records}.
-
-@item Deleted data
-How many bytes of non-reclaimed deleted data there are.
-You can optimize your table to minimize this space.
-@xref{Optimization}.
-
-@item Datafile pointer
-The size of the data file pointer, in bytes. It is usually 2, 3, 4, or 5
-bytes. Most tables manage with 2 bytes, but this cannot be controlled
-from @strong{MySQL} yet. For fixed tables, this is a record address. For
-dynamic tables, this is a byte address.
-
-@item Keyfile pointer
-The size of the index file pointer, in bytes. It is usually 1, 2, or 3
-bytes. Most tables manage with 2 bytes, but this is calculated
-automatically by @strong{MySQL}. It is always a block address.
-
-@item Max datafile length
-How long the table's data file (@code{.MYD} file) can become, in bytes.
-
-@item Max keyfile length
-How long the table's key file (@code{.MYI} file) can become, in bytes.
-
-@item Recordlength
-How much space each record takes, in bytes.
-
-@item Record format
-The format used to store table rows.
-The examples shown above use @code{Fixed length}.
-Other possible values are @code{Compressed} and @code{Packed}.
-
-@item table description
-A list of all keys in the table. For each key, some low-level information
-is presented:
-
-@table @code
-@item Key
-This key's number.
-
-@item Start
-Where in the record this index part starts.
-
-@item Len
-How long this index part is. For packed numbers, this should always be
-the full length of the column. For strings, it may be shorter than the full
-length of the indexed column, because you can index a prefix of a string
-column.
-
-@item Index
-@code{unique} or @code{multip.} (multiple). Indicates whether or not one value
-can exist multiple times in this index.
-
-@item Type
-What data-type this index part has. This is an ISAM data-type
-with the options @code{packed}, @code{stripped} or @code{empty}.
-
-@item Root
-Address of the root index block.
-
-@item Blocksize
-The size of each index block. By default this is 1024, but the value may be
-changed at compile time.
-
-@item Rec/key
-This is a statistical value used by the optimizer. It tells how many
-records there are per value for this key. A unique key always has a
-value of 1. This may be updated after a table is loaded (or greatly
-changed) with @code{myisamchk -a}. If this is not updated at all, a default
-value of 30 is given.
-@end table
-
-In the first example above, the 9th key is a multi-part key with two parts.
-
-@item Keyblocks used
-What percentage of the keyblocks are used. Because the table used in the
-examples had just been reorganized with @code{myisamchk}, the values are very
-high (very near the theoretical maximum).
-
-@item Packed
-@strong{MySQL} tries to pack keys with a common suffix. This can only be used
-for @code{CHAR}/@code{VARCHAR}/@code{DECIMAL} keys. For long strings like
-names, this can significantly reduce the space used. In the third example
-above, the 4th key is 10 characters long and a 60% reduction in space is
-achieved.
-
-@item Max levels
-How deep the B-tree for this key is. Large tables with long keys get high
-values.
-
-@item Records
-How many rows are in the table.
-
-@item M.recordlength
-The average record length. For tables with fixed-length records, this is the
-exact record length.
-
-@item Packed
-@strong{MySQL} strips spaces from the end of strings. The @code{Packed}
-value indicates the percentage of savings achieved by doing this.
-
-@item Recordspace used
-What percentage of the data file is used.
-
-@item Empty space
-What percentage of the data file is unused.
-
-@item Blocks/Record
-Average number of blocks per record (that is, how many links a fragmented
-record is composed of). This is always 1 for fixed-format tables. This value
-should stay as close to 1.0 as possible. If it gets too big, you can
-reorganize the table with @code{myisamchk}.
-@xref{Optimization}.
-
-@item Recordblocks
-How many blocks (links) are used. For fixed format, this is the same as the number
-of records.
-
-@item Deleteblocks
-How many blocks (links) are deleted.
-
-@item Recorddata
-How many bytes in the data file are used.
-
-@item Deleted data
-How many bytes in the data file are deleted (unused).
-
-@item Lost space
-If a record is updated to a shorter length, some space is lost. This is
-the sum of all such losses, in bytes.
-
-@item Linkdata
-When the dynamic table format is used, record fragments are linked with
-pointers (4 to 7 bytes each). @code{Linkdata} is the sum of the amount of
-storage used by all such pointers.
-@end table
-
-If a table has been compressed with @code{myisampack}, @code{myisamchk
--d} prints additional information about each table column. See
-@ref{myisampack, , @code{myisampack}}, for an example of this
-information and a description of what it means.
-
-@cindex crash, recovery
-@cindex recovery, from crash
-@node Crash recovery, Log file maintenance, Table-info, Maintenance
-@section Using @code{myisamchk} for Crash Recovery
-
-If you run @code{mysqld} with @code{--skip-locking} (which is the default on
-some systems, like Linux), you can't reliably use @code{myisamchk} to
-check a table when @code{mysqld} is using the same table. If you
-can be sure that no one is accessing the tables through @code{mysqld}
-while you run @code{myisamchk}, you only have to do @code{mysqladmin
-flush-tables} before you start checking the tables. If you can't
-guarantee the above, then you must take down @code{mysqld} while you
-check the tables. If you run @code{myisamchk} while @code{mysqld} is updating
-the tables, you may get a warning that a table is corrupt even if it
-isn't.
-
-If you are not using @code{--skip-locking}, you can use @code{myisamchk}
-to check tables at any time. While you do this, all clients that try
-to update the table will wait until @code{myisamchk} is ready before
-continuing.
-
-If you use @code{myisamchk} to repair or optimize tables, you
-@strong{MUST} always ensure that the @code{mysqld} server is not using
-the table (this also applies if you are using @code{--skip-locking}).
-If you don't take down @code{mysqld} you should at least do a
-@code{mysqladmin flush-tables} before you run @code{myisamchk}.
-
-This chapter describes how to check for and deal with data corruption
-in @strong{MySQL} databases. If your tables get corrupted a lot you should
-try to find the reason for this! @xref{Crashing}.
-
-The @code{MyISAM} table section contains reasons why a table could become
-corrupted. @xref{MyISAM table problems}.
-
-When performing crash recovery, it is important to understand that each table
-@code{tbl_name} in a database corresponds to three files in the database
-directory:
-
-@multitable @columnfractions .2 .8
-@item @strong{File} @tab @strong{Purpose}
-@item @file{tbl_name.frm} @tab Table definition (form) file
-@item @file{tbl_name.MYD} @tab Data file
-@item @file{tbl_name.MYI} @tab Index file
-@end multitable
-
-Each of these three file types is subject to corruption in various ways, but
-problems occur most often in data files and index files.
-
-@code{myisamchk} works by creating a copy of the @file{.MYD} (data) file
-row by row. It ends the repair stage by removing the old @file{.MYD}
-file and renaming the new file to the original file name. If you use
-@code{--quick}, @code{myisamchk} does not create a temporary @file{.MYD}
-file, but instead assumes that the @file{.MYD} file is correct and only
-generates a new index file without touching the @file{.MYD} file. This
-is safe, because @code{myisamchk} automatically detects if the
-@file{.MYD} file is corrupt and aborts the repair in this case. You can
-also give two @code{--quick} options to @code{myisamchk}. In this case,
-@code{myisamchk} does not abort on some errors (like duplicate key) but
-instead tries to resolve them by modifying the @file{.MYD}
-file. Normally the use of two @code{--quick} options is useful only if
-you have too little free disk space to perform a normal repair. In this
-case you should at least make a backup before running @code{myisamchk}.
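-
-A hypothetical invocation of this low-disk-space repair could look like
-this (make a backup first, as noted above):
-
-@example
-shell> myisamchk --recover --quick --quick tbl_name
-@end example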
-
-@menu
-* Check:: How to check tables for errors
-* Repair:: How to repair tables
-* Optimization:: Table optimization
-@end menu
-
-@cindex checking, tables for errors
-@cindex tables, error checking
-@cindex errors, checking tables for
-@node Check, Repair, Crash recovery, Crash recovery
-@subsection How to Check Tables for Errors
-
-To check a MyISAM table, use the following commands:
-
-@table @code
-@item myisamchk tbl_name
-This finds 99.99% of all errors. What it can't find is corruption that
-involves @strong{ONLY} the data file (which is very unusual). If you want
-to check a table, you should normally run @code{myisamchk} without options or
-with either the @code{-s} or @code{--silent} option.
-
-@item myisamchk -m tbl_name
-This finds 99.999% of all errors. It first checks all index entries for
-errors and then reads through all rows. It calculates a checksum for all
-keys in the rows and verifies that the checksum matches the checksum for
-the keys in the index tree.
-
-@item myisamchk -e tbl_name
-This does a complete and thorough check of all data (@code{-e} means
-``extended check''). It does a check-read of every key for each row to verify
-that they indeed point to the correct row. This may take a LONG time on a
-big table with many keys. @code{myisamchk} will normally stop after the first
-error it finds. If you want to obtain more information, you can add the
-@code{--verbose} (@code{-v}) option. This causes @code{myisamchk} to keep
-going, up through a maximum of 20 errors. In normal usage, a simple
-@code{myisamchk} (with no arguments other than the table name) is sufficient.
-
-@item myisamchk -e -i tbl_name
-Like the previous command, but the @code{-i} option tells @code{myisamchk} to
-print some informational statistics, too.
-@end table
-
-@cindex tables, repairing
-@cindex repairing, tables
-@node Repair, Optimization, Check, Crash recovery
-@subsection How to Repair Tables
-
-In the following section we only talk about using @code{myisamchk} on
-@code{MyISAM} tables (extensions @code{.MYI} and @code{.MYD}). If you
-are using @code{ISAM} tables (extensions @code{.ISM} and @code{.ISD}),
-you should use @code{isamchk} instead.
-
-Starting with @strong{MySQL} Version 3.23.14, you can repair MyISAM
-tables with the @code{REPAIR TABLE} command. @xref{REPAIR TABLE}.
-
-The symptoms of a corrupted table include queries that abort unexpectedly
-and observable errors such as these:
-
-@itemize @bullet
-@item
-@file{tbl_name.frm} is locked against change
-@item
-Can't find file @file{tbl_name.MYI} (Errcode: ###)
-@item
-Unexpected end of file
-@item
-Record file is crashed
-@item
-Got error ### from table handler
-
-To get more information about the error you can run @code{perror ###}. Here
-are the most common errors that indicate a problem with the table:
-
-@example
-shell> perror 126 127 132 134 135 136 141 144 145
-126 = Index file is crashed / Wrong file format
-127 = Record-file is crashed
-132 = Old database file
-134 = Record was already deleted (or record file crashed)
-135 = No more room in record file
-136 = No more room in index file
-141 = Duplicate unique key or constraint on write or update
-144 = Table is crashed and last repair failed
-145 = Table was marked as crashed and should be repaired
-@end example
-
-Note that error 135, no more room in record file, is not an error that
-can be fixed by a simple repair. In this case you have to do:
-
-@example
-ALTER TABLE table MAX_ROWS=xxx AVG_ROW_LENGTH=yyy;
-@end example
-
-@end itemize
-
-In the other cases, you must repair your tables. @code{myisamchk}
-can usually detect and fix most things that go wrong.
-
-The repair process involves up to four stages, described below. Before you
-begin, you should @code{cd} to the database directory and check the
-permissions of the table files. Make sure they are readable by the Unix user
-that @code{mysqld} runs as (and to you, because you need to access the files
-you are checking). If it turns out you need to modify files, they must also
-be writable by you.
-
-If you are using @strong{MySQL} Version 3.23.16 and above, you can (and
-should) use the @code{CHECK} and @code{REPAIR} commands to check and repair
-@code{MyISAM} tables. @xref{CHECK TABLE}. @xref{REPAIR TABLE}.
-
-The manual section about table maintenance includes the options to
-@code{isamchk}/@code{myisamchk}. @xref{Table maintenance}.
-
-The following section is for the cases where the above command fails or
-if you want to use the extended features that @code{isamchk}/@code{myisamchk} provides.
-
-If you are going to repair a table from the command line, you must first
-take down the @code{mysqld} server. Note that when you do
-@code{mysqladmin shutdown} on a remote server, the @code{mysqld} server
-will still be alive for a while after @code{mysqladmin} returns, until
-all queries are stopped and all keys have been flushed to disk.
-@noindent
-@strong{Stage 1: Checking your tables}
-
-Run @code{myisamchk *.MYI} or @code{myisamchk -e *.MYI} if you have
-more time. Use the @code{-s} (silent) option to suppress unnecessary
-information.
-
-If the @code{mysqld} server is down, you should use the @code{--update-state}
-option to tell @code{myisamchk} to mark the table as 'checked'.
-
-You have to repair only those tables for which @code{myisamchk} announces an
-error. For such tables, proceed to Stage 2.
-
-If you get weird errors when checking (such as @code{out of
-memory} errors), or if @code{myisamchk} crashes, go to Stage 3.
-
-@noindent
-@strong{Stage 2: Easy safe repair}
-
-NOTE: If you want repairing to go much faster, you should add: @code{-O
-sort_buffer=# -O key_buffer=#} (where # is about 1/4 of the available
-memory) to all @code{isamchk/myisamchk} commands.
-
-First, try @code{myisamchk -r -q tbl_name} (@code{-r -q} means ``quick
-recovery mode''). This will attempt to repair the index file without
-touching the data file. If the data file contains everything that it
-should and the delete links point at the correct locations within the
-data file, this should work, and the table is fixed. Start repairing the
-next table. Otherwise, use the following procedure:
-
-@enumerate
-@item
-Make a backup of the data file before continuing.
-
-@item
-Use @code{myisamchk -r tbl_name} (@code{-r} means ``recovery mode''). This will
-remove incorrect records and deleted records from the data file and
-reconstruct the index file.
-
-@item
-If the preceding step fails, use @code{myisamchk --safe-recover tbl_name}.
-Safe recovery mode uses an old recovery method that handles a few cases that
-regular recovery mode doesn't (but is slower).
-@end enumerate
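-
-Put together, a Stage 2 session on a hypothetical table @code{tbl_name}
-might look like this; each later command is needed only if the previous
-one failed:
-
-@example
-shell> myisamchk -r -q tbl_name
-shell> cp tbl_name.MYD tbl_name.MYD-backup
-shell> myisamchk -r tbl_name
-shell> myisamchk --safe-recover tbl_name
-@end example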
-
-If you get weird errors when repairing (such as @code{out of
-memory} errors), or if @code{myisamchk} crashes, go to Stage 3.
-
-@noindent
-@strong{Stage 3: Difficult repair}
-
-You should only reach this stage if the first 16K block in the index file is
-destroyed or contains incorrect information, or if the index file is
-missing. In this case, it's necessary to create a new index file. Do so as
-follows:
-
-@enumerate
-@item
-Move the data file to some safe place.
-
-@item
-Use the table description file to create new (empty) data and index files:
-
-@example
-shell> mysql db_name
-mysql> SET AUTOCOMMIT=1;
-mysql> TRUNCATE TABLE table_name;
-mysql> quit
-@end example
-
-If your SQL version doesn't have @code{TRUNCATE TABLE}, use @code{DELETE FROM
-table_name} instead.
-
-@item
-Copy the old data file back onto the newly created data file.
-(Don't just move the old file back onto the new file; you want to retain
-a copy in case something goes wrong.)
-@end enumerate
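-
-In shell terms, steps 1 and 3 above might look something like this for a
-hypothetical table @code{tbl_name} (the backup directory is only an
-illustration):
-
-@example
-shell> mv tbl_name.MYD /safe/place/tbl_name.MYD
-shell> # ... re-create the empty files as shown in step 2 ...
-shell> cp /safe/place/tbl_name.MYD tbl_name.MYD
-@end example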
-
-Go back to Stage 2. @code{myisamchk -r -q} should work now. (This shouldn't
-be an endless loop.)
-
-@noindent
-@strong{Stage 4: Very difficult repair}
-
-You should reach this stage only if the description file has also
-crashed. That should never happen, because the description file isn't changed
-after the table is created:
-
-@enumerate
-@item
-Restore the description file from a backup and go back to Stage 3. You can
-also restore the index file and go back to Stage 2. In the latter case, you
-should start with @code{myisamchk -r}.
-
-@item
-If you don't have a backup but know exactly how the table was created, create
-a copy of the table in another database. Remove the new data file, then move
-the description and index files from the other database to your crashed
-database. This gives you new description and index files, but leaves
-the data file alone. Go back to Stage 2 and attempt to reconstruct
-the index file.
-@end enumerate
-
-@cindex tables, optimizing
-@cindex optimizing, tables
-@node Optimization, , Repair, Crash recovery
-@subsection Table Optimization
-
-To coalesce fragmented records and eliminate wasted space resulting from
-deleting or updating records, run @code{myisamchk} in recovery mode:
-
-@example
-shell> myisamchk -r tbl_name
-@end example
-
-You can optimize a table in the same way using the SQL @code{OPTIMIZE TABLE}
-statement. @code{OPTIMIZE TABLE} does a repair of the table, performs a
-key analysis, and also sorts the index tree to give faster key lookups.
-There is also no possibility of unwanted interaction between a utility
-and the server, because the server does all the work when you use
-@code{OPTIMIZE TABLE}. @xref{OPTIMIZE TABLE}.
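-
-For example, from the @code{mysql} client (using a placeholder table
-name):
-
-@example
-mysql> OPTIMIZE TABLE tbl_name;
-@end example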
-
-@code{myisamchk} also has a number of other options you can use to improve
-the performance of a table:
-
-@table @code
-@item -S, --sort-index
-@item -R index_num, --sort-records=index_num
-@item -a, --analyze
-@end table
-
-@xref{myisamchk syntax}, for a full description of these options.
-
-@cindex files, log
-@cindex maintaining, log files
-@cindex log files, maintaining
-@node Log file maintenance, , Crash recovery, Maintenance
-@section Log file Maintenance
-
-@strong{MySQL} has a number of log files that make it easy to see what is
-going on. @xref{Log files}. From time to time, however, you must clean up
-after @strong{MySQL} to ensure that the logs don't take up too much disk
-space.
-
-When using @strong{MySQL} with log files, you will, from time to time,
-want to remove/backup old log files and tell @strong{MySQL} to start
-logging on new files. @xref{Backup}.
-
-On a Linux (@code{Redhat}) installation, you can use the
-@code{mysql-log-rotate} script for this. If you installed @strong{MySQL}
-from an RPM distribution, the script should have been installed
-automatically. Note that you should be careful with this if you are using
-the log for replication!
-
-On other systems you must install a short script yourself that you
-start from @code{cron} to handle log files.
-
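-A minimal sketch of such a @code{cron} entry (the schedule and paths are
-only assumptions, and @code{mysqladmin} may need user and password options
-on your system):
-
-@example
-0 3 * * 0 cd /usr/local/mysql/data && mv mysql.log mysql.old && /usr/local/mysql/bin/mysqladmin flush-logs
-@end example
-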
-You can force @strong{MySQL} to start using new log files by using
-@code{mysqladmin flush-logs} or by using the SQL command @code{FLUSH LOGS}.
-If you are using @strong{MySQL} Version 3.21 you must use @code{mysqladmin refresh}.
-
-The above command does the following:
-
-@itemize @bullet
-@item
-If standard logging (@code{--log}) or slow query logging
-(@code{--log-slow-queries}) is used, closes and reopens the log file
-(@file{mysql.log} and @file{`hostname`-slow.log} by default).
-@item
-If update logging (@code{--log-update}) is used, closes the update log and
-opens a new log file with a higher sequence number.
-@end itemize
-
-If you are using only an update log, you only have to flush the logs and then
-move the old update log files to a backup location.
-If you are using normal logging, you can do something like this:
-
-@example
-shell> cd mysql-data-directory
-shell> mv mysql.log mysql.old
-shell> mysqladmin flush-logs
-@end example
-
-and then take a backup and remove @file{mysql.old}.
@cindex functions, new
@cindex adding, new functions
@@ -38547,7 +38255,6 @@ likely it is that we can fix the problem!
@chapter Using MySQL with Some Common Programs
@menu
-* Apache:: Using @strong{MySQL} with Apache
* Borland C++::
@end menu
@@ -38564,32 +38271,9 @@ Find some tips on how to compile @strong{MySQL} and @strong{MySQL}-based
programs using Borland C++.
@end itemize
-@cindex Apache
-@node Apache, Borland C++, Common programs, Common programs
-@section Using MySQL with Apache
-
-The Contrib section includes programs that let you authenticate your
-users from a @strong{MySQL} database and also let you load your Apache log
-files into a @strong{MySQL} table. @xref{Contrib}.
-
-You can change the Apache logging format to be easily readable by
-@strong{MySQL} by putting the following into the Apache configuration file:
-
-@example
-LogFormat \
- "\"%h\",%@{%Y%m%d%H%M%S@}t,%>s,\"%b\",\"%@{Content-Type@}o\", \
- \"%U\",\"%@{Referer@}i\",\"%@{User-Agent@}i\""
-@end example
-
-In @strong{MySQL} you can do something like this:
-
-@example
-LOAD DATA INFILE '/local/access_log' INTO TABLE table_name
-FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' ESCAPED BY '\\'
-@end example
@cindex Borland C++ compiler
-@node Borland C++, , Apache, Common programs
+@node Borland C++, , Common programs, Common programs
@section Borland C++
You can compile the @strong{MySQL} Windows source with Borland C++ 5.02.
@@ -38622,7 +38306,6 @@ pre-allocated MYSQL struct.
* Link errors:: Problems when linking with the @strong{MySQL} client library
* Common errors:: Some common errors when using @strong{MySQL}
* Full disk:: How @strong{MySQL} handles a full disk
-* Multiple sql commands:: How to run SQL commands from a text file
* Temporary files:: Where @strong{MySQL} stores temporary files
* Problems with mysql.sock:: How to protect @file{/tmp/mysql.sock}
* Changing MySQL user:: How to run @strong{MySQL} as a normal user
@@ -39544,7 +39227,7 @@ to be.
@cindex full disk
@cindex disk full
-@node Full disk, Multiple sql commands, Common errors, Problems
+@node Full disk, Temporary files, Common errors, Problems
@section How MySQL Handles a Full Disk
@noindent
@@ -39587,35 +39270,8 @@ it will remove the big temporary files and mark the table as crashed
(except for @code{ALTER TABLE}, in which the old table will be left
unchanged).
-@node Multiple sql commands, Temporary files, Full disk, Problems
-@section How to Run SQL Commands from a Text File
-
-The @code{mysql} client typically is used interactively, like this:
-
-@example
-shell> mysql database
-@end example
-
-However, it's also possible to put your SQL commands in a file and tell
-@code{mysql} to read its input from that file. To do so, create a text
-file @file{text_file} that contains the commands you wish to execute.
-Then invoke @code{mysql} as shown below:
-@example
-shell> mysql database < text_file
-@end example
-
-You can also start your text file with a @code{USE db_name} statement. In
-this case, it is unnecessary to specify the database name on the command
-line:
-
-@example
-shell> mysql < text_file
-@end example
-
-@xref{Programs}.
-
-@node Temporary files, Problems with mysql.sock, Multiple sql commands, Problems
+@node Temporary files, Problems with mysql.sock, Full disk, Problems
@section Where MySQL Stores Temporary Files
@strong{MySQL} uses the value of the @code{TMPDIR} environment variable as
@@ -40284,14 +39940,12 @@ We plan to fix the above in 4.0.
@cindex problems, solving
@cindex solving, problems
@cindex databases, replicating
-@node Common problems, Log files, Problems, Top
+@node Common problems, Clients, Problems, Top
@chapter Solving Some Common Problems with MySQL
@cindex replication
@menu
* Log Replication:: Database replication with update log
-* Backup:: Database backups
-* Multiple servers:: Running multiple @strong{MySQL} servers on the same machine
@end menu
In this chapter, you will find information to solve some of the more common
@@ -40301,7 +39955,7 @@ database using the update or binary logs.
@cindex database replication
@cindex replication, database
-@node Log Replication, Backup, Common problems, Common problems
+@node Log Replication, , Common problems, Common problems
@section Database Replication with Update Log
Now that master-slave internal replication is available starting in
@@ -40343,491 +39997,15 @@ not return the same value as in the original database:
All time functions are safe to use, as the timestamp is sent to the
mirror if needed. @code{LAST_INSERT_ID()} is also safe to use.
-@cindex databases, backups
-@cindex backups
-@node Backup, Multiple servers, Log Replication, Common problems
-@section Database Backups
-
-Because @strong{MySQL} tables are stored as files, it is easy to do a
-backup. To get a consistent backup, do a @code{LOCK TABLES} on the
-relevant tables followed by @code{FLUSH TABLES} for the tables.
-@xref{LOCK TABLES, , @code{LOCK TABLES}}.
-@xref{FLUSH, , @code{FLUSH}}.
-You only need a read lock; this allows other threads to continue to
-query the tables while you are making a copy of the files in the
-database directory. The @code{FLUSH TABLES} is needed to ensure that
-all active index pages are written to disk before you start the backup.
-
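-A minimal sketch of this sequence, with a placeholder table name:
-
-@example
-mysql> LOCK TABLES tbl_name READ;
-mysql> FLUSH TABLES tbl_name;
-    (copy the table files from the database directory here)
-mysql> UNLOCK TABLES;
-@end example
-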
-If you want to make a SQL level backup of a table, you can use
-@code{SELECT INTO OUTFILE} or @code{BACKUP TABLE}. @xref{SELECT}.
-@xref{BACKUP TABLE}.
-
-Another way to back up a database is to use the @code{mysqldump} program or
-the @code{mysqlhotcopy} script. @xref{mysqldump, , @code{mysqldump}}.
-@xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
-
-@enumerate
-@item
-Do a full backup of your databases:
-@example
-shell> mysqldump --tab=/path/to/some/dir --opt --full
-
-or
-
-shell> mysqlhotcopy database /path/to/some/dir
-@end example
-
-You can also simply copy all table files (@file{*.frm}, @file{*.MYD}, and
-@file{*.MYI} files) as long as the server isn't updating anything.
-The @code{mysqlhotcopy} script uses this method.
-
-@item
-@cindex log files, names
-Stop @code{mysqld} if it's running, then start it with the
-@code{--log-update[=file_name]} option. @xref{Update log}. The update
-log file(s) provide you with the information you need to replicate
-changes to the database that are made subsequent to the point at which
-you executed @code{mysqldump}.
-@end enumerate
-
-If you have to restore something, try to recover your tables using
-@code{REPAIR TABLE} or @code{myisamchk -r} first. That should work in
-99.9% of all cases. If @code{myisamchk} fails, try the following
-procedure. (This will work only if you have started @strong{MySQL} with
-@code{--log-update}. @xref{Update log}.)
-
-@enumerate
-@item
-Restore the original @code{mysqldump} backup.
-@item
-Execute the following command to re-run the updates in the binary log:
-
-@example
-shell> mysqlbinlog hostname-bin.[0-9]* | mysql
-@end example
-
-If you are using the update log you can use:
-
-@example
-shell> ls -1 -t -r hostname.[0-9]* | xargs cat | mysql
-@end example
-@end enumerate
-
-@code{ls} is used to get all the update log files in the right order.
-
-You can also do selective backups with @code{SELECT * INTO OUTFILE 'file_name'
-FROM tbl_name} and restore with @code{LOAD DATA INFILE 'file_name' REPLACE
-...}. To avoid duplicate records, you need a @code{PRIMARY KEY} or a
-@code{UNIQUE} key in the table. The @code{REPLACE} keyword causes old records
-to be replaced with new ones when a new record duplicates an old record on
-a unique key value.
-
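-A minimal sketch, with placeholder file and table names:
-
-@example
-mysql> SELECT * INTO OUTFILE '/tmp/tbl_name.txt' FROM tbl_name;
-mysql> LOAD DATA INFILE '/tmp/tbl_name.txt' REPLACE INTO TABLE tbl_name;
-@end example
-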
-If you have performance problems making backups on your system, you can
-solve this by setting up replication and doing the backups on the slave
-instead of on the master. @xref{Replication Intro}.
-
-If you are using a Veritas file system, you can do:
-
-@enumerate
-@item
-From a client, execute @code{FLUSH TABLES WITH READ LOCK}.
-@item
-From a shell or another client, execute @code{mount vxfs snapshot}.
-@item
-From the first client, execute @code{UNLOCK TABLES}.
-@item
-Copy the files from the snapshot.
-@item
-Unmount the snapshot.
-@end enumerate
-
-@cindex multiple servers
-@cindex servers, multiple
-@cindex running, multiple servers
-@node Multiple servers, , Backup, Common problems
-@section Running Multiple MySQL Servers on the Same Machine
-
-There are circumstances when you might want to run multiple servers on the same
-machine. For example, you might want to test a new @strong{MySQL} release
-while leaving your existing production setup undisturbed. Or you might
-be an Internet service provider that wants to provide independent
-@strong{MySQL} installations for different customers.
-
-If you want to run multiple servers, the easiest way is to compile the servers
-with different TCP/IP ports and socket files so they are not
-both listening to the same TCP/IP port or socket file. @xref{mysqld_multi, ,
-@code{mysqld_multi}}.
-
-Assume an existing server is configured for the default port number and
-socket file. Then configure the new server with a @code{configure} command
-something like this:
-
-@example
-shell> ./configure --with-tcp-port=port_number \
- --with-unix-socket-path=file_name \
- --prefix=/usr/local/mysql-3.22.9
-@end example
-
-Here @code{port_number} and @code{file_name} should be different than the
-default port number and socket file pathname, and the @code{--prefix} value
-should specify an installation directory different than the one under which
-the existing @strong{MySQL} installation is located.
-
-You can check the socket used by any currently executing @strong{MySQL} server
-with this command:
-
-@example
-shell> mysqladmin -h hostname --port=port_number variables
-@end example
-
-Note that if you specify ``@code{localhost}'' as a hostname, @code{mysqladmin}
-will default to using Unix sockets instead of TCP/IP.
-
-If you have a @strong{MySQL} server running on the port you used, you will
-get a list of some of the most important configurable variables in
-@strong{MySQL}, including the socket name.
-
-You don't have to recompile a new @strong{MySQL} server just to start with
-a different port and socket. You can change the port and socket to be used
-by specifying them at run time as options to @code{safe_mysqld}:
-
-@example
-shell> /path/to/safe_mysqld --socket=file_name --port=port_number
-@end example
-
-@code{mysqld_multi} can also take @code{safe_mysqld} (or @code{mysqld})
-as an argument and pass the options from a configuration file to
-@code{safe_mysqld} and further to @code{mysqld}.
-
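-A minimal sketch of the corresponding option file groups (the paths, port,
-socket, and group number are placeholders):
-
-@example
-[mysqld_multi]
-mysqld     = /usr/local/mysql/bin/safe_mysqld
-
-[mysqld2]
-port       = 3307
-socket     = /tmp/mysql-second.sock
-datadir    = /usr/local/mysql/data2
-@end example
-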
-If you run the new server on the same database directory as another
-server with logging enabled, you should also specify the name of the log
-files to @code{safe_mysqld} with @code{--log}, @code{--log-update}, or
-@code{--log-slow-queries}. Otherwise, both servers may be trying to
-write to the same log file.
-
-@strong{WARNING}: Normally you should never have two servers that update
-data in the same database! If your OS doesn't support fault-free system
-locking, this may lead to unpleasant surprises!
-
-If you want to use another database directory for the second server, you
-can use the @code{--datadir=path} option to @code{safe_mysqld}.
-
-@strong{NOTE} also that running several @strong{MySQL} servers
-(@code{mysqlds}) on different machines and letting them access one data
-directory over @code{NFS} is generally a @strong{BAD IDEA}! The problem
-is that @code{NFS} will become a speed bottleneck; it is not meant for
-such use. You would also have to come up with a way to make sure that
-two or more @code{mysqlds} do not interfere with each other. At the
-moment there is no platform that does file locking (usually via the
-@code{lockd} daemon) 100% reliably in every situation, and @code{NFS}
-would make the @code{lockd} daemon's work even more complicated. So make
-it easy for yourself and forget about the idea. The working solution is
-to have one computer with an operating system that handles threads
-efficiently and several CPUs in it.
-
-When you want to connect to a @strong{MySQL} server that is running on
-a different port than the one compiled into your client, you
-can use one of the following methods:
-
-@itemize @bullet
-@item
-Start the client with @code{--host 'hostname' --port=port_number} to connect
-with TCP/IP, or @code{[--host localhost] --socket=file_name} to connect via
-a Unix socket.
-
-@item
-In your C or Perl programs, you can give the port or socket arguments
-when connecting to the @strong{MySQL} server.
-
-@item
-If you are using the Perl @code{DBD::mysql} module, you can read the options
-from the @strong{MySQL} option files. @xref{Option files}.
-
-@example
-$dsn = "DBI:mysql:test;mysql_read_default_group=client;mysql_read_default_file=/usr/local/mysql/data/my.cnf";
-$dbh = DBI->connect($dsn, $user, $password);
-@end example
-
-@item
-@tindex MYSQL_UNIX_PORT environment variable
-@tindex MYSQL_TCP_PORT environment variable
-@tindex environment variable, MYSQL_UNIX_PORT
-@tindex environment variable, MYSQL_TCP_PORT
-Set the @code{MYSQL_UNIX_PORT} and @code{MYSQL_TCP_PORT} environment variables
-to point to the Unix socket and TCP/IP port before you start your clients.
-If you normally use a specific socket or port, you should place commands
-to set these environment variables in your @file{.login} file.
-@xref{Environment variables}. @xref{Programs}.
-
-@item
-@tindex .my.cnf file
-Specify the default socket and TCP/IP port in the @file{.my.cnf} file in your
-home directory (a minimal sketch is shown after this list). @xref{Option files}.
-@end itemize
-
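-A minimal sketch of the last two approaches (the port number and socket
-path shown are placeholders):
-
-@example
-shell> MYSQL_UNIX_PORT=/tmp/mysql-new.sock
-shell> MYSQL_TCP_PORT=3307
-shell> export MYSQL_UNIX_PORT MYSQL_TCP_PORT
-@end example
-
-or, in the @file{.my.cnf} file in your home directory:
-
-@example
-[client]
-port=3307
-socket=/tmp/mysql-new.sock
-@end example
-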
-@cindex Log files
-@node Log files, Clients, Common problems, Top
-@chapter The MySQL log files
-
-@strong{MySQL} has several different log files that can help you find
-out what's going on inside @code{mysqld}:
-
-@multitable @columnfractions .3 .7
-@item The error log @tab Problems encountered when starting, running, or stopping @code{mysqld}.
-@item The isam log @tab Logs all changes to the ISAM tables. Used only for debugging the isam code.
-@item The query log @tab Established connections and executed queries.
-@item The update log @tab Deprecated: Stores all statements that change data.
-@item The binary log @tab Stores all statements that change something. Also used for replication.
-@item The slow log @tab Stores all queries that took more than @code{long_query_time} to execute or didn't use indexes.
-@end multitable
-
-All logs can be found in the @code{mysqld} data directory. You can
-force @code{mysqld} to reopen the log files (or in some cases
-switch to a new log) by executing @code{FLUSH LOGS}. @xref{FLUSH}.
-
-@cindex error log
-@cindex files, error log
-@menu
-* Error log::
-* Query log::
-* Update log::
-* Binary log::
-* Slow query log::
-@end menu
-
-@node Error log, Query log, Log files, Log files
-@section The Error Log
-
-@code{mysqld} writes all errors to stderr, which the
-@code{safe_mysqld} script redirects to a file called
-@code{'hostname'.err}. (On Windows, @code{mysqld} writes this directly
-to @file{\mysql\data\mysql.err}).
-
-This contains information indicating when @code{mysqld} was started and
-stopped and also any critical errors found when running. If @code{mysqld}
-dies unexpectedly and @code{safe_mysqld} needs to restart @code{mysqld},
-@code{safe_mysqld} will write a @code{restarted mysqld} row in this
-file. This log also holds a warning if @code{mysqld} notices a table
-that needs to be automatically checked or repaired.
-
-On some operating systems, the error log will contain a stack trace
-for where @code{mysqld} died. This can be used to find out where
-@code{mysqld} died. @xref{Using stack trace}.
-
-@cindex query log
-@cindex files, query log
-@node Query log, Update log, Error log, Log files
-@section The Query Log
-
-If you want to know what happens within @code{mysqld}, you should start
-it with @code{--log[=file]}. This will log all connections and queries
-to the log file (by default named @file{'hostname'.log}). This log can
-be very useful when you suspect an error in a client and want to know
-exactly what @code{mysqld} thought the client sent to it.
-
-By default, the @code{mysql.server} script starts the @strong{MySQL}
-server with the @code{-l} option. If you need better performance when
-you start using @strong{MySQL} in a production environment, you can
-remove the @code{-l} option from @code{mysql.server} or change it to
-@code{--log-bin}.
-
-The entries in this log are written as @code{mysqld} receives the queries.
-This may be different from the order in which the statements are executed.
-This is in contrast to the update log and the binary log which are written
-after the query is executed, but before any locks are released.
-
-@cindex update log
-@cindex files, update log
-@node Update log, Binary log, Query log, Log files
-@section The Update Log
-
-@strong{NOTE}: The update log is replaced by the binary
-log. @xref{Binary log}. With this you can do anything that you can do
-with the update log.
-
-When started with the @code{--log-update[=file_name]} option,
-@code{mysqld} writes a log file containing all SQL commands that update
-data. If no filename is given, it defaults to the name of the host
-machine. If a filename is given, but it doesn't contain a path, the file
-is written in the data directory. If @file{file_name} doesn't have an
-extension, @code{mysqld} will create log file names like so:
-@file{file_name.###}, where @code{###} is a number that is incremented each
-time you execute @code{mysqladmin refresh}, execute @code{mysqladmin
-flush-logs}, execute the @code{FLUSH LOGS} statement, or restart the server.
-
-@strong{NOTE:} For the above scheme to work, you should NOT create your
-own files in the directory used by the update log with the same base name
-as the update log and an extension that could be regarded as a number!
-
-If you use the @code{--log} or @code{-l} options, @code{mysqld} writes a
-general log with a filename of @file{hostname.log}, and restarts and
-refreshes do not cause a new log file to be generated (although it is closed
-and reopened). In this case you can copy it (on Unix) by doing:
-
-@example
-mv hostname.log hostname-old.log
-mysqladmin flush-logs
-cp hostname-old.log to-backup-directory
-rm hostname-old.log
-@end example
-
-Update logging is smart because it logs only statements that really update
-data. So an @code{UPDATE} or a @code{DELETE} with a @code{WHERE} that finds no
-rows is not written to the log. It even skips @code{UPDATE} statements that
-set a column to the value it already has.
-
-The update logging is done immediately after a query completes but before
-any locks are released or any commit is done. This ensures that queries
-are logged in execution order.
-
-If you want to update a database from update log files, you could do the
-following (assuming your update logs have names of the form
-@file{file_name.###}):
-
-@example
-shell> ls -1 -t -r file_name.[0-9]* | xargs cat | mysql
-@end example
-
-@code{ls} is used to get all the log files in the right order.
-
-This can be useful if you have to revert to backup files after a crash
-and you want to redo the updates that occurred between the time of the backup
-and the crash.
-
-@cindex binary log
-@cindex files, binary log
-@node Binary log, Slow query log, Update log, Log files
-@section The Binary Log
-
-In the future the binary log will replace the update log, so we
-recommend that you switch to this log format as soon as possible!
-
-The binary log contains all information that is available in the update
-log in a more efficient format. It also contains information about how long
-every query that updated the database took.
-
-The binary log is also used when you are replicating a slave from a master.
-@xref{Replication}.
-
-When started with the @code{--log-bin[=file_name]} option, @code{mysqld}
-writes a log file containing all SQL commands that update data. If no
-file name is given, it defaults to the name of the host machine followed
-by @code{-bin}. If a file name is given, but it doesn't contain a path, the
-file is written in the data directory.
-
-You can use the following options to @code{mysqld} to affect what is logged
-to the binary log:
-
-@multitable @columnfractions .4 .6
-@item @code{binlog-do-db=database_name} @tab
-Tells the master it should log updates for the specified database, and
-exclude all others not explicitly mentioned.
-(Example: @code{binlog-do-db=some_database})
-
-@item @code{binlog-ignore-db=database_name} @tab
-Tells the master that updates to the given database should not be logged
-to the binary log (Example: @code{binlog-ignore-db=some_database})
-@end multitable
-
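-For example, a minimal sketch of the corresponding @file{my.cnf} lines
-(the database name is a placeholder):
-
-@example
-[mysqld]
-log-bin
-binlog-ignore-db=some_database
-@end example
-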
-@code{mysqld} will append to the binary log file name an extension that is a
-number, incremented each time you execute @code{mysqladmin refresh},
-execute @code{mysqladmin flush-logs}, execute the @code{FLUSH LOGS}
-statement, or restart the server.
-
-To be able to know which binary log files have been used,
-@code{mysqld} will also create a binary log index file that
-contains the names of all used binary log files. By default this has the
-same name as the binary log file, with the extension @code{'.index'}.
-You can change the name of the binary log index file with the
-@code{--log-bin-index=[filename]} option.
-
-If you are using replication, you should not delete old binary log
-files until you are sure that no slave will ever need to use them.
-One way to do this is to do @code{mysqladmin flush-logs} once a day and then
-remove any logs that are more than 3 days old.
-
-You can examine the binary log file with the @code{mysqlbinlog} command.
-For example, you can update a @strong{MySQL} server from the binary log
-as follows:
-
-@example
-mysqlbinlog log-file | mysql -h server_name
-@end example
-
-You can also use the @code{mysqlbinlog} program to read the binary log
-directly from a remote @strong{MySQL} server!
-
-@code{mysqlbinlog --help} will give you more information about how to use
-this program!
-
-If you are using @code{BEGIN [WORK]} or @code{SET AUTOCOMMIT=0}, you must
-use the @strong{MySQL} binary log for backups instead of the old update log.
-
-The binary logging is done immediately after a query completes but before
-any locks are released or any commit is done. This ensures that queries
-are logged in execution order.
-
-All updates (@code{UPDATE}, @code{DELETE} or @code{INSERT}) that change
-a transactional table (like BDB tables) are cached until a @code{COMMIT}.
-Any updates to a non-transactional table are stored in the binary log at
-once. Every thread will, on start, allocate a buffer of
-@code{binlog_cache_size} to buffer queries. If a query is bigger than
-this, the thread will open a temporary file to handle the bigger cache.
-The temporary file will be deleted when the thread ends.
-
-The @code{max_binlog_cache_size} variable can be used to restrict the total
-size used to cache queries in a multi-statement transaction.
-
-If you are using the update or binary log, concurrent inserts will
-not work together with @code{CREATE ... SELECT} and @code{INSERT ... SELECT}.
-This is to ensure that you can recreate an exact copy of your tables by
-applying the log on a backup.
-
-@cindex slow query log
-@cindex files, slow query log
-@node Slow query log, , Binary log, Log files
-@section The Slow Query Log
-
-When started with the @code{--log-slow-queries[=file_name]} option,
-@code{mysqld} writes a log file containing all SQL commands that took
-more than @code{long_query_time} to execute. The time to get the initial
-table locks is not counted as execution time.
-
-A query is written to the slow query log after it has been executed and
-after all locks have been released, so log order may differ from execution
-order.
-
-If no file name is given, it defaults to the name of the host machine
-suffixed with @code{-slow.log}. If a filename is given, but doesn't
-contain a path, the file is written in the data directory.
-
-The slow query log can be used to find queries that take a long time to
-execute and are thus candidates for optimization. With a large log, that
-can become a difficult task. You can pipe the slow query log through the
-@code{mysqldumpslow} command to get a summary of the queries which
-appear in the log.
-
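-For example (the log file name depends on your host name):
-
-@example
-shell> mysqldumpslow hostname-slow.log
-@end example
-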
-If you are using @code{--log-long-format}, queries that are not using
-indexes are also logged. @xref{Command-line options}.
-
-@cindex database replication
-@cindex replication, database
-@cindex database mirroring
-@cindex mirroring, database
-You can also use the update logs when you have a mirrored database on
-another host and you want to replicate the changes that have been made
-to the master database. @xref{Log Replication}.
+@node Clients, MySQL internals, Common problems, Top
+@chapter MySQL APIs
@cindex client tools
@cindex APIs
@cindex @code{mysqlclient} library
@cindex buffer sizes, client
@cindex library, @code{mysqlclient}
-@node Clients, MySQL internals, Log files, Top
-@chapter MySQL APIs
@menu
* C:: @strong{MySQL} C API
@@ -40870,7 +40048,7 @@ Most of the other client APIs (all except Java) use the @code{mysqlclient}
library to communicate with the @strong{MySQL} server. This means that, for
example, you can take advantage of many of the same environment variables
that are used by other client programs, because they are referenced from the
-library. See @ref{Programs}, for a list of these variables.
+library. See @ref{Client-Side Scripts}, for a list of these variables.
The client has a maximum communication buffer size. The size of the buffer
that is allocated initially (16K bytes) is automatically increased up to the
@@ -45927,7 +45105,7 @@ helped to make @strong{MySQL} what it is today.
@end menu
@node Developers, Contributors, Credits, Credits
-@appendixsubsec Developers at MySQL AB
+@appendixsec Developers at MySQL AB
These are the developers that are or have been employed by @strong{MySQL AB}
to work on @strong{MySQL}, roughly in the order they started to work with us.
@@ -46117,7 +45295,7 @@ Allan Larsson (The BOSS for TCX DataKonsult AB).
@cindex contributors, list of
@node Contributors, Supporters, Developers, Credits
-@appendixsubsec Contributors to MySQL
+@appendixsec Contributors to MySQL
Contributors to the @strong{MySQL} distribution are listed here, in
somewhat random order:
@@ -46299,8 +45477,6 @@ For making @code{mysqlaccess} more secure.
@item Albert Chin-A-Young.
Configure updates for Tru64, large file support and better TCP wrappers
support.
-@item Valueclick Inc.
-For sponsoring the optimize section in this manual.
@end table
Other contributors, bugfinders, and testers: James H. Thompson, Maurizio
@@ -46351,7 +45527,7 @@ ODBC and VisualC++ interface questions.
@cindex contributing companies, list of
@node Supporters, , Contributors, Credits
-@appendixsubsec Supporters to MySQL
+@appendixsec Supporters to MySQL
The following companies have helped us finance development of
@strong{MySQL} by either paying us for developing a new feature,
@@ -46496,6 +45672,7 @@ users use this code as the rest of the code and because of this we are
not yet 100% confident in this code.
@menu
+* News-3.23.41::
* News-3.23.40:: Changes in release 3.23.40
* News-3.23.39:: Changes in release 3.23.39
* News-3.23.38:: Changes in release 3.23.38
@@ -46540,7 +45717,15 @@ not yet 100% confident in this code.
* News-3.23.0:: Changes in release 3.23.0
@end menu
-@node News-3.23.40, News-3.23.39, News-3.23.x, News-3.23.x
+@node News-3.23.41, News-3.23.40, News-3.23.x, News-3.23.x
+@appendixsubsec Changes in release 3.23.41
+@itemize @bullet
+@item
+Fixed optimizing bug in @code{ORDER BY} where some @code{ORDER BY} parts
+were wrongly removed.
+@end itemize
+
+@node News-3.23.40, News-3.23.39, News-3.23.41, News-3.23.x
@appendixsubsec Changes in release 3.23.40
@itemize @bullet
@item
@@ -51725,7 +50910,7 @@ queries with @code{EXPLAIN}.
You should also read the OS-specific section in this manual for
problems that may be unique to your environment.
-@xref{Source install system issues}.
+@xref{Operating System Specific Notes}.
@menu
* Compiling for debugging::
@@ -52687,9 +51872,11 @@ All new development is concentrated to @strong{MySQL}.
@node GPL license, LGPL license, Unireg, Top
@appendix GNU GENERAL PUBLIC LICENSE
-@center Version 2, June 1991
-@c This file is intended to be included in another file.
+@cindex GPL, General Public License
+@cindex GPL, GNU General Public License
+
+@center Version 2, June 1991
@display
Copyright @copyright{} 1989, 1991 Free Software Foundation, Inc.
@@ -52699,7 +51886,7 @@ Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
@end display
-@unnumberedsec Preamble
+@appendixsec Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
@@ -52750,9 +51937,10 @@ patent must be licensed for everyone's free use or not licensed at all.
modification follow.
@iftex
-@unnumberedsec TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+@appendixsec TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
@end iftex
@ifinfo
+@center GNU GENERAL PUBLIC LICENSE
@center TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
@end ifinfo
@@ -53012,7 +52200,7 @@ POSSIBILITY OF SUCH DAMAGES.
@end ifinfo
@page
-@unnumberedsec Appendix: How to Apply These Terms to Your New Programs
+@appendixsec How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
@@ -53080,10 +52268,12 @@ Public License instead of this License.
@page
-@node LGPL license, Function Index, GPL license, Top
+@node LGPL license, Placeholder, GPL license, Top
@appendix GNU LESSER GENERAL PUBLIC LICENSE
+
@cindex LGPL, Lesser General Public License
@cindex LGPL, GNU Library General Public License
+
@center Version 2.1, February 1999
@display
@@ -53098,7 +52288,7 @@ as the successor of the GNU Library Public License, version 2, hence the
version number 2.1.]
@end display
-@appendixsubsec Preamble
+@appendixsec Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
@@ -53201,7 +52391,7 @@ former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
@iftex
-@appendixsubsec TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+@appendixsec TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
@end iftex
@ifinfo
@center GNU LESSER GENERAL PUBLIC LICENSE
@@ -53597,7 +52787,7 @@ DAMAGES.
@end ifinfo
@page
-@appendixsubsec How to Apply These Terms to Your New Libraries
+@appendixsec How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
@@ -53646,7 +52836,547 @@ Ty Coon, President of Vice
That's all there is to it!
-@node Function Index, Concept Index, LGPL license, Top
+
+@node Placeholder, Function Index, LGPL license, Top
+@appendix Pieces of the manual in transit
+
+@menu
+* Installing binary::
+* Building clients::
+* Perl support::
+@end menu
+
+@node Installing binary, Building clients, Placeholder, Placeholder
+@appendixsec Installing a MySQL Binary Distribution
+
+@cindex installing, binary distribution
+@cindex binary distributions, installing
+
+@menu
+* Linux-RPM:: Linux RPM files
+* Building clients:: Building client programs
+@end menu
+
+You need the following tools to install a @strong{MySQL} binary distribution:
+
+@itemize @bullet
+@item
+GNU @code{gunzip} to uncompress the distribution.
+
+@item
+A reasonable @code{tar} to unpack the distribution. GNU @code{tar} is
+known to work. Sun @code{tar} is known to have problems.
+@end itemize
+
+@cindex RPM, defined
+@cindex RedHat Package Manager
+An alternative installation method under Linux is to use RPM (RedHat Package
+Manager) distributions. @xref{Linux-RPM}.
+
+@c texi2html fails to split chapters if I use strong for all of this.
+If you run into problems, @strong{PLEASE ALWAYS USE} @code{mysqlbug} when
+posting questions to @email{mysql@@lists.mysql.com}. Even if the problem
+isn't a bug, @code{mysqlbug} gathers system information that will help others
+solve your problem. By not using @code{mysqlbug}, you lessen the likelihood
+of getting a solution to your problem! You will find @code{mysqlbug} in the
+@file{bin} directory after you unpack the distribution. @xref{Bug reports}.
+
+@cindex commands, for binary distribution
+The basic commands you must execute to install and use a @strong{MySQL}
+binary distribution are:
+
+@example
+shell> groupadd mysql
+shell> useradd -g mysql mysql
+shell> cd /usr/local
+shell> gunzip < /path/to/mysql-VERSION-OS.tar.gz | tar xvf -
+shell> ln -s mysql-VERSION-OS mysql
+shell> cd mysql
+shell> scripts/mysql_install_db
+shell> chown -R root /usr/local/mysql
+shell> chown -R mysql /usr/local/mysql/data
+shell> chgrp -R mysql /usr/local/mysql
+shell> chown -R root /usr/local/mysql/bin
+shell> bin/safe_mysqld --user=mysql &
+@end example
+
+@cindex adding, new users
+@cindex new users, adding
+@cindex users, adding
+
+You can add new users using the @code{bin/mysql_setpermission} script if
+you install the @code{DBI} and @code{Msql-Mysql-modules} Perl modules.
+
+A more detailed description follows.
+
+To install a binary distribution, follow the steps below, then proceed
+to @ref{Post-installation}, for post-installation setup and testing:
+
+@enumerate
+@item
+Pick the directory under which you want to unpack the distribution, and move
+into it. In the example below, we unpack the distribution under
+@file{/usr/local} and create a directory @file{/usr/local/mysql} into which
+@strong{MySQL} is installed. (The following instructions therefore assume
+you have permission to create files in @file{/usr/local}. If that directory
+is protected, you will need to perform the installation as @code{root}.)
+
+@item
+Obtain a distribution file from one of the sites listed in
+@ref{Getting MySQL, , Getting @strong{MySQL}}.
+
+@strong{MySQL} binary distributions are provided as compressed @code{tar}
+archives and have names like @file{mysql-VERSION-OS.tar.gz}, where
+@code{VERSION} is a number (for example, @code{3.21.15}), and @code{OS}
+indicates the type of operating system for which the distribution is intended
+(for example, @code{pc-linux-gnu-i586}).
+
+@item
+If you see a binary distribution marked with the @code{-max} prefix, this
+means that the binary has support for transaction-safe tables and other
+features. @xref{mysqld-max, , @code{mysqld-max}}. Note that all binaries
+are built from the same @strong{MySQL} source distribution.
+
+@item
+Add a user and group for @code{mysqld} to run as:
+
+@example
+shell> groupadd mysql
+shell> useradd -g mysql mysql
+@end example
+
+These commands add the @code{mysql} group and the @code{mysql} user. The
+syntax for @code{useradd} and @code{groupadd} may differ slightly on different
+versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
+You may wish to call the user and group something else instead of @code{mysql}.
+
+@item
+Change into the intended installation directory:
+
+@example
+shell> cd /usr/local
+@end example
+
+@item
+Unpack the distribution and create the installation directory:
+
+@example
+shell> gunzip < /path/to/mysql-VERSION-OS.tar.gz | tar xvf -
+shell> ln -s mysql-VERSION-OS mysql
+@end example
+
+The first command creates a directory named @file{mysql-VERSION-OS}. The
+second command makes a symbolic link to that directory. This lets you refer
+more easily to the installation directory as @file{/usr/local/mysql}.
+
+@item
+Change into the installation directory:
+
+@example
+shell> cd mysql
+@end example
+
+You will find several files and subdirectories in the @code{mysql} directory.
+The most important for installation purposes are the @file{bin} and
+@file{scripts} subdirectories.
+
+@table @file
+@item bin
+@tindex PATH environment variable
+@tindex environment variable, PATH
+This directory contains client programs and the server.
+You should add the full pathname of this directory to your
+@code{PATH} environment variable so that your shell finds the @strong{MySQL}
+programs properly (a minimal sketch follows this table). @xref{Environment variables}.
+
+@item scripts
+This directory contains the @code{mysql_install_db} script used to initialize
+the @code{mysql} database containing the grant tables that store the server
+access permissions.
+@end table
+
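+A minimal sketch, assuming a Bourne-compatible shell and the
+@file{/usr/local/mysql} layout used above:
+
+@example
+shell> PATH=$PATH:/usr/local/mysql/bin
+shell> export PATH
+@end example
+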
+@item
+If you would like to use @code{mysqlaccess} and have the @strong{MySQL}
+distribution in some non-standard place, you must change the location where
+@code{mysqlaccess} expects to find the @code{mysql} client. Edit the
+@file{bin/mysqlaccess} script at approximately line 18. Search for a line
+that looks like this:
+
+@example
+$MYSQL = '/usr/local/bin/mysql'; # path to mysql executable
+@end example
+
+Change the path to reflect the location where @code{mysql} actually is
+stored on your system. If you do not do this, you will get a @code{Broken
+pipe} error when you run @code{mysqlaccess}.
+
+@item
+Create the @strong{MySQL} grant tables (necessary only if you haven't
+installed @strong{MySQL} before):
+@example
+shell> scripts/mysql_install_db
+@end example
+
+Note that @strong{MySQL} versions older than Version 3.22.10 started the
+@strong{MySQL} server when you ran @code{mysql_install_db}. This is no
+longer true!
+
+@item
+Change ownership of binaries to @code{root} and ownership of the data
+directory to the user that you will run @code{mysqld} as:
+
+@example
+shell> chown -R root /usr/local/mysql
+shell> chown -R mysql /usr/local/mysql/data
+shell> chgrp -R mysql /usr/local/mysql
+@end example
+
+The first command changes the @code{owner} attribute of the files to the
+@code{root} user, the second one changes the @code{owner} attribute of the
+data directory to the @code{mysql} user, and the third one changes the
+@code{group} attribute to the @code{mysql} group.
+
+@item
+If you want to install support for the Perl @code{DBI}/@code{DBD} interface,
+see @ref{Perl support}.
+
+@item
+If you would like @strong{MySQL} to start automatically when you boot your
+machine, you can copy @code{support-files/mysql.server} to the location where
+your system has its startup files. More information can be found in the
+@code{support-files/mysql.server} script itself and in
+@ref{Automatic start}.
+
+@end enumerate
+
+After everything has been unpacked and installed, you should initialize
+and test your distribution.
+
+You can start the @strong{MySQL} server with the following command:
+
+@example
+shell> bin/safe_mysqld --user=mysql &
+@end example
+
+@xref{safe_mysqld, , @code{safe_mysqld}}.
+
+@xref{Post-installation}.
+
+
+@node Building clients, Perl support, Installing binary, Placeholder
+@appendixsec Building Client Programs
+
+@cindex client programs, building
+@cindex linking
+@cindex building, client programs
+@cindex programs, client
+
+If you compile @strong{MySQL} clients that you've written yourself or that
+you obtain from a third party, they must be linked using the
+@code{-lmysqlclient -lz} option on the link command. You may also need to
+specify a @code{-L} option to tell the linker where to find the library. For
+example, if the library is installed in @file{/usr/local/mysql/lib}, use
+@code{-L/usr/local/mysql/lib -lmysqlclient -lz} on the link command.
+
+For clients that use @strong{MySQL} header files, you may need to specify a
+@code{-I} option when you compile them (for example,
+@code{-I/usr/local/mysql/include}), so the compiler can find the header
+files.
+
+@node Perl support, , Building clients, Placeholder
+@appendixsec Perl Installation Comments
+
+@cindex Perl, installing
+@cindex installing, Perl
+
+@menu
+* Perl installation:: Installing Perl on Unix
+* ActiveState Perl:: Installing ActiveState Perl on Windows
+* Windows Perl:: Installing the @strong{MySQL} Perl distribution on Windows
+* Perl support problems:: Problems using the Perl @code{DBI}/@code{DBD} interface
+@end menu
+
+@node Perl installation, ActiveState Perl, Perl support, Perl support
+@appendixsubsec Installing Perl on Unix
+
+Perl support for @strong{MySQL} is provided by means of the
+@code{DBI}/@code{DBD} client interface. @xref{Perl}. The Perl
+@code{DBD}/@code{DBI} client code requires Perl Version 5.004 or later. The
+interface @strong{will not work} if you have an older version of Perl.
+
+@strong{MySQL} Perl support also requires that you've installed
+@strong{MySQL} client programming support. If you installed @strong{MySQL}
+from RPM files, client programs are in the client RPM, but client programming
+support is in the developer RPM. Make sure you've installed the latter RPM.
+
+As of Version 3.22.8, Perl support is distributed separately from the main
+@strong{MySQL} distribution. If you want to install Perl support, the files
+you will need can be obtained from
+@uref{http://www.mysql.com/Downloads/Contrib/}.
+
+The Perl distributions are provided as compressed @code{tar} archives and
+have names like @file{MODULE-VERSION.tar.gz}, where @code{MODULE} is the
+module name and @code{VERSION} is the version number. You should get the
+@code{Data-Dumper}, @code{DBI}, and @code{Msql-Mysql-modules} distributions
+and install them in that order. The installation procedure is shown below.
+The example shown is for the @code{Data-Dumper} module, but the procedure is
+the same for all three distributions:
+
+@enumerate
+@item
+Unpack the distribution into the current directory:
+@example
+shell> gunzip < Data-Dumper-VERSION.tar.gz | tar xvf -
+@end example
+This command creates a directory named @file{Data-Dumper-VERSION}.
+
+@item
+Change into the top-level directory of the unpacked distribution:
+@example
+shell> cd Data-Dumper-VERSION
+@end example
+
+@item
+Build the distribution and compile everything:
+@example
+shell> perl Makefile.PL
+shell> make
+shell> make test
+shell> make install
+@end example
+@end enumerate
+
+The @code{make test} command is important because it verifies that the
+module is working. Note that when you run that command during the
+@code{Msql-Mysql-modules} installation to exercise the interface code, the
+@strong{MySQL} server must be running or the test will fail.
+
+It is a good idea to rebuild and reinstall the @code{Msql-Mysql-modules}
+distribution whenever you install a new release of @strong{MySQL},
+particularly if you notice symptoms such as all your @code{DBI} scripts
+dumping core after you upgrade @strong{MySQL}.
+
+If you don't have the right to install Perl modules in the system directory
+or if you want to install local Perl modules, the following reference may
+help you:
+
+@example
+@uref{http://www.iserver.com/support/contrib/perl5/modules.html}
+@end example
+
+Look under the heading
+@code{Installing New Modules that Require Locally Installed Modules}.
+
+@node ActiveState Perl, Windows Perl, Perl installation, Perl support
+@appendixsubsec Installing ActiveState Perl on Windows
+
+@cindex installing, Perl on Windows
+@cindex Perl, installing on Windows
+@cindex ActiveState Perl
+
+To install the @strong{MySQL} @code{DBD} module with ActiveState Perl on
+Windows, you should do the following:
+
+@itemize @bullet
+@item
+Get ActiveState Perl from
+@uref{http://www.activestate.com/Products/ActivePerl/index.html}
+and install it.
+
+@item
+Open a DOS shell.
+
+@item
+If required, set the HTTP_proxy variable. For example, you might try:
+
+@example
+set HTTP_proxy=my.proxy.com:3128
+@end example
+
+@item
+Start the PPM program:
+
+@example
+C:\> c:\perl\bin\ppm.pl
+@end example
+
+@item
+If you have not already done so, install @code{DBI}:
+
+@example
+ppm> install DBI
+@end example
+
+@item
+If this succeeds, run the following command:
+
+@example
+install ftp://ftp.de.uu.net/pub/CPAN/authors/id/JWIED/DBD-mysql-1.2212.x86.ppd
+@end example
+@end itemize
+
+The above should work at least with ActiveState Perl Version 5.6.
+
+If you can't get the above to work, you should instead install the
+@strong{MyODBC} driver and connect to @strong{MySQL} server through
+ODBC:
+
+@example
+use DBI;
+$dbh= DBI->connect("DBI:ODBC:$dsn","$user","$password") ||
+ die "Got error $DBI::errstr when connecting to $dsn\n";
+@end example
+
+@node Windows Perl, Perl support problems, ActiveState Perl, Perl support
+@appendixsubsec Installing the MySQL Perl Distribution on Windows
+
+The @strong{MySQL} Perl distribution contains @code{DBI},
+@code{DBD::mysql} and @code{DBD::ODBC}.
+
+@itemize @bullet
+@item
+Get the Perl distribution for Windows from
+@uref{http://www.mysql.com/download.html}.
+
+@item
+Unzip the distribution in @code{C:} so that you get a @file{C:\PERL} directory.
+
+@item
+Add the directory @file{C:\PERL\BIN} to your path.
+
+@item
+Add the directory @file{C:\PERL\BIN\MSWIN32-x86-thread} or
+@file{C:\PERL\BIN\MSWIN32-x86} to your path.
+
+@item
+Test that @code{perl} works by executing @code{perl -v} in a DOS shell.
+@end itemize
+
+@cindex problems, installing Perl
+@cindex Perl DBI/DBD, installation problems
+@node Perl support problems, , Windows Perl, Perl support
+@appendixsubsec Problems Using the Perl @code{DBI}/@code{DBD} Interface
+
+If Perl reports that it can't find the @file{../mysql/mysql.so} module,
+then the problem is probably that Perl can't locate the shared library
+@file{libmysqlclient.so}.
+
+You can fix this by any of the following methods:
+
+@itemize @bullet
+@item
+Compile the @code{Msql-Mysql-modules} distribution with @code{perl
+Makefile.PL -static -config} rather than @code{perl Makefile.PL}.
+
+@item
+Copy @code{libmysqlclient.so} to the directory where your other shared
+libraries are located (probably @file{/usr/lib} or @file{/lib}).
+
+@item
+On Linux you can add the pathname of the directory where
+@file{libmysqlclient.so} is located to the @file{/etc/ld.so.conf} file.
+
+@tindex LD_RUN_PATH environment variable
+@tindex Environment variable, LD_RUN_PATH
+@item
+Add the pathname of the directory where @file{libmysqlclient.so} is located
+to the @code{LD_RUN_PATH} environment variable (a minimal sketch follows
+this list).
+@end itemize
+
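+A minimal sketch of the last method, assuming a Bourne-compatible shell and
+that the library is installed in @file{/usr/local/mysql/lib}:
+
+@example
+shell> LD_RUN_PATH=/usr/local/mysql/lib
+shell> export LD_RUN_PATH
+shell> perl Makefile.PL
+shell> make
+@end example
+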
+If you get the following errors from @code{DBD-mysql},
+you are probably using @code{gcc} (or using an old binary compiled with
+@code{gcc}):
+
+@example
+/usr/bin/perl: can't resolve symbol '__moddi3'
+/usr/bin/perl: can't resolve symbol '__divdi3'
+@end example
+
+Add @code{-L/usr/lib/gcc-lib/... -lgcc} to the link command when the
+@file{mysql.so} library gets built (check the output from @code{make} for
+@file{mysql.so} when you compile the Perl client). The @code{-L} option
+should specify the pathname of the directory where @file{libgcc.a} is located
+on your system.
+
+Another cause of this problem may be that Perl and @strong{MySQL} aren't both
+compiled with @code{gcc}. In this case, you can solve the mismatch by
+compiling both with @code{gcc}.
+
+If you get the following error from @code{Msql-Mysql-modules}
+when you run the tests:
+
+@example
+t/00base............install_driver(mysql) failed: Can't load '../blib/arch/auto/DBD/mysql/mysql.so' for module DBD::mysql: ../blib/arch/auto/DBD/mysql/mysql.so: undefined symbol: uncompress at /usr/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
+@end example
+
+it means that you need to add the compression library, @code{-lz}, to the
+link line. This can be done by making the following change in the file
+@file{lib/DBD/mysql/Install.pm}:
+
+@example
+$sysliblist .= " -lm";
+
+to
+
+$sysliblist .= " -lm -lz";
+@end example
+
+After this, you MUST run @code{make realclean} and then proceed with the
+installation from the beginning.
+
+If you want to use the Perl module on a system that doesn't support dynamic
+linking (like SCO) you can generate a static version of Perl that includes
+@code{DBI} and @code{DBD-mysql}. The way this works is that you generate a
+version of Perl with the @code{DBI} code linked in and install it on top of
+your current Perl. Then you use that to build a version of Perl that
+additionally has the @code{DBD} code linked in, and install that.
+
+On SCO, you must have the following environment variables set:
+
+@example
+shell> LD_LIBRARY_PATH=/lib:/usr/lib:/usr/local/lib:/usr/progressive/lib
+or
+shell> LD_LIBRARY_PATH=/usr/lib:/lib:/usr/local/lib:/usr/ccs/lib:/usr/progressive/lib:/usr/skunk/lib
+shell> LIBPATH=/usr/lib:/lib:/usr/local/lib:/usr/ccs/lib:/usr/progressive/lib:/usr/skunk/lib
+shell> MANPATH=scohelp:/usr/man:/usr/local1/man:/usr/local/man:/usr/skunk/man:
+@end example
+
+First, create a Perl that includes a statically linked @code{DBI} by running
+these commands in the directory where your @code{DBI} distribution is
+located:
+
+@example
+shell> perl Makefile.PL -static -config
+shell> make
+shell> make install
+shell> make perl
+@end example
+
+Then you must install the new Perl. The output of @code{make perl} will
+indicate the exact @code{make} command you will need to execute to perform
+the installation. On SCO, this is @code{make -f Makefile.aperl inst_perl
+MAP_TARGET=perl}.
+
+Next, use the just-created Perl to create another Perl that also includes a
+statically-linked @code{DBD::mysql} by running these commands in the
+directory where your @code{Msql-Mysql-modules} distribution is located:
+
+@example
+shell> perl Makefile.PL -static -config
+shell> make
+shell> make install
+shell> make perl
+@end example
+
+Finally, you should install this new Perl. Again, the output of @code{make
+perl} indicates the command to use.
+
+
+@node Function Index, Concept Index, Placeholder, Top
@unnumbered SQL command, type and function index
@printindex fn
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index 3672edd62e5..79f0a8d584e 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -48,17 +48,19 @@ static MYSQL mysql_connection;
static char *opt_password=0, *current_user=0,
*current_host=0, *current_db=0, *fields_terminated=0,
*lines_terminated=0, *enclosed=0, *opt_enclosed=0,
- *escaped=0, opt_low_priority=0, *opt_columns=0;
+ *escaped=0, opt_low_priority=0, *opt_columns=0,
+ *default_charset;
static uint opt_mysql_port=0;
static my_string opt_mysql_unix_port=0;
#include "sslopt-vars.h"
enum options {OPT_FTB=256, OPT_LTB, OPT_ENC, OPT_O_ENC, OPT_ESC,
- OPT_LOW_PRIORITY, OPT_CHARSETS_DIR};
+ OPT_LOW_PRIORITY, OPT_CHARSETS_DIR, OPT_DEFAULT_CHARSET};
static struct option long_options[] =
{
{"character-sets-dir", required_argument, 0, OPT_CHARSETS_DIR},
+ {"default-character-set", required_argument, 0, OPT_DEFAULT_CHARSET},
{"columns", required_argument, 0, 'c'},
{"compress", no_argument, 0, 'C'},
{"debug", optional_argument, 0, '#'},
@@ -119,6 +121,8 @@ file. The SQL command 'LOAD DATA INFILE' is used to import the rows.\n");
printf("\n\
-#, --debug[=...] Output debug log. Often this is 'd:t:o,filename`\n\
-?, --help Displays this help and exits.\n\
+ --default-character-set=...\n\
+ Set the default character set.\n\
--character-sets-dir=...\n\
Directory where character sets are\n\
-c, --columns=... Use only these columns to import the data to.\n\
@@ -179,6 +183,9 @@ static int get_options(int *argc, char ***argv)
case 'C':
opt_compress=1;
break;
+ case OPT_DEFAULT_CHARSET:
+ default_charset= optarg;
+ break;
case OPT_CHARSETS_DIR:
charsets_dir= optarg;
break;
@@ -269,6 +276,11 @@ static int get_options(int *argc, char ***argv)
fprintf(stderr, "You can't use --ignore (-i) and --replace (-r) at the same time.\n");
return(1);
}
+ if (default_charset)
+ {
+ if (set_default_charset_by_name(default_charset, MYF(MY_WME)))
+ exit(1);
+ }
(*argc)-=optind;
(*argv)+=optind;
if (*argc < 2)
diff --git a/configure.in b/configure.in
index cfe6d323021..87a144b4b57 100644
--- a/configure.in
+++ b/configure.in
@@ -4,7 +4,7 @@ dnl Process this file with autoconf to produce a configure script.
AC_INIT(sql/mysqld.cc)
AC_CANONICAL_SYSTEM
# The Docs Makefile.am parses this line!
-AM_INIT_AUTOMAKE(mysql, 3.23.40)
+AM_INIT_AUTOMAKE(mysql, 3.23.41)
AM_CONFIG_HEADER(config.h)
PROTOCOL_VERSION=10
@@ -1946,9 +1946,13 @@ AC_SUBST(CLIENT_LIBS)
AC_SUBST(sql_client_dirs)
AC_SUBST(linked_client_targets)
-if test "$with_server" = "yes"
+if test "$with_server" = "yes" -o "$THREAD_SAFE_CLIENT" != "no"
then
AC_DEFINE(THREAD)
+fi
+
+if test "$with_server" = "yes"
+then
# Avoid _PROGRAMS names
THREAD_LPROGRAMS="test_thr_alarm test_thr_lock"
AC_SUBST(THREAD_LPROGRAMS)
@@ -2044,7 +2048,10 @@ EOF
AC_DEFINE(HAVE_GEMINI_DB)
fi
+fi
+if test "$with_server" = "yes" -o "$THREAD_SAFE_CLIENT" != "no"
+then
if test "$with_posix_threads" = "no" -o "$with_mit_threads" = "yes"
then
# MIT user level threads
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 8a5eea1024c..5b22d58150d 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -227,6 +227,7 @@ void hash_password(unsigned long *result, const char *password);
void my_init(void);
void load_defaults(const char *conf_file, const char **groups,
int *argc, char ***argv);
+void my_thread_end(void);
#define NULL_LENGTH ((unsigned long) ~0) /* For net_store_length */
diff --git a/include/mysqld_error.h b/include/mysqld_error.h
index cfcf7672013..32967931eac 100644
--- a/include/mysqld_error.h
+++ b/include/mysqld_error.h
@@ -210,4 +210,5 @@
#define ER_READ_ONLY_TRANSACTION 1207
#define ER_DROP_DB_WITH_READ_LOCK 1208
#define ER_CREATE_DB_WITH_READ_LOCK 1209
-#define ER_ERROR_MESSAGES 210
+#define ER_WRONG_ARGUMENTS 1210
+#define ER_ERROR_MESSAGES 211
diff --git a/innobase/btr/btr0btr.c b/innobase/btr/btr0btr.c
index 2507f805cd6..af2029bf1e8 100644
--- a/innobase/btr/btr0btr.c
+++ b/innobase/btr/btr0btr.c
@@ -71,30 +71,6 @@ btr_page_create(
dict_tree_t* tree, /* in: index tree */
mtr_t* mtr); /* in: mtr */
/******************************************************************
-Allocates a new file page to be used in an index tree. */
-static
-page_t*
-btr_page_alloc(
-/*===========*/
- /* out: new allocated page,
- x-latched */
- dict_tree_t* tree, /* in: index tree */
- ulint hint_page_no, /* in: hint of a good page */
- byte file_direction, /* in: direction where a possible
- page split is made */
- ulint level, /* in: level where the page is placed
- in the tree */
- mtr_t* mtr); /* in: mtr */
-/******************************************************************
-Frees a file page used in an index tree. */
-static
-void
-btr_page_free(
-/*==========*/
- dict_tree_t* tree, /* in: index tree */
- page_t* page, /* in, own: page to be freed */
- mtr_t* mtr); /* in: mtr */
-/******************************************************************
Sets the child node file address in a node pointer. */
UNIV_INLINE
void
@@ -319,11 +295,12 @@ btr_page_alloc_for_ibuf(
/******************************************************************
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents! */
-static
+
page_t*
btr_page_alloc(
/*===========*/
- /* out: new allocated page, x-latched */
+ /* out: new allocated page, x-latched;
+ NULL if out of space */
dict_tree_t* tree, /* in: index tree */
ulint hint_page_no, /* in: hint of a good page */
byte file_direction, /* in: direction where a possible
@@ -358,7 +335,10 @@ btr_page_alloc(
new_page_no = fseg_alloc_free_page_general(seg_header, hint_page_no,
file_direction, TRUE, mtr);
- ut_a(new_page_no != FIL_NULL);
+ if (new_page_no == FIL_NULL) {
+
+ return(NULL);
+ }
new_page = buf_page_get(dict_tree_get_space(tree), new_page_no,
RW_X_LATCH, mtr);
@@ -435,20 +415,22 @@ btr_page_free_for_ibuf(
}
/******************************************************************
-Frees a file page used in an index tree. */
-static
+Frees a file page used in an index tree. Can also be used to free (BLOB)
+external storage pages, because the page level 0 can be given as an
+argument. */
+
void
-btr_page_free(
-/*==========*/
+btr_page_free_low(
+/*==============*/
dict_tree_t* tree, /* in: index tree */
page_t* page, /* in: page to be freed, x-latched */
+ ulint level, /* in: page level */
mtr_t* mtr) /* in: mtr */
{
fseg_header_t* seg_header;
page_t* root;
ulint space;
ulint page_no;
- ulint level;
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
MTR_MEMO_PAGE_X_FIX));
@@ -465,8 +447,6 @@ btr_page_free(
}
root = btr_root_get(tree, mtr);
-
- level = btr_page_get_level(page, mtr);
if (level == 0) {
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
@@ -481,6 +461,26 @@ btr_page_free(
}
/******************************************************************
+Frees a file page used in an index tree. NOTE: cannot free field external
+storage pages because the page must contain info on its level. */
+
+void
+btr_page_free(
+/*==========*/
+ dict_tree_t* tree, /* in: index tree */
+ page_t* page, /* in: page to be freed, x-latched */
+ mtr_t* mtr) /* in: mtr */
+{
+ ulint level;
+
+ ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
+ MTR_MEMO_PAGE_X_FIX));
+ level = btr_page_get_level(page, mtr);
+
+ btr_page_free_low(tree, page, level, mtr);
+}
+
+/******************************************************************
Sets the child node file address in a node pointer. */
UNIV_INLINE
void
@@ -1276,6 +1276,7 @@ btr_insert_on_non_leaf_level(
dtuple_t* tuple, /* in: the record to be inserted */
mtr_t* mtr) /* in: mtr */
{
+ big_rec_t* dummy_big_rec;
btr_cur_t cursor;
ulint err;
rec_t* rec;
@@ -1294,7 +1295,7 @@ btr_insert_on_non_leaf_level(
| BTR_KEEP_SYS_FLAG
| BTR_NO_UNDO_LOG_FLAG,
&cursor, tuple,
- &rec, NULL, mtr);
+ &rec, &dummy_big_rec, NULL, mtr);
ut_a(err == DB_SUCCESS);
}
diff --git a/innobase/btr/btr0cur.c b/innobase/btr/btr0cur.c
index a8680c6b380..e8ff88c6f4f 100644
--- a/innobase/btr/btr0cur.c
+++ b/innobase/btr/btr0cur.c
@@ -12,7 +12,7 @@ many pages in the tablespace before we start the operation, because
if leaf splitting has been started, it is difficult to undo, except
by crashing the database and doing a roll-forward.
-(c) 1994-1996 Innobase Oy
+(c) 1994-2001 Innobase Oy
Created 10/16/1994 Heikki Tuuri
*******************************************************/
@@ -49,6 +49,15 @@ can be released by page reorganize, then it is reorganized */
this many index pages */
#define BTR_KEY_VAL_ESTIMATE_N_PAGES 8
+/* The structure of a BLOB part header */
+/*--------------------------------------*/
+#define BTR_BLOB_HDR_PART_LEN 0 /* BLOB part len on this
+ page */
+#define BTR_BLOB_HDR_NEXT_PAGE_NO 4 /* next BLOB part page no,
+ FIL_NULL if none */
+/*--------------------------------------*/
+#define BTR_BLOB_HDR_SIZE 8
+
/***********************************************************************
Adds path information to the cursor for the current page, for which
the binary search has been performed. */
@@ -60,6 +69,19 @@ btr_cur_add_path_info(
ulint height, /* in: height of the page in tree;
0 means leaf node */
ulint root_height); /* in: root node height in tree */
+/***************************************************************
+Frees the externally stored fields for a record, if the field is mentioned
+in the update vector. */
+static
+void
+btr_rec_free_updated_extern_fields(
+/*===============================*/
+ dict_index_t* index, /* in: index of rec; the index tree MUST be
+ X-latched */
+ rec_t* rec, /* in: record */
+ upd_t* update, /* in: update vector */
+ mtr_t* mtr); /* in: mini-transaction handle which contains
+ an X-latch to record page and to the tree */
/*==================== B-TREE SEARCH =========================*/
@@ -745,9 +767,13 @@ btr_cur_optimistic_insert(
dtuple_t* entry, /* in: entry to insert */
rec_t** rec, /* out: pointer to inserted record if
succeed */
+ big_rec_t** big_rec,/* out: big rec vector whose fields have to
+ be stored externally by the caller, or
+ NULL */
que_thr_t* thr, /* in: query thread or NULL */
mtr_t* mtr) /* in: mtr */
{
+ big_rec_t* big_rec_vec = NULL;
dict_index_t* index;
page_cur_t* page_cursor;
page_t* page;
@@ -764,6 +790,8 @@ btr_cur_optimistic_insert(
ut_ad(dtuple_check_typed(entry));
+ *big_rec = NULL;
+
page = btr_cur_get_page(cursor);
index = cursor->index;
@@ -772,15 +800,27 @@ btr_cur_optimistic_insert(
max_size = page_get_max_insert_size_after_reorganize(page, 1);
level = btr_page_get_level(page, mtr);
+calculate_sizes_again:
/* Calculate the record size when entry is converted to a record */
data_size = dtuple_get_data_size(entry);
extra_size = rec_get_converted_extra_size(data_size,
dtuple_get_n_fields(entry));
rec_size = data_size + extra_size;
- if (rec_size >= page_get_free_space_of_empty() / 2) {
+ if ((rec_size >= page_get_free_space_of_empty() / 2)
+ || (rec_size >= REC_MAX_DATA_SIZE)) {
+
+ /* The record is so big that we have to store some fields
+ externally on separate database pages */
+
+ big_rec_vec = dtuple_convert_big_rec(index, entry);
+
+ if (big_rec_vec == NULL) {
+
+ return(DB_TOO_BIG_RECORD);
+ }
- return(DB_TOO_BIG_RECORD);
+ goto calculate_sizes_again;
}
/* If there have been many consecutive inserts, and we are on the leaf
@@ -795,7 +835,11 @@ btr_cur_optimistic_insert(
&& (0 == level)
&& (btr_page_get_split_rec_to_right(cursor, &dummy_rec)
|| btr_page_get_split_rec_to_left(cursor, &dummy_rec))) {
-
+
+ if (big_rec_vec) {
+ dtuple_convert_back_big_rec(index, entry, big_rec_vec);
+ }
+
return(DB_FAIL);
}
@@ -804,6 +848,9 @@ btr_cur_optimistic_insert(
|| (page_get_max_insert_size(page, 1) >= rec_size)
|| (page_get_n_recs(page) <= 1))) {
+ if (big_rec_vec) {
+ dtuple_convert_back_big_rec(index, entry, big_rec_vec);
+ }
return(DB_FAIL);
}
@@ -812,6 +859,9 @@ btr_cur_optimistic_insert(
if (err != DB_SUCCESS) {
+ if (big_rec_vec) {
+ dtuple_convert_back_big_rec(index, entry, big_rec_vec);
+ }
return(err);
}
@@ -835,6 +885,19 @@ btr_cur_optimistic_insert(
*rec = page_cur_tuple_insert(page_cursor, entry, mtr);
+ if (!(*rec)) {
+ char* err_buf = mem_alloc(1000);
+
+ dtuple_sprintf(err_buf, 900, entry);
+
+ fprintf(stderr,
+ "InnoDB: Error: cannot insert tuple %s to index %s of table %s\n"
+ "InnoDB: max insert size %lu\n",
+ err_buf, index->name, index->table->name, max_size);
+
+ mem_free(err_buf);
+ }
+
ut_a(*rec); /* <- We calculated above the record would fit */
}
@@ -845,6 +908,7 @@ btr_cur_optimistic_insert(
btr_search_update_hash_on_insert(cursor);
}
#endif
+
if (!(flags & BTR_NO_LOCKING_FLAG) && inherit) {
lock_update_insert(*rec);
@@ -860,6 +924,8 @@ btr_cur_optimistic_insert(
rec_size + PAGE_DIR_SLOT_SIZE);
}
+ *big_rec = big_rec_vec;
+
return(DB_SUCCESS);
}
@@ -884,17 +950,24 @@ btr_cur_pessimistic_insert(
dtuple_t* entry, /* in: entry to insert */
rec_t** rec, /* out: pointer to inserted record if
succeed */
+ big_rec_t** big_rec,/* out: big rec vector whose fields have to
+ be stored externally by the caller, or
+ NULL */
que_thr_t* thr, /* in: query thread or NULL */
mtr_t* mtr) /* in: mtr */
{
- page_t* page;
- ulint err;
- ibool dummy_inh;
- ibool success;
- ulint n_extents = 0;
+ dict_index_t* index = cursor->index;
+ big_rec_t* big_rec_vec = NULL;
+ page_t* page;
+ ulint err;
+ ibool dummy_inh;
+ ibool success;
+ ulint n_extents = 0;
ut_ad(dtuple_check_typed(entry));
+ *big_rec = NULL;
+
page = btr_cur_get_page(cursor);
ut_ad(mtr_memo_contains(mtr,
@@ -908,8 +981,8 @@ btr_cur_pessimistic_insert(
cursor->flag = BTR_CUR_BINARY;
- err = btr_cur_optimistic_insert(flags, cursor, entry, rec, thr, mtr);
-
+ err = btr_cur_optimistic_insert(flags, cursor, entry, rec, big_rec,
+ thr, mtr);
if (err != DB_FAIL) {
return(err);
@@ -932,7 +1005,7 @@ btr_cur_pessimistic_insert(
n_extents = cursor->tree_height / 16 + 3;
- success = fsp_reserve_free_extents(cursor->index->space,
+ success = fsp_reserve_free_extents(index->space,
n_extents, FSP_NORMAL, mtr);
if (!success) {
err = DB_OUT_OF_FILE_SPACE;
@@ -941,7 +1014,22 @@ btr_cur_pessimistic_insert(
}
}
- if (dict_tree_get_page(cursor->index->tree)
+ if ((rec_get_converted_size(entry)
+ >= page_get_free_space_of_empty() / 2)
+ || (rec_get_converted_size(entry) >= REC_MAX_DATA_SIZE)) {
+
+ /* The record is so big that we have to store some fields
+ externally on separate database pages */
+
+ big_rec_vec = dtuple_convert_big_rec(index, entry);
+
+ if (big_rec_vec == NULL) {
+
+ return(DB_TOO_BIG_RECORD);
+ }
+ }
+
+ if (dict_tree_get_page(index->tree)
== buf_frame_get_page_no(page)) {
/* The page is the root page */
@@ -950,7 +1038,7 @@ btr_cur_pessimistic_insert(
*rec = btr_page_split_and_insert(cursor, entry, mtr);
}
- btr_cur_position(cursor->index, page_rec_get_prev(*rec), cursor);
+ btr_cur_position(index, page_rec_get_prev(*rec), cursor);
#ifdef BTR_CUR_ADAPT
btr_search_update_hash_on_insert(cursor);
@@ -963,9 +1051,11 @@ btr_cur_pessimistic_insert(
err = DB_SUCCESS;
if (n_extents > 0) {
- fil_space_release_free_extents(cursor->index->space, n_extents);
+ fil_space_release_free_extents(index->space, n_extents);
}
-
+
+ *big_rec = big_rec_vec;
+
return(err);
}
@@ -1227,7 +1317,8 @@ btr_cur_optimistic_update(
dulint roll_ptr;
trx_t* trx;
mem_heap_t* heap;
- ibool reorganized = FALSE;
+ ibool reorganized = FALSE;
+ ulint i;
/* Only clustered index records are updated using this function */
ut_ad((cursor->index)->type & DICT_CLUSTERED);
@@ -1247,6 +1338,23 @@ btr_cur_optimistic_update(
cmpl_info, thr, mtr));
}
+ for (i = 0; i < upd_get_n_fields(update); i++) {
+ if (upd_get_nth_field(update, i)->extern_storage) {
+
+ /* Externally stored fields are treated in pessimistic
+ update */
+
+ return(DB_OVERFLOW);
+ }
+ }
+
+ if (rec_contains_externally_stored_field(btr_cur_get_rec(cursor))) {
+ /* Externally stored fields are treated in pessimistic
+ update */
+
+ return(DB_OVERFLOW);
+ }
+
page_cursor = btr_cur_get_page_cur(cursor);
heap = mem_heap_create(1024);
@@ -1260,9 +1368,9 @@ btr_cur_optimistic_update(
if (new_rec_size >= page_get_free_space_of_empty() / 2) {
- mem_heap_free(heap);
+ mem_heap_free(heap);
- return(DB_TOO_BIG_RECORD);
+ return(DB_OVERFLOW);
}
max_size = old_rec_size
@@ -1377,6 +1485,48 @@ btr_cur_pess_upd_restore_supremum(
rec);
}
+/***************************************************************
+Replaces and copies the data in the new column values stored in the
+update vector to the clustered index entry given. */
+static
+void
+btr_cur_copy_new_col_vals(
+/*======================*/
+ dtuple_t* entry, /* in/out: index entry where replaced */
+ upd_t* update, /* in: update vector */
+ mem_heap_t* heap) /* in: heap where data is copied */
+{
+ upd_field_t* upd_field;
+ dfield_t* dfield;
+ dfield_t* new_val;
+ ulint field_no;
+ byte* data;
+ ulint i;
+
+ dtuple_set_info_bits(entry, update->info_bits);
+
+ for (i = 0; i < upd_get_n_fields(update); i++) {
+
+ upd_field = upd_get_nth_field(update, i);
+
+ field_no = upd_field->field_no;
+
+ dfield = dtuple_get_nth_field(entry, field_no);
+
+ new_val = &(upd_field->new_val);
+
+ if (new_val->len == UNIV_SQL_NULL) {
+ data = NULL;
+ } else {
+ data = mem_heap_alloc(heap, new_val->len);
+
+ ut_memcpy(data, new_val->data, new_val->len);
+ }
+
+ dfield_set_data(dfield, data, new_val->len);
+ }
+}
+
/*****************************************************************
Performs an update of a record on a page of a tree. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. If the
@@ -1389,8 +1539,9 @@ btr_cur_pessimistic_update(
/* out: DB_SUCCESS or error code */
ulint flags, /* in: undo logging, locking, and rollback
flags */
- btr_cur_t* cursor, /* in: cursor on the record to update;
- cursor does not stay valid */
+ btr_cur_t* cursor, /* in: cursor on the record to update */
+ big_rec_t** big_rec,/* out: big rec vector whose fields have to
+ be stored externally by the caller, or NULL */
upd_t* update, /* in: update vector; this is allowed also
contain trx id and roll ptr fields, but
the values in update vector have no effect */
@@ -1399,6 +1550,8 @@ btr_cur_pessimistic_update(
que_thr_t* thr, /* in: query thread */
mtr_t* mtr) /* in: mtr */
{
+ big_rec_t* big_rec_vec = NULL;
+ big_rec_t* dummy_big_rec;
dict_index_t* index;
page_t* page;
dict_tree_t* tree;
@@ -1414,6 +1567,11 @@ btr_cur_pessimistic_update(
ibool was_first;
ibool success;
ulint n_extents = 0;
+ ulint* ext_vect;
+ ulint n_ext_vect;
+ ulint reserve_flag;
+
+ *big_rec = NULL;
page = btr_cur_get_page(cursor);
rec = btr_cur_get_rec(cursor);
@@ -1449,8 +1607,14 @@ btr_cur_pessimistic_update(
n_extents = cursor->tree_height / 16 + 3;
+ if (flags & BTR_NO_UNDO_LOG_FLAG) {
+ reserve_flag = FSP_CLEANING;
+ } else {
+ reserve_flag = FSP_NORMAL;
+ }
+
success = fsp_reserve_free_extents(cursor->index->space,
- n_extents, FSP_NORMAL, mtr);
+ n_extents, reserve_flag, mtr);
if (!success) {
err = DB_OUT_OF_FILE_SPACE;
@@ -1464,7 +1628,7 @@ btr_cur_pessimistic_update(
new_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
- row_upd_clust_index_replace_new_col_vals(new_entry, update);
+ btr_cur_copy_new_col_vals(new_entry, update, heap);
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
@@ -1487,17 +1651,49 @@ btr_cur_pessimistic_update(
lock_rec_store_on_page_infimum(rec);
btr_search_update_hash_on_delete(cursor);
+
+ if (flags & BTR_NO_UNDO_LOG_FLAG) {
+ /* We are in a transaction rollback undoing a row
+ update: we must free possible externally stored fields
+ which got new values in the update */
+
+ ut_a(big_rec_vec == NULL);
+
+ btr_rec_free_updated_extern_fields(index, rec, update, mtr);
+ }
+
+ /* We have to set appropriate extern storage bits in the new
+ record to be inserted: we have to remember which fields were such */
+
+ ext_vect = mem_heap_alloc(heap, sizeof(ulint) * rec_get_n_fields(rec));
+ n_ext_vect = btr_push_update_extern_fields(ext_vect, rec, update);
+
page_cur_delete_rec(page_cursor, mtr);
page_cur_move_to_prev(page_cursor);
- if (optim_err == DB_UNDERFLOW) {
- rec = btr_cur_insert_if_possible(cursor, new_entry,
+ if ((rec_get_converted_size(new_entry) >=
+ page_get_free_space_of_empty() / 2)
+ || (rec_get_converted_size(new_entry) >= REC_MAX_DATA_SIZE)) {
+
+ big_rec_vec = dtuple_convert_big_rec(index, new_entry);
+
+ if (big_rec_vec == NULL) {
+
+ mem_heap_free(heap);
+
+ goto return_after_reservations;
+ }
+ }
+
+ rec = btr_cur_insert_if_possible(cursor, new_entry,
&dummy_reorganized, mtr);
- ut_a(rec); /* <- We knew the insert would fit */
+ ut_a(rec || optim_err != DB_UNDERFLOW);
+ if (rec) {
lock_rec_restore_from_page_infimum(rec, page);
-
+ rec_set_field_extern_bits(rec, ext_vect, n_ext_vect, mtr);
+
btr_cur_compress_if_useful(cursor, mtr);
err = DB_SUCCESS;
@@ -1521,9 +1717,13 @@ btr_cur_pessimistic_update(
err = btr_cur_pessimistic_insert(BTR_NO_UNDO_LOG_FLAG
| BTR_NO_LOCKING_FLAG
| BTR_KEEP_SYS_FLAG,
- cursor, new_entry, &rec, NULL, mtr);
+ cursor, new_entry, &rec,
+ &dummy_big_rec, NULL, mtr);
ut_a(rec);
ut_a(err == DB_SUCCESS);
+ ut_a(dummy_big_rec == NULL);
+
+ rec_set_field_extern_bits(rec, ext_vect, n_ext_vect, mtr);
lock_rec_restore_from_page_infimum(rec, page);
@@ -1541,9 +1741,12 @@ btr_cur_pessimistic_update(
return_after_reservations:
if (n_extents > 0) {
- fil_space_release_free_extents(cursor->index->space, n_extents);
+ fil_space_release_free_extents(cursor->index->space,
+ n_extents);
}
+ *big_rec = big_rec_vec;
+
return(err);
}
@@ -1932,6 +2135,11 @@ btr_cur_optimistic_delete(
ut_ad(btr_page_get_level(page, mtr) == 0);
+ if (rec_contains_externally_stored_field(btr_cur_get_rec(cursor))) {
+
+ return(FALSE);
+ }
+
if (btr_cur_can_delete_without_compress(cursor, mtr)) {
lock_update_delete(btr_cur_get_rec(cursor));
@@ -2009,6 +2217,8 @@ btr_cur_pessimistic_delete(
}
}
+ btr_rec_free_externally_stored_fields(cursor->index,
+ btr_cur_get_rec(cursor), mtr);
if ((page_get_n_recs(page) < 2)
&& (dict_tree_get_page(btr_cur_get_tree(cursor))
!= buf_frame_get_page_no(page))) {
@@ -2079,7 +2289,7 @@ return_after_reservations:
fil_space_release_free_extents(cursor->index->space, n_extents);
}
- return(ret);
+ return(ret);
}
/***********************************************************************
@@ -2292,3 +2502,553 @@ btr_estimate_number_of_different_key_vals(
return(index->table->stat_n_rows / (total_n_recs / n_diff));
}
+
+/*================== EXTERNAL STORAGE OF BIG FIELDS ===================*/
+
+/***********************************************************************
+Stores the positions of the fields marked as extern storage in the update
+vector, and also those fields that are marked as extern storage in rec
+and not mentioned in the updated fields. We use this function to remember
+which fields we must mark as extern storage in a record inserted for an
+update. */
+
+ulint
+btr_push_update_extern_fields(
+/*==========================*/
+ /* out: number of values stored in ext_vect */
+ ulint* ext_vect, /* in: array of ulints, must be preallocated
+ to have space for all fields in rec */
+ rec_t* rec, /* in: record */
+ upd_t* update) /* in: update vector or NULL */
+{
+ ulint n_pushed = 0;
+ ibool is_updated;
+ ulint n;
+ ulint j;
+ ulint i;
+
+ if (update) {
+ n = upd_get_n_fields(update);
+
+ for (i = 0; i < n; i++) {
+
+ if (upd_get_nth_field(update, i)->extern_storage) {
+
+ ext_vect[n_pushed] =
+ upd_get_nth_field(update, i)->field_no;
+
+ n_pushed++;
+ }
+ }
+ }
+
+ n = rec_get_n_fields(rec);
+
+ for (i = 0; i < n; i++) {
+ if (rec_get_nth_field_extern_bit(rec, i)) {
+
+ /* Check it is not in updated fields */
+ is_updated = FALSE;
+
+ if (update) {
+ for (j = 0; j < upd_get_n_fields(update);
+ j++) {
+ if (upd_get_nth_field(update, j)
+ ->field_no == i) {
+ is_updated = TRUE;
+ }
+ }
+ }
+
+ if (!is_updated) {
+ ext_vect[n_pushed] = i;
+ n_pushed++;
+ }
+ }
+ }
+
+ return(n_pushed);
+}
+
+/***********************************************************************
+Returns the length of a BLOB part stored on the header page. */
+static
+ulint
+btr_blob_get_part_len(
+/*==================*/
+ /* out: part length */
+ byte* blob_header) /* in: blob header */
+{
+ return(mach_read_from_4(blob_header + BTR_BLOB_HDR_PART_LEN));
+}
+
+/***********************************************************************
+Returns the page number where the next BLOB part is stored. */
+static
+ulint
+btr_blob_get_next_page_no(
+/*======================*/
+ /* out: page number or FIL_NULL if
+ no more pages */
+ byte* blob_header) /* in: blob header */
+{
+ return(mach_read_from_4(blob_header + BTR_BLOB_HDR_NEXT_PAGE_NO));
+}
+
+/***********************************************************************
+Stores the fields in big_rec_vec to the tablespace and puts pointers to
+them in rec. The fields are stored on pages allocated from leaf node
+file segment of the index tree. */
+
+ulint
+btr_store_big_rec_extern_fields(
+/*============================*/
+ /* out: DB_SUCCESS or error */
+ dict_index_t* index, /* in: index of rec; the index tree
+ MUST be X-latched */
+ rec_t* rec, /* in: record */
+ big_rec_t* big_rec_vec, /* in: vector containing fields
+ to be stored externally */
+ mtr_t* local_mtr) /* in: mtr containing the latch to
+ rec and to the tree */
+{
+ byte* data;
+ ulint local_len;
+ ulint extern_len;
+ ulint store_len;
+ ulint page_no;
+ page_t* page;
+ ulint space_id;
+ page_t* prev_page;
+ page_t* rec_page;
+ ulint prev_page_no;
+ ulint hint_page_no;
+ ulint i;
+ mtr_t mtr;
+
+ ut_ad(mtr_memo_contains(local_mtr, dict_tree_get_lock(index->tree),
+ MTR_MEMO_X_LOCK));
+	ut_ad(mtr_memo_contains(local_mtr, buf_block_align(rec),
+ MTR_MEMO_PAGE_X_FIX));
+ ut_a(index->type & DICT_CLUSTERED);
+
+ space_id = buf_frame_get_space_id(rec);
+
+ /* We have to create a file segment to the tablespace
+ for each field and put the pointer to the field in rec */
+
+ for (i = 0; i < big_rec_vec->n_fields; i++) {
+
+ data = rec_get_nth_field(rec, big_rec_vec->fields[i].field_no,
+ &local_len);
+ ut_a(local_len >= BTR_EXTERN_FIELD_REF_SIZE);
+ local_len -= BTR_EXTERN_FIELD_REF_SIZE;
+ extern_len = big_rec_vec->fields[i].len;
+
+ ut_a(extern_len > 0);
+
+ prev_page_no = FIL_NULL;
+
+ while (extern_len > 0) {
+ mtr_start(&mtr);
+
+ if (prev_page_no == FIL_NULL) {
+ hint_page_no = buf_frame_get_page_no(rec) + 1;
+ } else {
+ hint_page_no = prev_page_no + 1;
+ }
+
+ page = btr_page_alloc(index->tree, hint_page_no,
+ FSP_NO_DIR, 0, &mtr);
+ if (page == NULL) {
+
+ mtr_commit(&mtr);
+
+ return(DB_OUT_OF_FILE_SPACE);
+ }
+
+ page_no = buf_frame_get_page_no(page);
+
+ if (prev_page_no != FIL_NULL) {
+ prev_page = buf_page_get(space_id,
+ prev_page_no,
+ RW_X_LATCH, &mtr);
+
+ buf_page_dbg_add_level(prev_page,
+ SYNC_EXTERN_STORAGE);
+
+ mlog_write_ulint(prev_page + FIL_PAGE_DATA
+ + BTR_BLOB_HDR_NEXT_PAGE_NO,
+ page_no, MLOG_4BYTES, &mtr);
+ }
+
+ if (extern_len > (UNIV_PAGE_SIZE - FIL_PAGE_DATA
+ - BTR_BLOB_HDR_SIZE
+ - FIL_PAGE_DATA_END)) {
+ store_len = UNIV_PAGE_SIZE - FIL_PAGE_DATA
+ - BTR_BLOB_HDR_SIZE
+ - FIL_PAGE_DATA_END;
+ } else {
+ store_len = extern_len;
+ }
+
+ mlog_write_string(page + FIL_PAGE_DATA
+ + BTR_BLOB_HDR_SIZE,
+ big_rec_vec->fields[i].data
+ + big_rec_vec->fields[i].len
+ - extern_len,
+ store_len, &mtr);
+ mlog_write_ulint(page + FIL_PAGE_DATA
+ + BTR_BLOB_HDR_PART_LEN,
+ store_len, MLOG_4BYTES, &mtr);
+ mlog_write_ulint(page + FIL_PAGE_DATA
+ + BTR_BLOB_HDR_NEXT_PAGE_NO,
+ FIL_NULL, MLOG_4BYTES, &mtr);
+
+ extern_len -= store_len;
+
+ rec_page = buf_page_get(space_id,
+ buf_frame_get_page_no(data),
+ RW_X_LATCH, &mtr);
+
+ buf_page_dbg_add_level(rec_page, SYNC_NO_ORDER_CHECK);
+
+ mlog_write_ulint(data + local_len + BTR_EXTERN_LEN, 0,
+ MLOG_4BYTES, &mtr);
+ mlog_write_ulint(data + local_len + BTR_EXTERN_LEN + 4,
+ big_rec_vec->fields[i].len
+ - extern_len,
+ MLOG_4BYTES, &mtr);
+
+ if (prev_page_no == FIL_NULL) {
+ mlog_write_ulint(data + local_len
+ + BTR_EXTERN_SPACE_ID,
+ space_id,
+ MLOG_4BYTES, &mtr);
+
+ mlog_write_ulint(data + local_len
+ + BTR_EXTERN_PAGE_NO,
+ page_no,
+ MLOG_4BYTES, &mtr);
+
+ mlog_write_ulint(data + local_len
+ + BTR_EXTERN_OFFSET,
+ FIL_PAGE_DATA,
+ MLOG_4BYTES, &mtr);
+
+ /* Set the bit denoting that this field
+ in rec is stored externally */
+
+ rec_set_nth_field_extern_bit(rec,
+ big_rec_vec->fields[i].field_no,
+ TRUE, &mtr);
+ }
+
+ prev_page_no = page_no;
+
+ mtr_commit(&mtr);
+ }
+ }
+
+ return(DB_SUCCESS);
+}
+
+/***********************************************************************
+Frees the space in an externally stored field to the file space
+management. */
+
+void
+btr_free_externally_stored_field(
+/*=============================*/
+ dict_index_t* index, /* in: index of the data, the index
+ tree MUST be X-latched */
+ byte* data, /* in: internally stored data
+ + reference to the externally
+ stored part */
+ ulint local_len, /* in: length of data */
+ mtr_t* local_mtr) /* in: mtr containing the latch to
+					data and an X-latch to the index
+ tree */
+{
+ page_t* page;
+ page_t* rec_page;
+ ulint space_id;
+ ulint page_no;
+ ulint offset;
+ ulint extern_len;
+ ulint next_page_no;
+ ulint part_len;
+ mtr_t mtr;
+
+ ut_a(local_len >= BTR_EXTERN_FIELD_REF_SIZE);
+ ut_ad(mtr_memo_contains(local_mtr, dict_tree_get_lock(index->tree),
+ MTR_MEMO_X_LOCK));
+ ut_ad(mtr_memo_contains(local_mtr, buf_block_align(data),
+ MTR_MEMO_PAGE_X_FIX));
+ ut_a(local_len >= BTR_EXTERN_FIELD_REF_SIZE);
+ local_len -= BTR_EXTERN_FIELD_REF_SIZE;
+
+ for (;;) {
+ mtr_start(&mtr);
+
+ rec_page = buf_page_get(buf_frame_get_space_id(data),
+ buf_frame_get_page_no(data), RW_X_LATCH, &mtr);
+
+ buf_page_dbg_add_level(rec_page, SYNC_NO_ORDER_CHECK);
+
+ space_id = mach_read_from_4(data + local_len
+ + BTR_EXTERN_SPACE_ID);
+
+ page_no = mach_read_from_4(data + local_len
+ + BTR_EXTERN_PAGE_NO);
+
+ offset = mach_read_from_4(data + local_len + BTR_EXTERN_OFFSET);
+
+ extern_len = mach_read_from_4(data + local_len
+ + BTR_EXTERN_LEN + 4);
+
+ /* If extern len is 0, then there is no external storage data
+ at all */
+
+ if (extern_len == 0) {
+
+ mtr_commit(&mtr);
+
+ return;
+ }
+
+ page = buf_page_get(space_id, page_no, RW_X_LATCH, &mtr);
+
+ buf_page_dbg_add_level(page, SYNC_EXTERN_STORAGE);
+
+ next_page_no = mach_read_from_4(page + FIL_PAGE_DATA
+ + BTR_BLOB_HDR_NEXT_PAGE_NO);
+
+ part_len = btr_blob_get_part_len(page + FIL_PAGE_DATA);
+
+ ut_a(extern_len >= part_len);
+
+ /* We must supply the page level (= 0) as an argument
+ because we did not store it on the page (we save the space
+		overhead of an index page header). */
+
+ btr_page_free_low(index->tree, page, 0, &mtr);
+
+ mlog_write_ulint(data + local_len + BTR_EXTERN_PAGE_NO,
+ next_page_no,
+ MLOG_4BYTES, &mtr);
+ mlog_write_ulint(data + local_len + BTR_EXTERN_LEN + 4,
+ extern_len - part_len,
+ MLOG_4BYTES, &mtr);
+ if (next_page_no == FIL_NULL) {
+ ut_a(extern_len - part_len == 0);
+ }
+
+ if (extern_len - part_len == 0) {
+ ut_a(next_page_no == FIL_NULL);
+ }
+
+ mtr_commit(&mtr);
+ }
+}
+
+/***************************************************************
+Frees the externally stored fields for a record. */
+
+void
+btr_rec_free_externally_stored_fields(
+/*==================================*/
+ dict_index_t* index, /* in: index of the data, the index
+ tree MUST be X-latched */
+ rec_t* rec, /* in: record */
+ mtr_t* mtr) /* in: mini-transaction handle which contains
+ an X-latch to record page and to the index
+ tree */
+{
+ ulint n_fields;
+ byte* data;
+ ulint len;
+ ulint i;
+
+ ut_ad(mtr_memo_contains(mtr, buf_block_align(rec),
+ MTR_MEMO_PAGE_X_FIX));
+ if (rec_get_data_size(rec) <= REC_1BYTE_OFFS_LIMIT) {
+
+ return;
+ }
+
+ /* Free possible externally stored fields in the record */
+
+ n_fields = rec_get_n_fields(rec);
+
+ for (i = 0; i < n_fields; i++) {
+ if (rec_get_nth_field_extern_bit(rec, i)) {
+
+ data = rec_get_nth_field(rec, i, &len);
+ btr_free_externally_stored_field(index, data, len, mtr);
+ }
+ }
+}
+
+/***************************************************************
+Frees the externally stored fields for a record, if the field is mentioned
+in the update vector. */
+static
+void
+btr_rec_free_updated_extern_fields(
+/*===============================*/
+ dict_index_t* index, /* in: index of rec; the index tree MUST be
+ X-latched */
+ rec_t* rec, /* in: record */
+ upd_t* update, /* in: update vector */
+ mtr_t* mtr) /* in: mini-transaction handle which contains
+ an X-latch to record page and to the tree */
+{
+ upd_field_t* ufield;
+ ulint n_fields;
+ byte* data;
+ ulint len;
+ ulint i;
+
+ ut_ad(mtr_memo_contains(mtr, buf_block_align(rec),
+ MTR_MEMO_PAGE_X_FIX));
+ if (rec_get_data_size(rec) <= REC_1BYTE_OFFS_LIMIT) {
+
+ return;
+ }
+
+ /* Free possible externally stored fields in the record */
+
+ n_fields = upd_get_n_fields(update);
+
+ for (i = 0; i < n_fields; i++) {
+ ufield = upd_get_nth_field(update, i);
+
+ if (rec_get_nth_field_extern_bit(rec, ufield->field_no)) {
+
+ data = rec_get_nth_field(rec, ufield->field_no, &len);
+ btr_free_externally_stored_field(index, data, len, mtr);
+ }
+ }
+}
+
+/***********************************************************************
+Copies an externally stored field of a record to mem heap. Parameter
+data contains a pointer to the 'internally' stored part of the field:
+possibly some data, and the reference to the externally stored part in
+the last 20 bytes of data. */
+
+byte*
+btr_copy_externally_stored_field(
+/*=============================*/
+ /* out: the whole field copied to heap */
+ ulint* len, /* out: length of the whole field */
+ byte* data, /* in: 'internally' stored part of the
+ field containing also the reference to
+ the external part */
+ ulint local_len,/* in: length of data */
+ mem_heap_t* heap) /* in: mem heap */
+{
+ page_t* page;
+ ulint space_id;
+ ulint page_no;
+ ulint offset;
+ ulint extern_len;
+ byte* blob_header;
+ ulint part_len;
+ byte* buf;
+ ulint copied_len;
+ mtr_t mtr;
+
+ ut_a(local_len >= BTR_EXTERN_FIELD_REF_SIZE);
+
+ local_len -= BTR_EXTERN_FIELD_REF_SIZE;
+
+ space_id = mach_read_from_4(data + local_len + BTR_EXTERN_SPACE_ID);
+
+ page_no = mach_read_from_4(data + local_len + BTR_EXTERN_PAGE_NO);
+
+ offset = mach_read_from_4(data + local_len + BTR_EXTERN_OFFSET);
+
+	/* Currently a BLOB cannot be bigger than 4 GB; we
+ leave the 4 upper bytes in the length field unused */
+
+ extern_len = mach_read_from_4(data + local_len + BTR_EXTERN_LEN + 4);
+
+ buf = mem_heap_alloc(heap, local_len + extern_len);
+
+ ut_memcpy(buf, data, local_len);
+ copied_len = local_len;
+
+ if (extern_len == 0) {
+ *len = copied_len;
+
+ return(buf);
+ }
+
+ for (;;) {
+ mtr_start(&mtr);
+
+ page = buf_page_get(space_id, page_no, RW_S_LATCH, &mtr);
+
+ buf_page_dbg_add_level(page, SYNC_EXTERN_STORAGE);
+
+ blob_header = page + offset;
+
+ part_len = btr_blob_get_part_len(blob_header);
+
+ ut_memcpy(buf + copied_len, blob_header + BTR_BLOB_HDR_SIZE,
+ part_len);
+ copied_len += part_len;
+
+ page_no = btr_blob_get_next_page_no(blob_header);
+
+		/* On all BLOB pages except the first, the BLOB header
+		is always at the start of the page data: */
+
+ offset = FIL_PAGE_DATA;
+
+ mtr_commit(&mtr);
+
+ if (page_no == FIL_NULL) {
+ ut_a(copied_len == local_len + extern_len);
+
+ *len = copied_len;
+
+ return(buf);
+ }
+
+ ut_a(copied_len < local_len + extern_len);
+ }
+}
+
+/***********************************************************************
+Copies an externally stored field of a record to mem heap. */
+
+byte*
+btr_rec_copy_externally_stored_field(
+/*=================================*/
+ /* out: the field copied to heap */
+ rec_t* rec, /* in: record */
+ ulint no, /* in: field number */
+ ulint* len, /* out: length of the field */
+ mem_heap_t* heap) /* in: mem heap */
+{
+ ulint local_len;
+ byte* data;
+
+ ut_a(rec_get_nth_field_extern_bit(rec, no));
+
+ /* An externally stored field can contain some initial
+ data from the field, and in the last 20 bytes it has the
+ space id, page number, and offset where the rest of the
+ field data is stored, and the data length in addition to
+ the data stored locally. We may need to store some data
+ locally to get the local record length above the 128 byte
+ limit so that field offsets are stored in two bytes, and
+ the extern bit is available in those two bytes. */
+
+ data = rec_get_nth_field(rec, no, &local_len);
+
+ return(btr_copy_externally_stored_field(len, data, local_len, heap));
+}
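A note on the BLOB page format introduced above: each chain page carries an 8-byte header at the start of its user data, holding the length of the part stored on that page and the page number of the next part (FIL_NULL ends the chain), and btr_copy_externally_stored_field simply walks that list. The following self-contained sketch reproduces the walk over fake in-memory pages; read_u32/write_u32, the pages[] array and the page-size value are stand-ins for mach_read_from_4, the buffer pool and UNIV_PAGE_SIZE, and are assumptions of the sketch rather than part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE			16384U	/* stand-in for UNIV_PAGE_SIZE */
#define FIL_PAGE_DATA			   38U	/* assumed start of user data */
#define FIL_NULL		  0xFFFFFFFFU
#define BTR_BLOB_HDR_PART_LEN		    0	/* offsets from this patch */
#define BTR_BLOB_HDR_NEXT_PAGE_NO	    4
#define BTR_BLOB_HDR_SIZE		    8

/* Stand-ins for mach_read_from_4 / mach_write_to_4 (big-endian). */
static uint32_t read_u32(const unsigned char *p)
{
	return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
	     | ((uint32_t) p[2] << 8) | (uint32_t) p[3];
}
static void write_u32(unsigned char *p, uint32_t v)
{
	p[0] = (unsigned char) (v >> 24); p[1] = (unsigned char) (v >> 16);
	p[2] = (unsigned char) (v >> 8);  p[3] = (unsigned char) v;
}

/* Walks the BLOB chain starting at first_page_no and copies the stored
parts into buf, in the spirit of btr_copy_externally_stored_field.
pages[] plays the role of the buffer pool. Returns the copied length. */
static size_t copy_blob_chain(unsigned char pages[][PAGE_SIZE],
			      uint32_t first_page_no, unsigned char *buf)
{
	size_t	 copied	 = 0;
	uint32_t page_no = first_page_no;

	while (page_no != FIL_NULL) {
		unsigned char *hdr	= pages[page_no] + FIL_PAGE_DATA;
		uint32_t       part_len = read_u32(hdr + BTR_BLOB_HDR_PART_LEN);

		memcpy(buf + copied, hdr + BTR_BLOB_HDR_SIZE, part_len);
		copied += part_len;

		page_no = read_u32(hdr + BTR_BLOB_HDR_NEXT_PAGE_NO);
	}
	return copied;
}

int main(void)
{
	static unsigned char pages[2][PAGE_SIZE];
	unsigned char	     out[16];

	/* Page 0: "hello", chained to page 1; page 1: "world", end of chain. */
	write_u32(pages[0] + FIL_PAGE_DATA + BTR_BLOB_HDR_PART_LEN, 5);
	write_u32(pages[0] + FIL_PAGE_DATA + BTR_BLOB_HDR_NEXT_PAGE_NO, 1);
	memcpy(pages[0] + FIL_PAGE_DATA + BTR_BLOB_HDR_SIZE, "hello", 5);

	write_u32(pages[1] + FIL_PAGE_DATA + BTR_BLOB_HDR_PART_LEN, 5);
	write_u32(pages[1] + FIL_PAGE_DATA + BTR_BLOB_HDR_NEXT_PAGE_NO, FIL_NULL);
	memcpy(pages[1] + FIL_PAGE_DATA + BTR_BLOB_HDR_SIZE, "world", 5);

	out[copy_blob_chain(pages, 0, out)] = '\0';
	printf("%s\n", out);	/* prints "helloworld" */
	return 0;
}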
diff --git a/innobase/buf/buf0buf.c b/innobase/buf/buf0buf.c
index ede9e621462..3fabe6c6d0e 100644
--- a/innobase/buf/buf0buf.c
+++ b/innobase/buf/buf0buf.c
@@ -216,14 +216,44 @@ buf_calc_page_checksum(
/* out: checksum */
byte* page) /* in: buffer page */
{
- ulint checksum;
+ ulint checksum;
- checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);
- + ut_fold_binary(page + FIL_PAGE_DATA, UNIV_PAGE_SIZE - FIL_PAGE_DATA
- - FIL_PAGE_END_LSN);
- checksum = checksum & 0xFFFFFFFF;
+ checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);
+ + ut_fold_binary(page + FIL_PAGE_DATA,
+ UNIV_PAGE_SIZE - FIL_PAGE_DATA
+ - FIL_PAGE_END_LSN);
+ checksum = checksum & 0xFFFFFFFF;
- return(checksum);
+ return(checksum);
+}
+
+/************************************************************************
+Checks if a page is corrupt. */
+
+ibool
+buf_page_is_corrupted(
+/*==================*/
+ /* out: TRUE if corrupted */
+ byte* read_buf) /* in: a database page */
+{
+ ulint checksum;
+
+ checksum = buf_calc_page_checksum(read_buf);
+
+ if ((mach_read_from_4(read_buf + FIL_PAGE_LSN + 4)
+ != mach_read_from_4(read_buf + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN + 4))
+ || (checksum != mach_read_from_4(read_buf
+ + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN)
+ && mach_read_from_4(read_buf + FIL_PAGE_LSN)
+ != mach_read_from_4(read_buf
+ + UNIV_PAGE_SIZE
+ - FIL_PAGE_END_LSN))) {
+ return(TRUE);
+ }
+
+ return(FALSE);
}
/************************************************************************
@@ -1265,34 +1295,22 @@ buf_page_io_complete(
dulint id;
dict_index_t* index;
ulint io_type;
- ulint checksum;
ut_ad(block);
io_type = block->io_fix;
if (io_type == BUF_IO_READ) {
- checksum = buf_calc_page_checksum(block->frame);
-
/* From version 3.23.38 up we store the page checksum
to the 4 upper bytes of the page end lsn field */
- if ((mach_read_from_4(block->frame + FIL_PAGE_LSN + 4)
- != mach_read_from_4(block->frame + UNIV_PAGE_SIZE
- - FIL_PAGE_END_LSN + 4))
- || (checksum != mach_read_from_4(block->frame
- + UNIV_PAGE_SIZE
- - FIL_PAGE_END_LSN)
- && mach_read_from_4(block->frame + FIL_PAGE_LSN)
- != mach_read_from_4(block->frame
- + UNIV_PAGE_SIZE
- - FIL_PAGE_END_LSN))) {
- fprintf(stderr,
+ if (buf_page_is_corrupted(block->frame)) {
+ fprintf(stderr,
"InnoDB: Database page corruption or a failed\n"
"InnoDB: file read of page %lu.\n", block->offset);
- fprintf(stderr,
+ fprintf(stderr,
"InnoDB: You may have to recover from a backup.\n");
- exit(1);
+ exit(1);
}
if (recv_recovery_is_on()) {
@@ -1601,11 +1619,28 @@ void
buf_print_io(void)
/*==============*/
{
+ ulint size;
+
ut_ad(buf_pool);
+ size = buf_pool_get_curr_size() / UNIV_PAGE_SIZE;
+
mutex_enter(&(buf_pool->mutex));
+
+ printf("LRU list length %lu \n", UT_LIST_GET_LEN(buf_pool->LRU));
+ printf("Free list length %lu \n", UT_LIST_GET_LEN(buf_pool->free));
+ printf("Flush list length %lu \n",
+ UT_LIST_GET_LEN(buf_pool->flush_list));
+ printf("Buffer pool size in pages %lu\n", size);
- printf("pages read %lu, created %lu, written %lu\n",
+ printf("Pending reads %lu \n", buf_pool->n_pend_reads);
+
+ printf("Pending writes: LRU %lu, flush list %lu, single page %lu\n",
+ buf_pool->n_flush[BUF_FLUSH_LRU],
+ buf_pool->n_flush[BUF_FLUSH_LIST],
+ buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);
+
+ printf("Pages read %lu, created %lu, written %lu\n",
buf_pool->n_pages_read, buf_pool->n_pages_created,
buf_pool->n_pages_written);
mutex_exit(&(buf_pool->mutex));
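buf_page_is_corrupted, added above, declares a page suspect when the low 4 bytes of the lsn stamped in the page header differ from the copy stamped in the trailer, or when the first 4 bytes of the trailer match neither the checksum written since 3.23.38 nor the old-style lsn stamp. The sketch below mirrors that decision in a standalone program; the field offsets, the page size and the fold function are assumed stand-ins for the univ.i/fil0fil.h constants and ut_fold_binary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UNIV_PAGE_SIZE		16384U	/* assumed values for the sketch */
#define FIL_PAGE_LSN		   16U
#define FIL_PAGE_FILE_FLUSH_LSN	   26U
#define FIL_PAGE_DATA		   38U
#define FIL_PAGE_END_LSN	    8U

static uint32_t read_u32(const unsigned char *p)
{
	return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
	     | ((uint32_t) p[2] << 8) | (uint32_t) p[3];
}
static void write_u32(unsigned char *p, uint32_t v)
{
	p[0] = (unsigned char) (v >> 24); p[1] = (unsigned char) (v >> 16);
	p[2] = (unsigned char) (v >> 8);  p[3] = (unsigned char) v;
}

/* Stand-in for ut_fold_binary: any deterministic fold keeps the sketch
consistent between the stamping and the check. */
static uint32_t fold_bytes(const unsigned char *p, size_t n)
{
	uint32_t h = 0;
	while (n--) h = h * 33 + *p++;
	return h;
}

/* Same structure as buf_calc_page_checksum: fold the header up to the
flush-lsn field plus the user data, truncated to 32 bits. */
static uint32_t page_checksum(const unsigned char *page)
{
	return fold_bytes(page, FIL_PAGE_FILE_FLUSH_LSN)
	     + fold_bytes(page + FIL_PAGE_DATA,
			  UNIV_PAGE_SIZE - FIL_PAGE_DATA - FIL_PAGE_END_LSN);
}

/* Mirrors buf_page_is_corrupted: corrupt if the two low-lsn stamps differ,
or if the trailer matches neither the checksum nor the old lsn stamp. */
static int page_is_corrupted(const unsigned char *page)
{
	const unsigned char *trailer = page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN;

	if (read_u32(page + FIL_PAGE_LSN + 4) != read_u32(trailer + 4)) {
		return 1;
	}
	if (read_u32(trailer) != page_checksum(page)
	    && read_u32(trailer) != read_u32(page + FIL_PAGE_LSN)) {
		return 1;
	}
	return 0;
}

int main(void)
{
	static unsigned char page[UNIV_PAGE_SIZE];

	/* Stamp the page the way buf_flush_write_block_low does. */
	write_u32(page + FIL_PAGE_LSN + 4, 1234);
	write_u32(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN + 4, 1234);
	write_u32(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN, page_checksum(page));

	printf("corrupted: %d\n", page_is_corrupted(page));	/* 0 */
	page[FIL_PAGE_DATA + 100] ^= 0xFF;			/* damage a byte */
	printf("corrupted: %d\n", page_is_corrupted(page));	/* 1 */
	return 0;
}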
diff --git a/innobase/buf/buf0flu.c b/innobase/buf/buf0flu.c
index 7129b8d20a9..0f27cee45a5 100644
--- a/innobase/buf/buf0flu.c
+++ b/innobase/buf/buf0flu.c
@@ -1,7 +1,7 @@
/******************************************************
The database buffer buf_pool flush algorithm
-(c) 1995 Innobase Oy
+(c) 1995-2001 Innobase Oy
Created 11/11/1995 Heikki Tuuri
*******************************************************/
@@ -15,7 +15,6 @@ Created 11/11/1995 Heikki Tuuri
#include "ut0byte.h"
#include "ut0lst.h"
#include "fil0fil.h"
-
#include "buf0buf.h"
#include "buf0lru.h"
#include "buf0rea.h"
@@ -195,9 +194,145 @@ buf_flush_write_complete(
}
/************************************************************************
-Does an asynchronous write of a buffer page. NOTE: in simulated aio we must
-call os_aio_simulated_wake_handler_threads after we have posted a batch
-of writes! */
+Flushes possible buffered writes from the doublewrite memory buffer to disk,
+and also wakes up the aio thread if simulated aio is used. It is very
+important to call this function after a batch of writes has been posted,
+and also when we may have to wait for a page latch! Otherwise a deadlock
+of threads can occur. */
+static
+void
+buf_flush_buffered_writes(void)
+/*===========================*/
+{
+ buf_block_t* block;
+ ulint len;
+ ulint i;
+
+ if (trx_doublewrite == NULL) {
+ os_aio_simulated_wake_handler_threads();
+
+ return;
+ }
+
+ mutex_enter(&(trx_doublewrite->mutex));
+
+ /* Write first to doublewrite buffer blocks. We use synchronous
+	aio and thus know that the file write has been completed when the
+ control returns. */
+
+ if (trx_doublewrite->first_free == 0) {
+
+ mutex_exit(&(trx_doublewrite->mutex));
+
+ return;
+ }
+
+ if (trx_doublewrite->first_free > TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
+ len = TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE;
+ } else {
+ len = trx_doublewrite->first_free * UNIV_PAGE_SIZE;
+ }
+
+ fil_io(OS_FILE_WRITE,
+ TRUE, TRX_SYS_SPACE,
+ trx_doublewrite->block1, 0, len,
+ (void*)trx_doublewrite->write_buf, NULL);
+
+ if (trx_doublewrite->first_free > TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
+ len = (trx_doublewrite->first_free
+ - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) * UNIV_PAGE_SIZE;
+
+ fil_io(OS_FILE_WRITE,
+ TRUE, TRX_SYS_SPACE,
+ trx_doublewrite->block2, 0, len,
+ (void*)(trx_doublewrite->write_buf
+ + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE),
+ NULL);
+ }
+
+ /* Now flush the doublewrite buffer data to disk */
+
+ fil_flush(TRX_SYS_SPACE);
+
+ /* We know that the writes have been flushed to disk now
+ and in recovery we will find them in the doublewrite buffer
+ blocks. Next do the writes to the intended positions. */
+
+ for (i = 0; i < trx_doublewrite->first_free; i++) {
+ block = trx_doublewrite->buf_block_arr[i];
+
+ fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
+ FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE,
+ (void*)block->frame, (void*)block);
+ }
+
+ /* Wake possible simulated aio thread to actually post the
+ writes to the operating system */
+
+ os_aio_simulated_wake_handler_threads();
+
+	/* Wait until all async writes to tablespaces have been posted to
+ the OS */
+
+ os_aio_wait_until_no_pending_writes();
+
+ /* Now we flush the data to disk (for example, with fsync) */
+
+ fil_flush_file_spaces(FIL_TABLESPACE);
+
+ /* We can now reuse the doublewrite memory buffer: */
+
+ trx_doublewrite->first_free = 0;
+
+ mutex_exit(&(trx_doublewrite->mutex));
+}
+
+/************************************************************************
+Posts a buffer page for writing. If the doublewrite memory buffer is
+full, calls buf_flush_buffered_writes and waits for free space to
+appear. */
+static
+void
+buf_flush_post_to_doublewrite_buf(
+/*==============================*/
+ buf_block_t* block) /* in: buffer block to write */
+{
+try_again:
+ mutex_enter(&(trx_doublewrite->mutex));
+
+ if (trx_doublewrite->first_free
+ >= 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
+ mutex_exit(&(trx_doublewrite->mutex));
+
+ buf_flush_buffered_writes();
+
+ goto try_again;
+ }
+
+ ut_memcpy(trx_doublewrite->write_buf
+ + UNIV_PAGE_SIZE * trx_doublewrite->first_free,
+ block->frame, UNIV_PAGE_SIZE);
+
+ trx_doublewrite->buf_block_arr[trx_doublewrite->first_free] = block;
+
+ trx_doublewrite->first_free++;
+
+ if (trx_doublewrite->first_free
+ >= 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
+ mutex_exit(&(trx_doublewrite->mutex));
+
+ buf_flush_buffered_writes();
+
+ return;
+ }
+
+ mutex_exit(&(trx_doublewrite->mutex));
+}
+
+/************************************************************************
+Does an asynchronous write of a buffer page. NOTE: in simulated aio and
+also when the doublewrite buffer is used, we must call
+buf_flush_buffered_writes after we have posted a batch of writes! */
static
void
buf_flush_write_block_low(
@@ -222,15 +357,24 @@ buf_flush_write_block_low(
mach_write_to_8(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN,
block->newest_modification);
+ /* Write to the page the space id and page number */
+
+ mach_write_to_4(block->frame + FIL_PAGE_SPACE, block->space);
+ mach_write_to_4(block->frame + FIL_PAGE_OFFSET, block->offset);
+
/* We overwrite the first 4 bytes of the end lsn field to store
a page checksum */
mach_write_to_4(block->frame + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN,
buf_calc_page_checksum(block->frame));
- fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
- FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE,
+ if (!trx_doublewrite) {
+ fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
+ FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE,
(void*)block->frame, (void*)block);
+ } else {
+ buf_flush_post_to_doublewrite_buf(block);
+ }
}
/************************************************************************
@@ -251,14 +395,14 @@ buf_flush_try_page(
buf_block_t* block;
ibool locked;
- ut_ad((flush_type == BUF_FLUSH_LRU) || (flush_type == BUF_FLUSH_LIST)
- || (flush_type == BUF_FLUSH_SINGLE_PAGE));
+ ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST
+ || flush_type == BUF_FLUSH_SINGLE_PAGE);
mutex_enter(&(buf_pool->mutex));
block = buf_page_hash_get(space, offset);
- if ((flush_type == BUF_FLUSH_LIST)
+ if (flush_type == BUF_FLUSH_LIST
&& block && buf_flush_ready_for_flush(block, flush_type)) {
block->io_fix = BUF_IO_WRITE;
@@ -286,7 +430,7 @@ buf_flush_try_page(
mutex_exit(&(buf_pool->mutex));
if (!locked) {
- os_aio_simulated_wake_handler_threads();
+ buf_flush_buffered_writes();
rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);
}
@@ -300,7 +444,7 @@ buf_flush_try_page(
return(1);
- } else if ((flush_type == BUF_FLUSH_LRU) && block
+ } else if (flush_type == BUF_FLUSH_LRU && block
&& buf_flush_ready_for_flush(block, flush_type)) {
/* VERY IMPORTANT:
@@ -328,7 +472,7 @@ buf_flush_try_page(
return(1);
- } else if ((flush_type == BUF_FLUSH_SINGLE_PAGE) && block
+ } else if (flush_type == BUF_FLUSH_SINGLE_PAGE && block
&& buf_flush_ready_for_flush(block, flush_type)) {
block->io_fix = BUF_IO_WRITE;
@@ -387,6 +531,14 @@ buf_flush_try_neighbors(
low = offset;
high = offset + 1;
+ } else if (flush_type == BUF_FLUSH_LIST) {
+ /* Since semaphore waits require us to flush the
+ doublewrite buffer to disk, it is best that the
+ search area is just the page itself, to minimize
+ chances for semaphore waits */
+
+ low = offset;
+ high = offset + 1;
}
/* printf("Flush area: low %lu high %lu\n", low, high); */
@@ -418,13 +570,6 @@ buf_flush_try_neighbors(
mutex_exit(&(buf_pool->mutex));
- /* In simulated aio we wake up the i/o-handler threads now that
- we have posted a batch of writes: */
-
- /* printf("Flush count %lu ; Waking i/o handlers\n", count); */
-
- os_aio_simulated_wake_handler_threads();
-
return(count);
}
@@ -565,13 +710,15 @@ buf_flush_batch(
mutex_exit(&(buf_pool->mutex));
- if (buf_debug_prints && (page_count > 0)) {
+ buf_flush_buffered_writes();
+
+ if (buf_debug_prints && page_count > 0) {
if (flush_type == BUF_FLUSH_LRU) {
- printf("To flush %lu pages in LRU flush\n",
+ printf("Flushed %lu pages in LRU flush\n",
page_count);
} else if (flush_type == BUF_FLUSH_LIST) {
- printf("To flush %lu pages in flush list flush\n",
- page_count, flush_type);
+ printf("Flushed %lu pages in flush list flush\n",
+ page_count);
} else {
ut_error;
}
diff --git a/innobase/buf/buf0rea.c b/innobase/buf/buf0rea.c
index 728bf4404b8..db187cdd896 100644
--- a/innobase/buf/buf0rea.c
+++ b/innobase/buf/buf0rea.c
@@ -49,7 +49,9 @@ ulint
buf_read_page_low(
/*==============*/
/* out: 1 if a read request was queued, 0 if the page
- already resided in buf_pool */
+ already resided in buf_pool or if the page is in
+				the doublewrite buffer blocks, in which case it is never
+ read into the pool */
ibool sync, /* in: TRUE if synchronous aio is desired */
ulint mode, /* in: BUF_READ_IBUF_PAGES_ONLY, ...,
ORed to OS_AIO_SIMULATED_WAKE_LATER (see below
@@ -63,6 +65,16 @@ buf_read_page_low(
wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER;
mode = mode & ~OS_AIO_SIMULATED_WAKE_LATER;
+ if (trx_doublewrite && space == TRX_SYS_SPACE
+ && ( (offset >= trx_doublewrite->block1
+ && offset < trx_doublewrite->block1
+ + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE)
+ || (offset >= trx_doublewrite->block2
+ && offset < trx_doublewrite->block2
+ + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE))) {
+ return(0);
+ }
+
#ifdef UNIV_LOG_DEBUG
if (space % 2 == 1) {
/* We are updating a replicate space while holding the
diff --git a/innobase/data/data0data.c b/innobase/data/data0data.c
index fe5611bc312..4172fb9c8ce 100644
--- a/innobase/data/data0data.c
+++ b/innobase/data/data0data.c
@@ -13,7 +13,10 @@ Created 5/30/1994 Heikki Tuuri
#endif
#include "ut0rnd.h"
-
+#include "rem0rec.h"
+#include "page0page.h"
+#include "dict0dict.h"
+#include "btr0cur.h"
byte data_error; /* data pointers of tuple fields are initialized
to point here for error checking */
@@ -378,6 +381,172 @@ dtuple_sprintf(
return(len);
}
+/******************************************************************
+Moves parts of long fields in entry to the big record vector so that
+the size of tuple drops below the maximum record size allowed in the
+database. Moves data only from those fields which are not necessary
+to determine uniquely the insertion place of the tuple in the index. */
+
+big_rec_t*
+dtuple_convert_big_rec(
+/*===================*/
+ /* out, own: created big record vector,
+ NULL if we are not able to shorten
+ the entry enough, i.e., if there are
+ too many short fields in entry */
+ dict_index_t* index, /* in: index */
+ dtuple_t* entry) /* in: index entry */
+{
+ mem_heap_t* heap;
+ big_rec_t* vector;
+ dfield_t* dfield;
+ ulint size;
+ ulint n_fields;
+ ulint longest;
+ ulint longest_i;
+ ulint i;
+
+ size = rec_get_converted_size(entry);
+
+ heap = mem_heap_create(size + dtuple_get_n_fields(entry)
+ * sizeof(big_rec_field_t) + 1000);
+
+ vector = mem_heap_alloc(heap, sizeof(big_rec_t));
+
+ vector->heap = heap;
+ vector->fields = mem_heap_alloc(heap, dtuple_get_n_fields(entry)
+ * sizeof(big_rec_field_t));
+
+ /* Decide which fields to shorten: the algorithm is to look for
+ the longest field which does not occur in the ordering part
+ of any index on the table */
+
+ n_fields = 0;
+
+ while ((rec_get_converted_size(entry)
+ >= page_get_free_space_of_empty() / 2)
+ || rec_get_converted_size(entry) >= REC_MAX_DATA_SIZE) {
+
+ longest = 0;
+ for (i = dict_index_get_n_unique_in_tree(index);
+ i < dtuple_get_n_fields(entry); i++) {
+
+ /* Skip over fields which are ordering in some index */
+
+ if (dict_field_get_col(
+ dict_index_get_nth_field(index, i))
+ ->ord_part == 0) {
+
+ dfield = dtuple_get_nth_field(entry, i);
+
+ if (dfield->len != UNIV_SQL_NULL &&
+ dfield->len > longest) {
+
+ longest = dfield->len;
+
+ longest_i = i;
+ }
+ }
+ }
+
+ if (longest < BTR_EXTERN_FIELD_REF_SIZE + 10) {
+
+ /* Cannot shorten more */
+
+ mem_heap_free(heap);
+
+ return(NULL);
+ }
+
+ /* Move data from field longest_i to big rec vector,
+ but do not let data size of the remaining entry
+ drop below 128 which is the limit for the 2-byte
+ offset storage format in a physical record */
+
+ dfield = dtuple_get_nth_field(entry, longest_i);
+ vector->fields[n_fields].field_no = longest_i;
+
+ if (dtuple_get_data_size(entry) - dfield->len
+ <= REC_1BYTE_OFFS_LIMIT) {
+ vector->fields[n_fields].len =
+ dtuple_get_data_size(entry)
+ - REC_1BYTE_OFFS_LIMIT;
+ /* Since dfield will contain at least
+ a 20-byte reference to the extern storage,
+ we know that the data size of entry will be
+ > REC_1BYTE_OFFS_LIMIT */
+ } else {
+ vector->fields[n_fields].len = dfield->len;
+ }
+
+ vector->fields[n_fields].data = mem_heap_alloc(heap,
+ vector->fields[n_fields].len);
+
+ /* Copy data (from the end of field) to big rec vector */
+
+ ut_memcpy(vector->fields[n_fields].data,
+ ((byte*)dfield->data) + dfield->len
+ - vector->fields[n_fields].len,
+ vector->fields[n_fields].len);
+ dfield->len = dfield->len - vector->fields[n_fields].len
+ + BTR_EXTERN_FIELD_REF_SIZE;
+
+ /* Set the extern field reference in dfield to zero */
+ memset(((byte*)dfield->data)
+ + dfield->len - BTR_EXTERN_FIELD_REF_SIZE,
+ 0, BTR_EXTERN_FIELD_REF_SIZE);
+ n_fields++;
+ }
+
+ vector->n_fields = n_fields;
+ return(vector);
+}
+
+/******************************************************************
+Puts back to entry the data stored in vector. Note that to ensure the
+fields in entry can accommodate the data, vector must have been created
+from entry with dtuple_convert_big_rec. */
+
+void
+dtuple_convert_back_big_rec(
+/*========================*/
+ dict_index_t* index, /* in: index */
+ dtuple_t* entry, /* in: entry whose data was put to vector */
+ big_rec_t* vector) /* in, own: big rec vector; it is
+ freed in this function */
+{
+ dfield_t* dfield;
+ ulint i;
+
+ for (i = 0; i < vector->n_fields; i++) {
+
+ dfield = dtuple_get_nth_field(entry,
+ vector->fields[i].field_no);
+ /* Copy data from big rec vector */
+
+ ut_memcpy(((byte*)dfield->data)
+ + dfield->len - BTR_EXTERN_FIELD_REF_SIZE,
+ vector->fields[i].data,
+ vector->fields[i].len);
+ dfield->len = dfield->len + vector->fields[i].len
+ - BTR_EXTERN_FIELD_REF_SIZE;
+ }
+
+ mem_heap_free(vector->heap);
+}
+
+/******************************************************************
+Frees the memory in a big rec vector. */
+
+void
+dtuple_big_rec_free(
+/*================*/
+ big_rec_t* vector) /* in, own: big rec vector; it is
+ freed in this function */
+{
+ mem_heap_free(vector->heap);
+}
+
#ifdef notdefined
/******************************************************************
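dtuple_convert_big_rec above loops while the converted record is still too large (at least half an empty page, or REC_MAX_DATA_SIZE), each round moving the tail of the longest field that is not used for ordering out to the big-rec vector and leaving a 20-byte reference in its place, and gives up if no remaining field is worth moving. A much simplified standalone sketch of that greedy loop over plain arrays follows; the single size limit is a made-up constant, and the REC_1BYTE_OFFS_LIMIT adjustment and the kept local prefix are deliberately omitted.

#include <stdio.h>

/* Hypothetical stand-ins for the real limits; the actual values come from
page_get_free_space_of_empty() and REC_MAX_DATA_SIZE. */
#define MAX_REC_SIZE		8100U
#define EXTERN_REF_SIZE		  20U	/* bytes kept in the record per moved field */
#define MIN_WORTH_MOVING	(EXTERN_REF_SIZE + 10U)

/* Greedily shortens the longest non-ordering field until the record fits,
mirroring the loop in dtuple_convert_big_rec. Returns the number of fields
moved to external storage, or -1 if the entry cannot be shortened enough. */
static int convert_big_rec(unsigned *field_len, const int *is_ordering,
			   int n_fields)
{
	int n_moved = 0;

	for (;;) {
		unsigned rec_size  = 0;
		unsigned longest   = 0;
		int	 longest_i = -1;

		for (int i = 0; i < n_fields; i++) {
			rec_size += field_len[i];
		}
		if (rec_size < MAX_REC_SIZE) {
			return n_moved;			/* fits now */
		}
		for (int i = 0; i < n_fields; i++) {
			if (!is_ordering[i] && field_len[i] > longest) {
				longest	  = field_len[i];
				longest_i = i;
			}
		}
		if (longest < MIN_WORTH_MOVING) {
			return -1;			/* cannot shorten enough */
		}
		/* Move the field tail out; only the 20-byte reference stays. */
		field_len[longest_i] = EXTERN_REF_SIZE;
		n_moved++;
	}
}

int main(void)
{
	unsigned len[4]	     = { 8, 30, 6000, 9000 };
	int	 ordering[4] = { 1, 1, 0, 0 };

	printf("fields moved: %d\n", convert_big_rec(len, ordering, 4));
	return 0;
}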
diff --git a/innobase/fil/fil0fil.c b/innobase/fil/fil0fil.c
index 6f201c7bce4..5c783627721 100644
--- a/innobase/fil/fil0fil.c
+++ b/innobase/fil/fil0fil.c
@@ -90,6 +90,9 @@ struct fil_node_struct {
is ignored) */
ulint n_pending;
/* count of pending i/o-ops on this file */
+ ibool is_modified; /* this is set to TRUE when we write
+ to the file and FALSE when we call fil_flush
+ for this file space */
UT_LIST_NODE_T(fil_node_t) chain;
/* link field for the file chain */
UT_LIST_NODE_T(fil_node_t) LRU;
@@ -301,6 +304,8 @@ fil_node_create(
node->size = size;
node->magic_n = FIL_NODE_MAGIC_N;
node->n_pending = 0;
+
+ node->is_modified = FALSE;
HASH_SEARCH(hash, system->spaces, id, space, space->id == id);
@@ -721,6 +726,47 @@ fil_space_get_size(
}
/***********************************************************************
+Checks if the pair space, page_no refers to an existing page in a
+tablespace file space. */
+
+ibool
+fil_check_adress_in_tablespace(
+/*===========================*/
+ /* out: TRUE if the address is meaningful */
+ ulint id, /* in: space id */
+ ulint page_no)/* in: page number */
+{
+ fil_space_t* space;
+ fil_system_t* system = fil_system;
+ ulint size;
+ ibool ret;
+
+ ut_ad(system);
+
+ mutex_enter(&(system->mutex));
+
+ HASH_SEARCH(hash, system->spaces, id, space, space->id == id);
+
+ if (space == NULL) {
+ ret = FALSE;
+ } else {
+ size = space->size;
+
+ if (page_no > size) {
+ ret = FALSE;
+ } else if (space->purpose != FIL_TABLESPACE) {
+ ret = FALSE;
+ } else {
+ ret = TRUE;
+ }
+ }
+
+ mutex_exit(&(system->mutex));
+
+ return(ret);
+}
+
+/***********************************************************************
Tries to reserve free extents in a file space. */
ibool
@@ -812,8 +858,14 @@ fil_node_prepare_for_io(
fil_node_close(last_node, system);
}
- node->handle = os_file_create(node->name, OS_FILE_OPEN,
- OS_FILE_AIO, &ret);
+ if (space->purpose == FIL_LOG) {
+ node->handle = os_file_create(node->name, OS_FILE_OPEN,
+ OS_FILE_AIO, OS_LOG_FILE, &ret);
+ } else {
+ node->handle = os_file_create(node->name, OS_FILE_OPEN,
+ OS_FILE_AIO, OS_DATA_FILE, &ret);
+ }
+
ut_a(ret);
node->open = TRUE;
@@ -851,7 +903,8 @@ void
fil_node_complete_io(
/*=================*/
fil_node_t* node, /* in: file node */
- fil_system_t* system) /* in: file system */
+ fil_system_t* system, /* in: file system */
+ ulint type) /* in: OS_FILE_WRITE or ..._READ */
{
ut_ad(node);
ut_ad(system);
@@ -860,6 +913,10 @@ fil_node_complete_io(
node->n_pending--;
+ if (type != OS_FILE_READ) {
+ node->is_modified = TRUE;
+ }
+
if (node->n_pending == 0) {
/* The node must be put back to the LRU list */
UT_LIST_ADD_FIRST(LRU, system->LRU, node);
@@ -1016,7 +1073,7 @@ loop:
mutex_enter(&(system->mutex));
- fil_node_complete_io(node, system);
+ fil_node_complete_io(node, system, type);
mutex_exit(&(system->mutex));
@@ -1090,12 +1147,14 @@ fil_aio_wait(
fil_node_t* fil_node;
fil_system_t* system = fil_system;
void* message;
+ ulint type;
ut_ad(fil_validate());
if (os_aio_use_native_aio) {
#ifdef WIN_ASYNC_IO
- ret = os_aio_windows_handle(segment, 0, &fil_node, &message);
+ ret = os_aio_windows_handle(segment, 0, &fil_node, &message,
+ &type);
#elif defined(POSIX_ASYNC_IO)
ret = os_aio_posix_handle(segment, &fil_node, &message);
#else
@@ -1103,14 +1162,14 @@ fil_aio_wait(
#endif
} else {
ret = os_aio_simulated_handle(segment, (void**) &fil_node,
- &message);
+ &message, &type);
}
ut_a(ret);
mutex_enter(&(system->mutex));
- fil_node_complete_io(fil_node, fil_system);
+ fil_node_complete_io(fil_node, fil_system, type);
mutex_exit(&(system->mutex));
@@ -1149,8 +1208,10 @@ fil_flush(
node = UT_LIST_GET_FIRST(space->chain);
while (node) {
- if (node->open) {
+ if (node->open && node->is_modified) {
file = node->handle;
+
+ node->is_modified = FALSE;
mutex_exit(&(system->mutex));
@@ -1159,9 +1220,11 @@ fil_flush(
handle is still open: we assume that the OS
will not crash or trap even if we pass a handle
to a closed file below in os_file_flush! */
+
+ /* printf("Flushing to file %s\n", node->name); */
os_file_flush(file);
-
+
mutex_enter(&(system->mutex));
}
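The new is_modified flag turns fil_flush into a no-op for files that have not been written since the last flush: a completed write (fil_node_complete_io with a type other than OS_FILE_READ) sets the flag, and fil_flush clears it before calling os_file_flush. A minimal sketch of the same pattern, with a plain struct and fsync standing in for fil_node_t and os_file_flush:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Minimal stand-in for fil_node_t: one flag per data file telling whether
anything has been written to it since the last flush. */
struct file_node {
	int fd;
	int is_modified;
};

/* Cf. fil_node_complete_io: a completed write marks the file modified. */
static void note_write_completed(struct file_node *node)
{
	node->is_modified = 1;
}

/* Cf. the new fil_flush: fsync only the files written since the last
flush, clearing the flag as we go. Returns the number of files synced. */
static int flush_space(struct file_node *nodes, int n_nodes)
{
	int n_synced = 0;

	for (int i = 0; i < n_nodes; i++) {
		if (nodes[i].is_modified) {
			nodes[i].is_modified = 0;
			if (fsync(nodes[i].fd) == 0) {	/* os_file_flush */
				n_synced++;
			}
		}
	}
	return n_synced;
}

int main(void)
{
	struct file_node node = { open("flush_sketch.tmp",
				       O_RDWR | O_CREAT, 0600), 0 };

	if (node.fd < 0) {
		return 1;
	}
	if (write(node.fd, "x", 1) == 1) {
		note_write_completed(&node);
	}
	printf("first flush synced %d file(s)\n", flush_space(&node, 1));  /* 1 */
	printf("second flush synced %d file(s)\n", flush_space(&node, 1)); /* 0 */
	return 0;
}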
diff --git a/innobase/fsp/fsp0fsp.c b/innobase/fsp/fsp0fsp.c
index 101fb5f3ba0..ccc13f15fde 100644
--- a/innobase/fsp/fsp0fsp.c
+++ b/innobase/fsp/fsp0fsp.c
@@ -3239,8 +3239,8 @@ fsp_validate(
ut_a(descr_count * FSP_EXTENT_SIZE == free_limit);
ut_a(n_used + n_full_frag_pages
- == n_used2 + (free_limit + XDES_DESCRIBED_PER_PAGE - 1)
- / XDES_DESCRIBED_PER_PAGE
+ == n_used2 + 2* ((free_limit + XDES_DESCRIBED_PER_PAGE - 1)
+ / XDES_DESCRIBED_PER_PAGE)
+ seg_inode_len_full + seg_inode_len_free);
ut_a(frag_n_used == n_used);
diff --git a/innobase/ibuf/ibuf0ibuf.c b/innobase/ibuf/ibuf0ibuf.c
index 171c6169927..3db20fb13ee 100644
--- a/innobase/ibuf/ibuf0ibuf.c
+++ b/innobase/ibuf/ibuf0ibuf.c
@@ -1946,6 +1946,7 @@ ibuf_insert_low(
ulint page_no,/* in: page number where to insert */
que_thr_t* thr) /* in: query thread */
{
+ big_rec_t* dummy_big_rec;
ulint entry_size;
btr_pcur_t pcur;
btr_cur_t* cursor;
@@ -2101,7 +2102,8 @@ ibuf_insert_low(
if (mode == BTR_MODIFY_PREV) {
err = btr_cur_optimistic_insert(BTR_NO_LOCKING_FLAG, cursor,
- ibuf_entry, &ins_rec, thr,
+ ibuf_entry, &ins_rec,
+ &dummy_big_rec, thr,
&mtr);
if (err == DB_SUCCESS) {
/* Update the page max trx id field */
@@ -2121,7 +2123,8 @@ ibuf_insert_low(
err = btr_cur_pessimistic_insert(BTR_NO_LOCKING_FLAG
| BTR_NO_UNDO_LOG_FLAG,
cursor,
- ibuf_entry, &ins_rec, thr,
+ ibuf_entry, &ins_rec,
+ &dummy_big_rec, thr,
&mtr);
if (err == DB_SUCCESS) {
/* Update the page max trx id field */
diff --git a/innobase/include/btr0btr.h b/innobase/include/btr0btr.h
index f8a3000ca8a..bea85565125 100644
--- a/innobase/include/btr0btr.h
+++ b/innobase/include/btr0btr.h
@@ -357,6 +357,44 @@ btr_get_size(
/* out: number of pages */
dict_index_t* index, /* in: index */
ulint flag); /* in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
+/******************************************************************
+Allocates a new file page to be used in an index tree. NOTE: we assume
+that the caller has made the reservation for free extents! */
+
+page_t*
+btr_page_alloc(
+/*===========*/
+ /* out: new allocated page, x-latched;
+ NULL if out of space */
+ dict_tree_t* tree, /* in: index tree */
+ ulint hint_page_no, /* in: hint of a good page */
+ byte file_direction, /* in: direction where a possible
+ page split is made */
+ ulint level, /* in: level where the page is placed
+ in the tree */
+ mtr_t* mtr); /* in: mtr */
+/******************************************************************
+Frees a file page used in an index tree. NOTE: cannot free field external
+storage pages because the page must contain info on its level. */
+
+void
+btr_page_free(
+/*==========*/
+ dict_tree_t* tree, /* in: index tree */
+ page_t* page, /* in: page to be freed, x-latched */
+ mtr_t* mtr); /* in: mtr */
+/******************************************************************
+Frees a file page used in an index tree. Can also be used to free BLOB
+external storage pages, because the page level 0 can be given as an
+argument. */
+
+void
+btr_page_free_low(
+/*==============*/
+ dict_tree_t* tree, /* in: index tree */
+ page_t* page, /* in: page to be freed, x-latched */
+ ulint level, /* in: page level */
+ mtr_t* mtr); /* in: mtr */
/*****************************************************************
Prints size info of a B-tree. */
diff --git a/innobase/include/btr0cur.h b/innobase/include/btr0cur.h
index 4ce2177bfe8..ffae434a5d9 100644
--- a/innobase/include/btr0cur.h
+++ b/innobase/include/btr0cur.h
@@ -151,11 +151,14 @@ btr_cur_optimistic_insert(
ulint flags, /* in: undo logging and locking flags: if not
zero, the parameters index and thr should be
specified */
- btr_cur_t* cursor, /* in: cursor on page after which
- to insert; cursor stays valid */
+ btr_cur_t* cursor, /* in: cursor on page after which to insert;
+ cursor stays valid */
dtuple_t* entry, /* in: entry to insert */
rec_t** rec, /* out: pointer to inserted record if
succeed */
+ big_rec_t** big_rec,/* out: big rec vector whose fields have to
+ be stored externally by the caller, or
+ NULL */
que_thr_t* thr, /* in: query thread or NULL */
mtr_t* mtr); /* in: mtr */
/*****************************************************************
@@ -169,13 +172,19 @@ btr_cur_pessimistic_insert(
/*=======================*/
/* out: DB_SUCCESS or error number */
ulint flags, /* in: undo logging and locking flags: if not
- zero, the parameters index and thr should be
- specified */
+ zero, the parameter thr should be
+ specified; if no undo logging is specified,
+ then the caller must have reserved enough
+ free extents in the file space so that the
+ insertion will certainly succeed */
btr_cur_t* cursor, /* in: cursor after which to insert;
- cursor does not stay valid */
+ cursor stays valid */
dtuple_t* entry, /* in: entry to insert */
rec_t** rec, /* out: pointer to inserted record if
succeed */
+ big_rec_t** big_rec,/* out: big rec vector whose fields have to
+ be stored externally by the caller, or
+ NULL */
que_thr_t* thr, /* in: query thread or NULL */
mtr_t* mtr); /* in: mtr */
/*****************************************************************
@@ -228,8 +237,9 @@ btr_cur_pessimistic_update(
/* out: DB_SUCCESS or error code */
ulint flags, /* in: undo logging, locking, and rollback
flags */
- btr_cur_t* cursor, /* in: cursor on the record to update;
- cursor does not stay valid */
+ btr_cur_t* cursor, /* in: cursor on the record to update */
+ big_rec_t** big_rec,/* out: big rec vector whose fields have to
+ be stored externally by the caller, or NULL */
upd_t* update, /* in: update vector; this is allowed also
contain trx id and roll ptr fields, but
the values in update vector have no effect */
@@ -407,6 +417,92 @@ btr_estimate_number_of_different_key_vals(
/*======================================*/
/* out: estimated number of key values */
dict_index_t* index); /* in: index */
+/***********************************************************************
+Stores the fields in big_rec_vec to the tablespace and puts pointers to
+them in rec. The fields are stored on pages allocated from the leaf node
+file segment of the index tree. */
+
+ulint
+btr_store_big_rec_extern_fields(
+/*============================*/
+ /* out: DB_SUCCESS or error */
+ dict_index_t* index, /* in: index of rec; the index tree
+ MUST be X-latched */
+ rec_t* rec, /* in: record */
+ big_rec_t* big_rec_vec, /* in: vector containing fields
+ to be stored externally */
+ mtr_t* local_mtr); /* in: mtr containing the latch to
+ rec and to the tree */
+/***********************************************************************
+Frees the space in an externally stored field to the file space
+management. */
+
+void
+btr_free_externally_stored_field(
+/*=============================*/
+ dict_index_t* index, /* in: index of the data, the index
+ tree MUST be X-latched */
+ byte* data, /* in: internally stored data
+ + reference to the externally
+ stored part */
+ ulint local_len, /* in: length of data */
+ mtr_t* local_mtr); /* in: mtr containing the latch to
+					data and an X-latch to the index
+ tree */
+/***************************************************************
+Frees the externally stored fields for a record. */
+
+void
+btr_rec_free_externally_stored_fields(
+/*==================================*/
+ dict_index_t* index, /* in: index of the data, the index
+ tree MUST be X-latched */
+ rec_t* rec, /* in: record */
+ mtr_t* mtr); /* in: mini-transaction handle which contains
+ an X-latch to record page and to the index
+ tree */
+/***********************************************************************
+Copies an externally stored field of a record to mem heap. */
+
+byte*
+btr_rec_copy_externally_stored_field(
+/*=================================*/
+ /* out: the field copied to heap */
+ rec_t* rec, /* in: record */
+ ulint no, /* in: field number */
+ ulint* len, /* out: length of the field */
+ mem_heap_t* heap); /* in: mem heap */
+/***********************************************************************
+Copies an externally stored field of a record to mem heap. Parameter
+data contains a pointer to 'internally' stored part of the field:
+possibly some data, and the reference to the externally stored part in
+the last 20 bytes of data. */
+
+byte*
+btr_copy_externally_stored_field(
+/*=============================*/
+ /* out: the whole field copied to heap */
+ ulint* len, /* out: length of the whole field */
+ byte* data, /* in: 'internally' stored part of the
+ field containing also the reference to
+ the external part */
+ ulint local_len,/* in: length of data */
+ mem_heap_t* heap); /* in: mem heap */
+/***********************************************************************
+Stores the positions of the fields marked as extern storage in the update
+vector, and also those fields that are marked as extern storage in rec
+and not mentioned in updated fields. We use this function to remember
+which fields we must mark as extern storage in a record inserted for an
+update. */
+
+ulint
+btr_push_update_extern_fields(
+/*==========================*/
+ /* out: number of values stored in ext_vect */
+ ulint* ext_vect, /* in: array of ulints, must be preallocated
+ to have place for all fields in rec */
+ rec_t* rec, /* in: record */
+ upd_t* update); /* in: update vector */
/*######################################################################*/
@@ -516,6 +612,19 @@ and sleep this many microseconds in between */
#define BTR_CUR_RETRY_DELETE_N_TIMES 100
#define BTR_CUR_RETRY_SLEEP_TIME 50000
+/* The reference in a field of which data is stored on a different page */
+/*--------------------------------------*/
+#define BTR_EXTERN_SPACE_ID 0 /* space id where stored */
+#define BTR_EXTERN_PAGE_NO 4 /* page no where stored */
+#define BTR_EXTERN_OFFSET 8 /* offset of BLOB header
+ on that page */
+#define BTR_EXTERN_LEN 12 /* 8 bytes containing the
+ length of the externally
+ stored part of the BLOB */
+/*--------------------------------------*/
+#define BTR_EXTERN_FIELD_REF_SIZE 20
+
+
extern ulint btr_cur_n_non_sea;
#ifndef UNIV_NONINL
diff --git a/innobase/include/buf0buf.h b/innobase/include/buf0buf.h
index 7f3e20a4505..8b22561adf8 100644
--- a/innobase/include/buf0buf.h
+++ b/innobase/include/buf0buf.h
@@ -378,6 +378,14 @@ buf_calc_page_checksum(
/*===================*/
/* out: checksum */
byte* page); /* in: buffer page */
+/************************************************************************
+Checks if a page is corrupt. */
+
+ibool
+buf_page_is_corrupted(
+/*==================*/
+ /* out: TRUE if corrupted */
+ byte* read_buf); /* in: a database page */
/**************************************************************************
Gets the page number of a pointer pointing within a buffer frame containing
a file page. */
diff --git a/innobase/include/buf0flu.h b/innobase/include/buf0flu.h
index 9317950904f..cb1c0965a65 100644
--- a/innobase/include/buf0flu.h
+++ b/innobase/include/buf0flu.h
@@ -101,7 +101,7 @@ make sure that a read-ahead batch can be read efficiently in a single
sweep). */
#define BUF_FLUSH_FREE_BLOCK_MARGIN (5 + BUF_READ_AHEAD_AREA)
-#define BUF_FLUSH_EXTRA_MARGIN (BUF_FLUSH_FREE_BLOCK_MARGIN / 4)
+#define BUF_FLUSH_EXTRA_MARGIN (BUF_FLUSH_FREE_BLOCK_MARGIN / 4 + 100)
#ifndef UNIV_NONINL
#include "buf0flu.ic"
diff --git a/innobase/include/data0data.h b/innobase/include/data0data.h
index d7f0986b0b6..f695e0989a5 100644
--- a/innobase/include/data0data.h
+++ b/innobase/include/data0data.h
@@ -14,6 +14,9 @@ Created 5/30/1994 Heikki Tuuri
#include "data0types.h"
#include "data0type.h"
#include "mem0mem.h"
+#include "dict0types.h"
+
+typedef struct big_rec_struct big_rec_t;
/* Some non-inlined functions used in the MySQL interface: */
void
@@ -312,6 +315,41 @@ dtuple_sprintf(
char* buf, /* in: print buffer */
ulint buf_len,/* in: buf length in bytes */
dtuple_t* tuple); /* in: tuple */
+/******************************************************************
+Moves parts of long fields in entry to the big record vector so that
+the size of tuple drops below the maximum record size allowed in the
+database. Moves data only from those fields which are not necessary
+to determine uniquely the insertion place of the tuple in the index. */
+
+big_rec_t*
+dtuple_convert_big_rec(
+/*===================*/
+ /* out, own: created big record vector,
+ NULL if we are not able to shorten
+ the entry enough, i.e., if there are
+ too many short fields in entry */
+ dict_index_t* index, /* in: index */
+ dtuple_t* entry); /* in: index entry */
+/******************************************************************
+Puts back to entry the data stored in vector. Note that to ensure the
+fields in entry can accommodate the data, vector must have been created
+from entry with dtuple_convert_big_rec. */
+
+void
+dtuple_convert_back_big_rec(
+/*========================*/
+ dict_index_t* index, /* in: index */
+ dtuple_t* entry, /* in: entry whose data was put to vector */
+ big_rec_t* vector);/* in, own: big rec vector; it is
+ freed in this function */
+/******************************************************************
+Frees the memory in a big rec vector. */
+
+void
+dtuple_big_rec_free(
+/*================*/
+ big_rec_t* vector); /* in, own: big rec vector; it is
+ freed in this function */
/***************************************************************
Generates a random tuple. */
@@ -396,7 +434,7 @@ dtuple_gen_search_tuple_TPC_C(
/* Structure for an SQL data field */
struct dfield_struct{
void* data; /* pointer to data */
- ulint len; /* data length; UNIV_SQL_NULL if SQL null */
+ ulint len; /* data length; UNIV_SQL_NULL if SQL null; */
dtype_t type; /* type of data */
ulint col_no; /* when building index entries, the column
number can be stored here */
@@ -423,6 +461,24 @@ struct dtuple_struct {
};
#define DATA_TUPLE_MAGIC_N 65478679
+/* A slot for a field in a big rec vector */
+
+typedef struct big_rec_field_struct big_rec_field_t;
+struct big_rec_field_struct {
+ ulint field_no; /* field number in record */
+ ulint len; /* stored data len */
+ byte* data; /* stored data */
+};
+
+/* Storage format for overflow data in a big record, that is, a record
+which needs external storage of data fields */
+
+struct big_rec_struct {
+ mem_heap_t* heap; /* memory heap from which allocated */
+ ulint n_fields; /* number of stored fields */
+ big_rec_field_t* fields; /* stored fields */
+};
+
#ifndef UNIV_NONINL
#include "data0data.ic"
#endif
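
A rough sketch of how a caller is meant to consume the big record vector declared above: after dtuple_convert_big_rec() has trimmed the entry and the insert has placed the record, each slot names a field whose data still has to be written to external pages (in the real code via btr_store_big_rec_extern_fields()). The types and names below are simplified stand-ins, not the patch's own code.

#include <stdio.h>

typedef unsigned long ulint;

/* simplified stand-ins for big_rec_field_t / big_rec_t */
typedef struct {
	ulint			field_no;	/* field number in the record */
	ulint			len;		/* length of the stored data */
	const unsigned char*	data;		/* data to store externally */
} big_rec_field_sk_t;

typedef struct {
	ulint			n_fields;
	big_rec_field_sk_t*	fields;
} big_rec_sk_t;

/* Walks the vector the way the caller of btr_cur_optimistic_insert() or
btr_cur_pessimistic_insert() is expected to, handing every listed field
to the external storage routine. */
static void
store_big_rec_fields(const big_rec_sk_t* vec)
{
	ulint	i;

	for (i = 0; i < vec->n_fields; i++) {
		printf("field %lu: write %lu bytes to BLOB pages\n",
		       vec->fields[i].field_no, vec->fields[i].len);
	}
}
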
diff --git a/innobase/include/data0data.ic b/innobase/include/data0data.ic
index 27b5552d338..b886ad6c69c 100644
--- a/innobase/include/data0data.ic
+++ b/innobase/include/data0data.ic
@@ -307,12 +307,13 @@ dtuple_create(
/**************************************************************
The following function returns the sum of data lengths of a tuple. The space
-occupied by the field structs or the tuple struct is not counted. */
+occupied by the field structs or the tuple struct is not counted. Neither
+is possible space in externally stored parts of the field. */
UNIV_INLINE
ulint
dtuple_get_data_size(
/*=================*/
- /* out: sum of data lens */
+ /* out: sum of data lengths */
dtuple_t* tuple) /* in: typed data tuple */
{
dfield_t* field;
@@ -382,7 +383,7 @@ dtuple_datas_are_equal(
field2 = dtuple_get_nth_field(tuple2, i);
data2 = (byte*) dfield_get_data(field2);
- len2 = dfield_get_len(field2);
+ len2 = dfield_get_len(field2);
if (len1 != len2) {
diff --git a/innobase/include/dict0mem.h b/innobase/include/dict0mem.h
index be9cd42b7be..74ecbc8bba2 100644
--- a/innobase/include/dict0mem.h
+++ b/innobase/include/dict0mem.h
@@ -143,7 +143,7 @@ struct dict_col_struct{
ulint clust_pos;/* position of the column in the
clustered index */
ulint ord_part;/* count of how many times this column
- appears in an ordering fields of an index */
+ appears in ordering fields of an index */
char* name; /* name */
dtype_t type; /* data type */
dict_table_t* table; /* back pointer to table of this column */
diff --git a/innobase/include/fil0fil.h b/innobase/include/fil0fil.h
index 9905b5a2c3c..bfc322270fc 100644
--- a/innobase/include/fil0fil.h
+++ b/innobase/include/fil0fil.h
@@ -196,6 +196,16 @@ fil_space_get_size(
/* out: space size */
ulint id); /* in: space id */
/***********************************************************************
+Checks if the pair space, page_no refers to an existing page in a
+tablespace file space. */
+
+ibool
+fil_check_adress_in_tablespace(
+/*===========================*/
+ /* out: TRUE if the address is meaningful */
+ ulint id, /* in: space id */
+ ulint page_no);/* in: page number */
+/***********************************************************************
Appends a new file to the chain of files of a space.
File must be closed. */
diff --git a/innobase/include/fsp0fsp.h b/innobase/include/fsp0fsp.h
index f1be4de4d40..e7f9eab330b 100644
--- a/innobase/include/fsp0fsp.h
+++ b/innobase/include/fsp0fsp.h
@@ -70,7 +70,7 @@ page_t*
fseg_create(
/*========*/
/* out: the page where the segment header is placed,
- x-latched, FIL_NULL if could not create segment
+ x-latched, NULL if could not create segment
because of lack of space */
ulint space, /* in: space id */
ulint page, /* in: page where the segment header is placed: if
diff --git a/innobase/include/mach0data.ic b/innobase/include/mach0data.ic
index 176f3415281..1d6badd035b 100644
--- a/innobase/include/mach0data.ic
+++ b/innobase/include/mach0data.ic
@@ -115,7 +115,7 @@ mach_write_to_4(
{
ut_ad(b);
-#if notdefined && !defined(__STDC__) && defined(UNIV_INTEL) && (UNIV_WORD_SIZE == 4) && defined(UNIV_VISUALC)
+#if (0 == 1) && !defined(__STDC__) && defined(UNIV_INTEL) && (UNIV_WORD_SIZE == 4) && defined(UNIV_VISUALC)
/* We do not use this even on Intel, because unaligned accesses may
be slow */
@@ -143,7 +143,7 @@ mach_read_from_4(
/* out: ulint integer */
byte* b) /* in: pointer to four bytes */
{
-#if notdefined && !defined(__STDC__) && defined(UNIV_INTEL) && (UNIV_WORD_SIZE == 4) && defined(UNIV_VISUALC)
+#if (0 == 1) && !defined(__STDC__) && defined(UNIV_INTEL) && (UNIV_WORD_SIZE == 4) && defined(UNIV_VISUALC)
/* We do not use this even on Intel, because unaligned accesses may
be slow */
diff --git a/innobase/include/os0file.h b/innobase/include/os0file.h
index c093cb92ca9..75bbbba549f 100644
--- a/innobase/include/os0file.h
+++ b/innobase/include/os0file.h
@@ -59,6 +59,10 @@ log. */
#define OS_FILE_AIO 61
#define OS_FILE_NORMAL 62
+/* Types for file create */
+#define OS_DATA_FILE 100
+#define OS_LOG_FILE 101
+
/* Error codes from os_file_get_last_error */
#define OS_FILE_NOT_FOUND 71
#define OS_FILE_DISK_FULL 72
@@ -125,6 +129,7 @@ os_file_create(
if a new file is created or an old overwritten */
ulint purpose,/* in: OS_FILE_AIO, if asynchronous, non-buffered i/o
is desired, OS_FILE_NORMAL, if any normal file */
+ ulint type, /* in: OS_DATA_FILE or OS_LOG_FILE */
ibool* success);/* out: TRUE if succeed, FALSE if error */
/***************************************************************************
Closes a file handle. In case of error, error number can be retrieved with
@@ -263,6 +268,13 @@ os_aio(
operation); if mode is OS_AIO_SYNC, these
are ignored */
void* message2);
+/****************************************************************************
+Waits until there are no pending writes in os_aio_write_array. There can
+be other, synchronous, pending writes. */
+
+void
+os_aio_wait_until_no_pending_writes(void);
+/*=====================================*/
/**************************************************************************
Wakes up simulated aio i/o-handler threads if they have something to do. */
@@ -298,7 +310,8 @@ os_aio_windows_handle(
the aio operation failed, these output
parameters are valid and can be used to
restart the operation, for example */
- void** message2);
+ void** message2,
+ ulint* type); /* out: OS_FILE_WRITE or ..._READ */
#endif
#ifdef POSIX_ASYNC_IO
/**************************************************************************
@@ -335,7 +348,8 @@ os_aio_simulated_handle(
the aio operation failed, these output
parameters are valid and can be used to
restart the operation, for example */
- void** message2);
+ void** message2,
+ ulint* type); /* out: OS_FILE_WRITE or ..._READ */
/**************************************************************************
Validates the consistency of the aio system. */
diff --git a/innobase/include/rem0cmp.h b/innobase/include/rem0cmp.h
index 77b9ef9edc8..10c428cb9ca 100644
--- a/innobase/include/rem0cmp.h
+++ b/innobase/include/rem0cmp.h
@@ -1,7 +1,7 @@
/***********************************************************************
Comparison services for records
-(c) 1994-1996 Innobase Oy
+(c) 1994-2001 Innobase Oy
Created 7/1/1994 Heikki Tuuri
************************************************************************/
@@ -31,14 +31,18 @@ This function is used to compare a data tuple to a physical record.
Only dtuple->n_fields_cmp first fields are taken into account for
the the data tuple! If we denote by n = n_fields_cmp, then rec must
have either m >= n fields, or it must differ from dtuple in some of
-the m fields rec has. */
+the m fields rec has. If rec has an externally stored field, we do not
+compare it but return 0 if such a comparison would be needed. */
int
cmp_dtuple_rec_with_match(
/*======================*/
/* out: 1, 0, -1, if dtuple is greater, equal,
less than rec, respectively, when only the
- common first fields are compared */
+ common first fields are compared, or
+ until the first externally stored field in
+ rec */
dtuple_t* dtuple, /* in: data tuple */
rec_t* rec, /* in: physical record which differs from
dtuple in some of the common fields, or which
@@ -89,7 +93,8 @@ cmp_dtuple_rec_prefix_equal(
fields in dtuple */
/*****************************************************************
This function is used to compare two physical records. Only the common
-first fields are compared. */
+first fields are compared, and if an externally stored field is
+encountered, then 0 is returned. */
int
cmp_rec_rec_with_match(
diff --git a/innobase/include/rem0rec.h b/innobase/include/rem0rec.h
index 62c0aa14519..12e3a8b39d6 100644
--- a/innobase/include/rem0rec.h
+++ b/innobase/include/rem0rec.h
@@ -12,6 +12,7 @@ Created 5/30/1994 Heikki Tuuri
#include "univ.i"
#include "data0data.h"
#include "rem0types.h"
+#include "mtr0types.h"
/* Maximum values for various fields (for non-blob tuples) */
#define REC_MAX_N_FIELDS (1024 - 1)
@@ -162,6 +163,49 @@ rec_get_nth_field_size(
/* out: field size in bytes */
rec_t* rec, /* in: record */
ulint n); /* in: index of the field */
+/***************************************************************
+Gets the value of the ith field extern storage bit. If it is TRUE
+it means that the field is stored on another page. */
+UNIV_INLINE
+ibool
+rec_get_nth_field_extern_bit(
+/*=========================*/
+					/* out: TRUE or FALSE */
+ rec_t* rec, /* in: record */
+ ulint i); /* in: ith field */
+/**********************************************************
+Returns TRUE if the extern bit is set in any of the fields
+of rec. */
+UNIV_INLINE
+ibool
+rec_contains_externally_stored_field(
+/*=================================*/
+ /* out: TRUE if a field is stored externally */
+ rec_t* rec); /* in: record */
+/***************************************************************
+Sets the value of the ith field extern storage bit. */
+
+void
+rec_set_nth_field_extern_bit(
+/*=========================*/
+ rec_t* rec, /* in: record */
+ ulint i, /* in: ith field */
+ ibool val, /* in: value to set */
+ mtr_t* mtr); /* in: mtr holding an X-latch to the page where
+ rec is, or NULL; in the NULL case we do not
+ write to log about the change */
+/***************************************************************
+Sets to TRUE the extern storage bits of the fields mentioned in an array. */
+
+void
+rec_set_field_extern_bits(
+/*======================*/
+ rec_t* rec, /* in: record */
+ ulint* vec, /* in: array of field numbers */
+	ulint	n_fields,	/* in: number of field numbers */
+ mtr_t* mtr); /* in: mtr holding an X-latch to the page
+ where rec is, or NULL; in the NULL case we
+ do not write to log about the change */
/****************************************************************
The following function is used to get a copy of the nth
data field in the record to a buffer. */
@@ -350,6 +394,15 @@ rec_sprintf(
#define REC_INFO_BITS 6 /* This is single byte bit-field */
+/* Maximum lengths for the data in a physical record if the offsets
+are given in one byte (resp. two byte) format. */
+#define REC_1BYTE_OFFS_LIMIT 0x7F
+#define REC_2BYTE_OFFS_LIMIT 0x7FFF
+
+/* The data size of a record must be smaller than this, because we reserve
+the two uppermost bits of a two-byte offset for special purposes */
+#define REC_MAX_DATA_SIZE (16 * 1024)
+
#ifndef UNIV_NONINL
#include "rem0rec.ic"
#endif
diff --git a/innobase/include/rem0rec.ic b/innobase/include/rem0rec.ic
index c63b25374dd..1e9ecb47e2e 100644
--- a/innobase/include/rem0rec.ic
+++ b/innobase/include/rem0rec.ic
@@ -25,12 +25,6 @@ significant bytes and bits are written below less significant.
4 bits info bits
*/
-
-/* Maximum lengths for the data in a physical record if the offsets
-are given as one byte (resp. two byte) format. */
-#define REC_1BYTE_OFFS_LIMIT 0x7F
-#define REC_2BYTE_OFFS_LIMIT 0x7FFF
-
/* We list the byte offsets from the origin of the record, the mask,
and the shift needed to obtain each bit-field of the record. */
@@ -66,6 +60,11 @@ one-byte and two-byte offsets */
#define REC_1BYTE_SQL_NULL_MASK 0x80
#define REC_2BYTE_SQL_NULL_MASK 0x8000
+/* In a 2-byte offset the second most significant bit denotes
+a field stored on another page: */
+
+#define REC_2BYTE_EXTERN_MASK 0x4000
+
/***************************************************************
Sets the value of the ith field SQL null bit. */
@@ -489,7 +488,7 @@ ulint
rec_2_get_field_end_info(
/*=====================*/
/* out: offset of the start of the field, SQL null
- flag ORed */
+ flag and extern storage flag ORed */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
@@ -499,6 +498,63 @@ rec_2_get_field_end_info(
return(mach_read_from_2(rec - (REC_N_EXTRA_BYTES + 2 * n + 2)));
}
+/***************************************************************
+Gets the value of the ith field extern storage bit. If it is TRUE
+it means that the field is stored on another page. */
+UNIV_INLINE
+ibool
+rec_get_nth_field_extern_bit(
+/*=========================*/
+					/* out: TRUE or FALSE */
+ rec_t* rec, /* in: record */
+ ulint i) /* in: ith field */
+{
+ ulint info;
+
+ if (rec_get_1byte_offs_flag(rec)) {
+
+ return(FALSE);
+ }
+
+ info = rec_2_get_field_end_info(rec, i);
+
+ if (info & REC_2BYTE_EXTERN_MASK) {
+ return(TRUE);
+ }
+
+ return(FALSE);
+}
+
+/**********************************************************
+Returns TRUE if the extern bit is set in any of the fields
+of rec. */
+UNIV_INLINE
+ibool
+rec_contains_externally_stored_field(
+/*=================================*/
+ /* out: TRUE if a field is stored externally */
+ rec_t* rec) /* in: record */
+{
+ ulint n;
+ ulint i;
+
+ if (rec_get_1byte_offs_flag(rec)) {
+
+ return(FALSE);
+ }
+
+ n = rec_get_n_fields(rec);
+
+ for (i = 0; i < n; i++) {
+ if (rec_get_nth_field_extern_bit(rec, i)) {
+
+ return(TRUE);
+ }
+ }
+
+ return(FALSE);
+}
+
/**********************************************************
Returns the offset of n - 1th field end if the record is stored in the 1-byte
offsets form. If the field is SQL null, the flag is ORed in the returned
@@ -616,7 +672,7 @@ rec_2_get_field_start_offs(
}
return(rec_2_get_prev_field_end_info(rec, n)
- & ~REC_2BYTE_SQL_NULL_MASK);
+ & ~(REC_2BYTE_SQL_NULL_MASK | REC_2BYTE_EXTERN_MASK));
}
/**********************************************************
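
As a side note on the two flag bits above, the following standalone sketch (not the patch's code) shows how a 2-byte field end info value splits into the SQL-null bit, the extern bit, and the actual end offset, mirroring what rec_2_get_field_start_offs() does.

#include <stdio.h>

typedef unsigned long ulint;

#define REC_2BYTE_SQL_NULL_MASK	0x8000UL
#define REC_2BYTE_EXTERN_MASK	0x4000UL

static void
print_end_info(ulint info)
{
	int	is_null	  = (info & REC_2BYTE_SQL_NULL_MASK) != 0;
	int	is_extern = (info & REC_2BYTE_EXTERN_MASK) != 0;
	/* both reserved bits must be masked away to get the offset */
	ulint	end_offs  = info
		& ~(REC_2BYTE_SQL_NULL_MASK | REC_2BYTE_EXTERN_MASK);

	printf("end offset %lu, SQL null %d, extern %d\n",
	       end_offs, is_null, is_extern);
}
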
diff --git a/innobase/include/row0ins.h b/innobase/include/row0ins.h
index 94b0e8dec37..612b9e8d73a 100644
--- a/innobase/include/row0ins.h
+++ b/innobase/include/row0ins.h
@@ -56,6 +56,9 @@ row_ins_index_entry_low(
pessimistic descent down the index tree */
dict_index_t* index, /* in: index */
dtuple_t* entry, /* in: index entry to insert */
+ ulint* ext_vec,/* in: array containing field numbers of
+ externally stored fields in entry, or NULL */
+ ulint n_ext_vec,/* in: number of fields in ext_vec */
que_thr_t* thr); /* in: query thread */
/*******************************************************************
Inserts an index entry to index. Tries first optimistic, then pessimistic
@@ -70,6 +73,9 @@ row_ins_index_entry(
DB_DUPLICATE_KEY, or some other error code */
dict_index_t* index, /* in: index */
dtuple_t* entry, /* in: index entry to insert */
+ ulint* ext_vec,/* in: array containing field numbers of
+ externally stored fields in entry, or NULL */
+ ulint n_ext_vec,/* in: number of fields in ext_vec */
que_thr_t* thr); /* in: query thread */
/***************************************************************
Inserts a row to a table. */
diff --git a/innobase/include/row0mysql.h b/innobase/include/row0mysql.h
index 554da2c035c..31f9e15cddc 100644
--- a/innobase/include/row0mysql.h
+++ b/innobase/include/row0mysql.h
@@ -189,7 +189,9 @@ row_update_for_mysql(
row_prebuilt_t* prebuilt); /* in: prebuilt struct in MySQL
handle */
/*************************************************************************
-Does a table creation operation for MySQL. */
+Does a table creation operation for MySQL. If the name of the created
+table ends with the characters INNODB_MONITOR, then this also starts
+printing of monitor output by the master thread. */
int
row_create_table_for_mysql(
@@ -209,7 +211,9 @@ row_create_index_for_mysql(
dict_index_t* index, /* in: index defintion */
trx_t* trx); /* in: transaction handle */
/*************************************************************************
-Drops a table for MySQL. */
+Drops a table for MySQL. If the name of the dropped table ends with
+the characters INNODB_MONITOR, then this also stops printing of monitor
+output by the master thread. */
int
row_drop_table_for_mysql(
diff --git a/innobase/include/row0row.h b/innobase/include/row0row.h
index fb1e1b01ee3..09a79e19fd7 100644
--- a/innobase/include/row0row.h
+++ b/innobase/include/row0row.h
@@ -250,6 +250,7 @@ row_search_index_entry(
#define ROW_COPY_DATA 1
#define ROW_COPY_POINTERS 2
+#define ROW_COPY_ALSO_EXTERNALS 3
/* The allowed latching order of index records is the following:
(1) a secondary index record ->
diff --git a/innobase/include/row0upd.h b/innobase/include/row0upd.h
index 3046345f446..9bb73726b29 100644
--- a/innobase/include/row0upd.h
+++ b/innobase/include/row0upd.h
@@ -147,6 +147,9 @@ row_upd_build_difference(
fields, excluding roll ptr and trx id */
dict_index_t* index, /* in: clustered index */
dtuple_t* entry, /* in: entry to insert */
+ ulint* ext_vec,/* in: array containing field numbers of
+ externally stored fields in entry, or NULL */
+ ulint n_ext_vec,/* in: number of fields in ext_vec */
rec_t* rec, /* in: clustered index record */
mem_heap_t* heap); /* in: memory heap from which allocated */
/***************************************************************
@@ -262,6 +265,9 @@ struct upd_field_struct{
constants in the symbol table of the
query graph */
dfield_t new_val; /* new value for the column */
+ ibool extern_storage; /* this is set to TRUE if dfield
+ actually contains a reference to
+ an externally stored field */
};
/* Update vector structure */
@@ -318,6 +324,10 @@ struct upd_node_struct{
dtuple_t* row; /* NULL, or a copy (also fields copied to
heap) of the row to update; this must be reset
to NULL after a successful update */
+ ulint* ext_vec;/* array describing which fields are stored
+ externally in the clustered index record of
+ row */
+ ulint n_ext_vec;/* number of fields in ext_vec */
mem_heap_t* heap; /* memory heap used as auxiliary storage for
row; this must be emptied after a successful
update if node->row != NULL */
@@ -349,7 +359,7 @@ struct upd_node_struct{
looked at and updated if an ordering
field changed */
-/* Compilation info flags: these must fit within one byte */
+/* Compilation info flags: these must fit within 3 bits; see trx0rec.h */
#define UPD_NODE_NO_ORD_CHANGE 1 /* no secondary index record will be
changed in the update and no ordering
field of the clustered index */
diff --git a/innobase/include/row0upd.ic b/innobase/include/row0upd.ic
index b1b10bef0e8..b785e52caa0 100644
--- a/innobase/include/row0upd.ic
+++ b/innobase/include/row0upd.ic
@@ -23,6 +23,7 @@ upd_create(
mem_heap_t* heap) /* in: heap from which memory allocated */
{
upd_t* update;
+ ulint i;
update = mem_heap_alloc(heap, sizeof(upd_t));
@@ -30,6 +31,10 @@ upd_create(
update->n_fields = n;
update->fields = mem_heap_alloc(heap, sizeof(upd_field_t) * n);
+ for (i = 0; i < n; i++) {
+ update->fields[i].extern_storage = 0;
+ }
+
return(update);
}
diff --git a/innobase/include/srv0srv.h b/innobase/include/srv0srv.h
index f80abda19c6..e635964e5ec 100644
--- a/innobase/include/srv0srv.h
+++ b/innobase/include/srv0srv.h
@@ -27,6 +27,9 @@ extern char** srv_data_file_names;
extern ulint* srv_data_file_sizes;
extern ulint* srv_data_file_is_raw_partition;
+#define SRV_NEW_RAW 1
+#define SRV_OLD_RAW 2
+
extern char** srv_log_group_home_dirs;
extern ulint srv_n_log_groups;
@@ -52,10 +55,14 @@ extern ulint srv_lock_wait_timeout;
extern char* srv_unix_file_flush_method_str;
extern ulint srv_unix_file_flush_method;
+extern ibool srv_use_doublewrite_buf;
+
extern ibool srv_set_thread_priorities;
extern int srv_query_thread_priority;
/*-------------------------------------------*/
+
+extern ibool srv_print_innodb_monitor;
extern ulint srv_n_spin_wait_rounds;
extern ulint srv_spin_wait_delay;
extern ibool srv_priority_boost;
@@ -104,26 +111,13 @@ typedef struct srv_sys_struct srv_sys_t;
/* The server system */
extern srv_sys_t* srv_sys;
-/* Alternatives for file flush option in Unix; see the InnoDB manual about
+/* Alternatives for file flush option in Unix; see the InnoDB manual about
what these mean */
#define SRV_UNIX_FDATASYNC 1
#define SRV_UNIX_O_DSYNC 2
#define SRV_UNIX_LITTLESYNC 3
#define SRV_UNIX_NOSYNC 4
-/* Raw partition flags */
-#define SRV_OLD_RAW 1
-#define SRV_NEW_RAW 2
-
-void
-srv_mysql_thread_release(void);
-/*==========================*/
-os_event_t
-srv_mysql_thread_event_get(void);
-void
-srv_mysql_thread_slot_free(
-/*==========================*/
- os_event_t event);
/*************************************************************************
Boots Innobase server. */
diff --git a/innobase/include/sync0sync.h b/innobase/include/sync0sync.h
index 4b12dd3c86d..cb86b2b815c 100644
--- a/innobase/include/sync0sync.h
+++ b/innobase/include/sync0sync.h
@@ -393,6 +393,7 @@ Memory pool mutex */
#define SYNC_RSEG_HEADER_NEW 591
#define SYNC_RSEG_HEADER 590
#define SYNC_TRX_UNDO_PAGE 570
+#define SYNC_EXTERN_STORAGE 500
#define SYNC_FSP 400
#define SYNC_FSP_PAGE 395
/*------------------------------------- Insert buffer headers */
@@ -415,6 +416,7 @@ Memory pool mutex */
the level is SYNC_MEM_HASH. */
#define SYNC_BUF_POOL 150
#define SYNC_BUF_BLOCK 149
+#define SYNC_DOUBLEWRITE 140
#define SYNC_ANY_LATCH 135
#define SYNC_MEM_HASH 131
#define SYNC_MEM_POOL 130
diff --git a/innobase/include/trx0rec.h b/innobase/include/trx0rec.h
index ea9e9f3fce5..edfc283d1b2 100644
--- a/innobase/include/trx0rec.h
+++ b/innobase/include/trx0rec.h
@@ -45,6 +45,14 @@ trx_undo_rec_get_cmpl_info(
/* out: compiler info */
trx_undo_rec_t* undo_rec); /* in: undo log record */
/**************************************************************************
+Returns TRUE if an undo log record contains an extern storage field. */
+UNIV_INLINE
+ibool
+trx_undo_rec_get_extern_storage(
+/*============================*/
+ /* out: TRUE if extern */
+ trx_undo_rec_t* undo_rec); /* in: undo log record */
+/**************************************************************************
Reads the undo log record number. */
UNIV_INLINE
dulint
@@ -65,6 +73,8 @@ trx_undo_rec_get_pars(
TRX_UNDO_INSERT_REC, ... */
ulint* cmpl_info, /* out: compiler info, relevant only
for update type records */
+ ibool* updated_extern, /* out: TRUE if we updated an
+					externally stored field */
dulint* undo_no, /* out: undo log record number */
dulint* table_id); /* out: table id */
/***********************************************************************
@@ -272,7 +282,11 @@ record */
do not change */
#define TRX_UNDO_CMPL_INFO_MULT 16 /* compilation info is multiplied by
this and ORed to the type above */
-
+#define TRX_UNDO_UPD_EXTERN 128 /* This bit can be ORed to type_cmpl
+ to denote that we updated external
+ storage fields: used by purge to
+ free the external storage */
+
/* Operation type flags used in trx_undo_report_row_operation */
#define TRX_UNDO_INSERT_OP 1
#define TRX_UNDO_MODIFY_OP 2
diff --git a/innobase/include/trx0rec.ic b/innobase/include/trx0rec.ic
index f813a52ff9c..cd02ed9e04c 100644
--- a/innobase/include/trx0rec.ic
+++ b/innobase/include/trx0rec.ic
@@ -31,6 +31,23 @@ trx_undo_rec_get_cmpl_info(
}
/**************************************************************************
+Returns TRUE if an undo log record contains an extern storage field. */
+UNIV_INLINE
+ibool
+trx_undo_rec_get_extern_storage(
+/*============================*/
+ /* out: TRUE if extern */
+ trx_undo_rec_t* undo_rec) /* in: undo log record */
+{
+ if (mach_read_from_1(undo_rec + 2) & TRX_UNDO_UPD_EXTERN) {
+
+ return(TRUE);
+ }
+
+ return(FALSE);
+}
+
+/**************************************************************************
Reads the undo log record number. */
UNIV_INLINE
dulint
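
The TRX_UNDO_UPD_EXTERN flag above is packed into the same byte as the undo record type and the compilation info. The sketch below shows one plausible decoding consistent with the constants in trx0rec.h (type in the low bits, cmpl_info multiplied by TRX_UNDO_CMPL_INFO_MULT, the extern flag ORed on top); the exact split of the low bits is an assumption here, not taken from the patch.

typedef unsigned long ulint;

#define TRX_UNDO_CMPL_INFO_MULT	16UL
#define TRX_UNDO_UPD_EXTERN	128UL

/* Splits a type_cmpl byte into its three parts. */
static void
decode_type_cmpl(ulint type_cmpl, ulint* type, ulint* cmpl_info,
		 int* updated_extern)
{
	*updated_extern = (type_cmpl & TRX_UNDO_UPD_EXTERN) != 0;

	type_cmpl &= ~TRX_UNDO_UPD_EXTERN;

	*type	   = type_cmpl % TRX_UNDO_CMPL_INFO_MULT;
	*cmpl_info = type_cmpl / TRX_UNDO_CMPL_INFO_MULT;
}
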
diff --git a/innobase/include/trx0sys.h b/innobase/include/trx0sys.h
index d0506dd65b7..e26f7e19850 100644
--- a/innobase/include/trx0sys.h
+++ b/innobase/include/trx0sys.h
@@ -27,6 +27,23 @@ Created 3/26/1996 Heikki Tuuri
/* The transaction system */
extern trx_sys_t* trx_sys;
+/* Doublewrite system */
+extern trx_doublewrite_t* trx_doublewrite;
+
+/********************************************************************
+Creates the doublewrite buffer at a database start. The header of the
+doublewrite buffer is placed on the trx system header page. */
+
+void
+trx_sys_create_doublewrite_buf(void);
+/*================================*/
+/********************************************************************
+At a database startup, this uses a possible doublewrite buffer to restore
+half-written pages in the data files. */
+
+void
+trx_sys_doublewrite_restore_corrupt_pages(void);
+/*===========================================*/
/*******************************************************************
Checks if a page address is the trx sys header page. */
UNIV_INLINE
@@ -235,6 +252,59 @@ therefore 256 */
segment specification slots */
/*-------------------------------------------------------------*/
+/* The offset of the doublewrite buffer header on the trx system header page */
+#define TRX_SYS_DOUBLEWRITE (UNIV_PAGE_SIZE - 200)
+/*-------------------------------------------------------------*/
+#define TRX_SYS_DOUBLEWRITE_FSEG 0 /* fseg header of the fseg
+ containing the doublewrite
+ buffer */
+#define TRX_SYS_DOUBLEWRITE_MAGIC FSEG_HEADER_SIZE
+ /* 4-byte magic number which
+ shows if we already have
+ created the doublewrite
+ buffer */
+#define TRX_SYS_DOUBLEWRITE_BLOCK1 (4 + FSEG_HEADER_SIZE)
+ /* page number of the
+ first page in the first
+ sequence of 64
+ (= FSP_EXTENT_SIZE) consecutive
+ pages in the doublewrite
+ buffer */
+#define TRX_SYS_DOUBLEWRITE_BLOCK2 (8 + FSEG_HEADER_SIZE)
+ /* page number of the
+ first page in the second
+ sequence of 64 consecutive
+ pages in the doublewrite
+ buffer */
+#define TRX_SYS_DOUBLEWRITE_REPEAT 12 /* we repeat the above 3
+ numbers so that if the trx
+ sys header is half-written
+ to disk, we still may be able
+ to recover the information */
+/*-------------------------------------------------------------*/
+#define TRX_SYS_DOUBLEWRITE_MAGIC_N 536853855
+
+#define TRX_SYS_DOUBLEWRITE_BLOCK_SIZE FSP_EXTENT_SIZE
+
+/* Doublewrite control struct */
+struct trx_doublewrite_struct{
+ mutex_t mutex; /* mutex protecting the first_free field and
+ write_buf */
+ ulint block1; /* the page number of the first
+ doublewrite block (64 pages) */
+ ulint block2; /* page number of the second block */
+ ulint first_free; /* first free position in write_buf measured
+ in units of UNIV_PAGE_SIZE */
+ byte* write_buf; /* write buffer used in writing to the
+ doublewrite buffer, aligned to an
+ address divisible by UNIV_PAGE_SIZE
+ (which is required by Windows aio) */
+ byte* write_buf_unaligned; /* pointer to write_buf, but unaligned */
+ buf_block_t**
+ buf_block_arr; /* array to store pointers to the buffer
+ blocks which have been cached to write_buf */
+};
+
/* The transaction system central memory data structure; protected by the
kernel mutex */
struct trx_sys_struct{
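
A standalone sketch (not from the patch) of how the doublewrite header offsets above locate the buffer on the trx system header page. The page size, fseg header size, and most-significant-byte-first read are assumptions stated in the code; the real values come from univ.i and fsp0fsp.h.

typedef unsigned long ulint;

#define UNIV_PAGE_SIZE_SK	16384UL	/* assumed page size */
#define FSEG_HEADER_SIZE_SK	10UL	/* assumed fseg header size */

#define DBLWR_HDR	(UNIV_PAGE_SIZE_SK - 200)	/* TRX_SYS_DOUBLEWRITE */
#define DBLWR_MAGIC	FSEG_HEADER_SIZE_SK
#define DBLWR_BLOCK1	(4 + FSEG_HEADER_SIZE_SK)
#define DBLWR_BLOCK2	(8 + FSEG_HEADER_SIZE_SK)
#define DBLWR_MAGIC_N	536853855UL

static ulint
read_4(const unsigned char* b)
{
	return(((ulint) b[0] << 24) | ((ulint) b[1] << 16)
	       | ((ulint) b[2] << 8) | (ulint) b[3]);
}

/* Returns 1 and fills in the two block start pages if the trx sys page
already carries a doublewrite buffer, 0 if it still has to be created. */
static int
doublewrite_is_created(const unsigned char* trx_sys_page,
		       ulint* block1, ulint* block2)
{
	const unsigned char*	hdr = trx_sys_page + DBLWR_HDR;

	if (read_4(hdr + DBLWR_MAGIC) != DBLWR_MAGIC_N) {

		return(0);
	}

	*block1 = read_4(hdr + DBLWR_BLOCK1);
	*block2 = read_4(hdr + DBLWR_BLOCK2);

	return(1);
}
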
diff --git a/innobase/include/trx0types.h b/innobase/include/trx0types.h
index 02da1605077..b8befe7172f 100644
--- a/innobase/include/trx0types.h
+++ b/innobase/include/trx0types.h
@@ -15,6 +15,7 @@ Created 3/26/1996 Heikki Tuuri
/* Memory objects */
typedef struct trx_struct trx_t;
typedef struct trx_sys_struct trx_sys_t;
+typedef struct trx_doublewrite_struct trx_doublewrite_t;
typedef struct trx_sig_struct trx_sig_t;
typedef struct trx_rseg_struct trx_rseg_t;
typedef struct trx_undo_struct trx_undo_t;
diff --git a/innobase/include/trx0undo.h b/innobase/include/trx0undo.h
index 82c21f756e6..7f0378c68d3 100644
--- a/innobase/include/trx0undo.h
+++ b/innobase/include/trx0undo.h
@@ -341,7 +341,9 @@ struct trx_undo_struct{
have delete marked records, because of
a delete of a row or an update of an
indexed field; purge is then
- necessary. */
+ necessary; also TRUE if the transaction
+ has updated an externally stored
+ field */
dulint trx_id; /* id of the trx assigned to the undo
log */
ibool dict_operation; /* TRUE if a dict operation trx */
diff --git a/innobase/include/univ.i b/innobase/include/univ.i
index 73bf48b1bc0..6ffbb1b8fef 100644
--- a/innobase/include/univ.i
+++ b/innobase/include/univ.i
@@ -9,11 +9,12 @@ Created 1/20/1994 Heikki Tuuri
#ifndef univ_i
#define univ_i
-#undef UNIV_INTEL_X86
-
-#if (defined(_WIN32) || defined(_WIN64)) && !defined(MYSQL_SERVER)
+#if (defined(_WIN32) || defined(_WIN64))
#define __WIN__
+
+#ifndef MYSQL_SERVER
#include <windows.h>
+#endif
/* If you want to check for errors with compiler level -W4,
comment out the above include of windows.h and let the following defines
@@ -40,10 +41,8 @@ subdirectory of 'mysql'. */
#include <global.h>
#include <my_pthread.h>
-#ifndef __WIN__
/* Include <sys/stat.h> to get S_I... macros defined for os0file.c */
#include <sys/stat.h>
-#endif
#undef PACKAGE
#undef VERSION
@@ -63,19 +62,21 @@ subdirectory of 'mysql'. */
/* DEBUG VERSION CONTROL
===================== */
+
+/*
+#define UNIV_SYNC_DEBUG
+*/
+
/* Make a non-inline debug version */
/*
#define UNIV_DEBUG
#define UNIV_MEM_DEBUG
-#define UNIV_SYNC_DEBUG
#define UNIV_SEARCH_DEBUG
#define UNIV_IBUF_DEBUG
#define UNIV_SYNC_PERF_STAT
#define UNIV_SEARCH_PERF_STAT
-
-#define UNIV_DEBUG_FILE_ACCESSES
*/
#define UNIV_LIGHT_MEM_DEBUG
@@ -192,6 +193,13 @@ headers may define 'bool' differently. Do not assume that 'bool' is a ulint! */
has the SQL NULL as its value. */
#define UNIV_SQL_NULL ULINT_UNDEFINED
+/* Lengths which are not UNIV_SQL_NULL, but bigger than the following
+number indicate that a field contains a reference to an externally
+stored part of the field in the tablespace. The length field then
+contains the sum of the following flag and the locally stored len. */
+
+#define UNIV_EXTERN_STORAGE_FIELD (UNIV_SQL_NULL - UNIV_PAGE_SIZE)
+
/* The following definition of __FILE__ removes compiler warnings
associated with const char* / char* mismatches with __FILE__ */
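
To make the length encoding described above concrete, here is a small standalone sketch (not the patch's code). The concrete values of ULINT_UNDEFINED and UNIV_PAGE_SIZE are assumptions for a 32-bit ulint; only the comparison pattern matters.

typedef unsigned long ulint;

#define ULINT_UNDEFINED_SK	0xFFFFFFFFUL	/* assumed */
#define UNIV_SQL_NULL_SK	ULINT_UNDEFINED_SK
#define UNIV_PAGE_SIZE_SK	16384UL		/* assumed */
#define UNIV_EXTERN_STORAGE_FIELD_SK \
	(UNIV_SQL_NULL_SK - UNIV_PAGE_SIZE_SK)

/* Returns the locally stored length of a field and reports via
*is_extern whether the length value carries the extern storage flag. */
static ulint
field_local_len(ulint stored_len, int* is_extern)
{
	if (stored_len != UNIV_SQL_NULL_SK
	    && stored_len > UNIV_EXTERN_STORAGE_FIELD_SK) {

		*is_extern = 1;

		return(stored_len - UNIV_EXTERN_STORAGE_FIELD_SK);
	}

	*is_extern = 0;

	return(stored_len);
}
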
diff --git a/innobase/include/ut0dbg.h b/innobase/include/ut0dbg.h
index 657d1bf95b2..fc5d493ca5e 100644
--- a/innobase/include/ut0dbg.h
+++ b/innobase/include/ut0dbg.h
@@ -41,7 +41,7 @@ extern ulint* ut_dbg_null_ptr;
}\
if (ut_dbg_stop_threads) {\
fprintf(stderr,\
- "Innobase: Thread %lu stopped in file %s line %lu\n",\
+ "InnoDB: Thread %lu stopped in file %s line %lu\n",\
os_thread_get_curr_id(), IB__FILE__, (ulint)__LINE__);\
os_thread_sleep(1000000000);\
}\
@@ -50,19 +50,17 @@ extern ulint* ut_dbg_null_ptr;
#define ut_error {\
ulint dbg_i;\
fprintf(stderr,\
- "Innobase: Assertion failure in thread %lu in file %s line %lu\n",\
+ "InnoDB: Assertion failure in thread %lu in file %s line %lu\n",\
os_thread_get_curr_id(), IB__FILE__, (ulint)__LINE__);\
fprintf(stderr,\
- "Innobase: we intentionally generate a memory trap.\n");\
+ "InnoDB: We intentionally generate a memory trap.\n");\
fprintf(stderr,\
- "Innobase: Send a bug report to mysql@lists.mysql.com\n");\
+ "InnoDB: Send a detailed bug report to mysql@lists.mysql.com\n");\
ut_dbg_stop_threads = TRUE;\
dbg_i = *(ut_dbg_null_ptr);\
printf("%lu", dbg_i);\
}
-
-
#ifdef UNIV_DEBUG
#define ut_ad(EXPR) ut_a(EXPR)
#define ut_d(EXPR) {EXPR;}
diff --git a/innobase/include/ut0ut.h b/innobase/include/ut0ut.h
index 1e93a2b8a36..e1813e763bd 100644
--- a/innobase/include/ut0ut.h
+++ b/innobase/include/ut0ut.h
@@ -11,8 +11,7 @@ Created 1/20/1994 Heikki Tuuri
#include "univ.i"
#include <time.h>
-#include <m_ctype.h>
-
+#include <ctype.h>
typedef time_t ib_time_t;
diff --git a/innobase/lock/lock0lock.c b/innobase/lock/lock0lock.c
index 79fb66459b2..5f8f538f392 100644
--- a/innobase/lock/lock0lock.c
+++ b/innobase/lock/lock0lock.c
@@ -3219,6 +3219,7 @@ lock_rec_print(
ulint space;
ulint page_no;
ulint i;
+ ulint count = 0;
mtr_t mtr;
ut_ad(mutex_own(&kernel_mutex));
@@ -3230,7 +3231,8 @@ lock_rec_print(
printf("\nRECORD LOCKS space id %lu page no %lu n bits %lu",
space, page_no, lock_rec_get_n_bits(lock));
- printf(" index %s trx id %lu %lu", (lock->index)->name,
+ printf(" table %s index %s trx id %lu %lu",
+ lock->index->table->name, lock->index->name,
(lock->trx)->id.high, (lock->trx)->id.low);
if (lock_get_mode(lock) == LOCK_S) {
@@ -3281,10 +3283,18 @@ lock_rec_print(
rec_print(page_find_rec_with_heap_no(page, i));
}
+ count++;
+
printf("\n");
}
- }
+ if (count >= 3) {
+ printf(
+ "3 LOCKS PRINTED FOR THIS TRX AND PAGE: SUPPRESSING FURTHER PRINTS\n");
+ goto end_prints;
+ }
+ }
+end_prints:
mtr_commit(&mtr);
}
@@ -3335,7 +3345,6 @@ lock_print_info(void)
lock_mutex_enter_kernel();
- printf("------------------------------------\n");
printf("LOCK INFO:\n");
printf("Number of locks in the record hash table %lu\n",
lock_get_n_rec_locks());
@@ -3352,7 +3361,7 @@ loop:
if (trx == NULL) {
lock_mutex_exit_kernel();
- lock_validate();
+ /* lock_validate(); */
return;
}
@@ -3360,6 +3369,19 @@ loop:
if (nth_lock == 0) {
printf("\nLOCKS FOR TRANSACTION ID %lu %lu\n", trx->id.high,
trx->id.low);
+ if (trx->que_state == TRX_QUE_LOCK_WAIT) {
+ printf(
+ "################# TRX IS WAITING FOR THE LOCK: ###\n");
+
+ if (lock_get_type(trx->wait_lock) == LOCK_REC) {
+ lock_rec_print(trx->wait_lock);
+ } else {
+ lock_table_print(trx->wait_lock);
+ }
+
+ printf(
+ "##################################################\n");
+ }
}
i = 0;
@@ -3409,6 +3431,16 @@ loop:
nth_lock++;
+ if (nth_lock >= 25) {
+ printf(
+ "25 LOCKS PRINTED FOR THIS TRX: SUPPRESSING FURTHER PRINTS\n");
+
+ nth_trx++;
+ nth_lock = 0;
+
+ goto loop;
+ }
+
goto loop;
}
diff --git a/innobase/log/log0log.c b/innobase/log/log0log.c
index 31cf595e59e..351ea7f2fd5 100644
--- a/innobase/log/log0log.c
+++ b/innobase/log/log0log.c
@@ -838,7 +838,9 @@ log_io_complete(
/* It was a checkpoint write */
group = (log_group_t*)((ulint)group - 1);
- if (srv_unix_file_flush_method == SRV_UNIX_LITTLESYNC) {
+ if (srv_unix_file_flush_method != SRV_UNIX_O_DSYNC
+ && srv_unix_file_flush_method != SRV_UNIX_NOSYNC) {
+
fil_flush(group->space_id);
}
@@ -847,7 +849,9 @@ log_io_complete(
return;
}
- if (srv_unix_file_flush_method == SRV_UNIX_LITTLESYNC) {
+ if (srv_unix_file_flush_method != SRV_UNIX_O_DSYNC
+ && srv_unix_file_flush_method != SRV_UNIX_NOSYNC) {
+
fil_flush(group->space_id);
}
@@ -1478,7 +1482,7 @@ log_checkpoint(
recv_apply_hashed_log_recs(TRUE);
}
- if (srv_unix_file_flush_method == SRV_UNIX_LITTLESYNC) {
+ if (srv_unix_file_flush_method != SRV_UNIX_NOSYNC) {
fil_flush_file_spaces(FIL_TABLESPACE);
}
@@ -1885,10 +1889,11 @@ loop:
fil_reserve_right_to_open();
file_handle = os_file_create(name, open_mode, OS_FILE_AIO,
- &ret);
+ OS_DATA_FILE, &ret);
+
if (!ret && (open_mode == OS_FILE_CREATE)) {
file_handle = os_file_create(name, OS_FILE_OPEN,
- OS_FILE_AIO, &ret);
+ OS_FILE_AIO, OS_DATA_FILE, &ret);
}
if (!ret) {
diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c
index d16085a2d6f..edab98fa39c 100644
--- a/innobase/log/log0recv.c
+++ b/innobase/log/log0recv.c
@@ -2234,7 +2234,8 @@ try_open_again:
fil_reserve_right_to_open();
- file_handle = os_file_create(name, OS_FILE_OPEN, OS_FILE_AIO, &ret);
+ file_handle = os_file_create(name, OS_FILE_OPEN,
+ OS_FILE_LOG, OS_FILE_AIO, &ret);
if (ret == FALSE) {
fil_release_right_to_open();
diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c
index d3c6232031a..0525fd7b59a 100644
--- a/innobase/os/os0file.c
+++ b/innobase/os/os0file.c
@@ -10,6 +10,7 @@ Created 10/21/1995 Heikki Tuuri
#include "os0sync.h"
#include "ut0mem.h"
#include "srv0srv.h"
+#include "trx0sys.h"
#undef HAVE_FDATASYNC
@@ -74,9 +75,12 @@ typedef struct os_aio_array_struct os_aio_array_t;
struct os_aio_array_struct{
os_mutex_t mutex; /* the mutex protecting the aio array */
- os_event_t not_full; /* The event which is set to signaled
+ os_event_t not_full; /* The event which is set to the signaled
state when there is space in the aio
outside the ibuf segment */
+ os_event_t is_empty; /* The event which is set to the signaled
+ state when there are no pending i/os
+ in this array */
ulint n_slots; /* Total number of slots in the aio array.
This must be divisible by n_threads. */
ulint n_segments;/* Number of segments in the aio array of
@@ -254,6 +258,7 @@ os_file_create(
if a new is created or an old overwritten */
ulint purpose,/* in: OS_FILE_AIO, if asynchronous, non-buffered i/o
is desired, OS_FILE_NORMAL, if any normal file */
+ ulint type, /* in: OS_DATA_FILE or OS_LOG_FILE */
ibool* success)/* out: TRUE if succeed, FALSE if error */
{
#ifdef __WIN__
@@ -347,11 +352,10 @@ try_again:
UT_NOT_USED(purpose);
- /* Currently use only O_SYNC because there may be a bug in
- Linux O_DSYNC! */
-
#ifdef O_SYNC
- if (srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) {
+ if ((!srv_use_doublewrite_buf || type != OS_DATA_FILE)
+ && srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) {
+
create_flag = create_flag | O_SYNC;
}
#endif
@@ -551,12 +555,6 @@ os_file_flush(
#else
int ret;
-#ifdef O_DSYNC
- if (srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) {
- return(TRUE);
- }
-#endif
-
#ifdef HAVE_FDATASYNC
ret = fdatasync(file);
#else
@@ -637,7 +635,8 @@ os_file_pwrite(
ret = pwrite(file, buf, n, offs);
if (srv_unix_file_flush_method != SRV_UNIX_LITTLESYNC
- && srv_unix_file_flush_method != SRV_UNIX_NOSYNC) {
+ && srv_unix_file_flush_method != SRV_UNIX_NOSYNC
+ && !trx_doublewrite) {
/* Always do fsync to reduce the probability that when
the OS crashes, a database page is only partially
@@ -666,7 +665,8 @@ os_file_pwrite(
ret = write(file, buf, n);
if (srv_unix_file_flush_method != SRV_UNIX_LITTLESYNC
- && srv_unix_file_flush_method != SRV_UNIX_NOSYNC) {
+ && srv_unix_file_flush_method != SRV_UNIX_NOSYNC
+ && !trx_doublewrite) {
/* Always do fsync to reduce the probability that when
the OS crashes, a database page is only partially
@@ -825,7 +825,9 @@ try_again:
/* Always do fsync to reduce the probability that when the OS crashes,
a database page is only partially physically written to disk. */
- ut_a(TRUE == os_file_flush(file));
+ if (!trx_doublewrite) {
+ ut_a(TRUE == os_file_flush(file));
+ }
os_mutex_exit(os_file_seek_mutexes[i]);
@@ -900,6 +902,10 @@ os_aio_array_create(
array->mutex = os_mutex_create(NULL);
array->not_full = os_event_create(NULL);
+ array->is_empty = os_event_create(NULL);
+
+ os_event_set(array->is_empty);
+
array->n_slots = n;
array->n_segments = n_segments;
array->n_reserved = 0;
@@ -999,6 +1005,17 @@ os_aio_init(
#endif
}
+/****************************************************************************
+Waits until there are no pending writes in os_aio_write_array. There can
+be other, synchronous, pending writes. */
+
+void
+os_aio_wait_until_no_pending_writes(void)
+/*=====================================*/
+{
+ os_event_wait(os_aio_write_array->is_empty);
+}
+
/**************************************************************************
Calculates segment number for a slot. */
static
@@ -1191,6 +1208,10 @@ loop:
array->n_reserved++;
+ if (array->n_reserved == 1) {
+ os_event_reset(array->is_empty);
+ }
+
if (array->n_reserved == array->n_slots) {
os_event_reset(array->not_full);
}
@@ -1264,6 +1285,10 @@ os_aio_array_free_slot(
os_event_set(array->not_full);
}
+ if (array->n_reserved == 0) {
+ os_event_set(array->is_empty);
+ }
+
#ifdef WIN_ASYNC_IO
os_event_reset(slot->control.hEvent);
#endif
@@ -1377,6 +1402,7 @@ os_aio(
DWORD len = n;
void* dummy_mess1;
void* dummy_mess2;
+ ulint dummy_type;
#endif
ulint err = 0;
ibool retry;
@@ -1489,8 +1515,9 @@ try_again:
use the same wait mechanism as for async i/o */
return(os_aio_windows_handle(ULINT_UNDEFINED,
- slot->pos,
- &dummy_mess1, &dummy_mess2));
+ slot->pos,
+ &dummy_mess1, &dummy_mess2,
+ &dummy_type));
}
return(TRUE);
@@ -1547,7 +1574,8 @@ os_aio_windows_handle(
the aio operation failed, these output
parameters are valid and can be used to
restart the operation, for example */
- void** message2)
+ void** message2,
+ ulint* type) /* out: OS_FILE_WRITE or ..._READ */
{
os_aio_array_t* array;
os_aio_slot_t* slot;
@@ -1592,10 +1620,12 @@ os_aio_windows_handle(
*message1 = slot->message1;
*message2 = slot->message2;
+ *type = slot->type;
+
if (ret && len == slot->len) {
ret_val = TRUE;
- if (slot->type == OS_FILE_WRITE) {
+ if (slot->type == OS_FILE_WRITE && !trx_doublewrite) {
ut_a(TRUE == os_file_flush(slot->file));
}
} else {
@@ -1679,7 +1709,7 @@ os_aio_posix_handle(
*message1 = slot->message1;
*message2 = slot->message2;
- if (slot->type == OS_FILE_WRITE) {
+ if (slot->type == OS_FILE_WRITE && !trx_doublewrite) {
ut_a(TRUE == os_file_flush(slot->file));
}
@@ -1709,7 +1739,8 @@ os_aio_simulated_handle(
the aio operation failed, these output
parameters are valid and can be used to
restart the operation, for example */
- void** message2)
+ void** message2,
+ ulint* type) /* out: OS_FILE_WRITE or ..._READ */
{
os_aio_array_t* array;
ulint segment;
@@ -1906,6 +1937,8 @@ slot_io_done:
*message1 = slot->message1;
*message2 = slot->message2;
+ *type = slot->type;
+
os_mutex_exit(array->mutex);
os_aio_array_free_slot(array, slot);
@@ -1989,13 +2022,13 @@ os_aio_print(void)
os_aio_slot_t* slot;
ulint n_reserved;
ulint i;
-
+
+ printf("Pending normal aio reads:\n");
+
array = os_aio_read_array;
loop:
ut_a(array);
- printf("INFO OF AN AIO ARRAY\n");
-
os_mutex_enter(array->mutex);
ut_a(array->n_slots > 0);
@@ -2022,24 +2055,29 @@ loop:
os_mutex_exit(array->mutex);
if (array == os_aio_read_array) {
+ printf("Pending aio writes:\n");
+
array = os_aio_write_array;
goto loop;
}
if (array == os_aio_write_array) {
+ printf("Pending insert buffer aio reads:\n");
array = os_aio_ibuf_array;
goto loop;
}
if (array == os_aio_ibuf_array) {
+ printf("Pending log writes or reads:\n");
array = os_aio_log_array;
goto loop;
}
if (array == os_aio_log_array) {
+ printf("Pending synchronous reads or writes:\n");
array = os_aio_sync_array;
goto loop;
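
The is_empty event added to os_aio_array_struct implements a simple protocol: the event is in the signaled state while no slots are reserved, it is reset when the first write slot is taken, and os_aio_wait_until_no_pending_writes() blocks on it until the count drops back to zero. The sketch below restates that protocol with POSIX primitives instead of os_event_t; it is an illustration only, not the patch's implementation, and initialization of the mutex and condition variable is omitted.

#include <pthread.h>

typedef struct {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	unsigned long	n_reserved;	/* pending writes in the array */
} aio_write_array_sk_t;

static void
slot_reserve(aio_write_array_sk_t* arr)
{
	pthread_mutex_lock(&arr->mutex);
	/* going from 0 to 1 corresponds to os_event_reset(is_empty) */
	arr->n_reserved++;
	pthread_mutex_unlock(&arr->mutex);
}

static void
slot_free(aio_write_array_sk_t* arr)
{
	pthread_mutex_lock(&arr->mutex);
	if (--arr->n_reserved == 0) {
		/* last pending write done: os_event_set(is_empty) */
		pthread_cond_broadcast(&arr->cond);
	}
	pthread_mutex_unlock(&arr->mutex);
}

static void
wait_until_no_pending_writes(aio_write_array_sk_t* arr)
{
	pthread_mutex_lock(&arr->mutex);
	while (arr->n_reserved > 0) {
		pthread_cond_wait(&arr->cond, &arr->mutex);
	}
	pthread_mutex_unlock(&arr->mutex);
}
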
diff --git a/innobase/page/page0cur.c b/innobase/page/page0cur.c
index e329b916b1b..0b233b4dd72 100644
--- a/innobase/page/page0cur.c
+++ b/innobase/page/page0cur.c
@@ -1019,16 +1019,16 @@ page_cur_delete_rec(
page_cur_t* cursor, /* in: a page cursor */
mtr_t* mtr) /* in: mini-transaction handle */
{
+ page_dir_slot_t* cur_dir_slot;
+ page_dir_slot_t* prev_slot;
page_t* page;
rec_t* current_rec;
rec_t* prev_rec = NULL;
rec_t* next_rec;
ulint cur_slot_no;
- page_dir_slot_t* cur_dir_slot;
- page_dir_slot_t* prev_slot;
ulint cur_n_owned;
rec_t* rec;
-
+
ut_ad(cursor && mtr);
page = page_cur_get_page(cursor);
@@ -1037,7 +1037,7 @@ page_cur_delete_rec(
/* The record must not be the supremum or infimum record. */
ut_ad(current_rec != page_get_supremum_rec(page));
ut_ad(current_rec != page_get_infimum_rec(page));
-
+
/* Save to local variables some data associated with current_rec */
cur_slot_no = page_dir_find_owner_slot(current_rec);
cur_dir_slot = page_dir_get_nth_slot(page, cur_slot_no);
diff --git a/innobase/pars/pars0pars.c b/innobase/pars/pars0pars.c
index f6c51f3905a..4a298426476 100644
--- a/innobase/pars/pars0pars.c
+++ b/innobase/pars/pars0pars.c
@@ -2028,11 +2028,7 @@ pars_complete_graph_for_exec(
que_node_set_parent(node, thr);
- mutex_enter(&kernel_mutex);
-
trx->graph = NULL;
- mutex_exit(&kernel_mutex);
-
return(thr);
}
diff --git a/innobase/rem/rem0cmp.c b/innobase/rem/rem0cmp.c
index 78f4e450269..cdf1f363946 100644
--- a/innobase/rem/rem0cmp.c
+++ b/innobase/rem/rem0cmp.c
@@ -295,14 +295,18 @@ This function is used to compare a data tuple to a physical record.
Only dtuple->n_fields_cmp first fields are taken into account for
the the data tuple! If we denote by n = n_fields_cmp, then rec must
have either m >= n fields, or it must differ from dtuple in some of
-the m fields rec has. */
+the m fields rec has. If rec has an externally stored field, we do not
+compare it but return 0 if such a comparison would be needed. */
int
cmp_dtuple_rec_with_match(
/*======================*/
/* out: 1, 0, -1, if dtuple is greater, equal,
less than rec, respectively, when only the
- common first fields are compared */
+ common first fields are compared, or
+ until the first externally stored field in
+ rec */
dtuple_t* dtuple, /* in: data tuple */
rec_t* rec, /* in: physical record which differs from
dtuple in some of the common fields, or which
@@ -344,7 +348,8 @@ cmp_dtuple_rec_with_match(
ut_ad(cur_field <= dtuple_get_n_fields_cmp(dtuple));
ut_ad(cur_field <= rec_get_n_fields(rec));
- /* Match fields in a loop; stop if we run out of fields in dtuple */
+ /* Match fields in a loop; stop if we run out of fields in dtuple
+ or find an externally stored field */
while (cur_field < dtuple_get_n_fields_cmp(dtuple)) {
@@ -357,7 +362,8 @@ cmp_dtuple_rec_with_match(
/* If we have matched yet 0 bytes, it may be that one or
both the fields are SQL null, or the record or dtuple may be
- the predefined minimum record */
+ the predefined minimum record, or the field is externally
+ stored */
if (cur_bytes == 0) {
if (cur_field == 0) {
@@ -384,6 +390,15 @@ cmp_dtuple_rec_with_match(
}
}
+ if (rec_get_nth_field_extern_bit(rec, cur_field)) {
+ /* We do not compare to an externally
+ stored field */
+
+ ret = 0;
+
+ goto order_resolved;
+ }
+
if (dtuple_f_len == UNIV_SQL_NULL
|| rec_f_len == UNIV_SQL_NULL) {
@@ -604,7 +619,8 @@ cmp_dtuple_rec_prefix_equal(
/*****************************************************************
This function is used to compare two physical records. Only the common
-first fields are compared. */
+first fields are compared, and if an externally stored field is
+encountered, then 0 is returned. */
int
cmp_rec_rec_with_match(
@@ -688,8 +704,18 @@ cmp_rec_rec_with_match(
goto order_resolved;
}
- }
+ }
+
+ if (rec_get_nth_field_extern_bit(rec1, cur_field)
+ || rec_get_nth_field_extern_bit(rec2, cur_field)) {
+ /* We do not compare to an externally
+ stored field */
+ ret = 0;
+
+ goto order_resolved;
+ }
+
if (rec1_f_len == UNIV_SQL_NULL
|| rec2_f_len == UNIV_SQL_NULL) {
@@ -812,7 +838,8 @@ order_resolved:
Used in debug checking of cmp_dtuple_... .
This function is used to compare a data tuple to a physical record. If
dtuple has n fields then rec must have either m >= n fields, or it must
-differ from dtuple in some of the m fields rec has. */
+differ from dtuple in some of the m fields rec has. If it encounters an
+externally stored field, it returns 0. */
static
int
cmp_debug_dtuple_rec_with_match(
@@ -882,6 +909,14 @@ cmp_debug_dtuple_rec_with_match(
rec_f_data = rec_get_nth_field(rec, cur_field, &rec_f_len);
+ if (rec_get_nth_field_extern_bit(rec, cur_field)) {
+ /* We do not compare to an externally stored field */
+
+ ret = 0;
+
+ goto order_resolved;
+ }
+
ret = cmp_data_data(cur_type, dtuple_f_data, dtuple_f_len,
rec_f_data, rec_f_len);
if (ret != 0) {
diff --git a/innobase/rem/rem0rec.c b/innobase/rem/rem0rec.c
index 9ddfe7a4b9a..88009f2f5c9 100644
--- a/innobase/rem/rem0rec.c
+++ b/innobase/rem/rem0rec.c
@@ -1,7 +1,7 @@
/************************************************************************
Record manager
-(c) 1994-1996 Innobase Oy
+(c) 1994-2001 Innobase Oy
Created 5/30/1994 Heikki Tuuri
*************************************************************************/
@@ -12,6 +12,9 @@ Created 5/30/1994 Heikki Tuuri
#include "rem0rec.ic"
#endif
+#include "mtr0mtr.h"
+#include "mtr0log.h"
+
/* PHYSICAL RECORD
===============
@@ -21,7 +24,10 @@ found in index pages of the database, has the following format
represented on a higher text line):
| offset of the end of the last field of data, the most significant
- bit is set to 1 if and only if the field is SQL-null |
+ bit is set to 1 if and only if the field is SQL-null,
+ if the offset is stored in two bytes, then the second most significant
+ bit is set to 1 if the field is stored on another page:
+ mostly this will occur in the case of big BLOB fields |
...
| offset of the end of the first field of data + the SQL-null bit |
| 4 bits used to delete mark a record, and mark a predefined
@@ -122,7 +128,8 @@ rec_get_nth_field(
return(rec + os);
}
- next_os = next_os & ~REC_2BYTE_SQL_NULL_MASK;
+ next_os = next_os & ~(REC_2BYTE_SQL_NULL_MASK
+ | REC_2BYTE_EXTERN_MASK);
}
*len = next_os - os;
@@ -170,6 +177,60 @@ rec_set_nth_field_null_bit(
rec_2_set_field_end_info(rec, i, info);
}
+/***************************************************************
+Sets the value of the ith field extern storage bit. */
+
+void
+rec_set_nth_field_extern_bit(
+/*=========================*/
+ rec_t* rec, /* in: record */
+ ulint i, /* in: ith field */
+ ibool val, /* in: value to set */
+ mtr_t* mtr) /* in: mtr holding an X-latch to the page where
+ rec is, or NULL; in the NULL case we do not
+ write to log about the change */
+{
+ ulint info;
+
+ ut_a(!rec_get_1byte_offs_flag(rec));
+ ut_a(i < rec_get_n_fields(rec));
+
+ info = rec_2_get_field_end_info(rec, i);
+
+ if (val) {
+ info = info | REC_2BYTE_EXTERN_MASK;
+ } else {
+ info = info & ~REC_2BYTE_EXTERN_MASK;
+ }
+
+ if (mtr) {
+ mlog_write_ulint(rec - REC_N_EXTRA_BYTES - 2 * (i + 1), info,
+ MLOG_2BYTES, mtr);
+ } else {
+ rec_2_set_field_end_info(rec, i, info);
+ }
+}
+
+/***************************************************************
+Sets TRUE the extern storage bits of fields mentioned in an array. */
+
+void
+rec_set_field_extern_bits(
+/*======================*/
+ rec_t* rec, /* in: record */
+ ulint* vec, /* in: array of field numbers */
+ ulint n_fields, /* in: number of field numbers */
+ mtr_t* mtr) /* in: mtr holding an X-latch to the page
+ where rec is, or NULL; in the NULL case we
+ do not write to log about the change */
+{
+ ulint i;
+
+ for (i = 0; i < n_fields; i++) {
+ rec_set_nth_field_extern_bit(rec, vec[i], TRUE, mtr);
+ }
+}
+
/***************************************************************
Sets a record field to SQL null. The physical size of the field is not
changed. */
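
A note on the rem0rec.c hunks above: the 2-byte field end info now carries two flag bits, the SQL-null bit and the new extern storage bit, and rec_get_nth_field masks both off before computing a field length. Below is a minimal stand-alone sketch of that encoding; the mask values REC_2BYTE_SQL_NULL_MASK = 0x8000 and REC_2BYTE_EXTERN_MASK = 0x4000 are assumed here (the real definitions are in rem0rec.h), and a plain unsigned word stands in for the real record layout.

#include <stdio.h>

/* Assumed mask values; in InnoDB they live in rem0rec.h. */
#define REC_2BYTE_SQL_NULL_MASK 0x8000UL
#define REC_2BYTE_EXTERN_MASK   0x4000UL

/* Pack a field end offset together with the two flag bits. */
static unsigned long end_info_make(unsigned long offs, int is_null, int is_extern)
{
        unsigned long info = offs;

        if (is_null) {
                info |= REC_2BYTE_SQL_NULL_MASK;
        }
        if (is_extern) {
                info |= REC_2BYTE_EXTERN_MASK;
        }
        return(info);
}

/* Strip both flag bits to recover the plain end offset, as
rec_get_nth_field does after this patch. */
static unsigned long end_info_get_offs(unsigned long info)
{
        return(info & ~(REC_2BYTE_SQL_NULL_MASK | REC_2BYTE_EXTERN_MASK));
}

int main(void)
{
        unsigned long info = end_info_make(300, 0, 1);

        printf("offs %lu extern %d\n", end_info_get_offs(info),
               (info & REC_2BYTE_EXTERN_MASK) != 0);
        return(0);
}

The ut_a(!rec_get_1byte_offs_flag(rec)) assertion in rec_set_nth_field_extern_bit reflects that only the 2-byte offset format has a spare bit for this flag.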
diff --git a/innobase/row/row0ins.c b/innobase/row/row0ins.c
index e57622fd1c5..8542dcae326 100644
--- a/innobase/row/row0ins.c
+++ b/innobase/row/row0ins.c
@@ -234,7 +234,13 @@ row_ins_clust_index_entry_by_modify(
depending on whether mtr holds just a leaf
latch or also a tree latch */
btr_cur_t* cursor, /* in: B-tree cursor */
+ big_rec_t** big_rec,/* out: possible big rec vector of fields
+ which have to be stored externally by the
+ caller */
dtuple_t* entry, /* in: index entry to insert */
+ ulint* ext_vec,/* in: array containing field numbers of
+ externally stored fields in entry, or NULL */
+ ulint n_ext_vec,/* in: number of fields in ext_vec */
que_thr_t* thr, /* in: query thread */
mtr_t* mtr) /* in: mtr */
{
@@ -243,8 +249,10 @@ row_ins_clust_index_entry_by_modify(
upd_t* update;
ulint err;
- ut_ad((cursor->index)->type & DICT_CLUSTERED);
+ ut_ad(cursor->index->type & DICT_CLUSTERED);
+ *big_rec = NULL;
+
rec = btr_cur_get_rec(cursor);
ut_ad(rec_get_deleted_flag(rec));
@@ -254,21 +262,21 @@ row_ins_clust_index_entry_by_modify(
/* Build an update vector containing all the fields to be modified;
NOTE that this vector may contain also system columns! */
- update = row_upd_build_difference(cursor->index, entry, rec, heap);
-
+ update = row_upd_build_difference(cursor->index, entry, ext_vec,
+ n_ext_vec, rec, heap);
if (mode == BTR_MODIFY_LEAF) {
/* Try optimistic updating of the record, keeping changes
within the page */
- err = btr_cur_optimistic_update(0, cursor, update, 0, thr,
- mtr);
- if ((err == DB_OVERFLOW) || (err == DB_UNDERFLOW)) {
+ err = btr_cur_optimistic_update(0, cursor, update, 0, thr, mtr);
+
+ if (err == DB_OVERFLOW || err == DB_UNDERFLOW) {
err = DB_FAIL;
}
} else {
- ut_ad(mode == BTR_MODIFY_TREE);
- err = btr_cur_pessimistic_update(0, cursor, update, 0, thr,
- mtr);
+ ut_a(mode == BTR_MODIFY_TREE);
+ err = btr_cur_pessimistic_update(0, cursor, big_rec, update,
+ 0, thr, mtr);
}
mem_heap_free(heap);
@@ -597,14 +605,18 @@ row_ins_index_entry_low(
pessimistic descent down the index tree */
dict_index_t* index, /* in: index */
dtuple_t* entry, /* in: index entry to insert */
+ ulint* ext_vec,/* in: array containing field numbers of
+ externally stored fields in entry, or NULL */
+ ulint n_ext_vec,/* in: number of fields in ext_vec */
que_thr_t* thr) /* in: query thread */
{
btr_cur_t cursor;
ulint modify;
- rec_t* dummy_rec;
+ rec_t* insert_rec;
rec_t* rec;
ulint err;
ulint n_unique;
+ big_rec_t* big_rec = NULL;
mtr_t mtr;
log_free_check();
@@ -682,24 +694,54 @@ row_ins_index_entry_low(
if (index->type & DICT_CLUSTERED) {
err = row_ins_clust_index_entry_by_modify(mode,
- &cursor, entry,
- thr, &mtr);
+ &cursor, &big_rec,
+ entry,
+ ext_vec, n_ext_vec,
+ thr, &mtr);
} else {
err = row_ins_sec_index_entry_by_modify(&cursor,
thr, &mtr);
}
- } else if (mode == BTR_MODIFY_LEAF) {
- err = btr_cur_optimistic_insert(0, &cursor, entry,
- &dummy_rec, thr, &mtr);
} else {
- ut_ad(mode == BTR_MODIFY_TREE);
- err = btr_cur_pessimistic_insert(0, &cursor, entry,
- &dummy_rec, thr, &mtr);
+ if (mode == BTR_MODIFY_LEAF) {
+ err = btr_cur_optimistic_insert(0, &cursor, entry,
+ &insert_rec, &big_rec, thr, &mtr);
+ } else {
+ ut_a(mode == BTR_MODIFY_TREE);
+ err = btr_cur_pessimistic_insert(0, &cursor, entry,
+ &insert_rec, &big_rec, thr, &mtr);
+ }
+
+ if (err == DB_SUCCESS) {
+ if (ext_vec) {
+ rec_set_field_extern_bits(insert_rec,
+ ext_vec, n_ext_vec, &mtr);
+ }
+ }
}
+
function_exit:
mtr_commit(&mtr);
+ if (big_rec) {
+ mtr_start(&mtr);
+
+ btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
+ BTR_MODIFY_TREE, &cursor, 0, &mtr);
+
+ err = btr_store_big_rec_extern_fields(index,
+ btr_cur_get_rec(&cursor),
+ big_rec, &mtr);
+ if (modify) {
+ dtuple_big_rec_free(big_rec);
+ } else {
+ dtuple_convert_back_big_rec(index, entry, big_rec);
+ }
+
+ mtr_commit(&mtr);
+ }
+
return(err);
}
@@ -716,14 +758,17 @@ row_ins_index_entry(
DB_DUPLICATE_KEY, or some other error code */
dict_index_t* index, /* in: index */
dtuple_t* entry, /* in: index entry to insert */
+ ulint* ext_vec,/* in: array containing field numbers of
+ externally stored fields in entry, or NULL */
+ ulint n_ext_vec,/* in: number of fields in ext_vec */
que_thr_t* thr) /* in: query thread */
{
ulint err;
/* Try first optimistic descent to the B-tree */
- err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry, thr);
-
+ err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
+ ext_vec, n_ext_vec, thr);
if (err != DB_FAIL) {
return(err);
@@ -731,8 +776,8 @@ row_ins_index_entry(
/* Try then pessimistic descent to the B-tree */
- err = row_ins_index_entry_low(BTR_MODIFY_TREE, index, entry, thr);
-
+ err = row_ins_index_entry_low(BTR_MODIFY_TREE, index, entry,
+ ext_vec, n_ext_vec, thr);
return(err);
}
@@ -784,7 +829,7 @@ row_ins_index_entry_step(
ut_ad(dtuple_check_typed(node->entry));
- err = row_ins_index_entry(node->index, node->entry, thr);
+ err = row_ins_index_entry(node->index, node->entry, NULL, 0, thr);
return(err);
}
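
The reworked row_ins_index_entry_low above illustrates the contract this patch introduces: the B-tree insert routines may hand back a big_rec vector of fields that did not fit on the page, and the caller stores those fields externally only after the insert mini-transaction has been committed and the cursor has been re-positioned. The sketch below shows just that control flow; every type and function in it is a hypothetical stub, not the real InnoDB API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for InnoDB types and return codes. */
typedef struct { int n_fields; } big_rec_t;
enum { DB_SUCCESS, DB_FAIL };

/* Stub: an insert that cannot fit all fields on the page hands the
overflowing fields back to the caller as a big_rec vector. */
static int insert_record(int oversized, big_rec_t** big_rec)
{
        if (oversized) {
                *big_rec = malloc(sizeof(big_rec_t));
                (*big_rec)->n_fields = 1;
        } else {
                *big_rec = NULL;
        }
        return(DB_SUCCESS);
}

/* Stub: writes the long fields to separate pages in their own
mini-transaction, as btr_store_big_rec_extern_fields does. */
static int store_extern_fields(big_rec_t* big_rec)
{
        printf("storing %d field(s) externally\n", big_rec->n_fields);
        return(DB_SUCCESS);
}

int main(void)
{
        big_rec_t* big_rec;
        int err = insert_record(1, &big_rec);   /* first mtr: the insert */

        if (err == DB_SUCCESS && big_rec) {
                /* second mtr: only now are the long fields written out */
                err = store_extern_fields(big_rec);
                free(big_rec);
        }
        return(err == DB_SUCCESS ? 0 : 1);
}

Splitting the work into two mini-transactions keeps the insert itself short; the long fields are then written under a fresh BTR_MODIFY_TREE search, as the hunk around btr_store_big_rec_extern_fields shows.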
diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c
index 8e1a584f667..9bbc45a5c9a 100644
--- a/innobase/row/row0mysql.c
+++ b/innobase/row/row0mysql.c
@@ -625,7 +625,8 @@ row_update_for_mysql(
ut_ad(prebuilt && trx);
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
-
+ UT_NOT_USED(mysql_rec);
+
node = prebuilt->upd_node;
clust_index = dict_table_get_first_index(table);
@@ -777,7 +778,9 @@ row_get_mysql_key_number_for_index(
}
/*************************************************************************
-Does a table creation operation for MySQL. */
+Does a table creation operation for MySQL. If the name of the created
+table ends with the characters INNODB_MONITOR, then this also starts
+printing of monitor output by the master thread. */
int
row_create_table_for_mysql(
@@ -789,6 +792,8 @@ row_create_table_for_mysql(
tab_node_t* node;
mem_heap_t* heap;
que_thr_t* thr;
+ ulint namelen;
+ ulint keywordlen;
ulint err;
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
@@ -833,6 +838,20 @@ row_create_table_for_mysql(
}
trx->error_state = DB_SUCCESS;
+ } else {
+ namelen = ut_strlen(table->name);
+
+ keywordlen = ut_strlen("innodb_monitor");
+
+ if (namelen >= keywordlen
+ && 0 == ut_memcmp(table->name + namelen - keywordlen,
+ "innodb_monitor", keywordlen)) {
+
+ /* Table name ends with the characters innodb_monitor:
+ start monitor prints */
+
+ srv_print_innodb_monitor = TRUE;
+ }
}
mutex_exit(&(dict_sys->mutex));
@@ -900,7 +919,9 @@ row_create_index_for_mysql(
}
/*************************************************************************
-Drops a table for MySQL. */
+Drops a table for MySQL. If the name of the dropped table ends with
+the characters INNODB_MONITOR, then this also stops printing of monitor
+output by the master thread. */
int
row_drop_table_for_mysql(
@@ -918,11 +939,26 @@ row_drop_table_for_mysql(
char* str1;
char* str2;
ulint len;
+ ulint namelen;
+ ulint keywordlen;
char buf[10000];
ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
ut_a(name != NULL);
+ namelen = ut_strlen(name);
+ keywordlen = ut_strlen("innodb_monitor");
+
+ if (namelen >= keywordlen
+ && 0 == ut_memcmp(name + namelen - keywordlen,
+ "innodb_monitor", keywordlen)) {
+
+ /* Table name ends with the characters innodb_monitor:
+ stop monitor prints */
+
+ srv_print_innodb_monitor = FALSE;
+ }
+
/* We use the private SQL parser of Innobase to generate the
query graphs needed in deleting the dictionary data from system
tables in Innobase. Deleting a row from SYS_INDEXES table also
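
The create/drop hooks above toggle srv_print_innodb_monitor by checking whether the table name ends with the string "innodb_monitor". A stand-alone version of that suffix test, using plain libc calls instead of ut_strlen/ut_memcmp, might look like this:

#include <stdio.h>
#include <string.h>

/* Returns 1 if name ends with suffix, else 0; mirrors the
ut_strlen/ut_memcmp check in row_create_table_for_mysql. */
static int name_ends_with(const char* name, const char* suffix)
{
        size_t namelen    = strlen(name);
        size_t keywordlen = strlen(suffix);

        return(namelen >= keywordlen
               && 0 == memcmp(name + namelen - keywordlen,
                              suffix, keywordlen));
}

int main(void)
{
        printf("%d\n", name_ends_with("test/innodb_monitor", "innodb_monitor")); /* 1 */
        printf("%d\n", name_ends_with("test/customers", "innodb_monitor"));      /* 0 */
        return(0);
}

Creating and later dropping a table whose name ends in innodb_monitor is thus the switch that turns the master thread's monitor report on and off.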
diff --git a/innobase/row/row0purge.c b/innobase/row/row0purge.c
index 0a6fabe584c..ec880d3fe04 100644
--- a/innobase/row/row0purge.c
+++ b/innobase/row/row0purge.c
@@ -347,20 +347,36 @@ row_purge_del_mark(
}
/***************************************************************
-Purges an update of an existing record. */
+Purges an update of an existing record. Also purges an update of a delete
+marked record if that record contained an externally stored field. */
static
void
-row_purge_upd_exist(
-/*================*/
+row_purge_upd_exist_or_extern(
+/*==========================*/
purge_node_t* node, /* in: row purge node */
que_thr_t* thr) /* in: query thread */
{
mem_heap_t* heap;
dtuple_t* entry;
dict_index_t* index;
+ upd_field_t* ufield;
+ ibool is_insert;
+ ulint rseg_id;
+ ulint page_no;
+ ulint offset;
+ ulint internal_offset;
+ byte* data_field;
+ ulint data_field_len;
+ ulint i;
+ mtr_t mtr;
ut_ad(node && thr);
+ if (node->rec_type == TRX_UNDO_UPD_DEL_REC) {
+
+ goto skip_secondaries;
+ }
+
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -378,6 +394,53 @@ row_purge_upd_exist(
}
mem_heap_free(heap);
+
+skip_secondaries:
+ /* Free possible externally stored fields */
+ for (i = 0; i < upd_get_n_fields(node->update); i++) {
+
+ ufield = upd_get_nth_field(node->update, i);
+
+ if (ufield->extern_storage) {
+ /* We use the fact that new_val points to
+ node->undo_rec and thus get the offset of the
+ dfield data inside the undo record. Then we
+ can calculate from node->roll_ptr the file
+ address of the new_val data */
+
+ internal_offset = ((byte*)ufield->new_val.data)
+ - node->undo_rec;
+
+ ut_a(internal_offset < UNIV_PAGE_SIZE);
+
+ trx_undo_decode_roll_ptr(node->roll_ptr,
+ &is_insert, &rseg_id,
+ &page_no, &offset);
+ mtr_start(&mtr);
+
+ /* We have to acquire an X-latch to the clustered
+ index tree */
+
+ index = dict_table_get_first_index(node->table);
+
+ mtr_x_lock(dict_tree_get_lock(index->tree), &mtr);
+
+ /* We assume in purge of externally stored fields
+ that the space id of the undo log record is 0! */
+
+ data_field = buf_page_get(0, page_no, RW_X_LATCH, &mtr)
+ + offset + internal_offset;
+
+ buf_page_dbg_add_level(buf_frame_align(data_field),
+ SYNC_TRX_UNDO_PAGE);
+
+ data_field_len = ufield->new_val.len;
+
+ btr_free_externally_stored_field(index, data_field,
+ data_field_len, &mtr);
+ mtr_commit(&mtr);
+ }
+ }
}
/***************************************************************
@@ -388,6 +451,9 @@ row_purge_parse_undo_rec(
/*=====================*/
/* out: TRUE if purge operation required */
purge_node_t* node, /* in: row undo node */
+ ibool* updated_extern,
+ /* out: TRUE if an externally stored field
+ was updated */
que_thr_t* thr) /* in: query thread */
{
dict_index_t* clust_index;
@@ -403,10 +469,10 @@ row_purge_parse_undo_rec(
ut_ad(node && thr);
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
- &undo_no, &table_id);
+ updated_extern, &undo_no, &table_id);
node->rec_type = type;
- if (type == TRX_UNDO_UPD_DEL_REC) {
+ if (type == TRX_UNDO_UPD_DEL_REC && !(*updated_extern)) {
return(FALSE);
}
@@ -416,7 +482,7 @@ row_purge_parse_undo_rec(
node->table = NULL;
if (type == TRX_UNDO_UPD_EXIST_REC
- && cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
+ && cmpl_info & UPD_NODE_NO_ORD_CHANGE && !(*updated_extern)) {
/* Purge requires no changes to indexes: we may return */
@@ -455,8 +521,11 @@ row_purge_parse_undo_rec(
/* Read to the partial row the fields that occur in indexes */
- ptr = trx_undo_rec_get_partial_row(ptr, clust_index, &(node->row),
- node->heap);
+ if (!(cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
+ ptr = trx_undo_rec_get_partial_row(ptr, clust_index,
+ &(node->row), node->heap);
+ }
+
return(TRUE);
}
@@ -475,6 +544,7 @@ row_purge(
{
dulint roll_ptr;
ibool purge_needed;
+ ibool updated_extern;
ut_ad(node && thr);
@@ -494,7 +564,8 @@ row_purge(
if (node->undo_rec == &trx_purge_dummy_rec) {
purge_needed = FALSE;
} else {
- purge_needed = row_purge_parse_undo_rec(node, thr);
+ purge_needed = row_purge_parse_undo_rec(node, &updated_extern,
+ thr);
}
if (purge_needed) {
@@ -503,11 +574,13 @@ row_purge(
node->index = dict_table_get_next_index(
dict_table_get_first_index(node->table));
- if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
- row_purge_upd_exist(node, thr);
- } else {
- ut_ad(node->rec_type == TRX_UNDO_DEL_MARK_REC);
+ if (node->rec_type == TRX_UNDO_DEL_MARK_REC) {
row_purge_del_mark(node, thr);
+
+ } else if (updated_extern
+ || node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
+
+ row_purge_upd_exist_or_extern(node, thr);
}
if (node->found_clust) {
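
One detail in the row_purge_parse_undo_rec hunk above is easy to misread: the guard !(cmpl_info & UPD_NODE_NO_ORD_CHANGE) needs its inner parentheses, because in C the ! operator binds tighter than &, so the unparenthesized form would evaluate (!cmpl_info) & UPD_NODE_NO_ORD_CHANGE instead. A two-case demonstration, assuming a flag value of 1 (the real constant is defined in row0upd.h):

#include <stdio.h>

/* Assumed value; the real flag lives in row0upd.h. */
#define UPD_NODE_NO_ORD_CHANGE 1

int main(void)
{
        /* Some other compiler-info bit is set, but not the
        "no ordering field change" bit. */
        unsigned long cmpl_info = 2;

        /* Intended test: the flag is not set, so the partial row
        must be read -> prints 1. */
        printf("%d\n", !(cmpl_info & UPD_NODE_NO_ORD_CHANGE) ? 1 : 0);

        /* Without the parentheses, ! binds first: (!2) & 1 == 0,
        so the branch would be skipped -> prints 0. */
        printf("%d\n", (!cmpl_info & UPD_NODE_NO_ORD_CHANGE) ? 1 : 0);

        return(0);
}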
diff --git a/innobase/row/row0row.c b/innobase/row/row0row.c
index f85789fa0d6..59169ef2a98 100644
--- a/innobase/row/row0row.c
+++ b/innobase/row/row0row.c
@@ -146,15 +146,17 @@ row_build_index_entry(
/***********************************************************************
An inverse function to dict_row_build_index_entry. Builds a row from a
-record in a clustered index. */
+record in a clustered index. NOTE that externally stored (often big)
+fields are always copied to heap. */
dtuple_t*
row_build(
/*======*/
/* out, own: row built; see the NOTE below! */
- ulint type, /* in: ROW_COPY_DATA, or ROW_COPY_POINTERS:
- the former copies also the data fields to
- heap as the latter only places pointers to
+ ulint type, /* in: ROW_COPY_POINTERS, ROW_COPY_DATA, or
+ ROW_COPY_ALSO_EXTERNALS,
+ the last two also copy the data fields to
+ heap, whereas the first only places pointers to
data fields on the index page, and thus is
more efficient */
dict_index_t* index, /* in: clustered index */
@@ -170,19 +172,19 @@ row_build(
{
dtuple_t* row;
dict_table_t* table;
- ulint n_fields;
- ulint i;
+ dict_col_t* col;
dfield_t* dfield;
+ ulint n_fields;
byte* field;
ulint len;
ulint row_len;
- dict_col_t* col;
byte* buf;
+ ulint i;
ut_ad(index && rec && heap);
ut_ad(index->type & DICT_CLUSTERED);
- if (type == ROW_COPY_DATA) {
+ if (type != ROW_COPY_POINTERS) {
/* Take a copy of rec to heap */
buf = mem_heap_alloc(heap, rec_get_size(rec));
rec = rec_copy(buf, rec);
@@ -207,6 +209,13 @@ row_build(
dfield = dtuple_get_nth_field(row, dict_col_get_no(col));
field = rec_get_nth_field(rec, i, &len);
+ if (type == ROW_COPY_ALSO_EXTERNALS
+ && rec_get_nth_field_extern_bit(rec, i)) {
+
+ field = btr_rec_copy_externally_stored_field(rec,
+ i, &len, heap);
+ }
+
dfield_set_data(dfield, field, len);
}
@@ -215,6 +224,7 @@ row_build(
return(row);
}
+#ifdef notdefined
/***********************************************************************
An inverse function to dict_row_build_index_entry. Builds a row from a
record in a clustered index. */
@@ -229,7 +239,9 @@ row_build_to_tuple(
directly into this record, therefore,
the buffer page of this record must be
at least s-latched and the latch held
- as long as the row dtuple is used! */
+ as long as the row dtuple is used!
+ NOTE 2: does not work with externally
+ stored fields! */
{
dict_table_t* table;
ulint n_fields;
@@ -265,9 +277,11 @@ row_build_to_tuple(
ut_ad(dtuple_check_typed(row));
}
+#endif
/***********************************************************************
-Converts an index record to a typed data tuple. */
+Converts an index record to a typed data tuple. NOTE that externally
+stored (often big) fields are NOT copied to heap. */
dtuple_t*
row_rec_to_index_entry(
diff --git a/innobase/row/row0sel.c b/innobase/row/row0sel.c
index 5a77fc5daaa..b74bd29a89e 100644
--- a/innobase/row/row0sel.c
+++ b/innobase/row/row0sel.c
@@ -2036,7 +2036,8 @@ row_sel_store_mysql_rec(
which was described in prebuilt's
template */
{
- mysql_row_templ_t* templ;
+ mysql_row_templ_t* templ;
+ mem_heap_t* extern_field_heap = NULL;
byte* data;
ulint len;
byte* blob_buf;
@@ -2059,6 +2060,24 @@ row_sel_store_mysql_rec(
data = rec_get_nth_field(rec, templ->rec_field_no, &len);
+ if (rec_get_nth_field_extern_bit(rec, templ->rec_field_no)) {
+ /* Copy an externally stored field to the temporary
+ heap */
+
+ if (prebuilt->trx->has_search_latch) {
+ rw_lock_s_unlock(&btr_search_latch);
+ prebuilt->trx->has_search_latch = FALSE;
+ }
+
+ extern_field_heap = mem_heap_create(UNIV_PAGE_SIZE);
+
+ data = btr_rec_copy_externally_stored_field(rec,
+ templ->rec_field_no, &len,
+ extern_field_heap);
+
+ ut_a(len != UNIV_SQL_NULL);
+ }
+
if (len != UNIV_SQL_NULL) {
if (templ->type == DATA_BLOB) {
@@ -2081,6 +2100,10 @@ row_sel_store_mysql_rec(
mysql_rec + templ->mysql_col_offset,
templ->mysql_col_len, data, len,
templ->type, templ->is_unsigned);
+
+ if (extern_field_heap) {
+ mem_heap_free(extern_field_heap);
+ }
} else {
mysql_rec[templ->mysql_null_byte_offset] |=
(byte) (templ->mysql_null_bit_mask);
@@ -2450,6 +2473,7 @@ row_search_for_mysql(
ibool unique_search_from_clust_index = FALSE;
ibool mtr_has_extra_clust_latch = FALSE;
ibool moves_up = FALSE;
+ ulint cnt = 0;
mtr_t mtr;
ut_ad(index && pcur && search_tuple);
@@ -2457,6 +2481,11 @@ row_search_for_mysql(
ut_ad(sync_thread_levels_empty_gen(FALSE));
+/* printf("Match mode %lu\n search tuple ", match_mode);
+ dtuple_print(search_tuple);
+
+ printf("N tables locked %lu\n", trx->mysql_n_tables_locked);
+*/
if (direction == 0) {
prebuilt->n_rows_fetched = 0;
prebuilt->n_fetch_cached = 0;
@@ -2528,6 +2557,8 @@ row_search_for_mysql(
mtr_commit(&mtr);
+ /* printf("%s record not found 1\n", index->name); */
+
return(DB_RECORD_NOT_FOUND);
}
@@ -2565,17 +2596,18 @@ row_search_for_mysql(
mtr_commit(&mtr);
+ /* printf("%s shortcut\n", index->name); */
+
return(DB_SUCCESS);
} else if (shortcut == SEL_EXHAUSTED) {
mtr_commit(&mtr);
+ /* printf("%s record not found 2\n",
+ index->name); */
return(DB_RECORD_NOT_FOUND);
}
-
- /* Commit the mini-transaction since it can
- hold latches */
mtr_commit(&mtr);
mtr_start(&mtr);
@@ -2659,7 +2691,12 @@ rec_loop:
cons_read_requires_clust_rec = FALSE;
rec = btr_pcur_get_rec(pcur);
-
+/*
+ printf("Using index %s cnt %lu ", index->name, cnt);
+ printf("; Page no %lu\n",
+ buf_frame_get_page_no(buf_frame_align(rec)));
+ rec_print(rec);
+*/
if (rec == page_get_infimum_rec(buf_frame_align(rec))) {
/* The infimum record on a page cannot be in the result set,
@@ -2700,12 +2737,15 @@ rec_loop:
/* Test if the index record matches completely to search_tuple
in prebuilt: if not, then we return with DB_RECORD_NOT_FOUND */
+ /* printf("Comparing rec and search tuple\n"); */
+
if (0 != cmp_dtuple_rec(search_tuple, rec)) {
btr_pcur_store_position(pcur, &mtr);
ret = DB_RECORD_NOT_FOUND;
-
+ /* printf("%s record not found 3\n", index->name); */
+
goto normal_return;
}
@@ -2716,6 +2756,7 @@ rec_loop:
btr_pcur_store_position(pcur, &mtr);
ret = DB_RECORD_NOT_FOUND;
+ /* printf("%s record not found 4\n", index->name); */
goto normal_return;
}
@@ -2884,6 +2925,8 @@ next_rec:
moved = sel_restore_position_for_mysql(BTR_SEARCH_LEAF, pcur,
moves_up, &mtr);
if (moved) {
+ cnt++;
+
goto rec_loop;
}
}
@@ -2906,6 +2949,8 @@ next_rec:
goto normal_return;
}
+ cnt++;
+
goto rec_loop;
/*-------------------------------------------------------------*/
lock_wait_or_error:
@@ -2931,7 +2976,9 @@ lock_wait_or_error:
goto rec_loop;
}
-
+
+ /* printf("Using index %s cnt %lu ret value %lu err\n", index->name,
+ cnt, err); */
return(err);
normal_return:
@@ -2945,5 +2992,7 @@ normal_return:
ret = DB_SUCCESS;
}
+ /* printf("Using index %s cnt %lu ret value %lu\n", index->name,
+ cnt, err); */
return(ret);
}
diff --git a/innobase/row/row0uins.c b/innobase/row/row0uins.c
index c9330318ac0..47807877779 100644
--- a/innobase/row/row0uins.c
+++ b/innobase/row/row0uins.c
@@ -242,11 +242,12 @@ row_undo_ins_parse_undo_rec(
dulint table_id;
ulint type;
ulint dummy;
+ ibool dummy_extern;
ut_ad(node && thr);
- ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &dummy, &undo_no,
- &table_id);
+ ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &dummy,
+ &dummy_extern, &undo_no, &table_id);
ut_ad(type == TRX_UNDO_INSERT_REC);
node->rec_type = type;
@@ -284,9 +285,9 @@ row_undo_ins(
row_undo_ins_parse_undo_rec(node, thr);
if (node->table == NULL) {
- found = FALSE;
+ found = FALSE;
} else {
- found = row_undo_search_clust_to_pcur(node, thr);
+ found = row_undo_search_clust_to_pcur(node, thr);
}
if (!found) {
diff --git a/innobase/row/row0umod.c b/innobase/row/row0umod.c
index c8db428bade..0221c51b985 100644
--- a/innobase/row/row0umod.c
+++ b/innobase/row/row0umod.c
@@ -94,12 +94,12 @@ row_undo_mod_clust_low(
mtr_t* mtr, /* in: mtr */
ulint mode) /* in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
+ big_rec_t* dummy_big_rec;
dict_index_t* index;
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
ulint err;
ibool success;
- ibool do_remove;
index = dict_table_get_first_index(node->table);
@@ -110,49 +110,80 @@ row_undo_mod_clust_low(
ut_ad(success);
+ if (mode == BTR_MODIFY_LEAF) {
+
+ err = btr_cur_optimistic_update(BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG
+ | BTR_KEEP_SYS_FLAG,
+ btr_cur, node->update,
+ node->cmpl_info, thr, mtr);
+ } else {
+ ut_ad(mode == BTR_MODIFY_TREE);
+
+ err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG
+ | BTR_KEEP_SYS_FLAG,
+ btr_cur, &dummy_big_rec, node->update,
+ node->cmpl_info, thr, mtr);
+ }
+
+ return(err);
+}
+
+/***************************************************************
+Removes a clustered index record after undo if possible. */
+static
+ulint
+row_undo_mod_remove_clust_low(
+/*==========================*/
+ /* out: DB_SUCCESS, DB_FAIL, or error code:
+ we may run out of file space */
+ undo_node_t* node, /* in: row undo node */
+ que_thr_t* thr, /* in: query thread */
+ mtr_t* mtr, /* in: mtr */
+ ulint mode) /* in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
+{
+ btr_pcur_t* pcur;
+ btr_cur_t* btr_cur;
+ ulint err;
+ ibool success;
+
+ pcur = &(node->pcur);
+ btr_cur = btr_pcur_get_btr_cur(pcur);
+
+ success = btr_pcur_restore_position(mode, pcur, mtr);
+
+ if (!success) {
+
+ return(DB_SUCCESS);
+ }
+
/* Find out if we can remove the whole clustered index record */
if (node->rec_type == TRX_UNDO_UPD_DEL_REC
&& !row_vers_must_preserve_del_marked(node->new_trx_id, mtr)) {
- do_remove = TRUE;
+ /* Ok, we can remove */
} else {
- do_remove = FALSE;
+ return(DB_SUCCESS);
}
if (mode == BTR_MODIFY_LEAF) {
+ success = btr_cur_optimistic_delete(btr_cur, mtr);
- if (do_remove) {
- success = btr_cur_optimistic_delete(btr_cur, mtr);
-
- if (success) {
- err = DB_SUCCESS;
- } else {
- err = DB_FAIL;
- }
+ if (success) {
+ err = DB_SUCCESS;
} else {
- err = btr_cur_optimistic_update(BTR_NO_LOCKING_FLAG
- | BTR_NO_UNDO_LOG_FLAG
- | BTR_KEEP_SYS_FLAG,
- btr_cur, node->update,
- node->cmpl_info, thr, mtr);
+ err = DB_FAIL;
}
} else {
ut_ad(mode == BTR_MODIFY_TREE);
- if (do_remove) {
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur, mtr);
+ btr_cur_pessimistic_delete(&err, FALSE, btr_cur, mtr);
- /* The delete operation may fail if we have little
- file space left: TODO: easiest to crash the database
- and restart with more file space */
- } else {
- err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG
- | BTR_NO_UNDO_LOG_FLAG
- | BTR_KEEP_SYS_FLAG,
- btr_cur, node->update,
- node->cmpl_info, thr, mtr);
- }
+ /* The delete operation may fail if we have little
+ file space left: TODO: easiest to crash the database
+ and restart with more file space */
}
return(err);
@@ -204,10 +235,31 @@ row_undo_mod_clust(
err = row_undo_mod_clust_low(node, thr, &mtr, BTR_MODIFY_TREE);
}
- node->state = UNDO_NODE_FETCH_NEXT;
-
btr_pcur_commit_specify_mtr(pcur, &mtr);
+ if (err == DB_SUCCESS && node->rec_type == TRX_UNDO_UPD_DEL_REC) {
+
+ mtr_start(&mtr);
+
+ err = row_undo_mod_remove_clust_low(node, thr, &mtr,
+ BTR_MODIFY_LEAF);
+ if (err != DB_SUCCESS) {
+ btr_pcur_commit_specify_mtr(pcur, &mtr);
+
+ /* We may have to modify tree structure: do a
+ pessimistic descent down the index tree */
+
+ mtr_start(&mtr);
+
+ err = row_undo_mod_remove_clust_low(node, thr, &mtr,
+ BTR_MODIFY_TREE);
+ }
+
+ btr_pcur_commit_specify_mtr(pcur, &mtr);
+ }
+
+ node->state = UNDO_NODE_FETCH_NEXT;
+
trx_undo_rec_release(node->trx, node->undo_no);
if (more_vers && err == DB_SUCCESS) {
@@ -388,7 +440,6 @@ row_undo_mod_del_unmark_sec(
mem_free(err_buf);
} else {
-
btr_cur = btr_pcur_get_btr_cur(&pcur);
err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
@@ -546,11 +597,12 @@ row_undo_mod_parse_undo_rec(
ulint info_bits;
ulint type;
ulint cmpl_info;
+ ibool dummy_extern;
ut_ad(node && thr);
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
- &undo_no, &table_id);
+ &dummy_extern, &undo_no, &table_id);
node->rec_type = type;
node->table = dict_table_get_on_id(table_id, thr_get_trx(thr));
@@ -598,10 +650,9 @@ row_undo_mod(
row_undo_mod_parse_undo_rec(node, thr);
if (node->table == NULL) {
- found = FALSE;
+ found = FALSE;
} else {
-
- found = row_undo_search_clust_to_pcur(node, thr);
+ found = row_undo_search_clust_to_pcur(node, thr);
}
if (!found) {
diff --git a/innobase/row/row0undo.c b/innobase/row/row0undo.c
index 10ac3af6de9..5119254f405 100644
--- a/innobase/row/row0undo.c
+++ b/innobase/row/row0undo.c
@@ -124,6 +124,8 @@ row_undo_node_create(
undo->state = UNDO_NODE_FETCH_NEXT;
undo->trx = trx;
+ btr_pcur_init(&(undo->pcur));
+
undo->heap = mem_heap_create(256);
return(undo);
@@ -303,6 +305,16 @@ row_undo_step(
if (err != DB_SUCCESS) {
/* SQL error detected */
+ fprintf(stderr, "InnoDB: Fatal error %lu in rollback.\n", err);
+
+ if (err == DB_OUT_OF_FILE_SPACE) {
+ fprintf(stderr,
+ "InnoDB: Error 13 means out of tablespace.\n"
+ "InnoDB: Consider increasing your tablespace.\n");
+
+ exit(1);
+ }
+
ut_a(0);
return(NULL);
diff --git a/innobase/row/row0upd.c b/innobase/row/row0upd.c
index 5bca2a24c01..d339474df61 100644
--- a/innobase/row/row0upd.c
+++ b/innobase/row/row0upd.c
@@ -90,8 +90,10 @@ upd_node_create(
node->in_mysql_interface = FALSE;
node->row = NULL;
+ node->ext_vec = NULL;
node->index = NULL;
-
+ node->update = NULL;
+
node->select = NULL;
node->heap = mem_heap_create(128);
@@ -160,7 +162,8 @@ row_upd_index_entry_sys_field(
}
/***************************************************************
-Returns TRUE if row update changes size of some field in index. */
+Returns TRUE if row update changes size of some field in index
+or if some field to be updated is stored externally in rec or update. */
ibool
row_upd_changes_field_size(
@@ -199,6 +202,16 @@ row_upd_changes_field_size(
return(TRUE);
}
+
+ if (rec_get_nth_field_extern_bit(rec, upd_field->field_no)) {
+
+ return(TRUE);
+ }
+
+ if (upd_field->extern_storage) {
+
+ return(TRUE);
+ }
}
return(FALSE);
@@ -441,6 +454,34 @@ row_upd_index_parse(
return(ptr);
}
+
+/*******************************************************************
+Returns TRUE if ext_vec contains i. */
+UNIV_INLINE
+ibool
+upd_ext_vec_contains(
+/*=================*/
+ /* out: TRUE if i is in ext_vec */
+ ulint* ext_vec, /* in: array of field numbers, or NULL */
+ ulint n_ext_vec, /* in: number of field numbers in ext_vec */
+ ulint i) /* in: a number */
+{
+ ulint j;
+
+ if (ext_vec == NULL) {
+
+ return(FALSE);
+ }
+
+ for (j = 0; j < n_ext_vec; j++) {
+ if (ext_vec[j] == i) {
+
+ return(TRUE);
+ }
+ }
+
+ return(FALSE);
+}
/*******************************************************************
Builds an update vector from those fields, excluding the roll ptr and
@@ -454,6 +495,9 @@ row_upd_build_difference(
fields, excluding roll ptr and trx id */
dict_index_t* index, /* in: clustered index */
dtuple_t* entry, /* in: entry to insert */
+ ulint* ext_vec,/* in: array containing field numbers of
+ externally stored fields in entry, or NULL */
+ ulint n_ext_vec,/* in: number of fields in ext_vec */
rec_t* rec, /* in: clustered index record */
mem_heap_t* heap) /* in: memory heap from which allocated */
{
@@ -480,16 +524,25 @@ row_upd_build_difference(
for (i = 0; i < dtuple_get_n_fields(entry); i++) {
data = rec_get_nth_field(rec, i, &len);
+
dfield = dtuple_get_nth_field(entry, i);
- if ((i != trx_id_pos) && (i != roll_ptr_pos)
- && !dfield_data_is_equal(dfield, len, data)) {
+ if ((rec_get_nth_field_extern_bit(rec, i)
+ != upd_ext_vec_contains(ext_vec, n_ext_vec, i))
+ || ((i != trx_id_pos) && (i != roll_ptr_pos)
+ && !dfield_data_is_equal(dfield, len, data))) {
upd_field = upd_get_nth_field(update, n_diff);
dfield_copy(&(upd_field->new_val), dfield);
upd_field_set_field_no(upd_field, i, index);
+
+ if (upd_ext_vec_contains(ext_vec, n_ext_vec, i)) {
+ upd_field->extern_storage = TRUE;
+ } else {
+ upd_field->extern_storage = FALSE;
+ }
n_diff++;
}
@@ -630,9 +683,7 @@ row_upd_changes_ord_field(
}
/***************************************************************
-Checks if an update vector changes an ordering field of an index record.
-This function is fast if the update vector is short or the number of ordering
-fields in the index is small. Otherwise, this can be quadratic. */
+Checks if an update vector changes an ordering field of an index record. */
ibool
row_upd_changes_some_index_ord_field(
@@ -642,19 +693,24 @@ row_upd_changes_some_index_ord_field(
dict_table_t* table, /* in: table */
upd_t* update) /* in: update vector for the row */
{
+ upd_field_t* upd_field;
dict_index_t* index;
-
+ ulint i;
+
index = dict_table_get_first_index(table);
- while (index) {
- if (row_upd_changes_ord_field(NULL, index, update)) {
+ for (i = 0; i < upd_get_n_fields(update); i++) {
- return(TRUE);
- }
+ upd_field = upd_get_nth_field(update, i);
- index = dict_table_get_next_index(index);
- }
+ if (dict_field_get_col(dict_index_get_nth_field(index,
+ upd_field->field_no))
+ ->ord_part) {
+ return(TRUE);
+ }
+ }
+
return(FALSE);
}
@@ -710,15 +766,17 @@ row_upd_eval_new_vals(
/***************************************************************
Stores to the heap the row on which the node->pcur is positioned. */
-UNIV_INLINE
+static
void
row_upd_store_row(
/*==============*/
upd_node_t* node) /* in: row update node */
{
dict_index_t* clust_index;
+ upd_t* update;
+ rec_t* rec;
- ut_ad((node->pcur)->latch_mode != BTR_NO_LATCHES);
+ ut_ad(node->pcur->latch_mode != BTR_NO_LATCHES);
if (node->row != NULL) {
mem_heap_empty(node->heap);
@@ -727,8 +785,20 @@ row_upd_store_row(
clust_index = dict_table_get_first_index(node->table);
- node->row = row_build(ROW_COPY_DATA, clust_index,
- btr_pcur_get_rec(node->pcur), node->heap);
+ rec = btr_pcur_get_rec(node->pcur);
+
+ node->row = row_build(ROW_COPY_DATA, clust_index, rec, node->heap);
+
+ node->ext_vec = mem_heap_alloc(node->heap, rec_get_n_fields(rec));
+
+ if (node->is_delete) {
+ update = NULL;
+ } else {
+ update = node->update;
+ }
+
+ node->n_ext_vec = btr_push_update_extern_fields(node->ext_vec,
+ rec, update);
}
/***************************************************************
@@ -812,7 +882,7 @@ row_upd_sec_index_entry(
row_upd_index_replace_new_col_vals(entry, index, node->update);
/* Insert new index entry */
- err = row_ins_index_entry(index, entry, thr);
+ err = row_ins_index_entry(index, entry, NULL, 0, thr);
mem_heap_free(heap);
@@ -870,6 +940,8 @@ row_upd_clust_rec_by_insert(
dict_table_t* table;
mem_heap_t* heap;
dtuple_t* entry;
+ ulint* ext_vec;
+ ulint n_ext_vec;
ulint err;
ut_ad(node);
@@ -897,14 +969,18 @@ row_upd_clust_rec_by_insert(
heap = mem_heap_create(1024);
+ ext_vec = mem_heap_alloc(heap,
+ sizeof(ulint) * dtuple_get_n_fields(node->row));
+ n_ext_vec = 0;
+
entry = row_build_index_entry(node->row, index, heap);
row_upd_clust_index_replace_new_col_vals(entry, node->update);
-
+
row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id);
- err = row_ins_index_entry(index, entry, thr);
-
+ err = row_ins_index_entry(index, entry, node->ext_vec,
+ node->n_ext_vec, thr);
mem_heap_free(heap);
return(err);
@@ -924,6 +1000,7 @@ row_upd_clust_rec(
que_thr_t* thr, /* in: query thread */
mtr_t* mtr) /* in: mtr; gets committed here */
{
+ big_rec_t* big_rec = NULL;
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
ulint err;
@@ -973,9 +1050,24 @@ row_upd_clust_rec(
ut_ad(FALSE == rec_get_deleted_flag(btr_pcur_get_rec(pcur)));
err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur,
- node->update, node->cmpl_info, thr, mtr);
+ &big_rec, node->update,
+ node->cmpl_info, thr, mtr);
mtr_commit(mtr);
+ if (err == DB_SUCCESS && big_rec) {
+ mtr_start(mtr);
+ ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr));
+
+ err = btr_store_big_rec_extern_fields(index,
+ btr_cur_get_rec(btr_cur),
+ big_rec, mtr);
+ mtr_commit(mtr);
+ }
+
+ if (big_rec) {
+ dtuple_big_rec_free(big_rec);
+ }
+
return(err);
}
@@ -1194,10 +1286,12 @@ row_upd(
ut_ad(node && thr);
if (node->in_mysql_interface) {
+
/* We do not get the cmpl_info value from the MySQL
interpreter: we must calculate it on the fly: */
- if (row_upd_changes_some_index_ord_field(node->table,
+ if (node->is_delete ||
+ row_upd_changes_some_index_ord_field(node->table,
node->update)) {
node->cmpl_info = 0;
} else {
@@ -1239,6 +1333,7 @@ function_exit:
if (node->row != NULL) {
mem_heap_empty(node->heap);
node->row = NULL;
+ node->n_ext_vec = 0;
}
node->state = UPD_NODE_UPDATE_CLUSTERED;
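
The new condition in row_upd_build_difference can be read as: a field belongs in the update vector if its external-storage status in the record differs from what ext_vec says it should be in the new entry, or if it is an ordinary (non-system) column whose data changed. A distilled, hypothetical form of that predicate:

#include <stdio.h>

/* Hypothetical distilled form of the condition in
row_upd_build_difference: include a field in the update vector if its
extern-storage status changes, or if its data changed and it is not
one of the system columns (trx id, roll ptr). */
static int field_needs_update(int extern_in_rec, int extern_in_entry,
                              int data_equal, int is_sys_col)
{
        return((extern_in_rec != extern_in_entry)
               || (!is_sys_col && !data_equal));
}

int main(void)
{
        /* Same data, but the field moves to external storage: 1 */
        printf("%d\n", field_needs_update(0, 1, 1, 0));
        /* Unchanged ordinary field: 0 */
        printf("%d\n", field_needs_update(0, 0, 1, 0));
        return(0);
}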
diff --git a/innobase/srv/srv0srv.c b/innobase/srv/srv0srv.c
index 028fae010d5..8dd9c9f3feb 100644
--- a/innobase/srv/srv0srv.c
+++ b/innobase/srv/srv0srv.c
@@ -93,6 +93,8 @@ ulint srv_lock_wait_timeout = 1024 * 1024 * 1024;
char* srv_unix_file_flush_method_str = NULL;
ulint srv_unix_file_flush_method = 0;
+ibool srv_use_doublewrite_buf = TRUE;
+
ibool srv_set_thread_priorities = TRUE;
int srv_query_thread_priority = 0;
/*-------------------------------------------*/
@@ -109,6 +111,8 @@ ibool srv_print_buf_io = FALSE;
ibool srv_print_log_io = FALSE;
ibool srv_print_latch_waits = FALSE;
+ibool srv_print_innodb_monitor = FALSE;
+
/* The parameters below are obsolete: */
ibool srv_print_parsed_sql = FALSE;
@@ -1492,7 +1496,6 @@ srv_init(void)
slot = srv_mysql_table + i;
slot->in_use = FALSE;
slot->event = os_event_create(NULL);
- slot->suspended = FALSE;
ut_a(slot->event);
}
@@ -1661,7 +1664,6 @@ srv_suspend_mysql_thread(
slot->thr = thr;
os_event_reset(event);
- slot->suspended = TRUE;
slot->suspend_time = ut_time();
@@ -1693,27 +1695,6 @@ srv_suspend_mysql_thread(
return(FALSE);
}
-os_event_t
-srv_mysql_thread_event_get(void)
-{
- srv_slot_t* slot;
- os_event_t event;
-
- mutex_enter(&kernel_mutex);
-
- slot = srv_table_reserve_slot_for_mysql();
-
- event = slot->event;
-
- os_event_reset(event);
-
- slot->suspended = TRUE;
-
- mutex_exit(&kernel_mutex);
-
- return(event);
-}
-
/************************************************************************
Releases a MySQL OS thread waiting for a lock to be released, if the
thread is already suspended. */
@@ -1737,7 +1718,6 @@ srv_release_mysql_thread_if_suspended(
/* Found */
os_event_set(slot->event);
- slot->suspended = FALSE;
return;
}
@@ -1746,59 +1726,6 @@ srv_release_mysql_thread_if_suspended(
/* not found */
}
-void
-srv_mysql_thread_release(void)
-/*==========================*/
-{
- srv_slot_t* slot;
- ulint i;
-
- mutex_enter(&kernel_mutex);
-
- for (i = 0; i < OS_THREAD_MAX_N; i++) {
-
- slot = srv_mysql_table + i;
-
- if (slot->in_use && slot->suspended) {
- /* Found */
- slot->suspended = FALSE;
- mutex_exit(&kernel_mutex);
-
- os_event_set(slot->event);
-
- return;
- }
- }
-
- ut_a(0);
-}
-
-void
-srv_mysql_thread_slot_free(
-/*==========================*/
- os_event_t event)
-{
- srv_slot_t* slot;
- ulint i;
-
- mutex_enter(&kernel_mutex);
-
- for (i = 0; i < OS_THREAD_MAX_N; i++) {
-
- slot = srv_mysql_table + i;
-
- if (slot->in_use && slot->event == event) {
- /* Found */
- slot->in_use = FALSE;
- mutex_exit(&kernel_mutex);
-
- return;
- }
- }
-
- ut_a(0);
-}
-
/*************************************************************************
A thread which wakes up threads whose lock wait may have lasted too long. */
@@ -1924,6 +1851,7 @@ srv_master_thread(
ulint i;
time_t last_flush_time;
time_t current_time;
+ time_t last_monitor_time;
UT_NOT_USED(arg);
@@ -1936,6 +1864,8 @@ srv_master_thread(
mutex_exit(&kernel_mutex);
os_event_set(srv_sys->operational);
+
+ last_monitor_time = time(NULL);
loop:
mutex_enter(&kernel_mutex);
@@ -1975,8 +1905,18 @@ loop:
while (n_pages_purged) {
/* TODO: replace this by a check if we are running
out of file space! */
+ if (srv_print_innodb_monitor) {
+ ut_print_timestamp(stdout);
+ printf(" InnoDB starts purge\n");
+ }
+
n_pages_purged = trx_purge();
+ if (srv_print_innodb_monitor) {
+ ut_print_timestamp(stdout);
+ printf(" InnoDB purged %lu pages\n", n_pages_purged);
+ }
+
current_time = time(NULL);
if (difftime(current_time, last_flush_time) > 1) {
@@ -1986,14 +1926,40 @@ loop:
}
background_loop:
- /*
- sync_array_print_info(sync_primary_wait_array);
- os_aio_print();
- buf_print_io();
- */
/* In this loop we run background operations while the server
is quiet */
+ current_time = time(NULL);
+
+ if (srv_print_innodb_monitor
+ && difftime(current_time, last_monitor_time) > 8) {
+
+ printf("================================\n");
+ last_monitor_time = time(NULL);
+ ut_print_timestamp(stdout);
+
+ printf(" INNODB MONITOR OUTPUT\n"
+ "================================\n");
+ printf("--------------------------\n"
+ "LOCKS HELD BY TRANSACTIONS\n"
+ "--------------------------\n");
+ lock_print_info();
+ printf("-----------------------------------------------\n"
+ "CURRENT SEMAPHORES RESERVED AND SEMAPHORE WAITS\n"
+ "-----------------------------------------------\n");
+ sync_print();
+ printf("CURRENT PENDING FILE I/O'S\n"
+ "--------------------------\n");
+ os_aio_print();
+ printf("-----------\n"
+ "BUFFER POOL\n"
+ "-----------\n");
+ buf_print_io();
+ printf("----------------------------\n"
+ "END OF INNODB MONITOR OUTPUT\n"
+ "============================\n");
+ }
+
mutex_enter(&kernel_mutex);
if (srv_activity_count != old_activity_count) {
mutex_exit(&kernel_mutex);
@@ -2005,8 +1971,18 @@ background_loop:
/* The server has been quiet for a while: start running background
operations */
+ if (srv_print_innodb_monitor) {
+ ut_print_timestamp(stdout);
+ printf(" InnoDB starts purge\n");
+ }
+
n_pages_purged = trx_purge();
+ if (srv_print_innodb_monitor) {
+ ut_print_timestamp(stdout);
+ printf(" InnoDB purged %lu pages\n", n_pages_purged);
+ }
+
mutex_enter(&kernel_mutex);
if (srv_activity_count != old_activity_count) {
mutex_exit(&kernel_mutex);
@@ -2014,8 +1990,18 @@ background_loop:
}
mutex_exit(&kernel_mutex);
+ if (srv_print_innodb_monitor) {
+ ut_print_timestamp(stdout);
+ printf(" InnoDB starts insert buffer merge\n");
+ }
+
n_bytes_merged = ibuf_contract(TRUE);
+ if (srv_print_innodb_monitor) {
+ ut_print_timestamp(stdout);
+ printf(" InnoDB merged %lu bytes\n", n_bytes_merged);
+ }
+
mutex_enter(&kernel_mutex);
if (srv_activity_count != old_activity_count) {
mutex_exit(&kernel_mutex);
@@ -2023,7 +2009,7 @@ background_loop:
}
mutex_exit(&kernel_mutex);
- n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 20, ut_dulint_max);
+ n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100, ut_dulint_max);
mutex_enter(&kernel_mutex);
if (srv_activity_count != old_activity_count) {
@@ -2052,14 +2038,12 @@ background_loop:
/* mem_print_new_info();
*/
-
-/* fsp_print(0); */
-
-/* fprintf(stderr, "Validating tablespace\n");
+/*
+ fsp_print(0);
+ fprintf(stderr, "Validating tablespace\n");
fsp_validate(0);
fprintf(stderr, "Validation ok\n");
*/
-
#ifdef UNIV_SEARCH_PERF_STAT
/* btr_search_print_info(); */
#endif
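
The monitor output added to srv_master_thread's background loop is rate limited: a full report is printed only when srv_print_innodb_monitor is set and more than eight seconds have passed since the previous one. A minimal stand-alone sketch of that pattern using difftime:

#include <stdio.h>
#include <time.h>

/* Sketch of the rate limit used for the monitor output in
srv_master_thread: print only if more than 8 seconds have passed
since the previous full report. */
static void maybe_print_monitor(time_t* last_monitor_time, int enabled)
{
        time_t current_time = time(NULL);

        if (enabled && difftime(current_time, *last_monitor_time) > 8) {

                *last_monitor_time = time(NULL);
                printf("=== monitor output would go here ===\n");
        }
}

int main(void)
{
        time_t last = time(NULL) - 10;  /* pretend the last report is old */

        maybe_print_monitor(&last, 1);  /* prints */
        maybe_print_monitor(&last, 1);  /* suppressed: too soon */
        return(0);
}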
diff --git a/innobase/srv/srv0start.c b/innobase/srv/srv0start.c
index a343f2115e7..a79a808ba2e 100644
--- a/innobase/srv/srv0start.c
+++ b/innobase/srv/srv0start.c
@@ -1,7 +1,7 @@
/************************************************************************
Starts the InnoDB database server
-(c) 1996-2000 InnoDB Oy
+(c) 1996-2000 Innobase Oy
Created 2/16/1996 Heikki Tuuri
*************************************************************************/
@@ -203,8 +203,8 @@ open_or_create_log_file(
sprintf(name, "%s%s%lu", srv_log_group_home_dirs[k], "ib_logfile", i);
- files[i] = os_file_create(name, OS_FILE_CREATE, OS_FILE_NORMAL, &ret);
-
+ files[i] = os_file_create(name, OS_FILE_CREATE, OS_FILE_NORMAL,
+ OS_LOG_FILE, &ret);
if (ret == FALSE) {
if (os_file_get_last_error() != OS_FILE_ALREADY_EXISTS) {
fprintf(stderr,
@@ -214,7 +214,8 @@ open_or_create_log_file(
}
files[i] = os_file_create(
- name, OS_FILE_OPEN, OS_FILE_AIO, &ret);
+ name, OS_FILE_OPEN, OS_FILE_AIO,
+ OS_LOG_FILE, &ret);
if (!ret) {
fprintf(stderr,
"InnoDB: Error in opening %s\n", name);
@@ -239,7 +240,7 @@ open_or_create_log_file(
fprintf(stderr,
"InnoDB: Log file %s did not exist: new to be created\n",
name);
- printf("InnoDB: Setting log file %s size to %lu\n",
+ fprintf(stderr, "InnoDB: Setting log file %s size to %lu\n",
name, UNIV_PAGE_SIZE * srv_log_file_size);
ret = os_file_set_size(name, files[i],
@@ -330,27 +331,28 @@ open_or_create_data_files(
sprintf(name, "%s%s", srv_data_home, srv_data_file_names[i]);
- if (srv_data_file_is_raw_partition[i] == 0) {
-
- files[i] = os_file_create(name, OS_FILE_CREATE,
- OS_FILE_NORMAL, &ret);
- } else if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) {
- ret = FALSE;
- } else if (srv_data_file_is_raw_partition[i] == SRV_NEW_RAW) {
+ files[i] = os_file_create(name, OS_FILE_CREATE,
+ OS_FILE_NORMAL, OS_DATA_FILE, &ret);
- files[i] = os_file_create(
- name, OS_FILE_OPEN, OS_FILE_NORMAL, &ret);
+ if (srv_data_file_is_raw_partition[i] == SRV_NEW_RAW) {
+ /* The partition is opened, not created; then it is
+ written over */
- if (!ret) {
+ files[i] = os_file_create(
+ name, OS_FILE_OPEN, OS_FILE_NORMAL,
+ OS_DATA_FILE, &ret);
+ if (!ret) {
fprintf(stderr,
"InnoDB: Error in opening %s\n", name);
return(DB_ERROR);
- }
+ }
+ } else if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) {
+ ret = FALSE;
}
if (ret == FALSE) {
- if (srv_data_file_is_raw_partition[i] == 0
+ if (srv_data_file_is_raw_partition[i] != SRV_OLD_RAW
&& os_file_get_last_error() !=
OS_FILE_ALREADY_EXISTS) {
fprintf(stderr,
@@ -370,8 +372,8 @@ open_or_create_data_files(
}
files[i] = os_file_create(
- name, OS_FILE_OPEN, OS_FILE_NORMAL, &ret);
-
+ name, OS_FILE_OPEN, OS_FILE_NORMAL,
+ OS_DATA_FILE, &ret);
if (!ret) {
fprintf(stderr,
"InnoDB: Error in opening %s\n", name);
@@ -379,18 +381,21 @@ open_or_create_data_files(
return(DB_ERROR);
}
- ret = os_file_get_size(files[i], &size, &size_high);
- ut_a(ret);
+ if (srv_data_file_is_raw_partition[i] != SRV_OLD_RAW) {
+
+ ret = os_file_get_size(files[i], &size,
+ &size_high);
+ ut_a(ret);
- if (srv_data_file_is_raw_partition[i] == 0
- && (size != UNIV_PAGE_SIZE * srv_data_file_sizes[i]
- || size_high != 0)) {
-
- fprintf(stderr,
+ if (size !=
+ UNIV_PAGE_SIZE * srv_data_file_sizes[i]
+ || size_high != 0) {
+ fprintf(stderr,
"InnoDB: Error: data file %s is of different size\n"
"InnoDB: than specified in the .cnf file!\n", name);
- return(DB_ERROR);
+ return(DB_ERROR);
+ }
}
fil_read_flushed_lsn_and_arch_log_no(files[i],
@@ -403,7 +408,8 @@ open_or_create_data_files(
if (i > 0) {
fprintf(stderr,
- "InnoDB: Data file %s did not exist: new to be created\n", name);
+ "InnoDB: Data file %s did not exist: new to be created\n",
+ name);
} else {
fprintf(stderr,
"InnoDB: The first specified data file %s did not exist:\n"
@@ -411,10 +417,10 @@ open_or_create_data_files(
*create_new_db = TRUE;
}
- printf("InnoDB: Setting file %s size to %lu\n",
+ fprintf(stderr, "InnoDB: Setting file %s size to %lu\n",
name, UNIV_PAGE_SIZE * srv_data_file_sizes[i]);
- printf(
+ fprintf(stderr,
"InnoDB: Database physically writes the file full: wait...\n");
ret = os_file_set_size(name, files[i],
@@ -555,19 +561,22 @@ innobase_start_or_create_for_mysql(void)
srv_startup_is_before_trx_rollback_phase = TRUE;
if (0 == ut_strcmp(srv_unix_file_flush_method_str, "fdatasync")) {
- srv_unix_file_flush_method = SRV_UNIX_FDATASYNC;
+ srv_unix_file_flush_method = SRV_UNIX_FDATASYNC;
+
} else if (0 == ut_strcmp(srv_unix_file_flush_method_str, "O_DSYNC")) {
- srv_unix_file_flush_method = SRV_UNIX_O_DSYNC;
+ srv_unix_file_flush_method = SRV_UNIX_O_DSYNC;
+
} else if (0 == ut_strcmp(srv_unix_file_flush_method_str,
"littlesync")) {
- srv_unix_file_flush_method = SRV_UNIX_LITTLESYNC;
+ srv_unix_file_flush_method = SRV_UNIX_LITTLESYNC;
+
} else if (0 == ut_strcmp(srv_unix_file_flush_method_str, "nosync")) {
- srv_unix_file_flush_method = SRV_UNIX_NOSYNC;
+ srv_unix_file_flush_method = SRV_UNIX_NOSYNC;
} else {
- fprintf(stderr,
- "InnoDB: Unrecognized value for innodb_unix_file_flush_method\n");
-
- return(DB_ERROR);
+ fprintf(stderr,
+ "InnoDB: Unrecognized value %s for innodb_flush_method\n",
+ srv_unix_file_flush_method_str);
+ return(DB_ERROR);
}
/*
@@ -593,14 +602,15 @@ innobase_start_or_create_for_mysql(void)
#ifdef __WIN__
if (os_get_os_version() == OS_WIN95
|| os_get_os_version() == OS_WIN31) {
- /* On Win 95, 98, ME, and Win32 subsystem for Windows 3.1 use
- simulated aio */
- os_aio_use_native_aio = FALSE;
- srv_n_file_io_threads = 4;
+ /* On Win 95, 98, ME, and Win32 subsystem for Windows 3.1 use
+ simulated aio */
+
+ os_aio_use_native_aio = FALSE;
+ srv_n_file_io_threads = 4;
} else {
- /* On NT and Win 2000 always use aio */
- os_aio_use_native_aio = TRUE;
+ /* On NT and Win 2000 always use aio */
+ os_aio_use_native_aio = TRUE;
}
#endif
if (!os_aio_use_native_aio) {
@@ -652,14 +662,21 @@ innobase_start_or_create_for_mysql(void)
sum_of_new_sizes = 0;
for (i = 0; i < srv_n_data_files; i++) {
- sum_of_new_sizes += srv_data_file_sizes[i];
+ if (srv_data_file_sizes[i] >= 262144) {
+ fprintf(stderr,
+ "InnoDB: Error: file size must be < 4 GB, or on some OS's < 2 GB\n");
+
+ return(DB_ERROR);
+ }
+
+ sum_of_new_sizes += srv_data_file_sizes[i];
}
if (sum_of_new_sizes < 640) {
- fprintf(stderr,
+ fprintf(stderr,
"InnoDB: Error: tablespace size must be at least 10 MB\n");
- return(DB_ERROR);
+ return(DB_ERROR);
}
err = open_or_create_data_files(&create_new_db,
@@ -673,6 +690,15 @@ innobase_start_or_create_for_mysql(void)
return((int) err);
}
+ if (!create_new_db) {
+ /* If we are using the doublewrite method, we will
+ check if there are half-written pages in data files,
+ and restore them from the doublewrite buffer if
+ possible */
+
+ trx_sys_doublewrite_restore_corrupt_pages();
+ }
+
srv_normalize_path_for_win(srv_arch_dir);
srv_arch_dir = srv_add_path_separator_if_needed(srv_arch_dir);
@@ -742,7 +768,6 @@ innobase_start_or_create_for_mysql(void)
mutex_exit(&(log_sys->mutex));
}
- /* mutex_create(&row_mysql_thread_mutex); */
sess_sys_init_at_db_start();
if (create_new_db) {
@@ -834,7 +859,7 @@ innobase_start_or_create_for_mysql(void)
}
if (srv_measure_contention) {
- /* os_thread_create(&test_measure_cont, NULL, thread_ids +
+ /* os_thread_create(&test_measure_cont, NULL, thread_ids +
SRV_MAX_N_IO_THREADS); */
}
@@ -849,16 +874,20 @@ innobase_start_or_create_for_mysql(void)
/* Create the thread which watches the timeouts for lock waits */
os_thread_create(&srv_lock_timeout_monitor_thread, NULL,
thread_ids + 2 + SRV_MAX_N_IO_THREADS);
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Started\n");
-
srv_was_started = TRUE;
srv_is_being_started = FALSE;
sync_order_checks_on = TRUE;
+ if (srv_use_doublewrite_buf && trx_doublewrite == NULL) {
+ trx_sys_create_doublewrite_buf();
+ }
+
/* buf_debug_prints = TRUE; */
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Started\n");
+
return((int) DB_SUCCESS);
}
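
The new per-file size check in innobase_start_or_create_for_mysql rejects any data file of 262144 pages or more. srv_data_file_sizes[] is measured in database pages, so with the default 16 KiB page size this works out to

    262144 pages * 16 KiB/page = 4194304 KiB = 4 GiB,

matching the 4 GB limit quoted in the error message; on systems limited to 2 GiB files the practical maximum is half of that, 131072 pages.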
diff --git a/innobase/sync/sync0rw.c b/innobase/sync/sync0rw.c
index a77cc76ed37..dc49ce2197e 100644
--- a/innobase/sync/sync0rw.c
+++ b/innobase/sync/sync0rw.c
@@ -810,11 +810,10 @@ rw_lock_print(
ulint count = 0;
rw_lock_debug_t* info;
- printf("----------------------------------------------\n");
+ printf("-------------------------------------------------\n");
printf("RW-LOCK INFO\n");
printf("RW-LOCK: %lx ", (ulint)lock);
- mutex_enter(&(lock->mutex));
if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
|| (rw_lock_get_reader_count(lock) != 0)
|| (rw_lock_get_waiters(lock) != 0)) {
@@ -831,8 +830,6 @@ rw_lock_print(
info = UT_LIST_GET_NEXT(list, info);
}
}
-
- mutex_exit(&(lock->mutex));
#endif
}
diff --git a/innobase/sync/sync0sync.c b/innobase/sync/sync0sync.c
index c3a1ac3b47f..a125f65be41 100644
--- a/innobase/sync/sync0sync.c
+++ b/innobase/sync/sync0sync.c
@@ -158,7 +158,7 @@ struct sync_thread_struct{
};
/* Number of slots reserved for each OS thread in the sync level array */
-#define SYNC_THREAD_N_LEVELS 256
+#define SYNC_THREAD_N_LEVELS 10000
struct sync_level_struct{
void* latch; /* pointer to a mutex or an rw-lock; NULL means that
@@ -768,6 +768,9 @@ sync_thread_levels_g(
thread */
ulint limit) /* in: level limit */
{
+ char* file_name;
+ ulint line;
+ ulint thread_id;
sync_level_t* slot;
rw_lock_t* lock;
mutex_t* mutex;
@@ -783,8 +786,29 @@ sync_thread_levels_g(
lock = slot->latch;
mutex = slot->latch;
- ut_error;
-
+ printf(
+ "InnoDB error: sync levels should be > %lu but a level is %lu\n",
+ limit, slot->level);
+
+ if (mutex->magic_n == MUTEX_MAGIC_N) {
+ printf("Mutex created at %s %lu\n", &(mutex->cfile_name),
+ mutex->cline);
+
+ if (mutex_get_lock_word(mutex) != 0) {
+
+ mutex_get_debug_info(mutex,
+ &file_name, &line, &thread_id);
+
+ printf("InnoDB: Locked mutex: addr %lx thread %ld file %s line %ld\n",
+ (ulint)mutex, thread_id,
+ file_name, line);
+ } else {
+ printf("Not locked\n");
+ }
+ } else {
+ rw_lock_print(lock);
+ }
+
return(FALSE);
}
}
@@ -973,6 +997,8 @@ sync_thread_add_level(
ut_a(sync_thread_levels_g(array, SYNC_ANY_LATCH));
} else if (level == SYNC_TRX_SYS_HEADER) {
ut_a(sync_thread_levels_contain(array, SYNC_KERNEL));
+ } else if (level == SYNC_DOUBLEWRITE) {
+ ut_a(sync_thread_levels_g(array, SYNC_DOUBLEWRITE));
} else if (level == SYNC_BUF_BLOCK) {
ut_a((sync_thread_levels_contain(array, SYNC_BUF_POOL)
&& sync_thread_levels_g(array, SYNC_BUF_BLOCK - 1))
@@ -1000,6 +1026,8 @@ sync_thread_add_level(
} else if (level == SYNC_FSP) {
ut_a(sync_thread_levels_contain(array, SYNC_FSP)
|| sync_thread_levels_g(array, SYNC_FSP));
+ } else if (level == SYNC_EXTERN_STORAGE) {
+ ut_a(TRUE);
} else if (level == SYNC_TRX_UNDO_PAGE) {
ut_a(sync_thread_levels_contain(array, SYNC_TRX_UNDO)
|| sync_thread_levels_contain(array, SYNC_RSEG)
@@ -1221,10 +1249,10 @@ void
sync_print(void)
/*============*/
{
- printf("SYNC INFO:------------------------------------------\n");
+ printf("SYNC INFO:\n");
mutex_list_print_info();
rw_lock_list_print_info();
sync_array_print_info(sync_primary_wait_array);
sync_print_wait_info();
- printf("----------------------------------------------------\n");
+ printf("-----------------------------------------------------\n");
}
diff --git a/innobase/trx/trx0purge.c b/innobase/trx/trx0purge.c
index f65943f27e3..032b3ffcf3b 100644
--- a/innobase/trx/trx0purge.c
+++ b/innobase/trx/trx0purge.c
@@ -692,6 +692,9 @@ trx_purge_choose_next_log(void)
min_rseg = rseg;
min_trx_no = rseg->last_trx_no;
space = rseg->space;
+ ut_a(space == 0); /* We assume in purge of
+ externally stored fields
+ that space id == 0 */
page_no = rseg->last_page_no;
offset = rseg->last_offset;
}
@@ -820,6 +823,10 @@ trx_purge_get_next_rec(
}
cmpl_info = trx_undo_rec_get_cmpl_info(rec2);
+
+ if (trx_undo_rec_get_extern_storage(rec2)) {
+ break;
+ }
if ((type == TRX_UNDO_UPD_EXIST_REC)
&& !(cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
diff --git a/innobase/trx/trx0rec.c b/innobase/trx/trx0rec.c
index c31d786011d..64febb8f523 100644
--- a/innobase/trx/trx0rec.c
+++ b/innobase/trx/trx0rec.c
@@ -292,6 +292,8 @@ trx_undo_rec_get_pars(
TRX_UNDO_INSERT_REC, ... */
ulint* cmpl_info, /* out: compiler info, relevant only
for update type records */
+ ibool* updated_extern, /* out: TRUE if we updated an
+ externally stored field */
dulint* undo_no, /* out: undo log record number */
dulint* table_id) /* out: table id */
{
@@ -303,7 +305,14 @@ trx_undo_rec_get_pars(
type_cmpl = mach_read_from_1(ptr);
ptr++;
-
+
+ if (type_cmpl & TRX_UNDO_UPD_EXTERN) {
+ *updated_extern = TRUE;
+ type_cmpl -= TRX_UNDO_UPD_EXTERN;
+ } else {
+ *updated_extern = FALSE;
+ }
+
*type = type_cmpl & (TRX_UNDO_CMPL_INFO_MULT - 1);
*cmpl_info = type_cmpl / TRX_UNDO_CMPL_INFO_MULT;
@@ -336,7 +345,11 @@ trx_undo_rec_get_col_val(
*field = ptr;
if (*len != UNIV_SQL_NULL) {
- ptr += *len;
+ if (*len >= UNIV_EXTERN_STORAGE_FIELD) {
+ ptr += (*len - UNIV_EXTERN_STORAGE_FIELD);
+ } else {
+ ptr += *len;
+ }
}
return(ptr);
@@ -452,6 +465,7 @@ trx_undo_page_report_modify(
ulint col_no;
byte* old_ptr;
ulint type_cmpl;
+ byte* type_cmpl_ptr;
ulint i;
ut_ad(index->type & DICT_CLUSTERED);
@@ -491,6 +505,8 @@ trx_undo_page_report_modify(
mach_write_to_1(ptr, type_cmpl);
+ type_cmpl_ptr = ptr;
+
ptr++;
len = mach_dulint_write_much_compressed(ptr, trx->undo_no);
ptr += len;
@@ -577,7 +593,23 @@ trx_undo_page_report_modify(
return(0);
}
- len = mach_write_compressed(ptr, flen);
+ if (rec_get_nth_field_extern_bit(rec, pos)) {
+ /* If a field has external storage, we add
+ the flag to flen */
+
+ len = mach_write_compressed(ptr,
+ UNIV_EXTERN_STORAGE_FIELD + flen);
+
+ /* Notify purge that it eventually has to free the old
+ externally stored field */
+
+ (trx->update_undo)->del_marks = TRUE;
+
+ *type_cmpl_ptr = *type_cmpl_ptr | TRX_UNDO_UPD_EXTERN;
+ } else {
+ len = mach_write_compressed(ptr, flen);
+ }
+
ptr += len;
if (flen != UNIV_SQL_NULL) {
@@ -825,6 +857,13 @@ trx_undo_update_rec_get_update(
upd_field_set_field_no(upd_field, field_no, index);
+ if (len != UNIV_SQL_NULL && len >= UNIV_EXTERN_STORAGE_FIELD) {
+
+ upd_field->extern_storage = TRUE;
+
+ len -= UNIV_EXTERN_STORAGE_FIELD;
+ }
+
dfield_set_data(&(upd_field->new_val), field, len);
}
@@ -1222,8 +1261,10 @@ trx_undo_prev_version_build(
byte* ptr;
ulint info_bits;
ulint cmpl_info;
+ ibool dummy_extern;
byte* buf;
ulint err;
+ ulint i;
ut_ad(rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
ut_ad(mtr_memo_contains(index_mtr, buf_block_align(index_rec),
@@ -1252,8 +1293,9 @@ trx_undo_prev_version_build(
return(err);
}
- ptr = trx_undo_rec_get_pars(undo_rec, &type, &cmpl_info, &undo_no,
- &table_id);
+ ptr = trx_undo_rec_get_pars(undo_rec, &type, &cmpl_info,
+ &dummy_extern, &undo_no, &table_id);
+
ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
&info_bits);
ptr = trx_undo_rec_skip_row_ref(ptr, index);
@@ -1278,5 +1320,15 @@ trx_undo_prev_version_build(
row_upd_rec_in_place(*old_vers, update);
}
+ for (i = 0; i < upd_get_n_fields(update); i++) {
+
+ if (upd_get_nth_field(update, i)->extern_storage) {
+
+ rec_set_nth_field_extern_bit(*old_vers,
+ upd_get_nth_field(update, i)->field_no,
+ TRUE, NULL);
+ }
+ }
+
return(DB_SUCCESS);
}
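
In the trx0rec.c hunks above, the first byte of an update undo record packs the record type, the compiler info, and now the TRX_UNDO_UPD_EXTERN flag, and trx_undo_rec_get_pars unpacks all three. A self-contained sketch of that packing, assuming the conventional values TRX_UNDO_CMPL_INFO_MULT = 16 and TRX_UNDO_UPD_EXTERN = 128 (the real constants live in trx0rec.h):

#include <stdio.h>

/* Assumed constants; see trx0rec.h for the real definitions. */
#define TRX_UNDO_CMPL_INFO_MULT 16
#define TRX_UNDO_UPD_EXTERN     128

/* Pack type, compiler info and the extern-update flag into one byte,
as trx_undo_page_report_modify does. */
static unsigned pack(unsigned type, unsigned cmpl_info, int updated_extern)
{
        unsigned type_cmpl = type + cmpl_info * TRX_UNDO_CMPL_INFO_MULT;

        if (updated_extern) {
                type_cmpl |= TRX_UNDO_UPD_EXTERN;
        }
        return(type_cmpl);
}

/* Unpack it again, mirroring trx_undo_rec_get_pars. */
static void unpack(unsigned type_cmpl, unsigned* type,
                   unsigned* cmpl_info, int* updated_extern)
{
        if (type_cmpl & TRX_UNDO_UPD_EXTERN) {
                *updated_extern = 1;
                type_cmpl -= TRX_UNDO_UPD_EXTERN;
        } else {
                *updated_extern = 0;
        }

        *type      = type_cmpl & (TRX_UNDO_CMPL_INFO_MULT - 1);
        *cmpl_info = type_cmpl / TRX_UNDO_CMPL_INFO_MULT;
}

int main(void)
{
        unsigned type, cmpl_info;
        int      updated_extern;

        unpack(pack(12, 3, 1), &type, &cmpl_info, &updated_extern);
        printf("type %u cmpl_info %u extern %d\n",
               type, cmpl_info, updated_extern);
        return(0);
}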
diff --git a/innobase/trx/trx0sys.c b/innobase/trx/trx0sys.c
index 99ec5b50237..b056975d28a 100644
--- a/innobase/trx/trx0sys.c
+++ b/innobase/trx/trx0sys.c
@@ -19,9 +19,326 @@ Created 3/26/1996 Heikki Tuuri
#include "trx0undo.h"
#include "srv0srv.h"
#include "trx0purge.h"
+#include "log0log.h"
/* The transaction system */
-trx_sys_t* trx_sys = NULL;
+trx_sys_t* trx_sys = NULL;
+trx_doublewrite_t* trx_doublewrite = NULL;
+
+/********************************************************************
+Creates or initializes the doublewrite buffer at a database start. */
+static
+void
+trx_doublewrite_init(
+/*=================*/
+ byte* doublewrite) /* in: pointer to the doublewrite buf
+ header on trx sys page */
+{
+ trx_doublewrite = mem_alloc(sizeof(trx_doublewrite_t));
+
+ mutex_create(&(trx_doublewrite->mutex));
+ mutex_set_level(&(trx_doublewrite->mutex), SYNC_DOUBLEWRITE);
+
+ trx_doublewrite->first_free = 0;
+
+ trx_doublewrite->block1 = mach_read_from_4(
+ doublewrite
+ + TRX_SYS_DOUBLEWRITE_BLOCK1);
+ trx_doublewrite->block2 = mach_read_from_4(
+ doublewrite
+ + TRX_SYS_DOUBLEWRITE_BLOCK2);
+ trx_doublewrite->write_buf_unaligned =
+ ut_malloc(
+ (1 + 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE)
+ * UNIV_PAGE_SIZE);
+
+ trx_doublewrite->write_buf = ut_align(
+ trx_doublewrite->write_buf_unaligned,
+ UNIV_PAGE_SIZE);
+ trx_doublewrite->buf_block_arr = mem_alloc(
+ 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
+ * sizeof(void*));
+}
+
+/********************************************************************
+Creates the doublewrite buffer at a database start. The header of the
+doublewrite buffer is placed on the trx system header page. */
+
+void
+trx_sys_create_doublewrite_buf(void)
+/*================================*/
+{
+ page_t* page;
+ page_t* page2;
+ page_t* new_page;
+ byte* doublewrite;
+ byte* fseg_header;
+ ulint page_no;
+ ulint prev_page_no;
+ ulint i;
+ mtr_t mtr;
+
+ if (trx_doublewrite) {
+ /* Already inited */
+
+ return;
+ }
+
+start_again:
+ mtr_start(&mtr);
+
+ page = buf_page_get(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, RW_X_LATCH, &mtr);
+ buf_page_dbg_add_level(page, SYNC_NO_ORDER_CHECK);
+
+ doublewrite = page + TRX_SYS_DOUBLEWRITE;
+
+ if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC)
+ == TRX_SYS_DOUBLEWRITE_MAGIC_N) {
+
+ /* The doublewrite buffer has already been created:
+ just read in some numbers */
+
+ trx_doublewrite_init(doublewrite);
+
+ mtr_commit(&mtr);
+ } else {
+ fprintf(stderr,
+ "InnoDB: Doublewrite buffer not found: creating new\n");
+
+ if (buf_pool_get_curr_size() <
+ (2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
+ + FSP_EXTENT_SIZE / 2 + 100)
+ * UNIV_PAGE_SIZE) {
+ fprintf(stderr,
+ "InnoDB: Cannot create doublewrite buffer: you must\n"
+ "InnoDB: increase your buffer pool size.\n"
+ "InnoDB: Cannot continue operation.\n");
+
+ exit(1);
+ }
+
+ page2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO,
+ TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG, &mtr);
+
+ /* fseg_create acquires a second latch on the page,
+ therefore we must declare it: */
+
+ buf_page_dbg_add_level(page2, SYNC_NO_ORDER_CHECK);
+
+ if (page2 == NULL) {
+ fprintf(stderr,
+ "InnoDB: Cannot create doublewrite buffer: you must\n"
+ "InnoDB: increase your tablespace size.\n"
+ "InnoDB: Cannot continue operation.\n");
+
+ /* We exit without committing the mtr to prevent
+ its modifications from getting to disk */
+
+ exit(1);
+ }
+
+ fseg_header = page + TRX_SYS_DOUBLEWRITE
+ + TRX_SYS_DOUBLEWRITE_FSEG;
+ prev_page_no = 0;
+
+ for (i = 0; i < 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
+ + FSP_EXTENT_SIZE / 2; i++) {
+ page_no = fseg_alloc_free_page(fseg_header,
+ prev_page_no + 1,
+ FSP_UP, &mtr);
+ if (page_no == FIL_NULL) {
+ fprintf(stderr,
+ "InnoDB: Cannot create doublewrite buffer: you must\n"
+ "InnoDB: increase your tablespace size.\n"
+ "InnoDB: Cannot continue operation.\n");
+
+ exit(1);
+ }
+
+ /* We read the allocated pages to the buffer pool;
+ when they are written to disk in a flush, the space
+ id and page number fields are also written to the
+ pages. When, at database startup, we read pages
+ from the doublewrite buffer, we know that if the
+ space id and page number in them are the same as
+ the page position in the tablespace, then the page
+ has not been written to in doublewrite. */
+
+ new_page = buf_page_get(TRX_SYS_SPACE, page_no,
+ RW_X_LATCH, &mtr);
+ buf_page_dbg_add_level(new_page, SYNC_NO_ORDER_CHECK);
+
+ /* Make a dummy change to the page to ensure it will
+ be written to disk in a flush */
+
+ mlog_write_ulint(new_page + FIL_PAGE_DATA,
+ TRX_SYS_DOUBLEWRITE_MAGIC_N,
+ MLOG_4BYTES, &mtr);
+
+ if (i == FSP_EXTENT_SIZE / 2) {
+ mlog_write_ulint(doublewrite
+ + TRX_SYS_DOUBLEWRITE_BLOCK1,
+ page_no, MLOG_4BYTES, &mtr);
+ mlog_write_ulint(doublewrite
+ + TRX_SYS_DOUBLEWRITE_REPEAT
+ + TRX_SYS_DOUBLEWRITE_BLOCK1,
+ page_no, MLOG_4BYTES, &mtr);
+ } else if (i == FSP_EXTENT_SIZE / 2
+ + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
+ mlog_write_ulint(doublewrite
+ + TRX_SYS_DOUBLEWRITE_BLOCK2,
+ page_no, MLOG_4BYTES, &mtr);
+ mlog_write_ulint(doublewrite
+ + TRX_SYS_DOUBLEWRITE_REPEAT
+ + TRX_SYS_DOUBLEWRITE_BLOCK2,
+ page_no, MLOG_4BYTES, &mtr);
+ } else if (i > FSP_EXTENT_SIZE / 2) {
+ ut_a(page_no == prev_page_no + 1);
+ }
+
+ prev_page_no = page_no;
+ }
+
+ mlog_write_ulint(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC,
+ TRX_SYS_DOUBLEWRITE_MAGIC_N, MLOG_4BYTES, &mtr);
+ mlog_write_ulint(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC
+ + TRX_SYS_DOUBLEWRITE_REPEAT,
+ TRX_SYS_DOUBLEWRITE_MAGIC_N, MLOG_4BYTES, &mtr);
+ mtr_commit(&mtr);
+
+ /* Flush the modified pages to disk and make a checkpoint */
+ log_make_checkpoint_at(ut_dulint_max, TRUE);
+
+ fprintf(stderr, "InnoDB: Doublewrite buffer created\n");
+
+ goto start_again;
+ }
+}
+
+/********************************************************************
+At a database startup, uses a possible doublewrite buffer to restore
+half-written pages in the data files. */
+
+void
+trx_sys_doublewrite_restore_corrupt_pages(void)
+/*===========================================*/
+{
+ byte* buf;
+ byte* read_buf;
+ byte* unaligned_read_buf;
+ ulint block1;
+ ulint block2;
+ byte* page;
+ byte* doublewrite;
+ ulint space_id;
+ ulint page_no;
+ ulint i;
+
+ /* We do the file i/o bypassing the buffer pool */
+
+ unaligned_read_buf = ut_malloc(2 * UNIV_PAGE_SIZE);
+ read_buf = ut_align(unaligned_read_buf, UNIV_PAGE_SIZE);
+
+ /* Read the trx sys header to check if we are using the
+ doublewrite buffer */
+
+ fil_io(OS_FILE_READ, TRUE, TRX_SYS_SPACE, TRX_SYS_PAGE_NO, 0,
+ UNIV_PAGE_SIZE, read_buf, NULL);
+
+ doublewrite = read_buf + TRX_SYS_DOUBLEWRITE;
+
+ if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC)
+ == TRX_SYS_DOUBLEWRITE_MAGIC_N) {
+ /* The doublewrite buffer has been created */
+
+ trx_doublewrite_init(doublewrite);
+
+ block1 = trx_doublewrite->block1;
+ block2 = trx_doublewrite->block2;
+
+ buf = trx_doublewrite->write_buf;
+ } else {
+ goto leave_func;
+ }
+
+ /* Read the pages from the doublewrite buffer to memory */
+
+ fil_io(OS_FILE_READ, TRUE, TRX_SYS_SPACE, block1, 0,
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE,
+ buf, NULL);
+ fil_io(OS_FILE_READ, TRUE, TRX_SYS_SPACE, block2, 0,
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE,
+ buf + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE,
+ NULL);
+ /* Check if any of these pages is half-written in data files, in the
+ intended position */
+
+ page = buf;
+
+ for (i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 2; i++) {
+
+ space_id = mach_read_from_4(page + FIL_PAGE_SPACE);
+ page_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
+
+ if (!fil_check_adress_in_tablespace(space_id, page_no)) {
+ fprintf(stderr,
+ "InnoDB: Warning: an inconsistent page in the doublewrite buffer\n"
+ "InnoDB: space id %lu page number %lu, %lu'th page in dblwr buf.\n",
+ space_id, page_no, i);
+
+ } else if (space_id == TRX_SYS_SPACE
+ && ( (page_no >= block1
+ && page_no
+ < block1 + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE)
+ || (page_no >= block2
+ && page_no
+ < block2 + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE))) {
+
+ /* It is an unwritten doublewrite buffer page:
+ do nothing */
+
+ } else {
+ /* Read in the actual page from the data files */
+
+ fil_io(OS_FILE_READ, TRUE, space_id, page_no, 0,
+ UNIV_PAGE_SIZE, read_buf, NULL);
+ /* Check if the page is corrupt */
+
+ if (buf_page_is_corrupted(read_buf)) {
+
+ fprintf(stderr,
+ "InnoDB: Warning: database page corruption or a failed\n"
+ "InnoDB: file read of page %lu.\n", page_no);
+ fprintf(stderr,
+ "InnoDB: Trying to recover it from the doublewrite buffer.\n");
+
+ if (buf_page_is_corrupted(page)) {
+ fprintf(stderr,
+ "InnoDB: Also the page in the doublewrite buffer is corrupt.\n"
+ "InnoDB: Cannot continue operation.\n");
+ exit(1);
+ }
+
+ /* Write the good page from the
+ doublewrite buffer to the intended
+ position */
+
+ fil_io(OS_FILE_WRITE, TRUE, space_id,
+ page_no, 0,
+ UNIV_PAGE_SIZE, page, NULL);
+ fprintf(stderr,
+ "InnoDB: Recovered the page from the doublewrite buffer.\n");
+ }
+ }
+
+ page += UNIV_PAGE_SIZE;
+ }
+
+ fil_flush_file_spaces(FIL_TABLESPACE);
+
+leave_func:
+ ut_free(unaligned_read_buf);
+}
/********************************************************************
Checks that trx is in the trx list. */
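
The new trx_sys_doublewrite_restore_corrupt_pages() above walks every page copy in the doublewrite area and decides, per copy, whether to warn, skip it, keep the data-file page, restore it from the copy, or abort. The C sketch below condenses just that decision; the stub predicates are hypothetical stand-ins for fil_check_adress_in_tablespace() and buf_page_is_corrupted(), and space id 0 is assumed to play the role of TRX_SYS_SPACE.

/* Standalone sketch of the per-copy decision; all helpers are stubs. */
#include <stdio.h>

typedef enum {
	DBLWR_WARN_BAD_ADDRESS,		/* copy points outside any tablespace */
	DBLWR_SKIP_OWN_PAGE,		/* copy is a doublewrite page itself */
	DBLWR_KEEP_DATAFILE_PAGE,	/* data-file page is intact */
	DBLWR_RESTORE_FROM_COPY,	/* data-file page torn, copy is good */
	DBLWR_FATAL_BOTH_CORRUPT	/* cannot continue operation */
} dblwr_action_t;

typedef struct {
	unsigned long	block1;		/* first page number of block 1 */
	unsigned long	block2;		/* first page number of block 2 */
	unsigned long	block_size;	/* pages per block */
} dblwr_layout_t;

static int addr_in_tablespace(unsigned long space, unsigned long page)
	{ (void) space; (void) page; return(1); }	/* stub */
static int datafile_page_corrupt(unsigned long space, unsigned long page)
	{ (void) space; (void) page; return(1); }	/* stub */
static int dblwr_copy_corrupt(unsigned long page)
	{ (void) page; return(0); }			/* stub */

static dblwr_action_t
dblwr_decide(const dblwr_layout_t* l, unsigned long space, unsigned long page)
{
	if (!addr_in_tablespace(space, page)) {
		return(DBLWR_WARN_BAD_ADDRESS);
	}
	if (space == 0
	    && ((page >= l->block1 && page < l->block1 + l->block_size)
		|| (page >= l->block2 && page < l->block2 + l->block_size))) {
		return(DBLWR_SKIP_OWN_PAGE);
	}
	if (!datafile_page_corrupt(space, page)) {
		return(DBLWR_KEEP_DATAFILE_PAGE);
	}
	if (dblwr_copy_corrupt(page)) {
		return(DBLWR_FATAL_BOTH_CORRUPT);
	}
	return(DBLWR_RESTORE_FROM_COPY);
}

int
main(void)
{
	dblwr_layout_t	l = {64, 192, 128};

	printf("action=%d\n", (int) dblwr_decide(&l, 5, 1000));
	return(0);
}
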
diff --git a/myisam/myisamchk.c b/myisam/myisamchk.c
index 61ad939d6ed..ec3f0c8dca4 100644
--- a/myisam/myisamchk.c
+++ b/myisam/myisamchk.c
@@ -201,7 +201,7 @@ static struct option long_options[] =
static void print_version(void)
{
- printf("%s Ver 1.48 for %s at %s\n",my_progname,SYSTEM_TYPE,
+ printf("%s Ver 1.49 for %s at %s\n",my_progname,SYSTEM_TYPE,
MACHINE_TYPE);
}
@@ -468,7 +468,7 @@ static void get_options(register int *argc,register char ***argv)
if ((check_param.testflag & T_READONLY) &&
(check_param.testflag &
(T_REP_BY_SORT | T_REP | T_STATISTICS | T_AUTO_INC |
- T_SORT_RECORDS | T_SORT_INDEX)))
+ T_SORT_RECORDS | T_SORT_INDEX | T_FORCE_CREATE)))
{
VOID(fprintf(stderr,
"%s: Can't use --readonly when repairing or sorting\n",
diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result
index 74c8bd53af2..1a292b1203c 100644
--- a/mysql-test/r/order_by.result
+++ b/mysql-test/r/order_by.result
@@ -111,3 +111,34 @@ DateOfAction TransactionID
member_id nickname voornaam
1
2
+gid sid uid
+104620 5 15
+103867 5 27
+103962 5 27
+104619 5 75
+104505 5 117
+103853 5 250
+gid sid uid
+104620 5 15
+103867 5 27
+103962 5 27
+104619 5 75
+104505 5 117
+103853 5 250
+table type possible_keys key key_len ref rows Extra
+t1 index PRIMARY PRIMARY 4 NULL 6 Using index
+t2 eq_ref PRIMARY,uid PRIMARY 4 t1.gid 1
+t3 eq_ref PRIMARY PRIMARY 2 t2.uid 1 where used; Using index
+table type possible_keys key key_len ref rows Extra
+t1 index PRIMARY PRIMARY 4 NULL 6 Using index
+t3 eq_ref PRIMARY PRIMARY 2 t1.gid 1 where used
+table type possible_keys key key_len ref rows Extra
+t1 index PRIMARY PRIMARY 4 NULL 6 Using index; Using temporary; Using filesort
+t2 eq_ref PRIMARY,uid PRIMARY 4 t1.gid 1
+t3 eq_ref PRIMARY PRIMARY 2 t2.uid 1 where used; Using index
+table type possible_keys key key_len ref rows Extra
+t1 index PRIMARY PRIMARY 4 NULL 6 Using index; Using temporary; Using filesort
+t3 eq_ref PRIMARY PRIMARY 2 t1.gid 1 where used
+table type possible_keys key key_len ref rows Extra
+t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
+t3 eq_ref PRIMARY PRIMARY 2 t1.skr 1 where used
diff --git a/mysql-test/t/fulltext.test b/mysql-test/t/fulltext.test
index 064219c6ad3..153fdefd960 100644
--- a/mysql-test/t/fulltext.test
+++ b/mysql-test/t/fulltext.test
@@ -2,7 +2,7 @@
# Test of fulltext index
#
-drop table if exists t1,t2;
+drop table if exists t1,t2,t3;
CREATE TABLE t1 (a VARCHAR(200), b TEXT, FULLTEXT (a,b));
INSERT INTO t1 VALUES('MySQL has now support', 'for full-text search'),('Full-text indexes', 'are called collections'),('Only MyISAM tables','support collections'),('Function MATCH ... AGAINST()','is used to do a search'),('Full-text search in MySQL', 'implements vector space model');
@@ -61,4 +61,23 @@ select * from t2 where MATCH inhalt AGAINST (NULL);
select * from t2 where MATCH inhalt AGAINST ('foobar');
select * from t2 having MATCH inhalt AGAINST ('foobar');
-drop table t1,t2;
+#
+# check of fulltext errors
+#
+
+CREATE TABLE t3 (
+ ticket int(11),
+ inhalt text,
+ KEY tig (ticket),
+ fulltext index tix (inhalt)
+);
+
+--error 1210
+select * from t2 having MATCH inhalt AGAINST (t1.id);
+--error 1210
+select * from t2 having MATCH ticket AGAINST ('foobar');
+--error 1210
+select * from t2,t3 having MATCH (t2.inhalt,t3.inhalt) AGAINST ('foobar');
+
+drop table t1,t2,t3;
+
diff --git a/mysql-test/t/order_by.test b/mysql-test/t/order_by.test
index 4e5cee0d0ff..16094206745 100644
--- a/mysql-test/t/order_by.test
+++ b/mysql-test/t/order_by.test
@@ -168,8 +168,8 @@ drop table t1,t2,t3;
#bug reported by Wouter de Jong
-drop table if exists members;
-CREATE TABLE members (
+drop table if exists t1;
+CREATE TABLE t1 (
member_id int(11) NOT NULL auto_increment,
inschrijf_datum varchar(20) NOT NULL default '',
lastchange_datum varchar(20) NOT NULL default '',
@@ -200,8 +200,50 @@ CREATE TABLE members (
PRIMARY KEY (member_id)
) TYPE=MyISAM PACK_KEYS=1;
-insert into members (member_id) values (1),(2),(3);
-select member_id, nickname, voornaam FROM members
+insert into t1 (member_id) values (1),(2),(3);
+select member_id, nickname, voornaam FROM t1
ORDER by lastchange_datum DESC LIMIT 2;
-drop table members;
+drop table t1;
+
+#
+# Test optimizing bug with EQ_REF tables, where some ORDER BY parts were
+# wrongly removed.
+CREATE TABLE t1 (
+ gid int(10) unsigned NOT NULL auto_increment,
+ cid smallint(5) unsigned NOT NULL default '0',
+ PRIMARY KEY (gid),
+ KEY component_id (cid)
+) TYPE=MyISAM;
+INSERT INTO t1 VALUES (103853,108),(103867,108),(103962,108),(104505,108),(104619,108),(104620,108);
+ALTER TABLE t1 add skr int(10) not null;
+
+CREATE TABLE t2 (
+ gid int(10) unsigned NOT NULL default '0',
+ uid smallint(5) unsigned NOT NULL default '1',
+ sid tinyint(3) unsigned NOT NULL default '1',
+ PRIMARY KEY (gid),
+ KEY uid (uid),
+ KEY status_id (sid)
+) TYPE=MyISAM;
+INSERT INTO t2 VALUES (103853,250,5),(103867,27,5),(103962,27,5),(104505,117,5),(104619,75,5),(104620,15,5);
+
+CREATE TABLE t3 (
+ uid smallint(6) NOT NULL auto_increment,
+ PRIMARY KEY (uid)
+) TYPE=MyISAM;
+INSERT INTO t3 VALUES (1),(15),(27),(75),(117),(250);
+ALTER TABLE t3 add skr int(10) not null;
+
+select t1.gid, t2.sid, t3.uid from t2, t1, t3 where t2.gid = t1.gid and t2.uid = t3.uid order by t3.uid, t1.gid;
+select t1.gid, t2.sid, t3.uid from t3, t2, t1 where t2.gid = t1.gid and t2.uid = t3.uid order by t3.uid, t1.gid;
+
+# The following ORDER BY can be optimized
+EXPLAIN select t1.gid, t2.sid, t3.uid from t3, t2, t1 where t2.gid = t1.gid and t2.uid = t3.uid order by t1.gid, t3.uid;
+EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.gid = t3.uid order by t1.gid,t3.skr;
+
+# The following ORDER BY can't be optimized
+EXPLAIN SELECT t1.gid, t2.sid, t3.uid from t2, t1, t3 where t2.gid = t1.gid and t2.uid = t3.uid order by t3.uid, t1.gid;
+EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.gid = t3.uid order by t3.skr,t1.gid;
+EXPLAIN SELECT t1.gid, t3.uid from t1, t3 where t1.skr = t3.uid order by t1.gid,t3.skr;
+drop table t1,t2,t3;
diff --git a/mysys/default.c b/mysys/default.c
index cb842da0f02..126c0270a17 100644
--- a/mysys/default.c
+++ b/mysys/default.c
@@ -222,7 +222,7 @@ static my_bool search_default_file(DYNAMIC_ARRAY *args, MEM_ROOT *alloc,
const char *dir, const char *config_file,
const char *ext, TYPELIB *group)
{
- char name[FN_REFLEN+10],buff[FN_REFLEN+1],*ptr,*end,*value,*tmp;
+ char name[FN_REFLEN+10],buff[4096],*ptr,*end,*value,*tmp;
FILE *fp;
uint line=0;
my_bool read_values=0,found_group=0;
diff --git a/scripts/mysqlhotcopy.sh b/scripts/mysqlhotcopy.sh
index 1c26bf8e2d6..71359fa5612 100644
--- a/scripts/mysqlhotcopy.sh
+++ b/scripts/mysqlhotcopy.sh
@@ -223,18 +223,27 @@ foreach my $rdb ( @db_desc ) {
my $db = $rdb->{src};
eval { $dbh->do( "use $db" ); };
die "Database '$db' not accessible: $@" if ( $@ );
- my @dbh_tables = $dbh->func( '_ListTables' );
+ my @dbh_tables = $dbh->tables();
## generate regex for tables/files
- my $t_regex = $rdb->{t_regex}; ## assign temporary regex
- my $negated = $t_regex =~ tr/~//d; ## remove and count negation operator: we don't allow ~ in table names
- $t_regex = qr/$t_regex/; ## make regex string from user regex
-
- ## filter (out) tables specified in t_regex
- print "Filtering tables with '$t_regex'\n" if $opt{debug};
- @dbh_tables = ( $negated
- ? grep { $_ !~ $t_regex } @dbh_tables
- : grep { $_ =~ $t_regex } @dbh_tables );
+ my $t_regex;
+ my $negated;
+ if ($rdb->{t_regex}) {
+ $t_regex = $rdb->{t_regex}; ## assign temporary regex
+ $negated = $t_regex =~ tr/~//d; ## remove and count
+ ## negation operator: we
+ ## don't allow ~ in table
+ ## names
+
+ $t_regex = qr/$t_regex/; ## make regex string from
+ ## user regex
+
+ ## filter (out) tables specified in t_regex
+ print "Filtering tables with '$t_regex'\n" if $opt{debug};
+ @dbh_tables = ( $negated
+ ? grep { $_ !~ $t_regex } @dbh_tables
+ : grep { $_ =~ $t_regex } @dbh_tables );
+ }
## get list of files to copy
my $db_dir = "$datadir/$db";
@@ -249,10 +258,18 @@ foreach my $rdb ( @db_desc ) {
closedir( DBDIR );
## filter (out) files specified in t_regex
- my @db_files = ( $negated
- ? grep { $db_files{$_} !~ $t_regex } keys %db_files
- : grep { $db_files{$_} =~ $t_regex } keys %db_files );
+ my @db_files;
+ if ($rdb->{t_regex}) {
+ @db_files = ($negated
+ ? grep { $db_files{$_} !~ $t_regex } keys %db_files
+ : grep { $db_files{$_} =~ $t_regex } keys %db_files );
+ }
+ else {
+ @db_files = keys %db_files;
+ }
+
@db_files = sort @db_files;
+
my @index_files=();
## remove indices unless we're told to keep them
@@ -809,3 +826,7 @@ Ask Bjoern Hansen - Cleanup code to fix a few bugs and enable -w again.
Emil S. Hansen - Added resetslave and resetmaster.
+Jeremy D. Zawodny - Removed deprecated DBI calls. Fixed a bug that
+resulted in nothing being copied when a regexp was specified but no
+database name(s).
+
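
The mysqlhotcopy.sh hunks above make the table filter optional: the regex (with '~' meaning "negate the match") is applied only when the user actually passed one; otherwise every table and its files are copied. The script itself is Perl; the sketch below restates the same behaviour in C with POSIX <regex.h>, simplifies '~' handling to a single leading character (the Perl removes all '~' with tr/~//d), and uses made-up names throughout.

/* Standalone sketch: keep all names when no pattern is given, otherwise
   filter (or negate-filter) them with a POSIX extended regex. */
#include <regex.h>
#include <stdio.h>

static void
filter_names(const char* pattern, const char** names, int n)
{
	regex_t	re;
	int	negate = 0;
	int	use_filter = (pattern != NULL && pattern[0] != '\0');
	int	i;

	if (use_filter) {
		if (pattern[0] == '~') {
			negate = 1;
			pattern++;
		}
		if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB) != 0) {
			fprintf(stderr, "bad table regex\n");
			return;
		}
	}
	for (i = 0; i < n; i++) {
		int matched = use_filter
			&& regexec(&re, names[i], 0, NULL, 0) == 0;
		int keep = !use_filter || (negate ? !matched : matched);

		if (keep) {
			printf("copy %s\n", names[i]);
		}
	}
	if (use_filter) {
		regfree(&re);
	}
}

int
main(void)
{
	const char*	tables[] = {"customers", "orders", "tmp_scratch"};

	filter_names(NULL, tables, 3);		/* no regex: copy everything */
	filter_names("~^tmp_", tables, 3);	/* skip tables matching ^tmp_ */
	return(0);
}
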
diff --git a/sql/item_func.cc b/sql/item_func.cc
index e540f850063..10298ce67f2 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1954,13 +1954,17 @@ bool Item_func_match::fix_fields(THD *thd,struct st_table_list *tlist)
maybe_null=1;
join_key=0;
- /* Why testing for const_item ? Monty */
- /* I'll remove it later, but this should include modifications to
- find_best and auto_close as complement to auto_init code above. SerG */
- /* I'd rather say now that const_item is assumed in quite a bit of
- places, so it would be difficult to remove. SerG */
+ /* Serg:
+ I'd rather say now that const_item is assumed in quite a few
+ places, so it would be difficult to remove; if it were ever to be
+ removed, this should include modifications to find_best and auto_close
+ as a complement to the auto_init code above.
+ */
if (Item_func::fix_fields(thd,tlist) || !const_item())
+ {
+ my_error(ER_WRONG_ARGUMENTS,MYF(0),"AGAINST");
return 1;
+ }
while ((item=li++))
{
@@ -1969,12 +1973,18 @@ bool Item_func_match::fix_fields(THD *thd,struct st_table_list *tlist)
if (item->type() == Item::REF_ITEM)
li.replace(item= *((Item_ref *)item)->ref);
if (item->type() != Item::FIELD_ITEM || !item->used_tables())
+ {
+ my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH");
return 1;
+ }
used_tables_cache|=item->used_tables();
}
/* check that all columns come from the same table */
if (count_bits(used_tables_cache) != 1)
+ {
+ my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH");
return 1;
+ }
const_item_cache=0;
table=((Item_field *)fields.head())->field->table;
return 0;
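
The Item_func_match::fix_fields() hunks above turn three previously silent failures into ER_WRONG_ARGUMENTS errors: a non-constant AGAINST expression, a MATCH argument that is not a column, and MATCH columns drawn from more than one table, the last checked with count_bits(used_tables_cache) != 1. Below is a plain-C sketch of that bitmask check only; table_map and count_bits() are assumed stand-ins for the server's types and helper, and the data in main() is invented.

/* Standalone sketch of the "all MATCH columns come from one table" test. */
#include <stdio.h>

typedef unsigned long long table_map;

static int
count_bits(table_map m)
{
	int	n = 0;

	while (m) {
		n += (int) (m & 1);
		m >>= 1;
	}
	return(n);
}

/* Returns 0 if the column list is acceptable for MATCH, 1 otherwise. */
static int
check_match_columns(const table_map* arg_tables, int n_args)
{
	table_map	used = 0;
	int		i;

	for (i = 0; i < n_args; i++) {
		if (arg_tables[i] == 0) {
			return(1);	/* argument is not a real column */
		}
		used |= arg_tables[i];
	}
	return(count_bits(used) != 1);	/* columns from more than one table */
}

int
main(void)
{
	table_map	one_table[]  = {1ULL << 2, 1ULL << 2};
	table_map	two_tables[] = {1ULL << 2, 1ULL << 3};

	printf("%d %d\n",
	       check_match_columns(one_table, 2),
	       check_match_columns(two_tables, 2));
	return(0);
}
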
diff --git a/sql/log_event.h b/sql/log_event.h
index 41f847e8d92..8d8ac183f61 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -145,6 +145,9 @@ public:
time(&end_time);
exec_time = (ulong) (end_time - thd->start_time);
db_len = (db) ? (uint32) strlen(db) : 0;
+ // do not log stray system errors such as EE_WRITE
+ if (error_code < ERRMOD)
+ error_code = 0;
}
#endif
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 5d4339d3ca6..a9771184b4b 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1481,7 +1481,7 @@ static void open_log(MYSQL_LOG *log, const char *hostname,
// get rid of extention if the log is binary to avoid problems
if (type == LOG_BIN)
{
- char* p = strrchr(opt_name, FN_EXTCHAR);
+ char* p = strrchr((char*) opt_name, FN_EXTCHAR);
if (p)
*p = 0;
}
diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt
index 6d35e913ffd..a0540bfe270 100644
--- a/sql/share/czech/errmsg.txt
+++ b/sql/share/czech/errmsg.txt
@@ -220,3 +220,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt
index d1e0ea71175..73fa8b79f0f 100644
--- a/sql/share/danish/errmsg.txt
+++ b/sql/share/danish/errmsg.txt
@@ -214,3 +214,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt
index 7ae6c564283..df3d35600ba 100644
--- a/sql/share/dutch/errmsg.txt
+++ b/sql/share/dutch/errmsg.txt
@@ -211,3 +211,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt
index 2a6e23b6281..f6ab398e92f 100644
--- a/sql/share/english/errmsg.txt
+++ b/sql/share/english/errmsg.txt
@@ -211,3 +211,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt
index 264badebe38..8686b7e17a4 100644
--- a/sql/share/estonian/errmsg.txt
+++ b/sql/share/estonian/errmsg.txt
@@ -215,3 +215,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt
index 0da5cf94ed8..fb181535764 100644
--- a/sql/share/french/errmsg.txt
+++ b/sql/share/french/errmsg.txt
@@ -211,3 +211,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt
index 9abbb3a8a2f..eabbff043f3 100644
--- a/sql/share/german/errmsg.txt
+++ b/sql/share/german/errmsg.txt
@@ -214,3 +214,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt
index 8f81fcfda31..2dcbad5ffba 100644
--- a/sql/share/greek/errmsg.txt
+++ b/sql/share/greek/errmsg.txt
@@ -211,3 +211,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt
index 84d8c56cd04..edeaec62590 100644
--- a/sql/share/hungarian/errmsg.txt
+++ b/sql/share/hungarian/errmsg.txt
@@ -213,3 +213,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt
index b85dc03286a..434fb2fc7a0 100644
--- a/sql/share/italian/errmsg.txt
+++ b/sql/share/italian/errmsg.txt
@@ -211,3 +211,4 @@
"I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt
index 49e58079588..306fe22ab1d 100644
--- a/sql/share/japanese/errmsg.txt
+++ b/sql/share/japanese/errmsg.txt
@@ -213,3 +213,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt
index 2e278dbd129..89e3abd9680 100644
--- a/sql/share/korean/errmsg.txt
+++ b/sql/share/korean/errmsg.txt
@@ -211,3 +211,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt
index df9efbd28a4..dd9b153acff 100644
--- a/sql/share/norwegian-ny/errmsg.txt
+++ b/sql/share/norwegian-ny/errmsg.txt
@@ -213,3 +213,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt
index c95669aa016..87c25bd933f 100644
--- a/sql/share/norwegian/errmsg.txt
+++ b/sql/share/norwegian/errmsg.txt
@@ -213,3 +213,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt
index d708bc6fffb..2bb0dbb9802 100644
--- a/sql/share/polish/errmsg.txt
+++ b/sql/share/polish/errmsg.txt
@@ -215,3 +215,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt
index 8acb2365996..a8a7b0a565f 100644
--- a/sql/share/portuguese/errmsg.txt
+++ b/sql/share/portuguese/errmsg.txt
@@ -211,3 +211,4 @@
"Travamentos de atualização não podem ser obtidos durante um READ UNCOMMITTED na transação",
"DROP DATABASE não permitido enquanto uma 'thread' está assegurando um travamento global de leitura",
"CREATE DATABASE não permitido enquanto uma 'thread' está assegurando um travamento global de leitura",
+"Wrong arguments to %s",
diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt
index 8069f9907bb..9a964780398 100644
--- a/sql/share/romanian/errmsg.txt
+++ b/sql/share/romanian/errmsg.txt
@@ -215,3 +215,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt
index 6bc845d5599..6d4fd4bcea6 100644
--- a/sql/share/russian/errmsg.txt
+++ b/sql/share/russian/errmsg.txt
@@ -214,3 +214,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt
index 8631ee6bdeb..de12b57638f 100644
--- a/sql/share/slovak/errmsg.txt
+++ b/sql/share/slovak/errmsg.txt
@@ -219,3 +219,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Wrong arguments to %s",
diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt
index a56bebbcf47..3bb80df41a9 100644
--- a/sql/share/spanish/errmsg.txt
+++ b/sql/share/spanish/errmsg.txt
@@ -212,3 +212,4 @@
"Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED",
"DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global",
"CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global",
+"Wrong arguments to %s",
diff --git a/sql/share/swedish/errmsg.OLD b/sql/share/swedish/errmsg.OLD
index fc26a08e9ee..cc54e051e63 100644
--- a/sql/share/swedish/errmsg.OLD
+++ b/sql/share/swedish/errmsg.OLD
@@ -209,3 +209,7 @@
"Lock wait timeout exceeded",
"The total number of locks exceeds the lock table size",
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+#ER_WRONG_ARGUMENTS
+"Felaktiga argument till %s",
diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt
index 7f43afd04b6..0451e4fe6eb 100644
--- a/sql/share/swedish/errmsg.txt
+++ b/sql/share/swedish/errmsg.txt
@@ -211,3 +211,4 @@
"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
"DROP DATABASE not allowed while thread is holding global read lock",
"CREATE DATABASE not allowed while thread is holding global read lock",
+"Felaktiga argument till %s",
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 82b73e5a48f..d23a7edd37e 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -2649,12 +2649,12 @@ static void update_depend_map(JOIN *join)
for (i=0 ; i < ref->key_parts ; i++,item++)
depend_map|=(*item)->used_tables();
ref->depend_map=depend_map;
- for (JOIN_TAB *join_tab2=join->join_tab;
+ for (JOIN_TAB **tab=join->map2table;
depend_map ;
- join_tab2++,depend_map>>=1 )
+ tab++,depend_map>>=1 )
{
if (depend_map & 1)
- ref->depend_map|=join_tab2->ref.depend_map;
+ ref->depend_map|=(*tab)->ref.depend_map;
}
}
}
@@ -2671,12 +2671,12 @@ static void update_depend_map(JOIN *join, ORDER *order)
order->depend_map=depend_map=order->item[0]->used_tables();
if (!(order->depend_map & RAND_TABLE_BIT)) // Not item_sum() or RAND()
{
- for (JOIN_TAB *join_tab=join->join_tab;
+ for (JOIN_TAB **tab=join->map2table;
depend_map ;
- join_tab++, depend_map>>=1)
+ tab++, depend_map>>=1)
{
if (depend_map & 1)
- order->depend_map|=join_tab->ref.depend_map;
+ order->depend_map|=(*tab)->ref.depend_map;
}
}
}
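
The update_depend_map() fix that closes the diff switches from walking join->join_tab, which is ordered by join position, to walking join->map2table, which is indexed by table number: the bits of depend_map are numbered by table, not by join order, so bit i must be translated through the map before touching a JOIN_TAB. A minimal sketch of that distinction follows; map2slot[] plays the role of join->map2table and every value is made up for illustration.

/* Standalone sketch: translate table-numbered bits through a
   table-number -> join-position map before using them. */
#include <stdio.h>

#define N_TABLES 4

int
main(void)
{
	/* depend_map bits are numbered by table: here table 0 and table 2 */
	unsigned long	depend_map = (1UL << 0) | (1UL << 2);

	/* join order chosen by the optimizer: t2, t0, t3, t1 */
	int		join_order[N_TABLES] = {2, 0, 3, 1};
	int		map2slot[N_TABLES];
	int		slot;
	int		table;

	/* build the table-number -> join-position map, as map2table does */
	for (slot = 0; slot < N_TABLES; slot++) {
		map2slot[join_order[slot]] = slot;
	}
	/* walk the bits exactly like the fixed loop: shift depend_map,
	advance a table-indexed cursor, and translate through the map */
	for (table = 0; depend_map; table++, depend_map >>= 1) {
		if (depend_map & 1) {
			printf("table %d is join tab %d\n",
			       table, map2slot[table]);
		}
	}
	return(0);
}
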