author     unknown <monty@donna.mysql.com>  2000-09-14 02:39:07 +0300
committer  unknown <monty@donna.mysql.com>  2000-09-14 02:39:07 +0300
commit     d5964ba20ca4c00a443c185186def44bb90831b3 (patch)
tree       47d3199e561726437875c3247556ac5797525366
parent     9e37676d7cd9ca30a05025b9fcc3424c4e4a1932 (diff)
download   mariadb-git-d5964ba20ca4c00a443c185186def44bb90831b3.tar.gz
Fixes for MERGE TABLES and HEAP tables
Docs/manual.texi: Updated MERGE table stuff + more
extra/perror.c: Added missing error messages
include/myisammrg.h: Fixes for MERGE TABLE
include/queues.h: Fixes for MERGE TABLE
isam/isamlog.c: Fixed hard bug
myisam/mi_log.c: cleanup
myisam/mi_open.c: Fixed file name format in myisam log
myisam/myisamlog.c: Bug fixes
myisammrg/mymrgdef.h: Fixes for MERGE TABLE
myisammrg/myrg_create.c: Fixes for MERGE TABLE
myisammrg/myrg_open.c: Fixes for MERGE TABLE
myisammrg/myrg_queue.c: Fixes for MERGE TABLE
myisammrg/myrg_rfirst.c: Fixes for MERGE TABLE
myisammrg/myrg_rkey.c: Fixes for MERGE TABLE
myisammrg/myrg_rlast.c: Fixes for MERGE TABLE
myisammrg/myrg_rnext.c: Fixes for MERGE TABLE
myisammrg/myrg_rprev.c: Fixes for MERGE TABLE
myisammrg/myrg_rrnd.c: Fixes for MERGE TABLE
mysql.proj: update
mysys/queues.c: Fixed bug when using reverse queues
sql-bench/test-insert.sh: Separated some things to get better timings
sql/ha_heap.cc: Fixed heap table bug
sql/ha_heap.h: Fixed heap table bug
sql/ha_myisam.h: Fixed wrong max_keys
sql/ha_myisammrg.cc: Fixed MERGE TABLES
sql/ha_myisammrg.h: Fixed MERGE TABLES
sql/handler.h: Fix for MERGE TABLES and HEAP tables
sql/lex.h: Fixed MERGE TABLES
sql/mysql_priv.h: Cleanup of code
sql/sql_acl.cc: Fixed that privilege tables are flushed at start
sql/sql_lex.h: Fixed MERGE TABLES
sql/sql_parse.cc: Fixed MERGE TABLES
sql/sql_select.cc: Fixes for HEAP tables
sql/sql_table.cc: Cleanup
sql/sql_yacc.yy: Fixed MERGE TABLES
-rw-r--r--  Docs/manual.texi            135
-rw-r--r--  extra/perror.c                8
-rw-r--r--  include/myisammrg.h           3
-rw-r--r--  include/queues.h              1
-rw-r--r--  isam/isamlog.c                4
-rw-r--r--  myisam/mi_log.c               3
-rw-r--r--  myisam/mi_open.c              6
-rw-r--r--  myisam/myisamlog.c           24
-rw-r--r--  myisammrg/mymrgdef.h          2
-rw-r--r--  myisammrg/myrg_create.c      11
-rw-r--r--  myisammrg/myrg_open.c         4
-rw-r--r--  myisammrg/myrg_queue.c       23
-rw-r--r--  myisammrg/myrg_rfirst.c      16
-rw-r--r--  myisammrg/myrg_rkey.c        33
-rw-r--r--  myisammrg/myrg_rlast.c       16
-rw-r--r--  myisammrg/myrg_rnext.c       69
-rw-r--r--  myisammrg/myrg_rprev.c       47
-rw-r--r--  myisammrg/myrg_rrnd.c         8
-rw-r--r--  mysql.proj                  bin 151552 -> 163840 bytes
-rw-r--r--  mysys/queues.c               19
-rwxr-xr-x  sql-bench/test-insert.sh     26
-rw-r--r--  sql/ha_heap.cc                2
-rw-r--r--  sql/ha_heap.h                 9
-rw-r--r--  sql/ha_myisam.h               2
-rw-r--r--  sql/ha_myisammrg.cc          18
-rw-r--r--  sql/ha_myisammrg.h           16
-rw-r--r--  sql/handler.h                 3
-rw-r--r--  sql/lex.h                     5
-rw-r--r--  sql/mysql_priv.h              6
-rw-r--r--  sql/sql_acl.cc                1
-rw-r--r--  sql/sql_lex.h                27
-rw-r--r--  sql/sql_parse.cc             50
-rw-r--r--  sql/sql_select.cc            33
-rw-r--r--  sql/sql_table.cc              4
-rw-r--r--  sql/sql_yacc.yy              13
35 files changed, 408 insertions, 239 deletions
diff --git a/Docs/manual.texi b/Docs/manual.texi
index cba039b6a9c..183c58f23c2 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -195,8 +195,8 @@ Installing a MySQL binary distribution
System-specific issues
-* Binary notes-Linux:: Linux notes
-* Binary notes-HP-UX:: HP-UX notes
+* Binary notes-Linux:: Linux notes for binary distribution
+* Binary notes-HP-UX:: HP-UX notes for binary distribution
Installing a MySQL source distribution
@@ -259,6 +259,7 @@ Windows notes
* Windows and SSH:: Connecting to a remote @strong{MySQL} from Windows with SSH
* Windows symbolic links:: Splitting data across different disks under Win32
* Windows compiling:: Compiling MySQL clients on Windows.
+* Windows and BDB tables.::
* Windows vs Unix:: @strong{MySQL}-Windows compared to Unix @strong{MySQL}
Post-installation setup and testing
@@ -4568,8 +4569,8 @@ files.
@subsection System-specific issues
@menu
-* Binary notes-Linux:: Linux notes
-* Binary notes-HP-UX:: HP-UX notes
+* Binary notes-Linux:: Linux notes for binary distribution
+* Binary notes-HP-UX:: HP-UX notes for binary distribution
@end menu
The following sections indicate some of the issues that have been observed
@@ -4577,7 +4578,7 @@ on particular systems when installing @strong{MySQL} from a binary
distribution.
@node Binary notes-Linux, Binary notes-HP-UX, Binary install system issues, Binary install system issues
-@subsubsection Linux notes
+@subsubsection Linux notes for binary distribution
@strong{MySQL} needs at least Linux 2.0.
@@ -4653,7 +4654,7 @@ and clients on the same machine. We hope that the @code{Linux 2.4}
kernel will fix this problem in the future.
@node Binary notes-HP-UX, , Binary notes-Linux, Binary install system issues
-@subsubsection HP-UX notes
+@subsubsection HP-UX notes for binary distribution
Some of the binary distributions of @strong{MySQL} for HP-UX is
distributed as an HP depot file and as a tar file. To use the depot
@@ -7203,6 +7204,7 @@ is also described in the @file{README} file that comes with the
* Windows and SSH:: Connecting to a remote @strong{MySQL} from Windows with SSH
* Windows symbolic links:: Splitting data across different disks under Win32
* Windows compiling:: Compiling MySQL clients on Windows.
+* Windows and BDB tables.::
* Windows vs Unix:: @strong{MySQL}-Windows compared to Unix @strong{MySQL}
@end menu
@@ -7511,7 +7513,7 @@ should create the file @file{C:\mysql\data\foo.sym} that should contains the
text @code{D:\data\foo}. After this, all tables created in the database
@code{foo} will be created in @file{D:\data\foo}.
-@node Windows compiling, Windows vs Unix, Windows symbolic links, Windows
+@node Windows compiling, Windows and BDB tables., Windows symbolic links, Windows
@subsection Compiling MySQL clients on Windows.
In your source files, you should include @file{windows.h} before you include
@@ -7531,7 +7533,17 @@ with the static @file{mysqlclient.lib} library.
Note that as the mysqlclient libraries are compiled as threaded libraries,
you should also compile your code to be multi-threaded!
-@node Windows vs Unix, , Windows compiling, Windows
+@node Windows and BDB tables., Windows vs Unix, Windows compiling, Windows
+@subsection Windows and BDB tables.
+
+We are working on removing the requirement that one must have a primary
+key in a BDB table; as soon as this is fixed we will thoroughly test the
+BDB interface by running the @strong{MySQL} benchmark + our internal
+test suite on it. When that is done we will start releasing binary
+distributions (for Windows and Unix) of @strong{MySQL} that include
+support for BDB tables.
+
+@node Windows vs Unix, , Windows and BDB tables., Windows
@subsection MySQL-Windows compared to Unix MySQL
@strong{MySQL}-Windows has by now proven itself to be very stable. This version
@@ -16445,6 +16457,7 @@ or PASSWORD = "string"
or DELAY_KEY_WRITE = @{0 | 1@}
or ROW_FORMAT= @{ default | dynamic | static | compressed @}
or RAID_TYPE= @{1 | STRIPED | RAID0 @} RAID_CHUNKS=# RAID_CHUNKSIZE=#;
+or UNION = (table_name,[table_name...])
select_statement:
[IGNORE | REPLACE] SELECT ... (Some legal select statement)
@@ -16742,8 +16755,14 @@ If you specify @code{RAID_TYPE=STRIPED} for a @code{MyISAM} table,
to the data file, the @code{RAID} handler will map the first
@code{RAID_CHUNKSIZE} *1024 bytes to the first file, the next
@code{RAID_CHUNKSIZE} *1024 bytes to the next file and so on.
-@end itemize
+@code{UNION} is used when you want to use a collection of identical
+tables as one. This only works with MERGE tables. @xref{MERGE}.
+
+For the moment you need to have @code{SELECT}, @code{UPDATE} and
+@code{DELETE} privileges on the tables you map to a @code{MERGE} table.
+All mapped tables must be in the same database as the @code{MERGE} table.
+@end itemize
@node Silent column changes, , CREATE TABLE, CREATE TABLE
@subsection Silent column specification changes
@@ -20633,9 +20652,10 @@ missing is a way from the SQL prompt to say which tables are part of the
@code{MERGE} table.
A @code{MERGE} table is a collection of identical @code{MyISAM} tables
-that can be used as one. You can only @code{SELECT} from the collection
-of tables. If you @code{DROP} the @code{MERGE} table, you are only
-dropping the @code{MERGE} specification.
+that can be used as one. You can only @code{SELECT}, @code{DELETE} and
+@code{UPDATE} from the collection of tables. If you @code{DROP} the
+@code{MERGE} table, you are only dropping the @code{MERGE}
+specification.
With identical tables we mean that all tables are created with identical
column information. Some of the tables can be compressed with
@@ -20646,7 +20666,10 @@ definition file and a @code{.MRG} table list file. The @code{.MRG} just
contains a list of the index files (@code{.MYI} files) that should
be used as one.
-@code{MERGE} tables helps you solve the following problems:
+For the moment you need to have @code{SELECT}, @code{UPDATE} and
+@code{DELETE} privileges on the tables you map to a @code{MERGE} table.
+
+@code{MERGE} tables can help you solve the following problems:
@itemize @bullet
@item
@@ -20671,13 +20694,22 @@ are mapped to a @code{MERGE} file than trying to repair a real big file.
Instant mapping of many files as one; A @code{MERGE} table uses the
index of the individual tables; It doesn't need an index of its one.
This makes @code{MERGE} table collections VERY fast to make or remap.
+@item
+If you have a set of tables that you join into one big table on demand
+or in batches, you should instead create a @code{MERGE} table over them
+on demand. This is much faster and will save a lot of disk space.
+@item
+Get around the file size limit of the operating system.
@end itemize
The disadvantages with @code{MERGE} tables are:
@itemize @bullet
@item
-@code{MERGE} tables are read-only.
+You can't use @code{INSERT} on @code{MERGE} tables, as @strong{MySQL} can't know
+into which of the tables it should insert the row.
+@item
+You can only use identical @code{MyISAM} tables for a @code{MERGE} table.
@item
@code{MERGE} tables uses more file descriptors: If you are using a
@strong{MERGE} that maps over 10 tables and 10 users are using this, you
@@ -20703,18 +20735,15 @@ CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, message CHAR(20));
CREATE TABLE t2 (a INT AUTO_INCREMENT PRIMARY KEY, message CHAR(20));
INSERT INTO t1 (message) VALUES ("Testing"),("table"),("t1");
INSERT INTO t2 (message) VALUES ("Testing"),("table"),("t2");
-CREATE TABLE total (a INT NOT NULL, message CHAR(20), KEY(a)) TYPE=MERGE;
+CREATE TABLE total (a INT NOT NULL, message CHAR(20), KEY(a)) TYPE=MERGE UNION=(t1,t2);
@end example
Note that we didn't create an @code{UNIQUE} or @code{PRIMARY KEY} in the
@code{total} table as the key isn't going to be unique in the @code{total}
table.
-(We plan to in the future add the information in the @code{MERGE} handler
-that unique keys are not necessarily unique in the @code{MERGE} table.)
-
-Now you have to use tool (editor, unix command...) to insert the file
-names into the 'total' table:
+Note that you can also manipulate the @code{.MRG} file directly from
+outside of the @code{MySQL} server:
@example
shell> cd /mysql-data-directory/current-database
@@ -20737,13 +20766,11 @@ mysql> select * from total;
+---+---------+
@end example
-To remap a @code{MERGE} table you must either @code{DROP} it and recreate it
-or change the @code{.MRG} file and issue a @code{FLUSH TABLE} on the
-@code{MERGE} table to force the handler to read the new definition file.
-
-You can also put full paths to the index files in the @code{.MRG} file; If
-you don't do this, the @code{MERGE} handler assumes that the index files
-are in the same directory as the @code{.MRG} file.
+To remap a @code{MERGE} table you must either @code{DROP} it and
+recreate it, use @code{ALTER TABLE} with a new @code{UNION}
+specification or change the @code{.MRG} file and issue a @code{FLUSH
+TABLE} on the @code{MERGE} table and all underlying tables to force the
+handler to read the new definition file.
@node ISAM, HEAP, MERGE, Table types
@section ISAM tables
@@ -28799,6 +28826,48 @@ string to a time. This would be great if the source was a text file, but
is plain stupid when the source is an ODBC connection that reports
exact types for each column.
@end itemize
+@item Word
+
+To retrieve data from @strong{MySQL} into Word/Excel documents, you need
+to use the @code{MyODBC} driver and the Microsoft Query add-in.
+
+For example, create a database with a table containing two text columns.
+
+@itemize @bullet
+@item
+Insert rows using the mysql client command line tool.
+@item
+Create a DSN file using the MyODBC driver, e.g. @code{my}, for the database above.
+@item
+Open the Word application.
+@item
+Create a new blank document.
+@item
+Using the Database toolbar, press the Insert Database button.
+@item
+Press the Get Data button.
+@item
+On the right-hand side of the Get Data screen, press the MS Query button.
+@item
+In MS Query, create a new data source using the DSN file @code{my}.
+@item
+Select the new query.
+@item
+Select the columns that you want.
+@item
+Add a filter if you want.
+@item
+Add a sort if you want.
+@item
+Select Return Data to Microsoft Word.
+@item
+Click Finish.
+@item
+Click Insert Data and select the records.
+@item
+Click OK and you will see the rows in your Word document.
+@end itemize
+
@item odbcadmin
Test program for ODBC.
@item Delphi
@@ -36224,6 +36293,11 @@ though, so 3.23 is not released as a stable version yet.
@appendixsubsec Changes in release 3.23.25
@itemize @bullet
@item
+@code{HEAP} tables didn't use keys properly. (Bug from 3.23.23)
+@item
+Added better support for @code{MERGE} tables (keys, mapping, creation,
+documentation...). @xref{MERGE}.
+@item
Fixed bug in mysqldump from 3.23 which caused that some @code{CHAR} columns
wheren't quoted.
@item
@@ -40304,6 +40378,8 @@ Fixed @code{DISTINCT} with calculated columns.
@itemize @bullet
@item
+For the moment @code{MATCH} only works with @code{SELECT} statements.
+@item
You cannot build in another directory when using
MIT-pthreads. Because this requires changes to MIT-pthreads, we are not
likely to fix this.
@@ -40391,6 +40467,9 @@ the error value 'empty string', with numeric value 0.
@item
If you execute a @code{PROCEDURE} on a query with returns an empty set then
in some cases the @code{PROCEDURE} will not transform the columns.
+@item
+Creation of a table of type @code{MERGE} doesn't check if the underlying
+tables are of compatible types.
@end itemize
The following is known bugs in earlier versions of @strong{MySQL}:
@@ -40464,6 +40543,8 @@ Allow users to change startup options.
@item
Subqueries. @code{select id from t where grp in (select grp from g where u > 100)}
@item
+Add range checking to @code{MERGE} tables.
+@item
Port of @strong{MySQL} to BeOS.
@item
Add a temporary key buffer cache during @code{insert/delete/update} so that we
diff --git a/extra/perror.c b/extra/perror.c
index b6d32d50868..6f2fbd864b6 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -17,7 +17,7 @@
/* Return error-text for system error messages and nisam messages */
-#define PERROR_VERSION "2.2"
+#define PERROR_VERSION "2.3"
#include <global.h>
#include <my_sys.h>
@@ -59,9 +59,11 @@ static HA_ERRORS ha_errlist[]=
{ 136,"No more room in index file" },
{ 137,"No more records (read after end of file)" },
{ 138,"Unsupported extension used for table" },
- { 139,"Too big row (>= 24 M)"},
+ { 139,"Too big row (>= 16 M)"},
{ 140,"Wrong create options"},
- { 141,"Dupplicate unique on write or update"},
+ { 141,"Duplicate unique on write or update"},
+ { 142,"Unknown character set used"},
+ { 143,"Conflicting table definition between MERGE and mapped table"},
{ 0,NullS },
};
diff --git a/include/myisammrg.h b/include/myisammrg.h
index 64c2067b70f..6b1124fa180 100644
--- a/include/myisammrg.h
+++ b/include/myisammrg.h
@@ -83,7 +83,8 @@ extern int myrg_rsame(MYRG_INFO *file,byte *record,int inx);
extern int myrg_update(MYRG_INFO *file,const byte *old,byte *new_rec);
extern int myrg_status(MYRG_INFO *file,MYMERGE_INFO *x,int flag);
extern int myrg_lock_database(MYRG_INFO *file,int lock_type);
-extern int myrg_create(const char *name,const char **table_names);
+extern int myrg_create(const char *name,const char **table_names,
+ my_bool fix_names);
extern int myrg_extra(MYRG_INFO *file,enum ha_extra_function function);
extern ha_rows myrg_records_in_range(MYRG_INFO *info,int inx,
const byte *start_key,uint start_key_len,
diff --git a/include/queues.h b/include/queues.h
index 73907327e4e..66125e650ca 100644
--- a/include/queues.h
+++ b/include/queues.h
@@ -53,6 +53,7 @@ void delete_queue(QUEUE *queue);
void queue_insert(QUEUE *queue,byte *element);
byte *queue_remove(QUEUE *queue,uint idx);
void _downheap(QUEUE *queue,uint idx);
+#define is_queue_inited(queue) ((queue)->root != 0)
#ifdef __cplusplus
}
diff --git a/isam/isamlog.c b/isam/isamlog.c
index ddeea8a267d..d1347d46c2e 100644
--- a/isam/isamlog.c
+++ b/isam/isamlog.c
@@ -246,7 +246,7 @@ register char ***argv;
/* Fall through */
case 'I':
case '?':
- printf("%s Ver 3.1 for %s at %s\n",my_progname,SYSTEM_TYPE,
+ printf("%s Ver 3.2 for %s at %s\n",my_progname,SYSTEM_TYPE,
MACHINE_TYPE);
puts("TCX Datakonsult AB, by Monty, for your professional use\n");
if (version)
@@ -325,7 +325,7 @@ static int examine_log(my_string file_name, char **table_names)
init_io_cache(&cache,file,0,READ_CACHE,start_offset,0,MYF(0));
bzero((gptr) com_count,sizeof(com_count));
- init_tree(&tree,0,sizeof(file_info),(qsort_cmp) file_info_compare,0,
+ init_tree(&tree,0,sizeof(file_info),(qsort_cmp) file_info_compare,1,
(void(*)(void*)) file_info_free);
VOID(init_key_cache(KEY_CACHE_SIZE,(uint) (10*4*(IO_SIZE+MALLOC_OVERHEAD))));
diff --git a/myisam/mi_log.c b/myisam/mi_log.c
index 9f08b835d14..d223cc69bcc 100644
--- a/myisam/mi_log.c
+++ b/myisam/mi_log.c
@@ -69,7 +69,8 @@ int mi_log(int activate_log)
/* Logging of records and commands on logfile */
/* All logs starts with command(1) dfile(2) process(4) result(2) */
-void _myisam_log(enum myisam_log_commands command, MI_INFO *info, const byte *buffert, uint length)
+void _myisam_log(enum myisam_log_commands command, MI_INFO *info,
+ const byte *buffert, uint length)
{
char buff[11];
int error,old_errno;
diff --git a/myisam/mi_open.c b/myisam/mi_open.c
index 2067e343246..70096f33c5e 100644
--- a/myisam/mi_open.c
+++ b/myisam/mi_open.c
@@ -524,7 +524,11 @@ MI_INFO *mi_open(const char *name, int mode, uint handle_locking)
myisam_open_list=list_add(myisam_open_list,&m_info->open_list);
pthread_mutex_unlock(&THR_LOCK_myisam);
- myisam_log(MI_LOG_OPEN,m_info,share->filename,(uint) strlen(share->filename));
+ if (myisam_log_file >= 0)
+ {
+ intern_filename(name_buff,share->filename);
+ _myisam_log(MI_LOG_OPEN,m_info,name_buff,(uint) strlen(name_buff));
+ }
DBUG_RETURN(m_info);
err:
diff --git a/myisam/myisamlog.c b/myisam/myisamlog.c
index c55aecfdfa6..e5e8bba6ea6 100644
--- a/myisam/myisamlog.c
+++ b/myisam/myisamlog.c
@@ -70,7 +70,7 @@ static void printf_log(const char *str,...);
static bool cmp_filename(struct file_info *file_info,my_string name);
static uint verbose=0,update=0,test_info=0,max_files=0,re_open_count=0,
- recover=0,prefix_remove=0;
+ recover=0,prefix_remove=0,opt_processes=0;
static my_string log_filename=0,filepath=0,write_filename=0,record_pos_file=0;
static ulong com_count[10][3],number_of_commands=(ulong) ~0L,
isamlog_process;
@@ -199,6 +199,9 @@ static void get_options(register int *argc, register char ***argv)
update=1;
recover++;
break;
+ case 'P':
+ opt_processes=1;
+ break;
case 'R':
if (! *++pos)
{
@@ -243,7 +246,7 @@ static void get_options(register int *argc, register char ***argv)
/* Fall through */
case 'I':
case '?':
- printf("%s Ver 1.1 for %s at %s\n",my_progname,SYSTEM_TYPE,
+ printf("%s Ver 1.2 for %s at %s\n",my_progname,SYSTEM_TYPE,
MACHINE_TYPE);
puts("By Monty, for your professional use\n");
if (version)
@@ -258,6 +261,7 @@ static void get_options(register int *argc, register char ***argv)
puts(" -o \"offset\" -p # \"remove # components from path\"");
puts(" -r \"recover\" -R \"file recordposition\"");
puts(" -u \"update\" -v \"verbose\" -w \"write file\"");
+ puts(" -P \"processes\"");
puts("\nOne can give a second and a third '-v' for more verbose.");
puts("Normaly one does a update (-u).");
puts("If a recover is done all writes and all possibly updates and deletes is done\nand errors are only counted.");
@@ -322,7 +326,7 @@ static int examine_log(my_string file_name, char **table_names)
init_io_cache(&cache,file,0,READ_CACHE,start_offset,0,MYF(0));
bzero((gptr) com_count,sizeof(com_count));
- init_tree(&tree,0,sizeof(file_info),(qsort_cmp) file_info_compare,0,
+ init_tree(&tree,0,sizeof(file_info),(qsort_cmp) file_info_compare,1,
(void(*)(void*)) file_info_free);
VOID(init_key_cache(KEY_CACHE_SIZE,(uint) (10*4*(IO_SIZE+MALLOC_OVERHEAD))));
@@ -333,6 +337,8 @@ static int examine_log(my_string file_name, char **table_names)
isamlog_filepos=my_b_tell(&cache)-9L;
file_info.filenr= mi_uint2korr(head+1);
isamlog_process=file_info.process=(long) mi_uint4korr(head+3);
+ if (!opt_processes)
+ file_info.process=0;
result= mi_uint2korr(head+7);
if ((curr_file_info=(struct file_info*) tree_search(&tree,&file_info)))
{
@@ -374,11 +380,17 @@ static int examine_log(my_string file_name, char **table_names)
goto err;
{
uint i;
- char *pos=file_info.name,*to;
+ char *pos,*to;
+
+ /* Convert old DOS file names to the new format */
+ for (pos=file_info.name; pos=strchr(pos,'\\') ; pos++)
+ *pos= '/';
+
+ pos=file_info.name;
for (i=0 ; i < prefix_remove ; i++)
{
char *next;
- if (!(next=strchr(pos,FN_LIBCHAR)))
+ if (!(next=strchr(pos,'/')))
break;
pos=next+1;
}
@@ -436,7 +448,7 @@ static int examine_log(my_string file_name, char **table_names)
if (file_info.used)
{
if (verbose && !record_pos_file)
- printf_log("%s: open",file_info.show_name);
+ printf_log("%s: open -> %d",file_info.show_name, file_info.filenr);
com_count[command][0]++;
if (result)
com_count[command][1]++;
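The hunk above rewrites old DOS-style '\' separators in logged file names to '/' before the prefix-removal loop counts path components. A minimal sketch of the same in-place rewrite as a standalone helper (the function name is illustrative, not part of the patch):

#include <string.h>

/* Rewrite every '\' in name to '/' in place.  The assignment inside the
   loop condition is intentional: strchr() returns NULL once no backslash
   is left, which terminates the loop. */
static void dos_to_unix_path(char *name)
{
  char *pos;
  for (pos = name; (pos = strchr(pos, '\\')) != 0; pos++)
    *pos = '/';
}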
diff --git a/myisammrg/mymrgdef.h b/myisammrg/mymrgdef.h
index 945a415525f..564900614e2 100644
--- a/myisammrg/mymrgdef.h
+++ b/myisammrg/mymrgdef.h
@@ -29,4 +29,4 @@ extern pthread_mutex_t THR_LOCK_open;
#endif
int _myrg_init_queue(MYRG_INFO *info,int inx,enum ha_rkey_function search_flag);
-
+int _myrg_finish_scan(MYRG_INFO *info, int inx, enum ha_rkey_function type);
diff --git a/myisammrg/myrg_create.c b/myisammrg/myrg_create.c
index e5f5b988d80..113831b9d7f 100644
--- a/myisammrg/myrg_create.c
+++ b/myisammrg/myrg_create.c
@@ -23,8 +23,7 @@
a NULL-pointer last
*/
-int myrg_create(name,table_names)
-const char *name,**table_names;
+int myrg_create(const char *name, const char **table_names, my_bool fix_names)
{
int save_errno;
uint errpos;
@@ -38,15 +37,19 @@ const char *name,**table_names;
goto err;
errpos=1;
if (table_names)
+ {
for ( ; *table_names ; table_names++)
{
strmov(buff,*table_names);
- fn_same(buff,name,4);
+ if (fix_names)
+ fn_same(buff,name,4);
*(end=strend(buff))='\n';
- if (my_write(file,*table_names,(uint) (end-buff+1),
+ end[1]=0;
+ if (my_write(file,buff,(uint) (end-buff+1),
MYF(MY_WME | MY_NABP)))
goto err;
}
+ }
if (my_close(file,MYF(0)))
goto err;
DBUG_RETURN(0);
diff --git a/myisammrg/myrg_open.c b/myisammrg/myrg_open.c
index d3bb0b4e7b6..c12fa1fa52b 100644
--- a/myisammrg/myrg_open.c
+++ b/myisammrg/myrg_open.c
@@ -58,7 +58,7 @@ int handle_locking;
{
if ((end=strend(buff))[-1] == '\n')
end[-1]='\0';
- if (buff[0]) /* Skipp empty lines */
+ if (buff[0] && buff[0] != '#') /* Skip empty lines and comments */
{
last_isam=isam;
if (!test_if_hard_path(buff))
@@ -93,7 +93,7 @@ int handle_locking;
m_info->options|=isam->s->options;
m_info->records+=isam->state->records;
m_info->del+=isam->state->del;
- m_info->data_file_length=isam->state->data_file_length;
+ m_info->data_file_length+=isam->state->data_file_length;
if (i)
isam=(MI_INFO*) (isam->open_list.next->data);
}
diff --git a/myisammrg/myrg_queue.c b/myisammrg/myrg_queue.c
index 4917cbf7cf8..4d94f984722 100644
--- a/myisammrg/myrg_queue.c
+++ b/myisammrg/myrg_queue.c
@@ -23,31 +23,32 @@ static int queue_key_cmp(void *keyseg, byte *a, byte *b)
MI_INFO *aa=((MYRG_TABLE *)a)->table;
MI_INFO *bb=((MYRG_TABLE *)b)->table;
uint not_used;
-
- return (_mi_key_cmp((MI_KEYSEG *)keyseg, aa->lastkey, bb->lastkey,
- USE_WHOLE_KEY, SEARCH_FIND, &not_used));
+ int ret= _mi_key_cmp((MI_KEYSEG *)keyseg, aa->lastkey, bb->lastkey,
+ USE_WHOLE_KEY, SEARCH_FIND, &not_used);
+ return ret < 0 ? -1 : ret > 0 ? 1 : 0;
} /* queue_key_cmp */
+
int _myrg_init_queue(MYRG_INFO *info,int inx,enum ha_rkey_function search_flag)
{
- QUEUE *q=&(info->by_key);
+ int error=0;
+ QUEUE *q= &(info->by_key);
- if (!q->root)
+ if (!is_queue_inited(q))
{
if (init_queue(q,info->tables, 0,
- (myisam_read_vec[search_flag]==SEARCH_SMALLER),
+ (myisam_readnext_vec[search_flag] == SEARCH_SMALLER),
queue_key_cmp,
info->open_tables->table->s->keyinfo[inx].seg))
- return my_errno;
+ error=my_errno;
}
else
{
if (reinit_queue(q,info->tables, 0,
- (myisam_read_vec[search_flag]==SEARCH_SMALLER),
+ (myisam_readnext_vec[search_flag] == SEARCH_SMALLER),
queue_key_cmp,
info->open_tables->table->s->keyinfo[inx].seg))
- return my_errno;
+ error=my_errno;
}
- return 0;
+ return error;
}
-
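The change above clamps the result of _mi_key_cmp() to -1, 0 or 1 before the merge handler's priority queue sees it; the queue code (see the mysys/queues.c hunk later in this patch) combines the comparator result bitwise with its max_at_top flag, so handing it a plain value in {-1, 0, 1} keeps that test predictable. A sketch of the clamping idiom with an illustrative stand-in comparator (KEY_LEN and the names are assumptions, not the server's types):

#include <string.h>

#define KEY_LEN 8   /* illustrative fixed key length */

/* Stand-in for queue_key_cmp(): whatever the raw comparison returns,
   the queue only ever sees -1, 0 or 1. */
static int key_cmp(void *arg, unsigned char *a, unsigned char *b)
{
  int raw = memcmp(a, b, KEY_LEN);   /* may be any negative/zero/positive int */
  (void) arg;
  return raw < 0 ? -1 : raw > 0 ? 1 : 0;
}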
diff --git a/myisammrg/myrg_rfirst.c b/myisammrg/myrg_rfirst.c
index f344eb2318f..3f29414f076 100644
--- a/myisammrg/myrg_rfirst.c
+++ b/myisammrg/myrg_rfirst.c
@@ -16,7 +16,7 @@
#include "mymrgdef.h"
- /* Read first row through a specfic key */
+ /* Read the first row according to a specific key */
int myrg_rfirst(MYRG_INFO *info, byte *buf, int inx)
{
@@ -29,17 +29,17 @@ int myrg_rfirst(MYRG_INFO *info, byte *buf, int inx)
for (table=info->open_tables ; table < info->end_table ; table++)
{
- err=mi_rfirst(table->table,NULL,inx);
- info->last_used_table=table;
-
- if (err == HA_ERR_END_OF_FILE)
- continue;
- if (err)
+ if ((err=mi_rfirst(table->table,NULL,inx)))
+ {
+ if (err == HA_ERR_END_OF_FILE)
+ continue;
return err;
-
+ }
/* adding to queue */
queue_insert(&(info->by_key),(byte *)table);
}
+ /* We have done a read in all tables */
+ info->last_used_table=table;
if (!info->by_key.elements)
return HA_ERR_END_OF_FILE;
diff --git a/myisammrg/myrg_rkey.c b/myisammrg/myrg_rkey.c
index c0123588a06..465d61ce3c6 100644
--- a/myisammrg/myrg_rkey.c
+++ b/myisammrg/myrg_rkey.c
@@ -16,6 +16,17 @@
/* Read record based on a key */
+/*
+ * HA_READ_KEY_EXACT => SEARCH_BIGGER
+ * HA_READ_KEY_OR_NEXT => SEARCH_BIGGER
+ * HA_READ_AFTER_KEY => SEARCH_BIGGER
+ * HA_READ_PREFIX => SEARCH_BIGGER
+ * HA_READ_KEY_OR_PREV => SEARCH_SMALLER
+ * HA_READ_BEFORE_KEY => SEARCH_SMALLER
+ * HA_READ_PREFIX_LAST => SEARCH_SMALLER
+ */
+
+
#include "mymrgdef.h"
/* todo: we could store some additional info to speedup lookups:
@@ -33,7 +44,7 @@ int myrg_rkey(MYRG_INFO *info,byte *record,int inx, const byte *key,
MYRG_TABLE *table;
MI_INFO *mi;
int err;
- byte *buf=((search_flag == HA_READ_KEY_EXACT)?record:0);
+ byte *buf=((search_flag == HA_READ_KEY_EXACT) ? record: 0);
if (_myrg_init_queue(info,inx,search_flag))
return my_errno;
@@ -52,13 +63,14 @@ int myrg_rkey(MYRG_INFO *info,byte *record,int inx, const byte *key,
{
err=_mi_rkey(mi,buf,inx,key_buff,pack_key_length,search_flag,FALSE);
}
- info->last_used_table=table;
+ info->last_used_table=table+1;
- if (err == HA_ERR_KEY_NOT_FOUND)
- continue;
if (err)
+ {
+ if (err == HA_ERR_KEY_NOT_FOUND)
+ continue;
return err;
-
+ }
/* adding to queue */
queue_insert(&(info->by_key),(byte *)table);
@@ -76,14 +88,3 @@ int myrg_rkey(MYRG_INFO *info,byte *record,int inx, const byte *key,
mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table;
return mi_rrnd(mi,record,mi->lastpos);
}
-
-/*
- * HA_READ_KEY_EXACT => SEARCH_BIGGER
- * HA_READ_KEY_OR_NEXT => SEARCH_BIGGER
- * HA_READ_AFTER_KEY => SEARCH_BIGGER
- * HA_READ_PREFIX => SEARCH_BIGGER
- * HA_READ_KEY_OR_PREV => SEARCH_SMALLER
- * HA_READ_BEFORE_KEY => SEARCH_SMALLER
- * HA_READ_PREFIX_LAST => SEARCH_SMALLER
- */
-
diff --git a/myisammrg/myrg_rlast.c b/myisammrg/myrg_rlast.c
index ab7aacda716..f41844dfd5c 100644
--- a/myisammrg/myrg_rlast.c
+++ b/myisammrg/myrg_rlast.c
@@ -24,22 +24,22 @@ int myrg_rlast(MYRG_INFO *info, byte *buf, int inx)
MI_INFO *mi;
int err;
- if (_myrg_init_queue(info,inx,HA_READ_KEY_OR_PREV))
+ if (_myrg_init_queue(info,inx, HA_READ_KEY_OR_PREV))
return my_errno;
for (table=info->open_tables ; table < info->end_table ; table++)
{
- err=mi_rlast(table->table,NULL,inx);
- info->last_used_table=table;
-
- if (err == HA_ERR_END_OF_FILE)
- continue;
- if (err)
+ if ((err=mi_rlast(table->table,NULL,inx)))
+ {
+ if (err == HA_ERR_END_OF_FILE)
+ continue;
return err;
-
+ }
/* adding to queue */
queue_insert(&(info->by_key),(byte *)table);
}
+ /* We have done a read in all tables */
+ info->last_used_table=table;
if (!info->by_key.elements)
return HA_ERR_END_OF_FILE;
diff --git a/myisammrg/myrg_rnext.c b/myisammrg/myrg_rnext.c
index e714ce3b139..71a4d081e8a 100644
--- a/myisammrg/myrg_rnext.c
+++ b/myisammrg/myrg_rnext.c
@@ -22,22 +22,21 @@
int myrg_rnext(MYRG_INFO *info, byte *buf, int inx)
{
- MYRG_TABLE *table;
- MI_INFO *mi;
- byte *key_buff;
- uint pack_key_length;
int err;
+ MI_INFO *mi;
/* at first, do rnext for the table found before */
- err=mi_rnext(info->current_table->table,NULL,inx);
- if (err == HA_ERR_END_OF_FILE)
+ if ((err=mi_rnext(info->current_table->table,NULL,inx)))
{
- queue_remove(&(info->by_key),0);
- if (!info->by_key.elements)
- return HA_ERR_END_OF_FILE;
+ if (err == HA_ERR_END_OF_FILE)
+ {
+ queue_remove(&(info->by_key),0);
+ if (!info->by_key.elements)
+ return HA_ERR_END_OF_FILE;
+ }
+ else
+ return err;
}
- else if (err)
- return err;
else
{
/* Found here, adding to queue */
@@ -46,30 +45,42 @@ int myrg_rnext(MYRG_INFO *info, byte *buf, int inx)
}
/* next, let's finish myrg_rkey's initial scan */
- table=info->last_used_table+1;
+ if ((err=_myrg_finish_scan(info, inx, HA_READ_KEY_OR_NEXT)))
+ return err;
+
+ /* now, mymerge's read_next is as simple as one queue_top */
+ mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table;
+ return mi_rrnd(mi,buf,mi->lastpos);
+}
+
+
+/* let's finish myrg_rkey's initial scan */
+
+int _myrg_finish_scan(MYRG_INFO *info, int inx, enum ha_rkey_function type)
+{
+ int err;
+ MYRG_TABLE *table=info->last_used_table;
if (table < info->end_table)
{
- mi=info->last_used_table->table;
- key_buff=(byte*) mi->lastkey+mi->s->base.max_key_length;
- pack_key_length=mi->last_rkey_length;
+ MI_INFO *mi= table[-1].table;
+ byte *key_buff=(byte*) mi->lastkey+mi->s->base.max_key_length;
+ uint pack_key_length= mi->last_rkey_length;
+
for (; table < info->end_table ; table++)
{
mi=table->table;
- err=_mi_rkey(mi,NULL,inx,key_buff,pack_key_length,HA_READ_KEY_OR_NEXT,FALSE);
- info->last_used_table=table;
-
- if (err == HA_ERR_KEY_NOT_FOUND)
- continue;
- if (err)
- return err;
-
+ if ((err=_mi_rkey(mi,NULL,inx,key_buff,pack_key_length,
+ type,FALSE)))
+ {
+ if (err == HA_ERR_KEY_NOT_FOUND) /* If end of file */
+ continue;
+ return err;
+ }
/* Found here, adding to queue */
- queue_insert(&(info->by_key),(byte *)table);
+ queue_insert(&(info->by_key),(byte *) table);
}
+ /* All tables are now used */
+ info->last_used_table=table;
}
-
- /* now, mymerge's read_next is as simple as one queue_top */
- mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table;
- return mi_rrnd(mi,buf,mi->lastpos);
+ return 0;
}
-
diff --git a/myisammrg/myrg_rprev.c b/myisammrg/myrg_rprev.c
index 0523dc7f4e7..8d7a810696f 100644
--- a/myisammrg/myrg_rprev.c
+++ b/myisammrg/myrg_rprev.c
@@ -22,22 +22,21 @@
int myrg_rprev(MYRG_INFO *info, byte *buf, int inx)
{
- MYRG_TABLE *table;
- MI_INFO *mi;
- byte *key_buff;
- uint pack_key_length;
int err;
+ MI_INFO *mi;
- /* at first, do rnext for the table found before */
- err=mi_rprev(info->current_table->table,NULL,inx);
- if (err == HA_ERR_END_OF_FILE)
+ /* at first, do rprev for the table found before */
+ if ((err=mi_rprev(info->current_table->table,NULL,inx)))
{
- queue_remove(&(info->by_key),0);
- if (!info->by_key.elements)
- return HA_ERR_END_OF_FILE;
+ if (err == HA_ERR_END_OF_FILE)
+ {
+ queue_remove(&(info->by_key),0);
+ if (!info->by_key.elements)
+ return HA_ERR_END_OF_FILE;
+ }
+ else
+ return err;
}
- else if (err)
- return err;
else
{
/* Found here, adding to queue */
@@ -46,28 +45,8 @@ int myrg_rprev(MYRG_INFO *info, byte *buf, int inx)
}
/* next, let's finish myrg_rkey's initial scan */
- table=info->last_used_table+1;
- if (table < info->end_table)
- {
- mi=info->last_used_table->table;
- key_buff=(byte*) mi->lastkey+mi->s->base.max_key_length;
- pack_key_length=mi->last_rkey_length;
- for (; table < info->end_table ; table++)
- {
- mi=table->table;
- err=_mi_rkey(mi,NULL,inx,key_buff,pack_key_length,
- HA_READ_KEY_OR_PREV,FALSE);
- info->last_used_table=table;
-
- if (err == HA_ERR_KEY_NOT_FOUND)
- continue;
- if (err)
- return err;
-
- /* Found here, adding to queue */
- queue_insert(&(info->by_key),(byte *)table);
- }
- }
+ if ((err=_myrg_finish_scan(info, inx, HA_READ_KEY_OR_PREV)))
+ return err;
/* now, mymerge's read_prev is as simple as one queue_top */
mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table;
diff --git a/myisammrg/myrg_rrnd.c b/myisammrg/myrg_rrnd.c
index da11b230f27..c64f48c93cd 100644
--- a/myisammrg/myrg_rrnd.c
+++ b/myisammrg/myrg_rrnd.c
@@ -84,10 +84,10 @@ int myrg_rrnd(MYRG_INFO *info,byte *buf,ulonglong filepos)
info->end_table-1,filepos);
isam_info=info->current_table->table;
isam_info->update&= HA_STATE_CHANGED;
- return ((*isam_info->s->read_rnd)(isam_info,(byte*) buf,
- (ha_rows) (filepos -
- info->current_table->file_offset),
- 0));
+ return ((*isam_info->s->read_rnd)
+ (isam_info, (byte*) buf,
+ (ha_rows) (filepos - info->current_table->file_offset),
+ 0));
}
diff --git a/mysql.proj b/mysql.proj
index b79381ad8c8..f8a4ea2d46f 100644
--- a/mysql.proj
+++ b/mysql.proj
Binary files differ
diff --git a/mysys/queues.c b/mysys/queues.c
index 40aa3c8db53..1c7a1a4a618 100644
--- a/mysys/queues.c
+++ b/mysys/queues.c
@@ -25,7 +25,7 @@
#include <queues.h>
- /* The actuall code for handling queues */
+/* Init queue */
int init_queue(QUEUE *queue, uint max_elements, uint offset_to_key,
pbool max_at_top, int (*compare) (void *, byte *, byte *),
@@ -44,6 +44,12 @@ int init_queue(QUEUE *queue, uint max_elements, uint offset_to_key,
DBUG_RETURN(0);
}
+/*
+ Reinitialize queue for new usage; Note that you can't currently resize
+ the number of elements! If you need this, fix it :)
+*/
+
+
int reinit_queue(QUEUE *queue, uint max_elements, uint offset_to_key,
pbool max_at_top, int (*compare) (void *, byte *, byte *),
void *first_cmp_arg)
@@ -78,6 +84,7 @@ void delete_queue(QUEUE *queue)
void queue_insert(register QUEUE *queue, byte *element)
{
reg2 uint idx,next;
+ int cmp;
#ifndef DBUG_OFF
if (queue->elements < queue->max_elements)
@@ -86,10 +93,12 @@ void queue_insert(register QUEUE *queue, byte *element)
queue->root[0]=element;
idx= ++queue->elements;
- while ((queue->compare(queue->first_cmp_arg,
- element+queue->offset_to_key,
- queue->root[(next=idx >> 1)]+queue->offset_to_key)
- ^ queue->max_at_top) < 0)
+ /* max_at_top swaps the comparison if we want to order by desc */
+ while ((cmp=queue->compare(queue->first_cmp_arg,
+ element+queue->offset_to_key,
+ queue->root[(next=idx >> 1)] +
+ queue->offset_to_key)) &&
+ (cmp ^ queue->max_at_top) < 0)
{
queue->root[idx]=queue->root[next];
idx=next;
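The queue_insert() fix above guards the XOR-based ordering test against a comparator result of 0. A minimal sketch of the test, assuming max_at_top is stored as 0 (ascending queue) or -1 (reversed queue), as in later revisions of this file: XOR with -1 is a bitwise NOT, which flips the sign of any non-zero result and thus reverses the ordering, but it maps 0 to -1, so without the new `cmp &&` guard equal keys would wrongly look "smaller".

#include <assert.h>

/* Does the new element rise above its parent?  cmp is the normalized
   comparator result; max_at_top is 0 (ascending) or -1 (descending). */
static int rises_above_parent(int cmp, int max_at_top)
{
  return cmp && (cmp ^ max_at_top) < 0;
}

int main(void)
{
  assert(rises_above_parent(-1,  0) == 1);   /* ascending: smaller rises  */
  assert(rises_above_parent( 1,  0) == 0);
  assert(rises_above_parent( 1, -1) == 1);   /* descending: bigger rises  */
  assert(rises_above_parent(-1, -1) == 0);
  assert(rises_above_parent( 0, -1) == 0);   /* equal keys never swap     */
  return 0;
}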
diff --git a/sql-bench/test-insert.sh b/sql-bench/test-insert.sh
index e1674f3e18d..427a42aea35 100755
--- a/sql-bench/test-insert.sh
+++ b/sql-bench/test-insert.sh
@@ -348,12 +348,12 @@ print " for select_diff_key ($count:$rows): " .
# Test select that is very popular when using ODBC
check_or_range("id","select_range_prefix");
-check_or_range("id3","select_range");
+check_or_range("id3","select_range_key2");
# Check reading on direct key on id and id3
check_select_key("id","select_key_prefix");
-check_select_key("id3","select_key");
+check_select_key("id3","select_key_key2");
####
#### A lot of simple selects on ranges
@@ -403,7 +403,7 @@ check_select_key("id3","select_key");
print "\nTest of compares with simple ranges\n";
check_select_range("id","select_range_prefix");
-check_select_range("id3","select_range");
+check_select_range("id3","select_range_key2");
####
#### Some group queries
@@ -1107,20 +1107,28 @@ if ($server->small_rollback_segment())
# Delete everything from table
#
-print "Deleting everything from table\n";
+print "Deleting rows from the table\n";
$loop_time=new Benchmark;
$count=0;
+
+for ($i=0 ; $i < 128 ; $i++)
+{
+ $dbh->do("delete from bench1 where field1 = $i") or die $DBI::errstr;
+}
+
+$end_time=new Benchmark;
+print "Time for delete_big_many_keys ($count): " .
+timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+
+print "Deleting everything from table\n";
+$count=1;
if ($opt_fast)
{
- $dbh->do("delete from bench1 where field1 = 0") or die $DBI::errstr;
$dbh->do("delete from bench1") or die $DBI::errstr;
- $count+=2;
}
else
{
- $dbh->do("delete from bench1 where field1 = 0") or die $DBI::errstr;
$dbh->do("delete from bench1 where field1 > 0") or die $DBI::errstr;
- $count+=2;
}
if ($opt_lock_tables)
@@ -1129,7 +1137,7 @@ if ($opt_lock_tables)
}
$end_time=new Benchmark;
-print "Time for delete_big_many_keys ($count): " .
+print "Time for delete_all_many_keys ($count): " .
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
$sth = $dbh->do("drop table bench1") or die $DBI::errstr;
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 591ca0bc813..5bdbf75749e 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -261,7 +261,7 @@ ha_rows ha_heap::records_in_range(int inx,
if (start_key_len != end_key_len ||
start_key_len != pos->key_length ||
start_search_flag != HA_READ_KEY_EXACT ||
- end_search_flag != HA_READ_KEY_EXACT)
+ end_search_flag != HA_READ_AFTER_KEY)
return HA_POS_ERROR; // Can't only use exact keys
return 10; // Good guess
}
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index b56ee84822f..b3651a3957b 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -33,14 +33,15 @@ class ha_heap: public handler
const char *table_type() const { return "HEAP"; }
const char **bas_ext() const;
ulong option_flag() const
- { return (HA_READ_RND_SAME+HA_NO_INDEX+HA_BINARY_KEYS+HA_WRONG_ASCII_ORDER+
- HA_KEYPOS_TO_RNDPOS+HA_NO_BLOBS+HA_REC_NOT_IN_SEQ); }
+ { return (HA_READ_RND_SAME | HA_NO_INDEX | HA_ONLY_WHOLE_INDEX |
+ HA_WRONG_ASCII_ORDER | HA_KEYPOS_TO_RNDPOS | HA_NO_BLOBS |
+ HA_REC_NOT_IN_SEQ); }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_keys() const { return MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return HA_MAX_REC_LENGTH; }
- virtual double scan_time() { return (double) (records+deleted) / 100.0; }
- virtual double read_time(ha_rows rows) { return (double) rows / 100.0; }
+ virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
virtual bool fast_key_read() { return 1;}
int open(const char *name, int mode, int test_if_locked);
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index c8f097e792f..e9ae9670b2f 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -45,7 +45,7 @@ class ha_myisam: public handler
const char **bas_ext() const;
ulong option_flag() const { return int_option_flag; }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return 1; }
+ uint max_keys() const { return MI_MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return MAX_KEY_LENGTH; }
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 4e6a1f19583..45822444527 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -180,11 +180,7 @@ void ha_myisammrg::info(uint flag)
mean_rec_length=info.reclength;
block_size=0;
update_time=0;
-#if SIZEOF_OFF_T > 4
ref_length=6; // Should be big enough
-#else
- ref_length=4;
-#endif
}
@@ -228,6 +224,16 @@ THR_LOCK_DATA **ha_myisammrg::store_lock(THD *thd,
int ha_myisammrg::create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info)
{
- char buff[FN_REFLEN];
- return myrg_create(fn_format(buff,name,"","",2+4+16),0);
+ char buff[FN_REFLEN],**table_names,**pos;
+ TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first;
+ DBUG_ENTER("ha_myisammrg::create");
+
+ if (!(table_names= (char**) sql_alloc((create_info->merge_list.elements+1)*
+ sizeof(char*))))
+ DBUG_RETURN(1);
+ for (pos=table_names ; tables ; tables=tables->next)
+ *pos++= tables->real_name;
+ *pos=0;
+ DBUG_RETURN(myrg_create(fn_format(buff,name,"","",2+4+16),
+ (const char **) table_names, (my_bool) 0));
}
diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h
index 864b2f1760c..376c4edf18f 100644
--- a/sql/ha_myisammrg.h
+++ b/sql/ha_myisammrg.h
@@ -32,15 +32,19 @@ class ha_myisammrg: public handler
~ha_myisammrg() {}
const char *table_type() const { return "MRG_MyISAM"; }
const char **bas_ext() const;
- ulong option_flag() const { return HA_REC_NOT_IN_SEQ+HA_READ_NEXT+
- HA_READ_PREV+HA_READ_RND_SAME+HA_HAVE_KEY_READ_ONLY+
- HA_KEYPOS_TO_RNDPOS+HA_READ_ORDER+
- HA_LASTKEY_ORDER+HA_READ_NOT_EXACT_KEY+
- HA_LONGLONG_KEYS+HA_NULL_KEY+HA_BLOB_KEY; }
+ ulong option_flag() const
+ { return (HA_REC_NOT_IN_SEQ | HA_READ_NEXT |
+ HA_READ_PREV | HA_READ_RND_SAME |
+ HA_HAVE_KEY_READ_ONLY |
+ HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER |
+ HA_LASTKEY_ORDER | HA_READ_NOT_EXACT_KEY |
+ HA_LONGLONG_KEYS | HA_NULL_KEY | HA_BLOB_KEY); }
uint max_record_length() const { return HA_MAX_REC_LENGTH; }
- uint max_keys() const { return 1; }
+ uint max_keys() const { return MI_MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return MAX_KEY_LENGTH; }
+ virtual double scan_time()
+ { return ulonglong2double(data_file_length) / IO_SIZE + file->tables; }
int open(const char *name, int mode, int test_if_locked);
int close(void);
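The new scan_time() above lets the optimizer (see the sql_select.cc hunk later in this patch, which switches from a hard-coded data_file_length/IO_SIZE estimate to the handler's own scan_time()) charge MERGE tables an extra unit per underlying table. A sketch of that cost shape; the IO_SIZE value and the function name are assumptions for illustration only:

#define IO_SIZE 4096   /* illustrative block size */

/* One cost unit per IO_SIZE bytes of data, plus one unit for each mapped
   MyISAM table, so scanning many small tables is costed slightly higher
   than scanning one table of the same total size. */
static double merge_scan_cost(unsigned long long data_file_length,
                              unsigned int mapped_tables)
{
  return (double) data_file_length / IO_SIZE + mapped_tables;
}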
diff --git a/sql/handler.h b/sql/handler.h
index 70b05f0c7f7..3bf35cc8804 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -47,7 +47,7 @@
if database is updated after read) */
#define HA_REC_NOT_IN_SEQ 64 /* ha_info don't return recnumber;
It returns a position to ha_r_rnd */
-#define HA_BINARY_KEYS 128 /* Keys must be exact */
+#define HA_ONLY_WHOLE_INDEX 128 /* Can't use part key searches */
#define HA_RSAME_NO_INDEX 256 /* RSAME can't restore index */
#define HA_WRONG_ASCII_ORDER 512 /* Can't use sorting through key */
#define HA_HAVE_KEY_READ_ONLY 1024 /* Can read only keys (no record) */
@@ -127,6 +127,7 @@ typedef struct st_ha_create_information
ulong raid_chunksize;
bool if_not_exists;
ulong used_fields;
+ SQL_LIST merge_list;
} HA_CREATE_INFO;
diff --git a/sql/lex.h b/sql/lex.h
index 2be54a56b1a..de4bd69fa87 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -297,11 +297,12 @@ static SYMBOL symbols[] = {
{ "TRAILING", SYM(TRAILING),0,0},
{ "TO", SYM(TO_SYM),0,0},
{ "TYPE", SYM(TYPE_SYM),0,0},
- { "USE", SYM(USE_SYM),0,0},
- { "USING", SYM(USING),0,0},
+ { "UNION", SYM(UNION_SYM),0,0},
{ "UNIQUE", SYM(UNIQUE_SYM),0,0},
{ "UNLOCK", SYM(UNLOCK_SYM),0,0},
{ "UNSIGNED", SYM(UNSIGNED),0,0},
+ { "USE", SYM(USE_SYM),0,0},
+ { "USING", SYM(USING),0,0},
{ "UPDATE", SYM(UPDATE_SYM),0,0},
{ "USAGE", SYM(USAGE),0,0},
{ "VALUES", SYM(VALUES),0,0},
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 3b5c87fcab2..18930468541 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -78,7 +78,11 @@ void sql_element_free(void *ptr);
// The following is used to decide if MySQL should use table scanning
// instead of reading with keys. The number says how many evaluation of the
// WHERE clause is comparable to reading one extra row from a table.
-#define TIME_FOR_COMPARE 5 // 5 compares == one read
+#define TIME_FOR_COMPARE 5 // 5 compares == one read
+// Number of rows in a referenced table when referred to through a non-unique key.
+// This value is only used when we don't know anything about the key
+// distribution.
+#define MATCHING_ROWS_IN_OTHER_TABLE 10
/* Don't pack string keys shorter than this (if PACK_KEYS=1 isn't used) */
#define KEY_DEFAULT_PACK_LENGTH 8
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 6e1bfb23abe..5089c8d75ee 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1905,6 +1905,7 @@ int grant_init (void)
{
t_table->file->index_end();
mysql_unlock_tables(thd, lock);
+ thd->version--; // Force close to free memory
close_thread_tables(thd);
delete thd;
DBUG_RETURN(0); // Empty table is ok!
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 083bc8a83b5..9a22d4dab04 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -94,16 +94,9 @@ typedef struct st_lex {
LEX_YYSTYPE yylval;
uchar *ptr,*tok_start,*tok_end,*end_of_query;
ha_rows select_limit,offset_limit;
- bool create_refs,drop_primary,drop_if_exists,local_file,
- in_comment,ignore_space,verbose;
- enum_sql_command sql_command;
- enum lex_states next_state;
- ulong options;
- uint in_sum_expr,grant,grant_tot_col,which_columns, sort_default;
char *length,*dec,*change,*name;
String *wild;
sql_exchange *exchange;
- thr_lock_type lock_option;
List<List_item> expr_list;
List<List_item> when_list;
@@ -124,17 +117,25 @@ typedef struct st_lex {
create_field *last_field;
Item *where,*having,*default_value;
- enum enum_duplicates duplicates;
- ulong thread_id,type;
- HA_CREATE_INFO create_info;
CONVERT *convert_set;
- LEX_USER *grant_user;
+ LEX_USER *grant_user;
char *db,*db1,*table1,*db2,*table2; /* For outer join using .. */
gptr yacc_yyss,yacc_yyvs;
THD *thd;
udf_func udf;
- HA_CHECK_OPT check_opt; // check/repair options
- LEX_MASTER_INFO mi; // used by CHANGE MASTER
+ HA_CHECK_OPT check_opt; // check/repair options
+ HA_CREATE_INFO create_info;
+ LEX_MASTER_INFO mi; // used by CHANGE MASTER
+ ulong thread_id,type;
+ ulong options;
+ enum_sql_command sql_command;
+ enum lex_states next_state;
+ enum enum_duplicates duplicates;
+ uint in_sum_expr,grant,grant_tot_col,which_columns, sort_default;
+ thr_lock_type lock_option;
+ bool create_refs,drop_primary,drop_if_exists,local_file;
+ bool in_comment,ignore_space,verbose;
+
} LEX;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index a2ef4354521..97eb7c80b50 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -38,8 +38,9 @@ extern "C" pthread_mutex_t THR_LOCK_keycache;
extern "C" int gethostname(char *name, int namelen);
#endif
-static bool check_table_access(THD *thd,uint want_access,TABLE_LIST *tables);
+static bool check_table_access(THD *thd,uint want_access, TABLE_LIST *tables);
static bool check_db_used(THD *thd,TABLE_LIST *tables);
+static bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *tables);
static bool check_dup(THD *thd,const char *db,const char *name,
TABLE_LIST *tables);
static void mysql_init_query(THD *thd);
@@ -504,9 +505,9 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd)
if(!(table=open_ltable(thd, table_list, TL_READ_NO_INSERT)))
DBUG_RETURN(1);
- if(check_access(thd, SELECT_ACL, db, &table_list->grant.privilege))
+ if (check_access(thd, SELECT_ACL, db, &table_list->grant.privilege))
goto err;
- if(grant_option && check_grant(thd, SELECT_ACL, table_list))
+ if (grant_option && check_grant(thd, SELECT_ACL, table_list))
goto err;
thd->free_list = 0;
@@ -988,10 +989,12 @@ mysql_execute_command(void)
break;
case SQLCOM_CREATE_TABLE:
-#ifdef DEMO_VERSION
- send_error(&thd->net,ER_NOT_ALLOWED_COMMAND);
-#else
- if (check_access(thd,CREATE_ACL,tables->db,&tables->grant.privilege))
+ if (!tables->db)
+ tables->db=thd->db;
+ if (check_access(thd,CREATE_ACL,tables->db,&tables->grant.privilege) ||
+ check_merge_table_access(thd, tables->db,
+ (TABLE_LIST *)
+ lex->create_info.merge_list.first))
goto error; /* purecov: inspected */
if (grant_option)
{
@@ -1072,7 +1075,6 @@ mysql_execute_command(void)
if (grant_option && check_grant(thd,INDEX_ACL,tables))
goto error;
res = mysql_create_index(thd, tables, lex->key_list);
-#endif
break;
case SQLCOM_SLAVE_START:
@@ -1082,7 +1084,6 @@ mysql_execute_command(void)
stop_slave(thd);
break;
-
case SQLCOM_ALTER_TABLE:
#if defined(DONT_ALLOW_SHOW_COMMANDS)
send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */
@@ -1096,11 +1097,16 @@ mysql_execute_command(void)
res=0;
break;
}
+ if (!tables->db)
+ tables->db=thd->db;
if (!lex->db)
lex->db=tables->db;
if (check_access(thd,ALTER_ACL,tables->db,&tables->grant.privilege) ||
- check_access(thd,INSERT_ACL | CREATE_ACL,lex->db,&priv))
- goto error; /* purecov: inspected */
+ check_access(thd,INSERT_ACL | CREATE_ACL,lex->db,&priv) ||
+ check_merge_table_access(thd, tables->db,
+ (TABLE_LIST *)
+ lex->create_info.merge_list.first))
+ goto error; /* purecov: inspected */
if (!tables->db)
tables->db=thd->db;
if (grant_option)
@@ -1354,7 +1360,7 @@ mysql_execute_command(void)
res = mysql_drop_index(thd, tables, lex->drop_list);
break;
case SQLCOM_SHOW_DATABASES:
-#if defined(DONT_ALLOW_SHOW_COMMANDS) || defined(DEMO_VERSION)
+#if defined(DONT_ALLOW_SHOW_COMMANDS)
send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */
DBUG_VOID_RETURN;
#else
@@ -1810,6 +1816,22 @@ static bool check_db_used(THD *thd,TABLE_LIST *tables)
}
+static bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list)
+{
+ int error=0;
+ if (table_list)
+ {
+ /* Force all tables to use the current database */
+ TABLE_LIST *tmp;
+ for (tmp=table_list; tmp ; tmp=tmp->next)
+ tmp->db=db;
+ error=check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL,
+ table_list);
+ }
+ return error;
+}
+
+
/****************************************************************************
Check stack size; Send error if there isn't enough stack to continue
****************************************************************************/
@@ -2462,7 +2484,7 @@ static int start_slave(THD* thd , bool net_report)
if(!thd) thd = current_thd;
NET* net = &thd->net;
const char* err = 0;
- if(check_access(thd, PROCESS_ACL, any_db))
+ if (check_access(thd, PROCESS_ACL, any_db))
return 1;
pthread_mutex_lock(&LOCK_slave);
if(!slave_running)
@@ -2497,7 +2519,7 @@ static int stop_slave(THD* thd, bool net_report )
NET* net = &thd->net;
const char* err = 0;
- if(check_access(thd, PROCESS_ACL, any_db))
+ if (check_access(thd, PROCESS_ACL, any_db))
return 1;
pthread_mutex_lock(&LOCK_slave);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index d9d3d90f0d8..54569e241e0 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -942,7 +942,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
}
/* Approximate found rows and time to read them */
s->found_records=s->records=s->table->file->records;
- s->read_time=(ha_rows) ((s->table->file->data_file_length)/IO_SIZE)+1;
+ s->read_time=(ha_rows) s->table->file->scan_time();
/* Set a max range of how many seeks we can expect when using keys */
s->worst_seeks= (double) (s->read_time*2);
@@ -1419,18 +1419,18 @@ update_ref_and_keys(DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,uint tables,
for (i=0 ; i < keyuse->elements-1 ; i++,use++)
{
if (!use->used_tables)
- use->table->const_key_parts[use->key]|=
+ use->table->const_key_parts[use->key] |=
(key_part_map) 1 << use->keypart;
if (use->keypart != FT_KEYPART)
{
- if (use->key == prev->key && use->table == prev->table)
- {
- if (prev->keypart+1 < use->keypart ||
- prev->keypart == use->keypart && found_eq_constant)
- continue; /* remove */
- }
- else if (use->keypart != 0) // First found must be 0
- continue;
+ if (use->key == prev->key && use->table == prev->table)
+ {
+ if (prev->keypart+1 < use->keypart ||
+ prev->keypart == use->keypart && found_eq_constant)
+ continue; /* remove */
+ }
+ else if (use->keypart != 0) // First found must be 0
+ continue;
}
*save_pos= *use;
@@ -1532,7 +1532,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
double best_records=DBL_MAX;
/* Test how we can use keys */
- rec= s->records/10; /* Assume 10 records/key */
+ rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE; /* Assumed records/key */
for (keyuse=s->keyuse ; keyuse->table == table ;)
{
key_map found_part=0;
@@ -1571,7 +1571,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
if (map == 1) // Only one table
{
TABLE *tmp_table=join->all_tables[tablenr];
- if (rec > tmp_table->file->records)
+ if (rec > tmp_table->file->records && rec > 100)
rec=max(tmp_table->file->records,100);
}
}
@@ -1615,12 +1615,12 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
}
else
{
- if (!found_ref) // If not const key
- {
+ if (!found_ref)
+ { // We found a const key
if (table->quick_keys & ((key_map) 1 << key))
records= (double) table->quick_rows[key];
else
- records= (double) s->records; // quick_range couldn't use key!
+ records= (double) s->records/rec; // quick_range couldn't use key!
}
else
{
@@ -1654,7 +1654,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
** than a not unique key
** Set tmp to (previous record count) * (records / combination)
*/
- if (found_part & 1)
+ if ((found_part & 1) &&
+ !(table->file->option_flag() & HA_ONLY_WHOLE_INDEX))
{
uint max_key_part=max_part_bit(found_part);
/* Check if quick_range could determinate how many rows we
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index a3da4daa9f4..5a983c8cf06 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -176,7 +176,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
DBUG_ENTER("mysql_create_table");
/*
- ** Check for dupplicate fields and check type of table to create
+ ** Check for duplicate fields and check type of table to create
*/
if (!fields.elements)
@@ -302,7 +302,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
bool primary_key=0,unique_key=0;
Key *key;
uint tmp;
- tmp=max(file->max_keys(), MAX_KEY);
+ tmp=min(file->max_keys(), MAX_KEY);
if (key_count > tmp)
{
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index d4fec1289ba..e6952741b60 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -262,6 +262,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token UDF_RETURNS_SYM
%token UDF_SONAME_SYM
%token UDF_SYM
+%token UNION_SYM
%token UNIQUE_SYM
%token USAGE
%token USE_SYM
@@ -712,6 +713,18 @@ create_table_option:
| RAID_TYPE EQ raid_types { Lex->create_info.raid_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;}
| RAID_CHUNKS EQ ULONG_NUM { Lex->create_info.raid_chunks= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;}
| RAID_CHUNKSIZE EQ ULONG_NUM { Lex->create_info.raid_chunksize= $3*RAID_BLOCK_SIZE; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;}
+ | UNION_SYM EQ '(' table_list ')'
+ {
+ /* Move the union list to the merge_list */
+ LEX *lex=Lex;
+ TABLE_LIST *table_list= (TABLE_LIST*) lex->table_list.first;
+ lex->create_info.merge_list= lex->table_list;
+ lex->create_info.merge_list.elements--;
+ lex->create_info.merge_list.first= (byte*) (table_list->next);
+ lex->table_list.elements=1;
+ lex->table_list.next= (byte**) &(table_list->next);
+ table_list->next=0;
+ }
table_types:
ISAM_SYM { $$= DB_TYPE_ISAM; }
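The UNION_SYM action above splices the freshly parsed name list: the first element of lex->table_list is the MERGE table being created, and everything after it is moved into create_info.merge_list. A toy model of that splice on an SQL_LIST-style structure (the struct layout and names here are illustrative, not the server's actual types):

#include <stddef.h>

struct node { struct node *next; const char *name; };

/* count, head pointer, and pointer to the last element's next link */
struct list { unsigned int elements; struct node *first; struct node **next; };

/* Keep only the head element in *all; move the rest to *tail. */
static void split_after_head(struct list *all, struct list *tail)
{
  struct node *head = all->first;

  *tail = *all;              /* tail starts as a copy of the whole list */
  tail->elements--;
  tail->first = head->next;  /* ...minus the head element               */

  all->elements = 1;         /* original list keeps only the head       */
  all->next = &head->next;
  head->next = NULL;
}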