Diffstat (limited to 'sql')
-rw-r--r--  sql/Makefile.am  8
-rw-r--r--  sql/field.cc  55
-rw-r--r--  sql/field.h  7
-rw-r--r--  sql/filesort.cc  70
-rw-r--r--  sql/ha_gemini.cc  1365
-rw-r--r--  sql/ha_gemini.h  53
-rw-r--r--  sql/ha_innobase.cc  69
-rw-r--r--  sql/ha_innobase.h  2
-rw-r--r--  sql/ha_myisam.cc  26
-rw-r--r--  sql/ha_myisam.h  1
-rw-r--r--  sql/ha_myisammrg.cc  4
-rw-r--r--  sql/handler.cc  30
-rw-r--r--  sql/handler.h  3
-rw-r--r--  sql/item.cc  2
-rw-r--r--  sql/item_cmpfunc.cc  2
-rw-r--r--  sql/item_func.cc  2
-rw-r--r--  sql/item_strfunc.cc  9
-rw-r--r--  sql/item_sum.cc  64
-rw-r--r--  sql/item_sum.h  18
-rw-r--r--  sql/lex.h  4
-rw-r--r--  sql/lock.cc  26
-rw-r--r--  sql/log.cc  42
-rw-r--r--  sql/log_event.cc  628
-rw-r--r--  sql/log_event.h  192
-rw-r--r--  sql/md5.c  14
-rw-r--r--  sql/md5.h  12
-rw-r--r--  sql/mini_client.cc  523
-rw-r--r--  sql/mini_client.h  11
-rw-r--r--  sql/mysql_priv.h  25
-rw-r--r--  sql/mysqlbinlog.cc  18
-rw-r--r--  sql/mysqld.cc  121
-rw-r--r--  sql/net_pkg.cc  2
-rw-r--r--  sql/net_serv.cc  2
-rw-r--r--  sql/opt_range.cc  232
-rw-r--r--  sql/opt_range.h  18
-rw-r--r--  sql/share/czech/errmsg.txt  8
-rw-r--r--  sql/share/danish/errmsg.txt  8
-rw-r--r--  sql/share/dutch/errmsg.txt  8
-rw-r--r--  sql/share/english/errmsg.txt  12
-rw-r--r--  sql/share/estonian/errmsg.txt  8
-rw-r--r--  sql/share/french/errmsg.txt  8
-rw-r--r--  sql/share/german/errmsg.txt  36
-rw-r--r--  sql/share/greek/errmsg.txt  8
-rw-r--r--  sql/share/hungarian/errmsg.txt  8
-rw-r--r--  sql/share/italian/errmsg.txt  22
-rw-r--r--  sql/share/japanese/errmsg.txt  8
-rw-r--r--  sql/share/korean/errmsg.txt  8
-rw-r--r--  sql/share/norwegian-ny/errmsg.txt  8
-rw-r--r--  sql/share/norwegian/errmsg.txt  8
-rw-r--r--  sql/share/polish/errmsg.txt  8
-rw-r--r--  sql/share/portuguese/errmsg.txt  404
-rw-r--r--  sql/share/romanian/errmsg.txt  8
-rw-r--r--  sql/share/russian/errmsg.txt  8
-rw-r--r--  sql/share/slovak/errmsg.txt  8
-rw-r--r--  sql/share/spanish/errmsg.txt  8
-rw-r--r--  sql/share/swedish/errmsg.OLD  5
-rw-r--r--  sql/share/swedish/errmsg.txt  8
-rw-r--r--  sql/slave.cc  337
-rw-r--r--  sql/slave.h  19
-rw-r--r--  sql/sql_base.cc  83
-rw-r--r--  sql/sql_class.cc  4
-rw-r--r--  sql/sql_class.h  55
-rw-r--r--  sql/sql_db.cc  137
-rw-r--r--  sql/sql_delete.cc  672
-rw-r--r--  sql/sql_insert.cc  3
-rw-r--r--  sql/sql_lex.cc  19
-rw-r--r--  sql/sql_lex.h  68
-rw-r--r--  sql/sql_parse.cc  412
-rw-r--r--  sql/sql_repl.cc  497
-rw-r--r--  sql/sql_repl.h  23
-rw-r--r--  sql/sql_select.cc  87
-rw-r--r--  sql/sql_select.h  7
-rw-r--r--  sql/sql_show.cc  10
-rw-r--r--  sql/sql_table.cc  108
-rw-r--r--  sql/sql_test.cc  3
-rw-r--r--  sql/sql_unions.cc  34
-rw-r--r--  sql/sql_update.cc  4
-rw-r--r--  sql/sql_yacc.yy  809
-rw-r--r--  sql/structs.h  1
-rw-r--r--  sql/time.cc  4
-rw-r--r--  sql/uniques.cc  44
-rw-r--r--  sql/unireg.h  2
-rw-r--r--  sql/violite.c  430
83 files changed, 6189 insertions, 1958 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am
index c4ed5c05cd3..70415be03a4 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -36,11 +36,11 @@ LDADD = ../isam/libnisam.a \
../myisam/libmyisam.a \
../myisammrg/libmyisammrg.a \
../heap/libheap.a \
+ ../vio/libvio.a \
../mysys/libmysys.a \
../dbug/libdbug.a \
../regex/libregex.a \
../strings/libmystrings.a
- #../vio/libvio.a
mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@bdb_libs@ @innodb_libs@ @pstack_libs@ \
@@ -64,7 +64,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
thr_malloc.cc item_create.cc \
field.cc key.cc sql_class.cc sql_list.cc \
- net_serv.cc violite.c net_pkg.cc lock.cc my_lock.c \
+ net_serv.cc net_pkg.cc lock.cc my_lock.c \
sql_string.cc sql_manager.cc sql_map.cc \
mysqld.cc password.c hash_filo.cc hostname.cc \
convert.cc sql_parse.cc sql_yacc.yy \
@@ -83,10 +83,10 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
slave.cc sql_repl.cc \
mini_client.cc mini_client_errors.c \
- md5.c stacktrace.c
+ md5.c stacktrace.c sql_unions.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
-mysqlbinlog_SOURCES = mysqlbinlog.cc mini_client.cc net_serv.cc violite.c \
+mysqlbinlog_SOURCES = mysqlbinlog.cc mini_client.cc net_serv.cc \
mini_client_errors.c password.c
mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS) $(mysqld_LDADD)
diff --git a/sql/field.cc b/sql/field.cc
index 1f1f00b161b..78f57c5ceb5 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1593,7 +1593,7 @@ double Field_longlong::val_real(void)
else
#endif
longlongget(j,ptr);
- return unsigned_flag ? ulonglong2double(j) : (double) j;
+ return unsigned_flag ? ulonglong2double((ulonglong) j) : (double) j;
}
longlong Field_longlong::val_int(void)
@@ -4087,6 +4087,59 @@ const char *Field_blob::unpack(char *to, const char *from)
}
+#ifdef HAVE_GEMINI_DB
+/* Blobs in Gemini tables are stored separately from the rows which contain
+** them (except for tiny blobs, which are stored in the row). For all other
+** blob types (blob, mediumblob, longblob), the row contains the length of
+** the blob data and a blob id. These methods (pack_id, get_id, and
+** unpack_id) handle packing and unpacking blob fields in Gemini rows.
+*/
+char *Field_blob::pack_id(char *to, const char *from, ulonglong id, uint max_length)
+{
+ char *save=ptr;
+ ptr=(char*) from;
+ ulong length=get_length(); // Length of from string
+ if (length > max_length)
+ {
+ ptr=to;
+ length=max_length;
+ store_length(length); // Store max length
+ ptr=(char*) from;
+ }
+ else
+ memcpy(to,from,packlength); // Copy length
+ if (length)
+ {
+ int8store(to+packlength, id);
+ }
+ ptr=save; // Restore org row pointer
+ return to+packlength+sizeof(id);
+}
+
+
+ulonglong Field_blob::get_id(const char *from)
+{
+ ulonglong id = 0;
+ ulong length=get_length(from);
+ if (length)
+ longlongget(id, from+packlength);
+ return id;
+}
+
+
+const char *Field_blob::unpack_id(char *to, const char *from, const char *bdata)
+{
+ memcpy(to,from,packlength);
+ ulong length=get_length(from);
+ from+=packlength;
+ if (length)
+ memcpy_fixed(to+packlength, &bdata, sizeof(bdata));
+ else
+ bzero(to+packlength,sizeof(bdata));
+ return from+sizeof(ulonglong);
+}
+#endif /* HAVE_GEMINI_DB */
+
/* Keys for blobs are like keys on varchars */
int Field_blob::pack_cmp(const char *a, const char *b, uint key_length)
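The pack_id()/get_id()/unpack_id() methods above store a Gemini blob column as its usual length prefix (packlength bytes) followed by an 8-byte blob id pointing at the separately stored data. A minimal standalone sketch of that cell layout, assuming a MEDIUMBLOB-style 3-byte length prefix and little-endian byte order; the store/get helpers below are illustrative stand-ins for the int*store/longlongget macros, not the real ones:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative stand-ins for the int*store/longlongget macros
     (assumed little-endian layout). */
  static void store_length_le(unsigned char *to, uint32_t len, int packlength)
  {
    for (int i = 0; i < packlength; i++)
      to[i] = (unsigned char)(len >> (8 * i));
  }

  static void store_id_le(unsigned char *to, uint64_t id)
  {
    for (int i = 0; i < 8; i++)
      to[i] = (unsigned char)(id >> (8 * i));
  }

  static uint64_t get_id_le(const unsigned char *from, int packlength)
  {
    uint64_t id = 0;
    for (int i = 0; i < 8; i++)
      id |= (uint64_t)from[packlength + i] << (8 * i);
    return id;
  }

  int main(void)
  {
    /* A MEDIUMBLOB column uses a 3-byte length prefix (packlength == 3),
       so the packed cell is <length><blob id>: 3 + 8 = 11 bytes. */
    unsigned char cell[3 + 8];
    store_length_le(cell, 123456, 3);        /* length of the blob data      */
    store_id_le(cell + 3, 0x0102030405ULL);  /* id of the separately stored blob */
    printf("unpacked id: %llu\n", (unsigned long long)get_id_le(cell, 3));
    return 0;
  }
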
diff --git a/sql/field.h b/sql/field.h
index 2f03d849c9b..b5d7c613701 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -869,6 +869,13 @@ public:
}
char *pack(char *to, const char *from, uint max_length= ~(uint) 0);
const char *unpack(char *to, const char *from);
+#ifdef HAVE_GEMINI_DB
+ char *pack_id(char *to, const char *from, ulonglong id,
+ uint max_length= ~(uint) 0);
+ ulonglong get_id(const char *from);
+ const char *unpack_id(char *to, const char *from, const char *bdata);
+ enum_field_types blobtype() { return (packlength == 1 ? FIELD_TYPE_TINY_BLOB : FIELD_TYPE_BLOB);}
+#endif
char *pack_key(char *to, const char *from, uint max_length);
char *pack_key_from_key_image(char* to, const char *from, uint max_length);
int pack_cmp(const char *a, const char *b, uint key_length);
diff --git a/sql/filesort.cc b/sql/filesort.cc
index e5e6c7d97c8..3b59a0c09bb 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -53,11 +53,19 @@ static int merge_index(SORTPARAM *param,uchar *sort_buffer,
static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count);
static uint sortlength(SORT_FIELD *sortorder,uint length);
- /* Makes a indexfil of recordnumbers of a sorted database */
- /* outfile is reset before data is written to it, if it wasn't
- open a new file is opened */
+ /*
+ Creates a set of pointers that can be used to read the rows
+ in sorted order. This should be done with the functions
+ in records.cc
-ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
+ Before calling filesort, one must have done
+ table->file->info(HA_STATUS_VARIABLE)
+
+ The result set is stored in table->io_cache or
+ table->record_pointers
+ */
+
+ha_rows filesort(TABLE *table, SORT_FIELD *sortorder, uint s_length,
SQL_SELECT *select, ha_rows special, ha_rows max_rows,
ha_rows *examined_rows)
{
@@ -69,19 +77,20 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
IO_CACHE tempfile,*selected_records_file,*outfile;
SORTPARAM param;
DBUG_ENTER("filesort");
- DBUG_EXECUTE("info",TEST_filesort(table,sortorder,s_length,special););
+ DBUG_EXECUTE("info",TEST_filesort(sortorder,s_length,special););
#ifdef SKIPP_DBUG_IN_FILESORT
DBUG_PUSH(""); /* No DBUG here */
#endif
- outfile= table[0]->io_cache;
+ outfile= table->io_cache;
my_b_clear(&tempfile);
buffpek= (BUFFPEK *) NULL; sort_keys= (uchar **) NULL; error= 1;
maxbuffer=1;
- param.ref_length= table[0]->file->ref_length;
+ param.ref_length= table->file->ref_length;
param.sort_length=sortlength(sortorder,s_length)+ param.ref_length;
param.max_rows= max_rows;
param.examined_rows=0;
+ param.unique_buff=0;
if (select && select->quick)
{
@@ -106,17 +115,14 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
#ifdef CAN_TRUST_RANGE
else if (select && select->quick && select->quick->records > 0L)
{
- /* Get record-count */
- table[0]->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
records=min((ha_rows) (select->quick->records*2+EXTRA_RECORDS*2),
- table[0]->file->records)+EXTRA_RECORDS;
+ table->file->records)+EXTRA_RECORDS;
selected_records_file=0;
}
#endif
else
{
- table[0]->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);/* Get record-count */
- records=table[0]->file->estimate_number_of_rows();
+ records=table->file->estimate_number_of_rows();
selected_records_file= 0;
}
if (param.sort_length == param.ref_length && records > param.max_rows)
@@ -170,7 +176,7 @@ ha_rows filesort(TABLE **table, SORT_FIELD *sortorder, uint s_length,
my_error(ER_OUTOFMEMORY,MYF(ME_ERROR+ME_WAITTANG),sortbuff_size);
goto err;
}
- param.sort_form= table[0];
+ param.sort_form= table;
param.end=(param.local_sortorder=sortorder)+s_length;
if ((records=find_all_keys(&param,select,sort_keys,buffpek,&maxbuffer,
&tempfile, selected_records_file)) ==
@@ -674,23 +680,22 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
int error;
uint sort_length,offset;
ulong maxcount;
- ha_rows count,max_rows;
+ ha_rows max_rows,org_max_rows;
my_off_t to_start_filepos;
uchar *strpos;
BUFFPEK *buffpek,**refpek;
QUEUE queue;
- volatile bool *killed= &current_thd->killed;
qsort2_cmp cmp;
DBUG_ENTER("merge_buffers");
statistic_increment(filesort_merge_passes, &LOCK_status);
- count=error=0;
+ error=0;
offset=(sort_length=param->sort_length)-param->ref_length;
maxcount=(ulong) (param->keys/((uint) (Tb-Fb) +1));
to_start_filepos=my_b_tell(to_file);
strpos=(uchar*) sort_buffer;
- max_rows=param->max_rows;
+ org_max_rows=max_rows=param->max_rows;
if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0,
(int (*) (void *, byte *,byte*))
@@ -698,7 +703,6 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
DBUG_RETURN(1); /* purecov: inspected */
for (buffpek= Fb ; buffpek <= Tb ; buffpek++)
{
- count+= buffpek->count;
buffpek->base= strpos;
buffpek->max_keys=maxcount;
strpos+= (uint) (error=(int) read_to_buffer(from_file,buffpek,
@@ -724,22 +728,23 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
{
error=1; goto err; /* purecov: inspected */
}
+ buffpek->key+=sort_length;
+ buffpek->mem_count--;
+ max_rows--;
+ queue_replaced(&queue); // Top element has been used
}
else
cmp=0; // Not unique
while (queue.elements > 1)
{
- if (*killed)
- {
- error=1; goto err; /* purecov: inspected */
- }
for (;;)
{
buffpek=(BUFFPEK*) queue_top(&queue);
if (cmp) // Remove duplicates
{
- if (!cmp(&sort_length, param->unique_buff, (uchar*) buffpek->key))
+ if (!(*cmp)(&sort_length, &(param->unique_buff),
+ (uchar**) &buffpek->key))
goto skip_duplicate;
memcpy(param->unique_buff, (uchar*) buffpek->key,sort_length);
}
@@ -793,7 +798,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
break; /* One buffer have been removed */
}
else if (error == -1)
- goto err; /* purecov: inspected */
+ goto err; /* purecov: inspected */
}
queue_replaced(&queue); /* Top element has been replaced */
}
@@ -801,6 +806,20 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
buffpek=(BUFFPEK*) queue_top(&queue);
buffpek->base= sort_buffer;
buffpek->max_keys=param->keys;
+
+ /*
+ As we know all entries in the buffer are unique, we only have to
+ check if the first one is the same as the last one we wrote
+ */
+ if (cmp)
+ {
+ if (!(*cmp)(&sort_length, &(param->unique_buff), (uchar**) &buffpek->key))
+ {
+ buffpek->key+=sort_length; // Remove duplicate
+ --buffpek->mem_count;
+ }
+ }
+
do
{
if ((ha_rows) buffpek->mem_count > max_rows)
@@ -808,6 +827,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
buffpek->mem_count=(uint) max_rows;
buffpek->count=0; /* Don't read more */
}
+ max_rows-=buffpek->mem_count;
if (flag == 0)
{
if (my_b_write(to_file,(byte*) buffpek->key,
@@ -832,7 +852,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
!= -1 && error != 0);
end:
- lastbuff->count=min(count,param->max_rows);
+ lastbuff->count=min(org_max_rows-max_rows,param->max_rows);
lastbuff->file_pos=to_start_filepos;
err:
delete_queue(&queue);
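The duplicate elimination added to merge_buffers() keeps the last key written out in param->unique_buff, compares the key at the top of the merge queue against it, and skips the new key when they compare equal (plus one final check against the first key of the last remaining buffer). A small sketch of the same idea, merging two already sorted, internally unique runs of ints instead of sort keys held in BUFFPEKs:

  #include <stdio.h>

  /* Merge two sorted, internally unique runs, dropping duplicates that
     appear across runs -- the same trick merge_buffers() plays with
     param->unique_buff. */
  static int merge_unique(const int *a, int na, const int *b, int nb, int *out)
  {
    int have_last = 0, last = 0, n = 0, i = 0, j = 0;
    while (i < na || j < nb)
    {
      int next = (j >= nb || (i < na && a[i] <= b[j])) ? a[i++] : b[j++];
      if (have_last && next == last)
        continue;                      /* skip duplicate of last written key */
      out[n++] = last = next;          /* remember the last key written      */
      have_last = 1;
    }
    return n;
  }

  int main(void)
  {
    int a[] = {1, 3, 5, 7}, b[] = {3, 4, 7, 9}, out[8];
    int n = merge_unique(a, 4, b, 4, out);
    for (int i = 0; i < n; i++)
      printf("%d ", out[i]);           /* prints: 1 3 4 5 7 9 */
    putchar('\n');
    return 0;
  }
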
diff --git a/sql/ha_gemini.cc b/sql/ha_gemini.cc
index 73241c60be7..c95a348f238 100644
--- a/sql/ha_gemini.cc
+++ b/sql/ha_gemini.cc
@@ -21,8 +21,7 @@
#include "mysql_priv.h"
#ifdef HAVE_GEMINI_DB
-
-#include "my_pthread.h"
+#include "ha_gemini.h"
#include "dbconfig.h"
#include "dsmpub.h"
#include "recpub.h"
@@ -30,11 +29,22 @@
#include <m_ctype.h>
#include <myisampack.h>
+#include <m_string.h>
#include <assert.h>
#include <hash.h>
#include <stdarg.h>
#include "geminikey.h"
-#include "ha_gemini.h"
+
+#define gemini_msg MSGD_CALLBACK
+
+pthread_mutex_t gem_mutex;
+
+static HASH gem_open_tables;
+static GEM_SHARE *get_share(const char *table_name, TABLE *table);
+static int free_share(GEM_SHARE *share, bool mutex_is_locked);
+static byte* gem_get_key(GEM_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)));
+static void gemini_lock_table_overflow_error(dsmContext_t *pcontext);
const char *ha_gemini_ext=".gmd";
const char *ha_gemini_idx_ext=".gmi";
@@ -48,6 +58,7 @@ long gemini_locktablesize;
long gemini_lock_wait_timeout;
long gemini_spin_retries;
long gemini_connection_limit;
+char *gemini_basedir;
const char gemini_dbname[] = "gemini";
dsmContext_t *pfirstContext = NULL;
@@ -61,7 +72,7 @@ TYPELIB gemini_recovery_typelib= {array_elements(gemini_recovery_names),"",
const int start_of_name = 2; /* Name passed as ./<db>/<table-name>
and we're not interested in the ./ */
-static const int keyBufSize = MYMAXKEYSIZE * 2;
+static const int keyBufSize = MAXKEYSZ + FULLKEYHDRSZ + MAX_REF_PARTS + 16;
static int gemini_tx_begin(THD *thd);
static void print_msg(THD *thd, const char *table_name, const char *op_name,
@@ -87,40 +98,56 @@ bool gemini_init(void)
goto badret;
}
+ /* dsmContextCreate and dsmContextSetString(DSM_TAGDB_DBNAME) must
+ ** be the first DSM calls we make so that we can log any errors which
+ ** occur in subsequent DSM calls. DO NOT INSERT ANY DSM CALLS IN
+ ** BETWEEN THIS COMMENT AND THE COMMENT THAT SAYS "END OF CODE..."
+ */
/* Gotta connect to the database regardless of the operation */
rc = dsmContextCreate(&pfirstContext);
if( rc != 0 )
{
- printf("dsmContextCreate failed %ld\n",rc);
+ gemini_msg(pfirstContext, "dsmContextCreate failed %l",rc);
goto badret;
}
+ /* This call will also open the log file */
rc = dsmContextSetString(pfirstContext, DSM_TAGDB_DBNAME,
strlen(gemini_dbname), (TEXT *)gemini_dbname);
if( rc != 0 )
{
- printf("Dbname tag failed %ld\n", rc);
+ gemini_msg(pfirstContext, "Dbname tag failed %l", rc);
goto badret;
}
+ /* END OF CODE NOT TO MESS WITH */
fn_format(pmsgsfile, GEM_MSGS_FILE, language, ".db", 2 | 4);
rc = dsmContextSetString(pfirstContext, DSM_TAGDB_MSGS_FILE,
strlen(pmsgsfile), (TEXT *)pmsgsfile);
if( rc != 0 )
{
- printf("MSGS_DIR tag failed %ld\n", rc);
+ gemini_msg(pfirstContext, "MSGS_DIR tag failed %l", rc);
+ goto badret;
+ }
+
+ strxmov(pmsgsfile, gemini_basedir, GEM_SYM_FILE, NullS);
+ rc = dsmContextSetString(pfirstContext, DSM_TAGDB_SYMFILE,
+ strlen(pmsgsfile), (TEXT *)pmsgsfile);
+ if( rc != 0 )
+ {
+ gemini_msg(pfirstContext, "SYMFILE tag failed %l", rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_TYPE,DSM_ACCESS_STARTUP);
if ( rc != 0 )
{
- printf("ACCESS TAG set failed %ld\n",rc);
+ gemini_msg(pfirstContext, "ACCESS TAG set failed %l",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_ENV, DSM_SQL_ENGINE);
if( rc != 0 )
{
- printf("ACCESS_ENV set failed %ld",rc);
+ gemini_msg(pfirstContext, "ACCESS_ENV set failed %l",rc);
goto badret;
}
@@ -129,7 +156,7 @@ bool gemini_init(void)
(TEXT *)mysql_real_data_home);
if( rc != 0 )
{
- printf("Datadir tag failed %ld\n", rc);
+ gemini_msg(pfirstContext, "Datadir tag failed %l", rc);
goto badret;
}
@@ -137,7 +164,7 @@ bool gemini_init(void)
gemini_connection_limit);
if(rc != 0)
{
- printf("MAX_USERS tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "MAX_USERS tag set failed %l",rc);
goto badret;
}
@@ -145,7 +172,7 @@ bool gemini_init(void)
gemini_lock_wait_timeout);
if(rc != 0)
{
- printf("MAX_LOCK_ENTRIES tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "MAX_LOCK_ENTRIES tag set failed %l",rc);
goto badret;
}
@@ -153,7 +180,7 @@ bool gemini_init(void)
gemini_locktablesize);
if(rc != 0)
{
- printf("MAX_LOCK_ENTRIES tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "MAX_LOCK_ENTRIES tag set failed %l",rc);
goto badret;
}
@@ -161,7 +188,7 @@ bool gemini_init(void)
gemini_spin_retries);
if(rc != 0)
{
- printf("SPIN_AMOUNT tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "SPIN_AMOUNT tag set failed %l",rc);
goto badret;
}
@@ -172,22 +199,22 @@ bool gemini_init(void)
gemini_buffer_cache);
if(rc != 0)
{
- printf("DB_BUFFERS tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "DB_BUFFERS tag set failed %l",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_FLUSH_AT_COMMIT,
- ((gemini_options & GEMOPT_FLUSH_LOG) ? 1 : 0));
+ ((gemini_options & GEMOPT_FLUSH_LOG) ? 0 : 1));
if(rc != 0)
{
- printf("FLush_Log_At_Commit tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "FLush_Log_At_Commit tag set failed %l",rc);
goto badret;
}
rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_DIRECT_IO,
((gemini_options & GEMOPT_UNBUFFERED_IO) ? 1 : 0));
if(rc != 0)
{
- printf("DIRECT_IO tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "DIRECT_IO tag set failed %l",rc);
goto badret;
}
@@ -195,10 +222,20 @@ bool gemini_init(void)
((gemini_recovery_options & GEMINI_RECOVERY_FULL) ? 1 : 0));
if(rc != 0)
{
- printf("CRASH_PROTECTION tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "CRASH_PROTECTION tag set failed %l",rc);
goto badret;
}
+ if (gemini_recovery_options & GEMINI_RECOVERY_FORCE)
+ {
+ rc = dsmContextSetLong(pfirstContext, DSM_TAGDB_FORCE_ACCESS, 1);
+ if(rc != 0)
+ {
+ printf("CRASH_PROTECTION tag set failed %ld",rc);
+ goto badret;
+ }
+ }
+
/* cluster size will come in bytes, need to convert it to
16 K units. */
gemini_log_cluster_size = (gemini_log_cluster_size + 16383) / 16384;
@@ -207,7 +244,7 @@ bool gemini_init(void)
if(rc != 0)
{
- printf("CRASH_PROTECTION tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "CRASH_PROTECTION tag set failed %l",rc);
goto badret;
}
@@ -215,12 +252,20 @@ bool gemini_init(void)
DSM_DB_OPENDB | DSM_DB_OPENFILE);
if( rc != 0 )
{
- printf("dsmUserConnect failed rc = %ld\n",rc);
+ /* Message is output in dbenv() */
goto badret;
}
/* Set access to shared for subsequent user connects */
rc = dsmContextSetLong(pfirstContext,DSM_TAGDB_ACCESS_TYPE,DSM_ACCESS_SHARED);
+
rc = gemini_helper_threads(pfirstContext);
+
+
+ (void) hash_init(&gem_open_tables,32,0,0,
+ (hash_get_key) gem_get_key,0,0);
+ pthread_mutex_init(&gem_mutex,NULL);
+
+
DBUG_RETURN(0);
badret:
@@ -231,30 +276,40 @@ badret:
static int gemini_helper_threads(dsmContext_t *pContext)
{
int rc = 0;
+ int i;
+ pthread_attr_t thr_attr;
+
pthread_t hThread;
DBUG_ENTER("gemini_helper_threads");
- rc = pthread_create (&hThread, 0, gemini_watchdog, (void *)pContext);
+
+ (void) pthread_attr_init(&thr_attr);
+#if !defined(HAVE_DEC_3_2_THREADS)
+ pthread_attr_setscope(&thr_attr,PTHREAD_SCOPE_SYSTEM);
+ (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
+ pthread_attr_setstacksize(&thr_attr,32768);
+#endif
+ rc = pthread_create (&hThread, &thr_attr, gemini_watchdog, (void *)pContext);
if (rc)
{
- printf("Can't create gemini watchdog thread");
+ gemini_msg(pContext, "Can't Create gemini watchdog thread");
goto done;
}
if(!gemini_io_threads)
goto done;
- rc = pthread_create(&hThread, 0, gemini_rl_writer, (void *)pContext);
+ rc = pthread_create(&hThread, &thr_attr, gemini_rl_writer, (void *)pContext);
if(rc)
{
- printf("Can't create gemini recovery log writer thread");
+ gemini_msg(pContext, "Can't create Gemini recovery log writer thread");
goto done;
}
- for( int i = gemini_io_threads - 1;i;i--)
+ for(i = gemini_io_threads - 1;i;i--)
{
- rc = pthread_create(&hThread, 0, gemini_apw, (void *)pContext);
+ rc = pthread_create(&hThread, &thr_attr, gemini_apw, (void *)pContext);
if(rc)
{
- printf("Can't create gemini page writer thread");
+ gemini_msg(pContext, "Can't create Gemini database page writer thread");
goto done;
}
}
@@ -273,7 +328,7 @@ pthread_handler_decl(gemini_watchdog,arg )
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed for watchdog %d\n",rc);
+ gemini_msg(pcontext, "dsmContextCopy failed for Gemini watchdog %d",rc);
return 0;
}
@@ -281,7 +336,7 @@ pthread_handler_decl(gemini_watchdog,arg )
if( rc != 0 )
{
- printf("dsmUserConnect failed for watchdog %d\n",rc);
+ gemini_msg(pcontext, "dsmUserConnect failed for Gemini watchdog %d",rc);
return 0;
}
@@ -311,7 +366,7 @@ pthread_handler_decl(gemini_rl_writer,arg )
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed for recovery log writer %d\n",rc);
+ gemini_msg(pcontext, "dsmContextCopy failed for Gemini recovery log writer %d",rc);
return 0;
}
@@ -319,7 +374,7 @@ pthread_handler_decl(gemini_rl_writer,arg )
if( rc != 0 )
{
- printf("dsmUserConnect failed for recovery log writer %d\n",rc);
+ gemini_msg(pcontext, "dsmUserConnect failed for Gemini recovery log writer %d",rc);
return 0;
}
@@ -348,7 +403,7 @@ pthread_handler_decl(gemini_apw,arg )
rc = dsmContextCopy(pcontext,&pmyContext, DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed for gemini page writer %d\n",rc);
+ gemini_msg(pcontext, "dsmContextCopy failed for Gemini page writer %d",rc);
my_thread_end();
return 0;
}
@@ -356,7 +411,7 @@ pthread_handler_decl(gemini_apw,arg )
if( rc != 0 )
{
- printf("dsmUserConnect failed for gemini page writer %d\n",rc);
+ gemini_msg(pcontext, "dsmUserConnect failed for Gemini page writer %d",rc);
my_thread_end();
return 0;
}
@@ -388,7 +443,7 @@ int gemini_set_option_long(int optid, long optval)
}
if (rc)
{
- printf("SPIN_AMOUNT tag set failed %ld",rc);
+ gemini_msg(pfirstContext, "SPIN_AMOUNT tag set failed %l",rc);
}
else
{
@@ -410,7 +465,7 @@ static int gemini_connect(THD *thd)
DSMCONTEXTDB);
if( rc != 0 )
{
- printf("dsmContextCopy failed %ld\n",rc);
+ gemini_msg(pfirstContext, "dsmContextCopy failed %l",rc);
return(rc);
}
@@ -418,7 +473,7 @@ static int gemini_connect(THD *thd)
if( rc != 0 )
{
- printf("dsmUserConnect failed %ld\n",rc);
+ gemini_msg(pfirstContext, "dsmUserConnect failed %l",rc);
return(rc);
}
@@ -444,6 +499,9 @@ bool gemini_end(void)
THD *thd;
DBUG_ENTER("gemini_end");
+
+ hash_free(&gem_open_tables);
+ pthread_mutex_destroy(&gem_mutex);
if(pfirstContext)
{
rc = dsmShutdownSet(pfirstContext, DSM_SHUTDOWN_NORMAL);
@@ -534,6 +592,24 @@ int gemini_rollback_to_savepoint(THD *thd)
DBUG_RETURN(rc);
}
+int gemini_recovery_logging(THD *thd, bool on)
+{
+ int error;
+ int noLogging;
+
+ if(!thd->gemini.context)
+ return 0;
+
+ if(on)
+ noLogging = 0;
+ else
+ noLogging = 1;
+
+ error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
+ DSM_TAGCONTEXT_NO_LOGGING,noLogging);
+ return error;
+}
+
/* gemDataType - translates from mysql data type constant to gemini
key services data type contstant */
int gemDataType ( int mysqlType )
@@ -599,8 +675,13 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
DBUG_ENTER("ha_gemini::open");
thd = current_thd;
- thr_lock_init(&alock);
- thr_lock_data_init(&alock,&lock,(void*)0);
+ /* Init shared structure */
+ if (!(share=get_share(name,table)))
+ {
+ DBUG_RETURN(1); /* purecov: inspected */
+ }
+ thr_lock_data_init(&share->lock,&lock,(void*) 0);
+
ref_length = sizeof(dsmRecid_t);
if(thd->gemini.context == NULL)
@@ -610,7 +691,7 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
if(rc)
return rc;
}
- if (!(rec_buff=my_malloc(table->rec_buff_length,
+ if (!(rec_buff=(byte*)my_malloc(table->rec_buff_length,
MYF(MY_WME))))
{
DBUG_RETURN(1);
@@ -635,6 +716,12 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
rc = dsmObjectNameToNum((dsmContext_t *)thd->gemini.context,
(dsmText_t *)name_buff,
&tableId);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to find table number for %s", name_buff);
+ DBUG_RETURN(rc);
+ }
}
tableNumber = tableId;
@@ -649,8 +736,33 @@ int ha_gemini::open(const char *name, int mode, uint test_if_locked)
crashed while being in the midst of a repair operation */
rc = dsmTableStatus((dsmContext_t *)thd->gemini.context,
tableNumber,&tableStatus);
- if(tableStatus)
+ if(tableStatus == DSM_OBJECT_IN_REPAIR)
tableStatus = HA_ERR_CRASHED;
+
+ pthread_mutex_lock(&share->mutex);
+ share->use_count++;
+ pthread_mutex_unlock(&share->mutex);
+
+ if (table->blob_fields)
+ {
+ /* Allocate room for the blob ids from an unpacked row. Note that
+ ** we may not actually need all of this space because tiny blobs
+ ** are stored in the packed row, not in a separate storage object
+ ** like larger blobs. But we allocate an entry for all blobs to
+ ** keep the code simpler.
+ */
+ pBlobDescs = (gemBlobDesc_t *)my_malloc(
+ table->blob_fields * sizeof(gemBlobDesc_t),
+ MYF(MY_WME | MY_ZEROFILL));
+ }
+ else
+ {
+ pBlobDescs = 0;
+ }
+
+ get_index_stats(thd);
+ info(HA_STATUS_CONST);
+
DBUG_RETURN (rc);
}
@@ -680,6 +792,12 @@ int ha_gemini::index_open(char *tableName)
rc = dsmObjectNameToNum((dsmContext_t *)thd->gemini.context,
(dsmText_t *)tableName,
&objectNumber);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to file Index number for %s", tableName);
+ DBUG_RETURN(rc);
+ }
pindexNumbers[i] = objectNumber;
}
}
@@ -692,12 +810,22 @@ int ha_gemini::index_open(char *tableName)
int ha_gemini::close(void)
{
DBUG_ENTER("ha_gemini::close");
- thr_lock_delete(&alock);
- my_free(rec_buff,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)rec_buff,MYF(MY_ALLOW_ZERO_PTR));
rec_buff = 0;
my_free((char *)pindexNumbers,MYF(MY_ALLOW_ZERO_PTR));
pindexNumbers = 0;
- DBUG_RETURN(0);
+
+ if (pBlobDescs)
+ {
+ for (uint i = 0; i < table->blob_fields; i++)
+ {
+ my_free((char*)pBlobDescs[i].pBlob, MYF(MY_ALLOW_ZERO_PTR));
+ }
+ my_free((char *)pBlobDescs, MYF(0));
+ pBlobDescs = 0;
+ }
+
+ DBUG_RETURN(free_share(share, 0));
}
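get_share()/free_share() follow the usual MySQL handler-share pattern: one GEM_SHARE per table name, kept in the gem_open_tables hash under gem_mutex, reference-counted through use_count, and carrying the table's THR_LOCK and cached rec_per_key statistics. A simplified sketch of that pattern, using a linked list in place of the HASH and leaving out the THR_LOCK and statistics:

  #include <pthread.h>
  #include <stdlib.h>
  #include <string.h>

  typedef struct my_share {
    char            *name;        /* table name, also the lookup key       */
    unsigned         use_count;   /* number of handlers holding this share */
    pthread_mutex_t  mutex;       /* protects per-share state              */
    struct my_share *next;
  } MY_SHARE;

  static MY_SHARE        *open_shares;          /* stand-in for the HASH   */
  static pthread_mutex_t  shares_mutex = PTHREAD_MUTEX_INITIALIZER;

  static MY_SHARE *get_share(const char *name)
  {
    MY_SHARE *share;
    pthread_mutex_lock(&shares_mutex);
    for (share = open_shares; share; share = share->next)
      if (!strcmp(share->name, name))
        break;
    if (!share && (share = calloc(1, sizeof(*share))))   /* first opener creates it */
    {
      share->name = strdup(name);
      pthread_mutex_init(&share->mutex, NULL);
      share->next = open_shares;
      open_shares = share;
    }
    if (share)
      share->use_count++;
    pthread_mutex_unlock(&shares_mutex);
    return share;
  }

  static void free_share(MY_SHARE *share)
  {
    pthread_mutex_lock(&shares_mutex);
    if (!--share->use_count)                             /* last closer tears it down */
    {
      MY_SHARE **p;
      for (p = &open_shares; *p != share; p = &(*p)->next) ;
      *p = share->next;
      pthread_mutex_destroy(&share->mutex);
      free(share->name);
      free(share);
    }
    pthread_mutex_unlock(&shares_mutex);
  }
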
@@ -709,7 +837,7 @@ int ha_gemini::write_row(byte * record)
DBUG_ENTER("write_row");
- if(tableStatus)
+ if(tableStatus == HA_ERR_CRASHED)
DBUG_RETURN(tableStatus);
thd = current_thd;
@@ -737,10 +865,11 @@ int ha_gemini::write_row(byte * record)
/* A set insert-id statement so set the auto-increment value if this
value is higher than it's current value */
error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
- tableNumber, (ULONG64 *)&nr);
+ tableNumber, (ULONG64 *)&nr,1);
if(thd->next_insert_id > nr)
{
- error = dsmTableAutoIncrementSet((dsmContext_t *)thd->gemini.context,tableNumber,
+ error = dsmTableAutoIncrementSet((dsmContext_t *)thd->gemini.context,
+ tableNumber,
(ULONG64)thd->next_insert_id);
}
}
@@ -749,11 +878,13 @@ int ha_gemini::write_row(byte * record)
}
dsmRecord.table = tableNumber;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
if ((error=pack_row((byte **)&dsmRecord.pbuffer, (int *)&dsmRecord.recLength,
- record)))
+ record, FALSE)))
+ {
DBUG_RETURN(error);
+ }
error = dsmRecordCreate((dsmContext_t *)thd->gemini.context,
&dsmRecord,0);
@@ -769,6 +900,8 @@ int ha_gemini::write_row(byte * record)
thd->gemini.needSavepoint = 1;
}
}
+ if(error == DSM_S_RQSTREJ)
+ error = HA_ERR_LOCK_WAIT_TIMEOUT;
DBUG_RETURN(error);
}
@@ -777,10 +910,17 @@ longlong ha_gemini::get_auto_increment()
{
longlong nr;
int error;
+ int update;
THD *thd=current_thd;
+ if(thd->lex.sql_command == SQLCOM_SHOW_TABLES)
+ update = 0;
+ else
+ update = 1;
+
error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
- tableNumber, (ULONG64 *)&nr);
+ tableNumber, (ULONG64 *)&nr,
+ update);
return nr;
}
@@ -828,8 +968,8 @@ int ha_gemini::handleIndexEntry(const byte * record, dsmRecid_t recid,
expects that the three lead bytes of the header are
not counted in this length -- But cxKeyPrepare also
expects that these three bytes are present in the keystr */
- theKey.akey.keyLen = (COUNT)keyStringLen - 3;
- theKey.akey.unknown_comp = thereIsAnull;
+ theKey.akey.keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
+ theKey.akey.unknown_comp = (dsmBoolean_t)thereIsAnull;
theKey.akey.word_index = 0;
theKey.akey.descending_key =0;
if(option == KEY_CREATE)
@@ -880,6 +1020,7 @@ int ha_gemini::createKeyString(const byte * record, KEY *pkeyinfo,
int componentLen;
int fieldType;
int isNull;
+ uint key_part_length;
KEY_PART_INFO *key_part;
@@ -892,21 +1033,35 @@ int ha_gemini::createKeyString(const byte * record, KEY *pkeyinfo,
unsigned char *pos;
key_part = pkeyinfo->key_part + i;
+ key_part_length = key_part->length;
fieldType = gemDataType(key_part->field->type());
- if(fieldType == GEM_CHAR)
+ switch (fieldType)
{
+ case GEM_CHAR:
+ {
/* Save the current ptr to the field in case we're building a key
to remove an old key value when an indexed character column
gets updated. */
char *ptr = key_part->field->ptr;
key_part->field->ptr = (char *)record + key_part->offset;
- key_part->field->sort_string(rec_buff, key_part->length);
+ key_part->field->sort_string((char*)rec_buff, key_part->length);
key_part->field->ptr = ptr;
pos = (unsigned char *)rec_buff;
- }
- else
- {
+ }
+ break;
+
+ case GEM_TINYBLOB:
+ case GEM_BLOB:
+ case GEM_MEDIUMBLOB:
+ case GEM_LONGBLOB:
+ ((Field_blob*)key_part->field)->get_ptr((char**)&pos);
+ key_part_length = ((Field_blob*)key_part->field)->get_length(
+ (char*)record + key_part->offset);
+ break;
+
+ default:
pos = (unsigned char *)record + key_part->offset;
+ break;
}
isNull = record[key_part->null_offset] & key_part->null_bit;
@@ -914,7 +1069,7 @@ int ha_gemini::createKeyString(const byte * record, KEY *pkeyinfo,
*thereIsAnull = true;
rc = gemFieldToIdxComponent(pos,
- (unsigned long) key_part->length,
+ (unsigned long) key_part_length,
fieldType,
isNull ,
key_part->field->flags & UNSIGNED_FLAG,
@@ -951,7 +1106,7 @@ int ha_gemini::update_row(const byte * old_record, byte * new_record)
}
for (uint keynr=0 ; keynr < table->keys ; keynr++)
{
- if(key_cmp(keynr,old_record, new_record))
+ if(key_cmp(keynr,old_record, new_record,false))
{
error = handleIndexEntry(old_record,lastRowid,KEY_DELETE,keynr);
if(error)
@@ -973,10 +1128,10 @@ int ha_gemini::update_row(const byte * old_record, byte * new_record)
dsmRecord.table = tableNumber;
dsmRecord.recid = lastRowid;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
if ((error=pack_row((byte **)&dsmRecord.pbuffer, (int *)&dsmRecord.recLength,
- new_record)))
+ new_record, TRUE)))
{
DBUG_RETURN(error);
}
@@ -992,6 +1147,7 @@ int ha_gemini::delete_row(const byte * record)
int error = 0;
dsmRecord_t dsmRecord;
THD *thd = current_thd;
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
DBUG_ENTER("delete_row");
statistic_increment(ha_delete_count,&LOCK_status);
@@ -999,9 +1155,7 @@ int ha_gemini::delete_row(const byte * record)
if(thd->gemini.needSavepoint)
{
thd->gemini.savepoint++;
- error = dsmTransaction((dsmContext_t *)thd->gemini.context,
- &thd->gemini.savepoint,
- DSMTXN_SAVE, 0, 0);
+ error = dsmTransaction(pcontext, &thd->gemini.savepoint, DSMTXN_SAVE, 0, 0);
if (error)
DBUG_RETURN(error);
thd->gemini.needSavepoint = 0;
@@ -1013,8 +1167,27 @@ int ha_gemini::delete_row(const byte * record)
error = handleIndexEntries(record, dsmRecord.recid,KEY_DELETE);
if(!error)
{
- error = dsmRecordDelete((dsmContext_t *)thd->gemini.context,
- &dsmRecord, 0, NULL);
+ error = dsmRecordDelete(pcontext, &dsmRecord, 0, NULL);
+ }
+
+ /* Delete any blobs associated with this row */
+ if (table->blob_fields)
+ {
+ dsmBlob_t gemBlob;
+
+ gemBlob.areaType = DSMOBJECT_BLOB;
+ gemBlob.blobObjNo = tableNumber;
+ for (uint i = 0; i < table->blob_fields; i++)
+ {
+ if (pBlobDescs[i].blobId)
+ {
+ gemBlob.blobId = pBlobDescs[i].blobId;
+ my_free((char *)pBlobDescs[i].pBlob, MYF(MY_ALLOW_ZERO_PTR));
+ dsmBlobStart(pcontext, &gemBlob);
+ dsmBlobDelete(pcontext, &gemBlob, NULL);
+ /* according to DSM doc, no need to call dsmBlobEnd() */
+ }
+ }
}
DBUG_RETURN(error);
@@ -1023,7 +1196,6 @@ int ha_gemini::delete_row(const byte * record)
int ha_gemini::index_init(uint keynr)
{
int error = 0;
- int keyStringLen;
THD *thd;
DBUG_ENTER("index_init");
thd = current_thd;
@@ -1046,19 +1218,9 @@ int ha_gemini::index_init(uint keynr)
}
pbracketBase->index = 0;
pbracketLimit->index = (dsmIndex_t)pindexNumbers[keynr];
- pbracketLimit->keycomps = 1;
- keyStringLen = 0;
- error = gemKeyHigh(pbracketLimit->keystr, &keyStringLen,
- pbracketLimit->index);
-
- /* We have to subtract three here since cxKeyPrepare
- expects that the three lead bytes of the header are
- not counted in this length -- But cxKeyPrepare also
- expects that these three bytes are present in the keystr */
- pbracketLimit->keyLen = (COUNT)keyStringLen - 3;
-
pbracketBase->descending_key = pbracketLimit->descending_key = 0;
pbracketBase->ksubstr = pbracketLimit->ksubstr = 0;
+ pbracketLimit->keycomps = pbracketBase->keycomps = 1;
pfoundKey = (dsmKey_t *)my_malloc(sizeof(dsmKey_t) + keyBufSize,MYF(MY_WME));
if(!pfoundKey)
@@ -1130,6 +1292,7 @@ int ha_gemini::pack_key( uint keynr, dsmKey_t *pkey,
{
uint offset=0;
unsigned char *pos;
+ uint key_part_length = key_part->length;
int fieldType;
if (key_part->null_bit)
@@ -1141,7 +1304,7 @@ int ha_gemini::pack_key( uint keynr, dsmKey_t *pkey,
key_ptr+= key_part->store_length;
rc = gemFieldToIdxComponent(
(unsigned char *)key_ptr + offset,
- (unsigned long) key_part->length,
+ (unsigned long) key_part_length,
0,
1 , /* Tells it to build a null component */
key_part->field->flags & UNSIGNED_FLAG,
@@ -1153,20 +1316,31 @@ int ha_gemini::pack_key( uint keynr, dsmKey_t *pkey,
}
}
fieldType = gemDataType(key_part->field->type());
- if(fieldType == GEM_CHAR)
+ switch (fieldType)
{
- key_part->field->store(key_ptr + offset, key_part->length);
- key_part->field->sort_string(rec_buff, key_part->length);
+ case GEM_CHAR:
+ key_part->field->store((char*)key_ptr + offset, key_part->length);
+ key_part->field->sort_string((char*)rec_buff, key_part->length);
pos = (unsigned char *)rec_buff;
- }
- else
- {
+ break;
+
+ case GEM_TINYBLOB:
+ case GEM_BLOB:
+ case GEM_MEDIUMBLOB:
+ case GEM_LONGBLOB:
+ ((Field_blob*)key_part->field)->get_ptr((char**)&pos);
+ key_part_length = ((Field_blob*)key_part->field)->get_length(
+ (char*)key_ptr + offset);
+ break;
+
+ default:
pos = (unsigned char *)key_ptr + offset;
+ break;
}
rc = gemFieldToIdxComponent(
pos,
- (unsigned long) key_part->length,
+ (unsigned long) key_part_length,
fieldType,
0 ,
key_part->field->flags & UNSIGNED_FLAG,
@@ -1189,7 +1363,7 @@ void ha_gemini::unpack_key(char *record, dsmKey_t *key, uint index)
int fieldIsNull, fieldType;
int rc = 0;
- char unsigned *pos= &key->keystr[7];
+ char unsigned *pos= &key->keystr[FULLKEYHDRSZ+4/* 4 for the index number*/];
for ( ; key_part != end; key_part++)
{
@@ -1202,7 +1376,8 @@ void ha_gemini::unpack_key(char *record, dsmKey_t *key, uint index)
}
rc = gemIdxComponentToField(pos, fieldType,
(unsigned char *)record + key_part->field->offset(),
- key_part->field->field_length,
+ //key_part->field->field_length,
+ key_part->length,
key_part->field->decimals(),
&fieldIsNull);
if(fieldIsNull)
@@ -1266,12 +1441,12 @@ int ha_gemini::index_read(byte * buf, const byte * key,
pbracketLimit->keyLen = componentLen;
}
- /* We have to subtract three here since cxKeyPrepare
+ /* We have to subtract the header size here since cxKeyPrepare
expects that the three lead bytes of the header are
not counted in this length -- But cxKeyPrepare also
expects that these three bytes are present in the keystr */
- pbracketBase->keyLen -= 3;
- pbracketLimit->keyLen -= 3;
+ pbracketBase->keyLen -= FULLKEYHDRSZ;
+ pbracketLimit->keyLen -= FULLKEYHDRSZ;
thd = current_thd;
@@ -1294,7 +1469,7 @@ int ha_gemini::index_next(byte * buf)
dsmMask_t findMode;
DBUG_ENTER("index_next");
- if(tableStatus)
+ if(tableStatus == HA_ERR_CRASHED)
DBUG_RETURN(tableStatus);
thd = current_thd;
@@ -1304,9 +1479,12 @@ int ha_gemini::index_next(byte * buf)
error = gemKeyLow(pbracketBase->keystr, &keyStringLen,
pbracketLimit->index);
- pbracketBase->keyLen = (COUNT)keyStringLen - 3;
+ pbracketBase->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
pbracketBase->index = pbracketLimit->index;
- pbracketBase->keycomps = 1;
+ error = gemKeyHigh(pbracketLimit->keystr, &keyStringLen,
+ pbracketLimit->index);
+ pbracketLimit->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
+
findMode = DSMFINDFIRST;
}
else
@@ -1369,24 +1547,20 @@ int ha_gemini::index_last(byte * buf)
error = gemKeyLow(pbracketBase->keystr, &keyStringLen,
pbracketLimit->index);
- if(error)
- goto errorReturn;
- pbracketBase->keyLen = (COUNT)keyStringLen - 3;
+ pbracketBase->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
pbracketBase->index = pbracketLimit->index;
- pbracketBase->keycomps = 1;
+ error = gemKeyHigh(pbracketLimit->keystr, &keyStringLen,
+ pbracketLimit->index);
+ pbracketLimit->keyLen = (COUNT)keyStringLen - FULLKEYHDRSZ;
error = findRow(thd,DSMFINDLAST,buf);
-errorReturn:
if (error == DSM_S_ENDLOOP)
error = HA_ERR_END_OF_FILE;
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
-
- table->status = error ? STATUS_NOT_FOUND : 0;
- DBUG_RETURN(error);
}
int ha_gemini::rnd_init(bool scan)
@@ -1414,7 +1588,7 @@ int ha_gemini::rnd_next(byte *buf)
DBUG_ENTER("rnd_next");
- if(tableStatus)
+ if(tableStatus == HA_ERR_CRASHED)
DBUG_RETURN(tableStatus);
thd = current_thd;
@@ -1429,7 +1603,7 @@ int ha_gemini::rnd_next(byte *buf)
dsmRecord.recid = lastRowid;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
error = dsmTableScan((dsmContext_t *)thd->gemini.context,
&dsmRecord, DSMFINDNEXT, lockMode, 0);
@@ -1437,17 +1611,23 @@ int ha_gemini::rnd_next(byte *buf)
if(!error)
{
lastRowid = dsmRecord.recid;
- unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
+ error = unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
}
if(!error)
;
- else if (error == DSM_S_ENDLOOP)
- error = HA_ERR_END_OF_FILE;
- else if (error == DSM_S_RQSTREJ)
- error = HA_ERR_LOCK_WAIT_TIMEOUT;
- else if (error == DSM_S_LKTBFULL)
- error = HA_ERR_LOCK_TABLE_FULL;
-
+ else
+ {
+ lastRowid = 0;
+ if (error == DSM_S_ENDLOOP)
+ error = HA_ERR_END_OF_FILE;
+ else if (error == DSM_S_RQSTREJ)
+ error = HA_ERR_LOCK_WAIT_TIMEOUT;
+ else if (error == DSM_S_LKTBFULL)
+ {
+ error = HA_ERR_LOCK_TABLE_FULL;
+ gemini_lock_table_overflow_error((dsmContext_t *)thd->gemini.context);
+ }
+ }
table->status = error ? STATUS_NOT_FOUND : 0;
DBUG_RETURN(error);
}
@@ -1500,14 +1680,14 @@ int ha_gemini::fetch_row(void *gemini_context,const byte *buf)
dsmRecord.recid = lastRowid;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
rc = dsmRecordGet((dsmContext_t *)gemini_context,
&dsmRecord, 0);
if(!rc)
{
- unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
+ rc = unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
}
DBUG_RETURN(rc);
@@ -1544,7 +1724,7 @@ int ha_gemini::findRow(THD *thd, dsmMask_t findMode, byte *buf)
if(key_read)
{
- unpack_key(buf, pkey, active_index);
+ unpack_key((char*)buf, pkey, active_index);
}
if(!key_read) /* unpack_key may have turned off key_read */
{
@@ -1554,10 +1734,17 @@ int ha_gemini::findRow(THD *thd, dsmMask_t findMode, byte *buf)
errorReturn:
if(!rc)
;
- else if(rc == DSM_S_RQSTREJ)
- rc = HA_ERR_LOCK_WAIT_TIMEOUT;
- else if (rc == DSM_S_LKTBFULL)
- rc = HA_ERR_LOCK_TABLE_FULL;
+ else
+ {
+ lastRowid = 0;
+ if(rc == DSM_S_RQSTREJ)
+ rc = HA_ERR_LOCK_WAIT_TIMEOUT;
+ else if (rc == DSM_S_LKTBFULL)
+ {
+ rc = HA_ERR_LOCK_TABLE_FULL;
+ gemini_lock_table_overflow_error((dsmContext_t *)thd->gemini.context);
+ }
+ }
DBUG_RETURN(rc);
}
@@ -1578,25 +1765,47 @@ void ha_gemini::info(uint flag)
dsmStatus_t error;
ULONG64 rows;
+ if(thd->gemini.context == NULL)
+ {
+ /* Need to get this thread a connection into the database */
+ error = gemini_connect(thd);
+ if(error)
+ DBUG_VOID_RETURN;
+ }
+
error = dsmRowCount((dsmContext_t *)thd->gemini.context,tableNumber,&rows);
records = (ha_rows)rows;
deleted = 0;
}
- else if ((flag & HA_STATUS_CONST))
+ if ((flag & HA_STATUS_CONST))
{
- ;
+ ha_rows *rec_per_key = share->rec_per_key;
+ for (uint i = 0; i < table->keys; i++)
+ for(uint k=0;
+ k < table->key_info[i].key_parts; k++,rec_per_key++)
+ table->key_info[i].rec_per_key[k] = *rec_per_key;
}
- else if ((flag & HA_STATUS_ERRKEY))
+ if ((flag & HA_STATUS_ERRKEY))
{
errkey=last_dup_key;
}
- else if ((flag & HA_STATUS_TIME))
+ if ((flag & HA_STATUS_TIME))
{
;
}
- else if ((flag & HA_STATUS_AUTO))
+ if ((flag & HA_STATUS_AUTO))
{
- ;
+ THD *thd = current_thd;
+ dsmStatus_t error;
+
+ error = dsmTableAutoIncrement((dsmContext_t *)thd->gemini.context,
+ tableNumber,
+ (ULONG64 *)&auto_increment_value,
+ 0);
+ /* Should return the next auto-increment value that
+ will be given -- so we need to increment the one dsm
+ currently reports. */
+ auto_increment_value++;
}
DBUG_VOID_RETURN;
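The auto-increment handling here and in create() below relies on one convention: dsmTableAutoIncrement() reports the last value the engine handed out, while MySQL's auto_increment_value means the next value to hand out. info(HA_STATUS_AUTO) therefore adds one to what DSM reports, and create() stores create_info->auto_increment_value - 1. A tiny sketch of that off-by-one convention, using a hypothetical engine-side counter:

  #include <stdio.h>
  #include <stdint.h>

  /* Hypothetical engine-side counter: remembers the LAST value given out. */
  static uint64_t engine_last_given;

  static uint64_t engine_autoinc_get(void)       { return engine_last_given; }
  static void     engine_autoinc_set(uint64_t v) { engine_last_given = v; }

  /* MySQL side: auto_increment_value is the NEXT value to give out. */
  static uint64_t info_auto_increment_value(void)
  {
    return engine_autoinc_get() + 1;             /* as in info(HA_STATUS_AUTO) */
  }

  static void create_with_auto_increment(uint64_t next_value)
  {
    if (next_value)
      engine_autoinc_set(next_value - 1);        /* as in create()             */
  }

  int main(void)
  {
    create_with_auto_increment(100);             /* CREATE ... AUTO_INCREMENT=100 */
    printf("next value reported: %llu\n",        /* prints 100 */
           (unsigned long long)info_auto_increment_value());
    return 0;
  }
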
@@ -1658,7 +1867,22 @@ int ha_gemini::external_lock(THD *thd, int lock_type)
thd->gemini.lock_count = 1;
thd->gemini.tx_isolation = thd->tx_isolation;
}
-
+ // lockMode has already been set in store_lock
+ // If the statement about to be executed calls for
+ // exclusive locks and we're running at read uncommitted
+ // isolation level then raise an error.
+ if(thd->gemini.tx_isolation == ISO_READ_UNCOMMITTED)
+ {
+ if(lockMode == DSM_LK_EXCL)
+ {
+ DBUG_RETURN(HA_ERR_READ_ONLY_TRANSACTION);
+ }
+ else
+ {
+ lockMode = DSM_LK_NOLOCK;
+ }
+ }
+
if(thd->gemini.context == NULL)
{
/* Need to get this thread a connection into the database */
@@ -1678,6 +1902,8 @@ int ha_gemini::external_lock(THD *thd, int lock_type)
rc = dsmObjectLock((dsmContext_t *)thd->gemini.context,
(dsmObject_t)tableNumber,DSMOBJECT_TABLE,0,
lockMode, 1, 0);
+ if(rc == DSM_S_RQSTREJ)
+ rc = HA_ERR_LOCK_WAIT_TIMEOUT;
}
}
else /* lock_type == F_UNLK */
@@ -1703,18 +1929,24 @@ THR_LOCK_DATA **ha_gemini::store_lock(THD *thd, THR_LOCK_DATA **to,
!thd->in_lock_tables)
lock_type = TL_WRITE_ALLOW_WRITE;
lock.type=lock_type;
-
- if(thd->gemini.tx_isolation == ISO_READ_UNCOMMITTED)
- lockMode = DSM_LK_NOLOCK;
- else if(table->reginfo.lock_type > TL_WRITE_ALLOW_READ)
- lockMode = DSM_LK_EXCL;
- else
- lockMode = DSM_LK_SHARE;
}
+ if(table->reginfo.lock_type > TL_WRITE_ALLOW_READ)
+ lockMode = DSM_LK_EXCL;
+ else
+ lockMode = DSM_LK_SHARE;
+
*to++= &lock;
return to;
}
+void ha_gemini::update_create_info(HA_CREATE_INFO *create_info)
+{
+ table->file->info(HA_STATUS_AUTO | HA_STATUS_CONST);
+ if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
+ {
+ create_info->auto_increment_value=auto_increment_value;
+ }
+}
int ha_gemini::create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info)
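Taken together, store_lock() and external_lock() now pick the DSM lock mode as follows: anything stronger than TL_WRITE_ALLOW_READ maps to DSM_LK_EXCL, everything else to DSM_LK_SHARE, and under READ UNCOMMITTED the share lock is downgraded to DSM_LK_NOLOCK while an exclusive request is rejected with HA_ERR_READ_ONLY_TRANSACTION. A compact sketch of that decision, with the error case folded into a -1 return:

  #include <stdio.h>

  enum lock_mode { LK_NOLOCK, LK_SHARE, LK_EXCL };

  /* Returns the lock mode for a statement, or -1 when an exclusive lock is
     requested under READ UNCOMMITTED (the HA_ERR_READ_ONLY_TRANSACTION case). */
  static int pick_lock_mode(int wants_write, int read_uncommitted)
  {
    enum lock_mode mode = wants_write ? LK_EXCL : LK_SHARE;  /* store_lock()    */
    if (read_uncommitted)                                    /* external_lock() */
      return (mode == LK_EXCL) ? -1 : LK_NOLOCK;
    return mode;
  }

  int main(void)
  {
    printf("%d %d %d %d\n",
           pick_lock_mode(0, 0),   /* SHARE  (1) */
           pick_lock_mode(1, 0),   /* EXCL   (2) */
           pick_lock_mode(0, 1),   /* NOLOCK (0) */
           pick_lock_mode(1, 1));  /* error (-1) */
    return 0;
  }
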
@@ -1777,7 +2009,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)"gemini_data_area");
if( rc != 0 )
{
- printf("dsmAreaNew failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmAreaNew failed %l",rc);
return(rc);
}
@@ -1787,7 +2019,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)&name_buff[start_of_name]);
if( rc != 0 )
{
- printf("dsmExtentCreate failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmExtentCreate failed %l",rc);
return(rc);
}
@@ -1805,6 +2037,20 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)&name_buff[start_of_name],
&dummy,&dummy);
+ if (rc == 0 && table->blob_fields)
+ {
+ /* create a storage object record for blob fields */
+ rc = dsmObjectCreate(pcontext, areaNumber, &tableNumber,
+ DSMOBJECT_BLOB,0,0,0,
+ (dsmText_t *)&name_buff[start_of_name],
+ &dummy,&dummy);
+ if( rc != 0 )
+ {
+ gemini_msg(pcontext, "dsmObjectCreate for blob object failed %l",rc);
+ return(rc);
+ }
+ }
+
if(rc == 0 && form->keys)
{
fn_format(name_buff, name, "", ha_gemini_idx_ext, 2 | 4);
@@ -1814,7 +2060,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)"gemini_index_area");
if( rc != 0 )
{
- printf("dsmAreaNew failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmAreaNew failed %l",rc);
return(rc);
}
/* Create an extent */
@@ -1823,7 +2069,7 @@ int ha_gemini::create(const char *name, register TABLE *form,
(dsmText_t *)&name_buff[start_of_name]);
if( rc != 0 )
{
- printf("dsmExtentCreate failed %ld\n",rc);
+ gemini_msg(pcontext, "dsmExtentCreate failed %l",rc);
return(rc);
}
@@ -1859,10 +2105,11 @@ int ha_gemini::create(const char *name, register TABLE *form,
}
}
- rc = dsmTableAutoIncrementSet(pcontext,tableNumber,
- create_info->auto_increment_value);
-
-
+ /* The auto_increment value is the next one to be given
+ out so give dsm one less than this value */
+ if(create_info->auto_increment_value)
+ rc = dsmTableAutoIncrementSet(pcontext,tableNumber,
+ create_info->auto_increment_value-1);
/* Get a table lock on this table in case this table is being
created as part of an alter table statement. We don't want
@@ -1950,26 +2197,25 @@ int ha_gemini::delete_table(const char *pname)
(dsmObject_t *)&tableNum);
if (rc)
{
- printf("Cound not find table number for %s with string %s, %ld\n",
- pname,name_buff,rc);
+ gemini_msg(pcontext, "Unable to find table number for %s", name_buff);
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
- rc = dsmObjectInfo(pcontext, tableNum, DSMOBJECT_MIXTABLE, &tableArea,
- &objectAttr, &associate, &associateType, &block, &root);
+ rc = dsmObjectInfo(pcontext, tableNum, DSMOBJECT_MIXTABLE, tableNum,
+ &tableArea, &objectAttr, &associateType, &block, &root);
if (rc)
{
- printf("Failed to get area number for table %d, %s, return %ld\n",
+ gemini_msg(pcontext, "Failed to get area number for table %d, %s, return %l",
tableNum, pname, rc);
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
}
@@ -1979,14 +2225,14 @@ int ha_gemini::delete_table(const char *pname)
rc = dsmObjectDeleteAssociate(pcontext, tableNum, &indexArea);
if (rc)
{
- printf("Error deleting storage objects for table number %d, return %ld\n",
+ gemini_msg(pcontext, "Error deleting storage objects for table number %d, return %l",
(int)tableNum, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
@@ -1994,33 +2240,33 @@ int ha_gemini::delete_table(const char *pname)
if (indexArea != DSMAREA_INVALID)
{
/* Delete the extents for both Index and Table */
- rc = dsmExtentDelete(pcontext, indexArea, 0);
+ rc = dsmExtentDelete(pcontext, indexArea);
rc = dsmAreaDelete(pcontext, indexArea);
if (rc)
{
- printf("Error deleting Index Area %ld, return %ld\n", indexArea, rc);
+ gemini_msg(pcontext, "Error deleting Index Area %l, return %l", indexArea, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
}
- rc = dsmExtentDelete(pcontext, tableArea, 0);
+ rc = dsmExtentDelete(pcontext, tableArea);
rc = dsmAreaDelete(pcontext, tableArea);
if (rc)
{
- printf("Error deleting table Area %ld, name %s, return %ld\n",
+ gemini_msg(pcontext, "Error deleting table Area %l, name %s, return %l",
tableArea, pname, rc);
/* roll back txn and return */
rc = gemini_rollback(thd);
if (rc)
{
- printf("Error in rollback %ld\n",rc);
+ gemini_msg(pcontext, "Error in rollback %l",rc);
}
DBUG_RETURN(rc);
}
@@ -2030,7 +2276,7 @@ int ha_gemini::delete_table(const char *pname)
rc = gemini_commit(thd);
if (rc)
{
- printf("Failed to commit transaction %ld\n",rc);
+ gemini_msg(pcontext, "Failed to commit transaction %l",rc);
}
@@ -2047,7 +2293,6 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
THD *thd;
dsmContext_t *pcontext;
dsmStatus_t rc;
- char tabname_buff[FN_REFLEN];
char dbname_buff[FN_REFLEN];
char name_buff[FN_REFLEN];
char newname_buff[FN_REFLEN];
@@ -2056,6 +2301,7 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
unsigned i, nameLen;
dsmObject_t tableNum;
dsmArea_t indexArea = 0;
+ dsmArea_t tableArea = 0;
DBUG_ENTER("ha_gemini::rename_table");
@@ -2068,7 +2314,7 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
{
if (gemini_is_vst(name_buff))
{
- return 0;
+ return DSM_S_CANT_RENAME_VST;
}
}
}
@@ -2113,21 +2359,51 @@ int ha_gemini::rename_table(const char *pfrom, const char *pto)
rc = dsmObjectNameToNum(pcontext, (dsmText_t *)name_buff, &tableNum);
if (rc)
+ {
+ gemini_msg(pcontext, "Unable to file Table number for %s", name_buff);
goto errorReturn;
+ }
rc = dsmObjectRename(pcontext, tableNum,
(dsmText_t *)newname_buff,
(dsmText_t *)&newidxextname_buff[start_of_name],
(dsmText_t *)&newextname_buff[start_of_name],
- &indexArea);
+ &indexArea, &tableArea);
if (rc)
+ {
+ gemini_msg(pcontext, "Failed to rename %s to %s",name_buff,newname_buff);
goto errorReturn;
+ }
+
+ /* Rename the physical table and index files (if necessary).
+ ** Close the file, rename it, and reopen it (have to do it this
+ ** way so rename works on Windows).
+ */
+ if (!(rc = dsmAreaClose(pcontext, tableArea)))
+ {
+ if (!(rc = rename_file_ext(pfrom, pto, ha_gemini_ext)))
+ {
+ rc = dsmAreaOpen(pcontext, tableArea, 0);
+ if (rc)
+ {
+ gemini_msg(pcontext, "Failed to reopen area %d",tableArea);
+ }
+ }
+ }
- /* rename the physical table and index files (if necessary) */
- rc = rename_file_ext(pfrom, pto, ha_gemini_ext);
if (!rc && indexArea)
{
- rc = rename_file_ext(pfrom, pto, ha_gemini_idx_ext);
+ if (!(rc = dsmAreaClose(pcontext, indexArea)))
+ {
+ if (!(rc = rename_file_ext(pfrom, pto, ha_gemini_idx_ext)))
+ {
+ rc = dsmAreaOpen(pcontext, indexArea, 0);
+ if (rc)
+ {
+ gemini_msg(pcontext, "Failed to reopen area %d",tableArea);
+ }
+ }
+ }
}
errorReturn:
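rename_table() now closes the storage area, renames the file on disk, then reopens the area, since renaming a file that is still open fails on Windows. A generic sketch of that close-rename-reopen sequence, using plain stdio handles and example file names in place of the dsmAreaClose()/rename_file_ext()/dsmAreaOpen() calls:

  #include <stdio.h>

  /* Close-rename-reopen: needed on Windows, where rename() fails while the
     file is still open.  fpp holds the currently open handle. */
  static int rename_open_file(FILE **fpp, const char *from, const char *to)
  {
    if (fclose(*fpp))                 /* dsmAreaClose() in the handler    */
      return 1;
    *fpp = NULL;
    if (rename(from, to))             /* rename_file_ext() in the handler */
      return 1;
    if (!(*fpp = fopen(to, "r+b")))   /* dsmAreaOpen() in the handler     */
      return 1;
    return 0;
  }

  int main(void)
  {
    FILE *fp = fopen("demo.gmd", "wb");   /* example data-extent file name */
    if (!fp) return 1;
    fputs("data", fp);
    return rename_open_file(&fp, "demo.gmd", "demo_renamed.gmd");
  }
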
@@ -2143,17 +2419,38 @@ errorReturn:
double ha_gemini::scan_time()
{
- return records / (gemini_blocksize / table->reclength);
+ return (double)records /
+ (double)((gemini_blocksize / (double)table->reclength));
}
-int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
+int ha_gemini::analyze(THD* thd, HA_CHECK_OPT* check_opt)
{
int error;
+ uint saveIsolation;
+ dsmMask_t saveLockMode;
+
+ check_opt->quick = true;
+ check_opt->optimize = true; // Tells check not to get table lock
+ saveLockMode = lockMode;
+ saveIsolation = thd->gemini.tx_isolation;
+ thd->gemini.tx_isolation = ISO_READ_UNCOMMITTED;
+ lockMode = DSM_LK_NOLOCK;
+ error = check(thd,check_opt);
+ lockMode = saveLockMode;
+ thd->gemini.tx_isolation = saveIsolation;
+ return (error);
+}
+
+int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ int error = 0;
int checkStatus = HA_ADMIN_OK;
ha_rows indexCount;
- byte *buf = 0, *indexBuf = 0;
+ byte *buf = 0, *indexBuf = 0, *prevBuf = 0;
int errorCount = 0;
+ info(HA_STATUS_VARIABLE); // Makes sure row count is up to date
+
/* Get a shared table lock */
if(thd->gemini.needSavepoint)
{
@@ -2167,23 +2464,33 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
return(error);
thd->gemini.needSavepoint = 0;
}
- buf = my_malloc(table->rec_buff_length,MYF(MY_WME));
- indexBuf = my_malloc(table->rec_buff_length,MYF(MY_WME));
+ buf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME));
+ indexBuf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME));
+ prevBuf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME |MY_ZEROFILL ));
+
/* Lock the table */
- error = dsmObjectLock((dsmContext_t *)thd->gemini.context,
- (dsmObject_t)tableNumber,
- DSMOBJECT_TABLE,0,
- DSM_LK_SHARE, 1, 0);
+ if (!check_opt->optimize)
+ error = dsmObjectLock((dsmContext_t *)thd->gemini.context,
+ (dsmObject_t)tableNumber,
+ DSMOBJECT_TABLE,0,
+ DSM_LK_SHARE, 1, 0);
if(error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Failed to lock table %d, error %d",tableNumber, error);
return error;
+ }
- info(HA_STATUS_VARIABLE);
-
+ ha_rows *rec_per_key = share->rec_per_key;
/* If quick option just scan along index converting and counting entries */
for (uint i = 0; i < table->keys; i++)
{
- key_read = 1;
+ key_read = 1; // Causes data to be extracted from the keys
indexCount = 0;
+ // Clear the cardinality stats for this index
+ memset(table->key_info[i].rec_per_key,0,
+ sizeof(table->key_info[0].rec_per_key[0]) *
+ table->key_info[i].key_parts);
error = index_init(i);
error = index_first(indexBuf);
while(!error)
@@ -2195,8 +2502,12 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
error = fetch_row(thd->gemini.context,buf);
if(!error)
{
- if(key_cmp(i,buf,indexBuf))
+ if(key_cmp(i,buf,indexBuf,false))
{
+
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Check Error! Key does not match row for rowid %d for index %s",
+ lastRowid,table->key_info[i].name);
print_msg(thd,table->real_name,"check","error",
"Key does not match row for rowid %d for index %s",
lastRowid,table->key_info[i].name);
@@ -2209,6 +2520,9 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
{
errorCount++;
checkStatus = HA_ADMIN_CORRUPT;
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Check Error! Key does not have a valid row pointer %d for index %s",
+ lastRowid,table->key_info[i].name);
print_msg(thd,table->real_name,"check","error",
"Key does not have a valid row pointer %d for index %s",
lastRowid,table->key_info[i].name);
@@ -2218,10 +2532,27 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
}
}
}
+
+ key_cmp(i,indexBuf,prevBuf,true);
+ bcopy((void *)indexBuf,(void *)prevBuf,table->rec_buff_length);
+
if(!error)
error = index_next(indexBuf);
}
-
+
+ for(uint j=1; j < table->key_info[i].key_parts; j++)
+ {
+ table->key_info[i].rec_per_key[j] += table->key_info[i].rec_per_key[j-1];
+ }
+ for(uint k=0; k < table->key_info[i].key_parts; k++)
+ {
+ if (table->key_info[i].rec_per_key[k])
+ table->key_info[i].rec_per_key[k] =
+ records / table->key_info[i].rec_per_key[k];
+ *rec_per_key = table->key_info[i].rec_per_key[k];
+ rec_per_key++;
+ }
+
if(error == HA_ERR_END_OF_FILE)
{
/* Check count of rows */
@@ -2231,6 +2562,10 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
/* Number of index entries does not agree with the number of
rows in the index. */
checkStatus = HA_ADMIN_CORRUPT;
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Check Error! Total rows %d does not match total index entries %d for %s",
+ records, indexCount,
+ table->key_info[i].name);
print_msg(thd,table->real_name,"check","error",
"Total rows %d does not match total index entries %d for %s",
records, indexCount,
@@ -2248,23 +2583,61 @@ int ha_gemini::check(THD* thd, HA_CHECK_OPT* check_opt)
{
/* Now scan the table and for each row generate the keys
and find them in the index */
- error = fullCheck(thd, buf);\
+ error = fullCheck(thd, buf);
if(error)
checkStatus = error;
}
+ // Store the key distribution information
+ error = saveKeyStats(thd);
error_return:
- my_free(buf,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)buf,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)indexBuf,MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)prevBuf,MYF(MY_ALLOW_ZERO_PTR));
+
index_end();
key_read = 0;
- error = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
- (dsmObject_t)tableNumber,
- DSMOBJECT_TABLE,0,
- DSM_LK_SHARE,0);
+ if(!check_opt->optimize)
+ {
+ error = dsmObjectUnlock((dsmContext_t *)thd->gemini.context,
+ (dsmObject_t)tableNumber,
+ DSMOBJECT_TABLE,0,
+ DSM_LK_SHARE,0);
+ if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to unlock table %d", tableNumber);
+ }
+ }
return checkStatus;
}
+int ha_gemini::saveKeyStats(THD *thd)
+{
+ dsmStatus_t rc = 0;
+
+ /* Insert a row in the indexStats table for each column of
+ each index of the table */
+
+ for(uint i = 0; i < table->keys; i++)
+ {
+ for (uint j = 0; j < table->key_info[i].key_parts && !rc ;j++)
+ {
+ rc = dsmIndexStatsPut((dsmContext_t *)thd->gemini.context,
+ tableNumber, pindexNumbers[i],
+ j, (LONG64)table->key_info[i].rec_per_key[j]);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Failed to update index stats for table %d, index %d",
+ tableNumber, pindexNumbers[i]);
+ }
+ }
+ }
+ return rc;
+}
+
int ha_gemini::fullCheck(THD *thd,byte *buf)
{
int error;
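For reference, the rec_per_key arithmetic in the check() hunk above reduces to the following standalone sketch (names simplified; string key parts stand in for Gemini key buffers, and the counting mirrors what key_cmp() does when updateStats is true):

    #include <string>
    #include <vector>

    // For a sorted index scan, stat[j] first counts adjacent key pairs whose
    // first mismatch is at key part j (what key_cmp() does with updateStats).
    // Prefix-summing turns that into "number of distinct (j+1)-part prefixes
    // minus one"; dividing the row count by it gives the average rows per
    // prefix value, which is what rec_per_key ends up holding.
    static std::vector<unsigned long>
    rows_per_key_prefix(const std::vector<std::vector<std::string> > &sorted_keys)
    {
      size_t parts = sorted_keys.empty() ? 0 : sorted_keys[0].size();
      std::vector<unsigned long> stat(parts, 0);
      for (size_t r = 1; r < sorted_keys.size(); r++)
        for (size_t j = 0; j < parts; j++)
          if (sorted_keys[r][j] != sorted_keys[r - 1][j])
          {
            stat[j]++;              // first differing key part
            break;
          }
      for (size_t j = 1; j < parts; j++)
        stat[j] += stat[j - 1];     // differences within the first j+1 parts
      for (size_t j = 0; j < parts; j++)
        if (stat[j])
          stat[j] = sorted_keys.size() / stat[j];  // ~ rows per distinct prefix
      return stat;
    }
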
@@ -2319,7 +2692,12 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
&thd->gemini.savepoint,
DSMTXN_SAVE, 0, 0);
if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Error setting savepoint number %d, error %d",
+ thd->gemini.savepoint++, error);
return(error);
+ }
thd->gemini.needSavepoint = 0;
}
@@ -2330,7 +2708,11 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
DSMOBJECT_TABLE,0,
DSM_LK_EXCL, 1, 0);
if(error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Failed to lock table %d, error %d",tableNumber, error);
return error;
+ }
error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
DSM_TAGCONTEXT_NO_LOGGING,1);
@@ -2338,13 +2720,18 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
error = dsmTableReset((dsmContext_t *)thd->gemini.context,
(dsmTable_t)tableNumber, table->keys,
pindexNumbers);
+ if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "dsmTableReset failed for table %d, error %d",tableNumber, error);
+ }
- buf = my_malloc(table->rec_buff_length,MYF(MY_WME));
+ buf = (byte*)my_malloc(table->rec_buff_length,MYF(MY_WME));
dsmRecord.table = tableNumber;
dsmRecord.recid = 0;
dsmRecord.pbuffer = (dsmBuffer_t *)rec_buff;
dsmRecord.recLength = table->reclength;
- dsmRecord.maxLength = table->reclength;
+ dsmRecord.maxLength = table->rec_buff_length;
while(!error)
{
error = dsmTableScan((dsmContext_t *)thd->gemini.context,
@@ -2352,13 +2739,15 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
1);
if(!error)
{
- unpack_row((char *)buf,(char *)dsmRecord.pbuffer);
- error = handleIndexEntries(buf,dsmRecord.recid,KEY_CREATE);
- if(error == HA_ERR_FOUND_DUPP_KEY)
+ if (!(error = unpack_row((char *)buf,(char *)dsmRecord.pbuffer)))
{
- /* We don't want to stop on duplicate keys -- we're repairing
- here so let's get as much repaired as possible. */
- error = 0;
+ error = handleIndexEntries(buf,dsmRecord.recid,KEY_CREATE);
+ if(error == HA_ERR_FOUND_DUPP_KEY)
+ {
+ /* We don't want to stop on duplicate keys -- we're repairing
+ here so let's get as much repaired as possible. */
+ error = 0;
+ }
}
}
}
@@ -2366,7 +2755,13 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
(dsmObject_t)tableNumber,
DSMOBJECT_TABLE,0,
DSM_LK_EXCL,0);
- my_free(buf,MYF(MY_ALLOW_ZERO_PTR));
+ if (error)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+ "Unable to unlock table %d", tableNumber);
+ }
+
+ my_free((char*)buf,MYF(MY_ALLOW_ZERO_PTR));
error = dsmContextSetLong((dsmContext_t *)thd->gemini.context,
DSM_TAGCONTEXT_NO_LOGGING,0);
@@ -2374,6 +2769,313 @@ int ha_gemini::repair(THD* thd, HA_CHECK_OPT* check_opt)
return error;
}
+
+int ha_gemini::restore(THD* thd, HA_CHECK_OPT *check_opt)
+{
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ char* backup_dir = thd->lex.backup_dir;
+ char src_path[FN_REFLEN], dst_path[FN_REFLEN];
+ char* table_name = table->real_name;
+ int error = 0;
+ int errornum;
+ const char* errmsg = "";
+ dsmArea_t tableArea = 0;
+ dsmObjectAttr_t objectAttr;
+ dsmObject_t associate;
+ dsmObjectType_t associateType;
+ dsmDbkey_t block, root;
+ dsmStatus_t rc;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXTABLE, tableNumber,
+ &tableArea, &objectAttr, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaFlush(pcontext, tableArea, FLUSH_BUFFERS | FLUSH_SYNC);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaFlush (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaClose(pcontext, tableArea);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaClose (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Restore the data file */
+ if (!fn_format(src_path, table_name, backup_dir, ha_gemini_ext, 4 + 64))
+ {
+ return HA_ADMIN_INVALID;
+ }
+
+ if (my_copy(src_path, fn_format(dst_path, table->path, "",
+ ha_gemini_ext, 4), MYF(MY_WME)))
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in my_copy (.gmd) (Error %d)";
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaFlush(pcontext, tableArea, FREE_BUFFERS);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaFlush (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaOpen(pcontext, tableArea, 1);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaOpen (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+#ifdef GEMINI_BACKUP_IDX
+ dsmArea_t indexArea = 0;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXINDEX, &indexArea,
+ &objectAttr, &associate, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaClose(pcontext, indexArea);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaClose (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Restore the index file */
+ if (!fn_format(src_path, table_name, backup_dir, ha_gemini_idx_ext, 4 + 64))
+ {
+ return HA_ADMIN_INVALID;
+ }
+
+ if (my_copy(src_path, fn_format(dst_path, table->path, "",
+ ha_gemini_idx_ext, 4), MYF(MY_WME)))
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in my_copy (.gmi) (Error %d)";
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ rc = dsmAreaOpen(pcontext, indexArea, 1);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaOpen (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ return HA_ADMIN_OK;
+#else /* #ifdef GEMINI_BACKUP_IDX */
+ HA_CHECK_OPT tmp_check_opt;
+ tmp_check_opt.init();
+ /* The following aren't currently implemented in ha_gemini::repair
+ ** tmp_check_opt.quick = 1;
+ ** tmp_check_opt.flags |= T_VERY_SILENT;
+ */
+ return (repair(thd, &tmp_check_opt));
+#endif /* #ifdef GEMINI_BACKUP_IDX */
+
+ err:
+ {
+#if 0
+ /* mi_check_print_error is in ha_myisam.cc, so none of the informative
+ ** error messages above is currently being printed
+ */
+ MI_CHECK param;
+ myisamchk_init(&param);
+ param.thd = thd;
+ param.op_name = (char*)"restore";
+ param.table_name = table->table_name;
+ param.testflag = 0;
+ mi_check_print_error(&param,errmsg, errornum);
+#endif
+ return error;
+ }
+}
+
+
+int ha_gemini::backup(THD* thd, HA_CHECK_OPT *check_opt)
+{
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ char* backup_dir = thd->lex.backup_dir;
+ char src_path[FN_REFLEN], dst_path[FN_REFLEN];
+ char* table_name = table->real_name;
+ int error = 0;
+ int errornum;
+ const char* errmsg = "";
+ dsmArea_t tableArea = 0;
+ dsmObjectAttr_t objectAttr;
+ dsmObject_t associate;
+ dsmObjectType_t associateType;
+ dsmDbkey_t block, root;
+ dsmStatus_t rc;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXTABLE, tableNumber,
+ &tableArea, &objectAttr, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmd) (Error %d)";
+ errornum = rc;
+ goto err;
+ }
+
+ /* Flush the buffers before backing up the table */
+  rc = dsmAreaFlush(pcontext, tableArea,
+                    FLUSH_BUFFERS | FLUSH_SYNC);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmAreaFlush (.gmd) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Backup the .FRM file */
+ if (!fn_format(dst_path, table_name, backup_dir, reg_ext, 4 + 64))
+ {
+ errmsg = "Failed in fn_format() for .frm file: errno = %d";
+ error = HA_ADMIN_INVALID;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ if (my_copy(fn_format(src_path, table->path,"", reg_ext, 4),
+ dst_path,
+ MYF(MY_WME | MY_HOLD_ORIGINAL_MODES )))
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed copying .frm file: errno = %d";
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Backup the data file */
+ if (!fn_format(dst_path, table_name, backup_dir, ha_gemini_ext, 4 + 64))
+ {
+ errmsg = "Failed in fn_format() for .GMD file: errno = %d";
+ error = HA_ADMIN_INVALID;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ if (my_copy(fn_format(src_path, table->path,"", ha_gemini_ext, 4),
+ dst_path,
+ MYF(MY_WME | MY_HOLD_ORIGINAL_MODES )) )
+ {
+ errmsg = "Failed copying .GMD file: errno = %d";
+ error= HA_ADMIN_FAILED;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+#ifdef GEMINI_BACKUP_IDX
+ dsmArea_t indexArea = 0;
+
+ rc = dsmObjectInfo(pcontext, tableNumber, DSMOBJECT_MIXINDEX, &indexArea,
+ &objectAttr, &associate, &associateType, &block, &root);
+ if (rc)
+ {
+ error = HA_ADMIN_FAILED;
+ errmsg = "Failed in dsmObjectInfo (.gmi) (Error %d)";
+ errornum = rc;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ /* Backup the index file */
+ if (!fn_format(dst_path, table_name, backup_dir, ha_gemini_idx_ext, 4 + 64))
+ {
+ errmsg = "Failed in fn_format() for .GMI file: errno = %d";
+ error = HA_ADMIN_INVALID;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+
+ if (my_copy(fn_format(src_path, table->path,"", ha_gemini_idx_ext, 4),
+ dst_path,
+ MYF(MY_WME | MY_HOLD_ORIGINAL_MODES )) )
+ {
+ errmsg = "Failed copying .GMI file: errno = %d";
+ error= HA_ADMIN_FAILED;
+ errornum = errno;
+ gemini_msg(pcontext, errmsg ,errornum);
+ goto err;
+ }
+#endif /* #ifdef GEMINI_BACKUP_IDX */
+
+ return HA_ADMIN_OK;
+
+ err:
+ {
+#if 0
+ /* mi_check_print_error is in ha_myisam.cc, so none of the informative
+ ** error messages above is currently being printed
+ */
+ MI_CHECK param;
+ myisamchk_init(&param);
+ param.thd = thd;
+ param.op_name = (char*)"backup";
+ param.table_name = table->table_name;
+ param.testflag = 0;
+ mi_check_print_error(&param,errmsg, errornum);
+#endif
+ return error;
+ }
+}
+
+
+int ha_gemini::optimize(THD* thd, HA_CHECK_OPT *check_opt)
+{
+ return HA_ADMIN_ALREADY_DONE;
+}
+
+
ha_rows ha_gemini::records_in_range(int keynr,
const byte *start_key,uint start_key_len,
enum ha_rkey_function start_search_flag,
@@ -2412,7 +3114,7 @@ ha_rows ha_gemini::records_in_range(int keynr,
pbracketBase->keyLen = componentLen;
}
- pbracketBase->keyLen -= 3;
+ pbracketBase->keyLen -= FULLKEYHDRSZ;
if(end_key)
{
@@ -2431,9 +3133,10 @@ ha_rows ha_gemini::records_in_range(int keynr,
pbracketLimit->keyLen = componentLen;
}
- pbracketLimit->keyLen -= 3;
+ pbracketLimit->keyLen -= FULLKEYHDRSZ;
error = dsmIndexRowsInRange((dsmContext_t *)current_thd->gemini.context,
pbracketBase,pbracketLimit,
+ tableNumber,
&pctInrange);
if(pctInrange >= 1)
rows = (ha_rows)pctInrange;
@@ -2457,32 +3160,82 @@ ha_rows ha_gemini::records_in_range(int keynr,
may only happen in rows with blobs, as the default row length is
pre-allocated.
*/
-int ha_gemini::pack_row(byte **pprow, int *ppackedLength, const byte *record)
+int ha_gemini::pack_row(byte **pprow, int *ppackedLength, const byte *record,
+ bool update)
{
+ THD *thd = current_thd;
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ gemBlobDesc_t *pBlobDesc = pBlobDescs;
+
if (fixed_length_row)
{
*pprow = (byte *)record;
*ppackedLength=(int)table->reclength;
return 0;
}
- if (table->blob_fields)
- {
- return HA_ERR_WRONG_COMMAND;
- }
/* Copy null bits */
memcpy(rec_buff, record, table->null_bytes);
byte *ptr=rec_buff + table->null_bytes;
for (Field **field=table->field ; *field ; field++)
- ptr=(byte*) (*field)->pack((char*) ptr,record + (*field)->offset());
+ {
+#ifdef GEMINI_TINYBLOB_IN_ROW
+ /* Tiny blobs (255 bytes or less) are stored in the row; larger
+ ** blobs are stored in a separate storage object (see ha_gemini::create).
+ */
+ if ((*field)->type() == FIELD_TYPE_BLOB &&
+ ((Field_blob*)*field)->blobtype() != FIELD_TYPE_TINY_BLOB)
+#else
+ if ((*field)->type() == FIELD_TYPE_BLOB)
+#endif
+ {
+ dsmBlob_t gemBlob;
+ char *blobptr;
+
+ gemBlob.areaType = DSMOBJECT_BLOB;
+ gemBlob.blobObjNo = tableNumber;
+ gemBlob.blobId = 0;
+ gemBlob.totLength = gemBlob.segLength =
+ ((Field_blob*)*field)->get_length((char*)record + (*field)->offset());
+ ((Field_blob*)*field)->get_ptr((char**) &blobptr);
+ gemBlob.pBuffer = (dsmBuffer_t *)blobptr;
+ gemBlob.blobContext.blobOffset = 0;
+ if (gemBlob.totLength)
+ {
+ dsmBlobStart(pcontext, &gemBlob);
+ if (update && pBlobDesc->blobId)
+ {
+ gemBlob.blobId = pBlobDesc->blobId;
+ dsmBlobUpdate(pcontext, &gemBlob, NULL);
+ }
+ else
+ {
+ dsmBlobPut(pcontext, &gemBlob, NULL);
+ }
+ dsmBlobEnd(pcontext, &gemBlob);
+ }
+ ptr = (byte*)((Field_blob*)*field)->pack_id((char*) ptr,
+ (char*)record + (*field)->offset(), (longlong)gemBlob.blobId);
+
+ pBlobDesc++;
+ }
+ else
+ {
+ ptr=(byte*) (*field)->pack((char*) ptr, (char*)record + (*field)->offset());
+ }
+ }
*pprow=rec_buff;
*ppackedLength= (ptr - rec_buff);
return 0;
}
-void ha_gemini::unpack_row(char *record, char *prow)
+int ha_gemini::unpack_row(char *record, char *prow)
{
+ THD *thd = current_thd;
+ dsmContext_t *pcontext = (dsmContext_t *)thd->gemini.context;
+ gemBlobDesc_t *pBlobDesc = pBlobDescs;
+
if (fixed_length_row)
{
/* If the table is a VST, the row is in Gemini internal format.
@@ -2568,38 +3321,129 @@ void ha_gemini::unpack_row(char *record, char *prow)
const char *ptr= (const char*) prow;
memcpy(record, ptr, table->null_bytes);
ptr+=table->null_bytes;
+
for (Field **field=table->field ; *field ; field++)
- ptr= (*field)->unpack(record + (*field)->offset(), ptr);
+ {
+#ifdef GEMINI_TINYBLOB_IN_ROW
+ /* Tiny blobs (255 bytes or less) are stored in the row; larger
+ ** blobs are stored in a separate storage object (see ha_gemini::create).
+ */
+ if ((*field)->type() == FIELD_TYPE_BLOB &&
+ ((Field_blob*)*field)->blobtype() != FIELD_TYPE_TINY_BLOB)
+#else
+ if ((*field)->type() == FIELD_TYPE_BLOB)
+#endif
+ {
+ dsmBlob_t gemBlob;
+
+ gemBlob.areaType = DSMOBJECT_BLOB;
+ gemBlob.blobObjNo = tableNumber;
+ gemBlob.blobId = (dsmBlobId_t)(((Field_blob*)*field)->get_id(ptr));
+ if (gemBlob.blobId)
+ {
+ gemBlob.totLength =
+ gemBlob.segLength = ((Field_blob*)*field)->get_length(ptr);
+ /* Allocate memory to store the blob. This memory is freed
+ ** the next time unpack_row is called for this table.
+ */
+ gemBlob.pBuffer = (dsmBuffer_t *)my_malloc(gemBlob.totLength,
+ MYF(0));
+ if (!gemBlob.pBuffer)
+ {
+ return HA_ERR_OUT_OF_MEM;
+ }
+ gemBlob.blobContext.blobOffset = 0;
+ dsmBlobStart(pcontext, &gemBlob);
+ dsmBlobGet(pcontext, &gemBlob, NULL);
+ dsmBlobEnd(pcontext, &gemBlob);
+ }
+ else
+ {
+ gemBlob.pBuffer = 0;
+ }
+ ptr = ((Field_blob*)*field)->unpack_id(record + (*field)->offset(),
+ ptr, (char *)gemBlob.pBuffer);
+ pBlobDesc->blobId = gemBlob.blobId;
+ my_free((char*)pBlobDesc->pBlob, MYF(MY_ALLOW_ZERO_PTR));
+ pBlobDesc->pBlob = gemBlob.pBuffer;
+ pBlobDesc++;
+ }
+ else
+ {
+ ptr= (*field)->unpack(record + (*field)->offset(), ptr);
+ }
+ }
}
+
+ return 0;
}
int ha_gemini::key_cmp(uint keynr, const byte * old_row,
- const byte * new_row)
+ const byte * new_row, bool updateStats)
{
KEY_PART_INFO *key_part=table->key_info[keynr].key_part;
KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts;
- for ( ; key_part != end ; key_part++)
+ for ( uint i = 0 ; key_part != end ; key_part++, i++)
{
if (key_part->null_bit)
{
if ((old_row[key_part->null_offset] & key_part->null_bit) !=
(new_row[key_part->null_offset] & key_part->null_bit))
+ {
+ if(updateStats)
+ table->key_info[keynr].rec_per_key[i]++;
return 1;
+ }
+ else if((old_row[key_part->null_offset] & key_part->null_bit) &&
+ (new_row[key_part->null_offset] & key_part->null_bit))
+ /* Both are null */
+ continue;
}
if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH))
{
-
- if (key_part->field->cmp_binary(old_row + key_part->offset,
- new_row + key_part->offset,
+ if (key_part->field->cmp_binary((char*)(old_row + key_part->offset),
+ (char*)(new_row + key_part->offset),
(ulong) key_part->length))
+ {
+ if(updateStats)
+ table->key_info[keynr].rec_per_key[i]++;
return 1;
+ }
}
else
{
if (memcmp(old_row+key_part->offset, new_row+key_part->offset,
key_part->length))
+ {
+ /* Check for special case of -0 which causes table check
+	/* Check for the special case of -0, which causes the table check
+           to find an invalid key when comparing the index value of 0
+           to the -0 stored in the row */
+ {
+ double fieldValue;
+ char *ptr = key_part->field->ptr;
+
+ key_part->field->ptr = (char *)old_row + key_part->offset;
+ fieldValue = key_part->field->val_real();
+ if(fieldValue == 0)
+ {
+ key_part->field->ptr = (char *)new_row + key_part->offset;
+ fieldValue = key_part->field->val_real();
+ if(fieldValue == 0)
+ {
+ key_part->field->ptr = ptr;
+ continue;
+ }
+ }
+ key_part->field->ptr = ptr;
+ }
+ if(updateStats)
+ {
+ table->key_info[keynr].rec_per_key[i]++;
+ }
return 1;
+ }
}
}
return 0;
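A short aside on the -0 special case handled at the end of key_cmp() above: the stored DECIMAL is a character string, so "-0" and "0" differ byte-for-byte even though they are numerically equal. The same equal-value/different-bytes effect is easy to demonstrate with IEEE doubles (an analogy only, not the handler's storage format):

    #include <cstdio>
    #include <cstring>

    int main()
    {
      double plus_zero  = 0.0;
      double minus_zero = -0.0;

      /* Numerically equal: an index key built from the value is the same ... */
      std::printf("equal as numbers: %d\n", plus_zero == minus_zero);        /* 1 */

      /* ... but the raw bytes differ, which is all a memcmp()-style row/key
         comparison can see; hence key_cmp() re-evaluates DECIMAL columns with
         val_real() before declaring a mismatch. */
      std::printf("bytes differ:     %d\n",
                  std::memcmp(&plus_zero, &minus_zero, sizeof(double)) != 0); /* 1 */
      return 0;
    }
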
@@ -2612,13 +3456,13 @@ int gemini_parse_table_name(const char *fullname, char *dbname, char *tabname)
/* separate out the name of the table and the database
*/
- namestart = strchr(fullname + start_of_name, '/');
+ namestart = (char *)strchr(fullname + start_of_name, '/');
if (!namestart)
{
/* if on Windows, slashes go the other way */
- namestart = strchr(fullname + start_of_name, '\\');
+ namestart = (char *)strchr(fullname + start_of_name, '\\');
}
- nameend = strchr(fullname + start_of_name, '.');
+ nameend = (char *)strchr(fullname + start_of_name, '.');
/* sometimes fullname has an extension, sometimes it doesn't */
if (!nameend)
{
@@ -2680,4 +3524,105 @@ static void print_msg(THD *thd, const char *table_name, const char *op_name,
thd->killed=1;
}
+/* Load shared area with rows per key statistics */
+void
+ha_gemini::get_index_stats(THD *thd)
+{
+ dsmStatus_t rc = 0;
+ ha_rows *rec_per_key = share->rec_per_key;
+
+ for(uint i = 0; i < table->keys && !rc; i++)
+ {
+ for (uint j = 0; j < table->key_info[i].key_parts && !rc;j++)
+ {
+ LONG64 rows_per_key;
+ rc = dsmIndexStatsGet((dsmContext_t *)thd->gemini.context,
+ tableNumber, pindexNumbers[i],(int)j,
+ &rows_per_key);
+ if (rc)
+ {
+ gemini_msg((dsmContext_t *)thd->gemini.context,
+                 "Index Statistics failed for table %d index %d, error %d",
+ tableNumber, pindexNumbers[i], rc);
+ }
+ *rec_per_key = (ha_rows)rows_per_key;
+ rec_per_key++;
+ }
+ }
+ return;
+}
+
+/****************************************************************************
+ Handling the shared GEM_SHARE structure that is needed to provide
+ a global in memory storage location of the rec_per_key stats used
+ by the optimizer.
+****************************************************************************/
+
+static byte* gem_get_key(GEM_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length=share->table_name_length;
+ return (byte*) share->table_name;
+}
+
+static GEM_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ GEM_SHARE *share;
+
+ pthread_mutex_lock(&gem_mutex);
+ uint length=(uint) strlen(table_name);
+ if (!(share=(GEM_SHARE*) hash_search(&gem_open_tables, (byte*) table_name,
+ length)))
+ {
+ ha_rows *rec_per_key;
+ char *tmp_name;
+
+ if ((share=(GEM_SHARE *)
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &rec_per_key, table->key_parts * sizeof(ha_rows),
+ &tmp_name, length+1,
+ NullS)))
+ {
+ share->rec_per_key = rec_per_key;
+ share->table_name = tmp_name;
+ share->table_name_length=length;
+ strcpy(share->table_name,table_name);
+ if (hash_insert(&gem_open_tables, (byte*) share))
+ {
+ pthread_mutex_unlock(&gem_mutex);
+ my_free((gptr) share,0);
+ return 0;
+ }
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex,NULL);
+ }
+ }
+ pthread_mutex_unlock(&gem_mutex);
+ return share;
+}
+
+static int free_share(GEM_SHARE *share, bool mutex_is_locked)
+{
+ pthread_mutex_lock(&gem_mutex);
+ if (mutex_is_locked)
+ pthread_mutex_unlock(&share->mutex);
+ if (!--share->use_count)
+ {
+ hash_delete(&gem_open_tables, (byte*) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+ my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&gem_mutex);
+ return 0;
+}
+
+static void gemini_lock_table_overflow_error(dsmContext_t *pcontext)
+{
+ gemini_msg(pcontext, "The total number of locks exceeds the lock table size");
+ gemini_msg(pcontext, "Either increase gemini_lock_table_size or use a");
+ gemini_msg(pcontext, "different transaction isolation level");
+}
+
#endif /* HAVE_GEMINI_DB */
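The GEM_SHARE registry above (get_share()/free_share() keyed on table name, guarded by gem_mutex) follows the usual shared-handler-state pattern. A minimal sketch of the same idea, with std::map and std::mutex standing in for MySQL's HASH and pthread primitives, and with an explicit use_count increment added for clarity:

    #include <cstddef>
    #include <map>
    #include <mutex>
    #include <string>
    #include <vector>

    // Illustrative stand-in for GEM_SHARE: one entry per open table, found by
    // name, reference-counted, holding the per-key-part statistics that
    // get_index_stats()/saveKeyStats() read and write.
    struct TableShare
    {
      std::vector<unsigned long> rec_per_key;
      unsigned use_count = 0;
    };

    static std::mutex registry_mutex;
    static std::map<std::string, TableShare> registry;

    TableShare *get_share(const std::string &name, std::size_t key_parts)
    {
      std::lock_guard<std::mutex> guard(registry_mutex);
      TableShare &share = registry[name];        // created zero-filled on first use
      if (share.rec_per_key.empty())
        share.rec_per_key.resize(key_parts, 0);
      share.use_count++;
      return &share;
    }

    void free_share(const std::string &name)
    {
      std::lock_guard<std::mutex> guard(registry_mutex);
      std::map<std::string, TableShare>::iterator it = registry.find(name);
      if (it != registry.end() && --it->second.use_count == 0)
        registry.erase(it);                      // last user closes the table
    }
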
diff --git a/sql/ha_gemini.h b/sql/ha_gemini.h
index 7e6e8f26588..495dc2fd1c9 100644
--- a/sql/ha_gemini.h
+++ b/sql/ha_gemini.h
@@ -19,17 +19,26 @@
#pragma interface /* gcc class implementation */
#endif
+#include "gem_global.h"
#include "dstd.h"
#include "dsmpub.h"
/* class for the the gemini handler */
enum enum_key_string_options{KEY_CREATE,KEY_DELETE,KEY_CHECK};
-
-#define READ_UNCOMMITED 0
-#define READ_COMMITED 1
-#define REPEATABLE_READ 2
-#define SERIALIZEABLE 3
+typedef struct st_gemini_share {
+ ha_rows *rec_per_key;
+ THR_LOCK lock;
+ pthread_mutex_t mutex;
+ char *table_name;
+ uint table_name_length,use_count;
+} GEM_SHARE;
+
+typedef struct gemBlobDesc
+{
+ dsmBlobId_t blobId;
+ dsmBuffer_t *pBlob;
+} gemBlobDesc_t;
class ha_gemini: public handler
{
@@ -38,7 +47,7 @@ class ha_gemini: public handler
uint int_option_flag;
int tableNumber;
dsmIndex_t *pindexNumbers; // dsm object numbers for the indexes on this table
- unsigned long lastRowid;
+ dsmRecid_t lastRowid;
uint last_dup_key;
bool fixed_length_row, key_read, using_ignore;
byte *rec_buff;
@@ -46,10 +55,12 @@ class ha_gemini: public handler
dsmKey_t *pbracketLimit;
dsmKey_t *pfoundKey;
dsmMask_t tableStatus; // Crashed/repair status
+ gemBlobDesc_t *pBlobDescs;
int index_open(char *tableName);
- int pack_row(byte **prow, int *ppackedLength, const byte *record);
- void unpack_row(char *record, char *prow);
+ int pack_row(byte **prow, int *ppackedLength, const byte *record,
+ bool update);
+ int unpack_row(char *record, char *prow);
int findRow(THD *thd, dsmMask_t findMode, byte *buf);
int fetch_row(void *gemini_context, const byte *buf);
int handleIndexEntries(const byte * record, dsmRecid_t recid,
@@ -70,24 +81,28 @@ class ha_gemini: public handler
void unpack_key(char *record, dsmKey_t *key, uint index);
int key_cmp(uint keynr, const byte * old_row,
- const byte * new_row);
+ const byte * new_row, bool updateStats);
+ int saveKeyStats(THD *thd);
+ void get_index_stats(THD *thd);
short cursorId; /* cursorId of active index cursor if any */
dsmMask_t lockMode; /* Shared or exclusive */
/* FIXFIX Don't know why we need this because I don't know what
store_lock method does but we core dump without this */
- THR_LOCK alock;
THR_LOCK_DATA lock;
+ GEM_SHARE *share;
+
public:
ha_gemini(TABLE *table): handler(table), file(0),
int_option_flag(HA_READ_NEXT | HA_READ_PREV |
HA_REC_NOT_IN_SEQ |
HA_KEYPOS_TO_RNDPOS | HA_READ_ORDER | HA_LASTKEY_ORDER |
HA_LONGLONG_KEYS | HA_NULL_KEY | HA_HAVE_KEY_READ_ONLY |
- HA_NO_BLOBS | HA_NO_TEMP_TABLES |
- /* HA_BLOB_KEY | */ /*HA_NOT_EXACT_COUNT | */
+ HA_BLOB_KEY |
+ HA_NO_TEMP_TABLES | HA_NO_FULLTEXT_KEY |
+ /*HA_NOT_EXACT_COUNT | */
/*HA_KEY_READ_WRONG_STR |*/ HA_DROP_BEFORE_CREATE),
pbracketBase(0),pbracketLimit(0),pfoundKey(0),
cursorId(0)
@@ -100,7 +115,7 @@ class ha_gemini: public handler
uint max_record_length() const { return MAXRECSZ; }
uint max_keys() const { return MAX_KEY-1; }
uint max_key_parts() const { return MAX_REF_PARTS; }
- uint max_key_length() const { return MAXKEYSZ; }
+ uint max_key_length() const { return MAXKEYSZ / 2; }
bool fast_key_read() { return 1;}
bool has_transactions() { return 1;}
@@ -129,8 +144,12 @@ class ha_gemini: public handler
void info(uint);
int extra(enum ha_extra_function operation);
int reset(void);
+ int analyze(THD* thd, HA_CHECK_OPT* check_opt);
int check(THD* thd, HA_CHECK_OPT* check_opt);
int repair(THD* thd, HA_CHECK_OPT* check_opt);
+ int restore(THD* thd, HA_CHECK_OPT* check_opt);
+ int backup(THD* thd, HA_CHECK_OPT* check_opt);
+ int optimize(THD* thd, HA_CHECK_OPT* check_opt);
int external_lock(THD *thd, int lock_type);
virtual longlong get_auto_increment();
void position(byte *record);
@@ -139,7 +158,7 @@ class ha_gemini: public handler
enum ha_rkey_function start_search_flag,
const byte *end_key,uint end_key_len,
enum ha_rkey_function end_search_flag);
-
+ void update_create_info(HA_CREATE_INFO *create_info);
int create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
@@ -167,6 +186,7 @@ extern long gemini_locktablesize;
extern long gemini_lock_wait_timeout;
extern long gemini_spin_retries;
extern long gemini_connection_limit;
+extern char *gemini_basedir;
extern TYPELIB gemini_recovery_typelib;
extern ulong gemini_recovery_options;
@@ -175,12 +195,13 @@ bool gemini_end(void);
bool gemini_flush_logs(void);
int gemini_commit(THD *thd);
int gemini_rollback(THD *thd);
+int gemini_recovery_logging(THD *thd, bool on);
void gemini_disconnect(THD *thd);
int gemini_rollback_to_savepoint(THD *thd);
int gemini_parse_table_name(const char *fullname, char *dbname, char *tabname);
int gemini_is_vst(const char *pname);
int gemini_set_option_long(int optid, long optval);
-const int gemini_blocksize = 8192;
-const int gemini_recbits = 7;
+const int gemini_blocksize = BLKSIZE;
+const int gemini_recbits = DEFAULT_RECBITS;
diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc
index fa44cebe19d..184c97837db 100644
--- a/sql/ha_innobase.cc
+++ b/sql/ha_innobase.cc
@@ -679,7 +679,7 @@ ha_innobase::bas_ext() const
/* out: file extension strings, currently not
used */
{
- static const char* ext[] = {".not_used", NullS};
+ static const char* ext[] = {".InnoDB", NullS};
return(ext);
}
@@ -779,6 +779,13 @@ ha_innobase::open(
if (NULL == (ib_table = dict_table_get(norm_name, NULL))) {
+ fprintf(stderr, "\
+Cannot find table %s from the internal data dictionary\n\
+of InnoDB though the .frm file for the table exists. Maybe you have deleted\n\
+and created again an InnoDB database but forgotten to delete the\n\
+corresponding .frm files of old InnoDB tables?\n",
+ norm_name);
+
free_share(share);
my_free((char*) upd_buff, MYF(0));
my_errno = ENOENT;
@@ -1516,6 +1523,10 @@ ha_innobase::update_row(
DBUG_ENTER("ha_innobase::update_row");
+ if (table->time_stamp) {
+ update_timestamp(new_row + table->time_stamp - 1);
+ }
+
if (last_query_id != user_thd->query_id) {
prebuilt->sql_stat_start = TRUE;
last_query_id = user_thd->query_id;
@@ -2142,6 +2153,7 @@ ha_innobase::external_lock(
prebuilt->in_update_remember_pos = TRUE;
if (lock_type == F_WRLCK) {
+
/* If this is a SELECT, then it is in UPDATE TABLE ...
or SELECT ... FOR UPDATE */
prebuilt->select_lock_type = LOCK_X;
@@ -2153,13 +2165,27 @@ ha_innobase::external_lock(
}
trx->n_mysql_tables_in_use++;
+
+ if (prebuilt->select_lock_type != LOCK_NONE) {
+
+ trx->mysql_n_tables_locked++;
+ }
} else {
trx->n_mysql_tables_in_use--;
- if (trx->n_mysql_tables_in_use == 0 &&
- !(thd->options
- & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
- innobase_commit(thd, trx);
+ if (trx->n_mysql_tables_in_use == 0) {
+
+ trx->mysql_n_tables_locked = 0;
+
+ if (trx->has_search_latch) {
+
+ trx_search_latch_release_if_reserved(trx);
+ }
+
+ if (!(thd->options
+ & (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
+ innobase_commit(thd, trx);
+ }
}
}
@@ -2690,6 +2716,39 @@ ha_innobase::info(
DBUG_VOID_RETURN;
}
+/***********************************************************************
+Tries to check that an InnoDB table is not corrupted. If corruption is
+noticed, prints to stderr information about it. In case of corruption
+may also assert a failure and crash the server. */
+
+int
+ha_innobase::check(
+/*===============*/
+ /* out: HA_ADMIN_CORRUPT or
+ HA_ADMIN_OK */
+ THD* thd, /* in: user thread handle */
+ HA_CHECK_OPT* check_opt) /* in: check options, currently
+ ignored */
+{
+ row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+ ulint ret;
+
+ if (prebuilt->mysql_template == NULL) {
+ /* Build the template; we will use a dummy template
+ in index scans done in checking */
+
+ build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
+ }
+
+ ret = row_check_table_for_mysql(prebuilt);
+
+ if (ret == DB_SUCCESS) {
+ return(HA_ADMIN_OK);
+ }
+
+ return(HA_ADMIN_CORRUPT);
+}
+
/*****************************************************************
Adds information about free space in the InnoDB tablespace to a
table comment which is printed out when a user calls SHOW TABLE STATUS. */
diff --git a/sql/ha_innobase.h b/sql/ha_innobase.h
index 258e34cbf86..d832ac93d0f 100644
--- a/sql/ha_innobase.h
+++ b/sql/ha_innobase.h
@@ -142,7 +142,7 @@ class ha_innobase: public handler
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
int rename_table(const char* from, const char* to);
-
+ int check(THD* thd, HA_CHECK_OPT* check_opt);
char* update_table_comment(const char* comment);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 91c837e8023..63e2cf7c201 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -527,8 +527,8 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
int error=0;
uint extra_testflag=0;
bool optimize_done= !optimize, statistics_done=0;
- char fixed_name[FN_REFLEN];
const char *old_proc_info=thd->proc_info;
+ char fixed_name[FN_REFLEN];
MYISAM_SHARE* share = file->s;
ha_rows rows= file->state->records;
DBUG_ENTER("ha_myisam::repair");
@@ -540,8 +540,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
param.thd=thd;
param.tmpdir=mysql_tmpdir;
param.out_flag=0;
- VOID(fn_format(fixed_name,file->filename,"",MI_NAME_IEXT,
- 4+ (param.opt_follow_links ? 16 : 0)));
+ strmov(fixed_name,file->filename);
// Don't lock tables if we have used LOCK TABLE
if (!thd->locked_tables && mi_lock_database(file,F_WRLCK))
@@ -831,6 +830,8 @@ void ha_myisam::position(const byte* record)
void ha_myisam::info(uint flag)
{
MI_ISAMINFO info;
+ char name_buff[FN_REFLEN];
+
(void) mi_status(file,&info,flag);
if (flag & HA_STATUS_VARIABLE)
{
@@ -860,6 +861,18 @@ void ha_myisam::info(uint flag)
raid_type=info.raid_type;
raid_chunks=info.raid_chunks;
raid_chunksize=info.raid_chunksize;
+
+ /*
+ Set data_file_name and index_file_name to point at the symlink value
+    if the table is symlinked (i.e., the real name is not the same as the generated name)
+ */
+ data_file_name=index_file_name=0;
+ fn_format(name_buff, file->filename, "", MI_NAME_DEXT, 2);
+ if (strcmp(name_buff, info.data_file_name))
+ data_file_name=info.data_file_name;
+ strmov(fn_ext(name_buff),MI_NAME_IEXT);
+ if (strcmp(name_buff, info.index_file_name))
+ index_file_name=info.index_file_name;
}
if (flag & HA_STATUS_ERRKEY)
{
@@ -915,6 +928,7 @@ THR_LOCK_DATA **ha_myisam::store_lock(THD *thd,
void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
{
+ MI_ISAMINFO info;
table->file->info(HA_STATUS_AUTO | HA_STATUS_CONST);
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
@@ -926,6 +940,8 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info)
create_info->raid_chunks= raid_chunks;
create_info->raid_chunksize= raid_chunksize;
}
+ create_info->data_file_name=data_file_name;
+ create_info->index_file_name=index_file_name;
}
@@ -1097,8 +1113,10 @@ int ha_myisam::create(const char *name, register TABLE *form,
create_info.raid_type=info->raid_type;
create_info.raid_chunks=info->raid_chunks ? info->raid_chunks : RAID_DEFAULT_CHUNKS;
create_info.raid_chunksize=info->raid_chunksize ? info->raid_chunksize : RAID_DEFAULT_CHUNKSIZE;
+ create_info.data_file_name= info->data_file_name;
+ create_info.index_file_name=info->index_file_name;
- error=mi_create(fn_format(buff,name,"","",2+4+16),
+ error=mi_create(fn_format(buff,name,"","",2+4),
form->keys,keydef,
(uint) (recinfo_pos-recinfo), recinfo,
0, (MI_UNIQUEDEF*) 0,
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 21b7e5bbd39..ca5007228d8 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -38,6 +38,7 @@ class ha_myisam: public handler
{
MI_INFO *file;
uint int_option_flag,enable_activate_all_index;
+ char *data_file_name, *index_file_name;
int repair(THD *thd, MI_CHECK &param, bool optimize);
public:
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index b842c15cce0..e3e1d959438 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -232,7 +232,7 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info)
for (table=file->open_tables ; table != file->end_table ; table++)
{
- char *name=table->table->s->filename;
+ char *name=table->table->filename;
char buff[FN_REFLEN];
TABLE_LIST *ptr;
if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))
@@ -278,7 +278,7 @@ void ha_myisammrg::append_create_info(String *packet)
for (first=table=file->open_tables ; table != file->end_table ; table++)
{
- char *name=table->table->s->filename;
+ char *name=table->table->filename;
fn_format(buff,name,"","",3);
if (table != first)
packet->append(',');
diff --git a/sql/handler.cc b/sql/handler.cc
index 212fcea11ae..5b5d6d4764c 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -694,6 +694,15 @@ void handler::print_error(int error, myf errflag)
case HA_ERR_RECORD_FILE_FULL:
textno=ER_RECORD_FILE_FULL;
break;
+ case HA_ERR_LOCK_WAIT_TIMEOUT:
+ textno=ER_LOCK_WAIT_TIMEOUT;
+ break;
+ case HA_ERR_LOCK_TABLE_FULL:
+ textno=ER_LOCK_TABLE_FULL;
+ break;
+ case HA_ERR_READ_ONLY_TRANSACTION:
+ textno=ER_READ_ONLY_TRANSACTION;
+ break;
default:
{
my_error(ER_GET_ERRNO,errflag,error);
@@ -757,6 +766,25 @@ int ha_commit_rename(THD *thd)
return error;
}
+/* Tell the handler to turn on or off logging to the handler's
+ recovery log
+*/
+int ha_recovery_logging(THD *thd, bool on)
+{
+ int error=0;
+
+ DBUG_ENTER("ha_recovery_logging");
+#ifdef USING_TRANSACTIONS
+ if (opt_using_transactions)
+ {
+#ifdef HAVE_GEMINI_DB
+ error = gemini_recovery_logging(thd, on);
+#endif
+ }
+#endif
+ DBUG_RETURN(error);
+}
+
int handler::index_next_same(byte *buf, const byte *key, uint keylen)
{
int error;
@@ -825,5 +853,5 @@ static int NEAR_F delete_file(const char *name,const char *ext,int extflag)
{
char buff[FN_REFLEN];
VOID(fn_format(buff,name,"",ext,extflag | 4));
- return(my_delete(buff,MYF(MY_WME)));
+ return(my_delete_with_symlink(buff,MYF(MY_WME)));
}
diff --git a/sql/handler.h b/sql/handler.h
index 076bf783f80..fc20e563f9f 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -74,6 +74,7 @@
#define HA_NOT_DELETE_WITH_CACHE (HA_NOT_READ_AFTER_KEY*2)
#define HA_NO_TEMP_TABLES (HA_NOT_DELETE_WITH_CACHE*2)
#define HA_NO_PREFIX_CHAR_KEYS (HA_NO_TEMP_TABLES*2)
+#define HA_NO_FULLTEXT_KEY (HA_NO_PREFIX_CHAR_KEYS*2)
/* Parameters for open() (in register form->filestat) */
/* HA_GET_INFO does a implicit HA_ABORT_IF_LOCKED */
@@ -141,6 +142,7 @@ typedef struct st_ha_create_information
ulonglong max_rows,min_rows;
ulonglong auto_increment_value;
char *comment,*password;
+ char *data_file_name, *index_file_name;
uint options; /* OR of HA_CREATE_ options */
uint raid_type,raid_chunks;
ulong raid_chunksize;
@@ -353,3 +355,4 @@ int ha_autocommit_or_rollback(THD *thd, int error);
void ha_set_spin_retries(uint retries);
bool ha_flush_logs(void);
int ha_commit_rename(THD *thd);
+int ha_recovery_logging(THD *thd, bool on);
diff --git a/sql/item.cc b/sql/item.cc
index b268c5eb928..44bbf9a9cbc 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -561,7 +561,7 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables)
{
if (!ref)
{
- if (!(ref=find_item_in_list(this,thd->lex.item_list)))
+ if (!(ref=find_item_in_list(this,thd->lex.select->item_list)))
return 1;
max_length= (*ref)->max_length;
maybe_null= (*ref)->maybe_null;
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index e7a6c52dfd9..373aede7b6b 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -487,7 +487,7 @@ Item_func_if::fix_length_and_dec()
{
maybe_null=args[1]->maybe_null || args[2]->maybe_null;
max_length=max(args[1]->max_length,args[2]->max_length);
- decimals=max(args[0]->decimals,args[1]->decimals);
+ decimals=max(args[1]->decimals,args[2]->decimals);
enum Item_result arg1_type=args[1]->result_type();
enum Item_result arg2_type=args[2]->result_type();
if (arg1_type == STRING_RESULT || arg2_type == STRING_RESULT)
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 84bc972608e..8a2bd15ae6d 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1758,7 +1758,7 @@ Item_func_get_user_var::val_str(String *str)
return NULL;
switch (entry->type) {
case REAL_RESULT:
- str->set(*(double*) entry->value);
+ str->set(*(double*) entry->value,decimals);
break;
case INT_RESULT:
str->set(*(longlong*) entry->value);
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 80f72c30e57..9d69b713611 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -30,7 +30,6 @@
#ifdef HAVE_CRYPT_H
#include <crypt.h>
#endif
-
#include "md5.h"
String empty_string("");
@@ -66,13 +65,13 @@ String *Item_func_md5::val_str(String *str)
String * sptr= args[0]->val_str(str);
if (sptr)
{
- MD5_CTX context;
+ my_MD5_CTX context;
unsigned char digest[16];
null_value=0;
- MD5Init (&context);
- MD5Update (&context,(unsigned char *) sptr->ptr(), sptr->length());
- MD5Final (digest, &context);
+ my_MD5Init (&context);
+ my_MD5Update (&context,(unsigned char *) sptr->ptr(), sptr->length());
+ my_MD5Final (digest, &context);
str->alloc(32); // Ensure that memory is free
sprintf((char *) str->ptr(),
"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 40fe52a12e5..8d025891877 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -811,7 +811,7 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2)
{
int res;
Field* f = *field;
- int len = f->field_length;
+ int len = f->pack_length();
switch((*field)->type())
{
case FIELD_TYPE_STRING:
@@ -839,7 +839,8 @@ int dump_leaf(byte* key, uint32 count __attribute__((unused)),
{
char* buf = item->table->record[0];
int error;
- memset(buf, 0xff, item->rec_offset); // make up for cheating in the tree
+ // the first item->rec_offset bytes are taken care of with
+ // restore_record(table,2) in setup()
memcpy(buf + item->rec_offset, key, item->tree.size_of_element);
if ((error = item->table->file->write_row(buf)))
{
@@ -874,8 +875,19 @@ bool Item_sum_count_distinct::setup(THD *thd)
List<Item> list;
/* Create a table with an unique key over all parameters */
for (uint i=0; i < arg_count ; i++)
- if (list.push_back(args[i]))
- return 1;
+ {
+ Item *item=args[i];
+ if (list.push_back(item))
+ return 1; // End of memory
+ if (item->const_item())
+ {
+ (void) item->val_int();
+ if (item->null_value)
+ always_null=1;
+ }
+ }
+ if (always_null)
+ return 0;
count_field_types(tmp_table_param,list,0);
if (table)
{
@@ -883,17 +895,22 @@ bool Item_sum_count_distinct::setup(THD *thd)
tmp_table_param->cleanup();
}
if (!(table=create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1,
- 0, 0, current_lex->options | thd->options)))
+ 0, 0, current_lex->select->options | thd->options)))
return 1;
table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows
table->no_rows=1;
+
if(table->db_type == DB_TYPE_HEAP) // no blobs, otherwise it would be
// MyISAM
{
qsort_cmp2 compare_key;
void* cmp_arg;
int key_len;
+
+ // to make things easier for dump_leaf if we ever have to dump to
+ // MyISAM
+ restore_record(table,2);
if(table->fields == 1) // if we have only one field, which is
// the most common use of count(distinct), it is much faster
@@ -915,20 +932,31 @@ bool Item_sum_count_distinct::setup(THD *thd)
compare_key = (qsort_cmp2)simple_raw_key_cmp;
break;
}
- cmp_arg = (void*)(key_len = field->field_length);
+ cmp_arg = (void*)(key_len = field->pack_length());
rec_offset = 1;
}
else // too bad, cannot cheat - there is more than one field
{
- cmp_arg = (void*)this;
- compare_key = (qsort_cmp2)composite_key_cmp;
+ bool all_binary = 1;
Field** field, **field_end;
field_end = (field = table->field) + table->fields;
for(key_len = 0; field < field_end; ++field)
{
- key_len += (*field)->field_length;
+ key_len += (*field)->pack_length();
+ if(!(*field)->binary())
+ all_binary = 0;
}
rec_offset = table->reclength - key_len;
+ if(all_binary)
+ {
+ compare_key = (qsort_cmp2)simple_raw_key_cmp;
+ cmp_arg = (void*)key_len;
+ }
+ else
+ {
+ compare_key = (qsort_cmp2)composite_key_cmp ;
+ cmp_arg = (void*)this;
+ }
}
init_tree(&tree, min(max_heap_table_size, sortbuff_size/16), 0,
@@ -940,7 +968,7 @@ bool Item_sum_count_distinct::setup(THD *thd)
// but this has to be handled - otherwise someone can crash
// the server with a DoS attack
max_elements_in_tree = (key_len) ? max_heap_table_size/key_len :
- max_heap_table_size;
+ 1;
}
return 0;
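The all_binary branch added to Item_sum_count_distinct::setup() above chooses between one raw memcmp over the packed key and a field-by-field comparison. A small sketch of why that choice exists (KeyPart and the tolower() loop are simplifications, not the server's types):

    #include <cctype>
    #include <cstddef>
    #include <cstring>

    struct KeyPart { std::size_t length; bool binary; };

    // When every column of the packed key collates by raw bytes and has a
    // fixed length, one memcmp over the concatenation is enough
    // (simple_raw_key_cmp); otherwise each column needs its own comparison
    // rules, which is what composite_key_cmp does field by field.
    static int keys_equal(const KeyPart *parts, std::size_t n_parts,
                          const unsigned char *a, const unsigned char *b)
    {
      std::size_t total = 0;
      bool all_binary = true;
      for (std::size_t i = 0; i < n_parts; i++)
      {
        total += parts[i].length;
        all_binary = all_binary && parts[i].binary;
      }
      if (all_binary)
        return std::memcmp(a, b, total) == 0;      // cheap path

      for (std::size_t i = 0; i < n_parts; i++)    // per-column semantics
      {
        for (std::size_t k = 0; k < parts[i].length; k++)
          if (std::tolower(a[k]) != std::tolower(b[k]))  // stand-in for collation rules
            return 0;
        a += parts[i].length;
        b += parts[i].length;
      }
      return 1;
    }
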
@@ -960,20 +988,22 @@ int Item_sum_count_distinct::tree_to_myisam()
void Item_sum_count_distinct::reset()
{
- if(use_tree)
+ if (use_tree)
reset_tree(&tree);
- else
- {
- table->file->extra(HA_EXTRA_NO_CACHE);
- table->file->delete_all_rows();
- table->file->extra(HA_EXTRA_WRITE_CACHE);
- }
+ else if (table)
+ {
+ table->file->extra(HA_EXTRA_NO_CACHE);
+ table->file->delete_all_rows();
+ table->file->extra(HA_EXTRA_WRITE_CACHE);
+ }
(void) add();
}
bool Item_sum_count_distinct::add()
{
int error;
+ if (always_null)
+ return 0;
copy_fields(tmp_table_param);
copy_funcs(tmp_table_param->funcs);
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 1aa7f78d786..753a9de8b48 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -148,19 +148,21 @@ class Item_sum_count_distinct :public Item_sum_int
bool fix_fields(THD *thd,TABLE_LIST *tables);
TMP_TABLE_PARAM *tmp_table_param;
TREE tree;
- uint max_elements_in_tree;
+
// calculated based on max_heap_table_size. If reached,
// walk the tree and dump it into MyISAM table
+ uint max_elements_in_tree;
+
+ // the first few bytes of record ( at least one)
+ // are just markers for deleted and NULLs. We want to skip them since
+ // they will just bloat the tree without providing any valuable info
+ int rec_offset;
- bool use_tree;
// If there are no blobs, we can use a tree, which
// is faster than heap table. In that case, we still use the table
// to help get things set up, but we insert nothing in it
-
- int rec_offset;
- // the first few bytes of record ( at least one)
- // are just markers for deleted and NULLs. We want to skip them since
- // they will just bloat the tree without providing any valuable info
+ bool use_tree;
+ bool always_null; // Set to 1 if the result is always NULL
int tree_to_myisam();
@@ -171,7 +173,7 @@ class Item_sum_count_distinct :public Item_sum_int
public:
Item_sum_count_distinct(List<Item> &list)
:Item_sum_int(list),table(0),used_table_cache(~(table_map) 0),
- tmp_table_param(0),use_tree(0)
+ tmp_table_param(0),use_tree(0),always_null(0)
{ quick_group=0; }
~Item_sum_count_distinct();
table_map used_tables() const { return used_table_cache; }
diff --git a/sql/lex.h b/sql/lex.h
index c29c4081787..e9ab150f5b2 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -70,6 +70,7 @@ static SYMBOL symbols[] = {
{ "BIGINT", SYM(BIGINT),0,0},
{ "BIT", SYM(BIT_SYM),0,0},
{ "BINARY", SYM(BINARY),0,0},
+ { "BINLOG", SYM(BINLOG_SYM),0,0},
{ "BLOB", SYM(BLOB_SYM),0,0},
{ "BOOL", SYM(BOOL_SYM),0,0},
{ "BOTH", SYM(BOTH),0,0},
@@ -113,6 +114,7 @@ static SYMBOL symbols[] = {
{ "DELETE", SYM(DELETE_SYM),0,0},
{ "DESC", SYM(DESC),0,0},
{ "DESCRIBE", SYM(DESCRIBE),0,0},
+ { "DIRECTORY", SYM(DIRECTORY_SYM),0,0},
{ "DISABLE", SYM(DISABLE_SYM),0,0},
{ "DISTINCT", SYM(DISTINCT),0,0},
{ "DISTINCTROW", SYM(DISTINCT),0,0}, /* Access likes this */
@@ -127,6 +129,7 @@ static SYMBOL symbols[] = {
{ "ENABLE", SYM(ENABLE_SYM),0,0},
{ "ENCLOSED", SYM(ENCLOSED),0,0},
{ "ENUM", SYM(ENUM),0,0},
+ { "EVENTS", SYM(EVENTS_SYM),0,0},
{ "EXPLAIN", SYM(DESCRIBE),0,0},
{ "EXISTS", SYM(EXISTS),0,0},
{ "EXTENDED", SYM(EXTENDED_SYM),0,0},
@@ -167,6 +170,7 @@ static SYMBOL symbols[] = {
{ "IGNORE", SYM(IGNORE_SYM),0,0},
{ "IN", SYM(IN_SYM),0,0},
{ "INDEX", SYM(INDEX),0,0},
+ { "INDEXES", SYM(INDEXES),0,0},
{ "INFILE", SYM(INFILE),0,0},
{ "INNER", SYM(INNER_SYM),0,0},
{ "INNOBASE", SYM(INNOBASE_SYM),0,0},
diff --git a/sql/lock.cc b/sql/lock.cc
index 23f81c9c164..1d9aca66e74 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -35,6 +35,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count,
bool unlock, TABLE **write_locked);
static int lock_external(TABLE **table,uint count);
static int unlock_external(THD *thd, TABLE **table,uint count);
+static void print_lock_error(int error);
MYSQL_LOCK *mysql_lock_tables(THD *thd,TABLE **tables,uint count)
@@ -154,7 +155,7 @@ static int lock_external(TABLE **tables,uint count)
(*tables)->file->external_lock(thd, F_UNLCK);
(*tables)->current_lock=F_UNLCK;
}
- my_error(ER_CANT_LOCK,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error);
+ print_lock_error(error);
DBUG_RETURN(error);
}
else
@@ -325,7 +326,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count)
}
}
if (error_code)
- my_error(ER_CANT_LOCK,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error_code);
+ print_lock_error(error_code);
DBUG_RETURN(error_code);
}
@@ -480,3 +481,24 @@ bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list)
}
DBUG_RETURN(result);
}
+
+static void print_lock_error(int error)
+{
+ int textno;
+ DBUG_ENTER("print_lock_error");
+
+ switch (error) {
+ case HA_ERR_LOCK_WAIT_TIMEOUT:
+ textno=ER_LOCK_WAIT_TIMEOUT;
+ break;
+ case HA_ERR_READ_ONLY_TRANSACTION:
+ textno=ER_READ_ONLY_TRANSACTION;
+ break;
+ default:
+ textno=ER_CANT_LOCK;
+ break;
+ }
+ my_error(textno,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error);
+ DBUG_VOID_RETURN;
+}
+
diff --git a/sql/log.cc b/sql/log.cc
index 4cd93261973..40e5d5673be 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -81,7 +81,7 @@ static int find_uniq_filename(char *name)
MYSQL_LOG::MYSQL_LOG(): last_time(0), query_start(0),index_file(-1),
name(0), log_type(LOG_CLOSED),write_error(0),
- inited(0), no_rotate(0)
+ inited(0), log_seq(1), no_rotate(0)
{
/*
We don't want to intialize LOCK_Log here as the thread system may
@@ -230,8 +230,11 @@ void MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
if ((do_magic && my_b_write(&log_file, (byte*) BINLOG_MAGIC, 4)) ||
open_index(O_APPEND | O_RDWR | O_CREAT))
goto err;
+
+ log_seq = 1;
Start_log_event s;
bool error;
+ s.set_log_seq(0, this);
s.write(&log_file);
flush_io_cache(&log_file);
pthread_mutex_lock(&LOCK_index);
@@ -531,6 +534,14 @@ void MYSQL_LOG::new_file()
to change base names at some point.
*/
Rotate_log_event r(new_name+dirname_length(new_name));
+ THD* thd = current_thd;
+ r.set_log_seq(0, this);
+    // This log rotation could have been initiated by a master of
+    // the slave running with log-bin; we set the flag on the rotate
+    // event to prevent an infinite log rotation loop
+ if(thd && slave_thd && thd == slave_thd)
+ r.flags |= LOG_EVENT_FORCED_ROTATE_F;
r.write(&log_file);
VOID(pthread_cond_broadcast(&COND_binlog_update));
}
@@ -626,6 +637,21 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command,
/* Write to binary log in a format to be used for replication */
+bool MYSQL_LOG::write(Slave_log_event* event_info)
+{
+ bool error;
+ if (!inited) // Can't use mutex if not init
+ return 0;
+ VOID(pthread_mutex_lock(&LOCK_log));
+ if(!event_info->log_seq)
+ event_info->set_log_seq(current_thd, this);
+ error = event_info->write(&log_file);
+ flush_io_cache(&log_file);
+ VOID(pthread_mutex_unlock(&LOCK_log));
+ return error;
+}
+
+
bool MYSQL_LOG::write(Query_log_event* event_info)
{
/* In most cases this is only called if 'is_open()' is true */
@@ -638,8 +664,12 @@ bool MYSQL_LOG::write(Query_log_event* event_info)
if (is_open())
{
THD *thd=event_info->thd;
+#ifdef USING_TRANSACTIONS
IO_CACHE *file = (event_info->cache_stmt ? &thd->transaction.trans_log :
&log_file);
+#else
+ IO_CACHE *file = &log_file;
+#endif
if ((!(thd->options & OPTION_BIN_LOG) &&
(thd->master_access & PROCESS_ACL)) ||
!db_ok(event_info->db, binlog_do_db, binlog_ignore_db))
@@ -652,12 +682,18 @@ bool MYSQL_LOG::write(Query_log_event* event_info)
if (thd->last_insert_id_used)
{
Intvar_log_event e((uchar)LAST_INSERT_ID_EVENT, thd->last_insert_id);
+ e.set_log_seq(thd, this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
if (e.write(file))
goto err;
}
if (thd->insert_id_used)
{
Intvar_log_event e((uchar)INSERT_ID_EVENT, thd->last_insert_id);
+ e.set_log_seq(thd, this);
+ if (thd->server_id)
+ e.server_id = thd->server_id;
if (e.write(file))
goto err;
}
@@ -670,10 +706,12 @@ bool MYSQL_LOG::write(Query_log_event* event_info)
// just in case somebody wants it later
thd->query_length = (uint)(p - buf);
Query_log_event e(thd, buf);
+ e.set_log_seq(thd, this);
if (e.write(file))
goto err;
thd->query_length = save_query_length; // clean up
}
+ event_info->set_log_seq(thd, this);
if (event_info->write(file) ||
file == &log_file && flush_io_cache(file))
goto err;
@@ -768,6 +806,7 @@ bool MYSQL_LOG::write(Load_log_event* event_info)
if ((thd->options & OPTION_BIN_LOG) ||
!(thd->master_access & PROCESS_ACL))
{
+ event_info->set_log_seq(thd, this);
if (event_info->write(&log_file) || flush_io_cache(&log_file))
{
if (!write_error)
@@ -919,6 +958,7 @@ void MYSQL_LOG::close(bool exiting)
if (log_type == LOG_BIN)
{
Stop_log_event s;
+ s.set_log_seq(0, this);
s.write(&log_file);
VOID(pthread_cond_broadcast(&COND_binlog_update));
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index ac985c266c8..5538e6c0b7f 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -20,6 +20,7 @@
#pragma implementation // gcc: Class implementation
#endif
#include "mysql_priv.h"
+#include "slave.h"
#endif /* MYSQL_CLIENT */
@@ -31,6 +32,7 @@ static void pretty_print_char(FILE* file, int c)
case '\r': fprintf(file, "\\r"); break;
case '\\': fprintf(file, "\\\\"); break;
case '\b': fprintf(file, "\\b"); break;
+ case '\t': fprintf(file, "\\t"); break;
case '\'': fprintf(file, "\\'"); break;
case 0 : fprintf(file, "\\0"); break;
default:
@@ -40,6 +42,220 @@ static void pretty_print_char(FILE* file, int c)
fputc('\'', file);
}
+#ifndef MYSQL_CLIENT
+
+static void pretty_print_char(String* packet, int c)
+{
+ packet->append('\'');
+ switch(c) {
+ case '\n': packet->append( "\\n"); break;
+ case '\r': packet->append( "\\r"); break;
+ case '\\': packet->append( "\\\\"); break;
+ case '\b': packet->append( "\\b"); break;
+ case '\t': packet->append( "\\t"); break;
+ case '\'': packet->append( "\\'"); break;
+ case 0 : packet->append( "\\0"); break;
+ default:
+ packet->append((char)c);
+ break;
+ }
+ packet->append('\'');
+}
+
+#endif
+
+const char* Log_event::get_type_str()
+{
+ switch(get_type_code())
+ {
+ case START_EVENT: return "Start";
+ case STOP_EVENT: return "Stop";
+ case QUERY_EVENT: return "Query";
+ case ROTATE_EVENT: return "Rotate";
+ case INTVAR_EVENT: return "Intvar";
+ case LOAD_EVENT: return "Load";
+ case SLAVE_EVENT: return "Slave";
+ default: /* impossible */ return "Unknown";
+ }
+}
+
+#ifndef MYSQL_CLIENT
+
+void Log_event::pack_info(String* packet)
+{
+ net_store_data(packet, "", 0);
+}
+
+void Query_log_event::pack_info(String* packet)
+{
+ String tmp;
+ if(db && db_len)
+ {
+ tmp.append("use ");
+ tmp.append(db, db_len);
+ tmp.append("; ", 2);
+ }
+
+ if(query && q_len)
+ tmp.append(query, q_len);
+ net_store_data(packet, (char*)tmp.ptr(), tmp.length());
+}
+
+void Start_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+
+ tmp.append("Server ver: ");
+ tmp.append(server_version);
+ tmp.append(", Binlog ver: ");
+ tmp.append(llstr(binlog_version, buf));
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Load_log_event::pack_info(String* packet)
+{
+ String tmp;
+ if(db && db_len)
+ {
+ tmp.append("use ");
+ tmp.append(db, db_len);
+ tmp.append("; ", 2);
+ }
+
+ tmp.append("LOAD DATA INFILE '");
+ tmp.append(fname);
+ tmp.append("' ", 2);
+  if(sql_ex.opt_flags & REPLACE_FLAG )
+ tmp.append(" REPLACE ");
+  else if(sql_ex.opt_flags & IGNORE_FLAG )
+ tmp.append(" IGNORE ");
+
+ tmp.append("INTO TABLE ");
+ tmp.append(table_name);
+ if (!(sql_ex.empty_flags & FIELD_TERM_EMPTY))
+ {
+ tmp.append(" FIELDS TERMINATED BY ");
+ pretty_print_char(&tmp, sql_ex.field_term);
+ }
+
+ if (!(sql_ex.empty_flags & ENCLOSED_EMPTY))
+ {
+      if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG )
+ tmp.append(" OPTIONALLY ");
+ tmp.append( " ENCLOSED BY ");
+ pretty_print_char(&tmp, sql_ex.enclosed);
+ }
+
+ if (!(sql_ex.empty_flags & ESCAPED_EMPTY))
+ {
+ tmp.append( " ESCAPED BY ");
+ pretty_print_char(&tmp, sql_ex.escaped);
+ }
+
+ if (!(sql_ex.empty_flags & LINE_TERM_EMPTY))
+ {
+ tmp.append(" LINES TERMINATED BY ");
+ pretty_print_char(&tmp, sql_ex.line_term);
+ }
+
+ if (!(sql_ex.empty_flags & LINE_START_EMPTY))
+ {
+ tmp.append(" LINES STARTING BY ");
+ pretty_print_char(&tmp, sql_ex.line_start);
+ }
+
+  if ((int)skip_lines > 0)
+  {
+    char sbuf[32];
+    sprintf(sbuf, " IGNORE %ld LINES ", (long) skip_lines);
+    tmp.append(sbuf);
+  }
+
+ if (num_fields)
+ {
+ uint i;
+ const char* field = fields;
+ tmp.append(" (");
+ for(i = 0; i < num_fields; i++)
+ {
+ if(i)
+ tmp.append(" ,");
+ tmp.append( field);
+
+ field += field_lens[i] + 1;
+ }
+ tmp.append(')');
+ }
+
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Rotate_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+ tmp.append(new_log_ident, ident_len);
+ tmp.append(";pos=");
+ tmp.append(llstr(pos,buf));
+ if(flags & LOG_EVENT_FORCED_ROTATE_F)
+ tmp.append("; forced by master");
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Intvar_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+ tmp.append(get_var_type_name());
+ tmp.append('=');
+ tmp.append(llstr(val, buf));
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+void Slave_log_event::pack_info(String* packet)
+{
+ String tmp;
+ char buf[22];
+ tmp.append("host=");
+ tmp.append(master_host);
+ tmp.append(",port=");
+ tmp.append(llstr(master_port,buf));
+ tmp.append(",log=");
+ tmp.append(master_log);
+ tmp.append(",pos=");
+ tmp.append(llstr(master_pos,buf));
+ net_store_data(packet, tmp.ptr(), tmp.length());
+}
+
+
+void Log_event::init_show_field_list(List<Item>* field_list)
+{
+ field_list->push_back(new Item_empty_string("Log_name", 20));
+ field_list->push_back(new Item_empty_string("Pos", 20));
+ field_list->push_back(new Item_empty_string("Event_type", 20));
+ field_list->push_back(new Item_empty_string("Server_id", 20));
+ field_list->push_back(new Item_empty_string("Log_seq", 20));
+ field_list->push_back(new Item_empty_string("Info", 20));
+}
+
+int Log_event::net_send(THD* thd, const char* log_name, ulong pos)
+{
+ String* packet = &thd->packet;
+ const char* p = strrchr(log_name, FN_LIBCHAR);
+ const char* event_type;
+ if (p)
+ log_name = p + 1;
+
+ packet->length(0);
+ net_store_data(packet, log_name, strlen(log_name));
+ net_store_data(packet, (longlong)pos);
+ event_type = get_type_str();
+ net_store_data(packet, event_type, strlen(event_type));
+ net_store_data(packet, server_id);
+ net_store_data(packet, log_seq);
+ pack_info(packet);
+ return my_net_write(&thd->net, (char*)packet->ptr(), packet->length());
+}
+
+#endif
+
int Query_log_event::write(IO_CACHE* file)
{
return query ? Log_event::write(file) : -1;
@@ -52,7 +268,6 @@ int Log_event::write(IO_CACHE* file)
int Log_event::write_header(IO_CACHE* file)
{
- // make sure to change this when the header gets bigger
char buf[LOG_EVENT_HEADER_LEN];
char* pos = buf;
int4store(pos, when); // timestamp
@@ -63,6 +278,10 @@ int Log_event::write_header(IO_CACHE* file)
long tmp=get_data_size() + LOG_EVENT_HEADER_LEN;
int4store(pos, tmp);
pos += 4;
+ int4store(pos, log_seq);
+ pos += 4;
+ int2store(pos, flags);
+ pos += 2;
return (my_b_write(file, (byte*) buf, (uint) (pos - buf)));
}
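The write_header() hunk above extends the event header with log_seq and flags. The resulting byte layout, as far as it can be read off this diff, is sketched below (store4/store2 are stand-ins for MySQL's little-endian int4store/int2store macros, and the 19-byte LOG_EVENT_HEADER_LEN is inferred from the hunk, not quoted from the source):

    #include <cstddef>
    #include <ctime>

    // offset 0  timestamp   (4 bytes)
    // offset 4  event type  (1 byte)
    // offset 5  server_id   (4 bytes)
    // offset 9  event_len   (4 bytes)  -- data size + header size
    // offset 13 log_seq     (4 bytes)  -- new in this change
    // offset 17 flags       (2 bytes)  -- new in this change
    static void store4(unsigned char *p, unsigned long v)
    {
      p[0] = (unsigned char)(v);       p[1] = (unsigned char)(v >> 8);
      p[2] = (unsigned char)(v >> 16); p[3] = (unsigned char)(v >> 24);
    }
    static void store2(unsigned char *p, unsigned v)
    {
      p[0] = (unsigned char)(v); p[1] = (unsigned char)(v >> 8);
    }

    static std::size_t write_header(unsigned char *buf, time_t when,
                                    unsigned char type, unsigned long server_id,
                                    unsigned long data_size,
                                    unsigned long log_seq, unsigned flags)
    {
      unsigned char *pos = buf;
      store4(pos, (unsigned long) when); pos += 4;
      *pos++ = type;
      store4(pos, server_id);            pos += 4;
      store4(pos, data_size + 19);       pos += 4;  // LOG_EVENT_HEADER_LEN assumed 19
      store4(pos, log_seq);              pos += 4;
      store2(pos, flags);                pos += 2;
      return (std::size_t)(pos - buf);              // 19
    }
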
@@ -115,91 +334,51 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet,
Log_event* Log_event::read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock)
{
- time_t timestamp;
- uint32 server_id;
-
- char buf[LOG_EVENT_HEADER_LEN-4];
+ char head[LOG_EVENT_HEADER_LEN];
if(log_lock) pthread_mutex_lock(log_lock);
- if (my_b_read(file, (byte *) buf, sizeof(buf)))
+ if (my_b_read(file, (byte *) head, sizeof(head)))
{
if (log_lock) pthread_mutex_unlock(log_lock);
- return NULL;
- }
- timestamp = uint4korr(buf);
- server_id = uint4korr(buf + 5);
-
- switch(buf[EVENT_TYPE_OFFSET])
- {
- case QUERY_EVENT:
- {
- Query_log_event* q = new Query_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- if (!q->query)
- {
- delete q;
- q=NULL;
- }
- return q;
- }
-
- case LOAD_EVENT:
- {
- Load_log_event* l = new Load_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- if (!l->table_name)
- {
- delete l;
- l=NULL;
- }
- return l;
+ return 0;
}
+ uint data_len = uint4korr(head + EVENT_LEN_OFFSET);
+ char* buf = 0;
+ const char* error = 0;
+ Log_event* res = 0;
- case ROTATE_EVENT:
+ if (data_len > max_allowed_packet)
{
- Rotate_log_event* r = new Rotate_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
-
- if (!r->new_log_ident)
- {
- delete r;
- r=NULL;
- }
- return r;
+ error = "Event too big";
+ goto err;
}
- case INTVAR_EVENT:
+ if (data_len < LOG_EVENT_HEADER_LEN)
{
- Intvar_log_event* e = new Intvar_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
-
- if (e->type == INVALID_INT_EVENT)
- {
- delete e;
- e=NULL;
- }
- return e;
+ error = "Event too small";
+ goto err;
}
- case START_EVENT:
- {
- Start_log_event* e = new Start_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- return e;
- }
- case STOP_EVENT:
- {
- Stop_log_event* e = new Stop_log_event(file, timestamp, server_id);
- if(log_lock) pthread_mutex_unlock(log_lock);
- return e;
- }
- default:
- break;
+ if (!(buf = my_malloc(data_len, MYF(MY_WME))))
+ {
+ error = "Out of memory";
+ goto err;
}
- // default
+ memcpy(buf, head, LOG_EVENT_HEADER_LEN);
+ if(my_b_read(file, (byte*) buf + LOG_EVENT_HEADER_LEN,
+ data_len - LOG_EVENT_HEADER_LEN))
+ {
+ error = "read error";
+ goto err;
+ }
+ res = read_log_event(buf, data_len);
+err:
if (log_lock) pthread_mutex_unlock(log_lock);
- return NULL;
+ if(error)
+ sql_print_error(error);
+ my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
+ return res;
}
Log_event* Log_event::read_log_event(const char* buf, int event_len)
@@ -245,6 +424,17 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len)
return r;
}
+ case SLAVE_EVENT:
+ {
+ Slave_log_event* s = new Slave_log_event(buf, event_len);
+ if (!s->master_host)
+ {
+ delete s;
+ return NULL;
+ }
+
+ return s;
+ }
case START_EVENT: return new Start_log_event(buf);
case STOP_EVENT: return new Stop_log_event(buf);
case INTVAR_EVENT: return new Intvar_log_event(buf);
@@ -305,6 +495,7 @@ void Stop_log_event::print(FILE* file, bool short_form, char* last_db)
void Rotate_log_event::print(FILE* file, bool short_form, char* last_db)
{
+ char buf[22];
if (short_form)
return;
@@ -313,51 +504,25 @@ void Rotate_log_event::print(FILE* file, bool short_form, char* last_db)
if (new_log_ident)
my_fwrite(file, (byte*) new_log_ident, (uint)ident_len,
MYF(MY_NABP | MY_WME));
- fprintf(file, "\n");
+ fprintf(file, "pos=%s\n", llstr(pos, buf));
fflush(file);
}
-Rotate_log_event::Rotate_log_event(IO_CACHE* file, time_t when_arg,
- uint32 server_id):
- Log_event(when_arg, 0, 0, server_id),new_log_ident(NULL),alloced(0)
-{
- char *tmp_ident;
- char buf[4];
-
- if (my_b_read(file, (byte*) buf, sizeof(buf)))
- return;
- ulong event_len;
- event_len = uint4korr(buf);
- if (event_len < ROTATE_EVENT_OVERHEAD)
- return;
-
- ident_len = (uchar)(event_len - ROTATE_EVENT_OVERHEAD);
- if (!(tmp_ident = (char*) my_malloc((uint)ident_len, MYF(MY_WME))))
- return;
- if (my_b_read( file, (byte*) tmp_ident, (uint) ident_len))
- {
- my_free((gptr) tmp_ident, MYF(0));
- return;
- }
-
- new_log_ident = tmp_ident;
- alloced = 1;
-}
-
Start_log_event::Start_log_event(const char* buf) :Log_event(buf)
{
- buf += EVENT_LEN_OFFSET + 4; // skip even length
- binlog_version = uint2korr(buf);
- memcpy(server_version, buf + 2, sizeof(server_version));
- created = uint4korr(buf + 2 + sizeof(server_version));
+ binlog_version = uint2korr(buf + LOG_EVENT_HEADER_LEN +
+ ST_BINLOG_VER_OFFSET);
+ memcpy(server_version, buf + ST_SERVER_VER_OFFSET + LOG_EVENT_HEADER_LEN,
+ ST_SERVER_VER_LEN);
+ created = uint4korr(buf + ST_CREATED_OFFSET + LOG_EVENT_HEADER_LEN);
}
int Start_log_event::write_data(IO_CACHE* file)
{
- char buff[sizeof(server_version)+2+4];
- int2store(buff,binlog_version);
- memcpy(buff+2,server_version,sizeof(server_version));
- int4store(buff+2+sizeof(server_version),created);
+ char buff[START_HEADER_LEN];
+ int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
+ memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
+ int4store(buff + ST_CREATED_OFFSET,created);
return (my_b_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0);
}
@@ -369,8 +534,10 @@ Rotate_log_event::Rotate_log_event(const char* buf, int event_len):
if(event_len < ROTATE_EVENT_OVERHEAD)
return;
+ pos = uint8korr(buf + R_POS_OFFSET + LOG_EVENT_HEADER_LEN);
ident_len = (uchar)(event_len - ROTATE_EVENT_OVERHEAD);
- if (!(new_log_ident = (char*) my_memdup((byte*) buf + LOG_EVENT_HEADER_LEN,
+ if (!(new_log_ident = (char*) my_memdup((byte*) buf + R_IDENT_OFFSET
+ + LOG_EVENT_HEADER_LEN,
(uint) ident_len, MYF(MY_WME))))
return;
@@ -379,42 +546,10 @@ Rotate_log_event::Rotate_log_event(const char* buf, int event_len):
int Rotate_log_event::write_data(IO_CACHE* file)
{
- return my_b_write(file, (byte*) new_log_ident, (uint) ident_len) ? -1 :0;
-}
-
-Query_log_event::Query_log_event(IO_CACHE* file, time_t when_arg,
- uint32 server_id):
- Log_event(when_arg,0,0,server_id),data_buf(0),query(NULL),db(NULL)
-{
- char buf[QUERY_HEADER_LEN + 4];
- ulong data_len;
- if (my_b_read(file, (byte*) buf, sizeof(buf)))
- return; // query == NULL will tell the
- // caller there was a problem
- data_len = uint4korr(buf);
- if (data_len < QUERY_EVENT_OVERHEAD)
- return; // tear-drop attack protection :)
-
- data_len -= QUERY_EVENT_OVERHEAD;
- exec_time = uint4korr(buf + 8);
- db_len = (uint)buf[12];
- error_code = uint2korr(buf + 13);
-
- /* Allocate one byte extra for end \0 */
- if (!(data_buf = (char*) my_malloc(data_len+1, MYF(MY_WME))))
- return;
- if (my_b_read( file, (byte*) data_buf, data_len))
- {
- my_free((gptr) data_buf, MYF(0));
- data_buf = 0;
- return;
- }
-
- thread_id = uint4korr(buf + 4);
- db = data_buf;
- query=data_buf + db_len + 1;
- q_len = data_len - 1 - db_len;
- *((char*) query + q_len) = 0; // Safety
+ char buf[ROTATE_HEADER_LEN];
+ int8store(buf, pos + R_POS_OFFSET);
+ return my_b_write(file, (byte*)buf, ROTATE_HEADER_LEN) ||
+ my_b_write(file, (byte*)new_log_ident, (uint) ident_len);
}
Query_log_event::Query_log_event(const char* buf, int event_len):
@@ -423,19 +558,19 @@ Query_log_event::Query_log_event(const char* buf, int event_len):
if ((uint)event_len < QUERY_EVENT_OVERHEAD)
return;
ulong data_len;
- buf += EVENT_LEN_OFFSET;
data_len = event_len - QUERY_EVENT_OVERHEAD;
+
- exec_time = uint4korr(buf + 8);
- error_code = uint2korr(buf + 13);
+ exec_time = uint4korr(buf + LOG_EVENT_HEADER_LEN + Q_EXEC_TIME_OFFSET);
+ error_code = uint2korr(buf + LOG_EVENT_HEADER_LEN + Q_ERR_CODE_OFFSET);
if (!(data_buf = (char*) my_malloc(data_len + 1, MYF(MY_WME))))
return;
- memcpy(data_buf, buf + QUERY_HEADER_LEN + 4, data_len);
- thread_id = uint4korr(buf + 4);
+ memcpy(data_buf, buf + LOG_EVENT_HEADER_LEN + Q_DATA_OFFSET, data_len);
+ thread_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + Q_THREAD_ID_OFFSET);
db = data_buf;
- db_len = (uint)buf[12];
+ db_len = (uint)buf[LOG_EVENT_HEADER_LEN + Q_DB_LEN_OFFSET];
query=data_buf + db_len + 1;
q_len = data_len - 1 - db_len;
*((char*)query+q_len) = 0;
@@ -474,44 +609,38 @@ int Query_log_event::write_data(IO_CACHE* file)
if (!query) return -1;
char buf[QUERY_HEADER_LEN];
- char* pos = buf;
- int4store(pos, thread_id);
- pos += 4;
- int4store(pos, exec_time);
- pos += 4;
- *pos++ = (char)db_len;
- int2store(pos, error_code);
- pos += 2;
+ int4store(buf + Q_THREAD_ID_OFFSET, thread_id);
+ int4store(buf + Q_EXEC_TIME_OFFSET, exec_time);
+ buf[Q_DB_LEN_OFFSET] = (char)db_len;
+ int2store(buf + Q_ERR_CODE_OFFSET, error_code);
- return (my_b_write(file, (byte*) buf, (uint)(pos - buf)) ||
+ return (my_b_write(file, (byte*) buf, QUERY_HEADER_LEN) ||
my_b_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) ||
my_b_write(file, (byte*) query, q_len)) ? -1 : 0;
}
-Intvar_log_event:: Intvar_log_event(IO_CACHE* file, time_t when_arg,
- uint32 server_id)
- :Log_event(when_arg,0,0,server_id), type(INVALID_INT_EVENT)
+Intvar_log_event::Intvar_log_event(const char* buf):Log_event(buf)
{
- char buf[9+4];
- if (!my_b_read(file, (byte*) buf, sizeof(buf)))
- {
- type = buf[4];
- val = uint8korr(buf+1+4);
- }
+ buf += LOG_EVENT_HEADER_LEN;
+ type = buf[I_TYPE_OFFSET];
+ val = uint8korr(buf+I_VAL_OFFSET);
}
-Intvar_log_event::Intvar_log_event(const char* buf):Log_event(buf)
+const char* Intvar_log_event::get_var_type_name()
{
- buf += LOG_EVENT_HEADER_LEN;
- type = buf[0];
- val = uint8korr(buf+1);
+ switch(type)
+ {
+ case LAST_INSERT_ID_EVENT: return "LAST_INSERT_ID";
+ case INSERT_ID_EVENT: return "INSERT_ID";
+ default: /* impossible */ return "UNKNOWN";
+ }
}
int Intvar_log_event::write_data(IO_CACHE* file)
{
char buf[9];
- buf[0] = type;
- int8store(buf + 1, val);
+ buf[I_TYPE_OFFSET] = type;
+ int8store(buf + I_VAL_OFFSET, val);
return my_b_write(file, (byte*) buf, sizeof(buf));
}
@@ -542,12 +671,12 @@ void Intvar_log_event::print(FILE* file, bool short_form, char* last_db)
int Load_log_event::write_data(IO_CACHE* file)
{
char buf[LOAD_HEADER_LEN];
- int4store(buf, thread_id);
- int4store(buf + 4, exec_time);
- int4store(buf + 8, skip_lines);
- buf[12] = (char)table_name_len;
- buf[13] = (char)db_len;
- int4store(buf + 14, num_fields);
+ int4store(buf + L_THREAD_ID_OFFSET, thread_id);
+ int4store(buf + L_EXEC_TIME_OFFSET, exec_time);
+ int4store(buf + L_SKIP_LINES_OFFSET, skip_lines);
+ buf[L_TBL_LEN_OFFSET] = (char)table_name_len;
+ buf[L_DB_LEN_OFFSET] = (char)db_len;
+ int4store(buf + L_NUM_FIELDS_OFFSET, num_fields);
if(my_b_write(file, (byte*)buf, sizeof(buf)) ||
my_b_write(file, (byte*)&sql_ex, sizeof(sql_ex)))
@@ -566,52 +695,33 @@ int Load_log_event::write_data(IO_CACHE* file)
return 0;
}
-Load_log_event::Load_log_event(IO_CACHE* file, time_t when, uint32 server_id):
- Log_event(when,0,0,server_id),data_buf(0),num_fields(0),
- fields(0),field_lens(0),field_block_len(0),
- table_name(0),db(0),fname(0)
-{
- char buf[LOAD_HEADER_LEN + 4];
- ulong data_len;
- if (my_b_read(file, (byte*)buf, sizeof(buf)) ||
- my_b_read(file, (byte*)&sql_ex, sizeof(sql_ex)))
- return;
-
- data_len = uint4korr(buf) - LOAD_EVENT_OVERHEAD;
- if (!(data_buf = (char*)my_malloc(data_len + 1, MYF(MY_WME))))
- return;
- if (my_b_read(file, (byte*)data_buf, data_len))
- return;
- copy_log_event(buf,data_len);
-}
-
Load_log_event::Load_log_event(const char* buf, int event_len):
Log_event(buf),data_buf(0),num_fields(0),fields(0),
field_lens(0),field_block_len(0),
table_name(0),db(0),fname(0)
{
- ulong data_len;
-
+ uint data_len;
if((uint)event_len < (LOAD_EVENT_OVERHEAD + LOG_EVENT_HEADER_LEN))
return;
- buf += EVENT_LEN_OFFSET;
- memcpy(&sql_ex, buf + LOAD_HEADER_LEN + 4, sizeof(sql_ex));
- data_len = event_len;
-
+ memcpy(&sql_ex, buf + LOAD_HEADER_LEN + LOG_EVENT_HEADER_LEN,
+ sizeof(sql_ex));
+ data_len = event_len - LOAD_HEADER_LEN - LOG_EVENT_HEADER_LEN -
+ sizeof(sql_ex);
if(!(data_buf = (char*)my_malloc(data_len + 1, MYF(MY_WME))))
return;
- memcpy(data_buf, buf + 22 + sizeof(sql_ex), data_len);
+ memcpy(data_buf, buf +LOG_EVENT_HEADER_LEN + LOAD_HEADER_LEN
+ + sizeof(sql_ex), data_len);
copy_log_event(buf, data_len);
}
void Load_log_event::copy_log_event(const char *buf, ulong data_len)
{
- thread_id = uint4korr(buf+4);
- exec_time = uint4korr(buf+8);
- skip_lines = uint4korr(buf + 12);
- table_name_len = (uint)buf[16];
- db_len = (uint)buf[17];
- num_fields = uint4korr(buf + 18);
+ thread_id = uint4korr(buf + L_THREAD_ID_OFFSET + LOG_EVENT_HEADER_LEN);
+ exec_time = uint4korr(buf + L_EXEC_TIME_OFFSET + LOG_EVENT_HEADER_LEN);
+ skip_lines = uint4korr(buf + L_SKIP_LINES_OFFSET + LOG_EVENT_HEADER_LEN);
+ table_name_len = (uint)buf[L_TBL_LEN_OFFSET + LOG_EVENT_HEADER_LEN];
+ db_len = (uint)buf[L_DB_LEN_OFFSET + LOG_EVENT_HEADER_LEN];
+ num_fields = uint4korr(buf + L_NUM_FIELDS_OFFSET + LOG_EVENT_HEADER_LEN);
if (num_fields > data_len) // simple sanity check against corruption
return;
@@ -717,6 +827,12 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db)
#ifndef MYSQL_CLIENT
+void Log_event::set_log_seq(THD* thd, MYSQL_LOG* log)
+ {
+ log_seq = (thd && thd->log_seq) ? thd->log_seq++ : log->log_seq++;
+ }
+
+
void Load_log_event::set_fields(List<Item> &fields)
{
uint i;
@@ -729,4 +845,92 @@ void Load_log_event::set_fields(List<Item> &fields)
}
+Slave_log_event::Slave_log_event(THD* thd_arg,MASTER_INFO* mi):
+ Log_event(thd_arg->start_time, 0, 1, thd_arg->server_id),
+ mem_pool(0),master_host(0)
+{
+ if(!mi->inited)
+ return;
+ pthread_mutex_lock(&mi->lock);
+ master_host_len = strlen(mi->host);
+ master_log_len = strlen(mi->log_file_name);
+  // on OOM, print the error and leave the structure uninitialized
+ if((mem_pool = (char*)my_malloc(get_data_size() + 1,
+ MYF(MY_WME))))
+ {
+ master_host = mem_pool + SL_MASTER_HOST_OFFSET ;
+ memcpy(master_host, mi->host, master_host_len + 1);
+ master_log = master_host + master_host_len + 1;
+ memcpy(master_log, mi->log_file_name, master_log_len + 1);
+ master_port = mi->port;
+ master_pos = mi->pos;
+ }
+ else
+ sql_print_error("Out of memory while recording slave event");
+ pthread_mutex_unlock(&mi->lock);
+}
+
+
#endif
+
+
+Slave_log_event::~Slave_log_event()
+{
+ my_free(mem_pool, MYF(MY_ALLOW_ZERO_PTR));
+}
+
+void Slave_log_event::print(FILE* file, bool short_form = 0,
+ char* last_db = 0)
+{
+ char llbuff[22];
+ if(short_form)
+ return;
+ print_header(file);
+ fputc('\n', file);
+ fprintf(file, "Slave: master_host='%s' master_port=%d \
+ master_log=%s master_pos=%s\n", master_host, master_port, master_log,
+ llstr(master_pos, llbuff));
+}
+
+int Slave_log_event::get_data_size()
+{
+ return master_host_len + master_log_len + 1 + SL_MASTER_HOST_OFFSET;
+}
+
+int Slave_log_event::write_data(IO_CACHE* file)
+{
+ int8store(mem_pool + SL_MASTER_POS_OFFSET, master_pos);
+ int2store(mem_pool + SL_MASTER_PORT_OFFSET, master_port);
+ // log and host are already there
+ return my_b_write(file, (byte*)mem_pool, get_data_size());
+}
+
+void Slave_log_event::init_from_mem_pool(int data_size)
+{
+ master_pos = uint8korr(mem_pool + SL_MASTER_POS_OFFSET);
+ master_port = uint2korr(mem_pool + SL_MASTER_PORT_OFFSET);
+ master_host = mem_pool + SL_MASTER_HOST_OFFSET;
+ master_host_len = strlen(master_host);
+ // safety
+ master_log = master_host + master_host_len + 1;
+ if(master_log > mem_pool + data_size)
+ {
+ master_host = 0;
+ return;
+ }
+
+ master_log_len = strlen(master_log);
+}
+
+Slave_log_event::Slave_log_event(const char* buf, int event_len):
+ Log_event(buf),mem_pool(0),master_host(0)
+{
+ event_len -= LOG_EVENT_HEADER_LEN;
+ if(event_len < 0)
+ return;
+ if(!(mem_pool = (char*)my_malloc(event_len + 1, MYF(MY_WME))))
+ return;
+ memcpy(mem_pool, buf + LOG_EVENT_HEADER_LEN, event_len);
+ mem_pool[event_len] = 0;
+ init_from_mem_pool(event_len);
+}
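As a reading aid (not part of the patch itself): the constructors above all
read the new fixed-length common header through the offsets defined in
log_event.h below: 4-byte timestamp, 1-byte event type, 4-byte server id,
4-byte event length, 4-byte log sequence number and 2-byte flags, all
little-endian, giving LOG_EVENT_HEADER_LEN = 19. A minimal standalone decoding
sketch with illustrative struct/helper names (the server itself uses the
uint4korr() family of macros):

    #include <cstdint>

    // Field offsets mirror EVENT_TYPE_OFFSET (4), SERVER_ID_OFFSET (5),
    // EVENT_LEN_OFFSET (9), LOG_SEQ_OFFSET (13) and FLAGS_OFFSET (17).
    struct EventHeader {
      uint32_t when;        // bytes 0..3, timestamp
      uint8_t  type;        // byte 4
      uint32_t server_id;   // bytes 5..8
      uint32_t event_len;   // bytes 9..12, header + body
      uint32_t log_seq;     // bytes 13..16
      uint16_t flags;       // bytes 17..18
    };

    static uint32_t le32(const unsigned char *p)
    {
      return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
             ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
    }

    static EventHeader decode_header(const unsigned char *buf) // >= 19 bytes
    {
      EventHeader h;
      h.when      = le32(buf);
      h.type      = buf[4];
      h.server_id = le32(buf + 5);
      h.event_len = le32(buf + 9);
      h.log_seq   = le32(buf + 13);
      h.flags     = (uint16_t) (buf[17] | (buf[18] << 8));
      return h;
    }

This is what read_log_event(IO_CACHE*, ...) above relies on: it reads the
19-byte head first, takes event_len from it, and only then allocates and reads
the rest of the event, rejecting anything larger than max_allowed_packet or
smaller than the header.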
diff --git a/sql/log_event.h b/sql/log_event.h
index 41f847e8d92..f38ddef05a2 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -34,40 +34,110 @@
#define LOG_READ_TOO_LARGE -7
#define LOG_EVENT_OFFSET 4
-#define BINLOG_VERSION 1
+#define BINLOG_VERSION 2
+
+/* we could have used SERVER_VERSION_LENGTH, but this introduces an
+   obscure dependency - if somebody decided to change SERVER_VERSION_LENGTH,
+   it would have broken the replication protocol
+*/
+#define ST_SERVER_VER_LEN 50
+
+/* The binary log consists of events. Each event has a fixed-length header,
+   followed by a data body whose length may vary depending on the type of
+   event. The data body consists of an optional fixed-length segment
+   (post-header) and an optional variable-length segment. See the #defines
+   and comments below for the format specifics.
+*/
+
+/* common header length and event-specific post-header sizes */
+#define LOG_EVENT_HEADER_LEN 19
+#define QUERY_HEADER_LEN (4 + 4 + 1 + 2)
+#define LOAD_HEADER_LEN (4 + 4 + 4 + 1 +1 + 4)
+#define START_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4)
+#define ROTATE_HEADER_LEN 8
+
+/* event header offsets */
-#define LOG_EVENT_HEADER_LEN 13
-#define QUERY_HEADER_LEN (sizeof(uint32) + sizeof(uint32) + \
- sizeof(uchar) + sizeof(uint16))
-#define LOAD_HEADER_LEN (sizeof(uint32) + sizeof(uint32) + \
- + sizeof(uint32) + 2 + sizeof(uint32))
-#define EVENT_LEN_OFFSET 9
#define EVENT_TYPE_OFFSET 4
+#define SERVER_ID_OFFSET 5
+#define EVENT_LEN_OFFSET 9
+#define LOG_SEQ_OFFSET 13
+#define FLAGS_OFFSET 17
+
+/* start event post-header */
+
+#define ST_BINLOG_VER_OFFSET 0
+#define ST_SERVER_VER_OFFSET 2
+#define ST_CREATED_OFFSET (ST_SERVER_VER_OFFSET + ST_SERVER_VER_LEN)
+
+/* slave event post-header */
+
+#define SL_MASTER_PORT_OFFSET 8
+#define SL_MASTER_POS_OFFSET 0
+#define SL_MASTER_HOST_OFFSET 10
+
+/* query event post-header */
+
+#define Q_THREAD_ID_OFFSET 0
+#define Q_EXEC_TIME_OFFSET 4
+#define Q_DB_LEN_OFFSET 8
+#define Q_ERR_CODE_OFFSET 9
+#define Q_DATA_OFFSET QUERY_HEADER_LEN
+
+/* Intvar event post-header */
+
+#define I_TYPE_OFFSET 0
+#define I_VAL_OFFSET 1
+
+/* Load event post-header */
+
+#define L_THREAD_ID_OFFSET 0
+#define L_EXEC_TIME_OFFSET 4
+#define L_SKIP_LINES_OFFSET 8
+#define L_DB_LEN_OFFSET 12
+#define L_TBL_LEN_OFFSET 13
+#define L_NUM_FIELDS_OFFSET 14
+#define L_DATA_OFFSET LOAD_HEADER_LEN
+
+/* Rotate event post-header */
+
+#define R_POS_OFFSET 0
+#define R_IDENT_OFFSET 8
+
#define QUERY_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN)
-#define ROTATE_EVENT_OVERHEAD LOG_EVENT_HEADER_LEN
+#define QUERY_DATA_OFFSET (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN)
+#define ROTATE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+ROTATE_HEADER_LEN)
#define LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+LOAD_HEADER_LEN+sizeof(sql_ex_info))
#define BINLOG_MAGIC "\xfe\x62\x69\x6e"
+#define LOG_EVENT_TIME_F 0x1
+#define LOG_EVENT_FORCED_ROTATE_F 0x2
+
enum Log_event_type { START_EVENT = 1, QUERY_EVENT =2,
STOP_EVENT=3, ROTATE_EVENT = 4, INTVAR_EVENT=5,
- LOAD_EVENT=6};
+ LOAD_EVENT=6, SLAVE_EVENT=7, FILE_EVENT=8};
enum Int_event_type { INVALID_INT_EVENT = 0, LAST_INSERT_ID_EVENT = 1, INSERT_ID_EVENT = 2
};
#ifndef MYSQL_CLIENT
class String;
+class MYSQL_LOG;
+class THD;
#endif
extern uint32 server_id;
+struct st_master_info;
+
class Log_event
{
public:
time_t when;
ulong exec_time;
- int valid_exec_time; // if false, the exec time setting is bogus
uint32 server_id;
+ uint32 log_seq;
+ uint16 flags;
static void *operator new(size_t size)
{
@@ -84,17 +154,22 @@ public:
virtual int write_data(IO_CACHE* file __attribute__((unused))) { return 0; }
virtual Log_event_type get_type_code() = 0;
Log_event(time_t when_arg, ulong exec_time_arg = 0,
- int valid_exec_time_arg = 0, uint32 server_id_arg = 0):
+ int valid_exec_time = 0, uint32 server_id_arg = 0,
+ uint32 log_seq_arg = 0, uint16 flags_arg = 0):
when(when_arg), exec_time(exec_time_arg),
- valid_exec_time(valid_exec_time_arg)
+ log_seq(log_seq_arg),flags(0)
{
server_id = server_id_arg ? server_id_arg : (::server_id);
+ if(valid_exec_time)
+ flags |= LOG_EVENT_TIME_F;
}
- Log_event(const char* buf): valid_exec_time(0)
+ Log_event(const char* buf)
{
when = uint4korr(buf);
- server_id = uint4korr(buf + 5);
+ server_id = uint4korr(buf + SERVER_ID_OFFSET);
+ log_seq = uint4korr(buf + LOG_SEQ_OFFSET);
+ flags = uint2korr(buf + FLAGS_OFFSET);
}
virtual ~Log_event() {}
@@ -108,10 +183,15 @@ public:
// if mutex is 0, the read will proceed without mutex
static Log_event* read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock);
static Log_event* read_log_event(const char* buf, int event_len);
+ const char* get_type_str();
#ifndef MYSQL_CLIENT
static int read_log_event(IO_CACHE* file, String* packet,
pthread_mutex_t* log_lock);
+ void set_log_seq(THD* thd, MYSQL_LOG* log);
+ virtual void pack_info(String* packet);
+ int net_send(THD* thd, const char* log_name, ulong pos);
+ static void init_show_field_list(List<Item>* field_list);
#endif
};
@@ -134,7 +214,8 @@ public:
THD* thd;
bool cache_stmt;
Query_log_event(THD* thd_arg, const char* query_arg, bool using_trans=0):
- Log_event(thd_arg->start_time,0,1,thd_arg->server_id), data_buf(0),
+ Log_event(thd_arg->start_time,0,1,thd_arg->server_id,thd_arg->log_seq),
+ data_buf(0),
query(query_arg), db(thd_arg->db), q_len(thd_arg->query_length),
error_code(thd_arg->killed ? ER_SERVER_SHUTDOWN: thd_arg->net.last_errno),
thread_id(thd_arg->thread_id), thd(thd_arg),
@@ -146,9 +227,10 @@ public:
exec_time = (ulong) (end_time - thd->start_time);
db_len = (db) ? (uint32) strlen(db) : 0;
}
+
+ void pack_info(String* packet);
#endif
- Query_log_event(IO_CACHE* file, time_t when, uint32 server_id_arg);
Query_log_event(const char* buf, int event_len);
~Query_log_event()
{
@@ -172,6 +254,33 @@ public:
void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
+class Slave_log_event: public Log_event
+{
+protected:
+ char* mem_pool;
+ void init_from_mem_pool(int data_size);
+public:
+ char* master_host;
+ int master_host_len;
+ uint16 master_port;
+ char* master_log;
+ int master_log_len;
+ ulonglong master_pos;
+
+#ifndef MYSQL_CLIENT
+ Slave_log_event(THD* thd_arg, struct st_master_info* mi);
+ void pack_info(String* packet);
+#endif
+
+ Slave_log_event(const char* buf, int event_len);
+ ~Slave_log_event();
+ int get_data_size();
+ Log_event_type get_type_code() { return SLAVE_EVENT; }
+ void print(FILE* file, bool short_form = 0, char* last_db = 0);
+ int write_data(IO_CACHE* file );
+
+};
+
#define DUMPFILE_FLAG 0x1
#define OPT_ENCLOSED_FLAG 0x2
#define REPLACE_FLAG 0x4
@@ -234,7 +343,6 @@ public:
time_t end_time;
time(&end_time);
exec_time = (ulong) (end_time - thd->start_time);
- valid_exec_time = 1;
db_len = (db) ? (uint32) strlen(db) : 0;
table_name_len = (table_name) ? (uint32) strlen(table_name) : 0;
fname_len = (fname) ? (uint) strlen(fname) : 0;
@@ -288,9 +396,9 @@ public:
fields = fields_buf.ptr();
}
void set_fields(List<Item> &fields_arg);
+ void pack_info(String* packet);
#endif
- Load_log_event(IO_CACHE * file, time_t when, uint32 server_id_arg);
Load_log_event(const char* buf, int event_len);
~Load_log_event()
{
@@ -322,23 +430,12 @@ class Start_log_event: public Log_event
public:
uint32 created;
uint16 binlog_version;
- char server_version[50];
+ char server_version[ST_SERVER_VER_LEN];
Start_log_event() :Log_event(time(NULL)),binlog_version(BINLOG_VERSION)
{
created = (uint32) when;
- memcpy(server_version, ::server_version, sizeof(server_version));
- }
- Start_log_event(IO_CACHE* file, time_t when_arg, uint32 server_id_arg) :
- Log_event(when_arg, 0, 0, server_id_arg)
- {
- char buf[sizeof(server_version) + 2 + 4 + 4];
- if (my_b_read(file, (byte*) buf, sizeof(buf)))
- return;
- binlog_version = uint2korr(buf+4);
- memcpy(server_version, buf + 6, sizeof(server_version));
- server_version[sizeof(server_version)-1]=0;
- created = uint4korr(buf + 6 + sizeof(server_version));
+ memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
}
Start_log_event(const char* buf);
@@ -347,9 +444,11 @@ public:
int write_data(IO_CACHE* file);
int get_data_size()
{
- // sizeof(binlog_version) + sizeof(server_version) sizeof(created)
- return 2 + sizeof(server_version) + 4;
+ return START_HEADER_LEN;
}
+#ifndef MYSQL_CLIENT
+ void pack_info(String* packet);
+#endif
void print(FILE* file, bool short_form = 0, char* last_db = 0);
};
@@ -361,12 +460,15 @@ public:
Intvar_log_event(uchar type_arg, ulonglong val_arg)
:Log_event(time(NULL)),val(val_arg),type(type_arg)
{}
- Intvar_log_event(IO_CACHE* file, time_t when, uint32 server_id_arg);
Intvar_log_event(const char* buf);
~Intvar_log_event() {}
Log_event_type get_type_code() { return INTVAR_EVENT;}
+ const char* get_var_type_name();
int get_data_size() { return sizeof(type) + sizeof(val);}
int write_data(IO_CACHE* file);
+#ifndef MYSQL_CLIENT
+ void pack_info(String* packet);
+#endif
void print(FILE* file, bool short_form = 0, char* last_db = 0);
@@ -377,12 +479,6 @@ class Stop_log_event: public Log_event
public:
Stop_log_event() :Log_event(time(NULL))
{}
- Stop_log_event(IO_CACHE* file, time_t when_arg, uint32 server_id_arg):
- Log_event(when_arg,0,0,server_id_arg)
- {
- byte skip[4];
- my_b_read(file, skip, sizeof(skip)); // skip the event length
- }
Stop_log_event(const char* buf):Log_event(buf)
{
}
@@ -396,16 +492,18 @@ class Rotate_log_event: public Log_event
public:
const char* new_log_ident;
uchar ident_len;
+ ulonglong pos;
bool alloced;
- Rotate_log_event(const char* new_log_ident_arg, uint ident_len_arg = 0) :
+ Rotate_log_event(const char* new_log_ident_arg, uint ident_len_arg = 0,
+ ulonglong pos_arg = 4) :
Log_event(time(NULL)),
new_log_ident(new_log_ident_arg),
- ident_len(ident_len_arg ? ident_len_arg : (uint) strlen(new_log_ident_arg)),
+ ident_len(ident_len_arg ? ident_len_arg :
+ (uint) strlen(new_log_ident_arg)), pos(pos_arg),
alloced(0)
{}
- Rotate_log_event(IO_CACHE* file, time_t when, uint32 server_id_arg) ;
Rotate_log_event(const char* buf, int event_len);
~Rotate_log_event()
{
@@ -413,10 +511,16 @@ public:
my_free((gptr) new_log_ident, MYF(0));
}
Log_event_type get_type_code() { return ROTATE_EVENT;}
- int get_data_size() { return ident_len;}
+ int get_data_size() { return ident_len + ROTATE_HEADER_LEN;}
int write_data(IO_CACHE* file);
void print(FILE* file, bool short_form = 0, char* last_db = 0);
+#ifndef MYSQL_CLIENT
+ void pack_info(String* packet);
+#endif
};
#endif
+
+
+
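As a reading aid for the new SLAVE_EVENT: past the 19-byte common header its
body is an 8-byte master position (SL_MASTER_POS_OFFSET), a 2-byte master port
(SL_MASTER_PORT_OFFSET) and the NUL-terminated master host and log names
starting at SL_MASTER_HOST_OFFSET, which is how Slave_log_event::write_data()
and init_from_mem_pool() in log_event.cc above produce and consume it. A
standalone decoding sketch with illustrative names, not part of the patch:

    #include <cstdint>
    #include <cstring>
    #include <string>

    struct SlaveEventBody {
      uint64_t    master_pos;
      uint16_t    master_port;
      std::string master_host;
      std::string master_log;
    };

    // body/len describe the event payload after the 19-byte common header.
    static bool decode_slave_body(const unsigned char *body, size_t len,
                                  SlaveEventBody *out)
    {
      if (len < 11)                          // pos(8) + port(2) + host NUL
        return false;
      uint64_t pos = 0;
      for (int i = 7; i >= 0; i--)           // little-endian, like uint8korr()
        pos = (pos << 8) | body[i];
      out->master_pos  = pos;
      out->master_port = (uint16_t) (body[8] | (body[9] << 8));
      const char *host = (const char *) body + 10;
      const char *end  = (const char *) body + len;
      const char *host_end = (const char *) memchr(host, 0, end - host);
      if (!host_end || host_end + 1 >= end)  // log name must follow the host
        return false;
      const char *log = host_end + 1;
      const char *log_end = (const char *) memchr(log, 0, end - log);
      out->master_host.assign(host, host_end - host);
      out->master_log.assign(log, (log_end ? log_end : end) - log);
      return true;
    }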
diff --git a/sql/md5.c b/sql/md5.c
index 0775ba3bd1a..a19f8639f3a 100644
--- a/sql/md5.c
+++ b/sql/md5.c
@@ -108,7 +108,7 @@ Rotation is separate from addition to prevent recomputation.
/* MD5 initialization. Begins an MD5 operation, writing a new context.
*/
-void MD5Init (MD5_CTX *context) /* context */
+void my_MD5Init (my_MD5_CTX *context) /* context */
{
context->count[0] = context->count[1] = 0;
/* Load magic initialization constants.
@@ -123,8 +123,8 @@ void MD5Init (MD5_CTX *context) /* context */
operation, processing another message block, and updating the
context.
*/
-void MD5Update (context, input, inputLen)
-MD5_CTX *context; /* context */
+void my_MD5Update (context, input, inputLen)
+my_MD5_CTX *context; /* context */
unsigned char *input; /* input block */
unsigned int inputLen; /* length of input block */
{
@@ -164,9 +164,9 @@ unsigned int inputLen; /* length of input block */
/* MD5 finalization. Ends an MD5 message-digest operation, writing the
the message digest and zeroizing the context.
*/
-void MD5Final (digest, context)
+void my_MD5Final (digest, context)
unsigned char digest[16]; /* message digest */
-MD5_CTX *context; /* context */
+my_MD5_CTX *context; /* context */
{
unsigned char bits[8];
unsigned int idx, padLen;
@@ -178,10 +178,10 @@ MD5_CTX *context; /* context */
*/
idx = (unsigned int)((context->count[0] >> 3) & 0x3f);
padLen = (idx < 56) ? (56 - idx) : (120 - idx);
- MD5Update (context, PADDING, padLen);
+ my_MD5Update (context, PADDING, padLen);
/* Append length (before padding) */
- MD5Update (context, bits, 8);
+ my_MD5Update (context, bits, 8);
/* Store state in digest */
Encode (digest, context->state, 16);
diff --git a/sql/md5.h b/sql/md5.h
index 862129391f1..6fe4e543bb0 100644
--- a/sql/md5.h
+++ b/sql/md5.h
@@ -57,22 +57,20 @@ If using PROTOTYPES, then PROTO_LIST returns the list, otherwise it
#else
#define PROTO_LIST(list) ()
#endif
-
-
/* MD5 context. */
typedef struct {
UINT4 state[4]; /* state (ABCD) */
UINT4 count[2]; /* number of bits, modulo 2^64 (lsb first) */
unsigned char buffer[64]; /* input buffer */
-} MD5_CTX;
+} my_MD5_CTX;
#ifdef __cplusplus
extern "C" {
#endif
- void MD5Init PROTO_LIST ((MD5_CTX *));
- void MD5Update PROTO_LIST
- ((MD5_CTX *, unsigned char *, unsigned int));
- void MD5Final PROTO_LIST ((unsigned char [16], MD5_CTX *));
+ void my_MD5Init PROTO_LIST ((my_MD5_CTX *));
+ void my_MD5Update PROTO_LIST
+ ((my_MD5_CTX *, unsigned char *, unsigned int));
+ void my_MD5Final PROTO_LIST ((unsigned char [16], my_MD5_CTX *));
#ifdef __cplusplus
}
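The rename to a my_ prefix keeps these symbols from clashing with any other
MD5 implementation linked into the server (e.g. via OpenSSL). A small usage
sketch, assuming md5.h's own prerequisites are set up as they are inside the
server build; the driver below is illustrative and the expected digest is the
RFC 1321 test vector for "abc":

    #include "md5.h"
    #include <cstdio>
    #include <cstring>

    int main()
    {
      const char *msg = "abc";
      unsigned char digest[16];
      my_MD5_CTX ctx;

      my_MD5Init(&ctx);
      my_MD5Update(&ctx, (unsigned char *) msg, (unsigned int) strlen(msg));
      my_MD5Final(digest, &ctx);

      for (int i = 0; i < 16; i++)
        printf("%02x", digest[i]);
      printf("\n");   // 900150983cd24fb0d6963f7d28e17f72 per RFC 1321
      return 0;
    }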
diff --git a/sql/mini_client.cc b/sql/mini_client.cc
index 38180c0c6c8..8966b303000 100644
--- a/sql/mini_client.cc
+++ b/sql/mini_client.cc
@@ -28,6 +28,8 @@
#include <odbcinst.h>
#endif
#include <global.h>
+#include <mysql_com.h>
+#include <violite.h>
#include <my_sys.h>
#include <mysys_err.h>
#include <m_string.h>
@@ -37,7 +39,6 @@
#include "mysql_version.h"
#include "mysqld_error.h"
#include "errmsg.h"
-#include <violite.h>
extern "C" { // Because of SCO 3.2V4.2
#include <sys/stat.h>
@@ -69,9 +70,22 @@ extern "C" { // Because of SCO 3.2V4.2
}
+static void mc_free_rows(MYSQL_DATA *cur);
+static MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
+ my_bool default_value,
+ my_bool long_flag_protocol);
+
static void mc_end_server(MYSQL *mysql);
static int mc_sock_connect(File s, const struct sockaddr *name, uint namelen, uint to);
static void mc_free_old_query(MYSQL *mysql);
+static int mc_send_file_to_server(MYSQL *mysql, const char *filename);
+static my_ulonglong mc_net_field_length_ll(uchar **packet);
+static ulong mc_net_field_length(uchar **packet);
+static int mc_read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row,
+ ulong *lengths);
+static MYSQL_DATA *mc_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
+ uint fields);
+
#define CLIENT_CAPABILITIES (CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG | CLIENT_LOCAL_FILES)
@@ -735,18 +749,18 @@ mc_mysql_connect(MYSQL *mysql,const char *host, const char *user,
#ifdef HAVE_OPENSSL
/* Oops.. are we careful enough to not send ANY information */
/* without encryption? */
- if (client_flag & CLIENT_SSL)
+/* if (client_flag & CLIENT_SSL)
{
if (my_net_write(net,buff,(uint) (2)) || net_flush(net))
- goto error;
+ goto error;*/
/* Do the SSL layering. */
- DBUG_PRINT("info", ("IO layer change in progress..."));
+ /* DBUG_PRINT("info", ("IO layer change in progress..."));
VioSSLConnectorFd* connector_fd = (VioSSLConnectorFd*)
(mysql->connector_fd);
VioSocket* vio_socket = (VioSocket*)(mysql->net.vio);
VioSSL* vio_ssl = connector_fd->connect(vio_socket);
mysql->net.vio = (NetVio*)(vio_ssl);
- }
+ }*/
#endif /* HAVE_OPENSSL */
int3store(buff+2,max_allowed_packet);
@@ -816,11 +830,506 @@ mc_mysql_close(MYSQL *mysql)
bzero((char*) &mysql->options,sizeof(mysql->options));
mysql->net.vio = 0;
#ifdef HAVE_OPENSSL
- ((VioConnectorFd*)(mysql->connector_fd))->delete();
- mysql->connector_fd = 0;
+/* ((VioConnectorFd*)(mysql->connector_fd))->delete();
+ mysql->connector_fd = 0;*/
#endif /* HAVE_OPENSSL */
if (mysql->free_me)
my_free((gptr) mysql,MYF(0));
}
DBUG_VOID_RETURN;
}
+
+void STDCALL mc_mysql_free_result(MYSQL_RES *result)
+{
+ DBUG_ENTER("mc_mysql_free_result");
+ DBUG_PRINT("enter",("mysql_res: %lx",result));
+ if (result)
+ {
+ if (result->handle && result->handle->status == MYSQL_STATUS_USE_RESULT)
+ {
+ DBUG_PRINT("warning",("Not all rows in set were read; Ignoring rows"));
+ for (;;)
+ {
+ uint pkt_len;
+ if ((pkt_len=(uint) mc_net_safe_read(result->handle)) == packet_error)
+ break;
+ if (pkt_len == 1 && result->handle->net.read_pos[0] == 254)
+ break; /* End of data */
+ }
+ result->handle->status=MYSQL_STATUS_READY;
+ }
+ mc_free_rows(result->data);
+ if (result->fields)
+ free_root(&result->field_alloc,MYF(0));
+ if (result->row)
+ my_free((gptr) result->row,MYF(0));
+ my_free((gptr) result,MYF(0));
+ }
+ DBUG_VOID_RETURN;
+}
+
+static void mc_free_rows(MYSQL_DATA *cur)
+{
+ if (cur)
+ {
+ free_root(&cur->alloc,MYF(0));
+ my_free((gptr) cur,MYF(0));
+ }
+}
+
+static MYSQL_FIELD *
+mc_unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
+ my_bool default_value, my_bool long_flag_protocol)
+{
+ MYSQL_ROWS *row;
+ MYSQL_FIELD *field,*result;
+ DBUG_ENTER("unpack_fields");
+
+ field=result=(MYSQL_FIELD*) alloc_root(alloc,sizeof(MYSQL_FIELD)*fields);
+ if (!result)
+ DBUG_RETURN(0);
+
+ for (row=data->data; row ; row = row->next,field++)
+ {
+ field->table= strdup_root(alloc,(char*) row->data[0]);
+ field->name= strdup_root(alloc,(char*) row->data[1]);
+ field->length= (uint) uint3korr(row->data[2]);
+ field->type= (enum enum_field_types) (uchar) row->data[3][0];
+ if (long_flag_protocol)
+ {
+ field->flags= uint2korr(row->data[4]);
+ field->decimals=(uint) (uchar) row->data[4][2];
+ }
+ else
+ {
+ field->flags= (uint) (uchar) row->data[4][0];
+ field->decimals=(uint) (uchar) row->data[4][1];
+ }
+ if (INTERNAL_NUM_FIELD(field))
+ field->flags|= NUM_FLAG;
+ if (default_value && row->data[5])
+ field->def=strdup_root(alloc,(char*) row->data[5]);
+ else
+ field->def=0;
+ field->max_length= 0;
+ }
+ mc_free_rows(data); /* Free old data */
+ DBUG_RETURN(result);
+}
+
+int STDCALL
+mc_mysql_send_query(MYSQL* mysql, const char* query, uint length)
+{
+ return mc_simple_command(mysql, COM_QUERY, query, length, 1);
+}
+
+int STDCALL mc_mysql_read_query_result(MYSQL *mysql)
+{
+ uchar *pos;
+ ulong field_count;
+ MYSQL_DATA *fields;
+ uint length;
+ DBUG_ENTER("mc_mysql_read_query_result");
+
+ if ((length = mc_net_safe_read(mysql)) == packet_error)
+ DBUG_RETURN(-1);
+ mc_free_old_query(mysql); /* Free old result */
+get_info:
+ pos=(uchar*) mysql->net.read_pos;
+ if ((field_count= mc_net_field_length(&pos)) == 0)
+ {
+ mysql->affected_rows= mc_net_field_length_ll(&pos);
+ mysql->insert_id= mc_net_field_length_ll(&pos);
+ if (mysql->server_capabilities & CLIENT_TRANSACTIONS)
+ {
+ mysql->server_status=uint2korr(pos); pos+=2;
+ }
+ if (pos < mysql->net.read_pos+length && mc_net_field_length(&pos))
+ mysql->info=(char*) pos;
+ DBUG_RETURN(0);
+ }
+ if (field_count == NULL_LENGTH) /* LOAD DATA LOCAL INFILE */
+ {
+ int error=mc_send_file_to_server(mysql,(char*) pos);
+ if ((length=mc_net_safe_read(mysql)) == packet_error || error)
+ DBUG_RETURN(-1);
+ goto get_info; /* Get info packet */
+ }
+ if (!(mysql->server_status & SERVER_STATUS_AUTOCOMMIT))
+ mysql->server_status|= SERVER_STATUS_IN_TRANS;
+
+ mysql->extra_info= mc_net_field_length_ll(&pos); /* Maybe number of rec */
+ if (!(fields=mc_read_rows(mysql,(MYSQL_FIELD*) 0,5)))
+ DBUG_RETURN(-1);
+ if (!(mysql->fields=mc_unpack_fields(fields,&mysql->field_alloc,
+ (uint) field_count,0,
+ (my_bool) test(mysql->server_capabilities &
+ CLIENT_LONG_FLAG))))
+ DBUG_RETURN(-1);
+ mysql->status=MYSQL_STATUS_GET_RESULT;
+ mysql->field_count=field_count;
+ DBUG_RETURN(0);
+}
+
+int STDCALL mc_mysql_query(MYSQL *mysql, const char *query, uint length)
+{
+ DBUG_ENTER("mysql_real_query");
+ DBUG_PRINT("enter",("handle: %lx",mysql));
+ DBUG_PRINT("query",("Query = \"%s\"",query));
+ if(!length)
+ length = strlen(query);
+ if (mc_simple_command(mysql,COM_QUERY,query,length,1))
+ DBUG_RETURN(-1);
+ DBUG_RETURN(mc_mysql_read_query_result(mysql));
+}
+
+static int mc_send_file_to_server(MYSQL *mysql, const char *filename)
+{
+ int fd, readcount;
+ char buf[IO_SIZE*15],*tmp_name;
+ DBUG_ENTER("send_file_to_server");
+
+ fn_format(buf,filename,"","",4); /* Convert to client format */
+ if (!(tmp_name=my_strdup(buf,MYF(0))))
+ {
+ strmov(mysql->net.last_error, ER(mysql->net.last_errno=CR_OUT_OF_MEMORY));
+ DBUG_RETURN(-1);
+ }
+ if ((fd = my_open(tmp_name,O_RDONLY, MYF(0))) < 0)
+ {
+ mysql->net.last_errno=EE_FILENOTFOUND;
+ sprintf(buf,EE(mysql->net.last_errno),tmp_name,errno);
+ strmake(mysql->net.last_error,buf,sizeof(mysql->net.last_error)-1);
+ my_net_write(&mysql->net,"",0); net_flush(&mysql->net);
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+
+ while ((readcount = (int) my_read(fd,buf,sizeof(buf),MYF(0))) > 0)
+ {
+ if (my_net_write(&mysql->net,buf,readcount))
+ {
+ mysql->net.last_errno=CR_SERVER_LOST;
+ strmov(mysql->net.last_error,ER(mysql->net.last_errno));
+ DBUG_PRINT("error",("Lost connection to MySQL server during LOAD DATA of local file"));
+ (void) my_close(fd,MYF(0));
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+ }
+ (void) my_close(fd,MYF(0));
+ /* Send empty packet to mark end of file */
+ if (my_net_write(&mysql->net,"",0) || net_flush(&mysql->net))
+ {
+ mysql->net.last_errno=CR_SERVER_LOST;
+ sprintf(mysql->net.last_error,ER(mysql->net.last_errno),errno);
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+ if (readcount < 0)
+ {
+    mysql->net.last_errno=EE_READ; /* errmsg: the whole file could not be read */
+ sprintf(buf,EE(mysql->net.last_errno),tmp_name,errno);
+ strmake(mysql->net.last_error,buf,sizeof(mysql->net.last_error)-1);
+ my_free(tmp_name,MYF(0));
+ DBUG_RETURN(-1);
+ }
+ DBUG_RETURN(0);
+}
+
+/* Get the length of next field. Change parameter to point at fieldstart */
+static ulong mc_net_field_length(uchar **packet)
+{
+ reg1 uchar *pos= *packet;
+ if (*pos < 251)
+ {
+ (*packet)++;
+ return (ulong) *pos;
+ }
+ if (*pos == 251)
+ {
+ (*packet)++;
+ return NULL_LENGTH;
+ }
+ if (*pos == 252)
+ {
+ (*packet)+=3;
+ return (ulong) uint2korr(pos+1);
+ }
+ if (*pos == 253)
+ {
+ (*packet)+=4;
+ return (ulong) uint3korr(pos+1);
+ }
+ (*packet)+=9; /* Must be 254 when here */
+ return (ulong) uint4korr(pos+1);
+}
+
+/* Same as above, but returns ulonglong values */
+
+static my_ulonglong mc_net_field_length_ll(uchar **packet)
+{
+ reg1 uchar *pos= *packet;
+ if (*pos < 251)
+ {
+ (*packet)++;
+ return (my_ulonglong) *pos;
+ }
+ if (*pos == 251)
+ {
+ (*packet)++;
+ return (my_ulonglong) NULL_LENGTH;
+ }
+ if (*pos == 252)
+ {
+ (*packet)+=3;
+ return (my_ulonglong) uint2korr(pos+1);
+ }
+ if (*pos == 253)
+ {
+ (*packet)+=4;
+ return (my_ulonglong) uint3korr(pos+1);
+ }
+ (*packet)+=9; /* Must be 254 when here */
+#ifdef NO_CLIENT_LONGLONG
+ return (my_ulonglong) uint4korr(pos+1);
+#else
+ return (my_ulonglong) uint8korr(pos+1);
+#endif
+}
+
+/* Read all rows (fields or data) from server */
+
+static MYSQL_DATA *mc_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
+ uint fields)
+{
+ uint field,pkt_len;
+ ulong len;
+ uchar *cp;
+ char *to;
+ MYSQL_DATA *result;
+ MYSQL_ROWS **prev_ptr,*cur;
+ NET *net = &mysql->net;
+ DBUG_ENTER("mc_read_rows");
+
+ if ((pkt_len=(uint) mc_net_safe_read(mysql)) == packet_error)
+ DBUG_RETURN(0);
+ if (!(result=(MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA),
+ MYF(MY_WME | MY_ZEROFILL))))
+ {
+ net->last_errno=CR_OUT_OF_MEMORY;
+ strmov(net->last_error,ER(net->last_errno));
+ DBUG_RETURN(0);
+ }
+ init_alloc_root(&result->alloc,8192,0); /* Assume rowlength < 8192 */
+ result->alloc.min_malloc=sizeof(MYSQL_ROWS);
+ prev_ptr= &result->data;
+ result->rows=0;
+ result->fields=fields;
+
+ while (*(cp=net->read_pos) != 254 || pkt_len != 1)
+ {
+ result->rows++;
+ if (!(cur= (MYSQL_ROWS*) alloc_root(&result->alloc,
+ sizeof(MYSQL_ROWS))) ||
+ !(cur->data= ((MYSQL_ROW)
+ alloc_root(&result->alloc,
+ (fields+1)*sizeof(char *)+pkt_len))))
+ {
+ mc_free_rows(result);
+ net->last_errno=CR_OUT_OF_MEMORY;
+ strmov(net->last_error,ER(net->last_errno));
+ DBUG_RETURN(0);
+ }
+ *prev_ptr=cur;
+ prev_ptr= &cur->next;
+ to= (char*) (cur->data+fields+1);
+ for (field=0 ; field < fields ; field++)
+ {
+ if ((len=(ulong) mc_net_field_length(&cp)) == NULL_LENGTH)
+ { /* null field */
+ cur->data[field] = 0;
+ }
+ else
+ {
+ cur->data[field] = to;
+ memcpy(to,(char*) cp,len); to[len]=0;
+ to+=len+1;
+ cp+=len;
+ if (mysql_fields)
+ {
+ if (mysql_fields[field].max_length < len)
+ mysql_fields[field].max_length=len;
+ }
+ }
+ }
+ cur->data[field]=to; /* End of last field */
+ if ((pkt_len=mc_net_safe_read(mysql)) == packet_error)
+ {
+ mc_free_rows(result);
+ DBUG_RETURN(0);
+ }
+ }
+ *prev_ptr=0; /* last pointer is null */
+ DBUG_PRINT("exit",("Got %d rows",result->rows));
+ DBUG_RETURN(result);
+}
+
+
+/*
+** Read one row. Uses packet buffer as storage for fields.
+** When next packet is read, the previous field values are destroyed
+*/
+
+
+static int mc_read_one_row(MYSQL *mysql,uint fields,MYSQL_ROW row,
+ ulong *lengths)
+{
+ uint field;
+ ulong pkt_len,len;
+ uchar *pos,*prev_pos;
+
+ if ((pkt_len=(uint) mc_net_safe_read(mysql)) == packet_error)
+ return -1;
+ if (pkt_len == 1 && mysql->net.read_pos[0] == 254)
+ return 1; /* End of data */
+ prev_pos= 0; /* allowed to write at packet[-1] */
+ pos=mysql->net.read_pos;
+ for (field=0 ; field < fields ; field++)
+ {
+ if ((len=(ulong) mc_net_field_length(&pos)) == NULL_LENGTH)
+ { /* null field */
+ row[field] = 0;
+ *lengths++=0;
+ }
+ else
+ {
+ row[field] = (char*) pos;
+ pos+=len;
+ *lengths++=len;
+ }
+ if (prev_pos)
+ *prev_pos=0; /* Terminate prev field */
+ prev_pos=pos;
+ }
+ row[field]=(char*) prev_pos+1; /* End of last field */
+ *prev_pos=0; /* Terminate last field */
+ return 0;
+}
+
+my_ulonglong STDCALL mc_mysql_num_rows(MYSQL_RES *res)
+{
+ return res->row_count;
+}
+
+unsigned int STDCALL mc_mysql_num_fields(MYSQL_RES *res)
+{
+ return res->field_count;
+}
+
+void STDCALL mc_mysql_data_seek(MYSQL_RES *result, my_ulonglong row)
+{
+ MYSQL_ROWS *tmp=0;
+ DBUG_PRINT("info",("mysql_data_seek(%ld)",(long) row));
+ if (result->data)
+ for (tmp=result->data->data; row-- && tmp ; tmp = tmp->next) ;
+ result->current_row=0;
+ result->data_cursor = tmp;
+}
+
+MYSQL_ROW STDCALL mc_mysql_fetch_row(MYSQL_RES *res)
+{
+ DBUG_ENTER("mc_mysql_fetch_row");
+ if (!res->data)
+  { /* Unbuffered fetch */
+ if (!res->eof)
+ {
+ if (!(mc_read_one_row(res->handle,res->field_count,res->row,
+ res->lengths)))
+ {
+ res->row_count++;
+ DBUG_RETURN(res->current_row=res->row);
+ }
+ else
+ {
+ DBUG_PRINT("info",("end of data"));
+ res->eof=1;
+ res->handle->status=MYSQL_STATUS_READY;
+ }
+ }
+ DBUG_RETURN((MYSQL_ROW) NULL);
+ }
+ {
+ MYSQL_ROW tmp;
+ if (!res->data_cursor)
+ {
+ DBUG_PRINT("info",("end of data"));
+ DBUG_RETURN(res->current_row=(MYSQL_ROW) NULL);
+ }
+ tmp = res->data_cursor->data;
+ res->data_cursor = res->data_cursor->next;
+ DBUG_RETURN(res->current_row=tmp);
+ }
+}
+
+int STDCALL mc_mysql_select_db(MYSQL *mysql, const char *db)
+{
+ int error;
+ DBUG_ENTER("mysql_select_db");
+ DBUG_PRINT("enter",("db: '%s'",db));
+
+ if ((error=mc_simple_command(mysql,COM_INIT_DB,db,(uint) strlen(db),0)))
+ DBUG_RETURN(error);
+ my_free(mysql->db,MYF(MY_ALLOW_ZERO_PTR));
+ mysql->db=my_strdup(db,MYF(MY_WME));
+ DBUG_RETURN(0);
+}
+
+
+MYSQL_RES * STDCALL mc_mysql_store_result(MYSQL *mysql)
+{
+ MYSQL_RES *result;
+ DBUG_ENTER("mysql_store_result");
+
+ if (!mysql->fields)
+ DBUG_RETURN(0);
+ if (mysql->status != MYSQL_STATUS_GET_RESULT)
+ {
+ strmov(mysql->net.last_error,
+ ER(mysql->net.last_errno=CR_COMMANDS_OUT_OF_SYNC));
+ DBUG_RETURN(0);
+ }
+ mysql->status=MYSQL_STATUS_READY; /* server is ready */
+ if (!(result=(MYSQL_RES*) my_malloc(sizeof(MYSQL_RES)+
+ sizeof(ulong)*mysql->field_count,
+ MYF(MY_WME | MY_ZEROFILL))))
+ {
+ mysql->net.last_errno=CR_OUT_OF_MEMORY;
+ strmov(mysql->net.last_error, ER(mysql->net.last_errno));
+ DBUG_RETURN(0);
+ }
+ result->eof=1; /* Marker for buffered */
+ result->lengths=(ulong*) (result+1);
+ if (!(result->data=mc_read_rows(mysql,mysql->fields,mysql->field_count)))
+ {
+ my_free((gptr) result,MYF(0));
+ DBUG_RETURN(0);
+ }
+ mysql->affected_rows= result->row_count= result->data->rows;
+ result->data_cursor= result->data->data;
+ result->fields= mysql->fields;
+ result->field_alloc= mysql->field_alloc;
+ result->field_count= mysql->field_count;
+ result->current_field=0;
+ result->current_row=0; /* Must do a fetch first */
+ mysql->fields=0; /* fields is now in result */
+ DBUG_RETURN(result); /* Data fetched */
+}
+
+
+
+
+
+
+
+
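mc_net_field_length() and mc_net_field_length_ll() above implement the
client/server length-encoded integer format: a first byte below 251 is the
value itself, 251 means SQL NULL, 252 announces a 2-byte value, 253 a 3-byte
value, and 254 an 8-byte value (the ulong variant still skips all 9 bytes but
keeps only 4 of them). A standalone sketch of the same decoding rule, with
illustrative names and a stand-in for the library's NULL_LENGTH sentinel:

    #include <cstdint>

    static const uint64_t kNullLength = ~(uint64_t) 0;  // stands in for NULL_LENGTH

    // Decode one length-encoded integer and advance *packet past it,
    // mirroring mc_net_field_length_ll() above.
    static uint64_t decode_length(const unsigned char **packet)
    {
      const unsigned char *pos = *packet;
      if (*pos < 251)  { (*packet)++;    return *pos; }
      if (*pos == 251) { (*packet)++;    return kNullLength; }     // SQL NULL
      if (*pos == 252) { (*packet) += 3;
                         return (uint64_t) pos[1] | ((uint64_t) pos[2] << 8); }
      if (*pos == 253) { (*packet) += 4;
                         return (uint64_t) pos[1] | ((uint64_t) pos[2] << 8) |
                                ((uint64_t) pos[3] << 16); }
      (*packet) += 9;                    // marker 254: 8-byte value follows
      uint64_t val = 0;
      for (int i = 8; i >= 1; i--)       // little-endian, like uint8korr()
        val = (val << 8) | pos[i];
      return val;
    }

mc_read_rows() calls this per field to learn how many data bytes follow, or
that the field is NULL.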
diff --git a/sql/mini_client.h b/sql/mini_client.h
index f7d95a1b66e..22cdb31f846 100644
--- a/sql/mini_client.h
+++ b/sql/mini_client.h
@@ -42,6 +42,17 @@ char * STDCALL mc_mysql_error(MYSQL *mysql);
int STDCALL mc_mysql_errno(MYSQL *mysql);
my_bool STDCALL mc_mysql_reconnect(MYSQL* mysql);
+int STDCALL mc_mysql_send_query(MYSQL* mysql, const char* query, uint length);
+int STDCALL mc_mysql_read_query_result(MYSQL *mysql);
+int STDCALL mc_mysql_query(MYSQL *mysql, const char *query, uint length);
+MYSQL_RES * STDCALL mc_mysql_store_result(MYSQL *mysql);
+void STDCALL mc_mysql_free_result(MYSQL_RES *result);
+void STDCALL mc_mysql_data_seek(MYSQL_RES *result, my_ulonglong row);
+my_ulonglong STDCALL mc_mysql_num_rows(MYSQL_RES *res);
+unsigned int STDCALL mc_mysql_num_fields(MYSQL_RES *res);
+MYSQL_ROW STDCALL mc_mysql_fetch_row(MYSQL_RES *res);
+int STDCALL mc_mysql_select_db(MYSQL *mysql, const char *db);
+
#endif
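Taken together, the new declarations give the replication code a buffered
result path. A hedged usage sketch for an already-connected handle, assuming
the client types (MYSQL, MYSQL_RES, MYSQL_ROW) and this header are available
on the include path; error handling is abbreviated:

    #include <cstdio>
    #include "mini_client.h"   // assumed include; pulls in the MYSQL* types

    // Run a query and print every row, using only the entry points above.
    static int dump_query(MYSQL *mysql, const char *query)
    {
      if (mc_mysql_query(mysql, query, 0))   // length 0 => strlen() inside
        return -1;
      MYSQL_RES *res = mc_mysql_store_result(mysql);
      if (!res)
        return -1;
      unsigned int cols = mc_mysql_num_fields(res);
      for (MYSQL_ROW row = mc_mysql_fetch_row(res); row;
           row = mc_mysql_fetch_row(res))
      {
        for (unsigned int i = 0; i < cols; i++)
          printf("%s%c", row[i] ? row[i] : "NULL",
                 i + 1 < cols ? '\t' : '\n');
      }
      mc_mysql_free_result(res);
      return 0;
    }

With a buffered result mc_mysql_fetch_row() walks the MYSQL_ROWS list built by
mc_read_rows(); on an unbuffered handle it falls back to mc_read_one_row(), as
the definitions in mini_client.cc above show.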
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index d00eb09a363..63c9478d236 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -26,7 +26,6 @@
#include <thr_lock.h>
#include <my_base.h> /* Needed by field.h */
#include <my_bitmap.h>
-#include <violite.h>
#undef write // remove pthread.h macro definition for EMX
@@ -35,6 +34,7 @@ typedef ulong key_map; /* Used for finding keys */
typedef ulong key_part_map; /* Used for finding key parts */
#include "mysql_com.h"
+#include <violite.h>
#include "unireg.h"
void init_sql_alloc(MEM_ROOT *root, uint block_size, uint pre_alloc_size);
@@ -147,7 +147,7 @@ void kill_one_thread(THD *thd, ulong id);
#define SELECT_BIG_RESULT 16
#define OPTION_FOUND_ROWS 32
#define SELECT_HIGH_PRIORITY 64 /* Intern */
-#define SELECT_USE_CACHE 256 /* Intern */
+#define SELECT_NO_JOIN_CACHE 256 /* Intern */
#define OPTION_BIG_TABLES 512 /* for SQL OPTION */
#define OPTION_BIG_SELECTS 1024 /* for SQL OPTION */
@@ -223,7 +223,7 @@ inline THD *_current_thd(void)
#include "opt_range.h"
-void mysql_create_db(THD *thd, char *db, uint create_info);
+int mysql_create_db(THD *thd, char *db, uint create_info);
void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags);
int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists);
int quick_rm_table(enum db_type base,const char *db,
@@ -232,6 +232,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list);
bool mysql_change_db(THD *thd,const char *name);
void mysql_parse(THD *thd,char *inBuf,uint length);
void mysql_init_select(LEX *lex);
+void mysql_new_select(LEX *lex);
void init_max_user_conn(void);
void free_max_user_conn(void);
pthread_handler_decl(handle_one_connection,arg);
@@ -245,7 +246,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char* packet, uint packet_length);
bool check_stack_overrun(THD *thd,char *dummy);
bool reload_acl_and_cache(THD *thd, uint options, TABLE_LIST *tables);
-void mysql_rm_db(THD *thd,char *db,bool if_exists);
+int mysql_rm_db(THD *thd,char *db,bool if_exists);
void table_cache_init(void);
void table_cache_free(void);
uint cached_tables(void);
@@ -304,6 +305,7 @@ int mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &list,COND *conds,
List<Item_func_match> &ftfuncs,
ORDER *order, ORDER *group,Item *having,ORDER *proc_param,
uint select_type,select_result *result);
+int mysql_union(THD *thd,LEX *lex, uint no);
Field *create_tmp_field(TABLE *table,Item *item, Item::Type type,
Item_result_field ***copy_func, Field **from_field,
bool group,bool modify_item);
@@ -473,8 +475,7 @@ pthread_handler_decl(handle_manager, arg);
#ifndef DBUG_OFF
void print_where(COND *cond,const char *info);
void print_cached_tables(void);
-void TEST_filesort(TABLE **form,SORT_FIELD *sortorder,uint s_length,
- ha_rows special);
+void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special);
#endif
void mysql_print_status(THD *thd);
/* key.cc */
@@ -520,7 +521,7 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open,
LOCK_thread_count,LOCK_mapped_file,LOCK_user_locks, LOCK_status,
LOCK_grant, LOCK_error_log, LOCK_delayed_insert,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
- LOCK_binlog_update, LOCK_slave, LOCK_server_id;
+ LOCK_binlog_update, LOCK_slave, LOCK_server_id, LOCK_slave_list;
extern pthread_cond_t COND_refresh,COND_thread_count, COND_binlog_update,
COND_slave_stopped, COND_slave_start;
extern pthread_attr_t connection_attrib;
@@ -548,7 +549,7 @@ extern ulong keybuff_size,sortbuff_size,max_item_sort_length,table_cache_size,
binlog_cache_size, max_binlog_cache_size;
extern ulong specialflag, current_pid;
extern bool low_priority_updates, using_update_log;
-extern bool opt_sql_bin_update, opt_safe_show_db;
+extern bool opt_sql_bin_update, opt_safe_show_db, opt_warnings;
extern char language[LIBLEN],reg_ext[FN_EXTLEN],blob_newline;
extern const char **errmesg; /* Error messages */
extern const char *default_tx_isolation_name;
@@ -616,7 +617,7 @@ void init_read_record(READ_RECORD *info, THD *thd, TABLE *reg_form,
SQL_SELECT *select,
int use_record_cache, bool print_errors);
void end_read_record(READ_RECORD *info);
-ha_rows filesort(TABLE **form,struct st_sort_field *sortorder, uint s_length,
+ha_rows filesort(TABLE *form,struct st_sort_field *sortorder, uint s_length,
SQL_SELECT *select, ha_rows special,ha_rows max_rows,
ha_rows *examined_rows);
void change_double_for_sort(double nr,byte *to);
@@ -667,7 +668,7 @@ extern int sql_cache_hit(THD *thd, char *inBuf, uint length);
inline bool add_item_to_list(Item *item)
{
- return current_lex->item_list.push_back(item);
+ return current_lex->select->item_list.push_back(item);
}
inline bool add_value_to_list(Item *value)
{
@@ -675,11 +676,11 @@ inline bool add_value_to_list(Item *value)
}
inline bool add_order_to_list(Item *item,bool asc)
{
- return add_to_list(current_lex->order_list,item,asc);
+ return add_to_list(current_lex->select->order_list,item,asc);
}
inline bool add_group_to_list(Item *item,bool asc)
{
- return add_to_list(current_lex->group_list,item,asc);
+ return add_to_list(current_lex->select->group_list,item,asc);
}
inline void mark_as_null_row(TABLE *table)
{
diff --git a/sql/mysqlbinlog.cc b/sql/mysqlbinlog.cc
index f0a9692cc2d..5edfe6e0591 100644
--- a/sql/mysqlbinlog.cc
+++ b/sql/mysqlbinlog.cc
@@ -108,7 +108,7 @@ static void die(const char* fmt, ...)
static void print_version()
{
- printf("%s Ver 1.3 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
+ printf("%s Ver 1.4 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
}
@@ -132,7 +132,7 @@ the mysql command line client\n\n");
-s, --short-form Just show the queries, no extra info\n\
-o, --offset=N Skip the first N entries\n\
-h, --host=server Get the binlog from server\n\
--P, --port=port Use port to connect to the remove server\n\
+-P, --port=port Use port to connect to the remote server\n\
-u, --user=username Connect to the remove server as username\n\
-p, --password=password Password to connect to remote server\n\
-r, --result-file=file Direct output to a given file\n\
@@ -303,14 +303,12 @@ static void dump_remote_log_entries(const char* logname)
uint len;
NET* net = &mysql->net;
if(!position) position = 4; // protect the innocent from spam
- if(position < 4)
- {
- position = 4;
- // warn the guity
- fprintf(stderr,
- "Warning: with the position so small you would hit the magic number\n\
-Unfortunately, no sweepstakes today, adjusted position to 4\n");
- }
+ if (position < 4)
+ {
+ position = 4;
+    // warn the guilty
+ sql_print_error("Warning: The position in the binary log can't be less than 4.\nStarting from position 4\n");
+ }
int4store(buf, position);
int2store(buf + 4, binlog_flags);
len = (uint) strlen(logname);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 91fde2b9131..6eb4625224e 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -20,6 +20,7 @@
#include <my_dir.h>
#include "sql_acl.h"
#include "slave.h"
+#include "sql_repl.h"
#include "stacktrace.h"
#ifdef HAVE_BERKELEY_DB
#include "ha_berkeley.h"
@@ -34,7 +35,6 @@
#include <nisam.h>
#include <thr_alarm.h>
#include <ft_global.h>
-#include "vio.h"
#ifndef DBUG_OFF
#define ONE_THREAD
@@ -95,17 +95,16 @@ int deny_severity = LOG_WARNING;
#include <sys/mman.h>
#endif
+#ifdef _AIX41
+int initgroups(const char *,unsigned int);
+#endif
+
#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H)
#include <ieeefp.h>
#ifdef HAVE_FP_EXCEPT // Fix type conflict
typedef fp_except fp_except_t;
#endif
-#ifdef _AIX41
-extern "C" int initgroups(const char *,int);
-#endif
-
-
/* We can't handle floating point expections with threads, so disable
this on freebsd
*/
@@ -207,6 +206,7 @@ SHOW_COMP_OPTION have_ssl=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_ssl=SHOW_OPTION_NO;
#endif
+SHOW_COMP_OPTION have_symlink=SHOW_OPTION_YES;
static bool opt_skip_slave_start = 0; // if set, slave is not autostarted
@@ -220,9 +220,10 @@ static char mysql_home[FN_REFLEN],pidfile_name[FN_REFLEN];
static pthread_t select_thread;
static bool opt_log,opt_update_log,opt_bin_log,opt_slow_log,opt_noacl,
opt_disable_networking=0, opt_bootstrap=0,opt_skip_show_db=0,
- opt_ansi_mode=0,opt_myisam_log=0,
+ opt_ansi_mode=0,opt_myisam_log=0,
opt_large_files=sizeof(my_off_t) > 4;
-bool opt_sql_bin_update = 0, opt_log_slave_updates = 0, opt_safe_show_db=0;
+bool opt_sql_bin_update = 0, opt_log_slave_updates = 0, opt_safe_show_db=0,
+ opt_show_slave_auth_info = 0;
FILE *bootstrap_file=0;
int segfaulted = 0; // ensure we do not enter SIGSEGV handler twice
extern MASTER_INFO glob_mi;
@@ -244,7 +245,7 @@ static char *opt_ssl_key = 0;
static char *opt_ssl_cert = 0;
static char *opt_ssl_ca = 0;
static char *opt_ssl_capath = 0;
-static struct st_VioSSLAcceptorFd * ssl_acceptor_fd = 0;
+struct st_VioSSLAcceptorFd * ssl_acceptor_fd = 0;
#endif /* HAVE_OPENSSL */
@@ -277,15 +278,18 @@ volatile ulong cached_thread_count=0;
// replication parameters, if master_host is not NULL, we are a slave
my_string master_user = (char*) "test", master_password = 0, master_host=0,
master_info_file = (char*) "master.info";
+my_string report_user = 0, report_password = 0, report_host=0;
+
const char *localhost=LOCAL_HOST;
const char *delayed_user="DELAYED";
uint master_port = MYSQL_PORT, master_connect_retry = 60;
+uint report_port = MYSQL_PORT;
ulong max_tmp_tables,max_heap_table_size;
ulong bytes_sent = 0L, bytes_received = 0L;
bool opt_endinfo,using_udf_functions,low_priority_updates, locked_in_memory;
-bool opt_using_transactions, using_update_log;
+bool opt_using_transactions, using_update_log, opt_warnings=0;
bool volatile abort_loop,select_thread_in_use,grant_option;
bool volatile ready_to_exit,shutdown_in_progress;
ulong refresh_version=1L,flush_version=1L; /* Increments on each reload */
@@ -341,7 +345,7 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
LOCK_binlog_update, LOCK_slave, LOCK_server_id,
- LOCK_user_conn;
+ LOCK_user_conn, LOCK_slave_list;
pthread_cond_t COND_refresh,COND_thread_count,COND_binlog_update,
COND_slave_stopped, COND_slave_start;
@@ -695,6 +699,7 @@ void clean_up(bool print_message)
bitmap_free(&temp_pool);
free_max_user_conn();
end_slave();
+ end_slave_list();
#ifndef __WIN__
if (!opt_bootstrap)
(void) my_delete(pidfile_name,MYF(0)); // This may not always exist
@@ -1200,12 +1205,12 @@ Some pointers may be invalid and cause the dump to abort...\n");
fprintf(stderr, "\n
Successfully dumped variables, if you ran with --log, take a look at the\n\
details of what thread %ld did to cause the crash. In some cases of really\n\
-bad corruption, the above values may be invalid\n\n",
+bad corruption, the values shown above may be invalid\n\n",
thd->thread_id);
}
fprintf(stderr, "\
-Please use the information above to create a repeatable test case for the\n\
-crash, and send it to bugs@lists.mysql.com\n");
+The manual page at http://www.mysql.com/doc/C/r/Crashing.html contains\n\
+information that should help you find out what is causing the crash\n");
fflush(stderr);
#endif /* HAVE_STACKTRACE */
@@ -1685,7 +1690,8 @@ int main(int argc, char **argv)
randominit(&sql_rand,(ulong) start_time,(ulong) start_time/2);
reset_floating_point_exceptions();
init_thr_lock();
-
+ init_slave_list();
+
/* Fix varibles that are base 1024*1024 */
myisam_max_temp_length= (my_off_t) min(((ulonglong) myisam_max_sort_file_size)*1024*1024, (ulonglong) MAX_FILE_SIZE);
myisam_max_extra_temp_length= (my_off_t) min(((ulonglong) myisam_max_extra_sort_file_size)*1024*1024, (ulonglong) MAX_FILE_SIZE);
@@ -2473,14 +2479,16 @@ enum options {
OPT_INNODB_LOG_ARCH_DIR,
OPT_INNODB_LOG_ARCHIVE,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
- OPT_INNODB_UNIX_FILE_FLUSH_METHOD,
+ OPT_INNODB_FLUSH_METHOD,
OPT_SAFE_SHOW_DB,
OPT_GEMINI_SKIP, OPT_INNODB_SKIP,
OPT_TEMP_POOL, OPT_DO_PSTACK, OPT_TX_ISOLATION,
OPT_GEMINI_FLUSH_LOG, OPT_GEMINI_RECOVER,
OPT_GEMINI_UNBUFFERED_IO, OPT_SKIP_SAFEMALLOC,
- OPT_SKIP_STACK_TRACE
-};
+ OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINK, OPT_REPORT_HOST,
+ OPT_REPORT_USER, OPT_REPORT_PASSWORD, OPT_REPORT_PORT,
+ OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL,
+ OPT_SHOW_SLAVE_AUTH_INFO};
static struct option long_options[] = {
{"ansi", no_argument, 0, 'a'},
@@ -2536,8 +2544,8 @@ static struct option long_options[] = {
OPT_INNODB_LOG_ARCHIVE},
{"innodb_flush_log_at_trx_commit", optional_argument, 0,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT},
- {"innodb_unix_file_flush_method", required_argument, 0,
- OPT_INNODB_UNIX_FILE_FLUSH_METHOD},
+ {"innodb_flush_method", required_argument, 0,
+ OPT_INNODB_FLUSH_METHOD},
#endif
{"help", no_argument, 0, '?'},
{"init-file", required_argument, 0, (int) OPT_INIT_FILE},
@@ -2565,6 +2573,10 @@ static struct option long_options[] = {
(int) OPT_DISCONNECT_SLAVE_EVENT_COUNT},
{"abort-slave-event-count", required_argument, 0,
(int) OPT_ABORT_SLAVE_EVENT_COUNT},
+ {"max-binlog-dump-events", required_argument, 0,
+ (int) OPT_MAX_BINLOG_DUMP_EVENTS},
+ {"sporadic-binlog-dump-fail", no_argument, 0,
+ (int) OPT_SPORADIC_BINLOG_DUMP_FAIL},
{"safemalloc-mem-limit", required_argument, 0, (int)
OPT_SAFEMALLOC_MEM_LIMIT},
{"new", no_argument, 0, 'n'},
@@ -2587,11 +2599,19 @@ static struct option long_options[] = {
(int) OPT_REPLICATE_WILD_IGNORE_TABLE},
{"replicate-rewrite-db", required_argument, 0,
(int) OPT_REPLICATE_REWRITE_DB},
+ // In replication, we may need to tell the other servers how to connect
+ // to us
+ {"report-host", required_argument, 0, (int) OPT_REPORT_HOST},
+ {"report-user", required_argument, 0, (int) OPT_REPORT_USER},
+ {"report-password", required_argument, 0, (int) OPT_REPORT_PASSWORD},
+ {"report-port", required_argument, 0, (int) OPT_REPORT_PORT},
{"safe-mode", no_argument, 0, (int) OPT_SAFE},
{"safe-show-database", no_argument, 0, (int) OPT_SAFE_SHOW_DB},
{"socket", required_argument, 0, (int) OPT_SOCKET},
{"server-id", required_argument, 0, (int) OPT_SERVER_ID},
{"set-variable", required_argument, 0, 'O'},
+ {"show-slave-auth-info", no_argument, 0,
+ (int) OPT_SHOW_SLAVE_AUTH_INFO},
{"skip-bdb", no_argument, 0, (int) OPT_BDB_SKIP},
{"skip-innodb", no_argument, 0, (int) OPT_INNODB_SKIP},
{"skip-gemini", no_argument, 0, (int) OPT_GEMINI_SKIP},
@@ -2607,6 +2627,7 @@ static struct option long_options[] = {
{"skip-show-database", no_argument, 0, (int) OPT_SKIP_SHOW_DB},
{"skip-slave-start", no_argument, 0, (int) OPT_SKIP_SLAVE_START},
{"skip-stack-trace", no_argument, 0, (int) OPT_SKIP_STACK_TRACE},
+ {"skip-symlink", no_argument, 0, (int) OPT_SKIP_SYMLINK},
{"skip-thread-priority", no_argument, 0, (int) OPT_SKIP_PRIOR},
{"sql-bin-update-same", no_argument, 0, (int) OPT_SQL_BIN_UPDATE_SAME},
#include "sslopt-longopts.h"
@@ -2622,6 +2643,7 @@ static struct option long_options[] = {
#endif
{"user", required_argument, 0, 'u'},
{"version", no_argument, 0, 'V'},
+ {"warnings", no_argument, 0, 'W'},
{0, 0, 0, 0}
};
@@ -2817,6 +2839,7 @@ struct show_var_st init_vars[]= {
{"have_innodb", (char*) &have_innodb, SHOW_HAVE},
{"have_isam", (char*) &have_isam, SHOW_HAVE},
{"have_raid", (char*) &have_raid, SHOW_HAVE},
+ {"have_symlink", (char*) &have_symlink, SHOW_HAVE},
{"have_ssl", (char*) &have_ssl, SHOW_HAVE},
{"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR},
#ifdef HAVE_INNOBASE_DB
@@ -2826,7 +2849,7 @@ struct show_var_st init_vars[]= {
{"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR},
{"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL},
{"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
- {"innodb_unix_file_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
+ {"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
#endif
{"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG},
{"join_buffer_size", (char*) &join_buff_size, SHOW_LONG},
@@ -3038,6 +3061,8 @@ static void usage(void)
-O, --set-variable var=option\n\
                        Give a variable a value. --help lists variables\n\
--safe-mode Skip some optimize stages (for testing)\n\
+ --safe-show-database Don't show databases for which the user has no\n\
+ privileges\n\
--skip-concurrent-insert\n\
Don't use concurrent insert with MyISAM\n\
--skip-delay-key-write\n\
@@ -3054,6 +3079,7 @@ static void usage(void)
/* We have to break the string here because of VC++ limits */
puts("\
--skip-stack-trace Don't print a stack trace on failure\n\
+ --skip-symlink Don't allow symlinking of tables\n\
--skip-show-database Don't allow 'SHOW DATABASE' commands\n\
--skip-thread-priority\n\
Don't give threads different priorities.\n\
@@ -3063,14 +3089,19 @@ static void usage(void)
Default transaction isolation level\n\
--temp-pool Use a pool of temporary files\n\
-u, --user=user_name Run mysqld daemon as user\n\
- -V, --version output version information and exit");
+ -V, --version output version information and exit\n\
+  -W, --warnings        Log some non-critical warnings to the log file\n");
#ifdef __WIN__
puts("NT and Win32 specific options:\n\
--console Don't remove the console window\n\
--install Install mysqld as a service (NT)\n\
--remove Remove mysqld from the service list (NT)\n\
- --standalone Dummy option to start as a standalone program (NT)\n\
+ --standalone Dummy option to start as a standalone program (NT)\
");
+#ifdef USE_SYMDIR
+ puts("--use-symbolic-links Enable symbolic link support");
+#endif
+ puts("");
#endif
#ifdef HAVE_BERKELEY_DB
puts("\
@@ -3099,6 +3130,7 @@ static void usage(void)
puts("\
--innodb_data_home_dir=dir The common part for Innodb table spaces\n\
--innodb_data_file_path=dir Path to individual files and their sizes\n\
+  --innodb_flush_method=# Which method to use for flushing data\n\
--innodb_flush_log_at_trx_commit[=#]\n\
Set to 0 if you don't want to flush logs\n\
--innodb_log_arch_dir=dir Where full logs should be archived\n\
@@ -3192,7 +3224,9 @@ static void get_options(int argc,char **argv)
int c,option_index=0;
myisam_delay_key_write=1; // Allow use of this
- while ((c=getopt_long(argc,argv,"ab:C:h:#::T::?l::L:O:P:sS::t:u:noVvI?",
+ my_use_symdir=1; // Use internal symbolic links
+
+ while ((c=getopt_long(argc,argv,"ab:C:h:#::T::?l::L:O:P:sS::t:u:noVvWI?",
long_options, &option_index)) != EOF)
{
switch(c) {
@@ -3202,6 +3236,9 @@ static void get_options(int argc,char **argv)
#endif
opt_endinfo=1; /* unireg: memory allocation */
break;
+ case 'W':
+ opt_warnings=1;
+ break;
case 'a':
opt_ansi_mode=1;
thd_startup_options|=OPTION_ANSI_MODE;
@@ -3241,6 +3278,9 @@ static void get_options(int argc,char **argv)
safemalloc_mem_limit = atoi(optarg);
#endif
break;
+ case OPT_SHOW_SLAVE_AUTH_INFO:
+ opt_show_slave_auth_info = 1;
+ break;
case OPT_SOCKET:
mysql_unix_port= optarg;
break;
@@ -3305,6 +3345,17 @@ static void get_options(int argc,char **argv)
abort_slave_event_count = atoi(optarg);
#endif
break;
+ case (int)OPT_SPORADIC_BINLOG_DUMP_FAIL:
+#ifndef DBUG_OFF
+ opt_sporadic_binlog_dump_fail = 1;
+#endif
+ break;
+ case (int)OPT_MAX_BINLOG_DUMP_EVENTS:
+#ifndef DBUG_OFF
+ max_binlog_dump_events = atoi(optarg);
+#endif
+ break;
+
case (int) OPT_LOG_SLAVE_UPDATES:
opt_log_slave_updates = 1;
break;
@@ -3432,6 +3483,9 @@ static void get_options(int argc,char **argv)
myisam_delay_key_write=0;
myisam_concurrent_insert=0;
myisam_recover_options= HA_RECOVER_NONE;
+ my_disable_symlinks=1;
+ my_use_symdir=0;
+ have_symlink=SHOW_OPTION_DISABLED;
ha_open_options&= ~HA_OPEN_ABORT_IF_CRASHED;
break;
case (int) OPT_SAFE:
@@ -3488,6 +3542,11 @@ static void get_options(int argc,char **argv)
case (int) OPT_SKIP_STACK_TRACE:
test_flags|=TEST_NO_STACKTRACE;
break;
+ case (int) OPT_SKIP_SYMLINK:
+ my_disable_symlinks=1;
+ my_use_symdir=0;
+ have_symlink=SHOW_OPTION_DISABLED;
+ break;
case (int) OPT_BIND_ADDRESS:
if (optarg && isdigit(optarg[0]))
{
@@ -3673,7 +3732,7 @@ static void get_options(int argc,char **argv)
case OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT:
innobase_flush_log_at_trx_commit= optarg ? test(atoi(optarg)) : 1;
break;
- case OPT_INNODB_UNIX_FILE_FLUSH_METHOD:
+ case OPT_INNODB_FLUSH_METHOD:
innobase_unix_file_flush_method=optarg;
break;
#endif /* HAVE_INNOBASE_DB */
@@ -3715,6 +3774,18 @@ static void get_options(int argc,char **argv)
case OPT_MASTER_PORT:
master_port= atoi(optarg);
break;
+ case OPT_REPORT_HOST:
+ report_host=optarg;
+ break;
+ case OPT_REPORT_USER:
+ report_user=optarg;
+ break;
+ case OPT_REPORT_PASSWORD:
+ report_password=optarg;
+ break;
+ case OPT_REPORT_PORT:
+ report_port= atoi(optarg);
+ break;
case OPT_MASTER_CONNECT_RETRY:
master_connect_retry= atoi(optarg);
break;
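
The report-* options above give a slave an explicit identity to announce to its
master; the master cannot reliably deduce the address a slave is reachable on
from the incoming connection alone, which is why report_host has no default.
A minimal hypothetical helper (not part of the patch -- the real consumer is in
slave.cc/sql_repl.cc) shows how these globals fit together; whether the reported
user/password are ever displayed is presumably what --show-slave-auth-info
controls on the master side:

// Hypothetical helper, illustrative only.
static bool get_slave_report(const char **host, const char **user,
                             const char **password, uint *port)
{
  if (!report_host)
    return 1;                                  // nothing to announce
  *host=     report_host;
  *user=     report_user     ? report_user     : "";
  *password= report_password ? report_password : "";
  *port=     report_port;                      // defaults to MYSQL_PORT
  return 0;
}
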
diff --git a/sql/net_pkg.cc b/sql/net_pkg.cc
index 073c716d793..0b50b34c7bd 100644
--- a/sql/net_pkg.cc
+++ b/sql/net_pkg.cc
@@ -140,7 +140,7 @@ net_printf(NET *net, uint errcode, ...)
void
send_ok(NET *net,ha_rows affected_rows,ulonglong id,const char *message)
{
- if(net->no_send_ok)
+ if (net->no_send_ok) // hack for re-parsing queries
return;
char buff[MYSQL_ERRMSG_SIZE+10],*pos;
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index a5600dffa4c..cde27d4933a 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -31,6 +31,7 @@
#include <winsock.h>
#endif
#include <global.h>
+#include <mysql_com.h>
#include <violite.h>
#include <my_sys.h>
#include <m_string.h>
@@ -39,7 +40,6 @@
#include <signal.h>
#include <errno.h>
#include <sys/types.h>
-#include <violite.h>
#include <assert.h>
extern "C" {
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index b95b97d670f..0b3ac27d1f6 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -33,6 +33,7 @@
#include <m_ctype.h>
#include <nisam.h>
#include "sql_select.h"
+#include <assert.h>
#ifndef EXTRA_DEBUG
@@ -289,7 +290,6 @@ typedef struct st_qsel_param {
max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
} PARAM;
-
static SEL_TREE * get_mm_parts(PARAM *param,Field *field,
Item_func::Functype type,Item *value,
Item_result cmp_type);
@@ -382,7 +382,7 @@ SQL_SELECT::~SQL_SELECT()
#undef index // Fix for Unixware 7
QUICK_SELECT::QUICK_SELECT(TABLE *table,uint key_nr,bool no_alloc)
- :error(0),index(key_nr),max_used_key_length(0),head(table),
+ :dont_free(0),error(0),index(key_nr),max_used_key_length(0),head(table),
it(ranges),range(0)
{
if (!no_alloc)
@@ -399,8 +399,11 @@ QUICK_SELECT::QUICK_SELECT(TABLE *table,uint key_nr,bool no_alloc)
QUICK_SELECT::~QUICK_SELECT()
{
- file->index_end();
- free_root(&alloc,MYF(0));
+ if (!dont_free)
+ {
+ file->index_end();
+ free_root(&alloc,MYF(0));
+ }
}
int QUICK_SELECT::init()
@@ -2455,8 +2458,8 @@ int QUICK_SELECT::get_next()
if ((error=file->index_first(record)))
DBUG_RETURN(error); // Empty table
if (cmp_next(range) == 0)
- DBUG_RETURN(0); // No matching records
- range=0; // To next range
+ DBUG_RETURN(0);
+ range=0; // No matching records; go to next range
continue;
}
if ((result = file->index_read(record,(byte*) range->min_key,
@@ -2516,6 +2519,223 @@ int QUICK_SELECT::cmp_next(QUICK_RANGE *range)
return (range->flag & NEAR_MAX) ? 1 : 0; // Exact match
}
+
+/*
+ * This is a hack: we inherit from QUICK_SELECT so that we can use the
+ * get_next() interface, but we have to hold a pointer to the original
+ * QUICK_SELECT because its data are used all over the place. What
+ * should be done is to factor out the data that is needed into a base
+ * class (QUICK_SELECT), and then have two subclasses (_ASC and _DESC)
+ * which handle the ranges and implement the get_next() function. But
+ * for now, this seems to work right at least.
+ */
+
+QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts)
+ : QUICK_SELECT(*q), rev_it(rev_ranges)
+{
+ bool not_read_after_key = file->option_flag() & HA_NOT_READ_AFTER_KEY;
+ for (QUICK_RANGE *r = it++; r; r = it++)
+ {
+ rev_ranges.push_front(r);
+    if ((not_read_after_key && range_reads_after_key(r)) ||
+ test_if_null_range(r,used_key_parts))
+ {
+ it.rewind(); // Reset range
+ error = HA_ERR_UNSUPPORTED;
+ dont_free=1; // Don't free memory from 'q'
+ return;
+ }
+ }
+ /* Remove EQ_RANGE flag for keys that are not using the full key */
+ for (QUICK_RANGE *r = rev_it++; r; r = rev_it++)
+ {
+ if ((r->flag & EQ_RANGE) &&
+ head->key_info[index].key_length != r->max_length)
+ r->flag&= ~EQ_RANGE;
+ }
+ rev_it.rewind();
+ q->dont_free=1; // Don't free shared mem
+ delete q;
+}
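/*
  The comment above proposes factoring the shared state into a base class with
  one subclass per scan direction.  Purely as an illustration of that
  suggestion -- none of these names exist in the patch, and TABLE, QUICK_RANGE
  and List<> are the types already used in opt_range.h -- the split could look
  like this:
*/
class QUICK_RANGE_SCAN                       // shared state, owned exactly once
{
public:
  QUICK_RANGE_SCAN(TABLE *table, uint key) : head(table), index(key) {}
  virtual ~QUICK_RANGE_SCAN() {}
  virtual int get_next()= 0;                 // direction-specific scan
protected:
  TABLE *head;
  uint index;
  List<QUICK_RANGE> ranges;
};

class QUICK_SCAN_ASC : public QUICK_RANGE_SCAN
{
public:
  QUICK_SCAN_ASC(TABLE *t, uint key) : QUICK_RANGE_SCAN(t, key) {}
  int get_next();                            // walk ranges front to back
};

class QUICK_SCAN_DESC : public QUICK_RANGE_SCAN
{
public:
  QUICK_SCAN_DESC(TABLE *t, uint key) : QUICK_RANGE_SCAN(t, key) {}
  int get_next();                            // walk ranges back to front
};
/*
  With the ranges owned in one place, the dont_free flag and the "copy *q,
  then delete q" dance in the constructor above would no longer be needed.
*/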
+
+
+int QUICK_SELECT_DESC::get_next()
+{
+ DBUG_ENTER("QUICK_SELECT_DESC::get_next");
+
+ /* The max key is handled as follows:
+ * - if there is NO_MAX_RANGE, start at the end and move backwards
+ * - if it is an EQ_RANGE, which means that max key covers the entire
+ * key, go directly to the key and read through it (sorting backwards is
+ * same as sorting forwards)
+ * - if it is NEAR_MAX, go to the key or next, step back once, and
+ * move backwards
+ * - otherwise (not NEAR_MAX == include the key), go after the key,
+ * step back once, and move backwards
+ */
+
+ for (;;)
+ {
+ int result;
+ if (range)
+ { // Already read through key
+ result = ((range->flag & EQ_RANGE)
+ ? file->index_next_same(record, (byte*) range->min_key,
+ range->min_length) :
+ file->index_prev(record));
+ if (!result)
+ {
+ if (cmp_prev(*rev_it.ref()) == 0)
+ DBUG_RETURN(0);
+ }
+ else if (result != HA_ERR_END_OF_FILE)
+ DBUG_RETURN(result);
+ }
+
+ if (!(range=rev_it++))
+ DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used
+
+ if (range->flag & NO_MAX_RANGE) // Read last record
+ {
+ int error;
+ if ((error=file->index_last(record)))
+ DBUG_RETURN(error); // Empty table
+ if (cmp_prev(range) == 0)
+ DBUG_RETURN(0);
+ range=0; // No matching records; go to next range
+ continue;
+ }
+
+ if (range->flag & EQ_RANGE)
+ {
+ result = file->index_read(record, (byte*) range->max_key,
+ range->max_length, HA_READ_KEY_EXACT);
+ }
+ else
+ {
+      dbug_assert((range->flag & NEAR_MAX) || range_reads_after_key(range));
+ /* Note: even if max_key is only a prefix, HA_READ_AFTER_KEY will
+ * do the right thing - go past all keys which match the prefix */
+ result=file->index_read(record, (byte*) range->max_key,
+ range->max_length,
+ ((range->flag & NEAR_MAX) ?
+ HA_READ_KEY_EXACT : HA_READ_AFTER_KEY));
+ result = file->index_prev(record);
+ }
+ if (result)
+ {
+ if (result != HA_ERR_KEY_NOT_FOUND)
+ DBUG_RETURN(result);
+ range=0; // Not found, to next range
+ continue;
+ }
+ if (cmp_prev(range) == 0)
+ {
+ if (range->flag == (UNIQUE_RANGE | EQ_RANGE))
+ range = 0; // Stop searching
+ DBUG_RETURN(0); // Found key is in range
+ }
+ range = 0; // To next range
+ }
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
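/*
  Taken together, the constructor and get_next() above let the optimizer run an
  existing range scan backwards.  A hypothetical call site (illustrative only:
  'select' is an SQL_SELECT and 'used_key_parts' comes from the ORDER BY
  analysis; the real wiring lives elsewhere in the optimizer):

    QUICK_SELECT *quick= select->quick;
    QUICK_SELECT_DESC *desc= new QUICK_SELECT_DESC(quick, used_key_parts);
    if (desc->error)          // HA_ERR_UNSUPPORTED: keep the ascending scan
      delete desc;            // safe: dont_free is set, 'quick' keeps its data
    else
      select->quick= desc;    // the constructor has already deleted 'quick'
*/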
+
+/*
+ * Returns 0 if found key is inside range (found key >= range->min_key).
+ */
+int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range)
+{
+ if (range->flag & NO_MIN_RANGE)
+    return (0); /* key can't be too small */
+
+ KEY_PART *key_part = key_parts;
+ for (char *key = range->min_key, *end = key + range->min_length;
+ key < end;
+ key += key_part++->part_length)
+ {
+ int cmp;
+ if (key_part->null_bit)
+ {
+ // this key part allows null values; NULL is lower than everything else
+ if (*key++)
+ {
+ // the range is expecting a null value
+ if (!key_part->field->is_null())
+ return 0; // not null -- still inside the range
+ continue; // null -- exact match, go to next key part
+ }
+ else if (key_part->field->is_null())
+ return 1; // null -- outside the range
+ }
+ if ((cmp = key_part->field->key_cmp((byte*) key,
+ key_part->part_length)) > 0)
+ return 0;
+ if (cmp < 0)
+ return 1;
+ }
+ return (range->flag & NEAR_MIN) ? 1 : 0; // Exact match
+}
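/*
  A concrete illustration of cmp_prev(): for a single-part range built from
  "a > 7" (min_key holds 7, NEAR_MIN set), a row with a = 9 compares greater
  than min_key and returns 0 (inside the range), a row with a = 5 compares
  smaller and returns 1, and a row with a = 7 matches every key part exactly,
  so the final NEAR_MIN check returns 1 because the bound is strict.
*/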
+
+/*
+ * True if this range will require using HA_READ_AFTER_KEY
+ * See comment in get_next() about this
+ */
+
+bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range)
+{
+ return ((range->flag & (NO_MAX_RANGE | NEAR_MAX)) ||
+ !(range->flag & EQ_RANGE) ||
+ head->key_info[index].key_length != range->max_length) ? 1 : 0;
+}
+
+/* True if we are reading over a key that may have a NULL value */
+
+bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range,
+ uint used_key_parts)
+{
+ uint offset,end;
+ KEY_PART *key_part = key_parts,
+ *key_part_end= key_part+used_key_parts;
+
+ for (offset= 0, end = min(range->min_length, range->max_length) ;
+ offset < end && key_part != key_part_end ;
+ offset += key_part++->part_length)
+ {
+ uint null_length=test(key_part->null_bit);
+ if (!memcmp((char*) range->min_key+offset, (char*) range->max_key+offset,
+ key_part->part_length + null_length))
+ {
+ offset+=null_length;
+ continue;
+ }
+ if (null_length && range->min_key[offset])
+ return 1; // min_key is null and max_key isn't
+      // Range doesn't cover NULL. This is ok if there are no more NULL parts
+ break;
+ }
+ /*
+ If the next min_range is > NULL, then we can use this, even if
+ it's a NULL key
+    Example: SELECT * FROM t1 WHERE a = 2 AND b > 0 ORDER BY a DESC, b DESC;
+  */
+ if (key_part != key_part_end && key_part->null_bit)
+ {
+ if (offset >= range->min_length || range->min_key[offset])
+ return 1; // Could be null
+ key_part++;
+ }
+ /*
+ If any of the key parts used in the ORDER BY could be NULL, we can't
+ use the key to sort the data.
+ */
+ for (; key_part != key_part_end ; key_part++)
+ if (key_part->null_bit)
+ return 1; // Covers null part
+ return 0;
+}
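/*
  Walking the example from the comment above through test_if_null_range():
  for WHERE a = 2 AND b > 0 on a key (a,b) with b nullable, max_key only
  constrains a, so the first loop stops after the common "a" prefix.  The next
  key part is the nullable b; its NULL-indicator byte in min_key is 0 because
  b > 0 excludes NULL, so the "could be null" branch is not taken, b is
  skipped, no key parts remain, and the function returns 0 -- the descending
  scan is allowed.
*/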
+
+
/*****************************************************************************
** Print a quick range for debugging
** TODO:
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 247dd260817..50215b94be0 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -54,9 +54,10 @@ class QUICK_RANGE :public Sql_alloc {
{}
};
+
class QUICK_SELECT {
public:
- bool next;
+ bool next,dont_free;
int error;
uint index,max_used_key_length;
TABLE *head;
@@ -80,6 +81,21 @@ public:
bool unique_key_range();
};
+
+class QUICK_SELECT_DESC: public QUICK_SELECT
+{
+public:
+ QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts);
+ int get_next();
+private:
+ int cmp_prev(QUICK_RANGE *range);
+ bool range_reads_after_key(QUICK_RANGE *range);
+ bool test_if_null_range(QUICK_RANGE *range, uint used_key_parts);
+ void reset(void) { next=0; rev_it.rewind(); }
+ List<QUICK_RANGE> rev_ranges;
+ List_iterator<QUICK_RANGE> rev_it;
+};
+
class SQL_SELECT :public Sql_alloc {
public:
QUICK_SELECT *quick; // If quick-select used
diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt
index 666d70c957a..a8d7c187ad3 100644
--- a/sql/share/czech/errmsg.txt
+++ b/sql/share/czech/errmsg.txt
@@ -215,3 +215,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt
index 9f1f6accc1f..57a6ad3d13f 100644
--- a/sql/share/danish/errmsg.txt
+++ b/sql/share/danish/errmsg.txt
@@ -209,3 +209,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt
index 8b44af7eb7b..b886ba43f6f 100644
--- a/sql/share/dutch/errmsg.txt
+++ b/sql/share/dutch/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt
index ff29fffe958..f0887f5b376 100644
--- a/sql/share/english/errmsg.txt
+++ b/sql/share/english/errmsg.txt
@@ -153,7 +153,7 @@
"You have an error in your SQL syntax",
"Delayed insert thread couldn't get requested lock for table %-.64s",
"Too many delayed threads in use",
-"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)",
+"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s) - see http://www.mysql.com/doc/C/o/Communication_errors.html",
"Got a packet bigger than 'max_allowed_packet'",
"Got a read error from the connection pipe",
"Got an error from fcntl()",
@@ -185,7 +185,7 @@
"Got error %d during ROLLBACK",
"Got error %d during FLUSH_LOGS",
"Got error %d during CHECKPOINT",
-"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)",
+"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s) - see http://www.mysql.com/doc/C/o/Communication_errors.html",
"The handler for the table does not support binary table dump",
"Binlog closed, cannot RESET MASTER",
"Failed rebuilding the index of dumped table '%-.64s'",
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt
index e807c8d4a64..77a7d2f7841 100644
--- a/sql/share/estonian/errmsg.txt
+++ b/sql/share/estonian/errmsg.txt
@@ -210,3 +210,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt
index 5cbcfe81b87..2e375bd5e15 100644
--- a/sql/share/french/errmsg.txt
+++ b/sql/share/french/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt
index 307ed7a00f4..85289b46967 100644
--- a/sql/share/german/errmsg.txt
+++ b/sql/share/german/errmsg.txt
@@ -2,7 +2,7 @@
This file is public domain and comes with NO WARRANTY of any kind
Dirk Munzinger (dmun@4t2.com)
- Version: 17.03.1999 */
+ Version: 07.06.2001 */
"hashchk",
"isamchk",
@@ -196,16 +196,24 @@
"Netzfehler beim Lesen vom Master",
"Netzfehler beim Schreiben zum Master",
"Kann keinen FULLTEXT-Index finden der der Spaltenliste entspricht",
-"Can't execute the given command because you have active locked tables or an active transaction",
-"Unknown system variable '%-.64'",
-"Table '%-.64s' is marked as crashed and should be repaired",
-"Table '%-.64s' is marked as crashed and last (automatic?) repair failed",
-"Warning: Some non-transactional changed tables couldn't be rolled back",
-"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. Increase this mysqld variable and try again',
-"This operation cannot be performed with a running slave, run SLAVE STOP first",
-"This operation requires a running slave, configure slave and do SLAVE START",
-"The server is not configured as slave, fix in config file or with CHANGE MASTER TO",
-"Could not initialize master info structure, check permisions on master.info",
-"Could not create slave thread, check system resources",
-"User %-.64s has already more than 'max_user_connections' active connections",
-"You may only use constant expressions with SET",
+"Kann das aktuelle Kommando wegen aktiver Tabellensperre oder aktiver Transaktion nicht ausführen",
+"Unbekannte System-Variable '%-.64'",
+"Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden",
+"Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische) Reparaturversuch schlug fehl.",
+"Warnung: Das Rollback konnte bei einigen Tabellen, die nicht mittels Transaktionen geändert wurden, nicht ausgeführt werden.",
+"Multi-Statement Transaktionen benötigen mehr als 'max_binlog_cache_size' Bytes an Speicher. Diese mysqld-Variable vergrössern und nochmal versuchen.",
+"Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. Das Kommando SLAVE STOP muss zuerst ausgeführt werden.",
+"Diese Operation benötigt einen aktiven Slave. Slave konfigurieren und mittels SLAVE START aktivieren.",
+"Der Server ist nicht als Slave konfiguriert. Im Konfigurations-File oder mittels CHANGE MASTER TO beheben.",
+"Konnte Master-Info-Struktur nicht initialisieren; Berechtigungen von master.info prüfen.",
+"Konnte keinen Slave-Thread starten. System-Resourcen überprüfen.",
+"Benutzer %-.64s hat mehr als 'max_user_connections' aktive Verbindungen",
+"Bei der Verwendung mit SET dürfen nur konstante Ausdrücke verwendet werden",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt
index 119de63b2a7..bdae260f2f8 100644
--- a/sql/share/greek/errmsg.txt
+++ b/sql/share/greek/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt
index 7e9b9e6a3bf..f2d45b94b50 100644
--- a/sql/share/hungarian/errmsg.txt
+++ b/sql/share/hungarian/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt
index d6c857d44a4..a46e712a6e6 100644
--- a/sql/share/italian/errmsg.txt
+++ b/sql/share/italian/errmsg.txt
@@ -199,10 +199,18 @@
"La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita",
"Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)",
"La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare',
-"This operation cannot be performed with a running slave, run SLAVE STOP first",
-"This operation requires a running slave, configure slave and do SLAVE START",
-"The server is not configured as slave, fix in config file or with CHANGE MASTER TO",
-"Could not initialize master info structure, check permisions on master.info",
-"Could not create slave thread, check system resources",
-"User %-.64s has already more than 'max_user_connections' active connections",
-"You may only use constant expressions with SET",
+"Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima SLAVE STOP",
+"Questa operazione richiede un database 'slave', configurarlo ed eseguire SLAVE START",
+"Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO",
+"Impossibile inizializzare la struttura 'master info', controllare i permessi sul file master.info",
+"Impossibile creare il thread 'slave', controllare le risorse di sistema",
+"L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive",
+"Si possono usare solo espressioni costanti con SET",
+"E' scaduto il timeout per l'attesa del lock",
+"Il numero totale di lock e' maggiore della grandezza della tabella di lock",
+"I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt
index a62f22c253d..6d5ab99f86d 100644
--- a/sql/share/japanese/errmsg.txt
+++ b/sql/share/japanese/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt
index c476ad8fa3c..5fa44f581bf 100644
--- a/sql/share/korean/errmsg.txt
+++ b/sql/share/korean/errmsg.txt
@@ -206,3 +206,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt
index 2a57c93cc84..f45daa00449 100644
--- a/sql/share/norwegian-ny/errmsg.txt
+++ b/sql/share/norwegian-ny/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt
index cf23991eefa..951631cae75 100644
--- a/sql/share/norwegian/errmsg.txt
+++ b/sql/share/norwegian/errmsg.txt
@@ -208,3 +208,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt
index 03e9d59dacd..79b420022bf 100644
--- a/sql/share/polish/errmsg.txt
+++ b/sql/share/polish/errmsg.txt
@@ -210,3 +210,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt
index 37f2bf9e7ac..cd3e948546e 100644
--- a/sql/share/portuguese/errmsg.txt
+++ b/sql/share/portuguese/errmsg.txt
@@ -1,208 +1,216 @@
/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB
This file is public domain and comes with NO WARRANTY of any kind */
-
+/* Updated by Roberto M. Serqueira - martinsc@uol.com.br - 05.24.2001 */
"hashchk",
"isamchk",
-"NO",
-"YES",
-"Nao consegui criar o arquivo '%-.64s' (Erro: %d)",
-"Nao consegui criar a tabela '%-.64s' (Erro: %d)",
-"Nao consegui criar o banco de dados '%-.64s'. Erro %d",
-"Nao consegui criar o banco de dados '%-.64s'. Este banco ja existe",
-"Nao consegui deletar o banco de dados '%-.64s'. Este banco nao existe",
-"Erro deletando o banco de dados(Nao foi possivel deletar '%-.64s', erro %d)",
-"Erro deletando o banco de dados(Nao foi possivel remover o diretorio '%-.64s', erro %d)",
-"Erro ao deletar '%-.64s' (Erro: %d)",
-"Nao foi possivel ler o registro na tabela do sistema",
-"Nao foi possivel obter o status de '%-.64s' (Erro: %d)",
-"Nao foi possivel obter o diretorio corrente (Erro: %d)",
-"Nao foi possivel travar o arquivo (Erro: %d)",
-"Nao foi possivel abrir arquivo: '%-.64s'. (Erro: %d)",
-"Nao foi possivel encontrar arquivo: '%-.64s' (Erro: %d)",
-"Nao foi possivel ler o diretorio de '%-.64s' (Erro: %d)",
-"Nao foi possivel ir para o diretorio '%-.64s' (Erro: %d)",
-"Registro alterado apos a ultima leitura da tabela '%-.64s'",
-"Disco cheio (%s). Aguardando espaco livre....",
-"Nao foi possivel gravar, chave duplicada na tabela '%-.64s'",
-"Erro ao fechar '%-.64s' (Erro: %d)",
-"Erro lendo arquivo '%-.64s' (Erro: %d)",
-"Erro ao renomear '%-.64s' to '%-.64s' (Erro: %d)",
-"Error gravando arquivo '%-.64s' (Erro: %d)",
-"'%-.64s' esta travado contra alteracoes",
-"Ordenacao cancelada",
-"Visao '%-.64s' nao existe para '%-.64s'",
-"Erro %d do manipulador de tabelas",
-"Manipulador da tabela '%-.64s' nao suporta esta opcao",
-"Nao foi possivel encontrar o registro em '%-.64s'",
-"Informacao invalida no arquivo: '%-.64s'",
-"Arquivo de indice invalido na tabela: '%-.64s'. Tente conserta-lo!",
-"Arquivo de indice destaualizado na tabela '%-.64s'; Conserte-o!",
-"'%-.64s' esta disponivel somente para leitura",
-"Sem memoria. Renicie o programa e tente novamente (Necessita de %d bytes)",
-"Sem memoria para ordenacao. Aumente o espaco de memoria para ordenacao.",
-"Fim de arquivo inesperado enquanto lendo o arquivo '%-.64s' (Erro: %d)",
-"Excesso de conexoes",
-"Thread sem memoria disponivel",
-"Nao foi possivel obter o nome da maquina para este endereco IP",
-"Comunicacao invalida",
-"Acesso negado ao usuario : '%-.32s@%-.64s' ao banco de dados '%-.64s'",
-"Acesso negado ao usuario: '%-.32s@%-.64s' (usando a senha: %s)",
-"Nenhum banco de dados selecionado",
+"não",
+"sim",
+"Não pode criar arquivo '%-.64s' (erro no. %d)",
+"Não pode criar tabela '%-.64s' (erro no. %d)",
+"Não pode criar banco de dados '%-.64s' (erro no. %d)",
+"Não pode criar banco de dados '%-.64s'. Banco de dados já existe",
+"Não pode eliminar banco de dados '%-.64s'. Banco de dados não existe",
+"Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)",
+"Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)",
+"Erro na deleção de '%-.64s' (erro no. %d)",
+"Não pode ler registro em tabela do sistema",
+"Não pode obter status de '%-.64s' (erro no. %d)",
+"Não pode obter diretório corrente (erro no. %d)",
+"Não pode travar arquivo (erro no. %d)",
+"Não pode abrir arquivo '%-.64s' (erro no. %d)",
+"Não pode encontrar arquivo '%-.64s' (erro no. %d)",
+"Não pode ler diretório de '%-.64s' (erro no. %d)",
+"Não pode mudar para o diretório '%-.64s' (erro no. %d)",
+"Registro alterado desde a última leitura da tabela '%-.64s'",
+"Disco cheio (%s). Aguardando alguém liberar algum espaço....",
+"Não pode gravar. Chave duplicada na tabela '%-.64s'",
+"Erro ao fechar '%-.64s' (erro no. %d)",
+"Erro ao ler arquivo '%-.64s' (erro no. %d)",
+"Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)",
+"Erro ao gravar arquivo '%-.64s' (erro no. %d)",
+"'%-.64s' está com travamento contra alterações",
+"Ordenação abortada",
+"'View' '%-.64s' não existe para '%-.64s'",
+"Obteve erro %d no manipulador de tabelas",
+"Manipulador de tabela para '%-.64s' não tem esta opção",
+"Não pode encontrar registro em '%-.64s'",
+"Informação incorreta no arquivo '%-.64s'",
+"Arquivo chave incorreto para tabela '%-.64s'. Tente reparar",
+"Arquivo chave desatualizado para tabela '%-.64s'. Repare-o!",
+"Tabela '%-.64s' é somente para leitura",
+"Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)",
+"Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação",
+"Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)",
+"Excesso de conexões",
+"Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou se você pode adicionar mais área de 'swap'",
+"Não pode obter nome do 'host' para seu endereço",
+"Negociação de acesso falhou",
+"Acesso negado para o usuário '%-.32s@%-.64s' ao banco de dados '%-.64s'",
+"Acesso negado para o usuário '%-.32s@%-.64s' (uso de senha: %s)",
+"Nenhum banco de dados foi selecionado",
"Comando desconhecido",
-"Coluna '%-.64s' nao pode ser vazia",
+"Coluna '%-.64s' não pode ter NULL",
"Banco de dados '%-.64s' desconhecido",
-"Tabela '%-.64s' ja existe",
+"Tabela '%-.64s' já existe",
"Tabela '%-.64s' desconhecida",
-"Coluna: '%-.64s' em %s e ambigua",
-"Finalizacao do servidor em andamento",
-"Coluna '%-.64s' desconhecida em %s",
-"'%-.64s' utilizado nao esta em 'group by'",
-"Nao foi possivel agrupar em '%-.64s'",
-"Clausula contem funcoes de soma e colunas juntos",
-"Contagem de colunas nao confere com a contagem de valores",
-"Nome do identificador '%-.64s' muito grande",
+"Coluna '%-.64s' em '%-.64s' é ambígua",
+"'Shutdown' do servidor em andamento",
+"Coluna '%-.64s' desconhecida em '%-.64s'",
+"'%-.64s' não está em 'GROUP BY'",
+"Não pode agrupar em '%-.64s'",
+"Cláusula contém funções de soma e colunas juntos",
+"Contagem de colunas não confere com a contagem de valores",
+"Nome identificador '%-.100s' é longo demais",
"Nome da coluna '%-.64s' duplicado",
"Nome da chave '%-.64s' duplicado",
-"Inclusao de '%-.64s' duplicada para a chave %d",
-"Especificador de coluna invalido para a coluna '%-.64s'",
-"%s proximo de '%-.64s' a linha %d",
-"Selecao vazia",
-"Tabela/alias nao e unica: '%-.64s'",
-"Valor padrao invalido para '%-.64s'",
-"Mais de uma chave primaria definida",
-"Muitas chaves definidas. O maximo permitido sao %d chaves",
-"Muitas partes de chave definidas. O maximo permitido sao %d partes",
-"Chave especificada e muito longa. O comprimento maximo permitido e %d",
-"Coluna chave '%-.64s' nao existe na tabela",
-"Coluna binaria '%-.64s' nao pode ser utilizada na definicao de chaves",
-"Comprimento da coluna '%-.64s' muito grande(max = %d). Utilize o campo binario",
-"Somente e permitido um campo auto incrementado, e ele deve ser chave da tabela",
-"%s: pronto para conexoes\n",
-"%s: Finalizacao concluida normalmente\n",
-"%s: Recebeu o sinal %d. Cancelando!\n",
-"%s: Finalizacao concluida\n",
-"%s: Forcando a finalizacao da tarefa %ld usuario: '%-.64s'\n",
-"Nao foi possivel criar o socket IP",
-"Tabela '%-.64s' nao possui um indice criado por CREATE INDEX. Recrie a tabela",
-"O separador de campos nao esta conforme esperado. Confira no manual",
-"Nao e possivel utilizar comprimento de linha fixo com campos binarios. Favor usar 'fields terminated by'.",
-"O arquivo '%-.64s' precisa estar no diretorio do banco de dados, e sua leitura permitida a todos",
-"Arquivo '%-.64s' ja existe",
-"Registros: %ld Apagados: %ld Ignorados: %ld Avisos: %ld",
-"Registros: %ld Duplicados: %ld",
-"Parte da chave errada. A parte utilizada nao e um texto ou tem comprimento maior que o definido",
-"Nao e possivel retirar todas as colunas da tabela com ALTER TABLE. Use DROP TABLE",
-"Nao foi possivel DROP '%-.64s'. Confira se este campo/chave existe",
-"Registros: %ld Duplicados: %ld Avisos: %ld",
-"INSERT TABLE '%-.64s' nao e permitido em FROM lista de tabelas",
-"Tarefa desconhecida id: %lu",
-"Voce nao e o responsavel pela tarefa %lu",
-"Nenhuma tabela em uso",
-"Muitos textos para a coluna %s e SET",
-"Nao foi possivel um unico nome para o arquivo %s.(1-999)\n",
-"Tabela '%-.64s' esta travada para leitura, e nao pode ser atualizada",
-"Tabela '%-.64s' nao foi travada com LOCK TABLES",
-"Campo binario '%-.64s' nao pode ter um valor inicial",
-"Nome de banco de dados invalido: '%-.64s'",
-"Nome de tabela invalido: '%-.64s'",
-"O SELECT muitos registros, e possivelmente vai demorar. Confira sua clausula WHERE e utilize SET OPTION SQL_BIG_SELECTS=1 se o SELECT esta correto",
+"Entrada '%-.64s' duplicada para a chave %d",
+"Especificador de coluna incorreto para a coluna '%-.64s'",
+"%s próximo a '%-.80s' na linha %d",
+"'Query' estava vazia",
+"Tabela/alias '%-.64s' não única",
+"Valor 'default' inválido para '%-.64s'",
+"Definida mais de uma chave primária",
+"Especificadas chaves demais. O máximo permitido são %d chaves",
+"Especificadas partes de chave demais. O máximo permitido são %d partes",
+"Chave especificada longa demais. O comprimento máximo permitido é %d",
+"Coluna chave '%-.64s' não existe na tabela",
+"Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado",
+"Comprimento da coluna '%-.64s' grande demais (max = %d). Use BLOB em seu lugar",
+"Definição incorreta de tabela. Somente é permitido um campo auto-incrementado e ele tem que ser definido como chave",
+"%s: Pronto para conexões\n",
+"%s: 'Shutdown' normal\n",
+"%s: Obteve sinal %d. Abortando!\n",
+"%s: 'Shutdown' completo\n",
+"%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n",
+"Não pode criar 'socket' IP",
+"Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela",
+"Argumento separador de campos não é o esperado. Confira no manual",
+"Você não pode usar comprimento de linha fixo com BLOBs. Favor usar 'fields terminated by'",
+"Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura permitida para todos",
+"Arquivo '%-.80s' já existe",
+"Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld",
+"Registros: %ld - Duplicados: %ld",
+"Parte de chave incorreta. A parte de chave usada não é um 'string' ou o comprimento usado é maior do que a parte de chave",
+"Você não pode deletar todas as colunas com ALTER TABLE. Use DROP TABLE em seu lugar",
+"Não pode fazer DROP '%-.64s'. Confira se este campo/chave existe",
+"Registros: %ld - Duplicados: %ld - Avisos: %ld",
+"INSERT TABLE '%-.64s' não é permitido em lista de tabelas FROM",
+"'Id' de 'thread' %lu desconhecido",
+"Você não é proprietário da 'thread' %lu",
+"Nenhuma tabela usada",
+"'Strings' demais para coluna '%-.64s' e SET",
+"Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n",
+"Tabela '%-.64s' foi travada com trava de READ e não pode ser atualizada",
+"Tabela '%-.64s' não foi travada com LOCK TABLES",
+"Coluna BLOB '%-.64s' não pode ter um valor 'default'",
+"Nome de banco de dados '%-.100s' incorreto",
+"Nome de tabela '%-.100s' incorreto",
+"O SELECT examinaria registros demais e provavelmente tomaria um tempo muito longo. Confira sua cláusula WHERE e use SET OPTION SQL_BIG_SELECTS=1, se o SELECT estiver correto",
"Erro desconhecido",
-"Procedimento %s desconhecido",
-"Numero de parametros para o procedimento %s esta incorreto",
-"Parametro incorreto para o procedimento %s",
-"Tabela '%-.64s' descohecida em %s",
-"Campo '%-.64s' definido em duplicidade",
-"Invalid use of group function",
-"Table '%-.64s' uses a extension that doesn't exist in this MySQL version",
-"A table must have at least 1 column",
-"The table '%-.64s' is full",
-"Unknown character set: '%-.64s'",
-"Too many tables. MySQL can only use %d tables in a join",
-"Too many fields",
-"Too big row size. The maximum row size, not counting blobs, is %d. You have to change some fields to blobs",
-"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed",
-"Cross dependency found in OUTER JOIN. Examine your ON conditions",
-"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL",
-"Can't load function '%-.64s'",
-"Can't initialize function '%-.64s'; %-.80s",
-"No paths allowed for shared library",
-"Function '%-.64s' already exist",
-"Can't open shared library '%-.64s' (errno: %d %s)",
-"Can't find function '%-.64s' in library'",
-"Function '%-.64s' is not defined",
-"Host '%-.64s' is blocked because of many connection errors. Unblock with 'mysqladmin flush-hosts'",
-"Host '%-.64s' is not allowed to connect to this MySQL server",
-"You are using MySQL as an anonymous users and anonymous users are not allowed to change passwords",
-"You must have privileges to update tables in the mysql database to be able to change passwords for others",
-"Can't find any matching row in the user table",
-"Rows matched: %ld Changed: %ld Warnings: %ld",
-"Can't create a new thread (errno %d). If you are not out of available memory you can consult the manual for any possible OS dependent bug",
-"Column count doesn't match value count at row %ld",
-"Can't reopen table: '%-.64s',
-"Invalid use of NULL value",
-"Got error '%-.64s' from regexp",
-"Mixing of GROUP columns (MIN(),MAX(),COUNT()...) with no GROUP columns is illegal if there is no GROUP BY clause",
-"There is no such grant defined for user '%-.32s' on host '%-.64s'",
-"%-.16s command denied to user: '%-.32s@%-.64s' for table '%-.64s'",
-"%-.16s command denied to user: '%-.32s@%-.64s' for column '%-.64s' in table '%-.64s'",
-"Illegal GRANT/REVOKE command. Please consult the manual which privleges can be used.",
-"The host or user argument to GRANT is too long",
-"Table '%-64s.%s' doesn't exist",
-"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'",
-"The used command is not allowed with this MySQL version",
-"Something is wrong in your syntax",
-"Delayed insert thread couldn't get requested lock for table %-.64s",
-"Too many delayed threads in use",
-"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)",
-"Got a packet bigger than 'max_allowed_packet'",
-"Got a read error from the connection pipe",
-"Got an error from fcntl()",
-"Got packets out of order",
-"Couldn't uncompress communication packet",
-"Got an error reading communication packets"
-"Got timeout reading communication packets",
-"Got an error writing communication packets",
-"Got timeout writing communication packets",
-"Result string is longer than max_allowed_packet",
-"The used table type doesn't support BLOB/TEXT columns",
-"The used table type doesn't support AUTO_INCREMENT columns",
-"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES",
-"Incorrect column name '%-.100s'",
-"The used table handler can't index column '%-.64s'",
-"All tables in the MERGE table are not defined identically",
-"Can't write, because of unique constraint, to table '%-.64s'",
-"BLOB column '%-.64s' used in key specification without a key length",
-"All parts of a PRIMARY KEY must be NOT NULL; If you need NULL in a key, use UNIQUE instead",
-"Result consisted of more than one row",
-"This table type requires a primary key",
-"This version of MySQL is not compiled with RAID support",
-"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column",
-"Key '%-.64s' doesn't exist in table '%-.64s'",
-"Can't open table",
-"The handler for the table doesn't support check/repair",
-"You are not allowed to execute this command in a transaction",
-"Got error %d during COMMIT",
-"Got error %d during ROLLBACK",
-"Got error %d during FLUSH_LOGS",
-"Got error %d during CHECKPOINT",
-"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)",
-"The handler for the table does not support binary table dump",
-"Binlog closed while trying to FLUSH MASTER",
-"Failed rebuilding the index of dumped table '%-.64s'",
-"Error from master: '%-.64s'",
-"Net error reading from master",
-"Net error writing to master",
-"Can't find FULLTEXT index matching the column list",
-"Can't execute the given command because you have active locked tables or an active transaction",
-"Unknown system variable '%-.64'",
-"Table '%-.64s' is marked as crashed and should be repaired",
-"Table '%-.64s' is marked as crashed and last (automatic?) repair failed",
-"Warning: Some non-transactional changed tables couldn't be rolled back",
-"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage. Increase this mysqld variable and try again',
-"This operation cannot be performed with a running slave, run SLAVE STOP first",
-"This operation requires a running slave, configure slave and do SLAVE START",
-"The server is not configured as slave, fix in config file or with CHANGE MASTER TO",
-"Could not initialize master info structure, check permisions on master.info",
-"Could not create slave thread, check system resources",
-"User %-.64s has already more than 'max_user_connections' active connections",
-"You may only use constant expressions with SET",
+"'Procedure' '%-.64s' desconhecida",
+"Número de parâmetros incorreto para a 'procedure' '%-.64s'",
+"Parâmetros incorretos para a 'procedure' '%-.64s'",
+"Tabela '%-.64s' desconhecida em '%-.32s'",
+"Coluna '%-.64s' especificada duas vezes",
+"Uso inválido da função GROUP",
+"Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL",
+"Uma tabela tem que ter pelo menos uma (1) coluna",
+"Tabela '%-.64s' está cheia",
+"Conjunto de caracteres '%-.64s' desconhecido",
+"Tabelas demais. O MySQL pode usar somente %d tabelas em um JOIN",
+"Colunas demais",
+"Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é de %d. Você tem que mudar alguns campos para BLOBs",
+"Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld . Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário",
+"Dependência cruzada encontrada em OUTER JOIN. Examine suas condições ON",
+"Coluna '%-.64s' é usada com UNIQUE ou INDEX, mas não está definida como NOT NULL",
+"Não pode carregar a função '%-.64s'",
+"Não pode inicializar a função '%-.64s' - '%-.80s'",
+"Não é permitido caminho para biblioteca compartilhada",
+"Função '%-.64s' já existe",
+"Não pode abrir biblioteca compartilhada '%-.64s' (erro no. '%d' - '%-.64s')",
+"Não pode encontrar a função '%-.64s' na biblioteca",
+"Função '%-.64s' não está definida",
+"'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'",
+"'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL",
+"Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas",
+"Você tem que ter o privilégio para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros",
+"Não pode encontrar nenhuma linha que combine na tabela user",
+"Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld",
+"Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre uma possível falha dependente do sistema operacional",
+"Contagem de colunas não confere com a contagem de valores na linha %ld",
+"Não pode reabrir a tabela '%-.64s'",
+"Uso inválido do valor NULL",
+"Obteve erro '%-.64s' em regexp",
+"Mistura de colunas GROUP (MIN(),MAX(),COUNT()...) com colunas não GROUP é ilegal, se não existir cláusula GROUP BY",
+"Não existe tal 'grant' definido para o usuário '%-.32s' no 'host' '%-.64s'",
+"Comando '%-.16s' negado para o usuário '%-.32s@%-.64s' na tabela '%-.64s'",
+"Comando '%-.16s' negado para o usuário '%-.32s@%-.64s' na coluna '%-.64s', na tabela '%-.64s'",
+"Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados.",
+"Argumento de 'host' ou de usuário para o GRANT é longo demais",
+"Tabela '%-.64s.%-.64s' não existe",
+"Não existe tal 'grant' definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'",
+"Comando usado não é permitido para esta versão do MySQL",
+"Você tem um erro de sintaxe no seu SQL",
+"'Thread' de inserção retardada ('delayed') não conseguiu obter trava solicitada na tabela '%-.64s'",
+"Excesso de 'threads' retardadas ('delayed') em uso",
+"Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)",
+"Obteve um pacote maior do que 'max_allowed_packet'",
+"Obteve um erro de leitura no 'pipe' de conexão",
+"Obteve um erro em fcntl()",
+"Obteve pacotes fora de ordem",
+"Não conseguiu descomprimir pacote de comunicação",
+"Obteve um erro na leitura de pacotes de comunicação",
+"Obteve expiração de tempo ('timeout') na leitura de pacotes de comunicação",
+"Obteve um erro na gravação de pacotes de comunicação",
+"Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação",
+"'String' resultante é mais longa do que 'max_allowed_packet'",
+"Tipo de tabela usado não permite colunas BLOB/TEXT",
+"Tipo de tabela usado não permite colunas AUTO_INCREMENT",
+"INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque está travada com LOCK TABLES",
+"Nome de coluna '%-.100s' incorreto",
+"O manipulador de tabela usado não pode indexar a coluna '%-.64s'",
+"Tabelas no MERGE não estão todas definidas identicamente",
+"Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'",
+"Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave",
+"Todas as partes de uma PRIMARY KEY têm que ser NOT NULL. Se você precisar de NULL em uma chave, use UNIQUE em seu lugar",
+"O resultado consistiu em mais do que uma linha",
+"Este tipo de tabela requer uma chave primária",
+"Esta versão do MySQL não foi compilada com suporte a RAID",
+"Você está usando modo de atualização seguro e tentou atualizar uma tabela sem um WHERE que use uma coluna tipo KEY",
+"Chave '%-.64s' não existe na tabela '%-.64s'",
+"Não pode abrir a tabela",
+"O manipulador de tabela não suporta check/repair",
+"Não lhe é permitido executar este comando em uma 'transaction'",
+"Obteve erro %d durante COMMIT",
+"Obteve erro %d durante ROLLBACK",
+"Obteve erro %d durante FLUSH_LOGS",
+"Obteve erro %d durante CHECKPOINT",
+"Conexão %ld abortada ao banco de dados '%-.64s' - usuário '%-.32s' - 'host' `%-.64s' ('%-.64s')",
+"O manipulador de tabela não suporta DUMP binário de tabela",
+"Binlog fechado. Não pode fazer RESET MASTER",
+"Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'",
+"Erro no 'master' '%-.64s'",
+"Erro de rede na leitura do 'master'",
+"Erro de rede na gravação do 'master'",
+"Não pode encontrar índice FULLTEXT que combine com a lista de colunas",
+"Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma 'transaction' ativa",
+"Variável de sistema '%-.64' desconhecida",
+"Tabela '%-.64s' está marcada como danificada e deve ser reparada",
+"Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou",
+"Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas ('rolled back')",
+"'Multi-statement transaction' requereu mais do que 'max_binlog_cache_size' bytes de armazenagem. Aumente o valor desta variável do mysqld e tente novamente',
+"Esta operação não pode ser realizada com um 'slave' em execução. Execute SLAVE STOP primeiro",
+"Esta operação requer um 'slave' em execução. Configure o 'slave' e execute SLAVE START",
+"O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO",
+"Não pode inicializar a estrutura de informação do 'master'. Verifique as permissões em 'master.info'",
+"Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema",
+"Usuário '%-.64s' já possui 'max_user_connections' conexões ativas",
+"Você pode usar apenas expressões de constante com SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt
index 6bc2695bed5..05362606c44 100644
--- a/sql/share/romanian/errmsg.txt
+++ b/sql/share/romanian/errmsg.txt
@@ -210,3 +210,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt
index 75d21dda888..64adb134c34 100644
--- a/sql/share/russian/errmsg.txt
+++ b/sql/share/russian/errmsg.txt
@@ -209,3 +209,11 @@
"îÅ ÍÏÇÕ ÓÏÚÄÁÔØ ÐÒÏÃÅÓÓ SLAVE, ÐÒÏ×ÅÒØÔÅ ÓÉÓÔÅÍÎÙÅ ÒÅÓÕÒÓÙ",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt
index 673499f5105..f951e8f9435 100644
--- a/sql/share/slovak/errmsg.txt
+++ b/sql/share/slovak/errmsg.txt
@@ -214,3 +214,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt
index d470556fd58..3088c9b4ee1 100644
--- a/sql/share/spanish/errmsg.txt
+++ b/sql/share/spanish/errmsg.txt
@@ -207,3 +207,11 @@
"Could not create slave thread, check system resources",
"User %-.64s has already more than 'max_user_connections' active connections",
"You may only use constant expressions with SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/share/swedish/errmsg.OLD b/sql/share/swedish/errmsg.OLD
index 672ce97c575..227a02ac873 100644
--- a/sql/share/swedish/errmsg.OLD
+++ b/sql/share/swedish/errmsg.OLD
@@ -206,3 +206,8 @@
"Kunde inte starta en tråd för replikering",
"Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar",
"Du kan endast använda konstant-uttryck med SET",
+"Tiden att få ett lås var för lång",
+"Antal lås är större än vad som ryms i lock tabellen",
+"Du kan inte låsa tabeller/poster under READ UNCOMMITTED",
+"Fick fel vid inloggning till master: %-.128s",
+"Fick fel vid exekvering av fråga på master: %-.128s",
diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt
index 672ce97c575..b8ee9e62b03 100644
--- a/sql/share/swedish/errmsg.txt
+++ b/sql/share/swedish/errmsg.txt
@@ -206,3 +206,11 @@
"Kunde inte starta en tråd för replikering",
"Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar",
"Du kan endast använda konstant-uttryck med SET",
+"Lock wait timeout exceeded",
+"The total number of locks exceeds the lock table size",
+"Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+"DROP DATABASE not allowed while thread is holding global read lock",
+"CREATE DATABASE not allowed while thread is holding global read lock",
+"Error connecting to master: %-.128s",
+"Error running query on master: %-.128s",
+"Error in SHOW BINLOG EVENTS: %-.128s",
diff --git a/sql/slave.cc b/sql/slave.cc
index 6b9c376a625..e8ffb15110b 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -20,6 +20,7 @@
#include <myisam.h>
#include "mini_client.h"
#include "slave.h"
+#include "sql_repl.h"
#include <thr_alarm.h>
#include <my_dir.h>
@@ -55,7 +56,7 @@ static int init_slave_thread(THD* thd);
static int safe_connect(THD* thd, MYSQL* mysql, MASTER_INFO* mi);
static int safe_reconnect(THD* thd, MYSQL* mysql, MASTER_INFO* mi);
static int safe_sleep(THD* thd, int sec);
-static int request_table_dump(MYSQL* mysql, char* db, char* table);
+static int request_table_dump(MYSQL* mysql, const char* db, const char* table);
static int create_table_from_dump(THD* thd, NET* net, const char* db,
const char* table_name);
inline char* rewrite_db(char* db);
@@ -314,28 +315,31 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db,
const char* table_name)
{
uint packet_len = my_net_read(net); // read create table statement
+ Vio* save_vio;
+ HA_CHECK_OPT check_opt;
TABLE_LIST tables;
- int error = 0;
+ int error= 1;
+ handler *file;
- if(packet_len == packet_error)
- {
- send_error(&thd->net, ER_MASTER_NET_READ);
- return 1;
- }
- if(net->read_pos[0] == 255) // error from master
- {
- net->read_pos[packet_len] = 0;
- net_printf(&thd->net, ER_MASTER, net->read_pos + 3);
- return 1;
- }
+ if (packet_len == packet_error)
+ {
+ send_error(&thd->net, ER_MASTER_NET_READ);
+ return 1;
+ }
+ if (net->read_pos[0] == 255) // error from master
+ {
+ net->read_pos[packet_len] = 0;
+ net_printf(&thd->net, ER_MASTER, net->read_pos + 3);
+ return 1;
+ }
thd->command = COM_TABLE_DUMP;
thd->query = sql_alloc(packet_len + 1);
- if(!thd->query)
- {
- sql_print_error("create_table_from_dump: out of memory");
- net_printf(&thd->net, ER_GET_ERRNO, "Out of memory");
- return 1;
- }
+ if (!thd->query)
+ {
+ sql_print_error("create_table_from_dump: out of memory");
+ net_printf(&thd->net, ER_GET_ERRNO, "Out of memory");
+ return 1;
+ }
memcpy(thd->query, net->read_pos, packet_len);
thd->query[packet_len] = 0;
thd->current_tablenr = 0;
@@ -344,15 +348,12 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db,
thd->proc_info = "Creating table from master dump";
// save old db in case we are creating in a different database
char* save_db = thd->db;
- thd->db = thd->last_nx_db;
+ thd->db = (char*)db;
mysql_parse(thd, thd->query, packet_len); // run create table
- thd->db = save_db; // leave things the way the were before
+  thd->db = save_db; // leave things the way they were before
- if(thd->query_error)
- {
- close_thread_tables(thd); // mysql_parse takes care of the error send
- return 1;
- }
+ if (thd->query_error)
+ goto err; // mysql_parse took care of the error send
bzero((char*) &tables,sizeof(tables));
tables.db = (char*)db;
@@ -361,83 +362,90 @@ static int create_table_from_dump(THD* thd, NET* net, const char* db,
thd->proc_info = "Opening master dump table";
if (!open_ltable(thd, &tables, TL_WRITE))
{
- // open tables will send the error
+ send_error(&thd->net,0,0); // Send error from open_ltable
sql_print_error("create_table_from_dump: could not open created table");
- close_thread_tables(thd);
- return 1;
+ goto err;
}
- handler *file = tables.table->file;
+ file = tables.table->file;
thd->proc_info = "Reading master dump table data";
if (file->net_read_dump(net))
{
net_printf(&thd->net, ER_MASTER_NET_READ);
sql_print_error("create_table_from_dump::failed in\
handler::net_read_dump()");
- close_thread_tables(thd);
- return 1;
+ goto err;
}
- HA_CHECK_OPT check_opt;
check_opt.init();
check_opt.flags|= T_VERY_SILENT;
check_opt.quick = 1;
thd->proc_info = "Rebuilding the index on master dump table";
- Vio* save_vio = thd->net.vio;
// we do not want repair() to spam us with messages
// just send them to the error log, and report the failure in case of
// problems
+ save_vio = thd->net.vio;
thd->net.vio = 0;
- if (file->repair(thd,&check_opt ))
- {
- net_printf(&thd->net, ER_INDEX_REBUILD,tables.table->real_name );
- error = 1;
- }
+ error=file->repair(thd,&check_opt) != 0;
thd->net.vio = save_vio;
+ if (error)
+ net_printf(&thd->net, ER_INDEX_REBUILD,tables.table->real_name);
+
+err:
close_thread_tables(thd);
-
thd->net.no_send_ok = 0;
return error;
}
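
The rebuilt error path above also shows the trick of detaching thd->net.vio around repair() so that repair chatter goes only to the error log, with the pointer restored before the final status is reported. A reduced RAII-style sketch of the same idea (illustration only, not the server's implementation):

// Detach a pointer for the duration of a scope, then put it back.
template <typename T>
class ScopedDetach
{
  T *&slot;
  T *saved;
public:
  explicit ScopedDetach(T *&s) : slot(s), saved(s) { slot = 0; }
  ~ScopedDetach() { slot = saved; }
};

// Usage sketch: { ScopedDetach<Vio> mute(thd->net.vio); file->repair(thd, &check_opt); }
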
-int fetch_nx_table(THD* thd, MASTER_INFO* mi)
+int fetch_nx_table(THD* thd, const char* db_name, const char* table_name,
+ MASTER_INFO* mi, MYSQL* mysql)
{
- MYSQL* mysql = mc_mysql_init(NULL);
int error = 1;
int nx_errno = 0;
- if(!mysql)
- {
- sql_print_error("fetch_nx_table: Error in mysql_init()");
- nx_errno = ER_GET_ERRNO;
- goto err;
- }
-
- safe_connect(thd, mysql, mi);
- if(slave_killed(thd))
+ bool called_connected = (mysql != NULL);
+ if (!called_connected && !(mysql = mc_mysql_init(NULL)))
+ {
+ sql_print_error("fetch_nx_table: Error in mysql_init()");
+ nx_errno = ER_GET_ERRNO;
goto err;
+ }
- if(request_table_dump(mysql, thd->last_nx_db, thd->last_nx_table))
+ if (!called_connected)
+ {
+ if (connect_to_master(thd, mysql, mi))
{
- nx_errno = ER_GET_ERRNO;
- sql_print_error("fetch_nx_table: failed on table dump request ");
+ sql_print_error("Could not connect to master while fetching table\
+ '%-64s.%-64s'", db_name, table_name);
+ nx_errno = ER_CONNECT_TO_MASTER;
goto err;
}
+ }
+ if (slave_killed(thd))
+ goto err;
- if(create_table_from_dump(thd, &mysql->net, thd->last_nx_db,
- thd->last_nx_table))
- {
- // create_table_from_dump will have sent the error alread
- sql_print_error("fetch_nx_table: failed on create table ");
- goto err;
- }
+ if (request_table_dump(mysql, db_name, table_name))
+ {
+ nx_errno = ER_GET_ERRNO;
+ sql_print_error("fetch_nx_table: failed on table dump request ");
+ goto err;
+ }
+
+ if (create_table_from_dump(thd, &mysql->net, db_name,
+ table_name))
+ {
+    // create_table_from_dump will have sent the error already
+ sql_print_error("fetch_nx_table: failed on create table ");
+ goto err;
+ }
error = 0;
err:
- if (mysql)
+ if (mysql && !called_connected)
mc_mysql_close(mysql);
if (nx_errno && thd->net.vio)
send_error(&thd->net, nx_errno, "Error in fetch_nx_table");
+ thd->net.no_send_ok = 0; // Clear up garbage after create_table_from_dump
return error;
}
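
fetch_nx_table() now either borrows a connection the caller already holds (and leaves it open) or opens and closes its own, tracked by called_connected. A reduced illustration of that convention, with Connection as a stand-in type rather than the mini client API:

#include <memory>

struct Connection { bool open() { return true; } };   // stand-in, not mc_mysql_*

bool fetch_using(Connection *borrowed)
{
  std::unique_ptr<Connection> owned;
  Connection *conn = borrowed;
  if (!conn)                                  // caller gave us nothing: own one
  {
    owned.reset(new Connection());
    if (!owned->open())
      return false;
    conn = owned.get();
  }
  // ... request the table dump and recreate the table through conn ...
  return true;   // an owned connection is released here, a borrowed one stays open
}
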
@@ -460,7 +468,7 @@ int init_master_info(MASTER_INFO* mi)
MY_STAT stat_area;
char fname[FN_REFLEN+128];
const char *msg;
- fn_format(fname, master_info_file, mysql_data_home, "", 4+16+32);
+ fn_format(fname, master_info_file, mysql_data_home, "", 4+32);
// we need a mutex while we are changing master info parameters to
// keep other threads from reading bogus info
@@ -537,7 +545,9 @@ int init_master_info(MASTER_INFO* mi)
master_password) ||
init_intvar_from_file((int*)&mi->port, &mi->file, master_port) ||
init_intvar_from_file((int*)&mi->connect_retry, &mi->file,
- master_connect_retry))
+ master_connect_retry) ||
+ init_intvar_from_file((int*)&mi->last_log_seq, &mi->file, 0)
+ )
{
msg="Error reading master configuration";
goto error;
@@ -560,6 +570,44 @@ error:
return 1;
}
+int register_slave_on_master(MYSQL* mysql)
+{
+ String packet;
+ uint len;
+ char buf[4];
+
+ if(!report_host)
+ return 0;
+
+ int4store(buf, server_id);
+ packet.append(buf, 4);
+
+ net_store_data(&packet, report_host);
+ if(report_user)
+ net_store_data(&packet, report_user);
+ else
+ packet.append((char)0);
+
+ if(report_password)
+    net_store_data(&packet, report_password);
+ else
+ packet.append((char)0);
+
+ int2store(buf, (uint16)report_port);
+ packet.append(buf, 2);
+
+ if(mc_simple_command(mysql, COM_REGISTER_SLAVE, (char*)packet.ptr(),
+ packet.length(), 0))
+ {
+ sql_print_error("Error on COM_REGISTER_SLAVE: '%s'",
+ mc_mysql_error(mysql));
+ return 1;
+ }
+
+ return 0;
+}
+
+
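
register_slave_on_master() packs the slave's identity into one COM_REGISTER_SLAVE payload: a 4-byte server_id, the report_host/report_user/report_password strings (a single zero byte when unset), and a 2-byte report_port. A simplified builder for that assumed layout (net_store_data() is approximated by a one-byte length prefix; names below are illustrative):

#include <cstdint>
#include <string>

// Assumed payload layout, based on the code above:
//   4 bytes  server_id (little-endian)
//   string   report_host      (length byte + bytes)
//   string   report_user      (empty when not configured)
//   string   report_password  (empty when not configured)
//   2 bytes  report_port (little-endian)
static void store_le(std::string &out, uint32_t value, int bytes)
{
  for (int i = 0; i < bytes; i++)
    out.push_back(static_cast<char>((value >> (8 * i)) & 0xff));
}

std::string build_register_slave_packet(uint32_t server_id,
                                        const std::string &host,
                                        const std::string &user,
                                        const std::string &password,
                                        uint16_t port)
{
  std::string packet;
  store_le(packet, server_id, 4);
  for (const std::string *s : {&host, &user, &password})
  {
    packet.push_back(static_cast<char>(s->size()));  // simplification of net_store_data()
    packet += *s;
  }
  store_le(packet, port, 2);
  return packet;
}
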
int show_master_info(THD* thd)
{
DBUG_ENTER("show_master_info");
@@ -579,10 +627,12 @@ int show_master_info(THD* thd)
field_list.push_back(new Item_empty_string("Last_errno", 4));
field_list.push_back(new Item_empty_string("Last_error", 20));
field_list.push_back(new Item_empty_string("Skip_counter", 12));
+ field_list.push_back(new Item_empty_string("Last_log_seq", 12));
if(send_fields(thd, field_list, 1))
DBUG_RETURN(-1);
String* packet = &thd->packet;
+ uint32 last_log_seq;
packet->length(0);
pthread_mutex_lock(&glob_mi.lock);
@@ -591,7 +641,8 @@ int show_master_info(THD* thd)
net_store_data(packet, (uint32) glob_mi.port);
net_store_data(packet, (uint32) glob_mi.connect_retry);
net_store_data(packet, glob_mi.log_file_name);
- net_store_data(packet, (uint32) glob_mi.pos); // QQ: Should be fixed
+ net_store_data(packet, (longlong) glob_mi.pos);
+ last_log_seq = glob_mi.last_log_seq;
pthread_mutex_unlock(&glob_mi.lock);
pthread_mutex_lock(&LOCK_slave);
net_store_data(packet, slave_running ? "Yes":"No");
@@ -601,6 +652,7 @@ int show_master_info(THD* thd)
net_store_data(packet, (uint32)last_slave_errno);
net_store_data(packet, last_slave_error);
net_store_data(packet, slave_skip_counter);
+ net_store_data(packet, last_log_seq);
if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length()))
DBUG_RETURN(-1);
@@ -613,11 +665,13 @@ int flush_master_info(MASTER_INFO* mi)
{
IO_CACHE* file = &mi->file;
char lbuf[22];
+ char lbuf1[22];
my_b_seek(file, 0L);
- my_b_printf(file, "%s\n%s\n%s\n%s\n%s\n%d\n%d\n",
+ my_b_printf(file, "%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n",
mi->log_file_name, llstr(mi->pos, lbuf), mi->host, mi->user,
- mi->password, mi->port, mi->connect_retry);
+ mi->password, mi->port, mi->connect_retry,
+ llstr(mi->last_log_seq, lbuf1));
flush_io_cache(file);
return 0;
}
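
With the extra llstr() argument, master.info now carries eight newline-separated values: log name, position, host, user, password, port, connect retry and last_log_seq. A sketch of a reader for that layout (the struct and file handling are illustrative assumptions):

#include <fstream>
#include <string>

struct MasterInfoSnapshot
{
  std::string log_file_name, host, user, password;
  long long pos = 0;
  int port = 0, connect_retry = 0;
  long long last_log_seq = 0;       // stays 0 for files written before this change
};

bool read_master_info(const char *path, MasterInfoSnapshot &out)
{
  std::ifstream in(path);
  std::string pos, port, retry, seq;
  if (!in || !std::getline(in, out.log_file_name) || !std::getline(in, pos) ||
      !std::getline(in, out.host) || !std::getline(in, out.user) ||
      !std::getline(in, out.password) || !std::getline(in, port) ||
      !std::getline(in, retry))
    return false;
  out.pos = std::stoll(pos);
  out.port = std::stoi(port);
  out.connect_retry = std::stoi(retry);
  if (std::getline(in, seq) && !seq.empty())  // tolerate older seven-line files
    out.last_log_seq = std::stoll(seq);
  return true;
}
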
@@ -764,7 +818,7 @@ static int request_dump(MYSQL* mysql, MASTER_INFO* mi)
return 0;
}
-static int request_table_dump(MYSQL* mysql, char* db, char* table)
+static int request_table_dump(MYSQL* mysql, const char* db, const char* table)
{
char buf[1024];
char * p = buf;
@@ -882,7 +936,10 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
thd->server_id = ev->server_id; // use the original server id for logging
thd->set_time(); // time the query
- if(!ev->when)
+ if(!thd->log_seq)
+ thd->log_seq = ev->log_seq;
+
+ if (!ev->when)
ev->when = time(NULL);
switch(type_code) {
@@ -901,7 +958,6 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
VOID(pthread_mutex_lock(&LOCK_thread_count));
thd->query_id = query_id++;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
- thd->last_nx_table = thd->last_nx_db = 0;
thd->query_error = 0; // clear error
thd->net.last_errno = 0;
thd->net.last_error[0] = 0;
@@ -909,36 +965,37 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
// sanity check to make sure the master did not get a really bad
// error on the query
- if(!check_expected_error(thd, (expected_error = qev->error_code)))
+ if (!check_expected_error(thd, (expected_error = qev->error_code)))
+ {
+ mysql_parse(thd, thd->query, q_len);
+ if (expected_error !=
+ (actual_error = thd->net.last_errno) && expected_error)
{
- mysql_parse(thd, thd->query, q_len);
- if (expected_error !=
- (actual_error = thd->net.last_errno) && expected_error)
- {
- const char* errmsg = "Slave: did not get the expected error\
- running query from master - expected: '%s', got '%s'";
- sql_print_error(errmsg, ER(expected_error),
- actual_error ? thd->net.last_error:"no error"
- );
- thd->query_error = 1;
- }
- else if (expected_error == actual_error)
- {
- thd->query_error = 0;
- *last_slave_error = 0;
- last_slave_errno = 0;
- }
+ const char* errmsg = "Slave: did not get the expected error\
+ running query from master - expected: '%s'(%d), got '%s'(%d)";
+ sql_print_error(errmsg, ER_SAFE(expected_error),
+ expected_error,
+ actual_error ? thd->net.last_error:"no error",
+ actual_error);
+ thd->query_error = 1;
}
- else // master could be inconsistent, abort and tell DBA to
- // check/fix it
+ else if (expected_error == actual_error)
{
- thd->db = thd->query = 0;
- thd->convert_set = 0;
- close_thread_tables(thd);
- free_root(&thd->mem_root,0);
- delete ev;
- return 1;
+ thd->query_error = 0;
+ *last_slave_error = 0;
+ last_slave_errno = 0;
}
+ }
+ else
+ {
+ // master could be inconsistent, abort and tell DBA to check/fix it
+ thd->db = thd->query = 0;
+ thd->convert_set = 0;
+ close_thread_tables(thd);
+ free_root(&thd->mem_root,0);
+ delete ev;
+ return 1;
+ }
}
thd->db = 0; // prevent db from being freed
thd->query = 0; // just to be sure
@@ -962,8 +1019,25 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
return 1;
}
free_root(&thd->mem_root,0);
+ mi->last_log_seq = ev->log_seq;
delete ev;
+ thd->log_seq = 0;
+ mi->inc_pos(event_len);
+ flush_master_info(mi);
+ break;
+ }
+ case SLAVE_EVENT:
+ {
+ if(mysql_bin_log.is_open())
+ {
+ Slave_log_event *sev = (Slave_log_event*)ev;
+ mysql_bin_log.write(sev);
+ }
+
+ mi->last_log_seq = ev->log_seq;
+ delete ev;
+ thd->log_seq = 0;
mi->inc_pos(event_len);
flush_master_info(mi);
break;
@@ -1076,7 +1150,9 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
return 1;
}
+ mi->last_log_seq = ev->log_seq;
delete ev;
+ thd->log_seq = 0;
free_root(&thd->mem_root,0);
if(thd->fatal_error)
@@ -1094,8 +1170,10 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
case START_EVENT:
close_temporary_tables(thd);
mi->inc_pos(event_len);
+ mi->last_log_seq = ev->log_seq;
flush_master_info(mi);
delete ev;
+ thd->log_seq = 0;
break;
case STOP_EVENT:
@@ -1105,24 +1183,56 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
mi->inc_pos(event_len);
flush_master_info(mi);
}
+ mi->last_log_seq = ev->log_seq;
delete ev;
+ thd->log_seq = 0;
break;
case ROTATE_EVENT:
{
Rotate_log_event* rev = (Rotate_log_event*)ev;
int ident_len = rev->ident_len;
+ bool rotate_binlog = 0, write_slave_event = 0;
+ char* log_name = mi->log_file_name;
pthread_mutex_lock(&mi->lock);
- memcpy(mi->log_file_name, rev->new_log_ident,ident_len );
- mi->log_file_name[ident_len] = 0;
- mi->pos = 4; // skip magic number
+
+ // rotate local binlog only if the name of remote has changed
+ if (!*log_name || !(log_name[ident_len] == 0 &&
+ !memcmp(log_name, rev->new_log_ident, ident_len)))
+ {
+ write_slave_event = (!(rev->flags & LOG_EVENT_FORCED_ROTATE_F)
+ && mysql_bin_log.is_open());
+ rotate_binlog = (*log_name && write_slave_event);
+ memcpy(log_name, rev->new_log_ident,ident_len );
+ log_name[ident_len] = 0;
+ }
+ mi->pos = rev->pos;
+ mi->last_log_seq = ev->log_seq;
pthread_cond_broadcast(&mi->cond);
pthread_mutex_unlock(&mi->lock);
- flush_master_info(mi);
#ifndef DBUG_OFF
- if(abort_slave_event_count)
+ if (abort_slave_event_count)
++events_till_abort;
-#endif
+#endif
+ if (rotate_binlog)
+ {
+ mi->last_log_seq = 0;
+ mysql_bin_log.new_file();
+ }
+ flush_master_info(mi);
+
+ if (write_slave_event)
+ {
+ Slave_log_event s(thd, mi);
+ if (s.master_host)
+ {
+ s.set_log_seq(0, &mysql_bin_log);
+ s.server_id = ::server_id;
+ mysql_bin_log.write(&s);
+ }
+ }
+
delete ev;
+ thd->log_seq = 0;
break;
}
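
The ROTATE_EVENT branch above rotates the local binlog, and writes a Slave_log_event, only when the master announces a log name different from the one already recorded and the rotate was not forced. The name comparison can be read as the small helper below (a sketch of the same memcmp() test, not server code):

#include <cstring>

static bool master_log_changed(const char *current, const char *announced,
                               int announced_len)
{
  if (!*current)                               // no log name recorded yet
    return true;
  return !(current[announced_len] == '\0' &&
           memcmp(current, announced, announced_len) == 0);
}
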
@@ -1142,6 +1252,7 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
}
mi->inc_pending(event_len);
delete ev;
+ // do not reset log_seq
break;
}
}
@@ -1237,6 +1348,14 @@ pthread_handler_decl(handle_slave,arg __attribute__((unused)))
sql_print_error("Slave thread killed while connecting to master");
goto err;
}
+
+connected:
+
+ // register ourselves with the master
+ // if fails, this is not fatal - we just print the error message and go
+ // on with life
+ thd->proc_info = "Registering slave on master";
+ register_slave_on_master(mysql);
while (!slave_killed(thd))
{
@@ -1280,7 +1399,7 @@ try again, log '%s' at postion %s", RPL_LOG_NAME,
goto err;
}
- continue;
+ goto connected;
}
@@ -1331,8 +1450,9 @@ reconnecting to retry, log '%s' position %s", RPL_LOG_NAME,
reconnect done to recover from failed read");
goto err;
}
- break;
- }
+
+ goto connected;
+ } // if(event_len == packet_error)
thd->proc_info = "Processing master log event";
if(exec_event(thd, &mysql->net, &glob_mi, event_len))
@@ -1369,15 +1489,14 @@ the slave thread with \"mysqladmin start-slave\". We stopped at log \
{
// show a little mercy, allow slave to read one more event
// before cutting him off - otherwise he gets stuck
- // on Invar events, since they do not advance the offset
+ // on Intvar events, since they do not advance the offset
// immediately
if (++stuck_count > 2)
events_till_disconnect++;
}
#endif
-
- }
- }
+ } // while(!slave_killed(thd)) - read/exec loop
+ } // while(!slave_killed(thd)) - slave loop
// error = 0;
err:
diff --git a/sql/slave.h b/sql/slave.h
index 311368a4b82..d9131bb53be 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -1,6 +1,8 @@
#ifndef SLAVE_H
#define SLAVE_H
+#include "mysql.h"
+
typedef struct st_master_info
{
char log_file_name[FN_REFLEN];
@@ -13,11 +15,12 @@ typedef struct st_master_info
char password[HASH_PASSWORD_LENGTH+1];
uint port;
uint connect_retry;
+ uint32 last_log_seq; // log sequence number of last processed event
pthread_mutex_t lock;
pthread_cond_t cond;
bool inited;
- st_master_info():pending(0),fd(-1),inited(0)
+ st_master_info():pending(0),fd(-1),last_log_seq(0),inited(0)
{
host[0] = 0; user[0] = 0; password[0] = 0;
pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);
@@ -64,12 +67,16 @@ typedef struct st_table_rule_ent
#define TABLE_RULE_ARR_SIZE 16
int flush_master_info(MASTER_INFO* mi);
+int register_slave_on_master(MYSQL* mysql);
-int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd = -1);
+int mysql_table_dump(THD* thd, const char* db,
+ const char* tbl_name, int fd = -1);
// if fd is -1, dump to NET
-int fetch_nx_table(THD* thd, MASTER_INFO* mi);
+
+int fetch_nx_table(THD* thd, const char* db_name, const char* table_name,
+ MASTER_INFO* mi, MYSQL* mysql);
// retrieve non-existent table from master
-// the caller must set thd->last_nx_table and thd->last_nx_db first
+
int show_master_info(THD* thd);
int show_binlog_info(THD* thd);
@@ -112,9 +119,9 @@ extern int disconnect_slave_event_count, abort_slave_event_count ;
#endif
// the master variables are defaults read from my.cnf or command line
-extern uint master_port, master_connect_retry;
+extern uint master_port, master_connect_retry, report_port;
extern my_string master_user, master_password, master_host,
- master_info_file;
+ master_info_file, report_user, report_host, report_password;
extern I_List<i_string> replicate_do_db, replicate_ignore_db;
extern I_List<i_string_pair> replicate_rewrite_db;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 37a14f02bcc..ea3d77c5158 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -384,6 +384,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
thd->in_lock_tables=1;
result=reopen_tables(thd,1,1);
thd->in_lock_tables=0;
+ /* Set version for table */
+ for (TABLE *table=thd->open_tables; table ; table=table->next)
+ table->version=refresh_version;
}
VOID(pthread_mutex_unlock(&LOCK_open));
if (if_wait_for_refresh)
@@ -501,11 +504,12 @@ void close_temporary(TABLE *table,bool delete_table)
void close_temporary_tables(THD *thd)
{
TABLE *table,*next;
- uint init_query_buf_size = 11, query_buf_size; // "drop table "
- char* query, *p;
+ char *query, *end;
+ const uint init_query_buf_size = 11; // "drop table "
+ uint query_buf_size;
bool found_user_tables = 0;
- LINT_INIT(p);
+ LINT_INIT(end);
query_buf_size = init_query_buf_size;
for (table=thd->temporary_tables ; table ; table=table->next)
@@ -513,37 +517,37 @@ void close_temporary_tables(THD *thd)
query_buf_size += table->key_length;
}
- if(query_buf_size == init_query_buf_size)
+ if (query_buf_size == init_query_buf_size)
return; // no tables to close
- if((query = alloc_root(&thd->mem_root, query_buf_size)))
- {
- memcpy(query, "drop table ", init_query_buf_size);
- p = query + init_query_buf_size;
- }
+ if ((query = alloc_root(&thd->mem_root, query_buf_size)))
+ {
+ memcpy(query, "drop table ", init_query_buf_size);
+ end = query + init_query_buf_size;
+ }
for (table=thd->temporary_tables ; table ; table=next)
{
- if(query) // we might be out of memory, but this is not fatal
+ if (query) // we might be out of memory, but this is not fatal
+ {
+ // skip temporary tables not created directly by the user
+ if (table->table_name[0] != '#')
{
- // skip temporary tables not created directly by the user
- if(table->table_name[0] != '#')
- {
- p = strxmov(p,table->table_cache_key,".",
- table->table_name,",", NullS);
- // here we assume table_cache_key always starts
- // with \0 terminated db name
- found_user_tables = 1;
- }
+ end = strxmov(end,table->table_cache_key,".",
+ table->table_name,",", NullS);
+ // here we assume table_cache_key always starts
+ // with \0 terminated db name
+ found_user_tables = 1;
}
+ }
next=table->next;
close_temporary(table);
}
if (query && found_user_tables && mysql_bin_log.is_open())
{
uint save_query_len = thd->query_length;
- *--p = 0;
- thd->query_length = (uint)(p-query);
+ *--end = 0; // Remove last ','
+ thd->query_length = (uint)(end-query);
Query_log_event qinfo(thd, query);
mysql_bin_log.write(&qinfo);
thd->query_length = save_query_len;
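
close_temporary_tables() assembles a single "drop table db.name,db.name" statement for the binlog and then overwrites the trailing comma, which is what the *--end = 0 step does. A compact sketch of the same assembly (illustrative types and names):

#include <string>
#include <utility>
#include <vector>

std::string build_drop_statement(const std::vector<std::pair<std::string, std::string> > &tables)
{
  std::string query = "drop table ";
  for (size_t i = 0; i < tables.size(); i++)   // first = db, second = table name
    query += tables[i].first + "." + tables[i].second + ",";
  if (!tables.empty())
    query.erase(query.size() - 1);             // remove the last ','
  return query;
}
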
@@ -837,25 +841,6 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
!(table->table_cache_key=memdup_root(&table->mem_root,(char*) key,
key_length)))
{
- MEM_ROOT* glob_alloc;
- LINT_INIT(glob_alloc);
-
- if (errno == ENOENT &&
- (glob_alloc = my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC)))
- // Sasha: needed for replication
- // remember the name of the non-existent table
- // so we can try to download it from the master
- {
- int table_name_len = (uint) strlen(table_name);
- int db_len = (uint) strlen(db);
- thd->last_nx_db = alloc_root(glob_alloc,db_len + table_name_len + 2);
- if(thd->last_nx_db)
- {
- thd->last_nx_table = thd->last_nx_db + db_len + 1;
- memcpy(thd->last_nx_table, table_name, table_name_len + 1);
- memcpy(thd->last_nx_db, db, db_len + 1);
- }
- }
table->next=table->prev=table;
free_cache_entry(table);
VOID(pthread_mutex_unlock(&LOCK_open));
@@ -1394,11 +1379,6 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type)
bool refresh;
DBUG_ENTER("open_ltable");
-#ifdef __WIN__
- /* Win32 can't drop a file that is open */
- if (lock_type == TL_WRITE_ALLOW_READ)
- lock_type= TL_WRITE;
-#endif
thd->proc_info="Opening table";
while (!(table=open_table(thd,table_list->db ? table_list->db : thd->db,
table_list->real_name,table_list->name,
@@ -1406,6 +1386,19 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type)
if (table)
{
int error;
+
+#ifdef __WIN__
+ /* Win32 can't drop a file that is open */
+ if (lock_type == TL_WRITE_ALLOW_READ
+#ifdef HAVE_GEMINI_DB
+ && table->db_type != DB_TYPE_GEMINI
+#endif /* HAVE_GEMINI_DB */
+ )
+ {
+ lock_type= TL_WRITE;
+ }
+#endif /* __WIN__ */
+
table_list->table=table;
table->grant= table_list->grant;
if (thd->locked_tables)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ffcb15b4c9b..f196e4ff852 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -49,6 +49,8 @@ template class List<Alter_drop>;
template class List_iterator<Alter_drop>;
template class List<Alter_column>;
template class List_iterator<Alter_column>;
+template class List<Set_option>;
+template class List_iterator<Set_option>;
#endif
/****************************************************************************
@@ -96,7 +98,7 @@ THD::THD():user_time(0),fatal_error(0),last_insert_id_used(0),
current_linfo = 0;
slave_thread = 0;
slave_proxy_id = 0;
- last_nx_table = last_nx_db = 0;
+ log_seq = 0;
cond_count=0;
convert_set=0;
mysys_var=0;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 603e4bdeeb9..14055a1444c 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -23,6 +23,7 @@
class Query_log_event;
class Load_log_event;
+class Slave_log_event;
enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY };
@@ -62,11 +63,15 @@ class MYSQL_LOG {
char time_buff[20],db[NAME_LEN+1];
char log_file_name[FN_REFLEN],index_file_name[FN_REFLEN];
bool write_error,inited;
+ uint32 log_seq; // current event sequence number
+ // needed this for binlog
bool no_rotate; // for binlog - if log name can never change
// we should not try to rotate it or write any rotation events
// the user should use FLUSH MASTER instead of FLUSH LOGS for
// purging
+ friend class Log_event;
+
public:
MYSQL_LOG();
~MYSQL_LOG();
@@ -83,6 +88,7 @@ public:
time_t query_start=0);
bool write(Query_log_event* event_info); // binary log write
bool write(Load_log_event* event_info);
+ bool write(Slave_log_event* event_info);
bool write(IO_CACHE *cache);
int generate_new_name(char *new_name,const char *old_name);
void make_log_name(char* buf, const char* log_ident);
@@ -241,9 +247,8 @@ public:
struct st_my_thread_var *mysys_var;
enum enum_server_command command;
uint32 server_id;
+ uint32 log_seq;
const char *where;
- char* last_nx_table; // last non-existent table, we need this for replication
- char* last_nx_db; // database of the last nx table
time_t start_time,time_after_lock,user_time;
time_t connect_time,thr_create_time; // track down slow pthread_create
thr_lock_type update_lock_default;
@@ -405,6 +410,8 @@ public:
** This is used to get result from a select
*/
+class JOIN;
+
class select_result :public Sql_alloc {
protected:
THD *thd;
@@ -414,6 +421,7 @@ public:
virtual int prepare(List<Item> &list) { return 0; }
virtual bool send_fields(List<Item> &list,uint flag)=0;
virtual bool send_data(List<Item> &items)=0;
+ virtual void initialize_tables (JOIN *join=0) {};
virtual void send_error(uint errcode,const char *err)=0;
virtual bool send_eof()=0;
virtual void abort() {}
@@ -466,8 +474,6 @@ public:
void send_error(uint errcode,const char *err);
bool send_eof();
};
-
-
class select_insert :public select_result {
protected:
TABLE *table;
@@ -580,19 +586,48 @@ class Unique :public Sql_alloc
public:
ulong elements;
- Unique(qsort_cmp2 comp_func, uint size, ulong max_in_memory_size_arg);
+ Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
+ uint size, ulong max_in_memory_size_arg);
~Unique();
inline bool Unique::unique_add(gptr ptr)
{
if (tree.elements_in_tree > max_elements && flush())
return 1;
- return tree_insert(&tree,ptr,0);
+ return !tree_insert(&tree,ptr,0);
}
bool get(TABLE *table);
- friend int unique_write_to_file(gptr key, Unique *unique,
- element_count count);
- friend int unique_write_to_ptrs(gptr key, Unique *unique,
- element_count count);
+ friend int unique_write_to_file(gptr key, element_count count, Unique *unique);
+ friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique);
};
+
+ class multi_delete : public select_result {
+ TABLE_LIST *delete_tables, *table_being_deleted;
+#ifdef SINISAS_STRIP
+ IO_CACHE **tempfiles;
+ byte *memory_lane;
+#else
+ Unique **tempfiles;
+#endif
+ THD *thd;
+ ha_rows deleted;
+ uint num_of_tables;
+ int error;
+ thr_lock_type lock_option;
+ bool do_delete;
+ public:
+ multi_delete(THD *thd, TABLE_LIST *dt, thr_lock_type lock_option_arg,
+ uint num_of_tables);
+ ~multi_delete();
+ int prepare(List<Item> &list);
+ bool send_fields(List<Item> &list,
+ uint flag) { return 0; }
+ bool send_data(List<Item> &items);
+ void initialize_tables (JOIN *join);
+ void send_error(uint errcode,const char *err);
+ int do_deletes (bool from_send_error);
+ bool send_eof();
+ };
+
+
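
The multi_delete class declared above feeds row references into Unique objects; note that unique_add() now returns non-zero on failure, which is why the body became "!tree_insert(...)". A self-contained analogue of the collect-then-replay usage (std::set stands in for the tree plus temp file, so this is not the server's implementation):

#include <cstddef>
#include <set>
#include <string>
#include <vector>

class RefCollector
{
  std::set<std::string> refs;                  // Unique uses an in-memory tree + temp file
public:
  bool add(const char *ref, size_t len)        // false on success, like unique_add()
  {
    refs.insert(std::string(ref, len));
    return false;
  }
  std::vector<std::string> replay() const
  {
    return std::vector<std::string>(refs.begin(), refs.end());
  }
};
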
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 5243498f7fc..85d3f0a344c 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -30,14 +30,41 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *path,
/* db-name is already validated when we come here */
-void mysql_create_db(THD *thd, char *db, uint create_options)
+int mysql_create_db(THD *thd, char *db, uint create_options)
{
char path[FN_REFLEN+16];
MY_DIR *dirp;
long result=1;
+ int error = 0;
DBUG_ENTER("mysql_create_db");
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
+ VOID(pthread_mutex_lock(&LOCK_open));
+
+ // do not create database if another thread is holding read lock
+ if (global_read_lock)
+ {
+ if (thd->global_read_lock)
+ {
+ net_printf(&thd->net, ER_CREATE_DB_WITH_READ_LOCK);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ goto exit;
+ }
+ while (global_read_lock && ! thd->killed)
+ {
+ (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
+ }
+
+ if (thd->killed)
+ {
+ net_printf(&thd->net, ER_SERVER_SHUTDOWN);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ goto exit;
+ }
+
+ }
+
+ VOID(pthread_mutex_unlock(&LOCK_open));
/* Check directory */
(void)sprintf(path,"%s/%s", mysql_data_home, db);
@@ -47,7 +74,9 @@ void mysql_create_db(THD *thd, char *db, uint create_options)
my_dirend(dirp);
if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS))
{
- net_printf(&thd->net,ER_DB_CREATE_EXISTS,db);
+ if(thd)
+ net_printf(&thd->net,ER_DB_CREATE_EXISTS,db);
+ error = 1;
goto exit;
}
result = 0;
@@ -57,34 +86,39 @@ void mysql_create_db(THD *thd, char *db, uint create_options)
strend(path)[-1]=0; // Remove last '/' from path
if (my_mkdir(path,0777,MYF(0)) < 0)
{
- net_printf(&thd->net,ER_CANT_CREATE_DB,db,my_errno);
+ if(thd)
+ net_printf(&thd->net,ER_CANT_CREATE_DB,db,my_errno);
+ error = 1;
goto exit;
}
}
- if (!thd->query)
- {
- thd->query = path;
- thd->query_length = (uint) (strxmov(path,"create database ", db, NullS)-
- path);
- }
+
+ if(thd)
{
- mysql_update_log.write(thd,thd->query, thd->query_length);
- if (mysql_bin_log.is_open())
+ if (!thd->query)
{
- Query_log_event qinfo(thd, thd->query);
- mysql_bin_log.write(&qinfo);
+ thd->query = path;
+ thd->query_length = (uint) (strxmov(path,"create database ", db, NullS)-
+ path);
}
+ {
+ mysql_update_log.write(thd,thd->query, thd->query_length);
+ if (mysql_bin_log.is_open())
+ {
+ Query_log_event qinfo(thd, thd->query);
+ mysql_bin_log.write(&qinfo);
+ }
+ }
+ if (thd->query == path)
+ {
+ thd->query = 0; // just in case
+ thd->query_length = 0;
+ }
+ send_ok(&thd->net, result);
}
- if (thd->query == path)
- {
- thd->query = 0; // just in case
- thd->query_length = 0;
- }
- send_ok(&thd->net, result);
-
exit:
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
const char *del_exts[]=
@@ -94,10 +128,14 @@ static TYPELIB deletable_extentions=
/* db-name is already validated when we come here */
-
-void mysql_rm_db(THD *thd,char *db,bool if_exists)
+/* If thd == 0, do not write any messages
+ This is useful in replication when we want to remove
+ a stale database before replacing it with the new one
+*/
+int mysql_rm_db(THD *thd,char *db,bool if_exists)
{
long deleted=0;
+ int error = 0;
char path[FN_REFLEN+16];
MY_DIR *dirp;
DBUG_ENTER("mysql_rm_db");
@@ -105,20 +143,44 @@ void mysql_rm_db(THD *thd,char *db,bool if_exists)
VOID(pthread_mutex_lock(&LOCK_mysql_create_db));
VOID(pthread_mutex_lock(&LOCK_open));
+ // do not drop database if another thread is holding read lock
+ if (global_read_lock)
+ {
+ if (thd->global_read_lock)
+ {
+ net_printf(&thd->net, ER_DROP_DB_WITH_READ_LOCK);
+ goto exit;
+ }
+ while (global_read_lock && ! thd->killed)
+ {
+ (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
+ }
+
+ if (thd->killed)
+ {
+ net_printf(&thd->net, ER_SERVER_SHUTDOWN);
+ goto exit;
+ }
+ }
+
(void) sprintf(path,"%s/%s",mysql_data_home,db);
unpack_dirname(path,path); // Convert if not unix
/* See if the directory exists */
if (!(dirp = my_dir(path,MYF(MY_WME | MY_DONT_SORT))))
{
- if (!if_exists)
- net_printf(&thd->net,ER_DB_DROP_EXISTS,db);
- else
- send_ok(&thd->net,0);
+ if(thd)
+ {
+ if (!if_exists)
+ net_printf(&thd->net,ER_DB_DROP_EXISTS,db);
+ else
+ send_ok(&thd->net,0);
+ }
+ error = !if_exists;
goto exit;
}
remove_db_from_cache(db);
- if ((deleted=mysql_rm_known_files(thd, dirp, path,0)) >= 0)
+ if ((deleted=mysql_rm_known_files(thd, dirp, path,0)) >= 0 && thd)
{
if (!thd->query)
{
@@ -137,13 +199,14 @@ void mysql_rm_db(THD *thd,char *db,bool if_exists)
thd->query = 0; // just in case
thd->query_length = 0;
}
+
send_ok(&thd->net,(ulong) deleted);
}
exit:
VOID(pthread_mutex_unlock(&LOCK_open));
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(error);
}
/*
@@ -151,6 +214,7 @@ exit:
are 2 digits (raid directories).
*/
+/* This one also needs to work with thd == 0 for replication */
static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
uint level)
{
@@ -162,7 +226,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
/* remove all files with known extensions */
for (uint idx=2 ;
- idx < (uint) dirp->number_off_files && !thd->killed ;
+ idx < (uint) dirp->number_off_files && (!thd || !thd->killed) ;
idx++)
{
FILEINFO *file=dirp->dir_entry+idx;
@@ -194,9 +258,10 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
}
strxmov(filePath,org_path,"/",file->name,NullS);
unpack_filename(filePath,filePath);
- if (my_delete(filePath,MYF(MY_WME)))
+ if (my_delete_with_symlink(filePath,MYF(MY_WME)))
{
- net_printf(&thd->net,ER_DB_DROP_DELETE,filePath,my_error);
+ if(thd)
+ net_printf(&thd->net,ER_DB_DROP_DELETE,filePath,my_error);
my_dirend(dirp);
DBUG_RETURN(-1);
}
@@ -205,7 +270,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
my_dirend(dirp);
- if (thd->killed)
+ if (thd && thd->killed)
{
send_error(&thd->net,ER_SERVER_SHUTDOWN);
DBUG_RETURN(-1);
@@ -229,7 +294,8 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
/* Don't give errors if we can't delete 'RAID' directory */
if (level)
DBUG_RETURN(deleted);
- send_error(&thd->net);
+ if(thd)
+ send_error(&thd->net);
DBUG_RETURN(-1);
}
path=filePath;
@@ -242,7 +308,8 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *org_path,
/* Don't give errors if we can't delete 'RAID' directory */
if (rmdir(path) < 0 && !level)
{
- net_printf(&thd->net,ER_DB_DROP_RMDIR, path,errno);
+ if(thd)
+ net_printf(&thd->net,ER_DB_DROP_RMDIR, path,errno);
DBUG_RETURN(-1);
}
}
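
Both mysql_create_db() and mysql_rm_db() now refuse to run while a global read lock is held: they error out immediately if the requesting thread owns the lock itself, and otherwise wait on COND_refresh until the lock is released or the thread is killed. An analogue of that wait using standard primitives (LOCK_open and COND_refresh are replaced by std:: types; illustration only):

#include <condition_variable>
#include <mutex>

struct GlobalLockState
{
  std::mutex lock_open;
  std::condition_variable cond_refresh;
  bool global_read_lock = false;
  bool killed = false;
};

// Returns true when it is safe to proceed with CREATE/DROP DATABASE.
bool wait_if_global_read_lock(GlobalLockState &s, bool thread_holds_the_lock)
{
  std::unique_lock<std::mutex> guard(s.lock_open);
  if (s.global_read_lock && thread_holds_the_lock)
    return false;                              // would deadlock, report an error instead
  s.cond_refresh.wait(guard, [&] { return !s.global_read_lock || s.killed; });
  return !s.killed;
}
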
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 85b1c947460..0f70bd71ddd 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB & Sinisa
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -19,6 +19,7 @@
#include "mysql_priv.h"
#include "ha_innobase.h"
+#include "sql_select.h"
/*
Optimize delete of all rows by doing a full generate of the table
@@ -151,7 +152,7 @@ int mysql_delete(THD *thd,
#ifdef HAVE_INNOBASE_DB
/* We need to add code to not generate table based on the table type */
if (!innodb_skip)
- use_generate_table=0; // Innodb can't use re-generate table
+ use_generate_table=0; // Innobase can't use re-generate table
#endif
if (use_generate_table && ! thd->open_tables)
{
@@ -186,7 +187,7 @@ int mysql_delete(THD *thd,
/* If running in safe sql mode, don't allow updates without keys */
if (!table->quick_keys)
{
- thd->lex.options|=QUERY_NO_INDEX_USED;
+ thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
if ((thd->options & OPTION_SAFE_UPDATES) && limit == HA_POS_ERROR)
{
delete select;
@@ -214,7 +215,7 @@ int mysql_delete(THD *thd,
MYF(MY_FAE | MY_ZEROFILL));
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
- (table->found_records = filesort(&table, sortorder, length,
+ (table->found_records = filesort(table, sortorder, length,
(SQL_SELECT *) 0, 0L, HA_POS_ERROR,
&examined_rows))
== HA_POS_ERROR)
@@ -286,3 +287,666 @@ int mysql_delete(THD *thd,
}
+/***************************************************************************
+** delete multiple tables from join
+***************************************************************************/
+
+#define MEM_STRIP_BUF_SIZE sortbuff_size
+
+#ifndef SINISAS_STRIP
+int refposcmp2(void* arg, const void *a,const void *b)
+{
+ return memcmp(a,b,(int) arg);
+}
+#endif
+
+multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt,
+ thr_lock_type lock_option_arg,
+ uint num_of_tables_arg)
+ : delete_tables (dt), thd(thd_arg), deleted(0),
+ num_of_tables(num_of_tables_arg), error(0), lock_option(lock_option_arg),
+ do_delete(false)
+{
+ uint counter=0;
+#ifdef SINISAS_STRIP
+ tempfiles = (IO_CACHE **) sql_calloc(sizeof(IO_CACHE *)* num_of_tables);
+ memory_lane = (byte *)sql_alloc(MAX_REFLENGTH*MEM_STRIP_BUF_SIZE);
+#else
+ tempfiles = (Unique **) sql_calloc(sizeof(Unique *) * (num_of_tables-1));
+#endif
+
+ (void) dt->table->file->extra(HA_EXTRA_NO_READCHECK);
+ (void) dt->table->file->extra(HA_EXTRA_NO_KEYREAD);
+ /* Don't use key read with MULTI-TABLE-DELETE */
+ dt->table->used_keys=0;
+ for (dt=dt->next ; dt ; dt=dt->next,counter++)
+ {
+ TABLE *table=dt->table;
+ (void) dt->table->file->extra(HA_EXTRA_NO_READCHECK);
+ (void) dt->table->file->extra(HA_EXTRA_NO_KEYREAD);
+#ifdef SINISAS_STRIP
+ tempfiles[counter]=(IO_CACHE *) sql_alloc(sizeof(IO_CACHE));
+ if (open_cached_file(tempfiles[counter], mysql_tmpdir,TEMP_PREFIX,
+ DISK_BUFFER_SIZE, MYF(MY_WME)))
+ {
+ my_error(ER_CANT_OPEN_FILE,MYF(0),(tempfiles[counter])->file_name,errno);
+ thd->fatal_error=1;
+ return;
+ }
+#else
+ tempfiles[counter] = new Unique (refposcmp2,
+ (void *) table->file->ref_length,
+ table->file->ref_length,
+ MEM_STRIP_BUF_SIZE);
+#endif
+ }
+}
+
+
+int
+multi_delete::prepare(List<Item> &values)
+{
+ DBUG_ENTER("multi_delete::prepare");
+ do_delete = true;
+ thd->proc_info="deleting from main table";
+
+ if (thd->options & OPTION_SAFE_UPDATES)
+ {
+ TABLE_LIST *table_ref;
+ for (table_ref=delete_tables; table_ref; table_ref=table_ref->next)
+ {
+ TABLE *table=table_ref->table;
+ if ((thd->options & OPTION_SAFE_UPDATES) && !table->quick_keys)
+ {
+ my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+inline static void
+link_in_list(SQL_LIST *list,byte *element,byte **next)
+{
+ list->elements++;
+ (*list->next)=element;
+ list->next=next;
+ *next=0;
+}
+
+void
+multi_delete::initialize_tables(JOIN *join)
+{
+ SQL_LIST *new_list=(SQL_LIST *) sql_alloc(sizeof(SQL_LIST));
+ new_list->elements=0; new_list->first=0;
+ new_list->next= (byte**) &(new_list->first);
+ for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
+ tab < end;
+ tab++)
+ {
+ TABLE_LIST *walk;
+ for (walk=(TABLE_LIST*) delete_tables ; walk ; walk=walk->next)
+ if (!strcmp(tab->table->path,walk->table->path))
+ break;
+    if (walk) // this join table is one of the tables being deleted
+ {
+ register TABLE_LIST *ptr = (TABLE_LIST *) sql_alloc(sizeof(TABLE_LIST));
+ memcpy(ptr,walk,sizeof(TABLE_LIST)); ptr->next=0;
+ link_in_list(new_list,(byte*) ptr,(byte**) &ptr->next);
+ }
+ }
+ delete_tables=(TABLE_LIST *)new_list->first;
+ return;
+}
+
+multi_delete::~multi_delete()
+{
+
+ /* Add back EXTRA_READCHECK; In 4.0.1 we shouldn't need this anymore */
+ for (table_being_deleted=delete_tables ;
+ table_being_deleted ;
+ table_being_deleted=table_being_deleted->next)
+ {
+ VOID(table_being_deleted->table->file->extra(HA_EXTRA_READCHECK));
+ }
+ for (uint counter = 0; counter < num_of_tables-1; counter++)
+ {
+ if (tempfiles[counter])
+ {
+#ifdef SINISAS_STRIP
+ end_io_cache(tempfiles[counter]);
+#else
+ delete tempfiles[counter];
+#endif
+ }
+ }
+}
+
+
+bool multi_delete::send_data(List<Item> &values)
+{
+ int secure_counter= -1;
+ for (table_being_deleted=delete_tables ;
+ table_being_deleted ;
+ table_being_deleted=table_being_deleted->next, secure_counter++)
+ {
+ TABLE *table=table_being_deleted->table;
+
+ /* Check if we are using outer join and we didn't find the row */
+ if (table->status & (STATUS_NULL_ROW | STATUS_DELETED))
+ continue;
+
+ table->file->position(table->record[0]);
+ int rl = table->file->ref_length;
+
+ if (secure_counter < 0)
+ {
+ table->status|= STATUS_DELETED;
+ if (!(error=table->file->delete_row(table->record[0])))
+ deleted++;
+ else
+ {
+ table->file->print_error(error,MYF(0));
+ return 1;
+ }
+ }
+ else
+ {
+#ifdef SINISAS_STRIP
+ error=my_b_write(tempfiles[secure_counter],table->file->ref,rl);
+#else
+ error=tempfiles[secure_counter]->unique_add(table->file->ref);
+#endif
+ if (error)
+ {
+ error=-1;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
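
send_data() above is the first half of a two-phase delete: rows of the driving table are deleted as the join produces them, while for every other table only the reference obtained from position() is stored for the later do_deletes() pass. A self-contained analogue of that split (names and types are illustrative):

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

struct DeferredDeletes
{
  std::vector<std::vector<std::string> > saved_refs;   // one bucket per extra table

  explicit DeferredDeletes(size_t extra_tables) : saved_refs(extra_tables) {}

  void on_row(size_t table_index, const std::string &row_ref,
              const std::function<void()> &delete_now)
  {
    if (table_index == 0)
      delete_now();                            // first table: delete immediately
    else
      saved_refs[table_index - 1].push_back(row_ref);  // defer for the second pass
  }
};
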
+
+
+#ifdef SINISAS_STRIP
+static inline int COMP (byte *ml,uint len,unsigned int left, unsigned int right)
+{
+ return memcmp(ml + left*len,ml + right*len,len);
+}
+
+#define EX(ML,LEN,LLLEFT,RRRIGHT) \
+ptr1 = ML + LLLEFT*LEN;\
+ptr2 = ML + RRRIGHT*LEN;\
+memcpy(tmp,ptr1,LEN);\
+memcpy(ptr1,ptr2,LEN);\
+memcpy(ptr2,tmp,LEN);\
+
+
+
+static void qsort_mem_pieces(byte *ml, uint length, unsigned short pivotP, unsigned int nElem)
+{
+ unsigned int leftP, rightP, pivotEnd, pivotTemp, leftTemp;
+ unsigned int lNum; byte tmp [MAX_REFLENGTH], *ptr1, *ptr2;
+ int retval;
+tailRecursion:
+ if (nElem <= 1) return;
+ if (nElem == 2)
+ {
+ if (COMP(ml,length,pivotP, rightP = pivotP + 1) > 0)
+ {
+ EX(ml,length,pivotP, rightP);
+ }
+ return;
+ }
+
+ rightP = (nElem - 1) + pivotP;
+ leftP = (nElem >> 1) + pivotP;
+
+/* sort the pivot, left, and right elements for "median of 3" */
+
+ if (COMP (ml,length,leftP, rightP) > 0)
+ {
+ EX (ml,length,leftP, rightP);
+ }
+ if (COMP (ml,length,leftP, pivotP) > 0)
+ {
+ EX (ml,length,leftP, pivotP);
+ }
+ else if (COMP (ml,length, pivotP, rightP) > 0)
+ {
+ EX (ml,length,pivotP, rightP);
+ }
+
+ if (nElem == 3) {
+ EX (ml,length,pivotP, leftP);
+ return;
+ }
+
+/* now for the classic Hoare algorithm */
+
+ leftP = pivotEnd = pivotP + 1;
+
+ do {
+ while ((retval = COMP (ml,length, leftP, pivotP)) <= 0)
+ {
+ if (retval == 0) {
+ EX(ml,length,leftP, pivotEnd);
+ pivotEnd++;
+ }
+ if (leftP < rightP)
+ leftP++;
+ else
+ goto qBreak;
+ }
+ while (leftP < rightP) {
+ if ((retval = COMP(ml,length,pivotP, rightP)) < 0)
+ rightP--;
+ else
+ {
+ EX (ml,length,leftP, rightP);
+ if (retval != 0) {
+ leftP++;
+ rightP--;
+ }
+ break;
+ }
+ }
+ } while (leftP < rightP);
+
+qBreak:
+
+ if (COMP(ml,length,leftP, pivotP) <= 0)
+ leftP++;
+
+ leftTemp = leftP - 1; pivotTemp = pivotP;
+
+ while ((pivotTemp < pivotEnd) && (leftTemp >= pivotEnd))
+ {
+ EX(ml,length,pivotTemp, leftTemp);
+ pivotTemp++; leftTemp--;
+ }
+
+ lNum = leftP - pivotEnd; nElem = (nElem + pivotP) - leftP;
+
+ /* Sort smaller partition first to reduce stack usage */
+ if (nElem < lNum)
+ {
+ qsort_mem_pieces(ml,length,leftP, nElem); nElem = lNum;
+ }
+ else
+ {
+ qsort_mem_pieces(ml,length,pivotP, lNum);
+ pivotP = leftP;
+ }
+ goto tailRecursion;
+}
+
+static byte * btree_search(byte *lane, byte *key,register int last, uint length)
+{
+ register int first = 0;
+ if (last == first)
+ {
+ if (!memcmp(lane,key,length)) return lane;
+ return (byte *)0;
+ }
+Recursion_is_too_slow:
+ if (last - first < 3)
+ {
+ if (!memcmp(lane + first*length,key,length)) return lane + first * length;
+ if (last == first + 1) return (byte *)0;
+ if (!memcmp(lane + last*length,key,length)) return lane + last * length;
+ return (byte *)0;
+ }
+ else
+ {
+ int half = first + (last - first)/2;
+ int result = memcmp(lane + half*length,key,length);
+ if (!result) return lane + half*length;
+ if (result < 0)
+ {
+ first = half + 1; goto Recursion_is_too_slow;
+ }
+ else
+ {
+ last = half + 1; goto Recursion_is_too_slow;
+ }
+ }
+}
+
+struct written_block {
+ byte first[MAX_REFLENGTH], last[MAX_REFLENGTH];
+ my_off_t offset;
+ uint how_many;
+};
+
+static IO_CACHE *strip_duplicates_from_temp (byte *memory_lane, IO_CACHE *ptr, uint ref_length, int *written)
+{
+ byte *mem_ptr; my_off_t off = 0;
+ int read_error, write_error, how_many_to_read, total_to_read = *written, pieces_in_memory = 0, mem_count,written_rows;
+ int offset = written_rows=*written=0;
+ int mem_pool_size = MEM_STRIP_BUF_SIZE * MAX_REFLENGTH / ref_length;
+ byte dup_record[MAX_REFLENGTH]; memset(dup_record,'\xFF',MAX_REFLENGTH);
+ if (reinit_io_cache(ptr,READ_CACHE,0L,0,0))
+ return ptr;
+ IO_CACHE *tempptr = (IO_CACHE *) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL));
+ if (open_cached_file(tempptr, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)))
+ {
+ my_free((gptr) tempptr, MYF (0));
+ return ptr;
+ }
+ DYNAMIC_ARRAY written_blocks;
+ VOID(init_dynamic_array(&written_blocks,sizeof(struct written_block),20,50));
+ for (;pieces_in_memory < total_to_read;)
+ {
+ how_many_to_read = total_to_read - pieces_in_memory; read_error=write_error=0;
+ if (how_many_to_read > mem_pool_size)
+ how_many_to_read = mem_pool_size;
+ if (my_b_read(ptr, memory_lane, (uint) how_many_to_read * ref_length))
+ {
+ read_error = 1;
+ break;
+ }
+ pieces_in_memory += how_many_to_read;
+    qsort_mem_pieces(memory_lane, ref_length, 0, how_many_to_read);
+ byte *checking = dup_record, *cursor=NULL, *mem_end = memory_lane + how_many_to_read * ref_length;
+ int opt_unique_pieces, unique_pieces_in_memory=0; write_error=0;
+ for (mem_ptr=memory_lane; mem_ptr < mem_end ; mem_ptr += ref_length)
+ {
+ if (memcmp(mem_ptr,checking, ref_length))
+ {
+ if (cursor)
+ {
+ memmove(cursor,mem_ptr,mem_end - mem_ptr);
+ mem_end -= mem_ptr - cursor;
+ mem_ptr = cursor; cursor = NULL;
+ }
+ unique_pieces_in_memory++;
+ checking = mem_ptr;
+ }
+ else if (!cursor) cursor = mem_ptr;
+ }
+ opt_unique_pieces=unique_pieces_in_memory;
+ if (written_rows)
+ {
+ if (reinit_io_cache(tempptr,READ_CACHE,0L,0,0)) {write_error = -1; break;}
+ for (uint i=0 ; i < written_blocks.elements ; i++)
+ {
+ struct written_block *wbp=dynamic_element(&written_blocks,i,struct written_block*);
+ if ((memcmp(memory_lane,wbp->last,ref_length) == 1) || (memcmp(memory_lane + (unique_pieces_in_memory - 1) * ref_length, wbp->first, ref_length) == -1))
+ continue;
+ else
+ {
+ if (wbp->how_many < 3)
+ {
+ if ((mem_ptr=btree_search(memory_lane,wbp->first,unique_pieces_in_memory-1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ if (wbp->how_many == 2 && (mem_ptr=btree_search(memory_lane,wbp->last,unique_pieces_in_memory-1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ }
+ else
+ {
+ byte block[MAX_REFLENGTH * MEM_STRIP_BUF_SIZE]; // 16 K maximum and only temporary !!
+ if (my_b_read(tempptr, block, (uint) wbp->how_many * ref_length))
+ {
+ read_error = 1; goto skip_writting;
+ }
+ if (unique_pieces_in_memory < 3)
+ {
+ if ((mem_ptr=btree_search(block,memory_lane,wbp->how_many - 1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(memory_lane,dup_record,ref_length);
+ }
+ if (unique_pieces_in_memory == 2 && (mem_ptr=btree_search(block,memory_lane + ref_length,wbp->how_many - 1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ }
+ else
+ {
+ byte *cursor; bool do_check_past;
+ if (unique_pieces_in_memory < wbp->how_many)
+ {
+ do_check_past = (memcmp(memory_lane + (unique_pieces_in_memory - 1)*ref_length,wbp->last,ref_length) == 1);
+ for (cursor=memory_lane; cursor < memory_lane + unique_pieces_in_memory*ref_length; cursor += ref_length)
+ {
+ if ((mem_ptr=btree_search(block,cursor,wbp->how_many - 1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(cursor,dup_record,ref_length);
+ }
+ else if (do_check_past && (memcmp(cursor,wbp->last,ref_length) == 1)) break;
+ }
+ }
+ else
+ {
+ do_check_past = (memcmp(memory_lane + (unique_pieces_in_memory - 1)*ref_length,wbp->last,ref_length) == -1);
+ for (cursor=block; cursor < block + wbp->how_many*ref_length;cursor += ref_length)
+ {
+ if ((mem_ptr=btree_search(memory_lane,cursor,unique_pieces_in_memory-1, ref_length)))
+ {
+ if (!--opt_unique_pieces) goto skip_writting; // nice little optimization
+ memcpy(mem_ptr,dup_record,ref_length);
+ }
+ else if (do_check_past && (memcmp(cursor,memory_lane + (unique_pieces_in_memory - 1)*ref_length,ref_length) == 1)) break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ reinit_io_cache(tempptr, WRITE_CACHE,off,0,0);
+ struct written_block wb; wb.offset = off; wb.how_many=opt_unique_pieces; byte *last;
+ if (opt_unique_pieces < unique_pieces_in_memory)
+ {
+ for (mem_count=0, mem_ptr=memory_lane; mem_count<unique_pieces_in_memory;mem_count++, mem_ptr += ref_length)
+ {
+ if (memcmp(mem_ptr,dup_record,ref_length))
+ {
+ if (my_b_write(tempptr,mem_ptr,ref_length))
+ {
+ if (write_error == 9 || write_error == -1) write_error = 0;
+ if (write_error) break;
+ }
+ if (!mem_count) memcpy(wb.first,mem_ptr,ref_length);
+ last = mem_ptr;
+ written_rows++;
+ }
+ }
+ memcpy(wb.last,last,ref_length);
+ }
+ else
+ {
+ memcpy(wb.first,memory_lane,ref_length); memcpy(wb.last,memory_lane + (unique_pieces_in_memory -1)*ref_length,ref_length);
+ if (my_b_write(tempptr, memory_lane,unique_pieces_in_memory * ref_length))
+ {
+ write_error = 1; break;
+ }
+ written_rows += unique_pieces_in_memory;
+ }
+ off = my_b_tell(tempptr);
+ VOID(push_dynamic(&written_blocks,(gptr) &wb));
+ skip_writting:
+ if (write_error || read_error) break;
+ }
+ delete_dynamic(&written_blocks);
+ if (read_error || write_error)
+ {
+ close_cached_file(tempptr); end_io_cache(tempptr);
+ return ptr;
+ }
+ else
+ {
+ close_cached_file(ptr); *written=written_rows; end_io_cache(ptr);
+ reinit_io_cache(tempptr,READ_CACHE,0L,0,0);
+ return tempptr;
+ }
+}
+
+#endif /* SINISAS_STRIP */
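
Despite its name, btree_search() above is a binary search over an array of sorted fixed-width row references. A compact equivalent for reference (illustration only):

#include <cstddef>
#include <cstring>

const unsigned char *find_fixed_key(const unsigned char *sorted, size_t count,
                                    const unsigned char *key, size_t key_len)
{
  size_t lo = 0, hi = count;
  while (lo < hi)
  {
    size_t mid = lo + (hi - lo) / 2;
    int cmp = std::memcmp(sorted + mid * key_len, key, key_len);
    if (cmp == 0)
      return sorted + mid * key_len;           // exact match
    if (cmp < 0)
      lo = mid + 1;
    else
      hi = mid;
  }
  return 0;                                    // not found
}
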
+
+/* Return true if some table is not transaction safe */
+
+static bool some_table_is_not_transaction_safe (TABLE_LIST *tl)
+{
+ for (; tl ; tl=tl->next)
+ {
+ if (!(tl->table->file->has_transactions()))
+ return true;
+ }
+ return false;
+}
+
+
+void multi_delete::send_error(uint errcode,const char *err)
+{
+  /* First send the error, whatever it is ... */
+ ::send_error(&thd->net,errcode,err);
+ /* If nothing deleted return */
+ if (!deleted)
+ return;
+ /* Below can happen when thread is killed early ... */
+ if (!table_being_deleted)
+ table_being_deleted=delete_tables;
+
+ /*
+    If rows have been deleted only from the first table and that table is
+    transactional, just roll back.
+    The same applies if all tables are transactional, regardless of how far
+    we got. In all other cases, attempt the remaining deletes ...
+ */
+ if ((table_being_deleted->table->file->has_transactions() &&
+ table_being_deleted == delete_tables) ||
+ !some_table_is_not_transaction_safe(delete_tables->next))
+ ha_rollback(thd);
+ else if (do_delete)
+ VOID(do_deletes(true));
+}
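The branch above reduces to a simple rule: roll back only when the failure is confined to the first table and that table is transactional, or when every table involved is transactional; otherwise it is better to push on and finish the remaining deletes by hand. A minimal standalone sketch of that rule, using an invented TableInfo type rather than the real TABLE_LIST/handler chain:

// Standalone sketch, not part of the patch: models the rollback choice in
// multi_delete::send_error() with invented types.
#include <cstddef>
#include <cstdio>
#include <vector>

struct TableInfo { bool transactional; };

// true  -> a handler rollback undoes everything that was deleted
// false -> better to push on and delete from the remaining tables by hand
static bool should_rollback(const std::vector<TableInfo> &tables,
                            std::size_t failed_table)
{
  if (failed_table == 0 && tables[0].transactional)
    return true;                       // only the first, transactional table was touched
  for (std::size_t i = 0; i < tables.size(); i++)
    if (!tables[i].transactional)
      return false;                    // at least one table cannot be rolled back
  return true;                         // all tables are transactional
}

int main()
{
  std::vector<TableInfo> tables = {{true}, {false}};   // t1 transactional, t2 not
  std::printf("%d %d\n", should_rollback(tables, 0), should_rollback(tables, 1));
  return 0;                                            // prints: 1 0
}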
+
+
+int multi_delete::do_deletes (bool from_send_error)
+{
+ int error = 0, counter = 0, count;
+
+ if (from_send_error)
+ {
+    /* Find the table number for 'table_being_deleted' */
+ for (TABLE_LIST *aux=delete_tables;
+ aux != table_being_deleted;
+ aux=aux->next)
+ counter++;
+ }
+ else
+ table_being_deleted = delete_tables;
+
+ do_delete = false;
+ for (table_being_deleted=table_being_deleted->next;
+ table_being_deleted ;
+ table_being_deleted=table_being_deleted->next, counter++)
+ {
+ TABLE *table = table_being_deleted->table;
+ int rl = table->file->ref_length;
+#ifdef SINISAS_STRIP
+ int num_of_positions = (int)my_b_tell(tempfiles[counter])/rl;
+ if (!num_of_positions) continue;
+ tempfiles[counter] = strip_duplicates_from_temp(memory_lane, tempfiles[counter],rl,&num_of_positions);
+ if (!num_of_positions)
+ {
+ error=1; break;
+ }
+#else
+ if (tempfiles[counter]->get(table))
+ {
+ error=1;
+ break;
+ }
+#endif
+
+#if USE_REGENERATE_TABLE
+ // nice little optimization ....
+ // but Monty has to fix generate_table...
+    // This will not work for transactional tables because their 'records'
+    // count is only an estimate, not an exact value
+ if (num_of_positions == table->file->records)
+ {
+ TABLE_LIST table_list;
+ bzero((char*) &table_list,sizeof(table_list));
+ table_list.name=table->table_name; table_list.real_name=table_being_deleted->real_name;
+ table_list.table=table;
+ table_list.grant=table->grant;
+ table_list.db = table_being_deleted->db;
+ error=generate_table(thd,&table_list,(TABLE *)0);
+ if (error <= 0) {error = 1; break;}
+ deleted += num_of_positions;
+ continue;
+ }
+#endif /* USE_REGENERATE_TABLE */
+
+ READ_RECORD info;
+ error=0;
+#ifdef SINISAS_STRIP
+ SQL_SELECT *select= new SQL_SELECT;
+ select->head=table;
+ select->file=*tempfiles[counter];
+ init_read_record(&info,thd,table,select,0,0);
+#else
+ init_read_record(&info,thd,table,NULL,0,0);
+#endif
+ bool not_trans_safe = some_table_is_not_transaction_safe(delete_tables);
+ while (!(error=info.read_record(&info)) &&
+ (!thd->killed || from_send_error || not_trans_safe))
+ {
+ error=table->file->delete_row(table->record[0]);
+ if (error)
+ {
+ table->file->print_error(error,MYF(0));
+ break;
+ }
+ else
+ deleted++;
+ }
+ end_read_record(&info);
+#ifdef SINISAS_STRIP
+ delete select;
+#endif
+ if (error == -1)
+ error = 0;
+ }
+ return error;
+}
+
+
+bool multi_delete::send_eof()
+{
+ thd->proc_info="deleting from reference tables";
+ int error = do_deletes(false);
+
+ thd->proc_info="end";
+ if (error && error != -1)
+ {
+ ::send_error(&thd->net);
+ return 1;
+ }
+
+ if (deleted &&
+ (error <= 0 || some_table_is_not_transaction_safe(delete_tables)))
+ {
+ mysql_update_log.write(thd,thd->query,thd->query_length);
+ Query_log_event qinfo(thd, thd->query);
+ if (mysql_bin_log.write(&qinfo) &&
+ !some_table_is_not_transaction_safe(delete_tables))
+ error=1; // Rollback
+ VOID(ha_autocommit_or_rollback(thd,error >= 0));
+ }
+ ::send_ok(&thd->net,deleted);
+ return 0;
+}
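Taken together, the multi_delete pieces implement a two-pass scheme: while the join runs, the row positions of each auxiliary table are buffered into a per-table tempfile, and only send_eof()/do_deletes() walks those buffers and issues the actual delete_row() calls. A rough standalone model of the idea, with invented names and std containers standing in for the IO_CACHE tempfiles:

#include <cstdio>
#include <map>
#include <set>
#include <string>
#include <vector>

// Pass 1: remember which row of which table should go.
struct PendingDeletes {
  std::map<std::string, std::vector<long>> rows_by_table;
  void remember(const std::string &table, long row_id)
  { rows_by_table[table].push_back(row_id); }
};

// Pass 2: replay the buffered positions against the "storage engine".
static long flush_deletes(PendingDeletes &pending,
                          std::map<std::string, std::set<long>> &storage)
{
  long deleted = 0;
  for (auto &entry : pending.rows_by_table)
    for (long row_id : entry.second)
      deleted += storage[entry.first].erase(row_id);
  return deleted;
}

int main()
{
  std::map<std::string, std::set<long>> storage = {
    {"t1", {1, 2, 3}}, {"t2", {2, 3, 4}}};
  PendingDeletes pending;
  pending.remember("t1", 2);            // rows found while scanning the join
  pending.remember("t2", 2);
  pending.remember("t2", 4);
  std::printf("deleted %ld rows\n", flush_deletes(pending, storage));
  return 0;
}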
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index cfd16df5d17..19dc239c050 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -642,7 +642,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
/* Copy error message and abort */
thd->fatal_error=1;
strmov(thd->net.last_error,tmp->thd.net.last_error);
- thd->net.last_errno=thd->net.last_errno;
+ thd->net.last_errno=tmp->thd.net.last_errno;
}
tmp->unlock();
pthread_mutex_unlock(&LOCK_delayed_create);
@@ -1362,6 +1362,7 @@ select_create::prepare(List<Item> &values)
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
+ table->file->deactivate_non_unique_index((ha_rows) 0);
DBUG_RETURN(0);
}
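The one-line addition to select_create::prepare() asks the handler to postpone maintenance of the non-unique indexes while the rows produced by the SELECT are inserted; the indexes are then rebuilt in one pass, which is cheaper than updating them row by row. A toy illustration of that trade-off, with invented Index/Table types that only mimic the behaviour:

#include <algorithm>
#include <string>
#include <vector>

struct Index {
  std::string name;
  bool unique;
  bool active;
  std::vector<int> entries;            // toy "index": sorted copy of the column
};

struct Table {
  std::vector<int> rows;
  std::vector<Index> indexes;

  void bulk_insert(const std::vector<int> &data)
  {
    for (Index &idx : indexes)          // postpone non-unique index maintenance
      if (!idx.unique)
        idx.active = false;
    for (int v : data)
    {
      rows.push_back(v);
      for (Index &idx : indexes)        // unique indexes must stay up to date
        if (idx.active)
          idx.entries.insert(
            std::lower_bound(idx.entries.begin(), idx.entries.end(), v), v);
    }
    for (Index &idx : indexes)          // one cheap rebuild instead of N updates
      if (!idx.active)
      {
        idx.entries = rows;
        std::sort(idx.entries.begin(), idx.entries.end());
        idx.active = true;
      }
  }
};

int main()
{
  Table t;
  t.indexes = {{"PRIMARY", true, true, {}}, {"idx_b", false, true, {}}};
  t.bulk_insert({3, 1, 2});
  return (int) t.indexes[1].entries.size();            // 3 entries after rebuild
}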
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 0a1ee0649c4..3f6c09073e6 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -142,11 +142,11 @@ LEX *lex_start(THD *thd, uchar *buf,uint length)
lex->next_state=STATE_START;
lex->end_of_query=(lex->ptr=buf)+length;
lex->yylineno = 1;
- lex->create_refs=lex->in_comment=0;
+ lex->select->create_refs=lex->in_comment=0;
lex->length=0;
- lex->in_sum_expr=0;
- lex->expr_list.empty();
- lex->ftfunc_list.empty();
+ lex->select->in_sum_expr=0;
+ lex->select->expr_list.empty();
+ lex->select->ftfunc_list.empty();
lex->convert_set=(lex->thd=thd)->convert_set;
lex->yacc_yyss=lex->yacc_yyvs=0;
lex->ignore_space=test(thd->client_capabilities & CLIENT_IGNORE_SPACE);
@@ -155,7 +155,7 @@ LEX *lex_start(THD *thd, uchar *buf,uint length)
void lex_end(LEX *lex)
{
- lex->expr_list.delete_elements(); // If error when parsing sql-varargs
+ lex->select->expr_list.delete_elements(); // If error when parsing sql-varargs
x_free(lex->yacc_yyss);
x_free(lex->yacc_yyvs);
}
@@ -656,12 +656,9 @@ int yylex(void *arg)
if (c == 'e' || c == 'E')
{
c = yyGet();
- if (c != '-' && c != '+' && !isdigit(c))
- { // No exp sig found
- state= STATE_CHAR;
- break;
- }
- if (!isdigit(yyGet()))
+ if (c == '-' || c == '+')
+	  c = yyGet();			// Skip sign
+ if (!isdigit(c))
{ // No digit after sign
state= STATE_CHAR;
break;
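The reworked lexer branch accepts an optional sign directly after 'e'/'E' and then insists on a digit, instead of rejecting every signed exponent. The same rule as a tiny standalone scanner (hypothetical helper, not the real yylex):

#include <cctype>
#include <cstdio>

// Returns the number of characters consumed by a valid exponent part
// ("e10", "E-3", "e+7"), or 0 if what follows is not an exponent.
static int scan_exponent(const char *p)
{
  const char *start = p;
  if (*p != 'e' && *p != 'E')
    return 0;
  p++;
  if (*p == '-' || *p == '+')           // optional sign, as in the patch
    p++;
  if (!std::isdigit((unsigned char) *p))  // no digit after the sign: not an exponent
    return 0;
  while (std::isdigit((unsigned char) *p))
    p++;
  return (int) (p - start);
}

int main()
{
  std::printf("%d %d %d\n", scan_exponent("e-10"), scan_exponent("E+"),
              scan_exponent("e5x"));    // prints: 4 0 2
  return 0;
}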
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 0df5bbebc37..e585ec65191 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -53,8 +53,10 @@ enum enum_sql_command {
SQLCOM_BEGIN, SQLCOM_LOAD_MASTER_TABLE, SQLCOM_CHANGE_MASTER,
SQLCOM_RENAME_TABLE, SQLCOM_BACKUP_TABLE, SQLCOM_RESTORE_TABLE,
SQLCOM_RESET, SQLCOM_PURGE, SQLCOM_SHOW_BINLOGS,
- SQLCOM_SHOW_OPEN_TABLES,
- SQLCOM_HA_OPEN, SQLCOM_HA_CLOSE, SQLCOM_HA_READ
+ SQLCOM_SHOW_OPEN_TABLES, SQLCOM_LOAD_MASTER_DATA,
+ SQLCOM_HA_OPEN, SQLCOM_HA_CLOSE, SQLCOM_HA_READ,
+ SQLCOM_SHOW_SLAVE_HOSTS, SQLCOM_MULTI_DELETE, SQLCOM_UNION_SELECT,
+ SQLCOM_SHOW_BINLOG_EVENTS
};
enum lex_states { STATE_START, STATE_CHAR, STATE_IDENT,
@@ -94,39 +96,70 @@ typedef struct st_lex_master_info
ulonglong pos;
} LEX_MASTER_INFO;
+
+enum sub_select_type {UNSPECIFIED_TYPE,UNION_TYPE, INTERSECT_TYPE, EXCEPT_TYPE};
+
+/* The state of the lex parsing for selects */
+
+typedef struct st_select_lex {
+ enum sub_select_type linkage;
+ uint select_number; /* For Item_select */
+ char *db,*db1,*table1,*db2,*table2; /* For outer join using .. */
+ Item *where,*having;
+ ha_rows select_limit,offset_limit;
+ ulong options;
+ List<List_item> expr_list;
+ List<List_item> when_list;
+ SQL_LIST order_list,table_list,group_list;
+ List<Item> item_list;
+ List<String> interval_list,use_index, *use_index_ptr, ignore_index, *ignore_index_ptr;
+ List<Item_func_match> ftfunc_list;
+ uint in_sum_expr, sort_default;
+ bool create_refs;
+ st_select_lex *next;
+} SELECT_LEX;
+
+
+class Set_option :public Sql_alloc {
+public:
+ const char *name;
+ Item *item;
+ uint name_length;
+ bool type; /* 1 if global */
+ Set_option(bool par_type, const char *par_name, uint length,
+ Item *par_item)
+ :name(par_name), item(par_item), name_length(length), type(par_type) {}
+};
+
+
/* The state of the lex parsing. This is saved in the THD struct */
typedef struct st_lex {
uint yylineno,yytoklen; /* Simulate lex */
LEX_YYSTYPE yylval;
+ SELECT_LEX select_lex, *select;
uchar *ptr,*tok_start,*tok_end,*end_of_query;
char *length,*dec,*change,*name;
- char *db,*db1,*table1,*db2,*table2; /* For outer join using .. */
char *backup_dir; /* For RESTORE/BACKUP */
char* to_log; /* For PURGE MASTER LOGS TO */
String *wild;
sql_exchange *exchange;
- ha_rows select_limit,offset_limit;
- List<List_item> expr_list;
- List<List_item> when_list;
- List<List_item> many_values;
List<key_part_spec> col_list;
List<Alter_drop> drop_list;
List<Alter_column> alter_list;
- List<String> interval_list,use_index,*use_index_ptr,
- ignore_index, *ignore_index_ptr;
+ List<String> interval_list;
List<st_lex_user> users_list;
List<LEX_COLUMN> columns;
List<Key> key_list;
List<create_field> create_list;
- List<Item> item_list,*insert_list,field_list,value_list;
- List<Item_func_match> ftfunc_list;
- SQL_LIST order_list,table_list,group_list,proc_list;
+ List<Item> *insert_list,field_list,value_list;
+ List<List_item> many_values;
+ List<Set_option> option_list;
+ SQL_LIST proc_list, auxilliary_table_list;
TYPELIB *interval;
create_field *last_field;
-
- Item *where,*having,*default_value;
+ Item *default_value;
CONVERT *convert_set;
LEX_USER *grant_user;
gptr yacc_yyss,yacc_yyvs;
@@ -136,7 +169,6 @@ typedef struct st_lex {
HA_CREATE_INFO create_info;
LEX_MASTER_INFO mi; // used by CHANGE MASTER
ulong thread_id,type;
- ulong options;
ulong gemini_spin_retries;
enum_sql_command sql_command;
enum lex_states next_state;
@@ -145,10 +177,10 @@ typedef struct st_lex {
enum enum_ha_read_modes ha_read_mode;
enum ha_rkey_function ha_rkey_mode;
enum enum_enable_or_disable alter_keys_onoff;
- uint in_sum_expr,grant,grant_tot_col,which_columns, sort_default;
+ uint grant,grant_tot_col,which_columns;
thr_lock_type lock_option;
- bool create_refs,drop_primary,drop_if_exists,local_file;
- bool in_comment,ignore_space,verbose,simple_alter;
+ bool drop_primary,drop_if_exists,local_file;
+ bool in_comment,ignore_space,verbose,simple_alter, option_type;
} LEX;
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 3dda8d1cff7..928a62a397e 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -46,6 +46,8 @@ static bool check_dup(THD *thd,const char *db,const char *name,
static void mysql_init_query(THD *thd);
static void remove_escape(char *name);
static void refresh_status(void);
+static bool append_file_to_dir(char **filename_ptr, char *table_name);
+static int link_in_large_list_and_check_acl(THD *thd,LEX *lex,SQL_LIST *tables);
const char *any_db="*any*"; // Special symbol for check_access
@@ -53,13 +55,13 @@ const char *command_name[]={
"Sleep", "Quit", "Init DB", "Query", "Field List", "Create DB",
"Drop DB", "Refresh", "Shutdown", "Statistics", "Processlist",
"Connect","Kill","Debug","Ping","Time","Delayed_insert","Change user",
- "Binlog Dump","Table Dump", "Connect Out"
+ "Binlog Dump","Table Dump", "Connect Out", "Register Slave"
};
bool volatile abort_slave = 0;
#ifdef HAVE_OPENSSL
-extern VioSSLAcceptorFd* ssl_acceptor_fd;
+extern struct st_VioSSLAcceptorFd * ssl_acceptor_fd;
#endif /* HAVE_OPENSSL */
#ifdef __WIN__
@@ -423,9 +425,7 @@ check_connections(THD *thd)
DBUG_PRINT("info", ("Agreed to change IO layer to SSL") );
/* Do the SSL layering. */
DBUG_PRINT("info", ("IO layer change in progress..."));
- VioSocket* vio_socket = my_reinterpret_cast(VioSocket*)(net->vio);
- VioSSL* vio_ssl = ssl_acceptor_fd->accept(vio_socket);
- net->vio = my_reinterpret_cast(NetVio*) (vio_ssl);
+ net->vio = sslaccept(ssl_acceptor_fd, net->vio);
DBUG_PRINT("info", ("Reading user information over SSL layer"));
if ((pkt_len=my_net_read(net)) == packet_error ||
pkt_len < NORMAL_HANDSHAKE_SIZE)
@@ -556,6 +556,7 @@ pthread_handler_decl(handle_one_connection,arg)
free_root(&thd->mem_root,MYF(0));
if (net->error && net->vio != 0)
{
+ if (!thd->killed && ! opt_warnings)
sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
thd->thread_id,(thd->db ? thd->db : "unconnected"),
thd->user ? thd->user : "unauthenticated",
@@ -760,12 +761,20 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thread_running++;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
thd->set_time();
- thd->lex.options=0; // We store status here
+ thd->lex.select_lex.options=0; // We store status here
switch (command) {
case COM_INIT_DB:
if (!mysql_change_db(thd,packet))
mysql_log.write(thd,command,"%s",thd->db);
break;
+ case COM_REGISTER_SLAVE:
+ {
+ if(register_slave(thd, (uchar*)packet, packet_length))
+ send_error(&thd->net);
+ else
+ send_ok(&thd->net);
+ break;
+ }
case COM_TABLE_DUMP:
{
slow_command = TRUE;
@@ -1027,7 +1036,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->proc_info="logging slow query";
if ((ulong) (thd->start_time - thd->time_after_lock) > long_query_time ||
- ((thd->lex.options &
+ ((thd->lex.select_lex.options &
(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED)) &&
(specialflag & SPECIAL_LONG_LOG_FORMAT)))
{
@@ -1058,7 +1067,8 @@ mysql_execute_command(void)
int res=0;
THD *thd=current_thd;
LEX *lex= &thd->lex;
- TABLE_LIST *tables=(TABLE_LIST*) lex->table_list.first;
+ TABLE_LIST *tables=(TABLE_LIST*) lex->select->table_list.first;
+ SELECT_LEX *select_lex = lex->select;
DBUG_ENTER("mysql_execute_command");
if(table_rules_on && thd->slave_thread && tables && !tables_ok(thd,tables))
@@ -1070,7 +1080,7 @@ mysql_execute_command(void)
case SQLCOM_SELECT:
{
select_result *result;
- if (lex->options & SELECT_DESCRIBE)
+ if (select_lex->options & SELECT_DESCRIBE)
lex->exchange=0;
if (tables)
{
@@ -1088,12 +1098,12 @@ mysql_execute_command(void)
break; // Error message is given
}
- thd->offset_limit=lex->offset_limit;
- thd->select_limit=lex->select_limit+lex->offset_limit;
- if (thd->select_limit < lex->select_limit)
+ thd->offset_limit=select_lex->offset_limit;
+ thd->select_limit=select_lex->select_limit+select_lex->offset_limit;
+ if (thd->select_limit < select_lex->select_limit)
thd->select_limit= HA_POS_ERROR; // no limit
if (thd->select_limit == HA_POS_ERROR)
- lex->options&= ~OPTION_FOUND_ROWS;
+ select_lex->options&= ~OPTION_FOUND_ROWS;
if (lex->exchange)
{
@@ -1118,8 +1128,8 @@ mysql_execute_command(void)
{
res= -1;
#ifdef DELETE_ITEMS
- delete lex->having;
- delete lex->where;
+ delete select_lex->having;
+ delete select_lex->where;
#endif
break;
}
@@ -1137,22 +1147,22 @@ mysql_execute_command(void)
if (!(res=open_and_lock_tables(thd,tables)))
{
- res=mysql_select(thd,tables,lex->item_list,
- lex->where,
- lex->ftfunc_list,
- (ORDER*) lex->order_list.first,
- (ORDER*) lex->group_list.first,
- lex->having,
+ res=mysql_select(thd,tables,select_lex->item_list,
+ select_lex->where,
+ select_lex->ftfunc_list,
+ (ORDER*) select_lex->order_list.first,
+ (ORDER*) select_lex->group_list.first,
+ select_lex->having,
(ORDER*) lex->proc_list.first,
- lex->options | thd->options,
+ select_lex->options | thd->options,
result);
if (res)
result->abort();
}
delete result;
#ifdef DELETE_ITEMS
- delete lex->having;
- delete lex->where;
+ delete select_lex->having;
+ delete select_lex->where;
#endif
break;
}
@@ -1163,6 +1173,20 @@ mysql_execute_command(void)
res = purge_master_logs(thd, lex->to_log);
break;
}
+ case SQLCOM_SHOW_SLAVE_HOSTS:
+ {
+ if(check_access(thd, FILE_ACL, any_db))
+ goto error;
+ res = show_slave_hosts(thd);
+ break;
+ }
+ case SQLCOM_SHOW_BINLOG_EVENTS:
+ {
+ if(check_access(thd, FILE_ACL, any_db))
+ goto error;
+ res = show_binlog_events(thd);
+ break;
+ }
case SQLCOM_BACKUP_TABLE:
{
if (check_db_used(thd,tables) ||
@@ -1203,6 +1227,13 @@ mysql_execute_command(void)
res = show_binlog_info(thd);
break;
}
+
+ case SQLCOM_LOAD_MASTER_DATA: // sync with master
+ if(check_process_priv(thd))
+ goto error;
+ res = load_master_data(thd);
+ break;
+
case SQLCOM_LOAD_MASTER_TABLE:
if (!tables->db)
@@ -1222,23 +1253,11 @@ mysql_execute_command(void)
if (strlen(tables->name) > NAME_LEN)
{
net_printf(&thd->net,ER_WRONG_TABLE_NAME,tables->name);
- res=0;
break;
}
- thd->last_nx_table = tables->real_name;
- thd->last_nx_db = tables->db;
- if(fetch_nx_table(thd, &glob_mi))
- // fetch_nx_table is responsible for sending
- // the error
- {
- res = 0;
- thd->net.no_send_ok = 0; // easier to do it here
- // this way we make sure that when we are done, we are clean
- break;
- }
-
- res = 0;
+ if (fetch_nx_table(thd, tables->db, tables->real_name, &glob_mi, 0))
+ break; // fetch_nx_table did send the error to the client
send_ok(&thd->net);
break;
@@ -1266,7 +1285,14 @@ mysql_execute_command(void)
res=0;
break;
}
- if (lex->item_list.elements) // With select
+ /* Fix names if symlinked tables */
+ if (append_file_to_dir(&lex->create_info.data_file_name, tables->name) ||
+ append_file_to_dir(&lex->create_info.index_file_name, tables->name))
+ {
+ res=-1;
+ break;
+ }
+ if (select_lex->item_list.elements) // With select
{
select_result *result;
@@ -1284,9 +1310,9 @@ mysql_execute_command(void)
for (table = tables->next ; table ; table=table->next)
table->lock_type= lex->lock_option;
}
- thd->offset_limit=lex->offset_limit;
- thd->select_limit=lex->select_limit+lex->offset_limit;
- if (thd->select_limit < lex->select_limit)
+ thd->offset_limit=select_lex->offset_limit;
+ thd->select_limit=select_lex->select_limit+select_lex->offset_limit;
+ if (thd->select_limit < select_lex->select_limit)
thd->select_limit= HA_POS_ERROR; // No limit
if (!(res=open_and_lock_tables(thd,tables->next)))
@@ -1295,16 +1321,16 @@ mysql_execute_command(void)
tables->real_name, &lex->create_info,
lex->create_list,
lex->key_list,
- lex->item_list,lex->duplicates)))
+ select_lex->item_list,lex->duplicates)))
{
- res=mysql_select(thd,tables->next,lex->item_list,
- lex->where,
- lex->ftfunc_list,
- (ORDER*) lex->order_list.first,
- (ORDER*) lex->group_list.first,
- lex->having,
+ res=mysql_select(thd,tables->next,select_lex->item_list,
+ select_lex->where,
+ select_lex->ftfunc_list,
+ (ORDER*) select_lex->order_list.first,
+ (ORDER*) select_lex->group_list.first,
+ select_lex->having,
(ORDER*) lex->proc_list.first,
- lex->options | thd->options,
+ select_lex->options | thd->options,
result);
if (res)
result->abort();
@@ -1359,10 +1385,10 @@ mysql_execute_command(void)
}
if (!tables->db)
tables->db=thd->db;
- if (!lex->db)
- lex->db=tables->db;
+ if (!select_lex->db)
+ select_lex->db=tables->db;
if (check_access(thd,ALTER_ACL,tables->db,&tables->grant.privilege) ||
- check_access(thd,INSERT_ACL | CREATE_ACL,lex->db,&priv) ||
+ check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv) ||
check_merge_table_access(thd, tables->db,
(TABLE_LIST *)
lex->create_info.merge_list.first))
@@ -1378,21 +1404,23 @@ mysql_execute_command(void)
TABLE_LIST tmp_table;
bzero((char*) &tmp_table,sizeof(tmp_table));
tmp_table.real_name=lex->name;
- tmp_table.db=lex->db;
+ tmp_table.db=select_lex->db;
tmp_table.grant.privilege=priv;
if (check_grant(thd,INSERT_ACL | CREATE_ACL,tables))
goto error;
}
}
+ /* Don't yet allow changing of symlinks with ALTER TABLE */
+ lex->create_info.data_file_name=lex->create_info.index_file_name=0;
/* ALTER TABLE ends previous transaction */
if (end_active_trans(thd))
res= -1;
else
- res= mysql_alter_table(thd, lex->db, lex->name,
+ res= mysql_alter_table(thd, select_lex->db, lex->name,
&lex->create_info,
tables, lex->create_list,
lex->key_list, lex->drop_list, lex->alter_list,
- (ORDER *) lex->order_list.first,
+ (ORDER *) select_lex->order_list.first,
lex->drop_primary, lex->duplicates,
lex->alter_keys_onoff, lex->simple_alter);
break;
@@ -1512,22 +1540,22 @@ mysql_execute_command(void)
goto error;
if (grant_option && check_grant(thd,UPDATE_ACL,tables))
goto error;
- if (lex->item_list.elements != lex->value_list.elements)
+ if (select_lex->item_list.elements != lex->value_list.elements)
{
send_error(&thd->net,ER_WRONG_VALUE_COUNT);
DBUG_VOID_RETURN;
}
res = mysql_update(thd,tables,
- lex->item_list,
+ select_lex->item_list,
lex->value_list,
- lex->where,
- (ORDER *) lex->order_list.first,
- lex->select_limit,
+ select_lex->where,
+ (ORDER *) select_lex->order_list.first,
+ select_lex->select_limit,
lex->duplicates,
lex->lock_option);
#ifdef DELETE_ITEMS
- delete lex->where;
+ delete select_lex->where;
#endif
break;
case SQLCOM_INSERT:
@@ -1571,9 +1599,9 @@ mysql_execute_command(void)
}
select_result *result;
- thd->offset_limit=lex->offset_limit;
- thd->select_limit=lex->select_limit+lex->offset_limit;
- if (thd->select_limit < lex->select_limit)
+ thd->offset_limit=select_lex->offset_limit;
+ thd->select_limit=select_lex->select_limit+select_lex->offset_limit;
+ if (thd->select_limit < select_lex->select_limit)
thd->select_limit= HA_POS_ERROR; // No limit
if (check_dup(thd,tables->db,tables->real_name,tables->next))
@@ -1593,14 +1621,14 @@ mysql_execute_command(void)
lex->sql_command == SQLCOM_REPLACE_SELECT ?
DUP_REPLACE : DUP_IGNORE)))
{
- res=mysql_select(thd,tables->next,lex->item_list,
- lex->where,
- lex->ftfunc_list,
- (ORDER*) lex->order_list.first,
- (ORDER*) lex->group_list.first,
- lex->having,
+ res=mysql_select(thd,tables->next,select_lex->item_list,
+ select_lex->where,
+ select_lex->ftfunc_list,
+ (ORDER*) select_lex->order_list.first,
+ (ORDER*) select_lex->group_list.first,
+ select_lex->having,
(ORDER*) lex->proc_list.first,
- lex->options | thd->options,
+ select_lex->options | thd->options,
result);
delete result;
}
@@ -1608,14 +1636,14 @@ mysql_execute_command(void)
res= -1;
}
#ifdef DELETE_ITEMS
- delete lex->having;
- delete lex->where;
+ delete select_lex->having;
+ delete select_lex->where;
#endif
break;
}
case SQLCOM_TRUNCATE:
- lex->where=0;
- lex->select_limit=HA_POS_ERROR;
+ select_lex->where=0;
+ select_lex->select_limit=HA_POS_ERROR;
/* Fall through */
case SQLCOM_DELETE:
{
@@ -1629,8 +1657,100 @@ mysql_execute_command(void)
if (lex->sql_command == SQLCOM_TRUNCATE && end_active_trans(thd))
res= -1;
else
- res = mysql_delete(thd,tables, lex->where, (ORDER*)lex->order_list.first,
- lex->select_limit, lex->lock_option, lex->options);
+ res = mysql_delete(thd,tables, select_lex->where,
+ (ORDER*) select_lex->order_list.first,
+ select_lex->select_limit, lex->lock_option,
+ select_lex->options);
+ break;
+ }
+ case SQLCOM_MULTI_DELETE:
+ {
+ TABLE_LIST *aux_tables=(TABLE_LIST *)thd->lex.auxilliary_table_list.first;
+ TABLE_LIST *auxi;
+ uint table_count=0;
+ multi_delete *result;
+
+ /* sql_yacc guarantees that tables and aux_tables are not zero */
+ if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) ||
+ check_table_access(thd,SELECT_ACL, tables) ||
+ check_table_access(thd,DELETE_ACL, aux_tables))
+ goto error;
+ if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where)
+ {
+ send_error(&thd->net,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE);
+ goto error;
+ }
+ for (auxi=(TABLE_LIST*) aux_tables ; auxi ; auxi=auxi->next)
+ {
+ table_count++;
+      /* All tables in aux_tables must also appear in the FROM clause */
+ TABLE_LIST *walk;
+ for (walk=(TABLE_LIST*) tables ; walk ; walk=walk->next)
+ {
+ if (!strcmp(auxi->real_name,walk->real_name) &&
+ !strcmp(walk->db,auxi->db))
+ break;
+ }
+ if (!walk)
+ {
+ net_printf(&thd->net,ER_NONUNIQ_TABLE,auxi->real_name);
+ goto error;
+ }
+ auxi->lock_type=walk->lock_type=TL_WRITE;
+ auxi->table= (TABLE *) walk; // Remember corresponding table
+ }
+ tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege);
+ if (add_item_to_list(new Item_null()))
+ {
+ res= -1;
+ break;
+ }
+ thd->proc_info="init";
+ if ((res=open_and_lock_tables(thd,tables)))
+ break;
+ /* Fix tables-to-be-deleted-from list to point at opened tables */
+ for (auxi=(TABLE_LIST*) aux_tables ; auxi ; auxi=auxi->next)
+ auxi->table= ((TABLE_LIST*) auxi->table)->table;
+ if ((result=new multi_delete(thd,aux_tables,lex->lock_option,
+ table_count)) && ! thd->fatal_error)
+ {
+ res=mysql_select(thd,tables,select_lex->item_list,
+ select_lex->where,select_lex->ftfunc_list,
+ (ORDER *)NULL,(ORDER *)NULL,(Item *)NULL,
+ (ORDER *)NULL,
+ select_lex->options | thd->options |
+ SELECT_NO_JOIN_CACHE,
+ result);
+ }
+ else
+ res= -1; // Error is not sent
+ delete result;
+ close_thread_tables(thd);
+ break;
+ }
+ case SQLCOM_UNION_SELECT:
+ {
+ SQL_LIST *total=(SQL_LIST *) thd->calloc(sizeof(SQL_LIST));
+ if (select_lex->options & SELECT_DESCRIBE)
+ lex->exchange=0;
+ if ((res = link_in_large_list_and_check_acl(thd,lex,total)) == -1)
+ {
+ res=0;
+ break;
+ }
+ if (res &&
+ (res=check_access(thd,
+ lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL,
+ any_db)))
+ {
+ res=0;
+ break;
+ }
+ if (!(res=open_and_lock_tables(thd,(TABLE_LIST *)total->first)))
+ {
+ res=mysql_union(thd,lex, select_lex->select_number+1);
+ if (res==-1) res=0;
+ }
break;
}
case SQLCOM_DROP_TABLE:
@@ -1657,7 +1777,7 @@ mysql_execute_command(void)
break;
case SQLCOM_SHOW_DATABASES:
#if defined(DONT_ALLOW_SHOW_COMMANDS)
- send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */
+ send_error(&thd->net,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */
DBUG_VOID_RETURN;
#else
if ((specialflag & SPECIAL_SKIP_SHOW_DB) &&
@@ -1698,7 +1818,7 @@ mysql_execute_command(void)
DBUG_VOID_RETURN;
#else
{
- char *db=lex->db ? lex->db : thd->db;
+ char *db=select_lex->db ? select_lex->db : thd->db;
if (!db)
{
send_error(&thd->net,ER_NO_DB_ERROR); /* purecov: inspected */
@@ -1713,7 +1833,7 @@ mysql_execute_command(void)
if (check_access(thd,SELECT_ACL,db,&thd->col_access))
goto error; /* purecov: inspected */
/* grant is checked in mysqld_show_tables */
- if (lex->options & SELECT_DESCRIBE)
+ if (select_lex->options & SELECT_DESCRIBE)
res= mysqld_extend_show_tables(thd,db,
(lex->wild ? lex->wild->ptr() : NullS));
else
@@ -1778,7 +1898,7 @@ mysql_execute_command(void)
}
#endif
case SQLCOM_CHANGE_DB:
- mysql_change_db(thd,lex->db);
+ mysql_change_db(thd,select_lex->db);
break;
case SQLCOM_LOAD:
{
@@ -1802,10 +1922,10 @@ mysql_execute_command(void)
case SQLCOM_SET_OPTION:
{
uint org_options=thd->options;
- thd->options=lex->options;
+ thd->options=select_lex->options;
thd->update_lock_default= ((thd->options & OPTION_LOW_PRIORITY_UPDATES) ?
TL_WRITE_LOW_PRIORITY : TL_WRITE);
- thd->default_select_limit=lex->select_limit;
+ thd->default_select_limit=select_lex->select_limit;
thd->tx_isolation=lex->tx_isolation;
if (thd->gemini_spin_retries != lex->gemini_spin_retries)
{
@@ -1816,7 +1936,7 @@ mysql_execute_command(void)
thd->options,(long) thd->default_select_limit));
/* Check if auto_commit mode changed */
- if ((org_options ^ lex->options) & OPTION_NOT_AUTO_COMMIT)
+ if ((org_options ^ select_lex->options) & OPTION_NOT_AUTO_COMMIT)
{
if ((org_options & OPTION_NOT_AUTO_COMMIT))
{
@@ -1864,6 +1984,8 @@ mysql_execute_command(void)
}
if (check_db_used(thd,tables) || end_active_trans(thd))
goto error;
+ if (grant_option && check_grant(thd,SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL,tables))
+ goto error;
thd->in_lock_tables=1;
if (!(res=open_and_lock_tables(thd,tables)))
{
@@ -1924,7 +2046,7 @@ mysql_execute_command(void)
if (tables && !tables->db)
tables->db=thd->db;
if (check_access(thd, lex->grant | lex->grant_tot_col | GRANT_ACL,
- tables && tables->db ? tables->db : lex->db,
+ tables && tables->db ? tables->db : select_lex->db,
tables ? &tables->grant.privilege : 0,
tables ? 0 : 1))
goto error;
@@ -1976,7 +2098,7 @@ mysql_execute_command(void)
res=1;
}
else
- res = mysql_grant(thd, lex->db, lex->users_list, lex->grant,
+ res = mysql_grant(thd, select_lex->db, lex->users_list, lex->grant,
lex->sql_command == SQLCOM_REVOKE);
if(!res)
{
@@ -2024,8 +2146,8 @@ mysql_execute_command(void)
if (check_db_used(thd,tables) || check_table_access(thd,SELECT_ACL, tables))
goto error;
res = mysql_ha_read(thd, tables, lex->ha_read_mode, lex->backup_dir,
- lex->insert_list, lex->ha_rkey_mode, lex->where,
- lex->select_limit, lex->offset_limit);
+ lex->insert_list, lex->ha_rkey_mode, select_lex->where,
+ select_lex->select_limit, select_lex->offset_limit);
break;
case SQLCOM_BEGIN:
@@ -2286,13 +2408,13 @@ static void
mysql_init_query(THD *thd)
{
DBUG_ENTER("mysql_init_query");
- thd->lex.item_list.empty();
+ thd->lex.select_lex.item_list.empty();
thd->lex.value_list.empty();
- thd->lex.table_list.elements=0;
+ thd->lex.select_lex.table_list.elements=0;
thd->free_list=0;
-
- thd->lex.table_list.first=0;
- thd->lex.table_list.next= (byte**) &thd->lex.table_list.first;
+ thd->lex.select = &thd->lex.select_lex;
+ thd->lex.select_lex.table_list.first=0;
+ thd->lex.select_lex.table_list.next= (byte**) &thd->lex.select_lex.table_list.first;
thd->fatal_error=0; // Safety
thd->last_insert_id_used=thd->query_start_used=thd->insert_id_used=0;
thd->sent_row_count=thd->examined_row_count=0;
@@ -2302,19 +2424,35 @@ mysql_init_query(THD *thd)
void
mysql_init_select(LEX *lex)
{
- lex->where=lex->having=0;
- lex->select_limit=current_thd->default_select_limit;
- lex->offset_limit=0L;
- lex->options=0;
- lex->exchange = 0;
+ SELECT_LEX *select_lex = lex->select;
+ select_lex->where=select_lex->having=0;
+ select_lex->select_limit=current_thd->default_select_limit;
+ select_lex->offset_limit=0L;
+ select_lex->options=0; select_lex->linkage=UNSPECIFIED_TYPE;
+ select_lex->select_number = 0; lex->exchange = 0;
lex->proc_list.first=0;
- lex->order_list.elements=lex->group_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
- lex->group_list.first=0;
- lex->group_list.next= (byte**) &lex->group_list.first;
+ select_lex->order_list.elements=select_lex->group_list.elements=0;
+ select_lex->order_list.first=0;
+ select_lex->order_list.next= (byte**) &select_lex->order_list.first;
+ select_lex->group_list.first=0;
+ select_lex->group_list.next= (byte**) &select_lex->group_list.first;
+ select_lex->next = (SELECT_LEX *)NULL;
}
+void
+mysql_new_select(LEX *lex)
+{
+ uint select_no=lex->select->select_number;
+ SELECT_LEX *select_lex = (SELECT_LEX *)sql_calloc(sizeof(SELECT_LEX));
+ lex->select->next=select_lex;
+ lex->select=select_lex; lex->select->select_number = ++select_no;
+ lex->select->item_list = lex->select_lex.item_list;
+ lex->select->item_list.empty();
+ lex->select->table_list = lex->select_lex.table_list;
+ lex->select->table_list.elements=0;
+ lex->select->table_list.first=0;
+ lex->select->table_list.next= (byte**) &lex->select->table_list.first;
+}
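mysql_new_select() extends the per-query state by one SELECT_LEX and links it behind the current one, so a UNION ends up as a singly linked chain of otherwise ordinary per-SELECT parse states. A stripped-down model of that chaining (invented SelectState/ParserState types, standard allocation instead of sql_calloc):

#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct SelectState {
  unsigned select_number = 0;
  std::vector<std::string> tables;      // stand-in for table_list
  SelectState *next = nullptr;
};

struct ParserState {
  SelectState first;                    // corresponds to lex.select_lex
  SelectState *current = &first;        // corresponds to lex.select
  std::vector<std::unique_ptr<SelectState>> owned;

  void new_select()                     // called when the parser sees UNION
  {
    owned.push_back(std::make_unique<SelectState>());
    SelectState *s = owned.back().get();
    s->select_number = current->select_number + 1;
    current->next = s;
    current = s;
  }
};

int main()
{
  ParserState p;
  p.current->tables.push_back("t1");    // SELECT ... FROM t1
  p.new_select();
  p.current->tables.push_back("t2");    // UNION SELECT ... FROM t2
  for (SelectState *s = &p.first; s; s = s->next)
    std::printf("select #%u: %zu table(s)\n", s->select_number, s->tables.size());
  return 0;
}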
void
mysql_parse(THD *thd,char *inBuf,uint length)
@@ -2732,7 +2870,7 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias,
if (flags != TL_IGNORE)
{
- for (TABLE_LIST *tables=(TABLE_LIST*) thd->lex.table_list.first ; tables ;
+ for (TABLE_LIST *tables=(TABLE_LIST*) thd->lex.select->table_list.first ; tables ;
tables=tables->next)
{
if (!strcmp(alias_str,tables->name) &&
@@ -2744,10 +2882,46 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias,
}
}
}
- link_in_list(&thd->lex.table_list,(byte*) ptr,(byte**) &ptr->next);
+ link_in_list(&thd->lex.select->table_list,(byte*) ptr,(byte**) &ptr->next);
DBUG_RETURN(ptr);
}
+static int link_in_large_list_and_check_acl(THD *thd,LEX *lex,SQL_LIST *tables)
+{
+ SELECT_LEX *sl; const char *current_db=thd->db ? thd->db : "";
+ for (sl=&lex->select_lex;sl;sl=sl->next)
+ {
+ if ((lex->sql_command == SQLCOM_UNION_SELECT) && (sl->order_list.first != (byte *)NULL) && (sl->next != (st_select_lex *)NULL))
+ {
+      net_printf(&thd->net,ER_ILLEGAL_GRANT_FOR_TABLE); // a proper error message is needed here; only the last SELECT may have ORDER BY
+ return -1;
+ }
+ if (sl->table_list.first == (byte *)NULL) continue;
+ TABLE_LIST *cursor,*aux=(TABLE_LIST*) sl->table_list.first;
+ if (aux)
+ {
+ if (check_table_access(thd, lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL , aux))
+ return -1;
+ for (;aux;aux=aux->next)
+ {
+ if (!aux->db)
+ aux->db=(char *)current_db;
+ for (cursor=(TABLE_LIST *)tables->first;cursor;cursor=cursor->next)
+ if (!strcmp(cursor->db,aux->db) && (!strcmp(cursor->real_name,aux->real_name)))
+ break;
+ if (!cursor || !tables->first)
+ {
+ aux->lock_type= lex->lock_option;
+ if (!tables->next)
+ tables->next= (byte**) &tables->first;
+ link_in_list(tables,(byte*)aux,(byte**) &aux->next);
+ }
+ }
+ }
+ }
+ return (tables->first) ? 0 : 1;
+}
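link_in_large_list_and_check_acl() walks every SELECT_LEX in the chain, checks SELECT privileges, and folds all table references into a single list, skipping tables that are already present under the same database and name. The de-duplication step in isolation, using invented types and a std::set instead of the hand-linked SQL_LIST:

#include <cstdio>
#include <set>
#include <string>
#include <utility>
#include <vector>

struct TableRef { std::string db, name; };

// Collect the union of all table references, keeping one entry per (db, name).
static std::vector<TableRef>
merge_table_lists(const std::vector<std::vector<TableRef>> &per_select)
{
  std::vector<TableRef> total;
  std::set<std::pair<std::string, std::string>> seen;
  for (const auto &list : per_select)
    for (const TableRef &t : list)
      if (seen.insert({t.db, t.name}).second)
        total.push_back(t);
  return total;
}

int main()
{
  std::vector<std::vector<TableRef>> selects = {
    {{"test", "t1"}, {"test", "t2"}},   // first SELECT of the UNION
    {{"test", "t2"}, {"test", "t3"}}};  // second SELECT
  for (const TableRef &t : merge_table_lists(selects))
    std::printf("%s.%s\n", t.db.c_str(), t.name.c_str());
  return 0;
}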
+
void add_join_on(TABLE_LIST *b,Item *expr)
{
if (!b->on_expr)
@@ -2864,3 +3038,29 @@ static void refresh_status(void)
pthread_mutex_unlock(&LOCK_status);
pthread_mutex_unlock(&THR_LOCK_keycache);
}
+
+
+ /* If the pointer is not NULL, append the table name to the directory it points to */
+
+static bool append_file_to_dir(char **filename_ptr, char *table_name)
+{
+ char buff[FN_REFLEN],*ptr;
+ if (!*filename_ptr)
+ return 0; // nothing to do
+
+ /* Check that the filename is not too long and it's a hard path */
+ if (strlen(*filename_ptr)+strlen(table_name) >= FN_REFLEN-1 ||
+ !test_if_hard_path(*filename_ptr))
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), *filename_ptr);
+ return 1;
+ }
+  /* Fix in case Unix filename format is used on DOS */
+ strmov(buff,*filename_ptr);
+ convert_dirname(buff);
+ if (!(ptr=sql_alloc(strlen(buff)+strlen(table_name)+1)))
+ return 1; // End of memory
+ *filename_ptr=ptr;
+ strxmov(ptr,buff,table_name,NullS);
+ return 0;
+}
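append_file_to_dir() only accepts an absolute directory, rejects results that would overflow FN_REFLEN, converts the directory to Unix form, and then appends the table name. The same checks expressed with std::string (a sketch; the real code uses the MySQL string helpers and sql_alloc, and kMaxPath here merely stands in for FN_REFLEN):

#include <cstddef>
#include <cstdio>
#include <stdexcept>
#include <string>

static const std::size_t kMaxPath = 512;   // stand-in for FN_REFLEN

static bool is_hard_path(const std::string &p)
{
  return !p.empty() && (p[0] == '/' ||                 // Unix absolute path
                        (p.size() > 1 && p[1] == ':')); // DOS drive letter
}

// Returns "<dir>/<table>" or throws if the directory is unusable.
static std::string append_file_to_dir(std::string dir, const std::string &table)
{
  if (dir.size() + table.size() >= kMaxPath - 1 || !is_hard_path(dir))
    throw std::invalid_argument("bad data/index directory: " + dir);
  for (char &c : dir)                      // normalize DOS separators
    if (c == '\\')
      c = '/';
  if (dir.back() != '/')
    dir += '/';
  return dir + table;
}

int main()
{
  std::printf("%s\n", append_file_to_dir("/data/disk2", "t1").c_str());
  return 0;
}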
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index e5039d118be..6153c4bd0f9 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -21,17 +21,54 @@
#include "sql_repl.h"
#include "sql_acl.h"
#include "log_event.h"
+#include "mini_client.h"
#include <thr_alarm.h>
#include <my_dir.h>
+#define SLAVE_LIST_CHUNK 128
+
extern const char* any_db;
extern pthread_handler_decl(handle_slave,arg);
+HASH slave_list;
+
+#ifndef DBUG_OFF
+int max_binlog_dump_events = 0; // unlimited
+bool opt_sporadic_binlog_dump_fail = 0;
+static int binlog_dump_count = 0;
+#endif
+
+static uint32* slave_list_key(SLAVE_INFO* si, uint* len,
+ my_bool not_used __attribute__((unused)))
+{
+ *len = 4;
+ return &si->server_id;
+}
+
+static void slave_info_free(void *s)
+{
+ my_free((byte*)s, MYF(MY_WME));
+}
+
+void init_slave_list()
+{
+ hash_init(&slave_list, SLAVE_LIST_CHUNK, 0, 0,
+ (hash_get_key) slave_list_key, slave_info_free, 0);
+ pthread_mutex_init(&LOCK_slave_list, MY_MUTEX_INIT_FAST);
+}
+
+void end_slave_list()
+{
+ pthread_mutex_lock(&LOCK_slave_list);
+ hash_free(&slave_list);
+ pthread_mutex_unlock(&LOCK_slave_list);
+ pthread_mutex_destroy(&LOCK_slave_list);
+}
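The slave list is a hash keyed by the 4-byte server id and protected by LOCK_slave_list; registering the same server id again replaces the previous entry, which is exactly what register_slave() does with its hash_delete/hash_insert pair. Modeled here with a mutex-guarded std::unordered_map and an invented SlaveEntry type:

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <string>
#include <unordered_map>

struct SlaveEntry {
  std::string host, user, password;
  std::uint16_t port = 0;
};

class SlaveRegistry {
  std::mutex lock_;                                      // ~ LOCK_slave_list
  std::unordered_map<std::uint32_t, SlaveEntry> slaves_; // ~ slave_list hash
public:
  // Re-registration under the same server id replaces the old record.
  void register_slave(std::uint32_t server_id, SlaveEntry entry)
  {
    std::lock_guard<std::mutex> guard(lock_);
    slaves_[server_id] = std::move(entry);
  }
  std::size_t count()
  {
    std::lock_guard<std::mutex> guard(lock_);
    return slaves_.size();
  }
};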
static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
const char**errmsg)
{
- char header[LOG_EVENT_HEADER_LEN];
+ char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN];
memset(header, 0, 4); // when does not matter
header[EVENT_TYPE_OFFSET] = ROTATE_EVENT;
char* p = strrchr(log_file_name, FN_LIBCHAR);
@@ -42,10 +79,14 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
p = log_file_name;
uint ident_len = (uint) strlen(p);
- ulong event_len = ident_len + sizeof(header);
- int4store(header + EVENT_TYPE_OFFSET + 1, server_id);
+ ulong event_len = ident_len + ROTATE_EVENT_OVERHEAD;
+ int4store(header + SERVER_ID_OFFSET, server_id);
int4store(header + EVENT_LEN_OFFSET, event_len);
+ int2store(header + FLAGS_OFFSET, 0);
+ int4store(header + LOG_SEQ_OFFSET, 0);
packet->append(header, sizeof(header));
+ int8store(buf, 4); // tell slave to skip magic number
+ packet->append(buf, ROTATE_HEADER_LEN);
packet->append(p,ident_len);
if(my_net_write(net, (char*)packet->ptr(), packet->length()))
{
@@ -55,6 +96,55 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name,
return 0;
}
+int register_slave(THD* thd, uchar* packet, uint packet_length)
+{
+ uint len;
+ SLAVE_INFO* si, *old_si;
+ int res = 1;
+ uchar* p = packet, *p_end = packet + packet_length;
+
+ if(check_access(thd, FILE_ACL, any_db))
+ return 1;
+
+ if(!(si = (SLAVE_INFO*)my_malloc(sizeof(SLAVE_INFO), MYF(MY_WME))))
+ goto err;
+
+ si->server_id = uint4korr(p);
+ p += 4;
+ len = (uint)*p++;
+ if(p + len > p_end || len > sizeof(si->host) - 1)
+ goto err;
+ memcpy(si->host, p, len);
+ si->host[len] = 0;
+ p += len;
+ len = *p++;
+ if(p + len > p_end || len > sizeof(si->user) - 1)
+ goto err;
+ memcpy(si->user, p, len);
+ si->user[len] = 0;
+ p += len;
+ len = *p++;
+ if(p + len > p_end || len > sizeof(si->password) - 1)
+ goto err;
+ memcpy(si->password, p, len);
+ si->password[len] = 0;
+ p += len;
+ si->port = uint2korr(p);
+ pthread_mutex_lock(&LOCK_slave_list);
+
+ if((old_si = (SLAVE_INFO*)hash_search(&slave_list,
+ (byte*)&si->server_id, 4)))
+ hash_delete(&slave_list, (byte*)old_si);
+
+ res = hash_insert(&slave_list, (byte*)si);
+ pthread_mutex_unlock(&LOCK_slave_list);
+ return res;
+err:
+ if(si)
+ my_free((byte*)si, MYF(MY_WME));
+ return res;
+}
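register_slave() decodes a payload laid out as a 4-byte little-endian server id, then host, user and password each as a one-byte length followed by that many bytes, then a 2-byte port, with a bounds check before every read. A bounds-checked decoder for the same layout (a sketch with invented names; the field order and checks mirror the code above):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

struct SlaveRegistration {
  std::uint32_t server_id;
  std::string host, user, password;
  std::uint16_t port;
};

// Decode the payload sketched above; returns nullopt on a short or malformed
// packet (mirrors the "p + len > p_end" checks).
static std::optional<SlaveRegistration>
parse_register_slave(const std::vector<unsigned char> &pkt)
{
  std::size_t pos = 0;
  auto need = [&](std::size_t n) { return pos + n <= pkt.size(); };
  auto read_lpstring = [&](std::string &out) {
    if (!need(1)) return false;
    std::size_t len = pkt[pos++];
    if (!need(len)) return false;
    out.assign(pkt.begin() + pos, pkt.begin() + pos + len);
    pos += len;
    return true;
  };

  SlaveRegistration r;
  if (!need(4)) return std::nullopt;
  r.server_id = pkt[pos] | (pkt[pos + 1] << 8) |
                (pkt[pos + 2] << 16) | ((std::uint32_t) pkt[pos + 3] << 24);
  pos += 4;
  if (!read_lpstring(r.host) || !read_lpstring(r.user) ||
      !read_lpstring(r.password) || !need(2))
    return std::nullopt;
  r.port = (std::uint16_t) (pkt[pos] | (pkt[pos + 1] << 8));
  return r;
}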
+
static int send_file(THD *thd)
{
@@ -265,8 +355,19 @@ void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags)
int error;
const char *errmsg = "Unknown error";
NET* net = &thd->net;
+#ifndef DBUG_OFF
+ int left_events = max_binlog_dump_events;
+#endif
DBUG_ENTER("mysql_binlog_send");
+#ifndef DBUG_OFF
+ if (opt_sporadic_binlog_dump_fail && (binlog_dump_count++ % 2))
+ {
+ errmsg = "Master failed COM_BINLOG_DUMP to test if slave can recover";
+ goto err;
+ }
+#endif
+
bzero((char*) &log,sizeof(log));
if(!mysql_bin_log.is_open())
@@ -297,10 +398,10 @@ void mysql_binlog_send(THD* thd, char* log_ident, ulong pos, ushort flags)
if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0)
goto err;
- if(pos < 4)
+ if (pos < 4)
{
- errmsg = "Congratulations! You have hit the magic number and can win \
-sweepstakes if you report the bug";
+    errmsg = "Client requested master to start replication from \
+impossible position";
goto err;
}
@@ -326,6 +427,14 @@ sweepstakes if you report the bug";
while (!(error = Log_event::read_log_event(&log, packet, log_lock)))
{
+#ifndef DBUG_OFF
+ if(max_binlog_dump_events && !left_events--)
+ {
+ net_flush(net);
+ errmsg = "Debugging binlog dump abort";
+ goto err;
+ }
+#endif
if (my_net_write(net, (char*)packet->ptr(), packet->length()) )
{
errmsg = "Failed on my_net_write()";
@@ -400,6 +509,15 @@ sweepstakes if you report the bug";
bool read_packet = 0, fatal_error = 0;
+#ifndef DBUG_OFF
+ if(max_binlog_dump_events && !left_events--)
+ {
+ net_flush(net);
+ errmsg = "Debugging binlog dump abort";
+ goto err;
+ }
+#endif
+
// no one will update the log while we are reading
// now, but we'll be quick and just read one record
pthread_mutex_lock(log_lock);
@@ -614,7 +732,7 @@ void reset_slave()
pthread_mutex_unlock(&LOCK_slave);
end_master_info(&glob_mi);
- fn_format(fname, master_info_file, mysql_data_home, "", 4+16+32);
+ fn_format(fname, master_info_file, mysql_data_home, "", 4+32);
if(my_stat(fname, &stat_area, MYF(0)))
if(my_delete(fname, MYF(MY_WME)))
return;
@@ -685,14 +803,18 @@ int change_master(THD* thd)
    // if we change host or port, we must reset the position
glob_mi.log_file_name[0] = 0;
glob_mi.pos = 4; // skip magic number
+ glob_mi.pending = 0;
}
if(lex_mi->log_file_name)
strmake(glob_mi.log_file_name, lex_mi->log_file_name,
sizeof(glob_mi.log_file_name));
if(lex_mi->pos)
+ {
glob_mi.pos = lex_mi->pos;
-
+ glob_mi.pending = 0;
+ }
+
if(lex_mi->host)
{
strmake(glob_mi.host, lex_mi->host, sizeof(glob_mi.host));
@@ -741,6 +863,149 @@ void reset_master()
}
+
+int show_binlog_events(THD* thd)
+{
+ DBUG_ENTER("show_binlog_events");
+ List<Item> field_list;
+ const char* errmsg = 0;
+ IO_CACHE log;
+ File file = -1;
+
+ Log_event::init_show_field_list(&field_list);
+ if (send_fields(thd, field_list, 1))
+ DBUG_RETURN(-1);
+
+ if (mysql_bin_log.is_open())
+ {
+ LOG_INFO linfo;
+ char search_file_name[FN_REFLEN];
+ LEX_MASTER_INFO* lex_mi = &thd->lex.mi;
+ uint event_count, limit_start, limit_end;
+ const char* log_file_name = lex_mi->log_file_name;
+ Log_event* ev;
+ ulong pos = (ulong) lex_mi->pos;
+
+ limit_start = thd->lex.select->offset_limit;
+ limit_end = thd->lex.select->select_limit + limit_start;
+
+ if (log_file_name)
+ mysql_bin_log.make_log_name(search_file_name, log_file_name);
+ else
+ search_file_name[0] = 0;
+
+ linfo.index_file_offset = 0;
+ thd->current_linfo = &linfo;
+
+ if (mysql_bin_log.find_first_log(&linfo, search_file_name))
+ {
+ errmsg = "Could not find target log";
+ goto err;
+ }
+
+ if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0)
+ goto err;
+
+ if (pos < 4)
+ {
+ errmsg = "Invalid log position";
+ goto err;
+ }
+
+ pthread_mutex_lock(mysql_bin_log.get_log_lock());
+
+ my_b_seek(&log, pos);
+
+ for (event_count = 0;
+ (ev = Log_event::read_log_event(&log, 0));)
+ {
+ if (event_count >= limit_start &&
+ ev->net_send(thd, linfo.log_file_name, pos))
+ {
+ errmsg = "Net error";
+ delete ev;
+ pthread_mutex_unlock(mysql_bin_log.get_log_lock());
+ goto err;
+ }
+
+ pos = my_b_tell(&log);
+ delete ev;
+
+ if (++event_count >= limit_end)
+ break;
+ }
+
+ if (event_count < limit_end && log.error)
+ {
+ errmsg = "Wrong offset or I/O error";
+ goto err;
+ }
+
+ pthread_mutex_unlock(mysql_bin_log.get_log_lock());
+ }
+
+err:
+ if (file >= 0)
+ {
+ end_io_cache(&log);
+ (void) my_close(file, MYF(MY_WME));
+ }
+
+ if (errmsg)
+ {
+ net_printf(&thd->net, ER_SHOW_BINLOG_EVENTS, errmsg);
+ DBUG_RETURN(1);
+ }
+
+ send_eof(&thd->net);
+ DBUG_RETURN(0);
+}
+
+
+int show_slave_hosts(THD* thd)
+{
+ DBUG_ENTER("show_slave_hosts");
+ List<Item> field_list;
+ field_list.push_back(new Item_empty_string("Server_id", 20));
+ field_list.push_back(new Item_empty_string("Host", 20));
+ if(opt_show_slave_auth_info)
+ {
+ field_list.push_back(new Item_empty_string("User",20));
+ field_list.push_back(new Item_empty_string("Password",20));
+ }
+ field_list.push_back(new Item_empty_string("Port",20));
+
+ if(send_fields(thd, field_list, 1))
+ DBUG_RETURN(-1);
+ String* packet = &thd->packet;
+ uint i;
+ NET* net = &thd->net;
+
+ pthread_mutex_lock(&LOCK_slave_list);
+
+ for(i = 0; i < slave_list.records; ++i)
+ {
+ SLAVE_INFO* si = (SLAVE_INFO*)hash_element(&slave_list, i);
+ packet->length(0);
+ net_store_data(packet, si->server_id);
+ net_store_data(packet, si->host);
+ if(opt_show_slave_auth_info)
+ {
+ net_store_data(packet, si->user);
+ net_store_data(packet, si->password);
+ }
+ net_store_data(packet, (uint)si->port);
+ if(my_net_write(net, (char*)packet->ptr(), packet->length()))
+ {
+ pthread_mutex_unlock(&LOCK_slave_list);
+ DBUG_RETURN(-1);
+ }
+ }
+ pthread_mutex_unlock(&LOCK_slave_list);
+ send_eof(net);
+ DBUG_RETURN(0);
+}
+
int show_binlog_info(THD* thd)
{
DBUG_ENTER("show_binlog_info");
@@ -845,5 +1110,221 @@ err:
return 1;
}
+int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi)
+{
+ if(!mc_mysql_connect(mysql, mi->host, mi->user, mi->password, 0,
+ mi->port, 0, 0))
+ {
+ sql_print_error("Connection to master failed: %s",
+ mc_mysql_error(mysql));
+ return 1;
+ }
+ return 0;
+}
+
+static inline void cleanup_mysql_results(MYSQL_RES* db_res,
+ MYSQL_RES** cur, MYSQL_RES** start)
+{
+ for( ; cur >= start; --cur)
+ if(*cur)
+ mc_mysql_free_result(*cur);
+ mc_mysql_free_result(db_res);
+}
+
+static inline int fetch_db_tables(THD* thd, MYSQL* mysql, const char* db,
+ MYSQL_RES* table_res)
+{
+ MYSQL_ROW row;
+
+ for( row = mc_mysql_fetch_row(table_res); row;
+ row = mc_mysql_fetch_row(table_res))
+ {
+ TABLE_LIST table;
+ const char* table_name = row[0];
+ int error;
+ if(table_rules_on)
+ {
+ table.next = 0;
+ table.db = (char*)db;
+ table.real_name = (char*)table_name;
+ table.updating = 1;
+ if(!tables_ok(thd, &table))
+ continue;
+ }
+
+ if((error = fetch_nx_table(thd, db, table_name, &glob_mi, mysql)))
+ return error;
+ }
+
+ return 0;
+}
+
+int load_master_data(THD* thd)
+{
+ MYSQL mysql;
+ MYSQL_RES* master_status_res = 0;
+ bool slave_was_running = 0;
+ int error = 0;
+
+ mc_mysql_init(&mysql);
+
+ pthread_mutex_lock(&LOCK_slave);
+  // We do not want anyone messing with the slave at all for the entire
+  // duration of the data load.
+
+ // first, kill the slave
+ if((slave_was_running = slave_running))
+ {
+ abort_slave = 1;
+ thr_alarm_kill(slave_real_id);
+ thd->proc_info = "waiting for slave to die";
+ while(slave_running)
+ pthread_cond_wait(&COND_slave_stopped, &LOCK_slave); // wait until done
+ }
+
+
+ if(connect_to_master(thd, &mysql, &glob_mi))
+ {
+ net_printf(&thd->net, error = ER_CONNECT_TO_MASTER,
+ mc_mysql_error(&mysql));
+ goto err;
+ }
+
+ // now that we are connected, get all database and tables in each
+ {
+ MYSQL_RES *db_res, **table_res, **table_res_end, **cur_table_res;
+ uint num_dbs;
+ MYSQL_ROW row;
+
+ if(mc_mysql_query(&mysql, "show databases", 0) ||
+ !(db_res = mc_mysql_store_result(&mysql)))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ goto err;
+ }
+
+ if(!(num_dbs = mc_mysql_num_rows(db_res)))
+ goto err;
+ // in theory, the master could have no databases at all
+ // and run with skip-grant
+
+ if(!(table_res = (MYSQL_RES**)thd->alloc(num_dbs * sizeof(MYSQL_RES*))))
+ {
+ net_printf(&thd->net, error = ER_OUTOFMEMORY);
+ goto err;
+ }
+
+    // This is a temporary solution until we have online backup
+    // capabilities; it will be replaced once online backup is working.
+    // We wait to issue FLUSH TABLES WITH READ LOCK for as long as we
+    // can to minimize the lock time.
+ if(mc_mysql_query(&mysql, "FLUSH TABLES WITH READ LOCK", 0)
+ || mc_mysql_query(&mysql, "SHOW MASTER STATUS",0) ||
+ !(master_status_res = mc_mysql_store_result(&mysql)))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ goto err;
+ }
+
+ // go through every table in every database, and if the replication
+ // rules allow replicating it, get it
+
+ table_res_end = table_res + num_dbs;
+
+ for(cur_table_res = table_res; cur_table_res < table_res_end;
+ ++cur_table_res)
+ {
+ MYSQL_ROW row = mc_mysql_fetch_row(db_res);
+ // since we know how many rows we have, this can never be NULL
+
+ char* db = row[0];
+ int drop_error = 0;
+
+      // Do not replicate databases excluded by the rules. Also skip the
+      // mysql database: in most cases the user will forget to exclude it
+      // with the rules even though that is what he means, and he is in
+      // for a surprise if his privilege tables get dropped and re-fetched
+      // from the master.
+      // TODO: add a special option, disabled by default, to allow the
+      // mysql database to be included in LOAD DATA FROM MASTER.
+ if(!db_ok(db, replicate_do_db, replicate_ignore_db) ||
+ !strcmp(db,"mysql"))
+ {
+ *cur_table_res = 0;
+ continue;
+ }
+
+ if((drop_error = mysql_rm_db(0, db, 1)) ||
+ mysql_create_db(0, db, 0))
+ {
+ error = (drop_error) ? ER_DB_DROP_DELETE : ER_CANT_CREATE_DB;
+ net_printf(&thd->net, error, db, my_error);
+ cleanup_mysql_results(db_res, cur_table_res - 1, table_res);
+ goto err;
+ }
+
+ if(mc_mysql_select_db(&mysql, db) ||
+ mc_mysql_query(&mysql, "show tables", 0) ||
+ !(*cur_table_res = mc_mysql_store_result(&mysql)))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ cleanup_mysql_results(db_res, cur_table_res - 1, table_res);
+ goto err;
+ }
+
+ if((error = fetch_db_tables(thd, &mysql, db, *cur_table_res)))
+ {
+ // we do not report the error - fetch_db_tables handles it
+ cleanup_mysql_results(db_res, cur_table_res, table_res);
+ goto err;
+ }
+ }
+
+ cleanup_mysql_results(db_res, cur_table_res - 1, table_res);
+
+ // adjust position in the master
+ if(master_status_res)
+ {
+ MYSQL_ROW row = mc_mysql_fetch_row(master_status_res);
+
+ // we need this check because the master may not be running with
+ // log-bin, but it will still allow us to do all the steps
+ // of LOAD DATA FROM MASTER - no reason to forbid it, really,
+ // although it does not make much sense for the user to do it
+ if(row[0] && row[1])
+ {
+ strmake(glob_mi.log_file_name, row[0], sizeof(glob_mi.log_file_name));
+ glob_mi.pos = atoi(row[1]); // atoi() is ok, since offset is <= 1GB
+ if(glob_mi.pos < 4)
+ glob_mi.pos = 4; // don't hit the magic number
+ glob_mi.pending = 0;
+ flush_master_info(&glob_mi);
+ }
+
+ mc_mysql_free_result(master_status_res);
+ }
+
+ if(mc_mysql_query(&mysql, "UNLOCK TABLES", 0))
+ {
+ net_printf(&thd->net, error = ER_QUERY_ON_MASTER,
+ mc_mysql_error(&mysql));
+ goto err;
+ }
+ }
+err:
+ pthread_mutex_unlock(&LOCK_slave);
+ if(slave_was_running)
+ start_slave(0, 0);
+ mc_mysql_close(&mysql); // safe to call since we always do mc_mysql_init()
+ if(!error)
+ send_ok(&thd->net);
+
+ return error;
+}
+
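At the end of the load the replication coordinates come from SHOW MASTER STATUS: column 0 is the log name, column 1 the offset, and anything below 4 is clamped to 4 so the slave never points into the binlog magic number. The clamping as a small helper (a sketch; the real code stores into glob_mi and calls flush_master_info()):

#include <cstdio>
#include <cstdlib>
#include <string>

struct MasterCoord {
  std::string log_file;
  unsigned long pos = 4;                // 4 == first byte after the binlog magic
};

// row0/row1 mirror the two columns of SHOW MASTER STATUS; either may be NULL
// when the master runs without log-bin, in which case nothing is updated.
static bool update_coordinates(MasterCoord &coord,
                               const char *row0, const char *row1)
{
  if (!row0 || !row1)
    return false;
  coord.log_file = row0;
  unsigned long pos = std::strtoul(row1, nullptr, 10);
  coord.pos = pos < 4 ? 4 : pos;        // don't point into the magic number
  return true;
}

int main()
{
  MasterCoord c;
  if (update_coordinates(c, "master-bin.001", "0"))
    std::printf("%s at %lu\n", c.log_file.c_str(), c.pos);
  return 0;
}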
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 68f2b4ba6c4..a988658ed68 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -3,20 +3,43 @@
#include "slave.h"
+typedef struct st_slave_info
+{
+ uint32 server_id;
+ char host[HOSTNAME_LENGTH+1];
+ char user[USERNAME_LENGTH+1];
+ char password[HASH_PASSWORD_LENGTH+1];
+ uint16 port;
+} SLAVE_INFO;
+
+extern bool opt_show_slave_auth_info;
+extern HASH slave_list;
extern char* master_host;
extern my_string opt_bin_logname, master_info_file;
extern uint32 server_id;
extern bool server_id_supplied;
extern I_List<i_string> binlog_do_db, binlog_ignore_db;
+#ifndef DBUG_OFF
+extern int max_binlog_dump_events;
+extern bool opt_sporadic_binlog_dump_fail;
+#endif
+
File open_binlog(IO_CACHE *log, const char *log_file_name,
const char **errmsg);
int start_slave(THD* thd = 0, bool net_report = 1);
int stop_slave(THD* thd = 0, bool net_report = 1);
+int load_master_data(THD* thd);
+int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi);
int change_master(THD* thd);
+int show_slave_hosts(THD* thd);
+int show_binlog_events(THD* thd);
void reset_slave();
void reset_master();
+void init_slave_list();
+void end_slave_list();
+int register_slave(THD* thd, uchar* packet, uint packet_length);
int purge_master_logs(THD* thd, const char* to_log);
bool log_in_use(const char* log_name);
void adjust_linfo_offsets(my_off_t purge_offset);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 90b3c6eefaf..eff200e9bdf 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -385,6 +385,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
thd->fatal_error)
goto err;
thd->proc_info="preparing";
+ result->initialize_tables(&join);
if ((tmp=join_read_const_tables(&join)) > 0)
goto err;
if (tmp && !(select_options & SELECT_DESCRIBE))
@@ -403,7 +404,22 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
goto err; /* purecov: inspected */
}
if (join.const_tables && !thd->locked_tables)
+ {
+ TABLE **table, **end;
+ for (table=join.table, end=table + join.const_tables ;
+ table != end;
+ table++)
+ {
+ /* BDB tables require that we call index_end() before doing an unlock */
+ if ((*table)->key_read)
+ {
+ (*table)->key_read=0;
+ (*table)->file->extra(HA_EXTRA_NO_KEYREAD);
+ }
+ (*table)->file->index_end();
+ }
mysql_unlock_some_tables(thd, join.table,join.const_tables);
+ }
if (!conds && join.outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
@@ -484,8 +500,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
(group && order) ||
test(select_options & OPTION_BUFFER_RESULT)));
- make_join_readinfo(&join, (select_options & SELECT_DESCRIBE) |
- (ftfuncs.elements ? 0 : SELECT_USE_CACHE)); // No cache for MATCH
+ // No cache for MATCH
+ make_join_readinfo(&join,
+ (select_options & (SELECT_DESCRIBE |
+ SELECT_NO_JOIN_CACHE)) |
+ (ftfuncs.elements ? SELECT_NO_JOIN_CACHE : 0));
/* Need to tell Innobase that to play it safe, it should fetch all
columns of the tables: this is because MySQL
@@ -2465,7 +2484,7 @@ make_join_readinfo(JOIN *join,uint options)
** if previous table use cache
*/
table->status=STATUS_NO_RECORD;
- if (i != join->const_tables && (options & SELECT_USE_CACHE) &&
+ if (i != join->const_tables && !(options & SELECT_NO_JOIN_CACHE) &&
tab->use_quick != 2 && !tab->on_expr)
{
if ((options & SELECT_DESCRIBE) ||
@@ -2478,7 +2497,7 @@ make_join_readinfo(JOIN *join,uint options)
/* These init changes read_record */
if (tab->use_quick == 2)
{
- join->thd->lex.options|=QUERY_NO_GOOD_INDEX_USED;
+ join->thd->lex.select_lex.options|=QUERY_NO_GOOD_INDEX_USED;
tab->read_first_record= join_init_quick_read_record;
statistic_increment(select_range_check_count, &LOCK_status);
}
@@ -2493,7 +2512,7 @@ make_join_readinfo(JOIN *join,uint options)
}
else
{
- join->thd->lex.options|=QUERY_NO_INDEX_USED;
+ join->thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
statistic_increment(select_scan_count, &LOCK_status);
}
}
@@ -2505,7 +2524,7 @@ make_join_readinfo(JOIN *join,uint options)
}
else
{
- join->thd->lex.options|=QUERY_NO_INDEX_USED;
+ join->thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
statistic_increment(select_full_join_count, &LOCK_status);
}
}
@@ -2807,7 +2826,12 @@ return_zero_rows(select_result *result,TABLE_LIST *tables,List<Item> &fields,
if (send_row)
result->send_data(fields);
if (tables) // Not from do_select()
+ {
+ /* Close open cursors */
+ for (TABLE_LIST *table=tables; table ; table=table->next)
+ table->table->file->index_end();
result->send_eof(); // Should be safe
+ }
}
DBUG_RETURN(0);
}
@@ -3920,7 +3944,7 @@ bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
thd->proc_info="converting HEAP to MyISAM";
if (create_myisam_tmp_table(&new_table,param,
- thd->lex.options | thd->options))
+ thd->lex.select_lex.options | thd->options))
goto err2;
if (open_tmp_table(&new_table))
goto err1;
@@ -5130,9 +5154,11 @@ part_of_refkey(TABLE *table,Field *field)
** Returns: 1 if key is ok.
** 0 if key can't be used
** -1 if reverse key can be used
+** used_key_parts is set to key parts used if length != 0
*****************************************************************************/
-static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx)
+static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
+ uint *used_key_parts)
{
KEY_PART_INFO *key_part,*key_part_end;
key_part=table->key_info[idx].key_part;
@@ -5164,6 +5190,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx)
reverse=flag; // Remember if reverse
key_part++;
}
+ *used_key_parts= (uint) (key_part - table->key_info[idx].key_part);
return reverse;
}
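test_if_order_by_key() now also reports how many key parts the ORDER BY consumed, so the caller can distinguish a plain reverse scan from the "constant prefix plus descending suffix" case handled further down. The direction test reduced to column names and ASC/DESC flags (a simplified sketch that ignores const key parts):

#include <cstdio>
#include <string>
#include <vector>

struct OrderPart { std::string column; bool descending; };

// Returns +1 if the index gives the requested order, -1 if it gives the exact
// reverse, 0 if it cannot be used; *used_key_parts gets the matched part count.
static int order_matches_index(const std::vector<OrderPart> &order,
                               const std::vector<std::string> &index_columns,
                               unsigned *used_key_parts)
{
  int reverse = 0;
  std::size_t i = 0;
  for (const OrderPart &part : order)
  {
    if (i >= index_columns.size() || part.column != index_columns[i])
      return 0;                          // ORDER BY column is not the next key part
    int flag = part.descending ? -1 : 1;
    if (reverse && flag != reverse)
      return 0;                          // mixed ASC/DESC cannot use one scan
    reverse = flag;
    i++;
  }
  *used_key_parts = (unsigned) i;
  return reverse;
}

int main()
{
  unsigned used = 0;
  std::vector<OrderPart> order = {{"a", true}, {"b", true}};  // ORDER BY a DESC, b DESC
  int dir = order_matches_index(order, {"a", "b", "c"}, &used);
  std::printf("direction=%d key_parts=%u\n", dir, used);       // prints: -1 2
  return 0;
}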
@@ -5225,10 +5252,41 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
if (ref_key >= 0)
{
+ int order_direction;
+ uint used_key_parts;
/* Check if we get the rows in requested sorted order by using the key */
if ((usable_keys & ((key_map) 1 << ref_key)) &&
- test_if_order_by_key(order,table,ref_key) == 1)
+ (order_direction = test_if_order_by_key(order,table,ref_key,
+ &used_key_parts)))
+ {
+ if (order_direction == -1)
+ {
+ if (select && select->quick)
+ {
+ // ORDER BY ref_key DESC
+ QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick,
+ used_key_parts);
+ if (!tmp || tmp->error)
+ {
+ delete tmp;
+ DBUG_RETURN(0); // Reverse sort not supported
+ }
+ select->quick=tmp;
+ DBUG_RETURN(1);
+ }
+ if (tab->ref.key_parts < used_key_parts)
+ {
+ /*
+ SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
+ TODO:
+ Add a new traversal function to read last matching row and
+ traverse backwards.
+ */
+ DBUG_RETURN(0);
+ }
+ }
DBUG_RETURN(1); /* No need to sort */
+ }
}
else
{
@@ -5247,10 +5305,11 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
for (nr=0; keys ; keys>>=1, nr++)
{
+ uint not_used;
if (keys & 1)
{
int flag;
- if ((flag=test_if_order_by_key(order,table,nr)))
+ if ((flag=test_if_order_by_key(order, table, nr, &not_used)))
{
if (!no_changes)
{
@@ -5311,7 +5370,9 @@ create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit)
goto err;
}
}
- table->found_records=filesort(&table,sortorder,length,
+ if (table->tmp_table)
+ table->file->info(HA_STATUS_VARIABLE); // Get record count
+ table->found_records=filesort(table,sortorder,length,
select, 0L, select_limit, &examined_rows);
delete select; // filesort did select
tab->select=0;
@@ -6647,7 +6708,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
DBUG_ENTER("select_describe");
/* Don't log this into the slow query log */
- join->thd->lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
+ join->thd->lex.select_lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
field_list.push_back(new Item_empty_string("table",NAME_LEN));
field_list.push_back(new Item_empty_string("type",10));
field_list.push_back(item=new Item_empty_string("possible_keys",
@@ -6806,7 +6867,7 @@ static void describe_info(THD *thd, const char *info)
String *packet= &thd->packet;
/* Don't log this into the slow query log */
- thd->lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
+ thd->lex.select_lex.options&= ~(QUERY_NO_INDEX_USED | QUERY_NO_GOOD_INDEX_USED);
field_list.push_back(new Item_empty_string("Comment",80));
if (send_fields(thd,field_list,1))
return; /* purecov: inspected */
diff --git a/sql/sql_select.h b/sql/sql_select.h
index bb9bb374c76..0ec1854d641 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -138,8 +138,11 @@ class TMP_TABLE_PARAM {
}
inline void cleanup(void)
{
- delete [] copy_field;
- copy_field=0;
+ if (copy_field) /* Fix for Intel compiler */
+ {
+ delete [] copy_field;
+ copy_field=0;
+ }
}
};
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index ac89b7a2782..199d6a764e0 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -844,18 +844,22 @@ store_create_info(THD *thd, TABLE *table, String *packet)
for (uint i=0 ; i < table->keys ; i++,key_info++)
{
+ KEY_PART_INFO *key_part= key_info->key_part;
+ bool found_primary=0;
packet->append(",\n ", 4);
- KEY_PART_INFO *key_part= key_info->key_part;
- if (i == primary_key)
+ if (i == primary_key && !strcmp(key_info->name,"PRIMARY"))
+ {
+ found_primary=1;
packet->append("PRIMARY ", 8);
+ }
else if (key_info->flags & HA_NOSAME)
packet->append("UNIQUE ", 7);
else if (key_info->flags & HA_FULLTEXT)
packet->append("FULLTEXT ", 9);
packet->append("KEY ", 4);
- if (i != primary_key)
+ if (!found_primary)
append_identifier(thd,packet,key_info->name);
packet->append(" (", 2);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 95f5d4da24d..e91a9a83e73 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -59,9 +59,9 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists)
VOID(pthread_mutex_lock(&LOCK_open));
pthread_mutex_unlock(&thd->mysys_var->mutex);
- if(global_read_lock)
+ if (global_read_lock)
{
- if(thd->global_read_lock)
+ if (thd->global_read_lock)
{
my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE,MYF(0),
tables->real_name);
@@ -221,6 +221,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
db_options|=HA_OPTION_PACK_RECORD;
file=get_new_handler((TABLE*) 0, create_info->db_type);
+ if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
+ (file->option_flag() & HA_NO_TEMP_TABLES))
+ {
+ my_error(ER_ILLEGAL_HA,MYF(0),table_name);
+ DBUG_RETURN(-1);
+ }
+
/* Don't pack keys in old tables if the user has requested this */
while ((sql_field=it++))
@@ -423,6 +430,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
column->field_name);
DBUG_RETURN(-1);
}
+ if (key->type == Key::FULLTEXT &&
+ (file->option_flag() & HA_NO_FULLTEXT_KEY))
+ {
+ my_printf_error(ER_WRONG_KEY_COLUMN, ER(ER_WRONG_KEY_COLUMN), MYF(0),
+ column->field_name);
+ DBUG_RETURN(-1);
+ }
if (f_is_blob(sql_field->pack_flag))
{
if (!(file->option_flag() & HA_BLOB_KEY))
@@ -825,13 +839,13 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table)
int lock_retcode;
pthread_mutex_lock(&LOCK_open);
- if((lock_retcode = lock_table_name(thd, table)) < 0)
+ if ((lock_retcode = lock_table_name(thd, table)) < 0)
{
pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(-1);
}
- if(lock_retcode && wait_for_locked_table_names(thd, table))
+ if (lock_retcode && wait_for_locked_table_names(thd, table))
{
unlock_table_name(thd, table);
pthread_mutex_unlock(&LOCK_open);
@@ -839,7 +853,7 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table)
}
pthread_mutex_unlock(&LOCK_open);
- if(my_copy(src_path,
+ if (my_copy(src_path,
fn_format(dst_path, dst_path,"",
reg_ext, 4),
MYF(MY_WME)))
@@ -853,7 +867,7 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table)
// generate table will try to send OK which messes up the output
// for the client
- if(generate_table(thd, table, 0))
+ if (generate_table(thd, table, 0))
{
unlock_table_name(thd, table);
thd->net.no_send_ok = save_no_send_ok;
@@ -914,7 +928,7 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables,
// now we should be able to open the partially restored table
// to finish the restore in the handler later on
- if(!(table->table = reopen_name_locked_table(thd, table)))
+ if (!(table->table = reopen_name_locked_table(thd, table)))
unlock_table_name(thd, table);
}
@@ -1091,7 +1105,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
TABLE *table,*new_table;
int error;
char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN],
- *table_name,*db;
+ *table_name,*db;
+ char index_file[FN_REFLEN], data_file[FN_REFLEN];
bool use_timestamp=0;
ha_rows copied,deleted;
ulonglong next_insert_id;
@@ -1113,10 +1128,11 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
strmov(new_name_buff,new_name);
fn_same(new_name_buff,table_name,3);
+ // Check if name changed
#ifdef FN_LOWER_CASE
- if (!my_strcasecmp(new_name_buff,table_name))// Check if name changed
+ if (!strcmp(db,new_db) && !my_strcasecmp(new_name_buff,table_name))
#else
- if (!strcmp(new_name_buff,table_name)) // Check if name changed
+ if (!strcmp(db,new_db) && !strcmp(new_name_buff,table_name))
#endif
new_name=table_name; // No. Make later check easier
else
@@ -1233,7 +1249,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
if (drop->type == Alter_drop::COLUMN &&
!my_strcasecmp(field->field_name, drop->name))
+ {
+ /* Reset auto_increment value if it was dropped */
+ if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
+ !(create_info->used_fields & HA_CREATE_USED_AUTO))
+ {
+ create_info->auto_increment_value=0;
+ create_info->used_fields|=HA_CREATE_USED_AUTO;
+ }
break;
+ }
}
if (drop)
{
@@ -1440,6 +1465,53 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (table->tmp_table)
create_info->options|=HA_LEX_CREATE_TMP_TABLE;
+ /*
+ Handling of symlinked tables:
+ If no rename:
+ Create new data file and index file on the same disk as the
+ old data and index files.
+ Copy data.
+ Rename new data file over old data file and new index file over
+ old index file.
+ Symlinks are not changed.
+
+ If rename:
+ Create new data file and index file on the same disk as the
+ old data and index files. Create also symlinks to point at
+ the new tables.
+ Copy data.
+ At end, rename temporary tables and symlinks to temporary table
+ to final table name.
+ Remove old table and old symlinks
+
+ If rename is made to another database:
+ Create new tables in new database.
+ Copy data.
+ Remove old table and symlinks.
+ */
+
+ if (!strcmp(db, new_db)) // Ignore symlink if db changed
+ {
+ if (create_info->index_file_name)
+ {
+ /* Fix index_file_name to have 'tmp_name' as basename */
+ strmov(index_file, tmp_name);
+ create_info->index_file_name=fn_same(index_file,
+ create_info->index_file_name,
+ 1);
+ }
+ if (create_info->data_file_name)
+ {
+ /* Fix data_file_name to have 'tmp_name' as basename */
+ strmov(data_file, tmp_name);
+ create_info->data_file_name=fn_same(data_file,
+ create_info->data_file_name,
+ 1);
+ }
+ }
+ else
+ create_info->data_file_name=create_info->index_file_name=0;
+
if ((error=mysql_create_table(thd, new_db, tmp_name,
create_info,
create_list,key_list,1,1))) // no logging
@@ -1685,13 +1757,23 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
- (from->found_records = filesort(&from, sortorder, length,
- (SQL_SELECT *) 0, 0L, HA_POS_ERROR,
+ (from->found_records = filesort(from, sortorder, length,
+ (SQL_SELECT *) 0, 0L, HA_POS_ERROR,
&examined_rows))
== HA_POS_ERROR)
goto err;
};
+ /* Turn off recovery logging since rollback of an
+ alter table is to delete the new table so there
+ is no need to log the changes to it. */
+ error = ha_recovery_logging(thd,false);
+ if (error)
+ {
+ error = 1;
+ goto err;
+ }
+
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
if (handle_duplicates == DUP_IGNORE ||
handle_duplicates == DUP_REPLACE)
@@ -1737,6 +1819,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (to->file->activate_all_index(thd))
error=1;
+ tmp_error = ha_recovery_logging(thd,true);
/*
Ensure that the new table is saved properly to disk so that we
can do a rename
@@ -1748,6 +1831,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
if (to->file->external_lock(thd,F_UNLCK))
error=1;
err:
+ tmp_error = ha_recovery_logging(thd,true);
free_io_cache(from);
*copied= found_count;
*deleted=delete_count;
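
Two things in the sql_table.cc hunks above deserve a note. First, the comment block spells out how ALTER TABLE treats symlinked data/index files: fn_same() rebases data_file_name/index_file_name onto the temporary table name when the database stays the same, and both are cleared when the table moves to another database. Second, copy_data_between_tables() now brackets the row copy with ha_recovery_logging(thd, false) and ha_recovery_logging(thd, true), since a failed ALTER only needs to drop the new table rather than roll logged changes back. Below is a minimal sketch of that second pattern, using a hypothetical toggle_logging() in place of ha_recovery_logging(); the RAII guard is simply the idiomatic C++ way to guarantee the re-enable on every exit path, whereas the patch re-enables explicitly both after the copy loop and at the err: label.

static bool logging_enabled= true;                /* toy state, assumption only */
static int toggle_logging(bool enable)            /* stands in for ha_recovery_logging() */
{
  logging_enabled= enable;
  return 0;                                       /* 0 = success, as the patch expects */
}

/* Turns logging back on no matter how the copy ends. */
class LoggingGuard
{
  bool active;
public:
  LoggingGuard() : active(toggle_logging(false) == 0) {}
  ~LoggingGuard() { if (active) (void) toggle_logging(true); }
  bool ok() const { return active; }
};

int copy_rows_sketch()
{
  LoggingGuard guard;                             /* logging off during the bulk copy   */
  if (!guard.ok())
    return 1;                                     /* could not disable: treat as error  */
  /* ... copy rows from the old table into the new one ... */
  return 0;                                       /* destructor re-enables logging      */
}
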
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 3edfdd3d5ef..d20bc74ecb2 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -96,8 +96,7 @@ void print_cached_tables(void)
}
-void TEST_filesort(TABLE **table,SORT_FIELD *sortorder,uint s_length,
- ha_rows special)
+void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special)
{
char buff[256],buff2[256];
String str(buff,sizeof(buff)),out(buff2,sizeof(buff2));
diff --git a/sql/sql_unions.cc b/sql/sql_unions.cc
new file mode 100644
index 00000000000..55aca0f5b68
--- /dev/null
+++ b/sql/sql_unions.cc
@@ -0,0 +1,34 @@
+/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & Monty & Sinisa
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+/* Union of selects */
+
+#include "mysql_priv.h"
+
+/*
+ Do a union of selects
+*/
+
+
+int mysql_union(THD *thd,LEX *lex,uint no_of_selects)
+{
+ SELECT_LEX *sl;
+ for (sl=&lex->select_lex;sl;sl=sl->next)
+ {
+ }
+ return 0;
+}
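
The new sql_unions.cc above is only scaffolding: mysql_union() walks the chain of SELECT_LEX nodes that the UNION grammar later in this diff links through the select list, and does nothing with them yet. Purely as an illustration of the list being walked, and not of what the finished function will do, here is a self-contained toy with an invented SelectNode type:

#include <cstdio>

struct SelectNode                         /* made-up, minimal stand-in for SELECT_LEX */
{
  int id;
  SelectNode *next;                       /* next SELECT of the UNION, 0 if last */
};

int count_selects(SelectNode *first)      /* same traversal shape as mysql_union() */
{
  int n= 0;
  for (SelectNode *sl= first; sl; sl= sl->next)
    n++;
  return n;
}

int main()
{
  SelectNode third = { 3, 0 };
  SelectNode second= { 2, &third };
  SelectNode first = { 1, &second };
  std::printf("%d selects in the union\n", count_selects(&first));
  return 0;
}
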
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index b6b22ecbc99..a6ded7cef9c 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -128,7 +128,7 @@ int mysql_update(THD *thd,
/* If running in safe sql mode, don't allow updates without keys */
if (!table->quick_keys)
{
- thd->lex.options|=QUERY_NO_INDEX_USED;
+ thd->lex.select_lex.options|=QUERY_NO_INDEX_USED;
if ((thd->options & OPTION_SAFE_UPDATES) && limit == HA_POS_ERROR)
{
delete select;
@@ -184,7 +184,7 @@ int mysql_update(THD *thd,
MYF(MY_FAE | MY_ZEROFILL));
if (setup_order(thd, &tables, fields, all_fields, order) ||
!(sortorder=make_unireg_sortorder(order, &length)) ||
- (table->found_records = filesort(&table, sortorder, length,
+ (table->found_records = filesort(table, sortorder, length,
(SQL_SELECT *) 0, 0L,
HA_POS_ERROR, &examined_rows))
== HA_POS_ERROR)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 172fb0830fe..c013ebe1c8c 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -21,6 +21,7 @@
#define YYINITDEPTH 100
#define YYMAXDEPTH 3200 /* Because of 64K stack */
#define Lex current_lex
+#define Select Lex->select
#include "mysql_priv.h"
#include "slave.h"
#include "sql_acl.h"
@@ -129,6 +130,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token LOAD
%token LOCK_SYM
%token UNLOCK_SYM
+%token BINLOG_SYM
+%token EVENTS_SYM
%token ACTION
%token AGGREGATE_SYM
@@ -161,6 +164,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token DELAY_KEY_WRITE_SYM
%token DESC
%token DESCRIBE
+%token DIRECTORY_SYM
%token DISTINCT
%token DISABLE_SYM
%token DYNAMIC_SYM
@@ -193,6 +197,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
%token IDENT
%token IGNORE_SYM
%token INDEX
+%token INDEXES
%token INFILE
%token INNER_SYM
%token INNOBASE_SYM
@@ -464,12 +469,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
opt_escape
%type <string>
- text_string
+ text_string
%type <num>
type int_type real_type order_dir opt_field_spec set_option lock_option
udf_type if_exists opt_local opt_table_options table_options
- table_option opt_if_not_exists
+ table_option opt_if_not_exists
%type <ulong_num>
ULONG_NUM raid_types
@@ -526,7 +531,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
select_item_list select_item values_list no_braces
limit_clause delete_limit_clause fields opt_values values
procedure_list procedure_list2 procedure_item
- when_list2 expr_list2 handler
+ when_list2 expr_list2 handler
opt_precision opt_ignore opt_column opt_restrict
grant revoke set lock unlock string_list field_options field_option
field_opt_list opt_binary table_lock_list table_lock varchar
@@ -541,7 +546,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
opt_mi_check_type opt_to mi_check_types normal_join
table_to_table_list table_to_table opt_table_list opt_as
handler_rkey_function handler_rkey_mode handler_read_or_scan
- END_OF_INPUT
+ single_multi table_wild_list table_wild_one opt_wild union union_list
+ precision
+END_OF_INPUT
%type <NONE>
'-' '+' '*' '/' '%' '(' ')'
@@ -647,7 +654,6 @@ master_def:
}
-
/* create a table */
create:
@@ -671,36 +677,41 @@ create:
| CREATE opt_unique_or_fulltext INDEX ident ON table_ident
{
- Lex->sql_command= SQLCOM_CREATE_INDEX;
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_CREATE_INDEX;
if (!add_table_to_list($6,NULL,1))
YYABORT;
- Lex->create_list.empty();
- Lex->key_list.empty();
- Lex->col_list.empty();
- Lex->change=NullS;
+ lex->create_list.empty();
+ lex->key_list.empty();
+ lex->col_list.empty();
+ lex->change=NullS;
}
'(' key_list ')'
{
- Lex->key_list.push_back(new Key($2,$4.str,Lex->col_list));
- Lex->col_list.empty();
+ LEX *lex=Lex;
+ lex->key_list.push_back(new Key($2,$4.str,lex->col_list));
+ lex->col_list.empty();
}
| CREATE DATABASE opt_if_not_exists ident
{
- Lex->sql_command=SQLCOM_CREATE_DB;
- Lex->name=$4.str;
- Lex->create_info.options=$3;
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_CREATE_DB;
+ lex->name=$4.str;
+ lex->create_info.options=$3;
}
| CREATE udf_func_type UDF_SYM ident
{
- Lex->sql_command = SQLCOM_CREATE_FUNCTION;
- Lex->udf.name=$4.str;
- Lex->udf.name_length=$4.length;
- Lex->udf.type= $2;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_CREATE_FUNCTION;
+ lex->udf.name=$4.str;
+ lex->udf.name_length=$4.length;
+ lex->udf.type= $2;
}
UDF_RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING
{
- Lex->udf.returns=(Item_result) $7;
- Lex->udf.dl=$9.str;
+ LEX *lex=Lex;
+ lex->udf.returns=(Item_result) $7;
+ lex->udf.dl=$9.str;
}
create2:
@@ -711,8 +722,9 @@ create3:
/* empty */ {}
| opt_duplicate opt_as SELECT_SYM
{
- Lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
- mysql_init_select(Lex);
+ LEX *lex=Lex;
+ lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ;
+ mysql_init_select(lex);
}
select_options select_item_list opt_select_from {}
@@ -762,15 +774,17 @@ create_table_option:
{
/* Move the union list to the merge_list */
LEX *lex=Lex;
- TABLE_LIST *table_list= (TABLE_LIST*) lex->table_list.first;
- lex->create_info.merge_list= lex->table_list;
+ TABLE_LIST *table_list= (TABLE_LIST*) lex->select->table_list.first;
+ lex->create_info.merge_list= lex->select->table_list;
lex->create_info.merge_list.elements--;
lex->create_info.merge_list.first= (byte*) (table_list->next);
- lex->table_list.elements=1;
- lex->table_list.next= (byte**) &(table_list->next);
+ lex->select->table_list.elements=1;
+ lex->select->table_list.next= (byte**) &(table_list->next);
table_list->next=0;
lex->create_info.used_fields|= HA_CREATE_USED_UNION;
}
+ | DATA_SYM DIRECTORY_SYM EQ TEXT_STRING { Lex->create_info.data_file_name= $4.str; }
+ | INDEX DIRECTORY_SYM EQ TEXT_STRING { Lex->create_info.index_file_name= $4.str; }
table_types:
ISAM_SYM { $$= DB_TYPE_ISAM; }
@@ -818,8 +832,9 @@ field_list_item:
}
| key_type opt_ident '(' key_list ')'
{
- Lex->key_list.push_back(new Key($1,$2,Lex->col_list));
- Lex->col_list.empty(); /* Alloced by sql_alloc */
+ LEX *lex=Lex;
+ lex->key_list.push_back(new Key($1,$2,lex->col_list));
+ lex->col_list.empty(); /* Alloced by sql_alloc */
}
| opt_constraint FOREIGN KEY_SYM opt_ident '(' key_list ')' references
{
@@ -837,16 +852,18 @@ opt_constraint:
field_spec:
field_ident
{
- Lex->length=Lex->dec=0; Lex->type=0; Lex->interval=0;
- Lex->default_value=0;
+ LEX *lex=Lex;
+ lex->length=lex->dec=0; lex->type=0; lex->interval=0;
+ lex->default_value=0;
}
type opt_attribute
{
+ LEX *lex=Lex;
if (add_field_to_list($1.str,
(enum enum_field_types) $3,
- Lex->length,Lex->dec,Lex->type,
- Lex->default_value,Lex->change,
- Lex->interval))
+ lex->length,lex->dec,lex->type,
+ lex->default_value,lex->change,
+ lex->interval))
YYABORT;
}
@@ -898,12 +915,14 @@ type:
{ $$=FIELD_TYPE_DECIMAL;}
| ENUM {Lex->interval_list.empty();} '(' string_list ')'
{
- Lex->interval=typelib(Lex->interval_list);
+ LEX *lex=Lex;
+ lex->interval=typelib(lex->interval_list);
$$=FIELD_TYPE_ENUM;
}
| SET { Lex->interval_list.empty();} '(' string_list ')'
{
- Lex->interval=typelib(Lex->interval_list);
+ LEX *lex=Lex;
+ lex->interval=typelib(lex->interval_list);
$$=FIELD_TYPE_SET;
}
@@ -935,7 +954,14 @@ real_type:
float_options:
/* empty */ {}
| '(' NUM ')' { Lex->length=$2.str; }
- | '(' NUM ',' NUM ')' { Lex->length=$2.str; Lex->dec=$4.str; }
+ | precision {}
+
+precision:
+ '(' NUM ',' NUM ')'
+ {
+ LEX *lex=Lex;
+ lex->length=$2.str; lex->dec=$4.str;
+ }
field_options:
/* empty */ {}
@@ -955,7 +981,7 @@ opt_len:
opt_precision:
/* empty */ {}
- | '(' NUM ',' NUM ')' { Lex->length=$2.str; Lex->dec=$4.str; }
+ | precision {}
opt_attribute:
/* empty */ {}
@@ -1022,6 +1048,7 @@ key_or_index:
keys_or_index:
KEYS {}
| INDEX {}
+ | INDEXES {}
opt_unique_or_fulltext:
/* empty */ { $$= Key::MULTIPLE; }
@@ -1062,10 +1089,10 @@ alter:
lex->col_list.empty();
lex->drop_list.empty();
lex->alter_list.empty();
- lex->order_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
- lex->db=lex->name=0;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
+ lex->select->db=lex->name=0;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
lex->create_info.db_type= DB_TYPE_DEFAULT;
lex->alter_keys_onoff=LEAVE_AS_IS;
@@ -1083,40 +1110,70 @@ add_column:
alter_list_item:
add_column field_list_item opt_place { Lex->simple_alter=0; }
| add_column '(' field_list ')' { Lex->simple_alter=0; }
- | CHANGE opt_column field_ident { Lex->change= $3.str; Lex->simple_alter=0; }
+ | CHANGE opt_column field_ident
+ {
+ LEX *lex=Lex;
+ lex->change= $3.str; lex->simple_alter=0;
+ }
field_spec
| MODIFY_SYM opt_column field_ident
{
- Lex->length=Lex->dec=0; Lex->type=0; Lex->interval=0;
- Lex->default_value=0;
- Lex->simple_alter=0;
+ LEX *lex=Lex;
+ lex->length=lex->dec=0; lex->type=0; lex->interval=0;
+ lex->default_value=0;
+ lex->simple_alter=0;
}
type opt_attribute
{
+ LEX *lex=Lex;
if (add_field_to_list($3.str,
(enum enum_field_types) $5,
- Lex->length,Lex->dec,Lex->type,
- Lex->default_value, $3.str,
- Lex->interval))
+ lex->length,lex->dec,lex->type,
+ lex->default_value, $3.str,
+ lex->interval))
YYABORT;
- Lex->simple_alter=0;
+ lex->simple_alter=0;
}
| DROP opt_column field_ident opt_restrict
- { Lex->drop_list.push_back(new Alter_drop(Alter_drop::COLUMN,
- $3.str)); Lex->simple_alter=0; }
- | DROP PRIMARY_SYM KEY_SYM { Lex->drop_primary=1; Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->drop_list.push_back(new Alter_drop(Alter_drop::COLUMN,
+ $3.str)); lex->simple_alter=0;
+ }
+ | DROP PRIMARY_SYM KEY_SYM
+ {
+ LEX *lex=Lex;
+ lex->drop_primary=1; lex->simple_alter=0;
+ }
| DROP FOREIGN KEY_SYM opt_ident { Lex->simple_alter=0; }
| DROP key_or_index field_ident
- { Lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
- $3.str)); Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
+ $3.str));
+ lex->simple_alter=0;
+ }
| DISABLE_SYM KEYS { Lex->alter_keys_onoff=DISABLE; }
| ENABLE_SYM KEYS { Lex->alter_keys_onoff=ENABLE; }
| ALTER opt_column field_ident SET DEFAULT literal
- { Lex->alter_list.push_back(new Alter_column($3.str,$6)); Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->alter_list.push_back(new Alter_column($3.str,$6));
+ lex->simple_alter=0;
+ }
| ALTER opt_column field_ident DROP DEFAULT
- { Lex->alter_list.push_back(new Alter_column($3.str,(Item*) 0)); Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->alter_list.push_back(new Alter_column($3.str,(Item*) 0));
+ lex->simple_alter=0;
+ }
| RENAME opt_to table_alias table_ident
- { Lex->db=$4->db.str ; Lex->name= $4->table.str; Lex->simple_alter=0; }
+ {
+ LEX *lex=Lex;
+ lex->select->db=$4->db.str;
+ lex->name= $4->table.str;
+ lex->simple_alter=0;
+ }
| create_table_options { Lex->simple_alter=0; }
| order_clause { Lex->simple_alter=0; }
@@ -1146,14 +1203,16 @@ opt_to:
slave:
SLAVE START_SYM
{
- Lex->sql_command = SQLCOM_SLAVE_START;
- Lex->type = 0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_START;
+ lex->type = 0;
}
|
SLAVE STOP_SYM
{
- Lex->sql_command = SQLCOM_SLAVE_STOP;
- Lex->type = 0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_STOP;
+ lex->type = 0;
};
restore:
@@ -1179,8 +1238,9 @@ backup:
repair:
REPAIR table_or_tables
{
- Lex->sql_command = SQLCOM_REPAIR;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_REPAIR;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
@@ -1204,24 +1264,27 @@ mi_check_type:
analyze:
ANALYZE_SYM table_or_tables
{
- Lex->sql_command = SQLCOM_ANALYZE;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_ANALYZE;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
check:
CHECK_SYM table_or_tables
{
- Lex->sql_command = SQLCOM_CHECK;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_CHECK;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
optimize:
OPTIMIZE table_or_tables
{
- Lex->sql_command = SQLCOM_OPTIMIZE;
- Lex->check_opt.init();
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_OPTIMIZE;
+ lex->check_opt.init();
}
table_list opt_mi_check_type
@@ -1252,11 +1315,11 @@ select:
SELECT_SYM
{
LEX *lex=Lex;
- lex->sql_command= SQLCOM_SELECT;
+ if (lex->sql_command!=SQLCOM_UNION_SELECT) lex->sql_command= SQLCOM_SELECT;
lex->lock_option=TL_READ;
mysql_init_select(lex);
}
- select_options select_item_list select_into select_lock_type
+ select_options select_item_list select_into select_lock_type union
select_into:
limit_clause {}
@@ -1277,13 +1340,13 @@ select_option_list:
| select_option
select_option:
- STRAIGHT_JOIN { Lex->options|= SELECT_STRAIGHT_JOIN; }
+ STRAIGHT_JOIN { Select->options|= SELECT_STRAIGHT_JOIN; }
| HIGH_PRIORITY { Lex->lock_option= TL_READ_HIGH_PRIORITY; }
- | DISTINCT { Lex->options|= SELECT_DISTINCT; }
- | SQL_SMALL_RESULT { Lex->options|= SELECT_SMALL_RESULT; }
- | SQL_BIG_RESULT { Lex->options|= SELECT_BIG_RESULT; }
- | SQL_BUFFER_RESULT { Lex->options|= OPTION_BUFFER_RESULT; }
- | SQL_CALC_FOUND_ROWS { Lex->options|= OPTION_FOUND_ROWS; }
+ | DISTINCT { Select->options|= SELECT_DISTINCT; }
+ | SQL_SMALL_RESULT { Select->options|= SELECT_SMALL_RESULT; }
+ | SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; }
+ | SQL_BUFFER_RESULT { Select->options|= OPTION_BUFFER_RESULT; }
+ | SQL_CALC_FOUND_ROWS { Select->options|= OPTION_FOUND_ROWS; }
| ALL {}
select_lock_type:
@@ -1470,10 +1533,10 @@ simple_expr:
| '(' expr ')' { $$= $2; }
| '{' ident expr '}' { $$= $3; }
| MATCH '(' ident_list ')' AGAINST '(' expr ')'
- { Lex->ftfunc_list.push_back(
+ { Select->ftfunc_list.push_back(
(Item_func_match *)($$=new Item_func_match(*$3,$7))); }
| MATCH ident_list AGAINST '(' expr ')'
- { Lex->ftfunc_list.push_back(
+ { Select->ftfunc_list.push_back(
(Item_func_match *)($$=new Item_func_match(*$2,$5))); }
| BINARY expr %prec NEG { $$= new Item_func_binary($2); }
| CASE_SYM opt_expr WHEN_SYM when_list opt_else END
@@ -1704,30 +1767,30 @@ sum_expr:
{ $$=new Item_sum_sum($3); }
in_sum_expr:
- { Lex->in_sum_expr++ }
+ { Select->in_sum_expr++ }
expr
{
- Lex->in_sum_expr--;
+ Select->in_sum_expr--;
$$=$2;
}
expr_list:
- { Lex->expr_list.push_front(new List<Item>); }
+ { Select->expr_list.push_front(new List<Item>); }
expr_list2
- { $$= Lex->expr_list.pop(); }
+ { $$= Select->expr_list.pop(); }
expr_list2:
- expr { Lex->expr_list.head()->push_back($1); }
- | expr_list2 ',' expr { Lex->expr_list.head()->push_back($3); }
+ expr { Select->expr_list.head()->push_back($1); }
+ | expr_list2 ',' expr { Select->expr_list.head()->push_back($3); }
ident_list:
- { Lex->expr_list.push_front(new List<Item>); }
+ { Select->expr_list.push_front(new List<Item>); }
ident_list2
- { $$= Lex->expr_list.pop(); }
+ { $$= Select->expr_list.pop(); }
ident_list2:
- simple_ident { Lex->expr_list.head()->push_back($1); }
- | ident_list2 ',' simple_ident { Lex->expr_list.head()->push_back($3); }
+ simple_ident { Select->expr_list.head()->push_back($1); }
+ | ident_list2 ',' simple_ident { Select->expr_list.head()->push_back($3); }
opt_expr:
/* empty */ { $$= NULL; }
@@ -1738,20 +1801,22 @@ opt_else:
| ELSE expr { $$= $2; }
when_list:
- { Lex->when_list.push_front(new List<Item>) }
+ { Select->when_list.push_front(new List<Item>) }
when_list2
- { $$= Lex->when_list.pop(); }
+ { $$= Select->when_list.pop(); }
when_list2:
expr THEN_SYM expr
{
- Lex->when_list.head()->push_back($1);
- Lex->when_list.head()->push_back($3);
+ SELECT_LEX *sel=Select;
+ sel->when_list.head()->push_back($1);
+ sel->when_list.head()->push_back($3);
}
| when_list2 WHEN_SYM expr THEN_SYM expr
{
- Lex->when_list.head()->push_back($3);
- Lex->when_list.head()->push_back($5);
+ SELECT_LEX *sel=Select;
+ sel->when_list.head()->push_back($3);
+ sel->when_list.head()->push_back($5);
}
opt_pad:
@@ -1766,15 +1831,21 @@ join_table_list:
| join_table_list INNER_SYM JOIN_SYM join_table ON expr
{ add_join_on($4,$6); $$=$4; }
| join_table_list INNER_SYM JOIN_SYM join_table
- { Lex->db1=$1->db; Lex->table1=$1->name;
- Lex->db2=$4->db; Lex->table2=$4->name; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->db1=$1->db; sel->table1=$1->name;
+ sel->db2=$4->db; sel->table2=$4->name;
+ }
USING '(' using_list ')'
{ add_join_on($4,$8); $$=$4; }
| join_table_list LEFT opt_outer JOIN_SYM join_table ON expr
{ add_join_on($5,$7); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; }
| join_table_list LEFT opt_outer JOIN_SYM join_table
- { Lex->db1=$1->db; Lex->table1=$1->name;
- Lex->db2=$5->db; Lex->table2=$5->name; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->db1=$1->db; sel->table1=$1->name;
+ sel->db2=$5->db; sel->table2=$5->name;
+ }
USING '(' using_list ')'
{ add_join_on($5,$9); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; }
| join_table_list NATURAL LEFT opt_outer JOIN_SYM join_table
@@ -1782,8 +1853,11 @@ join_table_list:
| join_table_list RIGHT opt_outer JOIN_SYM join_table ON expr
{ add_join_on($1,$7); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$1; }
| join_table_list RIGHT opt_outer JOIN_SYM join_table
- { Lex->db1=$1->db; Lex->table1=$1->name;
- Lex->db2=$5->db; Lex->table2=$5->name; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->db1=$1->db; sel->table1=$1->name;
+ sel->db2=$5->db; sel->table2=$5->name;
+ }
USING '(' using_list ')'
{ add_join_on($1,$9); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$1; }
| join_table_list NATURAL RIGHT opt_outer JOIN_SYM join_table
@@ -1797,10 +1871,16 @@ normal_join:
| CROSS JOIN_SYM {}
join_table:
- { Lex->use_index_ptr=Lex->ignore_index_ptr=0; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->use_index_ptr=sel->ignore_index_ptr=0;
+ }
table_ident opt_table_alias opt_key_definition
- { if (!($$=add_table_to_list($2,$3,0,TL_UNLOCK, Lex->use_index_ptr,
- Lex->ignore_index_ptr))) YYABORT; }
+ {
+ SELECT_LEX *sel=Select;
+ if (!($$=add_table_to_list($2,$3,0,TL_UNLOCK, sel->use_index_ptr,
+ sel->ignore_index_ptr))) YYABORT;
+ }
| '{' ident join_table LEFT OUTER JOIN_SYM join_table ON expr '}'
{ add_join_on($7,$9); $7->outer_join|=JOIN_TYPE_LEFT; $$=$7; }
@@ -1811,30 +1891,41 @@ opt_outer:
opt_key_definition:
/* empty */ {}
| USE_SYM key_usage_list
- { Lex->use_index= *$2; Lex->use_index_ptr= &Lex->use_index; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->use_index= *$2;
+ sel->use_index_ptr= &sel->use_index;
+ }
| IGNORE_SYM key_usage_list
- { Lex->ignore_index= *$2; Lex->ignore_index_ptr= &Lex->ignore_index;}
+ {
+ SELECT_LEX *sel=Select;
+ sel->ignore_index= *$2;
+ sel->ignore_index_ptr= &sel->ignore_index;
+ }
key_usage_list:
- key_or_index { Lex->interval_list.empty() } '(' key_usage_list2 ')'
- { $$= &Lex->interval_list; }
+ key_or_index { Select->interval_list.empty() } '(' key_usage_list2 ')'
+ { $$= &Select->interval_list; }
key_usage_list2:
key_usage_list2 ',' ident
- { Lex->interval_list.push_back(new String((const char*) $3.str,$3.length)); }
+ { Select->interval_list.push_back(new String((const char*) $3.str,$3.length)); }
| ident
- { Lex->interval_list.push_back(new String((const char*) $1.str,$1.length)); }
+ { Select->interval_list.push_back(new String((const char*) $1.str,$1.length)); }
| PRIMARY_SYM
- { Lex->interval_list.push_back(new String("PRIMARY",7)); }
+ { Select->interval_list.push_back(new String("PRIMARY",7)); }
using_list:
ident
- { if (!($$= new Item_func_eq(new Item_field(Lex->db1,Lex->table1, $1.str), new Item_field(Lex->db2,Lex->table2,$1.str))))
+ {
+ SELECT_LEX *sel=Select;
+ if (!($$= new Item_func_eq(new Item_field(sel->db1,sel->table1, $1.str), new Item_field(sel->db2,sel->table2,$1.str))))
YYABORT;
}
| using_list ',' ident
{
- if (!($$= new Item_cond_and(new Item_func_eq(new Item_field(Lex->db1,Lex->table1,$3.str), new Item_field(Lex->db2,Lex->table2,$3.str)), $1)))
+ SELECT_LEX *sel=Select;
+ if (!($$= new Item_cond_and(new Item_func_eq(new Item_field(sel->db1,sel->table1,$3.str), new Item_field(sel->db2,sel->table2,$3.str)), $1)))
YYABORT;
}
@@ -1865,13 +1956,16 @@ opt_table_alias:
where_clause:
- /* empty */ { Lex->where= 0; }
- | WHERE expr { Lex->where= $2; }
+ /* empty */ { Select->where= 0; }
+ | WHERE expr { Select->where= $2; }
having_clause:
/* empty */
- | HAVING { Lex->create_refs=1; } expr
- { Lex->having= $3; Lex->create_refs=0; }
+ | HAVING { Select->create_refs=1; } expr
+ {
+ SELECT_LEX *sel=Select;
+ sel->having= $3; sel->create_refs=0;
+ }
opt_escape:
ESCAPE_SYM TEXT_STRING { $$= $2.str; }
@@ -1901,7 +1995,7 @@ opt_order_clause:
| order_clause
order_clause:
- ORDER_SYM BY { Lex->sort_default=1; } order_list
+ ORDER_SYM BY { Select->sort_default=1; } order_list
order_list:
order_list ',' order_ident order_dir
@@ -1911,39 +2005,46 @@ order_list:
order_dir:
/* empty */ { $$ = 1; }
- | ASC { $$ = Lex->sort_default=1; }
- | DESC { $$ = Lex->sort_default=0; }
+ | ASC { $$ = Select->sort_default=1; }
+ | DESC { $$ = Select->sort_default=0; }
limit_clause:
/* empty */
{
- Lex->select_limit= (Lex->sql_command == SQLCOM_HA_READ) ?
+ SELECT_LEX *sel=Select;
+ sel->select_limit= (Lex->sql_command == SQLCOM_HA_READ) ?
1 : current_thd->default_select_limit;
- Lex->offset_limit= 0L;
+ sel->offset_limit= 0L;
}
| LIMIT ULONG_NUM
- { Lex->select_limit= $2; Lex->offset_limit=0L; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->select_limit= $2; sel->offset_limit=0L;
+ }
| LIMIT ULONG_NUM ',' ULONG_NUM
- { Lex->select_limit= $4; Lex->offset_limit=$2; }
+ {
+ SELECT_LEX *sel=Select;
+ sel->select_limit= $4; sel->offset_limit=$2;
+ }
delete_limit_clause:
/* empty */
{
- Lex->select_limit= HA_POS_ERROR;
+ Select->select_limit= HA_POS_ERROR;
}
| LIMIT ULONGLONG_NUM
- { Lex->select_limit= (ha_rows) $2; }
+ { Select->select_limit= (ha_rows) $2; }
ULONG_NUM:
- NUM { $$= strtoul($1.str,NULL,10); }
- | REAL_NUM { $$= strtoul($1.str,NULL,10); }
+ NUM { $$= strtoul($1.str,NULL,10); }
+ | REAL_NUM { $$= strtoul($1.str,NULL,10); }
| FLOAT_NUM { $$= strtoul($1.str,NULL,10); }
ULONGLONG_NUM:
- NUM { $$= (ulonglong) strtoul($1.str,NULL,10); }
- | LONG_NUM { $$= strtoull($1.str,NULL,10); }
- | REAL_NUM { $$= strtoull($1.str,NULL,10); }
+ NUM { $$= (ulonglong) strtoul($1.str,NULL,10); }
+ | LONG_NUM { $$= strtoull($1.str,NULL,10); }
+ | REAL_NUM { $$= strtoull($1.str,NULL,10); }
| FLOAT_NUM { $$= strtoull($1.str,NULL,10); }
procedure_clause:
@@ -1998,28 +2099,32 @@ opt_into:
drop:
DROP TABLE_SYM if_exists table_list opt_restrict
{
- Lex->sql_command = SQLCOM_DROP_TABLE;
- Lex->drop_if_exists = $3;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_DROP_TABLE;
+ lex->drop_if_exists = $3;
}
| DROP INDEX ident ON table_ident {}
{
- Lex->sql_command= SQLCOM_DROP_INDEX;
- Lex->drop_list.empty();
- Lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DROP_INDEX;
+ lex->drop_list.empty();
+ lex->drop_list.push_back(new Alter_drop(Alter_drop::KEY,
$3.str));
if (!add_table_to_list($5,NULL, 1))
YYABORT;
}
| DROP DATABASE if_exists ident
{
- Lex->sql_command= SQLCOM_DROP_DB;
- Lex->drop_if_exists=$3;
- Lex->name=$4.str;
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DROP_DB;
+ lex->drop_if_exists=$3;
+ lex->name=$4.str;
}
| DROP UDF_SYM ident
{
- Lex->sql_command = SQLCOM_DROP_FUNCTION;
- Lex->udf.name=$3.str;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_DROP_FUNCTION;
+ lex->udf.name=$3.str;
}
@@ -2062,17 +2167,19 @@ insert2:
insert_table:
table_name
{
- Lex->field_list.empty();
- Lex->many_values.empty();
- Lex->insert_list=0;
+ LEX *lex=Lex;
+ lex->field_list.empty();
+ lex->many_values.empty();
+ lex->insert_list=0;
}
insert_field_spec:
opt_field_spec insert_values {}
| SET
{
- if (!(Lex->insert_list = new List_item) ||
- Lex->many_values.push_back(Lex->insert_list))
+ LEX *lex=Lex;
+ if (!(lex->insert_list = new List_item) ||
+ lex->many_values.push_back(lex->insert_list))
YYABORT;
}
ident_eq_list
@@ -2110,8 +2217,9 @@ ident_eq_list:
ident_eq_value:
simple_ident equal expr
{
- if (Lex->field_list.push_back($1) ||
- Lex->insert_list->push_back($3))
+ LEX *lex=Lex;
+ if (lex->field_list.push_back($1) ||
+ lex->insert_list->push_back($3))
YYABORT;
}
@@ -2126,7 +2234,8 @@ no_braces:
}
opt_values ')'
{
- if (Lex->many_values.push_back(Lex->insert_list))
+ LEX *lex=Lex;
+ if (lex->many_values.push_back(lex->insert_list))
YYABORT;
}
@@ -2155,10 +2264,11 @@ update:
opt_order_clause
delete_limit_clause
{
- Lex->sql_command = SQLCOM_UPDATE;
- Lex->order_list.elements=0;
- Lex->order_list.first=0;
- Lex->order_list.next= (byte**) &Lex->order_list.first;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_UPDATE;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
}
update_list:
@@ -2182,22 +2292,64 @@ opt_low_priority:
delete:
DELETE_SYM
{
- Lex->sql_command= SQLCOM_DELETE; Lex->options=0;
- Lex->lock_option= current_thd->update_lock_default;
- Lex->order_list.elements=0;
- Lex->order_list.first=0;
- Lex->order_list.next= (byte**) &Lex->order_list.first;
- }
- opt_delete_options FROM table_name
- where_clause opt_order_clause delete_limit_clause
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_DELETE; lex->select->options=0;
+ lex->lock_option= lex->thd->update_lock_default;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
+ }
+ opt_delete_options single_multi {}
+
+single_multi:
+ FROM table_name where_clause opt_order_clause delete_limit_clause {}
+ | table_wild_list
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_MULTI_DELETE;
+ mysql_init_select(lex);
+ lex->select->select_limit=HA_POS_ERROR;
+ lex->auxilliary_table_list.elements=0;
+ lex->auxilliary_table_list.first=0;
+ lex->auxilliary_table_list.next= (byte**) &(lex->auxilliary_table_list.first);
+ }
+ FROM
+ {
+ LEX *lex=Lex;
+ lex->auxilliary_table_list=lex->select_lex.table_list;
+ lex->select->table_list.elements=0;
+ lex->select->table_list.first=0;
+ lex->select->table_list.next= (byte**) &(lex->select->table_list.first);
+ } join_table_list where_clause
+
+
+table_wild_list:
+ table_wild_one {}
+ | table_wild_list ',' table_wild_one {}
+
+table_wild_one:
+ ident opt_wild
+ {
+ if (!add_table_to_list(new Table_ident($1),NULL,1,TL_WRITE))
+ YYABORT;
+ }
+ | ident '.' ident opt_wild
+ {
+ if (!add_table_to_list(new Table_ident($1,$3,0),NULL,1,TL_WRITE))
+ YYABORT;
+ }
+
+opt_wild:
+ /* empty */ {}
+ | '.' '*' {}
opt_delete_options:
- /* empty */ {}
+ /* empty */ {}
| opt_delete_option opt_delete_options {}
opt_delete_option:
- QUICK { Lex->options|= OPTION_QUICK; }
+ QUICK { Select->options|= OPTION_QUICK; }
| LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; }
truncate:
@@ -2205,17 +2357,16 @@ truncate:
{
LEX* lex = Lex;
lex->sql_command= SQLCOM_TRUNCATE;
- lex->options=0;
- lex->order_list.elements=0;
- lex->order_list.first=0;
- lex->order_list.next= (byte**) &lex->order_list.first;
-
+ lex->select->options=0;
+ lex->select->order_list.elements=0;
+ lex->select->order_list.first=0;
+ lex->select->order_list.next= (byte**) &lex->select->order_list.first;
lex->lock_option= current_thd->update_lock_default; }
opt_table_sym:
/* empty */
| TABLE_SYM
-
+
/* Show things */
show: SHOW { Lex->wild=0;} show_param
@@ -2224,18 +2375,26 @@ show_param:
DATABASES wild
{ Lex->sql_command= SQLCOM_SHOW_DATABASES; }
| TABLES opt_db wild
- { Lex->sql_command= SQLCOM_SHOW_TABLES; Lex->db= $2; Lex->options=0;}
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_TABLES;
+ lex->select->db= $2; lex->select->options=0;
+ }
| TABLE_SYM STATUS_SYM opt_db wild
- { Lex->sql_command= SQLCOM_SHOW_TABLES;
- Lex->options|= SELECT_DESCRIBE;
- Lex->db= $3;
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_TABLES;
+ lex->select->options|= SELECT_DESCRIBE;
+ lex->select->db= $3;
}
| OPEN_SYM TABLES opt_db wild
- { Lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
- Lex->db= $3;
- Lex->options=0;
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
+ lex->select->db= $3;
+ lex->select->options=0;
}
- | opt_full COLUMNS FROM table_ident opt_db wild
+ | opt_full COLUMNS from_or_in table_ident opt_db wild
{
Lex->sql_command= SQLCOM_SHOW_FIELDS;
if ($5)
@@ -2246,7 +2405,15 @@ show_param:
| MASTER_SYM LOGS_SYM
{
Lex->sql_command = SQLCOM_SHOW_BINLOGS;
- }
+ }
+ | SLAVE HOSTS_SYM
+ {
+ Lex->sql_command = SQLCOM_SHOW_SLAVE_HOSTS;
+ }
+ | BINLOG_SYM EVENTS_SYM binlog_in binlog_from limit_clause
+ {
+ Lex->sql_command = SQLCOM_SHOW_BINLOG_EVENTS;
+ }
| keys_or_index FROM table_ident opt_db
{
Lex->sql_command= SQLCOM_SHOW_KEYS;
@@ -2264,8 +2431,12 @@ show_param:
| LOGS_SYM
{ Lex->sql_command= SQLCOM_SHOW_LOGS; }
| GRANTS FOR_SYM user
- { Lex->sql_command= SQLCOM_SHOW_GRANTS;
- Lex->grant_user=$3; Lex->grant_user->password.str=NullS; }
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SHOW_GRANTS;
+ lex->grant_user=$3;
+ lex->grant_user->password.str=NullS;
+ }
| CREATE TABLE_SYM table_ident
{
Lex->sql_command = SQLCOM_SHOW_CREATE;
@@ -2283,7 +2454,7 @@ show_param:
opt_db:
/* empty */ { $$= 0; }
- | FROM ident { $$= $2.str; }
+ | from_or_in ident { $$= $2.str; }
wild:
/* empty */
@@ -2293,18 +2464,32 @@ opt_full:
/* empty */ { Lex->verbose=0; }
| FULL { Lex->verbose=1; }
+from_or_in:
+ FROM
+ | IN_SYM
+
+binlog_in:
+ /* empty */ { Lex->mi.log_file_name = 0; }
+ | IN_SYM TEXT_STRING { Lex->mi.log_file_name = $2.str; }
+
+binlog_from:
+ /* empty */ { Lex->mi.pos = 4; /* skip magic number */ }
+ | FROM ULONGLONG_NUM { Lex->mi.pos = $2; }
+
+
/* A Oracle compatible synonym for show */
describe:
describe_command table_ident
{
- Lex->wild=0;
- Lex->verbose=0;
- Lex->sql_command=SQLCOM_SHOW_FIELDS;
+ LEX *lex=Lex;
+ lex->wild=0;
+ lex->verbose=0;
+ lex->sql_command=SQLCOM_SHOW_FIELDS;
if (!add_table_to_list($2, NULL,0))
YYABORT;
}
opt_describe_column
- | describe_command select { Lex->options|= SELECT_DESCRIBE };
+ | describe_command select { Select->options|= SELECT_DESCRIBE };
describe_command:
@@ -2320,7 +2505,12 @@ opt_describe_column:
/* flush things */
flush:
- FLUSH_SYM {Lex->sql_command= SQLCOM_FLUSH; Lex->type=0; } flush_options
+ FLUSH_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_FLUSH; lex->type=0;
+ }
+ flush_options
flush_options:
flush_options ',' flush_option
@@ -2341,8 +2531,11 @@ opt_table_list:
| table_list {}
reset:
- RESET_SYM {Lex->sql_command= SQLCOM_RESET; Lex->type=0; } reset_options
-
+ RESET_SYM
+ {
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_RESET; lex->type=0;
+ } reset_options
reset_options:
reset_options ',' reset_option
| reset_option
@@ -2352,7 +2545,12 @@ reset_option:
| MASTER_SYM { Lex->type|= REFRESH_MASTER; }
purge:
- PURGE { Lex->sql_command = SQLCOM_PURGE; Lex->type=0;}
+ PURGE
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_PURGE;
+ lex->type=0;
+ }
MASTER_SYM LOGS_SYM TO_SYM TEXT_STRING
{
Lex->to_log = $6.str;
@@ -2363,29 +2561,34 @@ purge:
kill:
KILL_SYM expr
{
- if ($2->fix_fields(current_thd,0))
- {
- send_error(&current_thd->net, ER_SET_CONSTANTS_ONLY);
- YYABORT;
- }
- Lex->sql_command=SQLCOM_KILL;
- Lex->thread_id= (ulong) $2->val_int();
+ LEX *lex=Lex;
+ if ($2->fix_fields(lex->thd,0))
+ {
+ send_error(&lex->thd->net, ER_SET_CONSTANTS_ONLY);
+ YYABORT;
+ }
+ lex->sql_command=SQLCOM_KILL;
+ lex->thread_id= (ulong) $2->val_int();
}
/* change database */
use: USE_SYM ident
- { Lex->sql_command=SQLCOM_CHANGE_DB; Lex->db= $2.str; }
+ {
+ LEX *lex=Lex;
+ lex->sql_command=SQLCOM_CHANGE_DB; lex->select->db= $2.str;
+ }
/* import, export of files */
-load: LOAD DATA_SYM opt_low_priority opt_local INFILE TEXT_STRING
+load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING
{
- Lex->sql_command= SQLCOM_LOAD;
- Lex->local_file= $4;
- if (!(Lex->exchange= new sql_exchange($6.str,0)))
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_LOAD;
+ lex->local_file= $4;
+ if (!(lex->exchange= new sql_exchange($6.str,0)))
YYABORT;
- Lex->field_list.empty();
+ lex->field_list.empty();
}
opt_duplicate INTO TABLE_SYM table_ident opt_field_term opt_line_term
opt_ignore_lines opt_field_spec
@@ -2401,6 +2604,11 @@ load: LOAD DATA_SYM opt_low_priority opt_local INFILE TEXT_STRING
YYABORT;
}
+ |
+ LOAD DATA_SYM FROM MASTER_SYM
+ {
+ Lex->sql_command = SQLCOM_LOAD_MASTER_DATA;
+ }
opt_local:
/* empty */ { $$=0;}
@@ -2428,7 +2636,11 @@ field_term_list:
field_term:
TERMINATED BY text_string { Lex->exchange->field_term= $3;}
| OPTIONALLY ENCLOSED BY text_string
- { Lex->exchange->enclosed= $4; Lex->exchange->opt_enclosed=1;}
+ {
+ LEX *lex=Lex;
+ lex->exchange->enclosed= $4;
+ lex->exchange->opt_enclosed=1;
+ }
| ENCLOSED BY text_string { Lex->exchange->enclosed= $3;}
| ESCAPED BY text_string { Lex->exchange->escaped= $3;}
@@ -2498,13 +2710,25 @@ order_ident:
simple_ident:
ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field(NullS,NullS,$1.str) : (Item*) new Item_ref(NullS,NullS,$1.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,NullS,$1.str) : (Item*) new Item_ref(NullS,NullS,$1.str);
+ }
| ident '.' ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$1.str,$3.str) : (Item*) new Item_ref(NullS,$1.str,$3.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$1.str,$3.str) : (Item*) new Item_ref(NullS,$1.str,$3.str);
+ }
| '.' ident '.' ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$2.str,$4.str) : (Item*) new Item_ref(NullS,$2.str,$4.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field(NullS,$2.str,$4.str) : (Item*) new Item_ref(NullS,$2.str,$4.str);
+ }
| ident '.' ident '.' ident
- { $$ = !Lex->create_refs || Lex->in_sum_expr > 0 ? (Item*) new Item_field((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str) : (Item*) new Item_ref((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str); }
+ {
+ SELECT_LEX *sel=Select;
+ $$ = !sel->create_refs || sel->in_sum_expr > 0 ? (Item*) new Item_field((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str) : (Item*) new Item_ref((current_thd->client_capabilities & CLIENT_NO_SCHEMA ? NullS :$1.str),$3.str,$5.str);
+ }
field_ident:
@@ -2521,10 +2745,11 @@ ident:
IDENT { $$=$1; }
| keyword
{
+ LEX *lex;
$$.str=sql_strmake($1.str,$1.length);
$$.length=$1.length;
- if (Lex->next_state != STATE_END)
- Lex->next_state=STATE_OPERATOR_OR_IDENT;
+ if ((lex=Lex)->next_state != STATE_END)
+ lex->next_state=STATE_OPERATOR_OR_IDENT;
}
ident_or_text:
@@ -2575,6 +2800,7 @@ keyword:
| DATETIME {}
| DATE_SYM {}
| DAY_SYM {}
+ | DIRECTORY_SYM {}
| DELAY_KEY_WRITE_SYM {}
| DISABLE_SYM {}
| DUMPFILE {}
@@ -2598,6 +2824,7 @@ keyword:
| HOSTS_SYM {}
| HOUR_SYM {}
| IDENTIFIED_SYM {}
+ | INDEXES {}
| ISOLATION {}
| ISAM_SYM {}
| INNOBASE_SYM {}
@@ -2675,12 +2902,14 @@ keyword:
set:
SET opt_option
{
- THD *thd=current_thd;
- Lex->sql_command= SQLCOM_SET_OPTION;
- Lex->options=thd->options;
- Lex->select_limit=thd->default_select_limit;
- Lex->gemini_spin_retries=thd->gemini_spin_retries;
- Lex->tx_isolation=thd->tx_isolation;
+ LEX *lex=Lex;
+ lex->sql_command= SQLCOM_SET_OPTION;
+ lex->select->options=lex->thd->options;
+ lex->select->select_limit=lex->thd->default_select_limit;
+ lex->gemini_spin_retries=lex->thd->gemini_spin_retries;
+ lex->tx_isolation=lex->thd->tx_isolation;
+ lex->option_type=0;
+ lex->option_list.empty()
}
option_value_list
@@ -2690,36 +2919,41 @@ opt_option:
option_value_list:
option_value
+ | GLOBAL_SYM { Lex->option_type=1; } option_value
+ | LOCAL_SYM { Lex->option_type=0; } option_value
| option_value_list ',' option_value
option_value:
set_option equal NUM
{
+ SELECT_LEX *sel=Select;
if (atoi($3.str) == 0)
- Lex->options&= ~$1;
+ sel->options&= ~$1;
else
- Lex->options|= $1;
+ sel->options|= $1;
}
| set_isolation
| AUTOCOMMIT equal NUM
{
+ SELECT_LEX *sel=Select;
if (atoi($3.str) != 0) /* Test NOT AUTOCOMMIT */
- Lex->options&= ~(OPTION_NOT_AUTO_COMMIT);
+ sel->options&= ~(OPTION_NOT_AUTO_COMMIT);
else
- Lex->options|= OPTION_NOT_AUTO_COMMIT;
+ sel->options|= OPTION_NOT_AUTO_COMMIT;
}
| SQL_SELECT_LIMIT equal ULONG_NUM
{
- Lex->select_limit= $3;
+ Select->select_limit= $3;
}
| SQL_SELECT_LIMIT equal DEFAULT
{
- Lex->select_limit= HA_POS_ERROR;
+ Select->select_limit= HA_POS_ERROR;
}
| SQL_MAX_JOIN_SIZE equal ULONG_NUM
{
- current_thd->max_join_size= $3;
- Lex->options&= ~OPTION_BIG_SELECTS;
+ LEX *lex=Lex;
+ lex->thd->max_join_size= $3;
+ lex->select->options&= ~OPTION_BIG_SELECTS;
}
| SQL_MAX_JOIN_SIZE equal DEFAULT
{
@@ -2794,6 +3028,28 @@ option_value:
slave_skip_counter = $3;
pthread_mutex_unlock(&LOCK_slave);
}
+ | ident equal DEFAULT
+ {
+ LEX *lex=Lex;
+ lex->option_list.push_back(new Set_option(lex->option_type,
+ $1.str,$1.length,
+ (Item*) 0));
+ }
+ | ident equal expr
+ {
+ THD *thd=current_thd;
+ Item *item= $3;
+ if (item->fix_fields(current_thd,0))
+ {
+ send_error(&thd->net, ER_SET_CONSTANTS_ONLY);
+ YYABORT;
+ }
+ thd->lex.option_list.
+ push_back(new Set_option(thd->lex.option_type,
+ $1.str,$1.length,
+ item));
+ }
+
text_or_password:
TEXT_STRING { $$=$1.str;}
@@ -2842,7 +3098,10 @@ set_isolation:
default_tx_isolation_name=tx_isolation_typelib.type_names[default_tx_isolation];
}
| SESSION_SYM tx_isolation
- { current_thd->session_tx_isolation= Lex->tx_isolation= $2; }
+ {
+ LEX *lex=Lex;
+ lex->thd->session_tx_isolation= lex->tx_isolation= $2;
+ }
| tx_isolation
{ Lex->tx_isolation= $1; }
@@ -2926,8 +3185,9 @@ handler_rkey_function:
| LAST_SYM { Lex->ha_read_mode = RLAST; }
| handler_rkey_mode
{
- Lex->ha_read_mode = RKEY;
- if (!(Lex->insert_list = new List_item))
+ LEX *lex=Lex;
+ lex->ha_read_mode = RKEY;
+ if (!(lex->insert_list = new List_item))
YYABORT;
} '(' values ')' { }
@@ -2943,22 +3203,24 @@ handler_rkey_mode:
revoke:
REVOKE
{
- Lex->sql_command = SQLCOM_REVOKE;
- Lex->users_list.empty();
- Lex->columns.empty();
- Lex->grant= Lex->grant_tot_col=0;
- Lex->db=0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_REVOKE;
+ lex->users_list.empty();
+ lex->columns.empty();
+ lex->grant= lex->grant_tot_col=0;
+ lex->select->db=0;
}
grant_privileges ON opt_table FROM user_list
grant:
GRANT
{
- Lex->sql_command = SQLCOM_GRANT;
- Lex->users_list.empty();
- Lex->columns.empty();
- Lex->grant= Lex->grant_tot_col=0;
- Lex->db=0;
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_GRANT;
+ lex->users_list.empty();
+ lex->columns.empty();
+ lex->grant= lex->grant_tot_col=0;
+ lex->select->db=0;
}
grant_privileges ON opt_table TO_SYM user_list
grant_option
@@ -2998,43 +3260,47 @@ grant_privilege:
opt_table:
'*'
{
- Lex->db=current_thd->db;
- if (Lex->grant == UINT_MAX)
- Lex->grant = DB_ACLS & ~GRANT_ACL;
- else if (Lex->columns.elements)
+ LEX *lex=Lex;
+ lex->select->db=lex->thd->db;
+ if (lex->grant == UINT_MAX)
+ lex->grant = DB_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
{
- net_printf(&current_thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
+ net_printf(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
YYABORT;
- }
+ }
}
| ident '.' '*'
{
- Lex->db = $1.str;
- if (Lex->grant == UINT_MAX)
- Lex->grant = DB_ACLS & ~GRANT_ACL;
- else if (Lex->columns.elements)
+ LEX *lex=Lex;
+ lex->select->db = $1.str;
+ if (lex->grant == UINT_MAX)
+ lex->grant = DB_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
{
- net_printf(&current_thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
+ net_printf(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
YYABORT;
}
}
| '*' '.' '*'
{
- Lex->db = NULL;
- if (Lex->grant == UINT_MAX)
- Lex->grant = GLOBAL_ACLS & ~GRANT_ACL;
- else if (Lex->columns.elements)
+ LEX *lex=Lex;
+ lex->select->db = NULL;
+ if (lex->grant == UINT_MAX)
+ lex->grant = GLOBAL_ACLS & ~GRANT_ACL;
+ else if (lex->columns.elements)
{
- net_printf(&current_thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
+ net_printf(&lex->thd->net,ER_ILLEGAL_GRANT_FOR_TABLE);
YYABORT;
}
}
| table_ident
{
+ LEX *lex=Lex;
if (!add_table_to_list($1,NULL,0))
YYABORT;
- if (Lex->grant == UINT_MAX)
- Lex->grant = TABLE_ACLS & ~GRANT_ACL;
+ if (lex->grant == UINT_MAX)
+ lex->grant = TABLE_ACLS & ~GRANT_ACL;
}
@@ -3065,7 +3331,11 @@ grant_user:
opt_column_list:
- /* empty */ { Lex->grant |= Lex->which_columns; }
+ /* empty */
+ {
+ LEX *lex=Lex;
+ lex->grant |= lex->which_columns;
+ }
| '(' column_list ')'
column_list:
@@ -3078,16 +3348,17 @@ column_list_id:
String *new_str = new String((const char*) $1.str,$1.length);
List_iterator <LEX_COLUMN> iter(Lex->columns);
class LEX_COLUMN *point;
+ LEX *lex=Lex;
while ((point=iter++))
{
if (!my_strcasecmp(point->column.ptr(),new_str->ptr()))
break;
}
- Lex->grant_tot_col|= Lex->which_columns;
+ lex->grant_tot_col|= lex->which_columns;
if (point)
- point->rights |= Lex->which_columns;
+ point->rights |= lex->which_columns;
else
- Lex->columns.push_back(new LEX_COLUMN (*new_str,Lex->which_columns));
+ lex->columns.push_back(new LEX_COLUMN (*new_str,lex->which_columns));
}
grant_option:
@@ -3106,3 +3377,23 @@ commit:
rollback:
ROLLBACK_SYM { Lex->sql_command = SQLCOM_ROLLBACK;}
+
+
+/*
+** UNIONS : glue selects together
+*/
+
+
+union:
+ /* empty */ {}
+ | union_list
+
+union_list:
+ UNION_SYM
+ {
+ LEX *lex=Lex;
+ if (lex->exchange) YYABORT; /* Only the last SELECT can have INTO...... */
+ lex->sql_command=SQLCOM_UNION_SELECT;
+ mysql_new_select(lex); lex->select->linkage=UNION_TYPE;
+ }
+ select
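
Most of the sql_yacc.yy hunks above are mechanical: state that used to sit directly in LEX (options, where, order_list, limit values, table lists, and so on) is now reached through the new Select macro, i.e. the current SELECT_LEX, so every SELECT in a UNION keeps its own copy while mysql_new_select() appends a fresh node and makes it current. The sketch below shows only that ownership split; StatementState, SelectState and new_select() are invented names for illustration, not the real LEX/SELECT_LEX definitions, and cleanup of the chained nodes is omitted.

/* Per-SELECT state: one instance per SELECT in a UNION chain (invented names). */
struct SelectState
{
  unsigned long options;          /* SELECT_DISTINCT, OPTION_QUICK, ...          */
  unsigned long select_limit;     /* LIMIT count                                 */
  unsigned long offset_limit;     /* LIMIT offset                                */
  SelectState *next;              /* next SELECT of the UNION, 0 if last         */
  SelectState() : options(0), select_limit(~0UL), offset_limit(0), next(0) {}
};

/* Statement-wide state: one instance per parsed statement. */
struct StatementState
{
  int sql_command;                /* SQLCOM_SELECT, SQLCOM_UNION_SELECT, ...     */
  SelectState first;              /* head of the SELECT chain                    */
  SelectState *current;           /* what the Select macro would resolve to      */
  StatementState() : sql_command(0), current(&first) {}

  /* Rough analogue of mysql_new_select(): start a fresh SELECT for a UNION. */
  SelectState *new_select()
  {
    SelectState *sel= new SelectState;   /* freeing the chain is omitted here */
    current->next= sel;
    current= sel;
    return sel;
  }
};
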
diff --git a/sql/structs.h b/sql/structs.h
index 36f503312c0..594432134b2 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -164,3 +164,4 @@ typedef struct st_lex_user {
#define STATUS_NOT_READ 8 /* Record isn't read */
#define STATUS_UPDATED 16 /* Record is updated by formula */
#define STATUS_NULL_ROW 32 /* table->null_row is set */
+#define STATUS_DELETED 64
diff --git a/sql/time.cc b/sql/time.cc
index 086977af72f..e0b74fc9d25 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -455,8 +455,8 @@ str_to_TIME(const char *str, uint length, TIME *l_time,bool fuzzy_date)
if ((date[i]=tmp_value))
date_used=1; // Found something
if (i == 2 && str != end && *str == 'T')
- str++; // ISO8601: CCYYMMDDThhmmss
- else
+ str++; // ISO8601: CCYYMMDDThhmmss
+ else if ( i != 5 ) // Skip inter-field delimiters
{
while (str != end && (ispunct(*str) || isspace(*str)))
{
diff --git a/sql/uniques.cc b/sql/uniques.cc
index becb3d8a3a5..5ef7ead276b 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -34,14 +34,18 @@
#include "mysql_priv.h"
#include "sql_sort.h"
-Unique::Unique(qsort_cmp2 comp_func, uint size, ulong max_in_memory_size_arg)
+
+Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
+ uint size, ulong max_in_memory_size_arg)
:max_in_memory_size(max_in_memory_size_arg),elements(0)
{
my_b_clear(&file);
- init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL, NULL);
+ init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL, comp_func_fixed_arg);
/* If the following fail's the next add will also fail */
init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size);
+ open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
+ MYF(MY_WME));
}
@@ -69,12 +73,12 @@ bool Unique::flush()
}
-int unique_write_to_file(gptr key, Unique *unique, element_count count)
+int unique_write_to_file(gptr key, element_count count, Unique *unique)
{
return my_b_write(&unique->file, key, unique->tree.size_of_element) ? 1 : 0;
}
-int unique_write_to_ptrs(gptr key, Unique *unique, element_count count)
+int unique_write_to_ptrs(gptr key, element_count count, Unique *unique)
{
memcpy(unique->record_pointers, key, unique->tree.size_of_element);
unique->record_pointers+=unique->tree.size_of_element;
@@ -92,7 +96,7 @@ bool Unique::get(TABLE *table)
SORTPARAM sort_param;
table->found_records=elements+tree.elements_in_tree;
- if (!my_b_inited(&file))
+ if (my_b_tell(&file) == 0)
{
/* Whole tree is in memory; Don't use disk if you don't need to */
if ((record_pointers=table->record_pointers= (byte*)
@@ -107,47 +111,47 @@ bool Unique::get(TABLE *table)
if (flush())
return 1;
- IO_CACHE *outfile=table->io_cache, tempfile;
+ IO_CACHE *outfile=table->io_cache;
BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
- uint maxbuffer= file_ptrs.elements;
+ uint maxbuffer= file_ptrs.elements - 1;
uchar *sort_buffer;
my_off_t save_pos;
bool error=1;
- my_b_clear(&tempfile);
-
/* Open cached file if it isn't open */
- if (! my_b_inited(outfile) &&
+ outfile=table->io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
+ MYF(MY_ZEROFILL));
+
+ if (!outfile || ! my_b_inited(outfile) &&
open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
MYF(MY_WME)))
return 1;
reinit_io_cache(outfile,WRITE_CACHE,0L,0,0);
-
- sort_param.keys=elements;
+
+ sort_param.max_rows= elements;
sort_param.sort_form=table;
sort_param.sort_length=sort_param.ref_length=tree.size_of_element;
sort_param.keys= max_in_memory_size / sort_param.sort_length;
- if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *
- sort_param.sort_length,
- MYF(0))))
+ if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *
+ sort_param.sort_length,
+ MYF(0))))
return 1;
sort_param.unique_buff= sort_buffer+(sort_param.keys*
sort_param.sort_length);
/* Merge the buffers to one file, removing duplicates */
- if (merge_many_buff(&sort_param,sort_buffer,file_ptr,&maxbuffer,&tempfile))
+ if (merge_many_buff(&sort_param,sort_buffer,file_ptr,&maxbuffer,&file))
goto err;
- if (flush_io_cache(&tempfile) ||
- reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
+ if (flush_io_cache(&file) ||
+ reinit_io_cache(&file,READ_CACHE,0L,0,0))
goto err;
- if (merge_buffers(&sort_param, &tempfile, outfile, sort_buffer, file_ptr,
+ if (merge_buffers(&sort_param, &file, outfile, sort_buffer, file_ptr,
file_ptr, file_ptr+maxbuffer,0))
goto err;
error=0;
err:
x_free((gptr) sort_buffer);
- close_cached_file(&tempfile);
if (flush_io_cache(outfile))
error=1;
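
The uniques.cc changes above keep the Unique pattern: collect keys in an in-memory tree, spill a sorted run to the cached temporary file whenever the tree gets too big, and in get() either hand back the in-memory tree directly (nothing was ever spilled, the my_b_tell(&file) == 0 case) or merge the runs while suppressing duplicates; the comparison function now also carries a fixed extra argument. The toy model below mirrors that shape only, with std::set standing in for the TREE and in-memory vectors standing in for the spilled runs; UniqueSketch and its members are made-up names.

#include <algorithm>
#include <iterator>
#include <set>
#include <vector>

class UniqueSketch
{
  std::set<long> tree;                        /* in-memory, already duplicate-free */
  std::vector<std::vector<long> > runs;       /* spilled sorted runs               */
  size_t max_in_memory;
public:
  explicit UniqueSketch(size_t max_elems) : max_in_memory(max_elems) {}

  void add(long key)
  {
    tree.insert(key);
    if (tree.size() >= max_in_memory)         /* tree full: spill a sorted run */
      flush();
  }

  void flush()                                /* mirrors Unique::flush() */
  {
    if (tree.empty())
      return;
    runs.push_back(std::vector<long>(tree.begin(), tree.end()));
    tree.clear();
  }

  std::vector<long> get()                     /* mirrors the two paths in Unique::get() */
  {
    if (runs.empty())                         /* whole tree fit in memory: skip "disk" */
      return std::vector<long>(tree.begin(), tree.end());
    flush();                                  /* last partial run */
    std::vector<long> out;
    for (size_t i= 0; i < runs.size(); i++)   /* stand-in for merge_many_buff()        */
    {
      std::vector<long> merged;
      std::merge(out.begin(), out.end(), runs[i].begin(), runs[i].end(),
                 std::back_inserter(merged));
      out.swap(merged);
    }
    out.erase(std::unique(out.begin(), out.end()), out.end()); /* drop duplicates */
    return out;
  }
};
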
diff --git a/sql/unireg.h b/sql/unireg.h
index 7ad3bac2eab..159832295fd 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -38,6 +38,8 @@
#endif
#define ER(X) errmesg[(X)-1000]
+#define ER_SAFE(X) (((X) >= 1000 && (X) < ER_ERROR_MESSAGES + 1000) ? ER(X) : "Invalid error code")
+
#define ERRMAPP 1 /* Errormap f|r my_error */
#define LIBLEN FN_REFLEN-FN_LEN /* Max l{ngd p} dev */
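
ER_SAFE() above adds the range check that ER() lacks, so a bogus error number can no longer index outside errmesg[]. A self-contained toy version of the same idea (messages, first_code and er_safe() are invented for the example):

#include <cstdio>

static const char *messages[]= { "msg 1000", "msg 1001", "msg 1002" };
static const int first_code = 1000;
static const int n_messages = 3;

static const char *er_safe(int code)          /* same bounds check as ER_SAFE() */
{
  return (code >= first_code && code < first_code + n_messages)
           ? messages[code - first_code]
           : "Invalid error code";
}

int main()
{
  std::printf("%s\n", er_safe(1001));         /* known code: real message   */
  std::printf("%s\n", er_safe(4711));         /* bogus code: safe fallback  */
  return 0;
}
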
diff --git a/sql/violite.c b/sql/violite.c
deleted file mode 100644
index 902110ff072..00000000000
--- a/sql/violite.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with this library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA */
-
-/*
- Note that we can't have assertion on file descriptors; The reason for
- this is that during mysql shutdown, another thread can close a file
- we are working on. In this case we should just return read errors from
- the file descriptior.
-*/
-
-#include <global.h>
-
-#ifndef HAVE_VIO /* is Vio suppored by the Vio lib ? */
-
-#include <errno.h>
-#include <assert.h>
-#include <violite.h>
-#include <my_sys.h>
-#include <my_net.h>
-#include <m_string.h>
-#ifdef HAVE_POLL
-#include <sys/poll.h>
-#endif
-#ifdef HAVE_SYS_IOCTL_H
-#include <sys/ioctl.h>
-#endif
-
-#if defined(__EMX__)
-#define ioctlsocket ioctl
-#endif /* defined(__EMX__) */
-
-#if defined(MSDOS) || defined(__WIN__)
-#ifdef __WIN__
-#undef errno
-#undef EINTR
-#undef EAGAIN
-#define errno WSAGetLastError()
-#define EINTR WSAEINTR
-#define EAGAIN WSAEINPROGRESS
-#endif /* __WIN__ */
-#define O_NONBLOCK 1 /* For emulation of fcntl() */
-#endif
-#ifndef EWOULDBLOCK
-#define EWOULDBLOCK EAGAIN
-#endif
-
-#ifndef __WIN__
-#define HANDLE void *
-#endif
-
-struct st_vio
-{
- my_socket sd; /* my_socket - real or imaginary */
- HANDLE hPipe;
- my_bool localhost; /* Are we from localhost? */
- int fcntl_mode; /* Buffered fcntl(sd,F_GETFL) */
- struct sockaddr_in local; /* Local internet address */
- struct sockaddr_in remote; /* Remote internet address */
- enum enum_vio_type type; /* Type of connection */
- char desc[30]; /* String description */
-};
-
-typedef void *vio_ptr;
-typedef char *vio_cstring;
-
-/*
- * Helper to fill most of the Vio* with defaults.
- */
-
-static void vio_reset(Vio* vio, enum enum_vio_type type,
- my_socket sd, HANDLE hPipe,
- my_bool localhost)
-{
- bzero((char*) vio, sizeof(*vio));
- vio->type = type;
- vio->sd = sd;
- vio->hPipe = hPipe;
- vio->localhost= localhost;
-}
-
-/* Open the socket or TCP/IP connection and read the fcntl() status */
-
-Vio *vio_new(my_socket sd, enum enum_vio_type type, my_bool localhost)
-{
- Vio *vio;
- DBUG_ENTER("vio_new");
- DBUG_PRINT("enter", ("sd=%d", sd));
- if ((vio = (Vio*) my_malloc(sizeof(*vio),MYF(MY_WME))))
- {
- vio_reset(vio, type, sd, 0, localhost);
- sprintf(vio->desc,
- (vio->type == VIO_TYPE_SOCKET ? "socket (%d)" : "TCP/IP (%d)"),
- vio->sd);
-#if !defined(__WIN__) && !defined(__EMX__)
-#if !defined(NO_FCNTL_NONBLOCK)
- vio->fcntl_mode = fcntl(sd, F_GETFL);
-#elif defined(HAVE_SYS_IOCTL_H) /* hpux */
- /* Non-blocking sockets don't work well on HPUX 11.0 */
- (void) ioctl(sd,FIOSNBIO,0);
-#endif
-#else /* !defined(__WIN__) && !defined(__EMX__) */
- {
- /* set to blocking mode by default */
- ulong arg=0, r;
- r = ioctlsocket(sd,FIONBIO,(void*) &arg, sizeof(arg));
- }
-#endif
- }
- DBUG_RETURN(vio);
-}
-
-
-#ifdef __WIN__
-
-Vio *vio_new_win32pipe(HANDLE hPipe)
-{
- Vio *vio;
- DBUG_ENTER("vio_new_handle");
- if ((vio = (Vio*) my_malloc(sizeof(Vio),MYF(MY_WME))))
- {
- vio_reset(vio, VIO_TYPE_NAMEDPIPE, 0, hPipe, TRUE);
- strmov(vio->desc, "named pipe");
- }
- DBUG_RETURN(vio);
-}
-
-#endif
-
-void vio_delete(Vio * vio)
-{
- /* It must be safe to delete null pointers. */
- /* This matches the semantics of C++'s delete operator. */
- if (vio)
- {
- if (vio->type != VIO_CLOSED)
- vio_close(vio);
- my_free((gptr) vio,MYF(0));
- }
-}
-
-int vio_errno(Vio *vio __attribute__((unused)))
-{
- return errno; /* On Win32 this is mapped to WSAGetLastError() */
-}
-
-
-int vio_read(Vio * vio, gptr buf, int size)
-{
- int r;
- DBUG_ENTER("vio_read");
- DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size));
-#ifdef __WIN__
- if (vio->type == VIO_TYPE_NAMEDPIPE)
- {
- DWORD length;
- if (!ReadFile(vio->hPipe, buf, size, &length, NULL))
- DBUG_RETURN(-1);
- DBUG_RETURN(length);
- }
- r = recv(vio->sd, buf, size,0);
-#else
- errno=0; /* For linux */
- r = read(vio->sd, buf, size);
-#endif /* __WIN__ */
-#ifndef DBUG_OFF
- if (r < 0)
- {
- DBUG_PRINT("vio_error", ("Got error %d during read",errno));
- }
-#endif /* DBUG_OFF */
- DBUG_PRINT("exit", ("%d", r));
- DBUG_RETURN(r);
-}
-
-
-int vio_write(Vio * vio, const gptr buf, int size)
-{
- int r;
- DBUG_ENTER("vio_write");
- DBUG_PRINT("enter", ("sd=%d, buf=%p, size=%d", vio->sd, buf, size));
-#ifdef __WIN__
- if ( vio->type == VIO_TYPE_NAMEDPIPE)
- {
- DWORD length;
- if (!WriteFile(vio->hPipe, (char*) buf, size, &length, NULL))
- DBUG_RETURN(-1);
- DBUG_RETURN(length);
- }
- r = send(vio->sd, buf, size,0);
-#else
- r = write(vio->sd, buf, size);
-#endif /* __WIN__ */
-#ifndef DBUG_OFF
- if (r < 0)
- {
- DBUG_PRINT("vio_error", ("Got error on write: %d",errno));
- }
-#endif /* DBUG_OFF */
- DBUG_PRINT("exit", ("%d", r));
- DBUG_RETURN(r);
-}
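
The deleted vio_read()/vio_write() pair above hides the transport behind one Vio handle: Windows named pipes go through ReadFile()/WriteFile(), sockets through recv()/send(), and plain read()/write() elsewhere. A minimal POSIX-only sketch of that dispatch, using a socketpair; toy_vio and toy_vio_read() are illustrative stand-ins, not the vio library's API:

    #include <cstdio>
    #include <sys/socket.h>
    #include <sys/types.h>
    #include <unistd.h>

    enum toy_vio_type { TOY_VIO_SOCKET, TOY_VIO_PIPE };

    struct toy_vio
    {
      toy_vio_type type;
      int fd;                                    // socket or pipe descriptor
    };

    // Callers never need to know which transport they are reading from.
    static ssize_t toy_vio_read(toy_vio *vio, char *buf, size_t size)
    {
      if (vio->type == TOY_VIO_PIPE)
        return read(vio->fd, buf, size);         // pipe: plain read()
      return recv(vio->fd, buf, size, 0);        // socket: recv()
    }

    int main()
    {
      int sv[2];
      if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
        return 1;
      const char msg[] = "hello";
      send(sv[0], msg, sizeof(msg), 0);

      toy_vio vio = {TOY_VIO_SOCKET, sv[1]};
      char buf[16];
      ssize_t n = toy_vio_read(&vio, buf, sizeof(buf));
      printf("got %zd bytes: %s\n", n, buf);
      close(sv[0]);
      close(sv[1]);
      return 0;
    }
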
-
-
-int vio_blocking(Vio * vio, my_bool set_blocking_mode)
-{
- int r=0;
- DBUG_ENTER("vio_blocking");
- DBUG_PRINT("enter", ("set_blocking_mode: %d", (int) set_blocking_mode));
-
-#if !defined(__WIN__) && !defined(__EMX__)
-#if !defined(NO_FCNTL_NONBLOCK)
-
- if (vio->sd >= 0)
- {
- int old_fcntl=vio->fcntl_mode;
- if (set_blocking_mode)
- vio->fcntl_mode &= ~O_NONBLOCK; /* clear bit */
- else
- vio->fcntl_mode |= O_NONBLOCK; /* set bit */
- if (old_fcntl != vio->fcntl_mode)
- r = fcntl(vio->sd, F_SETFL, vio->fcntl_mode);
- }
-#endif /* !defined(NO_FCNTL_NONBLOCK) */
-#else /* !defined(__WIN__) && !defined(__EMX__) */
-#ifndef __EMX__
- if (vio->type != VIO_TYPE_NAMEDPIPE)
-#endif
- {
- ulong arg;
- int old_fcntl=vio->fcntl_mode;
- if (set_blocking_mode)
- {
- arg = 0;
- vio->fcntl_mode &= ~O_NONBLOCK; /* clear bit */
- }
- else
- {
- arg = 1;
- vio->fcntl_mode |= O_NONBLOCK; /* set bit */
- }
- if (old_fcntl != vio->fcntl_mode)
- r = ioctlsocket(vio->sd,FIONBIO,(void*) &arg, sizeof(arg));
- }
-#endif /* !defined(__WIN__) && !defined(__EMX__) */
- DBUG_RETURN(r);
-}
-
-my_bool
-vio_is_blocking(Vio * vio)
-{
- my_bool r;
- DBUG_ENTER("vio_is_blocking");
- r = !(vio->fcntl_mode & O_NONBLOCK);
- DBUG_PRINT("exit", ("%d", (int) r));
- DBUG_RETURN(r);
-}
-
-
-int vio_fastsend(Vio * vio __attribute__((unused)))
-{
- int r=0;
- DBUG_ENTER("vio_fastsend");
-
-#ifdef IPTOS_THROUGHPUT
- {
-#ifndef __EMX__
- int tos = IPTOS_THROUGHPUT;
- if (!setsockopt(vio->sd, IPPROTO_IP, IP_TOS, (void *) &tos, sizeof(tos)))
-#endif /* !__EMX__ */
- {
- int nodelay = 1;
- if (setsockopt(vio->sd, IPPROTO_TCP, TCP_NODELAY, (void *) &nodelay,
- sizeof(nodelay))) {
- DBUG_PRINT("warning",
- ("Couldn't set socket option for fast send"));
- r= -1;
- }
- }
- }
-#endif /* IPTOS_THROUGHPUT */
- DBUG_PRINT("exit", ("%d", r));
- DBUG_RETURN(r);
-}
-
-int vio_keepalive(Vio* vio, my_bool set_keep_alive)
-{
- int r=0;
- uint opt = 0;
- DBUG_ENTER("vio_keepalive");
- DBUG_PRINT("enter", ("sd=%d, set_keep_alive=%d", vio->sd, (int)
- set_keep_alive));
- if (vio->type != VIO_TYPE_NAMEDPIPE)
- {
- if (set_keep_alive)
- opt = 1;
- r = setsockopt(vio->sd, SOL_SOCKET, SO_KEEPALIVE, (char *) &opt,
- sizeof(opt));
- }
- DBUG_RETURN(r);
-}
-
-
-my_bool
-vio_should_retry(Vio * vio __attribute__((unused)))
-{
- int en = errno;
- return en == EAGAIN || en == EINTR || en == EWOULDBLOCK;
-}
-
-
-int vio_close(Vio * vio)
-{
- int r;
- DBUG_ENTER("vio_close");
-#ifdef __WIN__
- if (vio->type == VIO_TYPE_NAMEDPIPE)
- {
-#if defined(__NT__) && defined(MYSQL_SERVER)
- CancelIo(vio->hPipe);
- DisconnectNamedPipe(vio->hPipe);
-#endif
- r=CloseHandle(vio->hPipe);
- }
- else if (vio->type != VIO_CLOSED)
-#endif /* __WIN__ */
- {
- r=0;
- if (shutdown(vio->sd,2))
- r= -1;
- if (closesocket(vio->sd))
- r= -1;
- }
- if (r)
- {
- DBUG_PRINT("vio_error", ("close() failed, error: %d",errno));
- /* FIXME: error handling (not critical for MySQL) */
- }
- vio->type= VIO_CLOSED;
- vio->sd= -1;
- DBUG_RETURN(r);
-}
-
-
-const char *vio_description(Vio * vio)
-{
- return vio->desc;
-}
-
-enum enum_vio_type vio_type(Vio* vio)
-{
- return vio->type;
-}
-
-my_socket vio_fd(Vio* vio)
-{
- return vio->sd;
-}
-
-
-my_bool vio_peer_addr(Vio * vio, char *buf)
-{
- DBUG_ENTER("vio_peer_addr");
- DBUG_PRINT("enter", ("sd=%d", vio->sd));
- if (vio->localhost)
- {
- strmov(buf,"127.0.0.1");
- }
- else
- {
- size_socket addrLen = sizeof(struct sockaddr);
- if (getpeername(vio->sd, (struct sockaddr *) (& (vio->remote)),
- &addrLen) != 0)
- {
- DBUG_PRINT("exit", ("getpeername, error: %d", errno));
- DBUG_RETURN(1);
- }
- my_inet_ntoa(vio->remote.sin_addr,buf);
- }
- DBUG_PRINT("exit", ("addr=%s", buf));
- DBUG_RETURN(0);
-}
-
-
-void vio_in_addr(Vio *vio, struct in_addr *in)
-{
- DBUG_ENTER("vio_in_addr");
- if (vio->localhost)
- bzero((char*) in, sizeof(*in)); /* This should never be executed */
- else
- *in=vio->remote.sin_addr;
- DBUG_VOID_RETURN;
-}
-
-
-/* Return 0 if there is data to be read */
-
-my_bool vio_poll_read(Vio *vio,uint timeout)
-{
-#ifndef HAVE_POLL
- return 0;
-#else
- struct pollfd fds;
- int res;
- DBUG_ENTER("vio_poll");
- fds.fd=vio->sd;
- fds.events=POLLIN;
- fds.revents=0;
- if ((res=poll(&fds,1,(int) timeout*1000)) <= 0)
- {
- DBUG_RETURN(res < 0 ? 0 : 1); /* Don't return 1 on errors */
- }
- DBUG_RETURN(fds.revents & POLLIN ? 0 : 1);
-#endif
-}
-
-#endif /* HAVE_VIO */
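
One detail worth noting in the removed vio_blocking() is that it keeps a cached copy of the fcntl() flags in the handle, so toggling blocking mode costs a syscall only when the mode actually changes. A minimal sketch of that caching pattern, assuming POSIX fcntl(); toy_vio and set_blocking() are illustrative names only:

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    struct toy_vio
    {
      int fd;
      int fcntl_mode;                            // cached fcntl(fd, F_GETFL)
    };

    static int set_blocking(toy_vio *vio, bool blocking)
    {
      int old_mode = vio->fcntl_mode;
      if (blocking)
        vio->fcntl_mode &= ~O_NONBLOCK;          // clear bit: blocking I/O
      else
        vio->fcntl_mode |= O_NONBLOCK;           // set bit: non-blocking I/O
      if (old_mode == vio->fcntl_mode)
        return 0;                                // unchanged: skip the syscall
      return fcntl(vio->fd, F_SETFL, vio->fcntl_mode);
    }

    int main()
    {
      toy_vio vio = {STDIN_FILENO, fcntl(STDIN_FILENO, F_GETFL)};
      if (set_blocking(&vio, false) == 0)
        printf("stdin switched to non-blocking mode\n");
      set_blocking(&vio, true);                  // restore blocking mode
      return 0;
    }
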