Diffstat (limited to 'sql')
-rw-r--r--  sql/des_key_file.cc | 25
-rw-r--r--  sql/examples/ha_archive.cc | 33
-rw-r--r--  sql/examples/ha_archive.h | 9
-rw-r--r--  sql/examples/ha_example.cc | 25
-rw-r--r--  sql/examples/ha_example.h | 4
-rw-r--r--  sql/examples/ha_tina.cc | 34
-rw-r--r--  sql/examples/ha_tina.h | 14
-rw-r--r--  sql/field.cc | 247
-rw-r--r--  sql/field.h | 1
-rw-r--r--  sql/field_conv.cc | 32
-rw-r--r--  sql/ha_berkeley.cc | 17
-rw-r--r--  sql/ha_berkeley.h | 7
-rw-r--r--  sql/ha_blackhole.cc | 31
-rw-r--r--  sql/ha_blackhole.h | 4
-rw-r--r--  sql/ha_federated.cc | 1629
-rw-r--r--  sql/ha_federated.h | 173
-rw-r--r--  sql/ha_heap.cc | 27
-rw-r--r--  sql/ha_heap.h | 5
-rw-r--r--  sql/ha_innodb.cc | 111
-rw-r--r--  sql/ha_innodb.h | 48
-rw-r--r--  sql/ha_myisam.cc | 47
-rw-r--r--  sql/ha_myisam.h | 8
-rw-r--r--  sql/ha_myisammrg.cc | 27
-rw-r--r--  sql/ha_myisammrg.h | 2
-rw-r--r--  sql/ha_ndbcluster.cc | 69
-rw-r--r--  sql/ha_ndbcluster.h | 89
-rw-r--r--  sql/handler.cc | 23
-rw-r--r--  sql/handler.h | 18
-rw-r--r--  sql/hostname.cc | 10
-rw-r--r--  sql/item.cc | 250
-rw-r--r--  sql/item.h | 28
-rw-r--r--  sql/item_buff.cc | 6
-rw-r--r--  sql/item_cmpfunc.cc | 52
-rw-r--r--  sql/item_create.cc | 12
-rw-r--r--  sql/item_func.cc | 278
-rw-r--r--  sql/item_func.h | 43
-rw-r--r--  sql/item_strfunc.cc | 134
-rw-r--r--  sql/item_strfunc.h | 26
-rw-r--r--  sql/item_subselect.cc | 15
-rw-r--r--  sql/item_sum.cc | 39
-rw-r--r--  sql/item_timefunc.cc | 8
-rw-r--r--  sql/item_timefunc.h | 6
-rw-r--r--  sql/lex.h | 1
-rw-r--r--  sql/lock.cc | 55
-rw-r--r--  sql/log.cc | 10
-rw-r--r--  sql/log_event.cc | 18
-rw-r--r--  sql/mysql_priv.h | 58
-rw-r--r--  sql/mysqld.cc | 199
-rw-r--r--  sql/opt_range.cc | 21
-rw-r--r--  sql/parse_file.cc | 70
-rw-r--r--  sql/parse_file.h | 4
-rw-r--r--  sql/protocol_cursor.cc | 3
-rw-r--r--  sql/set_var.cc | 69
-rw-r--r--  sql/set_var.h | 7
-rw-r--r--  sql/share/charsets/Index.xml | 2
-rw-r--r--  sql/share/errmsg.txt | 47
-rw-r--r--  sql/slave.cc | 6
-rw-r--r--  sql/sp.cc | 97
-rw-r--r--  sql/sp.h | 6
-rw-r--r--  sql/sp_cache.h | 17
-rw-r--r--  sql/sp_head.cc | 107
-rw-r--r--  sql/sp_head.h | 28
-rw-r--r--  sql/sql_acl.cc | 4
-rw-r--r--  sql/sql_base.cc | 212
-rw-r--r--  sql/sql_bitmap.h | 9
-rw-r--r--  sql/sql_cache.cc | 48
-rw-r--r--  sql/sql_class.cc | 26
-rw-r--r--  sql/sql_class.h | 50
-rw-r--r--  sql/sql_derived.cc | 3
-rw-r--r--  sql/sql_insert.cc | 6
-rw-r--r--  sql/sql_lex.cc | 10
-rw-r--r--  sql/sql_lex.h | 1
-rw-r--r--  sql/sql_parse.cc | 132
-rw-r--r--  sql/sql_prepare.cc | 72
-rw-r--r--  sql/sql_repl.cc | 2
-rw-r--r--  sql/sql_select.cc | 208
-rw-r--r--  sql/sql_select.h | 9
-rw-r--r--  sql/sql_show.cc | 197
-rw-r--r--  sql/sql_table.cc | 79
-rw-r--r--  sql/sql_trigger.cc | 470
-rw-r--r--  sql/sql_trigger.h | 22
-rw-r--r--  sql/sql_union.cc | 27
-rw-r--r--  sql/sql_view.cc | 17
-rw-r--r--  sql/sql_yacc.yy | 77
-rw-r--r--  sql/table.cc | 21
-rw-r--r--  sql/table.h | 25
-rw-r--r--  sql/time.cc | 2
-rw-r--r--  sql/tztime.cc | 34
-rw-r--r--  sql/tztime.h | 2
-rw-r--r--  sql/unireg.cc | 12
90 files changed, 4465 insertions, 1803 deletions
diff --git a/sql/des_key_file.cc b/sql/des_key_file.cc
index 34bcbd4fc4b..77cb0c8de0f 100644
--- a/sql/des_key_file.cc
+++ b/sql/des_key_file.cc
@@ -21,18 +21,6 @@
struct st_des_keyschedule des_keyschedule[10];
uint des_default_key;
-pthread_mutex_t LOCK_des_key_file;
-static int initialized= 0;
-
-void
-init_des_key_file()
-{
- if (!initialized)
- {
- initialized=1;
- pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST);
- }
-}
/*
Function which loads DES keys from plaintext file into memory on MySQL
@@ -55,8 +43,6 @@ load_des_key_file(const char *file_name)
DBUG_ENTER("load_des_key_file");
DBUG_PRINT("enter",("name: %s",file_name));
- init_des_key_file();
-
VOID(pthread_mutex_lock(&LOCK_des_key_file));
if ((file=my_open(file_name,O_RDONLY | O_BINARY ,MYF(MY_WME))) < 0 ||
init_io_cache(&io, file, IO_SIZE*2, READ_CACHE, 0, 0, MYF(MY_WME)))
@@ -113,15 +99,4 @@ error:
VOID(pthread_mutex_unlock(&LOCK_des_key_file));
DBUG_RETURN(result);
}
-
-
-void free_des_key_file()
-{
- if (initialized)
- {
- initialized= 01;
- pthread_mutex_destroy(&LOCK_des_key_file);
- }
-}
-
#endif /* HAVE_OPENSSL */
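
The removed init_des_key_file()/free_des_key_file() pair implemented guarded one-time setup and teardown of LOCK_des_key_file; after this change the mutex is expected to be initialized during server startup rather than lazily on first use. A minimal standalone sketch of the guarded-init pattern that was dropped (generic names, not the MySQL sources):

#include <pthread.h>

static pthread_mutex_t LOCK_example;
static int initialized= 0;

/* Initialize the mutex exactly once; later calls are no-ops. */
static void init_example_lock(void)
{
  if (!initialized)
  {
    initialized= 1;
    pthread_mutex_init(&LOCK_example, NULL);
  }
}

/* Destroy the mutex only if it was ever initialized. */
static void free_example_lock(void)
{
  if (initialized)
  {
    initialized= 0;
    pthread_mutex_destroy(&LOCK_example);
  }
}

int main()
{
  init_example_lock();
  pthread_mutex_lock(&LOCK_example);
  pthread_mutex_unlock(&LOCK_example);
  free_example_lock();
  return 0;
}
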
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index c362985f565..fd47b45ce52 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -140,16 +140,20 @@ static handlerton archive_hton = {
"archive",
0, /* slot */
0, /* savepoint size. */
- 0, /* close_connection */
- 0, /* savepoint */
- 0, /* rollback to savepoint */
- 0, /* releas savepoint */
- 0, /* commit */
- 0, /* rollback */
- 0, /* prepare */
- 0, /* recover */
- 0, /* commit_by_xid */
- 0 /* rollback_by_xid */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
};
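
The handlerton initializer grows three trailing cursor read-view slots plus a flags word (HTON_NO_FLAGS here), so every engine's aggregate initializer has to be extended in step; spelling the unused slots as NULL also makes it obvious they are callbacks. A small standalone sketch of the idea (an illustrative struct, not the real handlerton):

#include <cstdio>

typedef int (*engine_hook)(void);

struct example_hton
{
  const char *name;
  engine_hook commit;            /* older slots */
  engine_hook rollback;
  engine_hook close_cursor_view; /* newly added slot */
  unsigned int flags;            /* newly added flags word */
};

/* Slots the engine does not implement stay explicit NULLs,
   and the new flags member gets an explicit value. */
static example_hton archive_like_hton= {
  "archive",
  NULL, /* commit */
  NULL, /* rollback */
  NULL, /* close_cursor_view */
  0     /* no flags */
};

int main()
{
  std::printf("%s flags=%u\n", archive_like_hton.name, archive_like_hton.flags);
  return 0;
}
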
@@ -208,6 +212,15 @@ bool archive_db_end()
return FALSE;
}
+ha_archive::ha_archive(TABLE *table_arg)
+ :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
+{
+ /* Set our original buffer from pre-allocated memory */
+ buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
+
+ /* The size of the offset value we will use for position() */
+ ref_length = sizeof(z_off_t);
+}
/*
This method reads the header of a datafile and returns whether or not it was successful.
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index 3932b62980c..41835c5fb6f 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -58,14 +58,7 @@ class ha_archive: public handler
bool bulk_insert; /* If we are performing a bulk insert */
public:
- ha_archive(TABLE *table): handler(table), delayed_insert(0), bulk_insert(0)
- {
- /* Set our original buffer from pre-allocated memory */
- buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
-
- /* The size of the offset value we will use for position() */
- ref_length = sizeof(z_off_t);
- }
+ ha_archive(TABLE *table_arg);
~ha_archive()
{
}
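
This and the following engines get the same treatment: the constructor declaration stays in the header, while its definition moves into the .cc file, where it can pass the file-local handlerton to the handler base class. A condensed single-file sketch of the pattern (illustrative names, not the MySQL sources):

#include <cstddef>

struct handlerton { const char *name; };
struct TABLE;

class handler
{
public:
  handler(const handlerton *hton_arg, TABLE *table_arg)
    : hton(hton_arg), table(table_arg) {}
  virtual ~handler() {}
private:
  const handlerton *hton;
  TABLE *table;
};

/* --- header: declaration only ---------------------------------- */
class ha_archive_like : public handler
{
public:
  ha_archive_like(TABLE *table_arg);   /* body used to be inline here */
};

/* --- .cc file: definition next to the static handlerton -------- */
static handlerton archive_like_hton= { "archive" };

ha_archive_like::ha_archive_like(TABLE *table_arg)
  : handler(&archive_like_hton, table_arg)
{}

int main()
{
  ha_archive_like h(NULL);
  (void) h;
  return 0;
}
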
diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc
index 9da297ccd1f..a7e193b9730 100644
--- a/sql/examples/ha_example.cc
+++ b/sql/examples/ha_example.cc
@@ -72,6 +72,27 @@
#ifdef HAVE_EXAMPLE_DB
#include "ha_example.h"
+
+static handlerton example_hton= {
+ "CSV",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
/* Variables for example share methods */
static HASH example_open_tables; // Hash used to track open tables
pthread_mutex_t example_mutex; // This is the mutex we use to init the hash
@@ -179,6 +200,10 @@ static int free_share(EXAMPLE_SHARE *share)
}
+ha_example::ha_example(TABLE *table_arg)
+ :handler(&example_hton, table_arg)
+{}
+
/*
If frm_error() is called then we will use this to to find out what file extentions
exist for the storage engine. This is also used by the default rename_table and
diff --git a/sql/examples/ha_example.h b/sql/examples/ha_example.h
index ae72e5bb275..37f38fe5210 100644
--- a/sql/examples/ha_example.h
+++ b/sql/examples/ha_example.h
@@ -45,9 +45,7 @@ class ha_example: public handler
EXAMPLE_SHARE *share; /* Shared lock info */
public:
- ha_example(TABLE *table): handler(table)
- {
- }
+ ha_example(TABLE *table_arg);
~ha_example()
{
}
diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc
index a030960d08a..1e2751f3016 100644
--- a/sql/examples/ha_tina.cc
+++ b/sql/examples/ha_tina.cc
@@ -54,6 +54,26 @@ pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
static int tina_init= 0;
+static handlerton tina_hton= {
+ "CSV",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
/*****************************************************************************
** TINA tables
*****************************************************************************/
@@ -228,6 +248,20 @@ byte * find_eoln(byte *data, off_t begin, off_t end)
return 0;
}
+
+ha_tina::ha_tina(TABLE *table_arg)
+ :handler(&tina_hton, table_arg),
+ /*
+ These definitions are found in handler.h
+ These are probably not completely right.
+ */
+ current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH)
+{
+ /* Set our original buffers from pre-allocated memory */
+ buffer.set(byte_buffer, IO_SIZE, system_charset_info);
+ chain= chain_buffer;
+}
+
/*
Encode a buffer into the quoted format.
*/
diff --git a/sql/examples/ha_tina.h b/sql/examples/ha_tina.h
index 22193c01013..5679d77a4dc 100644
--- a/sql/examples/ha_tina.h
+++ b/sql/examples/ha_tina.h
@@ -49,18 +49,8 @@ class ha_tina: public handler
byte chain_alloced;
uint32 chain_size;
- public:
- ha_tina(TABLE *table): handler(table),
- /*
- These definitions are found in hanler.h
- Theses are not probably completely right.
- */
- current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH)
- {
- /* Set our original buffers from pre-allocated memory */
- buffer.set(byte_buffer, IO_SIZE, system_charset_info);
- chain = chain_buffer;
- }
+public:
+ ha_tina(TABLE *table_arg);
~ha_tina()
{
if (chain_alloced)
diff --git a/sql/field.cc b/sql/field.cc
index 925fca8ac43..224b6c279f3 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -47,6 +47,8 @@ uchar Field_null::null[1]={1};
const char field_separator=',';
#define DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE 320
+#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
+((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))
/*
Rules for merging different types of fields in UNION
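
The new BLOB_PACK_LENGTH_TO_MAX_LENGH macro above yields the largest length representable in min(pack_length, 4) bytes, replacing the open-coded (1L << min(blob_pack_length,3)*8)-1L expression previously used in the Field_blob constructor further down; for pack lengths 1 through 4 it gives 255, 65535, 16777215 and 4294967295. A standalone sketch of the same computation (plain C++, without the LL()/min() helpers from the MySQL headers):

#include <algorithm>
#include <cstdio>

/* Largest value that fits in min(pack_length, 4) bytes. */
static unsigned long blob_pack_length_to_max_length(unsigned int pack_length)
{
  unsigned int bytes= std::min(pack_length, 4u);
  return (unsigned long) ((1ULL << (bytes * 8)) - 1ULL);
}

int main()
{
  for (unsigned int i= 1; i <= 4; i++)
    std::printf("pack length %u -> max length %lu\n",
                i, blob_pack_length_to_max_length(i));
  return 0;
}
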
@@ -4466,16 +4468,16 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
my_time_t tmp= 0;
int error;
bool have_smth_to_conv;
- bool in_dst_time_gap;
+ my_bool in_dst_time_gap;
THD *thd= table->in_use;
+ /* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */
have_smth_to_conv= (str_to_datetime(from, len, &l_time,
- ((table->in_use->variables.sql_mode &
- MODE_NO_ZERO_DATE) |
- MODE_NO_ZERO_IN_DATE),
- &error) >
+ (table->in_use->variables.sql_mode &
+ MODE_NO_ZERO_DATE) |
+ MODE_NO_ZERO_IN_DATE, &error) >
MYSQL_TIMESTAMP_ERROR);
-
+
if (error || !have_smth_to_conv)
{
error= 1;
@@ -4488,16 +4490,15 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
{
if (!(tmp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap)))
{
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
from, len, MYSQL_TIMESTAMP_DATETIME, !error);
-
error= 1;
}
else if (in_dst_time_gap)
{
set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_INVALID_TIMESTAMP,
+ ER_WARN_INVALID_TIMESTAMP,
from, len, MYSQL_TIMESTAMP_DATETIME, !error);
error= 1;
}
@@ -4522,8 +4523,8 @@ int Field_timestamp::store(double nr)
int error= 0;
if (nr < 0 || nr > 99991231235959.0)
{
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE,
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_DATA_OUT_OF_RANGE,
nr, MYSQL_TIMESTAMP_DATETIME);
nr= 0; // Avoid overflow on buff
error= 1;
@@ -4538,38 +4539,38 @@ int Field_timestamp::store(longlong nr)
TIME l_time;
my_time_t timestamp= 0;
int error;
- bool in_dst_time_gap;
+ my_bool in_dst_time_gap;
THD *thd= table->in_use;
- if (number_to_datetime(nr, &l_time, 0, &error))
+ /* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */
+ longlong tmp= number_to_datetime(nr, &l_time, (thd->variables.sql_mode &
+ MODE_NO_ZERO_DATE) |
+ MODE_NO_ZERO_IN_DATE, &error);
+ if (tmp == LL(-1))
+ {
+ error= 2;
+ }
+
+ if (!error && tmp)
{
if (!(timestamp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap)))
{
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE,
- nr, MYSQL_TIMESTAMP_DATETIME, 1);
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_DATA_OUT_OF_RANGE,
+ nr, MYSQL_TIMESTAMP_DATETIME, 1);
error= 1;
}
-
if (in_dst_time_gap)
{
set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_INVALID_TIMESTAMP,
- nr, MYSQL_TIMESTAMP_DATETIME, !error);
+ ER_WARN_INVALID_TIMESTAMP,
+ nr, MYSQL_TIMESTAMP_DATETIME, 1);
error= 1;
}
- }
- else if (error)
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- WARN_DATA_TRUNCATED,
- nr, MYSQL_TIMESTAMP_DATETIME, 1);
- if (!error && timestamp == 0 &&
- (table->in_use->variables.sql_mode & MODE_NO_ZERO_DATE))
- {
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ } else if (error)
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED,
nr, MYSQL_TIMESTAMP_DATETIME, 1);
- }
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
@@ -4579,7 +4580,7 @@ int Field_timestamp::store(longlong nr)
else
#endif
longstore(ptr,(uint32) timestamp);
-
+
return error;
}
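
Field_timestamp::store(longlong) now runs the number through number_to_datetime() with the NO_ZERO_DATE / NO_ZERO_IN_DATE bits taken from sql_mode; a return value of -1 is treated as an error (error= 2), a warning is raised, and a zero timestamp is stored. A rough standalone sketch of that control flow (the validation here is deliberately simplified and is not the real number_to_datetime()):

#include <cstdio>

/* Very rough stand-in: accept YYYYMMDDHHMMSS values with sane
   month/day fields, return -1 for anything else. */
static long long number_to_datetime_sketch(long long nr, int *error)
{
  *error= 0;
  if (nr == 0)
    return 0;
  long long date= nr / 1000000;          /* YYYYMMDD part */
  long long month= (date / 100) % 100;
  long long day= date % 100;
  if (nr < 0 || month < 1 || month > 12 || day < 1 || day > 31)
  {
    *error= 1;
    return -1;                            /* invalid datetime */
  }
  return nr;
}

int main()
{
  int error;
  long long tmp= number_to_datetime_sketch(20051356000000LL, &error);
  if (tmp == -1)
    error= 2;                             /* flag the value as rejected */
  std::printf("result=%lld error=%d\n", tmp, error);
  return 0;
}
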
@@ -5152,14 +5153,14 @@ int Field_date::store(const char *from, uint len,CHARSET_INFO *cs)
TIME l_time;
uint32 tmp;
int error;
-
+
if (str_to_datetime(from, len, &l_time, TIME_FUZZY_DATE |
(table->in_use->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
MODE_INVALID_DATES)),
&error) <= MYSQL_TIMESTAMP_ERROR)
{
- tmp=0;
+ tmp= 0;
error= 2;
}
else
@@ -5183,63 +5184,57 @@ int Field_date::store(const char *from, uint len,CHARSET_INFO *cs)
int Field_date::store(double nr)
{
- long tmp;
+ longlong tmp;
int error= 0;
if (nr >= 19000000000000.0 && nr <= 99991231235959.0)
nr=floor(nr/1000000.0); // Timestamp to date
if (nr < 0.0 || nr > 99991231.0)
{
- tmp=0L;
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE,
+ tmp= LL(0);
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_DATA_OUT_OF_RANGE,
nr, MYSQL_TIMESTAMP_DATE);
error= 1;
}
else
- tmp=(long) rint(nr);
-
- /*
- We don't need to check for zero dates here as this date type is only
- used in .frm tables from very old MySQL versions
- */
+ tmp= (longlong) rint(nr);
-#ifdef WORDS_BIGENDIAN
- if (table->s->db_low_byte_first)
- {
- int4store(ptr,tmp);
- }
- else
-#endif
- longstore(ptr,tmp);
- return error;
+ return Field_date::store(tmp);
}
int Field_date::store(longlong nr)
{
- long tmp;
- int error= 0;
- if (nr >= LL(19000000000000) && nr < LL(99991231235959))
- nr=nr/LL(1000000); // Timestamp to date
- if (nr < 0 || nr > LL(99991231))
+ TIME not_used;
+ int error;
+ longlong initial_nr= nr;
+
+ nr= number_to_datetime(nr, &not_used, (TIME_FUZZY_DATE |
+ (table->in_use->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE |
+ MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))), &error);
+
+ if (nr == LL(-1))
{
- tmp=0L;
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE,
- nr, MYSQL_TIMESTAMP_DATE, 0);
- error= 1;
+ nr= 0;
+ error= 2;
}
- else
- tmp=(long) nr;
+
+ if (error)
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ error == 2 ? ER_WARN_DATA_OUT_OF_RANGE :
+ WARN_DATA_TRUNCATED, initial_nr,
+ MYSQL_TIMESTAMP_DATETIME, 1);
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
{
- int4store(ptr,tmp);
+ int4store(ptr, nr);
}
else
#endif
- longstore(ptr,tmp);
+ longstore(ptr, nr);
return error;
}
@@ -5363,7 +5358,7 @@ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs)
MODE_INVALID_DATES))),
&error) <= MYSQL_TIMESTAMP_ERROR)
{
- tmp=0L;
+ tmp= 0L;
error= 2;
}
else
@@ -5372,7 +5367,7 @@ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs)
if (error)
set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED,
from, len, MYSQL_TIMESTAMP_DATE, 1);
-
+
int3store(ptr,tmp);
return error;
}
@@ -5383,7 +5378,7 @@ int Field_newdate::store(double nr)
if (nr < 0.0 || nr > 99991231235959.0)
{
int3store(ptr,(int32) 0);
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, nr, MYSQL_TIMESTAMP_DATE);
return 1;
}
@@ -5393,52 +5388,28 @@ int Field_newdate::store(double nr)
int Field_newdate::store(longlong nr)
{
- int32 tmp;
- int error= 0;
- if (nr >= LL(100000000) && nr <= LL(99991231235959))
- nr=nr/LL(1000000); // Timestamp to date
- if (nr < 0L || nr > 99991231L)
- {
- tmp=0;
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE, nr,
- MYSQL_TIMESTAMP_DATE, 1);
- error= 1;
+ TIME l_time;
+ longlong tmp;
+ int error;
+ if (number_to_datetime(nr, &l_time,
+ (TIME_FUZZY_DATE |
+ (table->in_use->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))),
+ &error) == LL(-1))
+ {
+ tmp= 0L;
+ error= 2;
}
else
- {
- uint month, day;
+ tmp= l_time.day + l_time.month*32 + l_time.year*16*32;
- tmp=(int32) nr;
- if (tmp)
- {
- if (tmp < YY_PART_YEAR*10000L) // Fix short dates
- tmp+= (uint32) 20000000L;
- else if (tmp < 999999L)
- tmp+= (uint32) 19000000L;
-
- month= (uint) ((tmp/100) % 100);
- day= (uint) (tmp%100);
- if (month > 12 || day > 31)
- {
- tmp=0L; // Don't allow date to change
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE, nr,
- MYSQL_TIMESTAMP_DATE, 1);
- error= 1;
- }
- else
- tmp= day + month*32 + (tmp/10000)*16*32;
- }
- else if (table->in_use->variables.sql_mode & MODE_NO_ZERO_DATE)
- {
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE,
- 0, MYSQL_TIMESTAMP_DATE);
- error= 1;
- }
- }
- int3store(ptr, tmp);
+ if (error)
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ error == 2 ? ER_WARN_DATA_OUT_OF_RANGE :
+ WARN_DATA_TRUNCATED,nr,MYSQL_TIMESTAMP_DATE, 1);
+
+ int3store(ptr,tmp);
return error;
}
@@ -5565,7 +5536,7 @@ int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs)
int error;
ulonglong tmp= 0;
enum enum_mysql_timestamp_type func_res;
-
+
func_res= str_to_datetime(from, len, &time_tmp,
(TIME_FUZZY_DATE |
(table->in_use->variables.sql_mode &
@@ -5578,7 +5549,7 @@ int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs)
error= 1; // Fix if invalid zero date
if (error)
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
from, len, MYSQL_TIMESTAMP_DATETIME, 1);
@@ -5615,21 +5586,25 @@ int Field_datetime::store(longlong nr)
TIME not_used;
int error;
longlong initial_nr= nr;
-
- nr= number_to_datetime(nr, &not_used, 1, &error);
- if (error)
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- WARN_DATA_TRUNCATED, initial_nr,
- MYSQL_TIMESTAMP_DATETIME, 1);
- else if (nr == 0 && table->in_use->variables.sql_mode & MODE_NO_ZERO_DATE)
+ nr= number_to_datetime(nr, &not_used, (TIME_FUZZY_DATE |
+ (table->in_use->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE |
+ MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))), &error);
+
+ if (nr == LL(-1))
{
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WARN_DATA_OUT_OF_RANGE,
- initial_nr, MYSQL_TIMESTAMP_DATE, 1);
- error= 1;
+ nr= 0;
+ error= 2;
}
+ if (error)
+ set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ error == 2 ? ER_WARN_DATA_OUT_OF_RANGE :
+ WARN_DATA_TRUNCATED, initial_nr,
+ MYSQL_TIMESTAMP_DATETIME, 1);
+
#ifdef WORDS_BIGENDIAN
if (table->s->db_low_byte_first)
{
@@ -6696,7 +6671,7 @@ Field_blob::Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,uint blob_pack_length,
CHARSET_INFO *cs)
- :Field_longstr(ptr_arg, (1L << min(blob_pack_length,3)*8)-1L,
+ :Field_longstr(ptr_arg, BLOB_PACK_LENGTH_TO_MAX_LENGH(blob_pack_length),
null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg,
table_arg, cs),
packlength(blob_pack_length)
@@ -6933,8 +6908,8 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
{
- char *blob;
- memcpy_fixed(&blob, ptr+packlength, sizeof(char*));
+ const char *blob;
+ memcpy_fixed(&blob, ptr+packlength, sizeof(const char*));
if (!blob)
blob= "";
str2my_decimal(E_DEC_FATAL_ERROR, blob, get_length(ptr), charset(),
@@ -7871,7 +7846,10 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
{
set_rec_bits(0xff, bit_ptr, bit_ofs, bit_len);
memset(ptr, 0xff, field_length);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ if (table->in_use->really_abort_on_warning())
+ set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
+ else
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
/* delta is >= -1 here */
@@ -8088,7 +8066,10 @@ int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
memset(ptr, 0xff, field_length);
if (bits)
*ptr&= ((1 << bits) - 1); /* set first byte */
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ if (table->in_use->really_abort_on_warning())
+ set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1);
+ else
+ set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
bzero(ptr, delta);
@@ -8485,8 +8466,12 @@ create_field::create_field(Field *old_field,Field *orig_field)
else
interval=0;
def=0;
- if (!old_field->is_real_null() && ! (flags & BLOB_FLAG) &&
- old_field->ptr && orig_field)
+ if (!(flags & (NO_DEFAULT_VALUE_FLAG | BLOB_FLAG)) &&
+ !old_field->is_real_null() &&
+ old_field->ptr && orig_field &&
+ (sql_type != FIELD_TYPE_TIMESTAMP || /* set def only if */
+ old_field->table->timestamp_field != old_field || /* timestamp field */
+ unireg_check == Field::TIMESTAMP_UN_FIELD)) /* has default val */
{
char buff[MAX_FIELD_WIDTH],*pos;
String tmp(buff,sizeof(buff), charset), *res;
diff --git a/sql/field.h b/sql/field.h
index 523cf444c30..2b67ed3f599 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1204,6 +1204,7 @@ public:
int store(longlong nr);
int store_decimal(const my_decimal *);
void get_key_image(char *buff,uint length,imagetype type);
+ uint size_of() const { return sizeof(*this); }
};
#endif /*HAVE_SPATIAL*/
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index 0dc82666f52..fc7347ef9af 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -322,7 +322,34 @@ static void do_field_real(Copy_field *copy)
}
+/*
+ string copy for single byte characters set when to string is shorter than
+ from string
+*/
+
static void do_cut_string(Copy_field *copy)
+{
+ CHARSET_INFO *cs= copy->from_field->charset();
+ memcpy(copy->to_ptr,copy->from_ptr,copy->to_length);
+
+ /* Check if we lost any important characters */
+ if (cs->cset->scan(cs,
+ copy->from_ptr + copy->to_length,
+ copy->from_ptr + copy->from_length,
+ MY_SEQ_SPACES) < copy->from_length - copy->to_length)
+ {
+ copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ WARN_DATA_TRUNCATED, 1);
+ }
+}
+
+
+/*
+ string copy for multi byte characters set when to string is shorter than
+ from string
+*/
+
+static void do_cut_string_complex(Copy_field *copy)
{ // Shorter string field
int well_formed_error;
CHARSET_INFO *cs= copy->from_field->charset();
@@ -349,6 +376,8 @@ static void do_cut_string(Copy_field *copy)
}
+
+
static void do_expand_string(Copy_field *copy)
{
CHARSET_INFO *cs= copy->from_field->charset();
@@ -550,7 +579,8 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*)
do_varstring1 : do_varstring2);
}
else if (to_length < from_length)
- return do_cut_string;
+ return (from->charset()->mbmaxlen == 1 ?
+ do_cut_string : do_cut_string_complex);
else if (to_length > from_length)
return do_expand_string;
}
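
The copy path for truncating string fields is now split in two: do_cut_string() handles single-byte character sets by copying the prefix and warning only when the discarded tail holds something other than spaces, while do_cut_string_complex() keeps the multi-byte well-formedness logic, and get_copy_func() picks one based on mbmaxlen. A standalone sketch of the single-byte case (generic code, a plain bool instead of the field warning machinery):

#include <cstring>
#include <cstdio>

/* Copy the first to_length bytes; report truncation only when the
   discarded tail contains non-space characters. */
static bool copy_and_cut(char *to, size_t to_length,
                         const char *from, size_t from_length)
{
  std::memcpy(to, from, to_length);
  for (size_t i= to_length; i < from_length; i++)
    if (from[i] != ' ')
      return true;                  /* important data was truncated */
  return false;                     /* only trailing spaces were lost */
}

int main()
{
  char to[4];
  bool warn1= copy_and_cut(to, sizeof(to), "abcd    ", 8); /* no warning */
  bool warn2= copy_and_cut(to, sizeof(to), "abcdefgh", 8); /* warning */
  std::printf("warn1=%d warn2=%d\n", (int) warn1, (int) warn2);
  return 0;
}
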
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 568fb727e63..793029ab4c7 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -120,7 +120,11 @@ static handlerton berkeley_hton = {
NULL, /* prepare */
NULL, /* recover */
NULL, /* commit_by_xid */
- NULL /* rollback_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_CLOSE_CURSORS_AT_COMMIT
};
typedef struct st_berkeley_trx_data {
@@ -372,6 +376,17 @@ void berkeley_cleanup_log_files(void)
/*****************************************************************************
** Berkeley DB tables
*****************************************************************************/
+
+ha_berkeley::ha_berkeley(TABLE *table_arg)
+ :handler(&berkeley_hton, table_arg), alloc_ptr(0), rec_buff(0), file(0),
+ int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
+ HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
+ HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
+ HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
+ changed_rows(0), last_dup_key((uint) -1), version(0), using_ignore(0)
+{}
+
+
static const char *ha_berkeley_exts[] = {
ha_berkeley_ext,
NullS
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index f6376939445..aa92908ecde 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -85,12 +85,7 @@ class ha_berkeley: public handler
DBT *get_pos(DBT *to, byte *pos);
public:
- ha_berkeley(TABLE *table): handler(table), alloc_ptr(0),rec_buff(0), file(0),
- int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ |
- HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
- HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX),
- changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0) {}
+ ha_berkeley(TABLE *table_arg);
~ha_berkeley() {}
const char *table_type() const { return "BerkeleyDB"; }
ulong index_flags(uint idx, uint part, bool all_parts) const;
diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc
index 6abbe983f48..43a286a541f 100644
--- a/sql/ha_blackhole.cc
+++ b/sql/ha_blackhole.cc
@@ -24,6 +24,37 @@
#include "ha_blackhole.h"
+/* Blackhole storage engine handlerton */
+
+static handlerton blackhole_hton= {
+ "BLACKHOLE",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
+/*****************************************************************************
+** BLACKHOLE tables
+*****************************************************************************/
+
+ha_blackhole::ha_blackhole(TABLE *table_arg)
+ :handler(&blackhole_hton, table_arg)
+{}
+
+
static const char *ha_blackhole_exts[] = {
NullS
};
diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h
index 84a386e17f8..2dccabf17cc 100644
--- a/sql/ha_blackhole.h
+++ b/sql/ha_blackhole.h
@@ -28,9 +28,7 @@ class ha_blackhole: public handler
THR_LOCK thr_lock;
public:
- ha_blackhole(TABLE *table): handler(table)
- {
- }
+ ha_blackhole(TABLE *table_arg);
~ha_blackhole()
{
}
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index 383652e4a3a..639f09d10ca 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -54,6 +54,7 @@
***IMPORTANT***
+ This is a first, conceptual release
Only 'mysql://' is supported at this release.
@@ -352,7 +353,8 @@
#ifdef HAVE_FEDERATED_DB
#include "ha_federated.h"
-#define MAX_REMOTE_SIZE IO_SIZE
+
+#include "m_string.h"
/* Variables for federated share methods */
static HASH federated_open_tables; // Hash used to track open
// tables
@@ -413,13 +415,14 @@ bool federated_db_end()
return FALSE;
}
-
/*
Check (in create) whether the tables exists, and that it can be connected to
SYNOPSIS
check_foreign_data_source()
share pointer to FEDERATED share
+ table_create_flag tells us that ::create is the caller,
+ therefore, return CANT_CREATE_FEDERATED_TABLE
DESCRIPTION
This method first checks that the connection information that parse url
@@ -427,23 +430,23 @@ bool federated_db_end()
table, and if so, does the foreign table exist.
*/
-static int check_foreign_data_source(FEDERATED_SHARE *share)
+static int check_foreign_data_source(
+ FEDERATED_SHARE *share,
+ bool table_create_flag)
{
- char escaped_table_base_name[IO_SIZE];
- MYSQL *mysql;
- MYSQL_RES *result=0;
+ char escaped_table_name[NAME_LEN*2];
+ char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
uint error_code;
- char query_buffer[IO_SIZE];
- char error_buffer[IO_SIZE];
String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ MYSQL *mysql;
DBUG_ENTER("ha_federated::check_foreign_data_source");
+ /* Zero the length, otherwise the string will have misc chars */
query.length(0);
/* error out if we can't alloc memory for mysql_init(NULL) (per Georg) */
- if (! (mysql= mysql_init(NULL)))
- {
+ if (!(mysql= mysql_init(NULL)))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- }
/* check if we can connect */
if (!mysql_real_connect(mysql,
share->hostname,
@@ -453,11 +456,18 @@ static int check_foreign_data_source(FEDERATED_SHARE *share)
share->port,
share->socket, 0))
{
+ /*
+ we want the correct error message, but we want it to return
+ ER_CANT_CREATE_FEDERATED_TABLE if called by ::create
+ */
+ error_code= table_create_flag?
+ ER_CANT_CREATE_FEDERATED_TABLE : ER_CONNECT_TO_FOREIGN_DATA_SOURCE;
+
my_sprintf(error_buffer,
- (error_buffer,
- "unable to connect to database '%s' on host '%s as user '%s' !",
- share->database, share->hostname, share->username));
- error_code= ER_CONNECT_TO_MASTER;
+ (error_buffer, " database %s username %s hostname %s",
+ share->database, share->username, share->hostname));
+
+ my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), error_buffer);
goto error;
}
else
@@ -468,48 +478,42 @@ static int check_foreign_data_source(FEDERATED_SHARE *share)
with transactions
*/
mysql->reconnect= 1;
- /*
+ /*
Note: I am not using INORMATION_SCHEMA because this needs to work with < 5.0
if we can connect, then make sure the table exists
+
+ the query will be: SELECT * FROM `tablename` WHERE 1=0
*/
- query.append("SHOW TABLES LIKE '");
- escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_base_name,
- sizeof(escaped_table_base_name),
- share->table_base_name,
- share->table_base_name_length);
- query.append(escaped_table_base_name);
- query.append("'");
-
- error_code= ER_QUERY_ON_MASTER;
+ query.append(FEDERATED_SELECT);
+ query.append(FEDERATED_STAR);
+ query.append(FEDERATED_FROM);
+ query.append(FEDERATED_BTICK);
+ escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name,
+ sizeof(escaped_table_name),
+ share->table_name,
+ share->table_name_length);
+ query.append(escaped_table_name);
+ query.append(FEDERATED_BTICK);
+ query.append(FEDERATED_WHERE);
+ query.append(FEDERATED_FALSE);
+
+ DBUG_PRINT("info", ("check_foreign_data_source query %s", query.c_ptr_quick()));
if (mysql_real_query(mysql, query.ptr(), query.length()))
- goto error;
-
- result= mysql_store_result(mysql);
- if (! result)
- goto error;
-
- /* if ! mysql_num_rows, the table doesn't exist, send error */
- if (! mysql_num_rows(result))
{
- my_sprintf(error_buffer,
- (error_buffer, "foreign table '%s' does not exist!",
- share->table_base_name));
+ error_code= table_create_flag ?
+ ER_CANT_CREATE_FEDERATED_TABLE : ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST;
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+
+ my_error(error_code, MYF(0), error_buffer);
goto error;
}
- mysql_free_result(result);
- result= 0;
- mysql_close(mysql);
-
}
- DBUG_RETURN(0);
+ error_code=0;
error:
- if (result)
- mysql_free_result(result);
mysql_close(mysql);
- my_error(error_code, MYF(0), error_buffer);
DBUG_RETURN(error_code);
-
}
@@ -545,22 +549,27 @@ error:
'password' and 'port' are both optional.
RETURN VALUE
- 0 success
- 1 failure, wrong string format
+ 0 success
+ error_num particular error code
*/
static int parse_url(FEDERATED_SHARE *share, TABLE *table,
uint table_create_flag)
{
- uint error_num= (table_create_flag ? ER_CANT_CREATE_TABLE :
- ER_CONNECT_TO_MASTER);
+ uint error_num= (table_create_flag ?
+ ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE :
+ ER_FOREIGN_DATA_STRING_INVALID);
DBUG_ENTER("ha_federated::parse_url");
share->port= 0;
- share->socket= 0;
share->scheme= my_strdup(table->s->comment, MYF(0));
+ DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme));
+ /*
+ remove addition of null terminator and store length
+ for each string in share
+ */
if ((share->username= strstr(share->scheme, "://")))
{
share->scheme[share->username - share->scheme]= '\0';
@@ -613,20 +622,20 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
share->port= atoi(share->sport);
}
- if ((share->table_base_name= strchr(share->database, '/')))
+ if ((share->table_name= strchr(share->database, '/')))
{
- share->database[share->table_base_name - share->database]= '\0';
- share->table_base_name++;
+ share->database[share->table_name - share->database]= '\0';
+ share->table_name++;
}
else
goto error;
- share->table_base_name_length= strlen(share->table_base_name);
+ share->table_name_length= strlen(share->table_name);
}
else
goto error;
/* make sure there's not an extra / */
- if ((strchr(share->table_base_name, '/')))
+ if ((strchr(share->table_name, '/')))
goto error;
if (share->hostname[0] == '\0')
@@ -645,7 +654,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
hostname %s port %d database %s tablename %s\n",
share->scheme, share->username, share->password,
share->hostname, share->port, share->database,
- share->table_base_name));
+ share->table_name));
}
else
goto error;
@@ -656,13 +665,54 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
DBUG_RETURN(0);
error:
- my_error(error_num, MYF(0),
- "connection string is not in the correct format",0);
- DBUG_RETURN(1);
+ if (share->scheme)
+ {
+ DBUG_PRINT("info",
+ ("error: parse_url. Returning error code %d \
+ freeing share->scheme %lx", error_num, share->scheme));
+ my_free((gptr) share->scheme, MYF(0));
+ share->scheme= 0;
+ }
+ my_error(error_num, MYF(0), table->s->comment);
+ DBUG_RETURN(error_num);
}
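
parse_url() keeps reading the connection string from the table comment in the form scheme://user[:password]@host[:port]/database/table (password and port are optional), but on failure it now frees share->scheme and raises the new FOREIGN_DATA_STRING_INVALID error codes instead of ER_CONNECT_TO_MASTER. A rough standalone sketch of splitting that URL shape (std::string based and purely illustrative; the real parser works in place with strstr()/strchr()):

#include <cstdio>
#include <string>

struct connect_info
{
  std::string scheme, user, password, host, port, database, table;
};

/* Split "mysql://user:pass@host:9306/db/tbl"; returns false on a
   malformed string, mirroring parse_url()'s error path. */
static bool split_connect_string(const std::string &s, connect_info *out)
{
  std::string::size_type p= s.find("://");
  if (p == std::string::npos) return false;
  out->scheme= s.substr(0, p);
  std::string rest= s.substr(p + 3);

  std::string::size_type at= rest.find('@');
  if (at == std::string::npos) return false;
  std::string cred= rest.substr(0, at), loc= rest.substr(at + 1);

  std::string::size_type colon= cred.find(':');
  out->user= cred.substr(0, colon);
  if (colon != std::string::npos) out->password= cred.substr(colon + 1);

  std::string::size_type slash= loc.find('/');
  if (slash == std::string::npos) return false;
  std::string hostport= loc.substr(0, slash), path= loc.substr(slash + 1);

  colon= hostport.find(':');
  out->host= hostport.substr(0, colon);
  if (colon != std::string::npos) out->port= hostport.substr(colon + 1);

  slash= path.find('/');
  if (slash == std::string::npos) return false;    /* need db/table */
  out->database= path.substr(0, slash);
  out->table= path.substr(slash + 1);
  return out->table.find('/') == std::string::npos; /* no extra '/' */
}

int main()
{
  connect_info c;
  if (split_connect_string("mysql://user:pass@remote_host:9306/federated/t1", &c))
    std::printf("host=%s port=%s db=%s table=%s\n",
                c.host.c_str(), c.port.c_str(),
                c.database.c_str(), c.table.c_str());
  return 0;
}
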
+/* Federated storage engine handlerton */
+
+static handlerton federated_hton= {
+ "FEDERATED",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
+
+/*****************************************************************************
+** FEDERATED tables
+*****************************************************************************/
+
+ha_federated::ha_federated(TABLE *table_arg)
+ :handler(&federated_hton, table_arg),
+ mysql(0), stored_result(0), scan_flag(0),
+ ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0)
+{}
+
+
/*
Convert MySQL result set row to handler internal format
@@ -684,25 +734,115 @@ error:
uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
{
- ulong *lengths;
uint num_fields;
- uint x= 0;
+ ulong *lengths;
+ Field **field;
DBUG_ENTER("ha_federated::convert_row_to_internal_format");
- num_fields= mysql_num_fields(result);
- lengths= mysql_fetch_lengths(result);
+ num_fields= mysql_num_fields(stored_result);
+ lengths= mysql_fetch_lengths(stored_result);
memset(record, 0, table->s->null_bytes);
- for (Field **field= table->field; *field; field++, x++)
+ for (field= table->field; *field; field++)
{
+ /*
+ index variable to move us through the row at the
+ same iterative step as the field
+ */
+ int x= field - table->field;
+ my_ptrdiff_t old_ptr;
+ old_ptr= (my_ptrdiff_t) (record - table->record[0]);
+ (*field)->move_field(old_ptr);
if (!row[x])
(*field)->set_null();
else
+ {
+ (*field)->set_notnull();
(*field)->store(row[x], lengths[x], &my_charset_bin);
+ }
+ (*field)->move_field(-old_ptr);
+ }
+
+ DBUG_RETURN(0);
+}
+
+static bool emit_key_part_name(String *to, KEY_PART_INFO *part)
+{
+ DBUG_ENTER("emit_key_part_name");
+ if (to->append(FEDERATED_BTICK) ||
+ to->append(part->field->field_name) ||
+ to->append(FEDERATED_BTICK))
+ DBUG_RETURN(1); // Out of memory
+ DBUG_RETURN(0);
+}
+
+static bool emit_key_part_element(String *to, KEY_PART_INFO *part,
+ bool needs_quotes, bool is_like,
+ const byte *ptr, uint len)
+{
+ Field *field= part->field;
+ DBUG_ENTER("emit_key_part_element");
+
+ if (needs_quotes && to->append(FEDERATED_SQUOTE))
+ DBUG_RETURN(1);
+
+ if (part->type == HA_KEYTYPE_BIT)
+ {
+ char buff[STRING_BUFFER_USUAL_SIZE], *buf= buff;
+
+ *buf++= '0';
+ *buf++= 'x';
+ for (; len; ptr++,len--)
+ {
+ uint tmp= (uint)(uchar) *ptr;
+ *buf++= _dig_vec_upper[tmp >> 4];
+ *buf++= _dig_vec_upper[tmp & 15];
+ }
+ if (to->append(buff, (uint)(buf - buff)))
+ DBUG_RETURN(1);
+ }
+ else if (part->key_part_flag & HA_BLOB_PART)
+ {
+ String blob;
+ uint blob_length= uint2korr(ptr);
+ blob.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH,
+ blob_length, &my_charset_bin);
+ if (append_escaped(to, &blob))
+ DBUG_RETURN(1);
+ }
+ else if (part->key_part_flag & HA_VAR_LENGTH_PART)
+ {
+ String varchar;
+ uint var_length= uint2korr(ptr);
+ varchar.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH,
+ var_length, &my_charset_bin);
+ if (append_escaped(to, &varchar))
+ DBUG_RETURN(1);
+ }
+ else
+ {
+ char strbuff[MAX_FIELD_WIDTH];
+ String str(strbuff, sizeof(strbuff), part->field->charset()), *res;
+
+ res= field->val_str(&str, (char *)ptr);
+
+ if (field->result_type() == STRING_RESULT)
+ {
+ if (append_escaped(to, res))
+ DBUG_RETURN(1);
+ }
+ else if (to->append(res->ptr(), res->length()))
+ DBUG_RETURN(1);
}
+ if (is_like && to->append(FEDERATED_PERCENT))
+ DBUG_RETURN(1);
+
+ if (needs_quotes && to->append(FEDERATED_SQUOTE))
+ DBUG_RETURN(1);
+
DBUG_RETURN(0);
}
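
emit_key_part_element() renders a single key value the way the remote server must see it: HA_KEYTYPE_BIT parts become a 0x... hex literal, BLOB and variable-length parts are read through their 2-byte length prefix and escaped, and everything else goes through val_str(). A standalone sketch of just the hex-literal branch (generic code; the digit table mirrors _dig_vec_upper):

#include <cstdio>
#include <string>

static const char dig_vec_upper[]= "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";

/* Render a byte string as a 0x... literal, as done for HA_KEYTYPE_BIT. */
static std::string bytes_to_hex_literal(const unsigned char *ptr, size_t len)
{
  std::string out= "0x";
  for (; len; ptr++, len--)
  {
    unsigned int tmp= *ptr;
    out+= dig_vec_upper[tmp >> 4];
    out+= dig_vec_upper[tmp & 15];
  }
  return out;
}

int main()
{
  const unsigned char key[]= { 0x0A, 0xFF, 0x01 };
  std::printf("%s\n", bytes_to_hex_literal(key, sizeof(key)).c_str()); /* 0x0AFF01 */
  return 0;
}
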
@@ -716,6 +856,8 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
key_info KEY struct pointer
key byte pointer containing key
key_length length of key
+ range_type 0 - no range, 1 - min range, 2 - max range
+ (see enum range_operation)
DESCRIPTION
Using iteration through all the keys via a KEY_PART_INFO pointer,
@@ -726,112 +868,402 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row)
0 After all keys have been accounted for to create the WHERE clause
1 No keys found
- */
+ Range flags Table per Timour:
+
+ -----------------
+ - start_key:
+ * ">" -> HA_READ_AFTER_KEY
+ * ">=" -> HA_READ_KEY_OR_NEXT
+ * "=" -> HA_READ_KEY_EXACT
+
+ - end_key:
+ * "<" -> HA_READ_BEFORE_KEY
+ * "<=" -> HA_READ_AFTER_KEY
+
+ records_in_range:
+ -----------------
+ - start_key:
+ * ">" -> HA_READ_AFTER_KEY
+ * ">=" -> HA_READ_KEY_EXACT
+ * "=" -> HA_READ_KEY_EXACT
+
+ - end_key:
+ * "<" -> HA_READ_BEFORE_KEY
+ * "<=" -> HA_READ_AFTER_KEY
+ * "=" -> HA_READ_AFTER_KEY
+
+0 HA_READ_KEY_EXACT, Find first record else error
+1 HA_READ_KEY_OR_NEXT, Record or next record
+2 HA_READ_KEY_OR_PREV, Record or previous
+3 HA_READ_AFTER_KEY, Find next rec. after key-record
+4 HA_READ_BEFORE_KEY, Find next rec. before key-record
+5 HA_READ_PREFIX, Key which as same prefix
+6 HA_READ_PREFIX_LAST, Last key with the same prefix
+7 HA_READ_PREFIX_LAST_OR_PREV, Last or prev key with the same prefix
+
+Flags that I've found:
+
+id, primary key, varchar
+
+id = 'ccccc'
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 0 end_key NULL
+
+id > 'ccccc'
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id < 'ccccc'
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 'ccccc'
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id >= 'ccccc'
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id like 'cc%cc'
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 'aaaaa' and id < 'ccccc'
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 'aaaaa' and id < 'ccccc';
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 'aaaaa' and id <= 'ccccc';
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 'aaaaa' and id <= 'ccccc';
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+numeric keys:
+
+id = 4
+index_read_idx: start_key 0 end_key NULL
+
+id > 4
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
+
+id >= 4
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id < 4
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 4
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id like 4
+full table scan, select * from
+
+id > 2 and id < 8
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 2 and id < 8
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 2 and id <= 8
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 2 and id <= 8
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+multi keys (id int, name varchar, other varchar)
+
+id = 1;
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 0 end_key NULL
+
+id > 4;
+id > 2 and name = '333'; remote: id > 2
+id > 2 and name > '333'; remote: id > 2
+id > 2 and name > '333' and other < 'ddd'; remote: id > 2 no results
+id > 2 and name >= '333' and other < 'ddd'; remote: id > 2 1 result
+id >= 4 and name = 'eric was here' and other > 'eeee';
+records_in_range: start_key 3 end_key NULL
+read_range_first: start_key 3 end_key NULL
-bool ha_federated::create_where_from_key(String *to, KEY *key_info,
- const byte *key, uint key_length)
+id >= 4;
+id >= 2 and name = '333' and other < 'ddd';
+remote: `id` >= 2 AND `name` >= '333';
+records_in_range: start_key 0 end_key NULL
+read_range_first: start_key 1 end_key NULL
+
+id < 4;
+id < 3 and name = '222' and other <= 'ccc'; remote: id < 3
+records_in_range: start_key NULL end_key 4
+read_range_first: start_key NULL end_key 4
+
+id <= 4;
+records_in_range: start_key NULL end_key 3
+read_range_first: start_key NULL end_key 3
+
+id like 4;
+full table scan
+
+id > 2 and id < 4;
+records_in_range: start_key 3 end_key 4
+read_range_first: start_key 3 end_key 4
+
+id >= 2 and id < 4;
+records_in_range: start_key 0 end_key 4
+read_range_first: start_key 1 end_key 4
+
+id >= 2 and id <= 4;
+records_in_range: start_key 0 end_key 3
+read_range_first: start_key 1 end_key 3
+
+id > 2 and id <= 4;
+id = 6 and name = 'eric was here' and other > 'eeee';
+remote: (`id` > 6 AND `name` > 'eric was here' AND `other` > 'eeee')
+AND (`id` <= 6) AND ( AND `name` <= 'eric was here')
+no results
+records_in_range: start_key 3 end_key 3
+read_range_first: start_key 3 end_key 3
+
+Summary:
+
+* If the start key flag is 0 the max key flag shouldn't even be set,
+ and if it is, the query produced would be invalid.
+* Multipart keys, even if containing some or all numeric columns,
+ are treated the same as non-numeric keys
+
+ If the query is " = " (quotes or not):
+ - records in range start key flag HA_READ_KEY_EXACT,
+ end key flag HA_READ_AFTER_KEY (incorrect)
+ - any other: start key flag HA_READ_KEY_OR_NEXT,
+ end key flag HA_READ_AFTER_KEY (correct)
+
+* 'like' queries (of key)
+ - Numeric, full table scan
+ - Non-numeric
+ records_in_range: start_key 0 end_key 3
+ other : start_key 1 end_key 3
+
+* If the key flag is HA_READ_AFTER_KEY:
+ if start_key, append >
+ if end_key, append <=
+
+* If create_where_key was called by records_in_range:
+
+ - if the key is numeric:
+ start key flag is 0 when end key is NULL, end key flag is 3 or 4
+ - if create_where_key was called by any other function:
+ start key flag is 1 when end key is NULL, end key flag is 3 or 4
+ - if the key is non-numeric, or multipart
+ When the query is an exact match, the start key flag is 0,
+ end key flag is 3 for what should be a no-range condition where
+ you should have 0 and max key NULL, which it is if called by
+ read_range_first
+
+Conclusion:
+
+1. Need logic to determine if a key is min or max when the flag is
+HA_READ_AFTER_KEY, and handle appending correct operator accordingly
+
+2. Need a boolean flag to pass to create_where_from_key, used in the
+switch statement. Add 1 to the flag if:
+ - start key flag is HA_READ_KEY_EXACT and the end key is NULL
+
+*/
+
+bool ha_federated::create_where_from_key(String *to,
+ KEY *key_info,
+ const key_range *start_key,
+ const key_range *end_key,
+ bool records_in_range)
{
- uint second_loop= 0;
- KEY_PART_INFO *key_part;
- bool needs_quotes;
- String tmp;
+ bool both_not_null=
+ (start_key != NULL && end_key != NULL) ? TRUE : FALSE;
+ const byte *ptr;
+ uint remainder, length;
+ char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE];
+ String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
+ const key_range *ranges[2]= { start_key, end_key };
DBUG_ENTER("ha_federated::create_where_from_key");
- for (key_part= key_info->key_part; (int) key_length > 0; key_part++)
- {
- Field *field= key_part->field;
- needs_quotes= field->needs_quotes();
- uint length= key_part->length;
+ tmp.length(0);
+ if (start_key == NULL && end_key == NULL)
+ DBUG_RETURN(1);
- if (second_loop++ && to->append(" AND ", 5))
- DBUG_RETURN(1);
- if (to->append('`') || to->append(field->field_name) || to->append("` ", 2))
- DBUG_RETURN(1); // Out of memory
+ for (int i= 0; i <= 1; i++)
+ {
+ bool needs_quotes;
+ uint loop_counter= 0;
+ KEY_PART_INFO *key_part;
+ if (ranges[i] == NULL)
+ continue;
+ const byte *key= ranges[i]->key;
+ uint key_length= ranges[i]->length;
- if (key_part->null_bit)
+ if (both_not_null)
{
- if (*key++)
- {
- if (to->append("IS NULL", 7))
- DBUG_RETURN(1);
-
- DBUG_PRINT("info",
- ("NULL type %s", to->c_ptr_quick()));
- key_length-= key_part->store_length;
- key+= key_part->store_length - 1;
- continue;
- }
- key_length--;
+ if (i > 0)
+ tmp.append(FEDERATED_CONJUNCTION);
+ else
+ tmp.append(FEDERATED_OPENPAREN);
}
- if (to->append("= "))
- DBUG_RETURN(1);
- if (needs_quotes && to->append("'"))
- DBUG_RETURN(1);
- if (key_part->type == HA_KEYTYPE_BIT)
- {
- /* This is can be treated as a hex string */
- Field_bit *field= (Field_bit *) (key_part->field);
- char buff[64 + 2], *ptr;
- byte *end= (byte*)(key)+length;
-
- buff[0]= '0';
- buff[1]= 'x';
- for (ptr= buff + 2; key < end; key++)
- {
- uint tmp= (uint)(uchar) *key;
- *ptr++= _dig_vec_upper[tmp >> 4];
- *ptr++= _dig_vec_upper[tmp & 15];
- }
- if (to->append(buff, (uint)(ptr - buff)))
- DBUG_RETURN(1);
- key_length-= length;
- continue;
- }
- if (key_part->key_part_flag & HA_BLOB_PART)
+ for (key_part= key_info->key_part,
+ remainder= key_info->key_parts,
+ length= ranges[i]->length,
+ ptr= ranges[i]->key; ;
+ remainder--,
+ key_part++)
{
- uint blob_length= uint2korr(key);
- key+= HA_KEY_BLOB_LENGTH;
- key_length-= HA_KEY_BLOB_LENGTH;
+ Field *field= key_part->field;
+ uint store_length= key_part->store_length;
+ uint part_length= min(store_length, length);
+ needs_quotes= field->needs_quotes();
+ DBUG_DUMP("key, start of loop", (char *) ptr, length);
- tmp.set_quick((char*) key, blob_length, &my_charset_bin);
- if (append_escaped(to, &tmp))
- DBUG_RETURN(1);
+ if (key_part->null_bit)
+ {
+ if (*ptr++)
+ {
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(FEDERATED_ISNULL))
+ DBUG_RETURN(1);
+ continue;
+ }
+ }
- length= key_part->length;
- }
- else if (key_part->key_part_flag & HA_VAR_LENGTH_PART)
- {
- length= uint2korr(key);
- key+= HA_KEY_BLOB_LENGTH;
- tmp.set_quick((char*) key, length, &my_charset_bin);
- if (append_escaped(to, &tmp))
+ if (tmp.append(FEDERATED_OPENPAREN))
DBUG_RETURN(1);
- }
- else
- {
- char buff[MAX_FIELD_WIDTH];
- String str(buff, sizeof(buff), field->charset()), *res;
- res= field->val_str(&str, (char*) (key));
- if (field->result_type() == STRING_RESULT)
- {
- if (append_escaped(to, res))
+ switch(ranges[i]->flag) {
+ case(HA_READ_KEY_EXACT):
+ if (store_length >= length ||
+ !needs_quotes ||
+ key_part->type == HA_KEYTYPE_BIT ||
+ field->result_type() != STRING_RESULT)
+ {
+ if (emit_key_part_name(&tmp, key_part))
+ DBUG_RETURN(1);
+
+ if (records_in_range)
+ {
+ if (tmp.append(FEDERATED_GE))
+ DBUG_RETURN(1);
+ }
+ else
+ {
+ if (tmp.append(FEDERATED_EQ))
+ DBUG_RETURN(1);
+ }
+
+ if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ DBUG_RETURN(1);
+ }
+ else
+ /* LIKE */
+ {
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(FEDERATED_LIKE) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr,
+ part_length))
+ DBUG_RETURN(1);
+ }
+ break;
+ case(HA_READ_AFTER_KEY):
+ if (store_length >= length) /* end key */
+ {
+ if (emit_key_part_name(&tmp, key_part))
+ DBUG_RETURN(1);
+
+ if (i > 0) /* end key */
+ {
+ if (tmp.append(FEDERATED_LE))
+ DBUG_RETURN(1);
+ }
+ else /* start key */
+ {
+ if (tmp.append(FEDERATED_GT))
+ DBUG_RETURN(1);
+ }
+
+ if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ {
+ DBUG_RETURN(1);
+ }
+ break;
+ }
+ case(HA_READ_KEY_OR_NEXT):
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(FEDERATED_GE) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
DBUG_RETURN(1);
- res= field->val_str(&str, (char*) (key));
+ break;
+ case(HA_READ_BEFORE_KEY):
+ if (store_length >= length)
+ {
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(FEDERATED_LT) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ DBUG_RETURN(1);
+ break;
+ }
+ case(HA_READ_KEY_OR_PREV):
+ if (emit_key_part_name(&tmp, key_part) ||
+ tmp.append(FEDERATED_LE) ||
+ emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr,
+ part_length))
+ DBUG_RETURN(1);
+ break;
+ default:
+ DBUG_PRINT("info",("cannot handle flag %d", ranges[i]->flag));
+ DBUG_RETURN(1);
}
- else if (to->append(res->ptr(), res->length()))
+ if (tmp.append(FEDERATED_CLOSEPAREN))
DBUG_RETURN(1);
+
+next_loop:
+ if (store_length >= length)
+ break;
+ DBUG_PRINT("info", ("remainder %d", remainder));
+ DBUG_ASSERT(remainder > 1);
+ length-= store_length;
+ ptr+= store_length;
+ if (tmp.append(FEDERATED_AND))
+ DBUG_RETURN(1);
+
+ DBUG_PRINT("info",
+ ("create_where_from_key WHERE clause: %s",
+ tmp.c_ptr_quick()));
}
- if (needs_quotes && to->append("'"))
- DBUG_RETURN(1);
- DBUG_PRINT("info",
- ("final value for 'to' %s", to->c_ptr_quick()));
- key+= length;
- key_length-= length;
- DBUG_RETURN(0);
}
- DBUG_RETURN(1);
+ if (both_not_null)
+ if (tmp.append(FEDERATED_CLOSEPAREN))
+ DBUG_RETURN(1);
+
+ if (to->append(FEDERATED_WHERE))
+ DBUG_RETURN(1);
+
+ if (to->append(tmp))
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
}
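
In short, create_where_from_key() now walks the optional start and end key ranges, emits one `column` <operator> value term per key part, joins the terms with AND, and parenthesizes the two bounds when both are present; the operator comes from the HA_READ_* flag, with HA_READ_KEY_EXACT mapped to >= instead of = when called from records_in_range(). A compact standalone sketch of that flag-to-operator mapping and clause assembly (single integer key, illustrative only):

#include <cstdio>
#include <string>

enum key_flag_sketch { READ_KEY_EXACT, READ_KEY_OR_NEXT, READ_AFTER_KEY, READ_BEFORE_KEY };

struct key_bound { bool present; key_flag_sketch flag; long value; };

static const char *op_for(key_flag_sketch flag, bool is_end_key, bool records_in_range)
{
  switch (flag)
  {
  case READ_KEY_EXACT:   return records_in_range ? " >= " : " = ";
  case READ_KEY_OR_NEXT: return " >= ";
  case READ_AFTER_KEY:   return is_end_key ? " <= " : " > ";
  case READ_BEFORE_KEY:  return " < ";
  }
  return " = ";
}

/* Build a WHERE clause such as: WHERE (`id` > 2) AND (`id` <= 8) */
static std::string where_from_bounds(const char *column,
                                     const key_bound &start,
                                     const key_bound &end,
                                     bool records_in_range)
{
  const key_bound *bounds[2]= { &start, &end };
  bool both= start.present && end.present;
  std::string out= " WHERE ";
  for (int i= 0; i <= 1; i++)
  {
    if (!bounds[i]->present)
      continue;
    if (both && i > 0)
      out+= " AND ";
    out+= "(`";
    out+= column;
    out+= "`";
    out+= op_for(bounds[i]->flag, i > 0, records_in_range);
    out+= std::to_string(bounds[i]->value);
    out+= ")";
  }
  return out;
}

int main()
{
  key_bound start= { true, READ_AFTER_KEY, 2 };
  key_bound end=   { true, READ_AFTER_KEY, 8 };
  std::printf("%s\n", where_from_bounds("id", start, end, false).c_str());
  return 0;
}
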
/*
@@ -842,40 +1274,47 @@ bool ha_federated::create_where_from_key(String *to, KEY *key_info,
static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
{
- FEDERATED_SHARE *share;
- char query_buffer[IO_SIZE];
+ char *select_query, *tmp_table_name;
+ char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ uint tmp_table_name_length;
+ Field **field;
String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+ FEDERATED_SHARE *share;
+ /*
+ In order to use this string, we must first zero its length,
+ or it will contain garbage
+ */
query.length(0);
- uint table_name_length, table_base_name_length;
- char *tmp_table_name, *table_base_name, *select_query;
-
- /* share->table_name has the file location - we want the table's name! */
- table_base_name= (char*) table->s->table_name;
- DBUG_PRINT("info", ("table_name %s", table_base_name));
- /*
- So why does this exist? There is no way currently to init a storage engine.
- Innodb and BDB both have modifications to the server to allow them to
- do this. Since you will not want to do this, this is probably the next
- best method.
- */
pthread_mutex_lock(&federated_mutex);
- table_name_length= (uint) strlen(table_name);
- table_base_name_length= (uint) strlen(table_base_name);
+ tmp_table_name= (char *)table->s->table_name;
+ tmp_table_name_length= (uint) strlen(tmp_table_name);
if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables,
(byte*) table_name,
- table_name_length)))
+ strlen(table_name))))
{
query.set_charset(system_charset_info);
- query.append("SELECT * FROM `");
+ query.append(FEDERATED_SELECT);
+ for (field= table->field; *field; field++)
+ {
+ query.append(FEDERATED_BTICK);
+ query.append((*field)->field_name);
+ query.append(FEDERATED_BTICK);
+ query.append(FEDERATED_COMMA);
+ }
+ query.length(query.length()- strlen(FEDERATED_COMMA));
+ query.append(FEDERATED_FROM);
+ query.append(FEDERATED_BTICK);
+
if (!(share= (FEDERATED_SHARE *)
- my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ my_multi_malloc(MYF(MY_WME),
&share, sizeof(*share),
- &tmp_table_name, table_name_length + 1,
- &select_query, query.length() +
- strlen(table->s->comment) + 1, NullS)))
+ &tmp_table_name, tmp_table_name_length+ 1,
+ &select_query,
+ query.length()+strlen(table->s->comment)+1,
+ NullS)))
{
pthread_mutex_unlock(&federated_mutex);
return NULL;
@@ -884,16 +1323,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
if (parse_url(share, table, 0))
goto error;
- query.append(share->table_base_name);
- query.append("`");
- share->use_count= 0;
- share->table_name_length= table_name_length;
- share->table_name= tmp_table_name;
+ query.append(share->table_name, share->table_name_length);
+ query.append(FEDERATED_BTICK);
share->select_query= select_query;
- strmov(share->table_name, table_name);
strmov(share->select_query, query.ptr());
+ share->use_count= 0;
+ share->table_name_length= strlen(share->table_name);
DBUG_PRINT("info",
("share->select_query %s", share->select_query));
+
if (my_hash_insert(&federated_open_tables, (byte*) share))
goto error;
thr_lock_init(&share->lock);
@@ -907,9 +1345,10 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
error:
pthread_mutex_unlock(&federated_mutex);
if (share->scheme)
+ {
my_free((gptr) share->scheme, MYF(0));
- my_free((gptr) share, MYF(0));
-
+ share->scheme= 0;
+ }
return NULL;
}
@@ -922,12 +1361,16 @@ error:
static int free_share(FEDERATED_SHARE *share)
{
+ DBUG_ENTER("free_share");
pthread_mutex_lock(&federated_mutex);
if (!--share->use_count)
{
if (share->scheme)
+ {
my_free((gptr) share->scheme, MYF(0));
+ share->scheme= 0;
+ }
hash_delete(&federated_open_tables, (byte*) share);
thr_lock_delete(&share->lock);
@@ -936,23 +1379,37 @@ static int free_share(FEDERATED_SHARE *share)
}
pthread_mutex_unlock(&federated_mutex);
- return 0;
+ DBUG_RETURN(0);
}
+ha_rows ha_federated::records_in_range(uint inx, key_range *start_key,
+ key_range *end_key)
+{
+ /*
+
+ We really want indexes to be used as often as possible, therefore
+ we just need to hard-code the return value to a very low number to
+ force the issue
+
+*/
+ DBUG_ENTER("ha_federated::records_in_range");
+ DBUG_RETURN(FEDERATED_RECORDS_IN_RANGE);
+}
/*
If frm_error() is called then we will use this to to find out
what file extentions exist for the storage engine. This is
also used by the default rename_table and delete_table method
in handler.cc.
*/
-static const char *ha_federated_exts[] = {
- NullS
-};
const char **ha_federated::bas_ext() const
{
- return ha_federated_exts;
+ static const char *ext[]=
+ {
+ NullS
+ };
+ return ext;
}
@@ -969,6 +1426,7 @@ const char **ha_federated::bas_ext() const
int ha_federated::open(const char *name, int mode, uint test_if_locked)
{
+ int rc;
DBUG_ENTER("ha_federated::open");
if (!(share= get_share(name, table)))
@@ -977,11 +1435,6 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
/* Connect to foreign database mysql_real_connect() */
mysql= mysql_init(0);
- DBUG_PRINT("info", ("hostname %s", share->hostname));
- DBUG_PRINT("info", ("username %s", share->username));
- DBUG_PRINT("info", ("password %s", share->password));
- DBUG_PRINT("info", ("database %s", share->database));
- DBUG_PRINT("info", ("port %d", share->port));
if (!mysql_real_connect(mysql,
share->hostname,
share->username,
@@ -990,8 +1443,13 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
share->port,
share->socket, 0))
{
- my_error(ER_CONNECT_TO_MASTER, MYF(0), mysql_error(mysql));
- DBUG_RETURN(ER_CONNECT_TO_MASTER);
+ int error_code;
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ error_code= ER_CONNECT_TO_FOREIGN_DATA_SOURCE;
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ my_error(error_code, MYF(0), error_buffer);
+ DBUG_RETURN(error_code);
}
/*
Since we do not support transactions at this version, we can let the client
@@ -1016,19 +1474,21 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
int ha_federated::close(void)
{
+ int retval;
DBUG_ENTER("ha_federated::close");
/* free the result set */
- if (result)
+ if (stored_result)
{
DBUG_PRINT("info",
- ("mysql_free_result result at address %lx", result));
- mysql_free_result(result);
- result= 0;
+ ("mysql_free_result result at address %lx", stored_result));
+ mysql_free_result(stored_result);
+ stored_result= 0;
}
/* Disconnect from mysql */
mysql_close(mysql);
- DBUG_RETURN(free_share(share));
+ retval= free_share(share);
+ DBUG_RETURN(retval);
}
@@ -1085,30 +1545,32 @@ inline uint field_in_record_is_null(TABLE *table,
int ha_federated::write_row(byte *buf)
{
- uint x= 0, num_fields= 0;
- Field **field;
- query_id_t current_query_id= 1;
- ulong tmp_query_id= 1;
+ bool has_fields= FALSE;
uint all_fields_have_same_query_id= 1;
-
- char insert_buffer[IO_SIZE];
- char values_buffer[IO_SIZE], insert_field_value_buffer[IO_SIZE];
+ ulong current_query_id= 1;
+ ulong tmp_query_id= 1;
+ char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char values_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+ Field **field;
/* The main insert query string */
String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin);
- insert_string.length(0);
/* The string containing the values to be added to the insert */
String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin);
- values_string.length(0);
/* The actual value of the field, to be added to the values_string */
String insert_field_value_string(insert_field_value_buffer,
sizeof(insert_field_value_buffer),
&my_charset_bin);
+ values_string.length(0);
+ insert_string.length(0);
insert_field_value_string.length(0);
DBUG_ENTER("ha_federated::write_row");
- DBUG_PRINT("info", ("table charset name %s csname %s",
- table->s->table_charset->name, table->s->table_charset->csname));
+ DBUG_PRINT("info",
+ ("table charset name %s csname %s",
+ table->s->table_charset->name,
+ table->s->table_charset->csname));
statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
@@ -1122,22 +1584,27 @@ int ha_federated::write_row(byte *buf)
current_query_id= table->in_use->query_id;
DBUG_PRINT("info", ("current query id %d", current_query_id));
- /* start off our string */
- insert_string.append("INSERT INTO `");
- insert_string.append(share->table_base_name);
- insert_string.append("`");
- /* start both our field and field values strings */
- insert_string.append(" (");
- values_string.append(" VALUES (");
+ /*
+ start both our field and field values strings
+ */
+ insert_string.append(FEDERATED_INSERT);
+ insert_string.append(FEDERATED_BTICK);
+ insert_string.append(share->table_name, share->table_name_length);
+ insert_string.append(FEDERATED_BTICK);
+ insert_string.append(FEDERATED_OPENPAREN);
+
+ values_string.append(FEDERATED_VALUES);
+ values_string.append(FEDERATED_OPENPAREN);
/*
    Even if one field is different, all_fields_have_same_query_id can't
    remain 1. If it does remain 1, that means no fields were specified in the
    query, such as in the case of INSERT INTO table VALUES (val1, val2, valN)
+
*/
- for (field= table->field; *field; field++, x++)
+ for (field= table->field; *field; field++)
{
- if (x > 0 && tmp_query_id != (*field)->query_id)
+ if (field > table->field && tmp_query_id != (*field)->query_id)
all_fields_have_same_query_id= 0;
tmp_query_id= (*field)->query_id;
@@ -1145,31 +1612,30 @@ int ha_federated::write_row(byte *buf)
/*
loop through the field pointer array, add any fields to both the values
list and the fields list that match the current query id
+
+    The has_fields flag records whether any field matched, so that we know
+    later whether trailing commas and parens need to be trimmed
*/
- x=0;
- for (field= table->field; *field; field++, x++)
+ for (field= table->field; *field; field++)
{
/* if there is a query id and if it's equal to the current query id */
if (((*field)->query_id && (*field)->query_id == current_query_id)
|| all_fields_have_same_query_id)
{
- num_fields++;
+ /*
+ There are some fields. This will be used later to determine
+ whether to chop off commas and parens.
+ */
+ has_fields= TRUE;
if ((*field)->is_null())
- {
- DBUG_PRINT("info",
- ("column %d current query id %d field is_null query id %d",
- x, current_query_id, (*field)->query_id));
- insert_field_value_string.append("NULL");
- }
+ insert_field_value_string.append(FEDERATED_NULL);
else
{
- DBUG_PRINT("info",
- ("column %d current query id %d field is not null query ID %d",
- x, current_query_id, (*field)->query_id));
(*field)->val_str(&insert_field_value_string);
/* quote these fields if they require it */
- (*field)->quote_data(&insert_field_value_string); }
+ (*field)->quote_data(&insert_field_value_string);
+ }
/* append the field name */
insert_string.append((*field)->field_name);
@@ -1178,34 +1644,35 @@ int ha_federated::write_row(byte *buf)
insert_field_value_string.length(0);
/* append commas between both fields and fieldnames */
- insert_string.append(',');
- values_string.append(',');
-
+ /*
+        unfortunately, we can't use the check if *(field + 1) to make the
+        following appends conditional, because the next field may not
+        match the condition:
+        (((*field)->query_id && (*field)->query_id == current_query_id)
+ */
+ insert_string.append(FEDERATED_COMMA);
+ values_string.append(FEDERATED_COMMA);
}
}
/*
- chop of the trailing comma, or if there were no fields, a '('
- So, "INSERT INTO foo (" becomes "INSERT INTO foo "
- or, with fields, "INSERT INTO foo (field1, field2," becomes
- "INSERT INTO foo (field1, field2"
+ remove trailing comma
*/
- insert_string.chop();
-
+ insert_string.length(insert_string.length() - strlen(FEDERATED_COMMA));
/*
if there were no fields, we don't want to add a closing paren
AND, we don't want to chop off the last char '('
insert will be "INSERT INTO t1 VALUES ();"
*/
- DBUG_PRINT("info", ("x %d num fields %d", x, num_fields));
- if (num_fields > 0)
+ if (has_fields)
{
    /* chop off the trailing comma of the values string */
- values_string.chop();
- insert_string.append(')');
+ values_string.length(values_string.length() - strlen(FEDERATED_COMMA));
+ insert_string.append(FEDERATED_CLOSEPAREN);
}
/* we always want to append this, even if there aren't any fields */
- values_string.append(')');
+ values_string.append(FEDERATED_CLOSEPAREN);
/* add the values */
insert_string.append(values_string);
@@ -1214,8 +1681,69 @@ int ha_federated::write_row(byte *buf)
if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length()))
{
- my_error(ER_QUERY_ON_MASTER, MYF(0), mysql_error(mysql));
- DBUG_RETURN(ER_QUERY_ON_MASTER);
+ int error_code;
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ my_error(error_code, MYF(0), error_buffer);
+ DBUG_RETURN(error_code);
+ }
+
+ DBUG_RETURN(0);
+}
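
The loop above assembles the column list and the VALUES list in parallel and then trims the trailing separator. Here is a simplified standalone sketch of that assembly, using std::string in place of the server's String class; table and column names are invented, values are assumed to be already quoted, and (unlike the real code, which trims the field list unconditionally) the trim is gated on a non-empty field list for brevity.

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

static std::string build_insert(const std::string &table,
                                const std::vector<std::pair<std::string,
                                                            std::string> > &cols)
{
  std::string fields= "INSERT INTO `" + table + "` (";
  std::string values= " VALUES (";
  for (size_t i= 0; i < cols.size(); i++)
  {
    fields+= cols[i].first + ", ";
    values+= cols[i].second + ", ";
  }
  if (!cols.empty())                    /* plays the role of has_fields */
  {
    fields.erase(fields.size() - 2);    /* trim trailing ", " */
    values.erase(values.size() - 2);
  }
  return fields + ")" + values + ")";
}

int main()
{
  std::vector<std::pair<std::string, std::string> > cols;
  cols.push_back(std::make_pair("id", "1"));
  cols.push_back(std::make_pair("name", "'fred'"));
  std::printf("%s\n", build_insert("t1", cols).c_str());
  return 0;
}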
+
+
+int ha_federated::optimize(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ char query_buffer[STRING_BUFFER_USUAL_SIZE];
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+
+ DBUG_ENTER("ha_federated::optimize");
+
+ query.length(0);
+
+ query.set_charset(system_charset_info);
+ query.append(FEDERATED_OPTIMIZE);
+ query.append(FEDERATED_BTICK);
+ query.append(share->table_name, share->table_name_length);
+ query.append(FEDERATED_BTICK);
+
+ if (mysql_real_query(mysql, query.ptr(), query.length()))
+ {
+ my_error(-1, MYF(0), mysql_error(mysql));
+ DBUG_RETURN(-1);
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ char query_buffer[STRING_BUFFER_USUAL_SIZE];
+ String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+
+ DBUG_ENTER("ha_federated::repair");
+
+ query.length(0);
+
+ query.set_charset(system_charset_info);
+ query.append(FEDERATED_REPAIR);
+ query.append(FEDERATED_BTICK);
+ query.append(share->table_name, share->table_name_length);
+ query.append(FEDERATED_BTICK);
+ if (check_opt->flags & T_QUICK)
+ query.append(FEDERATED_QUICK);
+ if (check_opt->flags & T_EXTEND)
+ query.append(FEDERATED_EXTENDED);
+ if (check_opt->sql_flags & TT_USEFRM)
+ query.append(FEDERATED_USE_FRM);
+
+ if (mysql_real_query(mysql, query.ptr(), query.length()))
+ {
+ my_error(-1, MYF(0), mysql_error(mysql));
+ DBUG_RETURN(-1);
}
DBUG_RETURN(0);
@@ -1241,39 +1769,57 @@ int ha_federated::write_row(byte *buf)
int ha_federated::update_row(const byte *old_data, byte *new_data)
{
- uint x= 0;
- uint has_a_primary_key= 0;
- uint primary_key_field_num;
- char old_field_value_buffer[IO_SIZE], new_field_value_buffer[IO_SIZE];
- char update_buffer[IO_SIZE], where_buffer[IO_SIZE];
+ /*
+    This used to control how the query was built. If there was a primary
+    key, the query would be built with a WHERE clause that used only that
+    column as the condition. This is flawed, because a multi-part primary
+    key would only ever use its first part! We don't need to do this
+    anyway, because read_range_first retrieves the correct record, which
+    is what is used to build the WHERE clause. We can however use this
+    flag to append a LIMIT when there is no primary key: we are only
+    updating one record, and LIMIT enforces that.
+ */
+ bool has_a_primary_key= (table->s->primary_key == 0 ? TRUE : FALSE);
+ /*
+ buffers for following strings
+ */
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char old_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+ char new_field_value_buffer[STRING_BUFFER_USUAL_SIZE];
+ char update_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char where_buffer[FEDERATED_QUERY_BUFFER_SIZE];
/* stores the value to be replaced of the field were are updating */
- String old_field_value(old_field_value_buffer, sizeof(old_field_value_buffer),
+ String old_field_value(old_field_value_buffer,
+ sizeof(old_field_value_buffer),
&my_charset_bin);
/* stores the new value of the field */
- String new_field_value(new_field_value_buffer, sizeof(new_field_value_buffer),
+ String new_field_value(new_field_value_buffer,
+ sizeof(new_field_value_buffer),
&my_charset_bin);
/* stores the update query */
- String update_string(update_buffer, sizeof(update_buffer), &my_charset_bin);
+ String update_string(update_buffer,
+ sizeof(update_buffer),
+ &my_charset_bin);
/* stores the WHERE clause */
- String where_string(where_buffer, sizeof(where_buffer), &my_charset_bin);
+ String where_string(where_buffer,
+ sizeof(where_buffer),
+ &my_charset_bin);
DBUG_ENTER("ha_federated::update_row");
+ /*
+ set string lengths to 0 to avoid misc chars in string
+ */
old_field_value.length(0);
new_field_value.length(0);
update_string.length(0);
where_string.length(0);
- has_a_primary_key= (table->s->primary_key == 0 ? 1 : 0);
- primary_key_field_num= has_a_primary_key ?
- table->key_info[table->s->primary_key].key_part->fieldnr - 1 : -1;
- if (has_a_primary_key)
- DBUG_PRINT("info", ("has a primary key"));
-
- update_string.append("UPDATE `");
- update_string.append(share->table_base_name);
- update_string.append("`");
- update_string.append(" SET ");
+ update_string.append(FEDERATED_UPDATE);
+ update_string.append(FEDERATED_BTICK);
+ update_string.append(share->table_name);
+ update_string.append(FEDERATED_BTICK);
+ update_string.append(FEDERATED_SET);
/*
In this loop, we want to match column names to values being inserted
@@ -1285,104 +1831,73 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
field=oldvalue
*/
- for (Field **field= table->field; *field; field++, x++)
+ for (Field **field= table->field; *field; field++)
{
- /*
- In all of these tests for 'has_a_primary_key', what I'm trying to
- accomplish is to only use the primary key in the WHERE clause if the
- table has a primary key, as opposed to a table without a primary key
- in which case we have to use all the fields to create a WHERE clause
- using the old/current values, as well as adding a LIMIT statement
- */
- if (has_a_primary_key)
- {
- if (x == primary_key_field_num)
- where_string.append((*field)->field_name);
- }
- else
- where_string.append((*field)->field_name);
-
+ where_string.append((*field)->field_name);
update_string.append((*field)->field_name);
- update_string.append('=');
+ update_string.append(FEDERATED_EQ);
if ((*field)->is_null())
- {
- DBUG_PRINT("info", ("column %d is NULL", x ));
- new_field_value.append("NULL");
- }
+ new_field_value.append(FEDERATED_NULL);
else
{
/* otherwise = */
(*field)->val_str(&new_field_value);
(*field)->quote_data(&new_field_value);
- if (has_a_primary_key)
- {
- if (x == primary_key_field_num)
- where_string.append("=");
- }
- else if (!field_in_record_is_null(table, *field, (char*) old_data))
- where_string.append("=");
+ if (!field_in_record_is_null(table, *field, (char*) old_data))
+ where_string.append(FEDERATED_EQ);
}
- if (has_a_primary_key)
- {
- if (x == primary_key_field_num)
- {
- (*field)->val_str(&old_field_value,
- (char*) (old_data + (*field)->offset()));
- (*field)->quote_data(&old_field_value);
- where_string.append(old_field_value);
- }
- }
+ if (field_in_record_is_null(table, *field, (char*) old_data))
+ where_string.append(FEDERATED_ISNULL);
else
{
- if (field_in_record_is_null(table, *field, (char*) old_data))
- where_string.append(" IS NULL ");
- else
- {
- uint o_len;
- (*field)->val_str(&old_field_value,
- (char*) (old_data + (*field)->offset()));
- o_len= (*field)->pack_length();
- DBUG_PRINT("info", ("o_len %lu", o_len));
- (*field)->quote_data(&old_field_value);
- where_string.append(old_field_value);
- }
+ uint o_len;
+ (*field)->val_str(&old_field_value,
+ (char*) (old_data + (*field)->offset()));
+ o_len= (*field)->pack_length();
+ (*field)->quote_data(&old_field_value);
+ where_string.append(old_field_value);
}
- DBUG_PRINT("info",
- ("column %d new value %s old value %s",
- x, new_field_value.c_ptr_quick(), old_field_value.c_ptr_quick() ));
+
update_string.append(new_field_value);
new_field_value.length(0);
- if (x + 1 < table->s->fields)
+ /*
+      Only append separators if there is another field left to iterate over
+ */
+ if (*(field + 1))
{
- update_string.append(", ");
- if (!has_a_primary_key)
- where_string.append(" AND ");
+ update_string.append(FEDERATED_COMMA);
+ where_string.append(FEDERATED_AND);
}
old_field_value.length(0);
}
- update_string.append(" WHERE ");
+ update_string.append(FEDERATED_WHERE);
update_string.append(where_string);
- if (! has_a_primary_key)
- update_string.append(" LIMIT 1");
+ /*
+    If this table does not have a primary key, we could possibly update
+    multiple rows. Append LIMIT 1 to make sure only one row is changed.
+ */
+ if (!has_a_primary_key)
+ update_string.append(FEDERATED_LIMIT1);
- DBUG_PRINT("info", ("Final update query: %s",
- update_string.c_ptr_quick()));
if (mysql_real_query(mysql, update_string.ptr(), update_string.length()))
{
- my_error(ER_QUERY_ON_MASTER, MYF(0), mysql_error(mysql));
- DBUG_RETURN(ER_QUERY_ON_MASTER);
+ int error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ my_error(error_code, MYF(0), error_buffer);
+ DBUG_RETURN(error_code);
}
-
-
DBUG_RETURN(0);
}
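
For a hypothetical two-column table t1 with no primary key, the loop above would produce a statement shaped roughly like the following (illustrative only; the actual text depends on quote_data() and on the old row values used for the WHERE clause):

  UPDATE `t1` SET id = 2, name = 'barney' WHERE id = 1 AND name = 'fred' LIMIT 1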
/*
  This will delete a row. 'buf' will contain a copy of the row to be deleted.
The server will call this right after the current row has been called (from
  either a previous rnd_next() or index call).
If you keep a pointer to the last row or can access a primary key it will
@@ -1398,49 +1913,60 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
int ha_federated::delete_row(const byte *buf)
{
- char delete_buffer[IO_SIZE];
- char data_buffer[IO_SIZE];
+ char delete_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char data_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+
String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin);
String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin);
+ delete_string.length(0);
+ data_string.length(0);
+
DBUG_ENTER("ha_federated::delete_row");
- delete_string.length(0);
- delete_string.append("DELETE FROM `");
- delete_string.append(share->table_base_name);
- delete_string.append("`");
- delete_string.append(" WHERE ");
+ delete_string.append(FEDERATED_DELETE);
+ delete_string.append(FEDERATED_FROM);
+ delete_string.append(FEDERATED_BTICK);
+ delete_string.append(share->table_name);
+ delete_string.append(FEDERATED_BTICK);
+ delete_string.append(FEDERATED_WHERE);
for (Field **field= table->field; *field; field++)
{
- Field *cur_field= *field;
- data_string.length(0);
- delete_string.append(cur_field->field_name);
+ delete_string.append((*field)->field_name);
- if (cur_field->is_null_in_record((const uchar*) buf))
+ if ((*field)->is_null())
{
- delete_string.append(" IS ");
- data_string.append("NULL");
+ delete_string.append(FEDERATED_IS);
+ data_string.append(FEDERATED_NULL);
}
else
{
- delete_string.append("=");
- cur_field->val_str(&data_string, (char*) buf+ cur_field->offset());
- cur_field->quote_data(&data_string);
+ delete_string.append(FEDERATED_EQ);
+ (*field)->val_str(&data_string);
+ (*field)->quote_data(&data_string);
}
delete_string.append(data_string);
- delete_string.append(" AND ");
+ data_string.length(0);
+
+ if (*(field + 1))
+ delete_string.append(FEDERATED_AND);
}
- delete_string.length(delete_string.length()-5); // Remove AND
- delete_string.append(" LIMIT 1");
+ delete_string.append(FEDERATED_LIMIT1);
DBUG_PRINT("info",
("Delete sql: %s", delete_string.c_ptr_quick()));
if (mysql_real_query(mysql, delete_string.ptr(), delete_string.length()))
{
- my_error(ER_QUERY_ON_MASTER, MYF(0), mysql_error(mysql));
- DBUG_RETURN(ER_QUERY_ON_MASTER);
+    int error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+    char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+    my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+              mysql_errno(mysql), mysql_error(mysql)));
+    my_error(error_code, MYF(0), error_buffer);
+    DBUG_RETURN(error_code);
}
+ deleted+= mysql->affected_rows;
+ DBUG_PRINT("info",
+ ("rows deleted %d rows deleted for all time %d",
+ int(mysql->affected_rows), deleted));
DBUG_RETURN(0);
}
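
The delete loop uses a look-ahead on *(field + 1) to decide whether another " AND " is needed, instead of the old approach of appending unconditionally and chopping characters off afterwards. A tiny standalone illustration of that separator pattern follows; the column names are invented.

#include <cstdio>

int main()
{
  const char *fields[]= { "id", "name", "age", 0 };  /* invented column names */
  for (const char **f= fields; *f; f++)
  {
    std::printf("%s = ?", *f);
    if (*(f + 1))                     /* same look-ahead as the handler loop */
      std::printf(" AND ");
  }
  std::printf(" LIMIT 1\n");
  return 0;
}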
@@ -1454,12 +1980,12 @@ int ha_federated::delete_row(const byte *buf)
*/
int ha_federated::index_read(byte *buf, const byte *key,
- uint key_len __attribute__ ((unused)),
- enum ha_rkey_function find_flag
- __attribute__ ((unused)))
+ uint key_len, enum ha_rkey_function find_flag)
{
+ int retval;
DBUG_ENTER("ha_federated::index_read");
- DBUG_RETURN(index_read_idx(buf, active_index, key, key_len, find_flag));
+ retval= index_read_idx(buf, active_index, key, key_len, find_flag);
+ DBUG_RETURN(retval);
}
@@ -1473,17 +1999,23 @@ int ha_federated::index_read(byte *buf, const byte *key,
*/
int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
- uint key_len __attribute__ ((unused)),
- enum ha_rkey_function find_flag
- __attribute__ ((unused)))
+ uint key_len, enum ha_rkey_function find_flag)
{
- char index_value[IO_SIZE];
- String index_string(index_value, sizeof(index_value), &my_charset_bin);
- index_string.length(0);
- uint keylen;
+ int retval;
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char index_value[STRING_BUFFER_USUAL_SIZE];
+ char key_value[STRING_BUFFER_USUAL_SIZE];
+ char test_value[STRING_BUFFER_USUAL_SIZE];
+ char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ String index_string(index_value,
+ sizeof(index_value),
+ &my_charset_bin);
+ String sql_query(sql_query_buffer,
+ sizeof(sql_query_buffer),
+ &my_charset_bin);
+ key_range range;
- char sql_query_buffer[IO_SIZE];
- String sql_query(sql_query_buffer, sizeof(sql_query_buffer), &my_charset_bin);
+ index_string.length(0);
sql_query.length(0);
DBUG_ENTER("ha_federated::index_read_idx");
@@ -1492,10 +2024,14 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
&LOCK_status);
sql_query.append(share->select_query);
- sql_query.append(" WHERE ");
- keylen= strlen((char*) (key));
- create_where_from_key(&index_string, &table->key_info[index], key, keylen);
+ range.key= key;
+ range.length= key_len;
+ range.flag= find_flag;
+ create_where_from_key(&index_string,
+ &table->key_info[index],
+ &range,
+ NULL, 0);
sql_query.append(index_string);
DBUG_PRINT("info",
@@ -1507,42 +2043,50 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key,
("current position %d sql_query %s", current_position,
sql_query.c_ptr_quick()));
- if (result)
+ if (stored_result)
{
- mysql_free_result(result);
- result= 0;
+ mysql_free_result(stored_result);
+ stored_result= 0;
}
if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
{
- my_error(ER_QUERY_ON_MASTER, MYF(0), mysql_error(mysql));
- DBUG_RETURN(ER_QUERY_ON_MASTER);
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ goto error;
}
- result= mysql_store_result(mysql);
+ stored_result= mysql_store_result(mysql);
- if (!result)
+ if (!stored_result)
{
- table->status= STATUS_NOT_FOUND;
- DBUG_RETURN(HA_ERR_END_OF_FILE);
+ retval= HA_ERR_END_OF_FILE;
+ goto error;
}
+ /*
+ This basically says that the record in table->record[0] is legal,
+ and that it is ok to use this record, for whatever reason, such
+ as with a join (without it, joins will not work)
+ */
+ table->status= 0;
+
+ retval= rnd_next(buf);
+ DBUG_RETURN(retval);
- if (mysql_errno(mysql))
+error:
+ if (stored_result)
{
- table->status= STATUS_NOT_FOUND;
- DBUG_RETURN(mysql_errno(mysql));
+ mysql_free_result(stored_result);
+ stored_result= 0;
}
- /*
- This basically says that the record in table->record[0] is legal, and that it is
- ok to use this record, for whatever reason, such as with a join (without it, joins
- will not work)
- */
- table->status=0;
-
- DBUG_RETURN(rnd_next(buf));
+ table->status= STATUS_NOT_FOUND;
+ my_error(retval, MYF(0), error_buffer);
+ DBUG_RETURN(retval);
}
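
index_read_idx() now packs the key into a key_range and hands it to create_where_from_key(), which turns the search flag into a SQL comparison for the remote server. The sketch below shows the underlying idea with a simplified stand-in enum and mapping; these are not the server's ha_rkey_function values, and the real routine also handles multi-part keys, NULLs and end keys.

#include <cstdio>
#include <string>

enum toy_find_flag { TOY_EXACT, TOY_AFTER_KEY, TOY_OR_NEXT, TOY_BEFORE_KEY };

static std::string where_for(const std::string &col, const std::string &val,
                             toy_find_flag flag)
{
  const char *op= " = ";
  switch (flag) {
  case TOY_AFTER_KEY:  op= " > ";  break;   /* FEDERATED_GT */
  case TOY_OR_NEXT:    op= " >= "; break;   /* FEDERATED_GE */
  case TOY_BEFORE_KEY: op= " < ";  break;   /* FEDERATED_LT */
  default:             break;               /* FEDERATED_EQ */
  }
  return " WHERE " + col + op + val;
}

int main()
{
  std::printf("SELECT ... %s\n", where_for("id", "42", TOY_OR_NEXT).c_str());
  return 0;
}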
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
int ha_federated::index_init(uint keynr)
{
+ int error;
DBUG_ENTER("ha_federated::index_init");
DBUG_PRINT("info",
("table: '%s' key: %d", table->s->table_name, keynr));
@@ -1550,14 +2094,90 @@ int ha_federated::index_init(uint keynr)
DBUG_RETURN(0);
}
+/*
+  Read the first row in the range given by start_key/end_key: build the
+  WHERE clause from the key range and run the SELECT on the remote server.
+*/
+int ha_federated::read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range, bool sorted)
+{
+ char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ int retval;
+ String sql_query(sql_query_buffer,
+ sizeof(sql_query_buffer),
+ &my_charset_bin);
+
+ DBUG_ENTER("ha_federated::read_range_first");
+ if (start_key == NULL && end_key == NULL)
+ DBUG_RETURN(0);
+
+ sql_query.length(0);
+ sql_query.append(share->select_query);
+ create_where_from_key(&sql_query,
+ &table->key_info[active_index],
+ start_key, end_key, 0);
+
+ if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length()))
+ {
+ retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ goto error;
+ }
+ sql_query.length(0);
+
+ if (stored_result)
+ {
+ DBUG_PRINT("info",
+ ("mysql_free_result address %lx", stored_result));
+ mysql_free_result(stored_result);
+ stored_result= 0;
+ }
+ stored_result= mysql_store_result(mysql);
+
+ if (!stored_result)
+ {
+ retval= HA_ERR_END_OF_FILE;
+ goto error;
+ }
+
+ /* This was successful, please let it be known! */
+ table->status= 0;
+
+ retval= rnd_next(table->record[0]);
+ DBUG_RETURN(retval);
+
+error:
+ table->status= STATUS_NOT_FOUND;
+ if (stored_result)
+ {
+ DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result));
+ mysql_free_result(stored_result);
+ stored_result= 0;
+ }
+ DBUG_RETURN(retval);
+}
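
read_range_first(), index_read_idx() and rnd_init() all follow the same stored_result lifecycle: free any previous result, run the query, and keep the new buffered result for rnd_next() to walk. A compile-only sketch of that lifecycle using the same client-library calls is shown below; the helper name and the error handling convention are invented for illustration.

#include <cstring>
#include <mysql.h>

static MYSQL_RES *run_remote(MYSQL *mysql, MYSQL_RES *old_result,
                             const char *query)
{
  if (old_result)
    mysql_free_result(old_result);      /* never leak the previous result set */
  if (mysql_real_query(mysql, query, strlen(query)))
    return NULL;                        /* caller reports mysql_error(mysql) */
  return mysql_store_result(mysql);     /* buffered set that rnd_next() walks */
}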
+
+int ha_federated::read_range_next()
+{
+ int retval;
+ DBUG_ENTER("ha_federated::read_range_next");
+ retval= rnd_next(table->record[0]);
+ DBUG_RETURN(retval);
+}
+
+
/* Used to read forward through the index. */
int ha_federated::index_next(byte *buf)
{
+ int retval;
DBUG_ENTER("ha_federated::index_next");
- DBUG_RETURN(rnd_next(buf));
+ statistic_increment(table->in_use->status_var.ha_read_next_count,
+ &LOCK_status);
+ retval= rnd_next(buf);
+ DBUG_RETURN(retval);
}
-
-
/*
rnd_init() is called when the system wants the storage engine to do a table
scan.
@@ -1573,12 +2193,15 @@ int ha_federated::index_next(byte *buf)
int ha_federated::rnd_init(bool scan)
{
- DBUG_ENTER("ha_federated::rnd_init");
+ int num_fields, rows;
+ int retval;
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ DBUG_ENTER("ha_federated::rnd_init");
/*
- This 'scan' flag is incredibly important for this handler to work
- properly, especially with updates containing WHERE clauses using
- indexed columns.
+ The use of the 'scan' flag is incredibly important for this handler
+ to work properly, especially with updates containing WHERE clauses
+ using indexed columns.
When the initial query contains a WHERE clause of the query using an
indexed column, it's index_read_idx that selects the exact record from
@@ -1613,40 +2236,48 @@ int ha_federated::rnd_init(bool scan)
if (scan)
{
DBUG_PRINT("info", ("share->select_query %s", share->select_query));
- if (result)
+ if (stored_result)
{
DBUG_PRINT("info",
- ("mysql_free_result address %lx", result));
- mysql_free_result(result);
- result= 0;
+ ("mysql_free_result address %lx", stored_result));
+ mysql_free_result(stored_result);
+ stored_result= 0;
}
- if (mysql_real_query
- (mysql, share->select_query, strlen(share->select_query)))
- {
- my_error(ER_QUERY_ON_MASTER, MYF(0), mysql_error(mysql));
- DBUG_RETURN(ER_QUERY_ON_MASTER);
- }
- result= mysql_store_result(mysql);
+ if (mysql_real_query(mysql,
+ share->select_query,
+ strlen(share->select_query)))
+ goto error;
- if (mysql_errno(mysql))
- DBUG_RETURN(mysql_errno(mysql));
+ stored_result= mysql_store_result(mysql);
+ if (!stored_result)
+ goto error;
}
DBUG_RETURN(0);
+
+error:
+ retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ my_error(retval, MYF(0), error_buffer);
+ DBUG_PRINT("info",
+ ("return error code %d", retval));
+ DBUG_RETURN(retval);
}
int ha_federated::rnd_end()
{
+ int retval;
DBUG_ENTER("ha_federated::rnd_end");
- if (result)
+
+ if (stored_result)
{
- DBUG_PRINT("info", ("mysql_free_result address %lx", result));
- mysql_free_result(result);
- result= 0;
+ DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result));
+ mysql_free_result(stored_result);
+ stored_result= 0;
}
-
- mysql_free_result(result);
- DBUG_RETURN(index_end());
+ retval= index_end();
+ DBUG_RETURN(retval);
}
int ha_federated::index_end(void)
@@ -1668,10 +2299,11 @@ int ha_federated::index_end(void)
int ha_federated::rnd_next(byte *buf)
{
+ int retval;
MYSQL_ROW row;
DBUG_ENTER("ha_federated::rnd_next");
- if (result == 0)
+ if (stored_result == 0)
{
/*
Return value of rnd_init is not always checked (see records.cc),
@@ -1682,12 +2314,13 @@ int ha_federated::rnd_next(byte *buf)
}
/* Fetch a row, insert it back in a row format. */
- current_position= result->data_cursor;
+ current_position= stored_result->data_cursor;
DBUG_PRINT("info", ("current position %d", current_position));
- if (!(row= mysql_fetch_row(result)))
+ if (!(row= mysql_fetch_row(stored_result)))
DBUG_RETURN(HA_ERR_END_OF_FILE);
- DBUG_RETURN(convert_row_to_internal_format(buf, row));
+ retval= convert_row_to_internal_format(buf, row);
+ DBUG_RETURN(retval);
}
@@ -1734,13 +2367,15 @@ int ha_federated::rnd_pos(byte *buf, byte *pos)
*/
if (scan_flag)
{
+ int retval;
statistic_increment(table->in_use->status_var.ha_read_rnd_count,
&LOCK_status);
memcpy_fixed(&current_position, pos, sizeof(MYSQL_ROW_OFFSET)); // pos
/* is not aligned */
- result->current_row= 0;
- result->data_cursor= current_position;
- DBUG_RETURN(rnd_next(buf));
+ stored_result->current_row= 0;
+ stored_result->data_cursor= current_position;
+ retval= rnd_next(buf);
+ DBUG_RETURN(retval);
}
DBUG_RETURN(0);
}
@@ -1789,12 +2424,91 @@ int ha_federated::rnd_pos(byte *buf, byte *pos)
sql_update.cc
*/
-/* FIX: later version provide better information to the optimizer */
void ha_federated::info(uint flag)
{
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ char status_buf[FEDERATED_QUERY_BUFFER_SIZE];
+ char escaped_table_name[FEDERATED_QUERY_BUFFER_SIZE];
+ int error;
+ uint error_code;
+ MYSQL_RES *result= 0;
+ MYSQL_ROW row;
+ String status_query_string(status_buf, sizeof(status_buf), &my_charset_bin);
+
DBUG_ENTER("ha_federated::info");
- records= 10000; // fix later
+
+ error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+  /* only query the remote table status when it is actually needed */
+ if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
+ {
+ status_query_string.length(0);
+ status_query_string.append(FEDERATED_INFO);
+ status_query_string.append(FEDERATED_SQUOTE);
+
+ escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name,
+ sizeof(escaped_table_name),
+ share->table_name,
+ share->table_name_length);
+ status_query_string.append(escaped_table_name);
+ status_query_string.append(FEDERATED_SQUOTE);
+
+ if (mysql_real_query(mysql, status_query_string.ptr(),
+ status_query_string.length()))
+ goto error;
+
+ status_query_string.length(0);
+
+ result= mysql_store_result(mysql);
+ if (!result)
+ goto error;
+
+ if (!mysql_num_rows(result))
+ goto error;
+
+ if (!(row= mysql_fetch_row(result)))
+ goto error;
+
+    if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST))
+ {
+ /*
+ deleted is set in ha_federated::info
+ */
+ /*
+ need to figure out what this means as far as federated is concerned,
+ since we don't have a "file"
+
+ data_file_length = ?
+ index_file_length = ?
+ delete_length = ?
+ */
+ if (row[4] != NULL)
+ records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error);
+ if (row[5] != NULL)
+ mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error);
+ if (row[12] != NULL)
+ update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error);
+ if (row[13] != NULL)
+ check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error);
+ }
+ if (flag & HA_STATUS_CONST)
+ {
+ TABLE_SHARE *share= table->s;
+ block_size= 4096;
+ }
+ }
+
+ if (result)
+ mysql_free_result(result);
+
+ DBUG_VOID_RETURN;
+
+error:
+ if (result)
+ mysql_free_result(result);
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ my_error(error_code, MYF(0), error_buffer);
DBUG_VOID_RETURN;
}
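
The row[] offsets read above assume the SHOW TABLE STATUS column order of this server generation. As a reading aid, here is an illustrative enum of that assumed order; it is not defined by the server headers, so verify it against the server you actually build against.

enum show_table_status_column
{
  STS_NAME= 0, STS_ENGINE, STS_VERSION, STS_ROW_FORMAT,
  STS_ROWS,            /* row[4]  -> records         */
  STS_AVG_ROW_LENGTH,  /* row[5]  -> mean_rec_length */
  STS_DATA_LENGTH, STS_MAX_DATA_LENGTH, STS_INDEX_LENGTH, STS_DATA_FREE,
  STS_AUTO_INCREMENT, STS_CREATE_TIME,
  STS_UPDATE_TIME,     /* row[12] -> update_time     */
  STS_CHECK_TIME       /* row[13] -> check_time      */
};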
@@ -1815,22 +2529,30 @@ int ha_federated::delete_all_rows()
{
DBUG_ENTER("ha_federated::delete_all_rows");
- char query_buffer[IO_SIZE];
+ char query_buffer[FEDERATED_QUERY_BUFFER_SIZE];
String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
query.length(0);
query.set_charset(system_charset_info);
- query.append("TRUNCATE `");
- query.append(share->table_base_name);
- query.append("`");
+ query.append(FEDERATED_TRUNCATE);
+ query.append(FEDERATED_BTICK);
+ query.append(share->table_name);
+ query.append(FEDERATED_BTICK);
+ /*
+ TRUNCATE won't return anything in mysql_affected_rows
+ */
+ deleted+= records;
if (mysql_real_query(mysql, query.ptr(), query.length()))
{
- my_error(ER_QUERY_ON_MASTER, MYF(0), mysql_error(mysql));
- DBUG_RETURN(ER_QUERY_ON_MASTER);
+ int error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE;
+ char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
+ my_sprintf(error_buffer, (error_buffer, ": %d : %s",
+ mysql_errno(mysql), mysql_error(mysql)));
+ my_error(error_code, MYF(0), error_buffer);
+ DBUG_RETURN(error_code);
}
-
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ DBUG_RETURN(0);
}
@@ -1878,7 +2600,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
*/
if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
- lock_type <= TL_WRITE) && !thd->in_lock_tables && !thd->tablespace_op)
+ lock_type <= TL_WRITE) && !thd->in_lock_tables)
lock_type= TL_WRITE_ALLOW_WRITE;
/*
@@ -1908,28 +2630,33 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
int ha_federated::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
- int connection_error=0;
- FEDERATED_SHARE tmp;
+ int retval= 0;
+ /*
+ only a temporary share, to test the url
+ */
+ FEDERATED_SHARE tmp_share;
DBUG_ENTER("ha_federated::create");
- if (parse_url(&tmp, table_arg, 1))
- {
- my_error(ER_CANT_CREATE_TABLE, MYF(0), name, 1);
+ if ((retval= parse_url(&tmp_share, table_arg, 1)))
goto error;
- }
- if ((connection_error= check_foreign_data_source(&tmp)))
- {
- my_error(connection_error, MYF(0), name, 1);
+
+ if ((retval= check_foreign_data_source(&tmp_share, 1)))
goto error;
+
+ if (tmp_share.scheme)
+ {
+ my_free((gptr) tmp_share.scheme, MYF(0));
+ tmp_share.scheme= 0;
}
-
- my_free((gptr) tmp.scheme, MYF(0));
- DBUG_RETURN(0);
-
+ DBUG_RETURN(retval);
+
error:
- DBUG_PRINT("info", ("errors, returning %d", ER_CANT_CREATE_TABLE));
- my_free((gptr) tmp.scheme, MYF(0));
- DBUG_RETURN(ER_CANT_CREATE_TABLE);
+ if (tmp_share.scheme)
+ {
+ my_free((gptr) tmp_share.scheme, MYF(0));
+ tmp_share.scheme= 0;
+ }
+ DBUG_RETURN(retval);
}
#endif /* HAVE_FEDERATED_DB */
diff --git a/sql/ha_federated.h b/sql/ha_federated.h
index f084976718c..58b78ab0dde 100644
--- a/sql/ha_federated.h
+++ b/sql/ha_federated.h
@@ -26,16 +26,95 @@
#endif
#include <mysql.h>
-//#include <client.h>
+
+#define FEDERATED_QUERY_BUFFER_SIZE (STRING_BUFFER_USUAL_SIZE * 5)
+#define FEDERATED_RECORDS_IN_RANGE 2
+
+#define FEDERATED_INFO " SHOW TABLE STATUS LIKE "
+#define FEDERATED_INFO_LEN sizeof(FEDERATED_INFO)
+#define FEDERATED_SELECT "SELECT "
+#define FEDERATED_SELECT_LEN sizeof(FEDERATED_SELECT)
+#define FEDERATED_WHERE " WHERE "
+#define FEDERATED_WHERE_LEN sizeof(FEDERATED_WHERE)
+#define FEDERATED_FROM " FROM "
+#define FEDERATED_FROM_LEN sizeof(FEDERATED_FROM)
+#define FEDERATED_PERCENT "%"
+#define FEDERATED_PERCENT_LEN sizeof(FEDERATED_PERCENT)
+#define FEDERATED_IS " IS "
+#define FEDERATED_IS_LEN sizeof(FEDERATED_IS)
+#define FEDERATED_NULL " NULL "
+#define FEDERATED_NULL_LEN sizeof(FEDERATED_NULL)
+#define FEDERATED_ISNULL " IS NULL "
+#define FEDERATED_ISNULL_LEN sizeof(FEDERATED_ISNULL)
+#define FEDERATED_LIKE " LIKE "
+#define FEDERATED_LIKE_LEN sizeof(FEDERATED_LIKE)
+#define FEDERATED_TRUNCATE "TRUNCATE "
+#define FEDERATED_TRUNCATE_LEN sizeof(FEDERATED_TRUNCATE)
+#define FEDERATED_DELETE "DELETE "
+#define FEDERATED_DELETE_LEN sizeof(FEDERATED_DELETE)
+#define FEDERATED_INSERT "INSERT INTO "
+#define FEDERATED_INSERT_LEN sizeof(FEDERATED_INSERT)
+#define FEDERATED_OPTIMIZE "OPTIMIZE TABLE "
+#define FEDERATED_OPTIMIZE_LEN sizeof(FEDERATED_OPTIMIZE)
+#define FEDERATED_REPAIR "REPAIR TABLE "
+#define FEDERATED_REPAIR_LEN sizeof(FEDERATED_REPAIR)
+#define FEDERATED_QUICK " QUICK"
+#define FEDERATED_QUICK_LEN sizeof(FEDERATED_QUICK)
+#define FEDERATED_EXTENDED " EXTENDED"
+#define FEDERATED_EXTENDED_LEN sizeof(FEDERATED_EXTENDED)
+#define FEDERATED_USE_FRM " USE_FRM"
+#define FEDERATED_USE_FRM_LEN sizeof(FEDERATED_USE_FRM)
+#define FEDERATED_LIMIT1 " LIMIT 1"
+#define FEDERATED_LIMIT1_LEN sizeof(FEDERATED_LIMIT1)
+#define FEDERATED_VALUES "VALUES "
+#define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES)
+#define FEDERATED_UPDATE "UPDATE "
+#define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE)
+#define FEDERATED_SET "SET "
+#define FEDERATED_SET_LEN sizeof(FEDERATED_SET)
+#define FEDERATED_AND " AND "
+#define FEDERATED_AND_LEN sizeof(FEDERATED_AND)
+#define FEDERATED_CONJUNCTION ") AND ("
+#define FEDERATED_CONJUNCTION_LEN sizeof(FEDERATED_CONJUNCTION)
+#define FEDERATED_OR " OR "
+#define FEDERATED_OR_LEN sizeof(FEDERATED_OR)
+#define FEDERATED_NOT " NOT "
+#define FEDERATED_NOT_LEN sizeof(FEDERATED_NOT)
+#define FEDERATED_STAR "* "
+#define FEDERATED_STAR_LEN sizeof(FEDERATED_STAR)
+#define FEDERATED_SPACE " "
+#define FEDERATED_SPACE_LEN sizeof(FEDERATED_SPACE)
+#define FEDERATED_SQUOTE "'"
+#define FEDERATED_SQUOTE_LEN sizeof(FEDERATED_SQUOTE)
+#define FEDERATED_COMMA ", "
+#define FEDERATED_COMMA_LEN sizeof(FEDERATED_COMMA)
+#define FEDERATED_BTICK "`"
+#define FEDERATED_BTICK_LEN sizeof(FEDERATED_BTICK)
+#define FEDERATED_OPENPAREN " ("
+#define FEDERATED_OPENPAREN_LEN sizeof(FEDERATED_OPENPAREN)
+#define FEDERATED_CLOSEPAREN ") "
+#define FEDERATED_CLOSEPAREN_LEN sizeof(FEDERATED_CLOSEPAREN)
+#define FEDERATED_NE " != "
+#define FEDERATED_NE_LEN sizeof(FEDERATED_NE)
+#define FEDERATED_GT " > "
+#define FEDERATED_GT_LEN sizeof(FEDERATED_GT)
+#define FEDERATED_LT " < "
+#define FEDERATED_LT_LEN sizeof(FEDERATED_LT)
+#define FEDERATED_LE " <= "
+#define FEDERATED_LE_LEN sizeof(FEDERATED_LE)
+#define FEDERATED_GE " >= "
+#define FEDERATED_GE_LEN sizeof(FEDERATED_GE)
+#define FEDERATED_EQ " = "
+#define FEDERATED_EQ_LEN sizeof(FEDERATED_EQ)
+#define FEDERATED_FALSE " 1=0"
+#define FEDERATED_FALSE_LEN sizeof(FEDERATED_FALSE)
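
One thing worth keeping in mind when using the *_LEN macros above: sizeof on a string literal counts the trailing '\0', so each *_LEN value is one larger than the visible text, which is why ha_federated.cc trims with strlen(FEDERATED_COMMA) rather than FEDERATED_COMMA_LEN. A tiny standalone check, with an invented stand-in macro:

#include <cstdio>
#include <cstring>

#define SAMPLE_COMMA ", "                 /* stand-in for FEDERATED_COMMA */

int main()
{
  std::printf("sizeof= %u  strlen= %u\n",
              (unsigned) sizeof(SAMPLE_COMMA),    /* 3: counts the '\0' */
              (unsigned) strlen(SAMPLE_COMMA));   /* 2: visible characters */
  return 0;
}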
/*
  FEDERATED_SHARE is a structure that will be shared among all open handlers.
  The example implements the minimum of what you will probably need.
*/
typedef struct st_federated_share {
- char *table_name;
- char *table_base_name;
- /*
+ /*
the primary select query to be used in rnd_init
*/
char *select_query;
@@ -47,11 +126,12 @@ typedef struct st_federated_share {
char *username;
char *password;
char *database;
+ char *table_name;
char *table;
char *socket;
char *sport;
- int port;
- uint table_name_length,table_base_name_length,use_count;
+ ushort port;
+ uint table_name_length, use_count;
pthread_mutex_t mutex;
THR_LOCK lock;
} FEDERATED_SHARE;
@@ -63,8 +143,8 @@ class ha_federated: public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
FEDERATED_SHARE *share; /* Shared lock info */
- MYSQL *mysql;
- MYSQL_RES *result;
+ MYSQL *mysql; /* MySQL connection */
+ MYSQL_RES *stored_result;
bool scan_flag;
uint ref_length;
uint fetch_num; // stores the fetch num
@@ -77,14 +157,12 @@ private:
*/
uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row);
bool create_where_from_key(String *to, KEY *key_info,
- const byte *key, uint key_length);
+ const key_range *start_key,
+ const key_range *end_key,
+ bool records_in_range);
public:
- ha_federated(TABLE *table): handler(table),
- mysql(0), result(0), scan_flag(0),
- ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0)
- {
- }
+ ha_federated(TABLE *table_arg);
~ha_federated()
{
}
@@ -94,20 +172,20 @@ public:
The name of the index type that will be used for display
don't implement this method unless you really have indexes
*/
+ // perhaps get index type
const char *index_type(uint inx) { return "REMOTE"; }
const char **bas_ext() const;
/*
This is a list of flags that says what the storage engine
implements. The current table flags are documented in
handler.h
- Serg: Double check these (Brian)
- // FIX add blob support
*/
ulong table_flags() const
{
- return (HA_TABLE_SCAN_ON_INDEX | HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
- HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS);
+ /* fix server to be able to get remote server table flags */
+ return (HA_NOT_EXACT_COUNT |
+ HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ |
+ HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS| HA_NO_PREFIX_CHAR_KEYS);
}
/*
This is a bitmap of flags that says how the storage engine
@@ -119,29 +197,45 @@ public:
If all_parts it's set, MySQL want to know the flags for the combined
index up to and including 'part'.
*/
+ /* fix server to be able to get remote server index flags */
ulong index_flags(uint inx, uint part, bool all_parts) const
{
- return (HA_READ_NEXT);
- // return (HA_READ_NEXT | HA_ONLY_WHOLE_INDEX);
+ return (HA_READ_NEXT | HA_READ_RANGE | HA_READ_AFTER_KEY);
}
uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
uint max_supported_keys() const { return MAX_KEY; }
- uint max_supported_key_parts() const { return 1024; }
- uint max_supported_key_length() const { return 1024; }
+ uint max_supported_key_parts() const { return MAX_REF_PARTS; }
+ uint max_supported_key_length() const { return MAX_KEY_LENGTH; }
/*
Called in test_quick_select to determine if indexes should be used.
+    Normally, we need to know the number of blocks. For federated we need
+    to know the number of blocks on the remote side, and the number of
+    packets and blocks on the network side (?)
+    Talk to Kostja about this - how to get the number of rows * ...
+    disk scan time on other side (block size, size of the row) + network time ...
+    The reason for "records * 1000" is that such a large number forces
+    this to use indexes
*/
- virtual double scan_time()
+ double scan_time()
{
- DBUG_PRINT("ha_federated::scan_time",
- ("rows %d", records)); return (double)(records*2);
+ DBUG_PRINT("info",
+ ("records %d", records));
+ return (double)(records*1000);
}
/*
The next method will never be called if you do not implement indexes.
*/
- virtual double read_time(uint index, uint ranges, ha_rows rows)
- { return (double) rows / 20.0+1; }
+ double read_time(uint index, uint ranges, ha_rows rows)
+ {
+ /*
+      Per Brian, this number is bogus, but this method must be implemented,
+      and at a later date, he intends to document this issue for handler code
+ */
+ return (double) rows / 20.0+1;
+ }
+ const key_map *keys_to_use_for_scanning() { return &key_map_full; }
/*
    Everything below are methods that we implement in ha_federated.cc.
@@ -151,16 +245,20 @@ public:
int open(const char *name, int mode, uint test_if_locked); // required
int close(void); // required
- int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
+ int write_row(byte *buf);
+ int update_row(const byte *old_data, byte *new_data);
+ int delete_row(const byte *buf);
int index_init(uint keynr);
- int index_read(byte * buf, const byte * key,
+ int index_read(byte *buf, const byte *key,
uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
+ int index_read_idx(byte *buf, uint idx, const byte *key,
uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
+ int index_next(byte *buf);
int index_end();
+ int read_range_first(const key_range *start_key,
+ const key_range *end_key,
+ bool eq_range, bool sorted);
+ int read_range_next();
/*
unlike index_init(), rnd_init() can be called two times
without rnd_end() in between (it only makes sense if scan=1).
@@ -172,13 +270,18 @@ public:
int rnd_init(bool scan); //required
int rnd_end();
int rnd_next(byte *buf); //required
- int rnd_pos(byte * buf, byte *pos); //required
+ int rnd_pos(byte *buf, byte *pos); //required
void position(const byte *record); //required
void info(uint); //required
+ int repair(THD* thd, HA_CHECK_OPT* check_opt);
+ int optimize(THD* thd, HA_CHECK_OPT* check_opt);
+
int delete_all_rows(void);
int create(const char *name, TABLE *form,
HA_CREATE_INFO *create_info); //required
+ ha_rows records_in_range(uint inx, key_range *start_key,
+ key_range *end_key);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type); //required
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 6e609a94be3..94ee3f8e656 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -23,9 +23,36 @@
#include <myisampack.h>
#include "ha_heap.h"
+static handlerton heap_hton= {
+ "MEMORY",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
/*****************************************************************************
** HEAP tables
*****************************************************************************/
+
+ha_heap::ha_heap(TABLE *table_arg)
+ :handler(&heap_hton, table_arg), file(0), records_changed(0),
+ key_stats_ok(0)
+{}
+
+
static const char *ha_heap_exts[] = {
NullS
};
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index 7a97c727049..7c4227e952c 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -31,8 +31,7 @@ class ha_heap: public handler
uint records_changed;
bool key_stats_ok;
public:
- ha_heap(TABLE *table): handler(table), file(0), records_changed(0),
- key_stats_ok(0) {}
+ ha_heap(TABLE *table);
~ha_heap() {}
const char *table_type() const
{
@@ -44,6 +43,8 @@ public:
return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ? "BTREE" :
"HASH");
}
+ /* Rows also use a fixed-size format */
+ enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
const char **bas_ext() const;
ulong table_flags() const
{
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index e33a0939e27..d5d79b51f60 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -215,7 +215,11 @@ static handlerton innobase_hton = {
innobase_xa_prepare, /* prepare */
innobase_xa_recover, /* recover */
innobase_commit_by_xid, /* commit_by_xid */
- innobase_rollback_by_xid /* rollback_by_xid */
+ innobase_rollback_by_xid, /* rollback_by_xid */
+ innobase_create_cursor_view,
+ innobase_set_cursor_view,
+ innobase_close_cursor_view,
+ HTON_NO_FLAGS
};
/*********************************************************************
@@ -534,7 +538,7 @@ innobase_mysql_prepare_print_arbitrary_thd(void)
}
/*****************************************************************
-Relases the mutex reserved by innobase_mysql_prepare_print_arbitrary_thd().
+Releases the mutex reserved by innobase_mysql_prepare_print_arbitrary_thd().
NOTE that /mysql/innobase/lock/lock0lock.c must contain the prototype for this
function! */
extern "C"
@@ -765,6 +769,24 @@ check_trx_exists(
return(trx);
}
+
+/*************************************************************************
+Construct ha_innobase handler. */
+
+ha_innobase::ha_innobase(TABLE *table_arg)
+ :handler(&innobase_hton, table_arg),
+ int_table_flags(HA_REC_NOT_IN_SEQ |
+ HA_NULL_IN_KEY |
+ HA_CAN_INDEX_BLOBS |
+ HA_CAN_SQL_HANDLER |
+ HA_NOT_EXACT_COUNT |
+ HA_PRIMARY_KEY_IN_READ_INDEX |
+ HA_TABLE_SCAN_ON_INDEX),
+ last_dup_key((uint) -1),
+ start_of_scan(0),
+ num_write_row(0)
+{}
+
/*************************************************************************
Updates the user_thd field in a handle and also allocates a new InnoDB
transaction handle if needed, and updates the transaction fields in the
@@ -1678,7 +1700,7 @@ innobase_store_binlog_offset_and_flush_log(
/* Commits the mini-transaction */
mtr_commit(&mtr);
- /* Syncronous flush of the log buffer to disk */
+ /* Synchronous flush of the log buffer to disk */
log_buffer_flush_to_disk();
}
#endif
@@ -2110,15 +2132,34 @@ innobase_savepoint(
/*********************************************************************
Frees a possible InnoDB trx object associated with the current THD. */
-
-static int
+static
+int
innobase_close_connection(
/*======================*/
/* out: 0 or error number */
THD* thd) /* in: handle to the MySQL thread of the user
whose resources should be free'd */
{
- trx_free_for_mysql((trx_t*)thd->ha_data[innobase_hton.slot]);
+ trx_t* trx;
+
+ trx = (trx_t*)thd->ha_data[innobase_hton.slot];
+
+ ut_a(trx);
+
+ if (trx->conc_state != TRX_NOT_STARTED) {
+ ut_print_timestamp(stderr);
+
+ fprintf(stderr,
+" InnoDB: Warning: MySQL is closing a connection\n"
+"InnoDB: that has an active InnoDB transaction. We roll back that\n"
+"InnoDB: transaction. %lu row modifications to roll back.\n",
+ (ulong)trx->undo_no.low);
+ }
+
+ innobase_rollback_trx(trx);
+
+ trx_free_for_mysql(trx);
+
return(0);
}
@@ -2812,7 +2853,7 @@ ha_innobase::store_key_val_for_row(
/* All indexes on BLOB and TEXT are column prefix
indexes, and we may need to truncate the data to be
- stored in the kay value: */
+ stored in the key value: */
if (blob_len > key_part->length) {
blob_len = key_part->length;
@@ -5484,7 +5525,7 @@ ha_innobase::update_table_comment(
external_lock(). To be safe, update the thd of the current table
handle. */
- if(length > 64000 - 3) {
+ if (length > 64000 - 3) {
return((char*)comment); /* string too long */
}
@@ -5522,7 +5563,7 @@ ha_innobase::update_table_comment(
if (str) {
char* pos = str + length;
- if(length) {
+ if (length) {
memcpy(str, comment, length);
*pos++ = ';';
*pos++ = ' ';
@@ -5580,7 +5621,7 @@ ha_innobase::get_foreign_key_create_info(void)
flen = ftell(file);
if (flen < 0) {
flen = 0;
- } else if(flen > 64000 - 1) {
+ } else if (flen > 64000 - 1) {
flen = 64000 - 1;
}
@@ -5857,7 +5898,7 @@ ha_innobase::start_stmt(
innobase_release_stat_resources(trx);
if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
- && trx->read_view) {
+ && trx->global_read_view) {
/* At low transaction isolation levels we let
each consistent read set its own snapshot */
@@ -6078,7 +6119,7 @@ ha_innobase::external_lock(
}
} else {
if (trx->isolation_level <= TRX_ISO_READ_COMMITTED
- && trx->read_view) {
+ && trx->global_read_view) {
/* At low transaction isolation levels we let
each consistent read set its own snapshot */
@@ -7095,7 +7136,7 @@ int
innobase_rollback_by_xid(
/*=====================*/
/* out: 0 or error number */
- XID *xid) /* in: X/Open XA transaction idenfification */
+ XID *xid) /* in: X/Open XA transaction identification */
{
trx_t* trx;
@@ -7108,4 +7149,48 @@ innobase_rollback_by_xid(
}
}
+/***********************************************************************
+Create a consistent view for a cursor based on current transaction
+which is created if the corresponding MySQL thread still lacks one.
+This consistent view is then used inside of MySQL when accessing records
+using a cursor. */
+
+void*
+innobase_create_cursor_view(void)
+/*=============================*/
+ /* out: Pointer to cursor view or NULL */
+{
+ return(read_cursor_view_create_for_mysql(
+ check_trx_exists(current_thd)));
+}
+
+/***********************************************************************
+Close the given consistent cursor view of a transaction and restore
+global read view to a transaction read view. Transaction is created if the
+corresponding MySQL thread still lacks one. */
+
+void
+innobase_close_cursor_view(
+/*=======================*/
+ void* curview)/* in: Consistent read view to be closed */
+{
+ read_cursor_view_close_for_mysql(check_trx_exists(current_thd),
+ (cursor_view_t*) curview);
+}
+
+/***********************************************************************
+Set the given consistent cursor view to a transaction which is created
+if the corresponding MySQL thread still lacks one. If the given
+consistent cursor view is NULL global read view of a transaction is
+restored to a transaction read view. */
+
+void
+innobase_set_cursor_view(
+/*=====================*/
+ void* curview)/* in: Consistent cursor view to be set */
+{
+ read_cursor_set_for_mysql(check_trx_exists(current_thd),
+ (cursor_view_t*) curview);
+}
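
The three hooks above are intended to be driven around a server-side cursor: capture a consistent view when the cursor is opened, install it for each fetch, and close it (restoring the transaction's own read view) when the cursor is dropped. A conceptual sketch of that call sequence follows; the Cursor holder type and the call sites are invented, and the NULL-means-restore convention comes from the function comments above, not from any additional API.

struct Cursor { void *read_view; };       /* invented holder type */

void cursor_open(Cursor *c)
{
  c->read_view= innobase_create_cursor_view();
}

void cursor_fetch(Cursor *c)
{
  innobase_set_cursor_view(c->read_view); /* read rows under the saved view */
  /* ... fetch rows ... */
  innobase_set_cursor_view(NULL);         /* restore the transaction's view */
}

void cursor_close(Cursor *c)
{
  innobase_close_cursor_view(c->read_view);
}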
+
#endif /* HAVE_INNOBASE_DB */
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 90cae3998ed..3bc1fc5b2c8 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -81,19 +81,7 @@ class ha_innobase: public handler
/* Init values for the class: */
public:
- ha_innobase(TABLE *table): handler(table),
- int_table_flags(HA_REC_NOT_IN_SEQ |
- HA_NULL_IN_KEY |
- HA_CAN_INDEX_BLOBS |
- HA_CAN_SQL_HANDLER |
- HA_NOT_EXACT_COUNT |
- HA_PRIMARY_KEY_IN_READ_INDEX |
- HA_TABLE_SCAN_ON_INDEX),
- last_dup_key((uint) -1),
- start_of_scan(0),
- num_write_row(0)
- {
- }
+ ha_innobase(TABLE *table_arg);
~ha_innobase() {}
/*
Get the row type from the storage engine. If this method returns
@@ -314,7 +302,7 @@ which is in the prepared state */
int innobase_rollback_by_xid(
/* out: 0 or error number */
- XID *xid); /* in : X/Open XA Transaction Idenfification */
+ XID *xid); /* in : X/Open XA Transaction Identification */
int innobase_xa_end(THD *thd);
@@ -322,3 +310,35 @@ int innobase_xa_end(THD *thd);
int innobase_repl_report_sent_binlog(THD *thd, char *log_file_name,
my_off_t end_offset);
+
+/***********************************************************************
+Create a consistent view for a cursor based on current transaction
+which is created if the corresponding MySQL thread still lacks one.
+This consistent view is then used inside of MySQL when accessing records
+using a cursor. */
+
+void*
+innobase_create_cursor_view(void);
+/*=============================*/
+ /* out: Pointer to cursor view or NULL */
+
+/***********************************************************************
+Close the given consistent cursor view of a transaction and restore
+global read view to a transaction read view. Transaction is created if the
+corresponding MySQL thread still lacks one. */
+
+void
+innobase_close_cursor_view(
+/*=======================*/
+ void* curview); /* in: Consistent read view to be closed */
+
+/***********************************************************************
+Set the given consistent cursor view to a transaction which is created
+if the corresponding MySQL thread still lacks one. If the given
+consistent cursor view is NULL global read view of a transaction is
+restored to a transaction read view. */
+
+void
+innobase_set_cursor_view(
+/*=====================*/
+ void* curview); /* in: Consistent read view to be closed */
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 0d9c32adbfa..8f3970d69e6 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -44,6 +44,32 @@ TYPELIB myisam_recover_typelib= {array_elements(myisam_recover_names)-1,"",
** MyISAM tables
*****************************************************************************/
+/* MyISAM handlerton */
+
+static handlerton myisam_hton= {
+ "MyISAM",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ /*
+ MyISAM doesn't support transactions and doesn't have
+ transaction-dependent context: cursors can survive a commit.
+ */
+ HTON_NO_FLAGS
+};
+
// collect errors printed by mi_check routines
static void mi_check_print_msg(MI_CHECK *param, const char* msg_type,
@@ -123,6 +149,17 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...)
}
+
+ha_myisam::ha_myisam(TABLE *table_arg)
+ :handler(&myisam_hton, table_arg), file(0),
+ int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
+ HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
+ HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
+ HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD),
+ can_enable_indexes(1)
+{}
+
+
static const char *ha_myisam_exts[] = {
".MYI",
".MYD",
@@ -602,7 +639,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
!(share->state.changed & STATE_NOT_OPTIMIZED_KEYS))))
{
ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ?
- ((ulonglong) 1L << share->base.keys)-1 :
+ mi_get_mask_all_keys_active(share->base.keys) :
share->state.key_map);
uint testflag=param.testflag;
if (mi_test_if_sort_rep(file,file->state->records,key_map,0) &&
@@ -903,7 +940,7 @@ int ha_myisam::enable_indexes(uint mode)
{
int error;
- if (file->s->state.key_map == set_bits(ulonglong, file->s->base.keys))
+ if (mi_is_all_keys_active(file->s->state.key_map, file->s->base.keys))
{
/* All indexes are enabled already. */
return 0;
@@ -1002,8 +1039,8 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE))
mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size);
- can_enable_indexes= (file->s->state.key_map ==
- set_bits(ulonglong, file->s->base.keys));
+ can_enable_indexes= mi_is_all_keys_active(file->s->state.key_map,
+ file->s->base.keys);
if (!(specialflag & SPECIAL_SAFE_MODE))
{
@@ -1256,7 +1293,7 @@ void ha_myisam::info(uint flag)
share->db_options_in_use= info.options;
block_size= myisam_block_size;
share->keys_in_use.set_prefix(share->keys);
- share->keys_in_use.intersect(info.key_map);
+ share->keys_in_use.intersect_extended(info.key_map);
share->keys_for_keyread.intersect(share->keys_in_use);
share->db_record_offset= info.record_offset;
if (share->key_parts)
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index bbd9721f8e2..ca684463311 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -43,13 +43,7 @@ class ha_myisam: public handler
int repair(THD *thd, MI_CHECK &param, bool optimize);
public:
- ha_myisam(TABLE *table): handler(table), file(0),
- int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
- HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
- HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD),
- can_enable_indexes(1)
- {}
+ ha_myisam(TABLE *table_arg);
~ha_myisam() {}
const char *table_type() const { return "MyISAM"; }
const char *index_type(uint key_number);
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 5d3f379081c..f92717e11eb 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -32,6 +32,33 @@
** MyISAM MERGE tables
*****************************************************************************/
+/* MyISAM MERGE handlerton */
+
+static handlerton myisammrg_hton= {
+ "MRG_MyISAM",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
+
+ha_myisammrg::ha_myisammrg(TABLE *table_arg)
+ :handler(&myisammrg_hton, table_arg), file(0)
+{}
+
static const char *ha_myisammrg_exts[] = {
".MRG",
NullS
diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h
index 7348096b695..c762b7c286e 100644
--- a/sql/ha_myisammrg.h
+++ b/sql/ha_myisammrg.h
@@ -28,7 +28,7 @@ class ha_myisammrg: public handler
MYRG_INFO *file;
public:
- ha_myisammrg(TABLE *table): handler(table), file(0) {}
+ ha_myisammrg(TABLE *table_arg);
~ha_myisammrg() {}
const char *table_type() const { return "MRG_MyISAM"; }
const char **bas_ext() const;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 27019c94284..37a3864a217 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -62,7 +62,11 @@ static handlerton ndbcluster_hton = {
NULL, /* prepare */
NULL, /* recover */
NULL, /* commit_by_xid */
- NULL /* rollback_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
};
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
@@ -706,8 +710,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
blob_ptr= (char*)"";
}
- DBUG_PRINT("value", ("set blob ptr=%x len=%u",
- (unsigned)blob_ptr, blob_len));
+ DBUG_PRINT("value", ("set blob ptr=%p len=%u",
+ blob_ptr, blob_len));
DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
if (set_blob_value)
@@ -4174,7 +4178,7 @@ ulonglong ha_ndbcluster::get_auto_increment()
*/
ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
- handler(table_arg),
+ handler(&ndbcluster_hton, table_arg),
m_active_trans(NULL),
m_active_cursor(NULL),
m_table(NULL),
@@ -5445,7 +5449,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
Uint64 sum_commits= 0;
Uint64 sum_row_size= 0;
Uint64 sum_mem= 0;
- while((check= pOp->nextResult(TRUE, TRUE)) == 0)
+ while ((check= pOp->nextResult(TRUE, TRUE)) == 0)
{
sum_rows+= rows;
sum_commits+= commits;
@@ -5473,7 +5477,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
sum_mem, count));
DBUG_RETURN(0);
- } while(0);
+ } while (0);
if (pTrans)
ndb->closeTransaction(pTrans);
@@ -6349,12 +6353,14 @@ void ndb_serialize_cond(const Item *item, void *arg)
// result type
if (context->expecting(Item::FIELD_ITEM) &&
(context->expecting_field_result(field->result_type()) ||
- // Date and year can be written as strings
+ // Date and year can be written as string or int
((type == MYSQL_TYPE_TIME ||
type == MYSQL_TYPE_DATE ||
type == MYSQL_TYPE_YEAR ||
type == MYSQL_TYPE_DATETIME)
- ? context->expecting_field_result(STRING_RESULT) : true)) &&
+ ? (context->expecting_field_result(STRING_RESULT) ||
+ context->expecting_field_result(INT_RESULT))
+ : true)) &&
 // Bit fields not yet supported in scan filter
type != MYSQL_TYPE_BIT)
{
@@ -6422,8 +6428,8 @@ void ndb_serialize_cond(const Item *item, void *arg)
}
else
{
- DBUG_PRINT("info", ("Was not expecting field of type %u",
- field->result_type()));
+ DBUG_PRINT("info", ("Was not expecting field of type %u(%u)",
+ field->result_type(), type));
context->supported= FALSE;
}
}
@@ -6564,17 +6570,6 @@ void ndb_serialize_cond(const Item *item, void *arg)
context->expect(Item::FUNC_ITEM);
break;
}
- case Item_func::NOTLIKE_FUNC:
- {
- DBUG_PRINT("info", ("NOTLIKE_FUNC"));
- curr_cond->ndb_item= new Ndb_item(func_item->functype(),
- func_item);
- context->expect(Item::STRING_ITEM);
- context->expect(Item::FIELD_ITEM);
- context->expect_field_result(STRING_RESULT);
- context->expect(Item::FUNC_ITEM);
- break;
- }
case Item_func::ISNULL_FUNC:
{
DBUG_PRINT("info", ("ISNULL_FUNC"));
@@ -7017,7 +7012,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
switch ((negated) ?
Ndb_item::negate(cond->ndb_item->qualification.function_type)
: cond->ndb_item->qualification.function_type) {
- case Item_func::EQ_FUNC:
+ case NDB_EQ_FUNC:
{
if (!value || !field) break;
// Save value in right format for the field type
@@ -7031,7 +7026,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::NE_FUNC:
+ case NDB_NE_FUNC:
{
if (!value || !field) break;
// Save value in right format for the field type
@@ -7045,7 +7040,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::LT_FUNC:
+ case NDB_LT_FUNC:
{
if (!value || !field) break;
// Save value in right format for the field type
@@ -7071,7 +7066,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::LE_FUNC:
+ case NDB_LE_FUNC:
{
if (!value || !field) break;
// Save value in right format for the field type
@@ -7097,7 +7092,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::GE_FUNC:
+ case NDB_GE_FUNC:
{
if (!value || !field) break;
// Save value in right format for the field type
@@ -7123,7 +7118,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::GT_FUNC:
+ case NDB_GT_FUNC:
{
if (!value || !field) break;
// Save value in right format for the field type
@@ -7149,7 +7144,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::LIKE_FUNC:
+ case NDB_LIKE_FUNC:
{
if (!value || !field) break;
if ((value->qualification.value_type != Item::STRING_ITEM) &&
@@ -7168,12 +7163,12 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::NOTLIKE_FUNC:
+ case NDB_NOTLIKE_FUNC:
{
if (!value || !field) break;
if ((value->qualification.value_type != Item::STRING_ITEM) &&
(value->qualification.value_type != Item::VARBIN_ITEM))
- break;
+ break;
// Save value in right format for the field type
value->save_in_field(field);
DBUG_PRINT("info", ("Generating NOTLIKE filter: notlike(%d,%s,%d)",
@@ -7187,7 +7182,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
cond= cond->next->next->next;
DBUG_RETURN(0);
}
- case Item_func::ISNULL_FUNC:
+ case NDB_ISNULL_FUNC:
if (!field)
break;
DBUG_PRINT("info", ("Generating ISNULL filter"));
@@ -7195,7 +7190,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
DBUG_RETURN(1);
cond= cond->next->next;
DBUG_RETURN(0);
- case Item_func::ISNOTNULL_FUNC:
+ case NDB_ISNOTNULL_FUNC:
{
if (!field)
break;
@@ -7232,7 +7227,7 @@ ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
case NDB_FUNCTION:
{
switch (cond->ndb_item->qualification.function_type) {
- case Item_func::COND_AND_FUNC:
+ case NDB_COND_AND_FUNC:
{
level++;
DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NAND":"AND",
@@ -7244,7 +7239,7 @@ ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
cond= cond->next;
break;
}
- case Item_func::COND_OR_FUNC:
+ case NDB_COND_OR_FUNC:
{
level++;
DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NOR":"OR",
@@ -7256,7 +7251,7 @@ ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
cond= cond->next;
break;
}
- case Item_func::NOT_FUNC:
+ case NDB_NOT_FUNC:
{
DBUG_PRINT("info", ("Generating negated query"));
cond= cond->next;
@@ -7299,8 +7294,8 @@ ha_ndbcluster::build_scan_filter(Ndb_cond * &cond, NdbScanFilter *filter)
switch (cond->ndb_item->type) {
case NDB_FUNCTION:
switch (cond->ndb_item->qualification.function_type) {
- case Item_func::COND_AND_FUNC:
- case Item_func::COND_OR_FUNC:
+ case NDB_COND_AND_FUNC:
+ case NDB_COND_OR_FUNC:
simple_cond= FALSE;
break;
default:
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index b34f8dd063c..034bb9292e8 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -72,10 +72,28 @@ typedef enum ndb_item_type {
NDB_END_COND = 3 // End marker for condition group
} NDB_ITEM_TYPE;
+typedef enum ndb_func_type {
+ NDB_EQ_FUNC = 0,
+ NDB_NE_FUNC = 1,
+ NDB_LT_FUNC = 2,
+ NDB_LE_FUNC = 3,
+ NDB_GT_FUNC = 4,
+ NDB_GE_FUNC = 5,
+ NDB_ISNULL_FUNC = 6,
+ NDB_ISNOTNULL_FUNC = 7,
+ NDB_LIKE_FUNC = 8,
+ NDB_NOTLIKE_FUNC = 9,
+ NDB_NOT_FUNC = 10,
+ NDB_UNKNOWN_FUNC = 11,
+ NDB_COND_AND_FUNC = 12,
+ NDB_COND_OR_FUNC = 13,
+ NDB_UNSUPPORTED_FUNC = 14
+} NDB_FUNC_TYPE;
+
typedef union ndb_item_qualification {
Item::Type value_type;
- enum_field_types field_type; // Instead of Item::FIELD_ITEM
- Item_func::Functype function_type; // Instead of Item::FUNC_ITEM
+ enum_field_types field_type; // Instead of Item::FIELD_ITEM
+ NDB_FUNC_TYPE function_type; // Instead of Item::FUNC_ITEM
} NDB_ITEM_QUALIFICATION;
typedef struct ndb_item_field_value {
@@ -91,23 +109,31 @@ typedef union ndb_item_value {
struct negated_function_mapping
{
- Item_func::Functype pos_fun;
- Item_func::Functype neg_fun;
+ NDB_FUNC_TYPE pos_fun;
+ NDB_FUNC_TYPE neg_fun;
};
+/*
+ Define what functions can be negated in condition pushdown.
+ Note: these HAVE to be in the same order as the NDB_FUNC_TYPE enum definition
+*/
static const negated_function_mapping neg_map[]=
{
- {Item_func::EQ_FUNC, Item_func::NE_FUNC},
- {Item_func::NE_FUNC, Item_func::EQ_FUNC},
- {Item_func::LT_FUNC, Item_func::GE_FUNC},
- {Item_func::LE_FUNC, Item_func::GT_FUNC},
- {Item_func::GT_FUNC, Item_func::LE_FUNC},
- {Item_func::GE_FUNC, Item_func::LT_FUNC},
- {Item_func::LIKE_FUNC, Item_func::NOTLIKE_FUNC},
- {Item_func::NOTLIKE_FUNC, Item_func::LIKE_FUNC},
- {Item_func::ISNULL_FUNC, Item_func::ISNOTNULL_FUNC},
- {Item_func::ISNOTNULL_FUNC, Item_func::ISNULL_FUNC},
- {Item_func::UNKNOWN_FUNC, Item_func::NOT_FUNC}
+ {NDB_EQ_FUNC, NDB_NE_FUNC},
+ {NDB_NE_FUNC, NDB_EQ_FUNC},
+ {NDB_LT_FUNC, NDB_GE_FUNC},
+ {NDB_LE_FUNC, NDB_GT_FUNC},
+ {NDB_GT_FUNC, NDB_LE_FUNC},
+ {NDB_GE_FUNC, NDB_LT_FUNC},
+ {NDB_ISNULL_FUNC, NDB_ISNOTNULL_FUNC},
+ {NDB_ISNOTNULL_FUNC, NDB_ISNULL_FUNC},
+ {NDB_LIKE_FUNC, NDB_NOTLIKE_FUNC},
+ {NDB_NOTLIKE_FUNC, NDB_LIKE_FUNC},
+ {NDB_NOT_FUNC, NDB_UNSUPPORTED_FUNC},
+ {NDB_UNKNOWN_FUNC, NDB_UNSUPPORTED_FUNC},
+ {NDB_COND_AND_FUNC, NDB_UNSUPPORTED_FUNC},
+ {NDB_COND_OR_FUNC, NDB_UNSUPPORTED_FUNC},
+ {NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}
};
/*
@@ -162,14 +188,14 @@ class Ndb_item {
Ndb_item(Item_func::Functype func_type, const Item *item_value)
: type(NDB_FUNCTION)
{
- qualification.function_type= func_type;
+ qualification.function_type= item_func_to_ndb_func(func_type);
value.item= item_value;
value.arg_count= ((Item_func *) item_value)->argument_count();
};
Ndb_item(Item_func::Functype func_type, uint no_args)
: type(NDB_FUNCTION)
{
- qualification.function_type= func_type;
+ qualification.function_type= item_func_to_ndb_func(func_type);
value.arg_count= no_args;
};
~Ndb_item()
@@ -231,13 +257,30 @@ class Ndb_item {
((Item *)item)->save_in_field(field, false);
};
- static Item_func::Functype negate(Item_func::Functype fun)
+ static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun)
+ {
+ switch (fun) {
+ case (Item_func::EQ_FUNC): { return NDB_EQ_FUNC; }
+ case (Item_func::NE_FUNC): { return NDB_NE_FUNC; }
+ case (Item_func::LT_FUNC): { return NDB_LT_FUNC; }
+ case (Item_func::LE_FUNC): { return NDB_LE_FUNC; }
+ case (Item_func::GT_FUNC): { return NDB_GT_FUNC; }
+ case (Item_func::GE_FUNC): { return NDB_GE_FUNC; }
+ case (Item_func::ISNULL_FUNC): { return NDB_ISNULL_FUNC; }
+ case (Item_func::ISNOTNULL_FUNC): { return NDB_ISNOTNULL_FUNC; }
+ case (Item_func::LIKE_FUNC): { return NDB_LIKE_FUNC; }
+ case (Item_func::NOT_FUNC): { return NDB_NOT_FUNC; }
+ case (Item_func::UNKNOWN_FUNC): { return NDB_UNKNOWN_FUNC; }
+ case (Item_func::COND_AND_FUNC): { return NDB_COND_AND_FUNC; }
+ case (Item_func::COND_OR_FUNC): { return NDB_COND_OR_FUNC; }
+ default: { return NDB_UNSUPPORTED_FUNC; }
+ }
+ };
+
+ static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun)
{
- uint i;
- for (i=0;
- fun != neg_map[i].pos_fun &&
- neg_map[i].pos_fun != Item_func::UNKNOWN_FUNC;
- i++);
+ uint i= (uint) fun;
+ DBUG_ASSERT(fun == neg_map[i].pos_fun);
return neg_map[i].neg_fun;
};
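Reviewer note on the neg_map/negate() change above: the old negate() scanned neg_map for a matching pos_fun, while the new one indexes the table directly with the NDB_FUNC_TYPE value, so the table rows must appear in exactly enum order (that is what the comment and the DBUG_ASSERT enforce). A minimal standalone sketch of that invariant, with a shortened enum and table invented purely for illustration:

#include <cassert>
#include <cstddef>

/* Shortened stand-ins for the enum and table added in ha_ndbcluster.h. */
enum NDB_FUNC_TYPE { NDB_EQ_FUNC= 0, NDB_NE_FUNC, NDB_LT_FUNC, NDB_GE_FUNC,
                     NDB_UNSUPPORTED_FUNC };

struct negated_function_mapping { NDB_FUNC_TYPE pos_fun, neg_fun; };

static const negated_function_mapping neg_map[]=
{
  {NDB_EQ_FUNC,          NDB_NE_FUNC},
  {NDB_NE_FUNC,          NDB_EQ_FUNC},
  {NDB_LT_FUNC,          NDB_GE_FUNC},
  {NDB_GE_FUNC,          NDB_LT_FUNC},
  {NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC}
};

/* Same idea as Ndb_item::negate() after the patch: an O(1) lookup that is
   only valid while neg_map[i].pos_fun == i for every i. */
static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun)
{
  assert(neg_map[fun].pos_fun == fun);      /* table follows enum order */
  return neg_map[fun].neg_fun;
}

int main()
{
  /* One-time check of the ordering invariant over the whole table. */
  for (size_t i= 0; i < sizeof(neg_map)/sizeof(neg_map[0]); i++)
    assert(neg_map[i].pos_fun == (NDB_FUNC_TYPE) i);
  return negate(NDB_LT_FUNC) == NDB_GE_FUNC ? 0 : 1;
}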
diff --git a/sql/handler.cc b/sql/handler.cc
index a61dce35501..1f13f0d5e36 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -208,15 +208,8 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
case DB_TYPE_HASH:
return new ha_hash(table);
#endif
-#ifdef HAVE_ISAM
- case DB_TYPE_MRG_ISAM:
- return new ha_isammrg(table);
- case DB_TYPE_ISAM:
- return new ha_isam(table);
-#else
case DB_TYPE_MRG_ISAM:
return new ha_myisammrg(table);
-#endif
#ifdef HAVE_BERKELEY_DB
case DB_TYPE_BERKELEY_DB:
return new ha_berkeley(table);
@@ -605,7 +598,7 @@ int ha_commit_trans(THD *thd, bool all)
my_xid xid= thd->transaction.xid.get_my_xid();
DBUG_ENTER("ha_commit_trans");
- if (thd->transaction.in_sub_stmt)
+ if (thd->in_sub_stmt)
{
/*
Since we don't support nested statement transactions in 5.0,
@@ -634,6 +627,11 @@ int ha_commit_trans(THD *thd, bool all)
DBUG_RETURN(1);
}
DBUG_EXECUTE_IF("crash_commit_before", abort(););
+
+ /* Close all cursors that cannot survive COMMIT */
+ if (is_real_trans) /* not a statement commit */
+ thd->stmt_map.close_transient_cursors();
+
if (!trans->no_2pc && trans->nht > 1)
{
for (; *ht && !error; ht++)
@@ -719,7 +717,7 @@ int ha_rollback_trans(THD *thd, bool all)
THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
bool is_real_trans=all || thd->transaction.all.nht == 0;
DBUG_ENTER("ha_rollback_trans");
- if (thd->transaction.in_sub_stmt)
+ if (thd->in_sub_stmt)
{
/*
If we are inside stored function or trigger we should not commit or
@@ -735,6 +733,10 @@ int ha_rollback_trans(THD *thd, bool all)
#ifdef USING_TRANSACTIONS
if (trans->nht)
{
+ /* Close all cursors that cannot survive ROLLBACK */
+ if (is_real_trans) /* not a statement rollback */
+ thd->stmt_map.close_transient_cursors();
+
for (handlerton **ht=trans->ht; *ht; ht++)
{
int err;
@@ -2099,6 +2101,8 @@ int ha_discover(THD *thd, const char *db, const char *name,
int error= -1; // Table does not exist in any handler
DBUG_ENTER("ha_discover");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
+ if (is_prefix(name,tmp_file_prefix)) /* skip temporary tables */
+ DBUG_RETURN(error);
#ifdef HAVE_NDBCLUSTER_DB
if (have_ndbcluster == SHOW_OPTION_YES)
error= ndbcluster_discover(thd, db, name, frmblob, frmlen);
@@ -2434,6 +2438,7 @@ TYPELIB *ha_known_exts(void)
known_extensions_id= mysys_usage_id;
found_exts.push_back((char*) triggers_file_ext);
+ found_exts.push_back((char*) trigname_file_ext);
for (types= sys_table_types; types->type; types++)
{
if (*types->value == SHOW_OPTION_YES)
diff --git a/sql/handler.h b/sql/handler.h
index df906e284e7..276f50bde63 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -106,9 +106,11 @@
/*
Note: the following includes binlog and closing 0.
- so: innodb+bdb+ndb+binlog+0
+ so: innodb + bdb + ndb + binlog + myisam + myisammrg + archive +
+ example + csv + heap + blackhole + federated + 0
+ (yes, the sum is deliberately inaccurate)
*/
-#define MAX_HA 6
+#define MAX_HA 14
/*
Bits in index_ddl_flags(KEY *wanted_index)
@@ -349,8 +351,16 @@ typedef struct
int (*recover)(XID *xid_list, uint len);
int (*commit_by_xid)(XID *xid);
int (*rollback_by_xid)(XID *xid);
+ void *(*create_cursor_read_view)();
+ void (*set_cursor_read_view)(void *);
+ void (*close_cursor_read_view)(void *);
+ uint32 flags; /* global handler flags */
} handlerton;
+/* Possible flags of a handlerton */
+#define HTON_NO_FLAGS 0
+#define HTON_CLOSE_CURSORS_AT_COMMIT 1
+
typedef struct st_thd_trans
{
/* number of entries in the ht[] */
@@ -445,6 +455,7 @@ class handler :public Sql_alloc
virtual int rnd_end() { return 0; }
public:
+ const handlerton *ht; /* storage engine of this handler */
byte *ref; /* Pointer to current row */
byte *dupp_ref; /* Pointer to dupp row */
ulonglong data_file_length; /* Length off data file */
@@ -486,7 +497,8 @@ public:
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond;
- handler(TABLE *table_arg) :table(table_arg),
+ handler(const handlerton *ht_arg, TABLE *table_arg) :table(table_arg),
+ ht(ht_arg),
ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
delete_length(0), auto_increment_value(0),
records(0), deleted(0), mean_rec_length(0),
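The handlerton additions above (three cursor read-view hooks plus a flags word) are what lets generic code ask an engine whether its cursors can outlive COMMIT: MyISAM and MERGE pass NULL hooks and HTON_NO_FLAGS, while an engine whose cursors are tied to a transaction would set HTON_CLOSE_CURSORS_AT_COMMIT. A hedged, self-contained sketch of how such a flag test might look; the struct below is a cut-down stand-in invented for the example, not the real handlerton:

#include <cstdio>

/* Cut-down stand-in for the new handlerton members. */
struct handlerton_sketch
{
  const char *name;
  void *(*create_cursor_read_view)();
  void  (*set_cursor_read_view)(void *);
  void  (*close_cursor_read_view)(void *);
  unsigned int flags;                       /* global handler flags */
};

#define HTON_NO_FLAGS                0
#define HTON_CLOSE_CURSORS_AT_COMMIT 1

/* Dummy hooks standing in for what a transactional engine would provide. */
static void *dummy_create_view()      { return 0; }
static void  dummy_set_view(void *)   {}
static void  dummy_close_view(void *) {}

static handlerton_sketch txn_hton=
{
  "txn_engine",
  dummy_create_view, dummy_set_view, dummy_close_view,
  HTON_CLOSE_CURSORS_AT_COMMIT
};

static handlerton_sketch myisam_like_hton=
{
  "myisam_like", 0, 0, 0, HTON_NO_FLAGS    /* cursors survive COMMIT */
};

int main()
{
  const handlerton_sketch *htons[]= { &txn_hton, &myisam_like_hton };
  for (int i= 0; i < 2; i++)
  {
    if (htons[i]->flags & HTON_CLOSE_CURSORS_AT_COMMIT)
      std::printf("%s: close open cursors before COMMIT\n", htons[i]->name);
    else
      std::printf("%s: cursors may stay open across COMMIT\n", htons[i]->name);
  }
  return 0;
}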
diff --git a/sql/hostname.cc b/sql/hostname.cc
index 39223556024..3b1eeb63d37 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -130,15 +130,23 @@ void reset_host_errors(struct in_addr *in)
VOID(pthread_mutex_unlock(&hostname_cache->lock));
}
+/* Deal with systems that don't define INADDR_LOOPBACK */
+#ifndef INADDR_LOOPBACK
+#define INADDR_LOOPBACK 0x7f000001UL
+#endif
my_string ip_to_hostname(struct in_addr *in, uint *errors)
{
uint i;
host_entry *entry;
DBUG_ENTER("ip_to_hostname");
+ *errors=0;
+
+ /* We always treat the loopback address as "localhost". */
+ if (in->s_addr == htonl(INADDR_LOOPBACK)) // is expanded inline by gcc
+ DBUG_RETURN((char *)my_localhost);
/* Check first if we have name in cache */
- *errors=0;
if (!(specialflag & SPECIAL_NO_HOST_CACHE))
{
VOID(pthread_mutex_lock(&hostname_cache->lock));
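The loopback shortcut added to ip_to_hostname() avoids both the host cache and a reverse DNS lookup for 127.0.0.1 connections, and the fallback #define covers platforms whose headers lack INADDR_LOOPBACK. A small standalone sketch of the same check; everything except INADDR_LOOPBACK and htonl() is invented for the example:

#include <netinet/in.h>   /* struct in_addr, INADDR_LOOPBACK (usually) */
#include <arpa/inet.h>    /* htonl() */
#include <cstdio>

#ifndef INADDR_LOOPBACK
#define INADDR_LOOPBACK 0x7f000001UL
#endif

/* True for 127.0.0.1: the patch then returns "localhost" immediately. */
static bool is_loopback(const struct in_addr *in)
{
  return in->s_addr == htonl(INADDR_LOOPBACK);
}

int main()
{
  struct in_addr addr;
  addr.s_addr= htonl(INADDR_LOOPBACK);
  std::printf("%s\n", is_loopback(&addr) ? "localhost" : "needs lookup");
  return 0;
}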
diff --git a/sql/item.cc b/sql/item.cc
index 0a6c87e598c..ae4cbadc01e 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -638,6 +638,38 @@ Item *Item_num::safe_charset_converter(CHARSET_INFO *tocs)
}
+Item *Item_static_int_func::safe_charset_converter(CHARSET_INFO *tocs)
+{
+ Item_string *conv;
+ char buf[64];
+ String *s, tmp(buf, sizeof(buf), &my_charset_bin);
+ s= val_str(&tmp);
+ if ((conv= new Item_static_string_func(func_name, s->ptr(), s->length(),
+ s->charset())))
+ {
+ conv->str_value.copy();
+ conv->str_value.mark_as_const();
+ }
+ return conv;
+}
+
+
+Item *Item_static_float_func::safe_charset_converter(CHARSET_INFO *tocs)
+{
+ Item_string *conv;
+ char buf[64];
+ String *s, tmp(buf, sizeof(buf), &my_charset_bin);
+ s= val_str(&tmp);
+ if ((conv= new Item_static_string_func(func_name, s->ptr(), s->length(),
+ s->charset())))
+ {
+ conv->str_value.copy();
+ conv->str_value.mark_as_const();
+ }
+ return conv;
+}
+
+
Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
{
Item_string *conv;
@@ -663,6 +695,33 @@ Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
}
+Item *Item_static_string_func::safe_charset_converter(CHARSET_INFO *tocs)
+{
+ Item_string *conv;
+ uint conv_errors;
+ String tmp, cstr, *ostr= val_str(&tmp);
+ cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
+ if (conv_errors ||
+ !(conv= new Item_static_string_func(func_name,
+ cstr.ptr(), cstr.length(),
+ cstr.charset(),
+ collation.derivation)))
+ {
+ /*
+ Safe conversion is not possible (or EOM).
+ We could not convert a string into the requested character set
+ without data loss. The target charset does not cover all the
+ characters from the string. Operation cannot be done correctly.
+ */
+ return NULL;
+ }
+ conv->str_value.copy();
+ /* Ensure that no one is going to change the result string */
+ conv->str_value.mark_as_const();
+ return conv;
+}
+
+
bool Item_string::eq(const Item *item, bool binary_cmp) const
{
if (type() == item->type() && item->basic_const_item())
@@ -957,14 +1016,18 @@ bool DTCollation::aggregate(DTCollation &dt, uint flags)
; // Do nothing
}
else if ((flags & MY_COLL_ALLOW_SUPERSET_CONV) &&
- derivation < dt.derivation &&
- collation->state & MY_CS_UNICODE)
+ collation->state & MY_CS_UNICODE &&
+ (derivation < dt.derivation ||
+ (derivation == dt.derivation &&
+ !(dt.collation->state & MY_CS_UNICODE))))
{
// Do nothing
}
else if ((flags & MY_COLL_ALLOW_SUPERSET_CONV) &&
- dt.derivation < derivation &&
- dt.collation->state & MY_CS_UNICODE)
+ dt.collation->state & MY_CS_UNICODE &&
+ (dt.derivation < derivation ||
+ (dt.derivation == derivation &&
+ !(collation->state & MY_CS_UNICODE))))
{
set(dt);
}
@@ -1023,6 +1086,176 @@ bool DTCollation::aggregate(DTCollation &dt, uint flags)
return 0;
}
+/******************************/
+static
+void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname)
+{
+ my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0),
+ c1.collation->name,c1.derivation_name(),
+ c2.collation->name,c2.derivation_name(),
+ fname);
+}
+
+
+static
+void my_coll_agg_error(DTCollation &c1, DTCollation &c2, DTCollation &c3,
+ const char *fname)
+{
+ my_error(ER_CANT_AGGREGATE_3COLLATIONS,MYF(0),
+ c1.collation->name,c1.derivation_name(),
+ c2.collation->name,c2.derivation_name(),
+ c3.collation->name,c3.derivation_name(),
+ fname);
+}
+
+
+static
+void my_coll_agg_error(Item** args, uint count, const char *fname)
+{
+ if (count == 2)
+ my_coll_agg_error(args[0]->collation, args[1]->collation, fname);
+ else if (count == 3)
+ my_coll_agg_error(args[0]->collation, args[1]->collation,
+ args[2]->collation, fname);
+ else
+ my_error(ER_CANT_AGGREGATE_NCOLLATIONS,MYF(0),fname);
+}
+
+
+bool agg_item_collations(DTCollation &c, const char *fname,
+ Item **av, uint count, uint flags)
+{
+ uint i;
+ c.set(av[0]->collation);
+ for (i= 1; i < count; i++)
+ {
+ if (c.aggregate(av[i]->collation, flags))
+ {
+ my_coll_agg_error(av, count, fname);
+ return TRUE;
+ }
+ }
+ if ((flags & MY_COLL_DISALLOW_NONE) &&
+ c.derivation == DERIVATION_NONE)
+ {
+ my_coll_agg_error(av, count, fname);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+bool agg_item_collations_for_comparison(DTCollation &c, const char *fname,
+ Item **av, uint count, uint flags)
+{
+ return (agg_item_collations(c, fname, av, count,
+ flags | MY_COLL_DISALLOW_NONE));
+}
+
+
+/*
+ Collect arguments' character sets together.
+ We allow to apply automatic character set conversion in some cases.
+ The conditions when conversion is possible are:
+ - arguments A and B have different charsets
+ - A wins according to coercibility rules
+ (i.e. a column is stronger than a string constant,
+ an explicit COLLATE clause is stronger than a column)
+ - the character set of A is either a superset of the character set of B,
+ or B is a string constant which can be converted into the
+ character set of A without data loss.
+
+ If all of the above is true, then it's possible to convert
+ B into the character set of A, and then compare according
+ to the collation of A.
+
+ For functions with more than two arguments:
+
+ collect(A,B,C) ::= collect(collect(A,B),C)
+*/
+
+bool agg_item_charsets(DTCollation &coll, const char *fname,
+ Item **args, uint nargs, uint flags)
+{
+ Item **arg, **last, *safe_args[2];
+ if (agg_item_collations(coll, fname, args, nargs, flags))
+ return TRUE;
+
+ /*
+ For better error reporting: save the first and the second argument.
+ We need this only if the number of args is 3 or 2:
+ - for a longer argument list, "Illegal mix of collations"
+ doesn't display each argument's characteristics.
+ - if nargs is 1, then this error cannot happen.
+ */
+ if (nargs >=2 && nargs <= 3)
+ {
+ safe_args[0]= args[0];
+ safe_args[1]= args[1];
+ }
+
+ THD *thd= current_thd;
+ Query_arena *arena, backup;
+ bool res= FALSE;
+ /*
+ In case we're in statement prepare, create conversion item
+ in its memory: it will be reused on each execute.
+ */
+ arena= thd->change_arena_if_needed(&backup);
+
+ for (arg= args, last= args + nargs; arg < last; arg++)
+ {
+ Item* conv;
+ uint32 dummy_offset;
+ if (!String::needs_conversion(0, coll.collation,
+ (*arg)->collation.collation,
+ &dummy_offset))
+ continue;
+
+ if (!(conv= (*arg)->safe_charset_converter(coll.collation)))
+ {
+ if (nargs >=2 && nargs <= 3)
+ {
+ /* restore the original arguments for better error message */
+ args[0]= safe_args[0];
+ args[1]= safe_args[1];
+ }
+ my_coll_agg_error(args, nargs, fname);
+ res= TRUE;
+ break; // we cannot return here, we need to restore "arena".
+ }
+ if ((*arg)->type() == Item::FIELD_ITEM)
+ ((Item_field *)(*arg))->no_const_subst= 1;
+ /*
+ If in statement prepare, then we create a converter for two
+ constant items, do it once and then reuse it.
+ If we're in execution of a prepared statement, arena is NULL,
+ and the conv was created in runtime memory. This can be
+ the case only if the argument is a parameter marker ('?'),
+ because for all true constants the charset converter has already
+ been created in prepare. In this case register the change for
+ rollback.
+ */
+ if (arena)
+ *arg= conv;
+ else
+ thd->change_item_tree(arg, conv);
+ /*
+ We do not check conv->fixed, because Item_func_conv_charset, which can
+ be returned by safe_charset_converter, can't be fixed at creation
+ */
+ conv->fix_fields(thd, arg);
+ }
+ if (arena)
+ thd->restore_backup_item_arena(arena, &backup);
+ return res;
+}
+
+
+
+
+/**********************************************/
+
Item_field::Item_field(Field *f)
:Item_ident(0, NullS, *f->table_name, f->field_name),
item_equal(0), no_const_subst(0),
@@ -1959,6 +2192,7 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry)
void Item_param::reset()
{
+ DBUG_ENTER("Item_param::reset");
/* Shrink string buffer if it's bigger than max possible CHAR column */
if (str_value.alloced_length() > MAX_CHAR_WIDTH)
str_value.free();
@@ -1983,6 +2217,7 @@ void Item_param::reset()
DBUG_ASSERTS(state != NO_VALUE) in all Item_param::get_*
methods).
*/
+ DBUG_VOID_RETURN;
}
@@ -3264,7 +3499,12 @@ void Item::make_field(Send_field *tmp_field)
void Item_empty_string::make_field(Send_field *tmp_field)
{
- init_make_field(tmp_field, MYSQL_TYPE_VARCHAR);
+ enum_field_types type= FIELD_TYPE_VAR_STRING;
+ if (max_length >= 16777216)
+ type= FIELD_TYPE_LONG_BLOB;
+ else if (max_length >= 65536)
+ type= FIELD_TYPE_MEDIUM_BLOB;
+ init_make_field(tmp_field, type);
}
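Two things are worth calling out in the item.cc hunk: the agg_item_* helpers are now free functions (so code outside Item_func, such as GROUP_CONCAT in item_sum.cc below, can reuse them), and DTCollation::aggregate() now prefers the Unicode side even when both operands have the same derivation, as long as the other side is not Unicode. A simplified sketch of just that decision rule; struct and function names are invented, and a lower derivation value is treated as higher precedence, matching the server's coercibility ordering:

#include <cstdio>

/* Simplified model of one branch of DTCollation::aggregate() with
   MY_COLL_ALLOW_SUPERSET_CONV set. */
struct Side { int derivation; bool is_unicode; };

/* Side A keeps its collation (and B gets converted) when A is Unicode and
   is at least as strong as B, with ties broken against a non-Unicode B. */
static bool a_wins(const Side &a, const Side &b)
{
  return a.is_unicode &&
         (a.derivation < b.derivation ||
          (a.derivation == b.derivation && !b.is_unicode));
}

int main()
{
  Side utf8_col  = { 2 /* implicit */, true  };
  Side latin1_col= { 2 /* implicit */, false };
  /* Equal derivations: previously neither side won and the mix was an
     error; with this patch the Unicode side is chosen. */
  std::printf("%s\n", a_wins(utf8_col, latin1_col)
                      ? "utf8 wins, convert latin1 operand"
                      : "illegal mix of collations");
  return 0;
}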
diff --git a/sql/item.h b/sql/item.h
index a10934e40fb..f0ffb160553 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -767,6 +767,15 @@ public:
};
+bool agg_item_collations(DTCollation &c, const char *name,
+ Item **items, uint nitems, uint flags= 0);
+bool agg_item_collations_for_comparison(DTCollation &c, const char *name,
+ Item **items, uint nitems,
+ uint flags= 0);
+bool agg_item_charsets(DTCollation &c, const char *name,
+ Item **items, uint nitems, uint flags= 0);
+
+
class Item_num: public Item
{
public:
@@ -1132,6 +1141,7 @@ public:
Item_static_int_func(const char *str_arg, longlong i, uint length)
:Item_int(NullS, i, length), func_name(str_arg)
{}
+ Item *safe_charset_converter(CHARSET_INFO *tocs);
void print(String *str) { str->append(func_name); }
};
@@ -1140,7 +1150,7 @@ class Item_uint :public Item_int
{
public:
Item_uint(const char *str_arg, uint length);
- Item_uint(uint32 i) :Item_int((ulonglong) i, 10) {}
+ Item_uint(ulonglong i) :Item_int((ulonglong) i, 10) {}
Item_uint(const char *str_arg, longlong i, uint length);
double val_real()
{ DBUG_ASSERT(fixed == 1); return ulonglong2double((ulonglong)value); }
@@ -1242,6 +1252,7 @@ public:
:Item_float(NullS, val_arg, decimal_par, length), func_name(str)
{}
void print(String *str) { str->append(func_name); }
+ Item *safe_charset_converter(CHARSET_INFO *tocs);
};
@@ -1314,6 +1325,7 @@ public:
Derivation dv= DERIVATION_COERCIBLE)
:Item_string(NullS, str, length, cs, dv), func_name(name_par)
{}
+ Item *safe_charset_converter(CHARSET_INFO *tocs);
void print(String *str) { str->append(func_name); }
};
@@ -1461,7 +1473,13 @@ public:
void save_org_in_field(Field *field) { (*ref)->save_org_in_field(field); }
enum Item_result result_type () const { return (*ref)->result_type(); }
enum_field_types field_type() const { return (*ref)->field_type(); }
- Field *get_tmp_table_field() { return result_field; }
+ Field *get_tmp_table_field()
+ { return result_field ? result_field : (*ref)->get_tmp_table_field(); }
+ Item *get_tmp_table_item(THD *thd)
+ {
+ return (result_field ? new Item_field(result_field) :
+ (*ref)->get_tmp_table_item(thd));
+ }
table_map used_tables() const
{
return depended_from ? OUTER_REF_TABLE_BIT : (*ref)->used_tables();
@@ -1699,7 +1717,7 @@ class Cached_item_field :public Cached_item
public:
Cached_item_field(Item_field *item)
{
- field=item->field;
+ field= item->field;
buff= (char*) sql_calloc(length=field->pack_length());
}
bool cmp(void);
@@ -1778,7 +1796,7 @@ public:
*/
enum trg_action_time_type
{
- TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1
+ TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1, TRG_ACTION_MAX
};
/*
@@ -1786,7 +1804,7 @@ enum trg_action_time_type
*/
enum trg_event_type
{
- TRG_EVENT_INSERT= 0 , TRG_EVENT_UPDATE= 1, TRG_EVENT_DELETE= 2
+ TRG_EVENT_INSERT= 0 , TRG_EVENT_UPDATE= 1, TRG_EVENT_DELETE= 2, TRG_EVENT_MAX
};
class Table_triggers_list;
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index a67e420170a..9db2f465080 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -25,9 +25,9 @@
Cached_item *new_Cached_item(THD *thd, Item *item)
{
- if (item->type() == Item::FIELD_ITEM &&
- !(((Item_field *) item)->field->flags & BLOB_FLAG))
- return new Cached_item_field((Item_field *) item);
+ if (item->real_item()->type() == Item::FIELD_ITEM &&
+ !(((Item_field *) (item->real_item()))->field->flags & BLOB_FLAG))
+ return new Cached_item_field((Item_field *) (item->real_item()));
switch (item->result_type()) {
case STRING_RESULT:
return new Cached_item_str(thd, (Item_field *) item);
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 5ed857319be..d430d0d3c23 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -237,32 +237,35 @@ void Item_bool_func2::fix_length_and_dec()
set_cmp_func();
return;
}
-
- Item *real_item= args[0]->real_item();
- if (real_item->type() == FIELD_ITEM)
+
+ if (!thd->is_context_analysis_only())
{
- Field *field= ((Item_field*) real_item)->field;
- if (field->can_be_compared_as_longlong())
+ Item *real_item= args[0]->real_item();
+ if (real_item->type() == FIELD_ITEM)
{
- if (convert_constant_item(thd, field,&args[1]))
+ Field *field=((Item_field*) real_item)->field;
+ if (field->can_be_compared_as_longlong())
{
- cmp.set_cmp_func(this, tmp_arg, tmp_arg+1,
- INT_RESULT); // Works for all types.
- return;
+ if (convert_constant_item(thd, field,&args[1]))
+ {
+ cmp.set_cmp_func(this, tmp_arg, tmp_arg+1,
+ INT_RESULT); // Works for all types.
+ return;
+ }
}
}
- }
- real_item= args[1]->real_item();
- if (real_item->type() == FIELD_ITEM)
- {
- Field *field= ((Item_field*) real_item)->field;
- if (field->can_be_compared_as_longlong())
+ real_item= args[1]->real_item();
+ if (real_item->type() == FIELD_ITEM /* && !real_item->const_item() */)
{
- if (convert_constant_item(thd, field,&args[0]))
+ Field *field=((Item_field*) real_item)->field;
+ if (field->can_be_compared_as_longlong())
{
- cmp.set_cmp_func(this, tmp_arg, tmp_arg+1,
- INT_RESULT); // Works for all types.
- return;
+ if (convert_constant_item(thd, field,&args[0]))
+ {
+ cmp.set_cmp_func(this, tmp_arg, tmp_arg+1,
+ INT_RESULT); // Works for all types.
+ return;
+ }
}
}
}
@@ -990,7 +993,8 @@ void Item_func_between::fix_length_and_dec()
if (args[0]->type() == FIELD_ITEM)
{
Field *field=((Item_field*) args[0])->field;
- if (field->can_be_compared_as_longlong())
+ if (!thd->is_context_analysis_only() &&
+ field->can_be_compared_as_longlong())
{
/*
The following can't be recoded with || as convert_constant_item
@@ -2180,7 +2184,13 @@ void Item_func_in::fix_length_and_dec()
return;
for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++)
- const_itm&= arg[0]->const_item();
+ {
+ if (!arg[0]->const_item())
+ {
+ const_itm= 0;
+ break;
+ }
+ }
/*
Row item with NULLs inside can return NULL or FALSE =>
diff --git a/sql/item_create.cc b/sql/item_create.cc
index b9073a6c0b3..b7d8d50f9b3 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -299,16 +299,8 @@ Item *create_func_pow(Item* a, Item *b)
Item *create_func_current_user()
{
- THD *thd=current_thd;
- char buff[HOSTNAME_LENGTH+USERNAME_LENGTH+2];
- uint length;
-
- thd->lex->safe_to_cache_query= 0;
- length= (uint) (strxmov(buff, thd->priv_user, "@", thd->priv_host, NullS) -
- buff);
- return new Item_static_string_func("current_user()",
- thd->memdup(buff, length), length,
- system_charset_info);
+ current_thd->lex->safe_to_cache_query= 0;
+ return new Item_func_user(TRUE);
}
Item *create_func_radians(Item *a)
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 296083b256e..71e0f29ffc7 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -42,73 +42,6 @@ bool check_reserved_words(LEX_STRING *name)
}
-static void my_coll_agg_error(DTCollation &c1, DTCollation &c2,
- const char *fname)
-{
- my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0),
- c1.collation->name, c1.derivation_name(),
- c2.collation->name, c2.derivation_name(),
- fname);
-}
-
-static void my_coll_agg_error(DTCollation &c1,
- DTCollation &c2,
- DTCollation &c3,
- const char *fname)
-{
- my_error(ER_CANT_AGGREGATE_3COLLATIONS, MYF(0),
- c1.collation->name, c1.derivation_name(),
- c2.collation->name, c2.derivation_name(),
- c3.collation->name, c3.derivation_name(),
- fname);
-}
-
-
-static void my_coll_agg_error(Item** args, uint count, const char *fname)
-{
- if (count == 2)
- my_coll_agg_error(args[0]->collation, args[1]->collation, fname);
- else if (count == 3)
- my_coll_agg_error(args[0]->collation,
- args[1]->collation,
- args[2]->collation,
- fname);
- else
- my_error(ER_CANT_AGGREGATE_NCOLLATIONS, MYF(0), fname);
-}
-
-
-bool Item_func::agg_arg_collations(DTCollation &c, Item **av, uint count,
- uint flags)
-{
- uint i;
- c.set(av[0]->collation);
- for (i= 1; i < count; i++)
- {
- if (c.aggregate(av[i]->collation, flags))
- {
- my_coll_agg_error(av, count, func_name());
- return TRUE;
- }
- }
- if ((flags & MY_COLL_DISALLOW_NONE) &&
- c.derivation == DERIVATION_NONE)
- {
- my_coll_agg_error(av, count, func_name());
- return TRUE;
- }
- return FALSE;
-}
-
-
-bool Item_func::agg_arg_collations_for_comparison(DTCollation &c,
- Item **av, uint count,
- uint flags)
-{
- return (agg_arg_collations(c, av, count, flags | MY_COLL_DISALLOW_NONE));
-}
-
-
/* return TRUE if item is a constant */
bool
@@ -118,107 +51,6 @@ eval_const_cond(COND *cond)
}
-
-/*
- Collect arguments' character sets together.
- We allow to apply automatic character set conversion in some cases.
- The conditions when conversion is possible are:
- - arguments A and B have different charsets
- - A wins according to coercibility rules
- (i.e. a column is stronger than a string constant,
- an explicit COLLATE clause is stronger than a column)
- - character set of A is either superset for character set of B,
- or B is a string constant which can be converted into the
- character set of A without data loss.
-
- If all of the above is true, then it's possible to convert
- B into the character set of A, and then compare according
- to the collation of A.
-
- For functions with more than two arguments:
-
- collect(A,B,C) ::= collect(collect(A,B),C)
-*/
-
-bool Item_func::agg_arg_charsets(DTCollation &coll,
- Item **args, uint nargs, uint flags)
-{
- Item **arg, **last, *safe_args[2];
- if (agg_arg_collations(coll, args, nargs, flags))
- return TRUE;
-
- /*
- For better error reporting: save the first and the second argument.
- We need this only if the the number of args is 3 or 2:
- - for a longer argument list, "Illegal mix of collations"
- doesn't display each argument's characteristics.
- - if nargs is 1, then this error cannot happen.
- */
- if (nargs >=2 && nargs <= 3)
- {
- safe_args[0]= args[0];
- safe_args[1]= args[1];
- }
-
- THD *thd= current_thd;
- Query_arena *arena, backup;
- bool res= FALSE;
- /*
- In case we're in statement prepare, create conversion item
- in its memory: it will be reused on each execute.
- */
- arena= thd->change_arena_if_needed(&backup);
-
- for (arg= args, last= args + nargs; arg < last; arg++)
- {
- Item* conv;
- uint32 dummy_offset;
- if (!String::needs_conversion(0, coll.collation,
- (*arg)->collation.collation,
- &dummy_offset))
- continue;
-
- if (!(conv= (*arg)->safe_charset_converter(coll.collation)))
- {
- if (nargs >=2 && nargs <= 3)
- {
- /* restore the original arguments for better error message */
- args[0]= safe_args[0];
- args[1]= safe_args[1];
- }
- my_coll_agg_error(args, nargs, func_name());
- res= TRUE;
- break; // we cannot return here, we need to restore "arena".
- }
- if ((*arg)->type() == FIELD_ITEM)
- ((Item_field *)(*arg))->no_const_subst= 1;
- /*
- If in statement prepare, then we create a converter for two
- constant items, do it once and then reuse it.
- If we're in execution of a prepared statement, arena is NULL,
- and the conv was created in runtime memory. This can be
- the case only if the argument is a parameter marker ('?'),
- because for all true constants the charset converter has already
- been created in prepare. In this case register the change for
- rollback.
- */
- if (arena)
- *arg= conv;
- else
- thd->change_item_tree(arg, conv);
- /*
- We do not check conv->fixed, because Item_func_conv_charset which can
- be return by safe_charset_converter can't be fixed at creation
- */
- conv->fix_fields(thd, arg);
- }
- if (arena)
- thd->restore_backup_item_arena(arena, &backup);
- return res;
-}
-
-
-
void Item_func::set_arguments(List<Item> &list)
{
allowed_arg_cols= 1;
@@ -1536,8 +1368,6 @@ my_decimal *Item_func_abs::decimal_op(my_decimal *decimal_value)
void Item_func_abs::fix_length_and_dec()
{
Item_func_num1::fix_length_and_dec();
- if (hybrid_type == INT_RESULT)
- unsigned_flag= 1;
}
@@ -3157,7 +2987,7 @@ void debug_sync_point(const char* lock_name, uint lock_timeout)
THD* thd=current_thd;
User_level_lock* ull;
struct timespec abstime;
- int lock_name_len,error=0;
+ int lock_name_len;
lock_name_len=strlen(lock_name);
pthread_mutex_lock(&LOCK_user_locks);
@@ -3191,8 +3021,8 @@ void debug_sync_point(const char* lock_name, uint lock_timeout)
set_timespec(abstime,lock_timeout);
while (!thd->killed &&
- (error=pthread_cond_timedwait(&ull->cond,&LOCK_user_locks,&abstime))
- != ETIME && error != ETIMEDOUT && ull->locked) ;
+ pthread_cond_timedwait(&ull->cond, &LOCK_user_locks,
+ &abstime) != ETIMEDOUT && ull->locked) ;
if (ull->locked)
{
if (!--ull->count)
@@ -3294,14 +3124,14 @@ longlong Item_func_get_lock::val_int()
set_timespec(abstime,timeout);
while (!thd->killed &&
(error=pthread_cond_timedwait(&ull->cond,&LOCK_user_locks,&abstime))
- != ETIME && error != ETIMEDOUT && error != EINVAL && ull->locked) ;
+ != ETIMEDOUT && error != EINVAL && ull->locked) ;
if (thd->killed)
error=EINTR; // Return NULL
if (ull->locked)
{
if (!--ull->count)
delete ull; // Should never happen
- if (error != ETIME && error != ETIMEDOUT)
+ if (error != ETIMEDOUT)
{
error=1;
null_value=1; // Return NULL
@@ -4217,6 +4047,37 @@ void Item_user_var_as_out_param::print(String *str)
}
+Item_func_get_system_var::
+Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg,
+ LEX_STRING *component_arg, const char *name_arg,
+ size_t name_len_arg)
+ :var(var_arg), var_type(var_type_arg), component(*component_arg)
+{
+ /* set_name() will allocate the name */
+ set_name(name_arg, name_len_arg, system_charset_info);
+}
+
+
+bool
+Item_func_get_system_var::fix_fields(THD *thd, Item **ref)
+{
+ Item *item;
+ DBUG_ENTER("Item_func_get_system_var::fix_fields");
+
+ /*
+ Evaluate the system variable and substitute the result (a basic constant)
+ for this item. If the variable cannot be evaluated,
+ the error is reported in sys_var::item().
+ */
+ if (!(item= var->item(thd, var_type, &component)))
+ DBUG_RETURN(1); // Impossible
+ item->set_name(name, 0, system_charset_info); // don't allocate a new name
+ thd->change_item_tree(ref, item);
+
+ DBUG_RETURN(0);
+}
+
+
longlong Item_func_inet_aton::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -4395,6 +4256,9 @@ bool Item_func_match::fix_index()
if (key == NO_SUCH_KEY)
return 0;
+
+ if (!table)
+ goto err;
for (keynr=0 ; keynr < table->s->keys ; keynr++)
{
@@ -4560,22 +4424,21 @@ longlong Item_func_bit_xor::val_int()
0 error
# constant item
*/
-
+
Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name,
LEX_STRING component)
{
+ sys_var *var;
+ char buff[MAX_SYS_VAR_LENGTH*2+4+8], *pos;
+ LEX_STRING *base_name, *component_name;
+
if (component.str == 0 &&
!my_strcasecmp(system_charset_info, name.str, "VERSION"))
- return new Item_string("@@VERSION", server_version,
+ return new Item_string(NULL, server_version,
(uint) strlen(server_version),
system_charset_info, DERIVATION_SYSCONST);
- Item *item;
- sys_var *var;
- char buff[MAX_SYS_VAR_LENGTH*2+4+8], *pos;
- LEX_STRING *base_name, *component_name;
-
if (component.str)
{
base_name= &component;
@@ -4597,51 +4460,12 @@ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name,
return 0;
}
}
- if (!(item=var->item(thd, var_type, component_name)))
- return 0; // Impossible
thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- buff[0]='@';
- buff[1]='@';
- pos=buff+2;
- if (var_type == OPT_SESSION)
- pos=strmov(pos,"session.");
- else if (var_type == OPT_GLOBAL)
- pos=strmov(pos,"global.");
-
- set_if_smaller(component_name->length, MAX_SYS_VAR_LENGTH);
- set_if_smaller(base_name->length, MAX_SYS_VAR_LENGTH);
-
- if (component_name->str)
- {
- memcpy(pos, component_name->str, component_name->length);
- pos+= component_name->length;
- *pos++= '.';
- }
- memcpy(pos, base_name->str, base_name->length);
- pos+= base_name->length;
-
- // set_name() will allocate the name
- item->set_name(buff,(uint) (pos-buff), system_charset_info);
- return item;
-}
-
-Item *get_system_var(THD *thd, enum_var_type var_type, const char *var_name,
- uint length, const char *item_name)
-{
- Item *item;
- sys_var *var;
- LEX_STRING null_lex_string;
-
- null_lex_string.str= 0;
+ set_if_smaller(component_name->length, MAX_SYS_VAR_LENGTH);
- var= find_sys_var(var_name, length);
- DBUG_ASSERT(var != 0);
- if (!(item=var->item(thd, var_type, &null_lex_string)))
- return 0; // Impossible
- thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT);
- item->set_name(item_name, 0, system_charset_info); // Will use original name
- return item;
+ return new Item_func_get_system_var(var, var_type, component_name,
+ NULL, 0);
}
@@ -4832,7 +4656,7 @@ Item_func_sp::execute(Item **itp)
THD *thd= current_thd;
ulong old_client_capabilites;
int res= -1;
- bool save_in_sub_stmt= thd->transaction.in_sub_stmt;
+ bool save_in_sub_stmt= thd->in_sub_stmt;
my_bool save_no_send_ok;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
st_sp_security_context save_ctx;
@@ -4870,11 +4694,11 @@ Item_func_sp::execute(Item **itp)
*/
tmp_disable_binlog(thd); /* don't binlog the substatements */
- thd->transaction.in_sub_stmt= TRUE;
+ thd->in_sub_stmt= TRUE;
res= m_sp->execute_function(thd, args, arg_count, itp);
- thd->transaction.in_sub_stmt= save_in_sub_stmt;
+ thd->in_sub_stmt= save_in_sub_stmt;
reenable_binlog(thd);
if (res && mysql_bin_log.is_open() &&
(m_sp->m_chistics->daccess == SP_CONTAINS_SQL ||
diff --git a/sql/item_func.h b/sql/item_func.h
index 3ca37b1961f..e8db9d70ae7 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -43,7 +43,7 @@ public:
bool const_item_cache;
enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC,
GE_FUNC,GT_FUNC,FT_FUNC,
- LIKE_FUNC,NOTLIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC,
+ LIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC,
COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC,
BETWEEN, IN_FUNC, MULT_EQUAL_FUNC,
INTERVAL_FUNC, ISNOTNULLTEST_FUNC,
@@ -166,12 +166,22 @@ public:
my_decimal *val_decimal(my_decimal *);
bool agg_arg_collations(DTCollation &c, Item **items, uint nitems,
- uint flags= 0);
+ uint flags= 0)
+ {
+ return agg_item_collations(c, func_name(), items, nitems, flags);
+ }
bool agg_arg_collations_for_comparison(DTCollation &c,
Item **items, uint nitems,
- uint flags= 0);
+ uint flags= 0)
+ {
+ return agg_item_collations_for_comparison(c, func_name(),
+ items, nitems, flags);
+ }
bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems,
- uint flags= 0);
+ uint flags= 0)
+ {
+ return agg_item_charsets(c, func_name(), items, nitems, flags);
+ }
bool walk(Item_processor processor, byte *arg);
Item *transform(Item_transformer transformer, byte *arg);
void traverse_cond(Cond_traverser traverser,
@@ -1199,6 +1209,31 @@ public:
};
+/* A system variable */
+
+class Item_func_get_system_var :public Item_func
+{
+ sys_var *var;
+ enum_var_type var_type;
+ LEX_STRING component;
+public:
+ Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg,
+ LEX_STRING *component_arg, const char *name_arg,
+ size_t name_len_arg);
+ bool fix_fields(THD *thd, Item **ref);
+ /*
+ Stubs for pure virtual methods. Should never be called: this
+ item is always substituted with a constant in fix_fields().
+ */
+ double val_real() { DBUG_ASSERT(0); return 0.0; }
+ longlong val_int() { DBUG_ASSERT(0); return 0; }
+ String* val_str(String*) { DBUG_ASSERT(0); return 0; }
+ void fix_length_and_dec() { DBUG_ASSERT(0); }
+ /* TODO: fix to support views */
+ const char *func_name() const { return "get_system_var"; }
+};
+
+
class Item_func_inet_aton : public Item_int_func
{
public:
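Item_func_get_system_var is deliberately a placeholder: all of its value methods assert, because fix_fields() evaluates the variable once and splices the resulting constant into the tree via thd->change_item_tree(). A rough standalone sketch of that substitute-yourself-at-fix-time pattern; every name below is invented for the illustration:

#include <cstdio>

struct Node
{
  virtual long value() const = 0;
  virtual ~Node() {}
};

struct ConstNode : Node
{
  long v;
  explicit ConstNode(long v_arg) : v(v_arg) {}
  long value() const { return v; }
};

/* Plays the role of Item_func_get_system_var: never evaluated itself. */
struct SysVarPlaceholder : Node
{
  long (*read_var)();                 /* stands in for sys_var::item() */
  explicit SysVarPlaceholder(long (*f)()) : read_var(f) {}
  long value() const { return 0; }    /* unreachable after fix-up */
  /* Mirrors fix_fields(): read the variable once and hand back the
     constant that should take this node's place in the tree. */
  Node *fix() const { return new ConstNode(read_var()); }
};

static long fake_max_allowed_packet() { return 1048576; }

int main()
{
  Node *item= new SysVarPlaceholder(fake_max_allowed_packet);
  Node *fixed= static_cast<SysVarPlaceholder*>(item)->fix();
  delete item;                        /* placeholder is discarded */
  std::printf("%ld\n", fixed->value());
  delete fixed;
  return 0;
}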
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 2fd0f25e699..256090d3e61 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -335,19 +335,20 @@ null:
void Item_func_concat::fix_length_and_dec()
{
- max_length=0;
+ ulonglong max_result_length= 0;
if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV))
return;
for (uint i=0 ; i < arg_count ; i++)
- max_length+=args[i]->max_length;
+ max_result_length+= args[i]->max_length;
- if (max_length > MAX_BLOB_WIDTH)
+ if (max_result_length >= MAX_BLOB_WIDTH)
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_result_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
+ max_length= (ulong) max_result_length;
}
/*
@@ -378,9 +379,6 @@ String *Item_func_des_encrypt::val_str(String *str)
if (arg_count == 1)
{
- /* Make sure LOCK_des_key_file was initialized. */
- init_des_key_file();
-
/* Protect against someone doing FLUSH DES_KEY_FILE */
VOID(pthread_mutex_lock(&LOCK_des_key_file));
keyschedule= des_keyschedule[key_number=des_default_key];
@@ -391,10 +389,6 @@ String *Item_func_des_encrypt::val_str(String *str)
key_number= (uint) args[1]->val_int();
if (key_number > 9)
goto error;
-
- /* Make sure LOCK_des_key_file was initialized. */
- init_des_key_file();
-
VOID(pthread_mutex_lock(&LOCK_des_key_file));
keyschedule= des_keyschedule[key_number];
VOID(pthread_mutex_unlock(&LOCK_des_key_file));
@@ -482,9 +476,6 @@ String *Item_func_des_decrypt::val_str(String *str)
if (!(current_thd->master_access & SUPER_ACL) || key_number > 9)
goto error;
- /* Make sure LOCK_des_key_file was initialized. */
- init_des_key_file();
-
VOID(pthread_mutex_lock(&LOCK_des_key_file));
keyschedule= des_keyschedule[key_number];
VOID(pthread_mutex_unlock(&LOCK_des_key_file));
@@ -658,7 +649,7 @@ null:
void Item_func_concat_ws::fix_length_and_dec()
{
- max_length=0;
+ ulonglong max_result_length;
if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV))
return;
@@ -668,15 +659,16 @@ void Item_func_concat_ws::fix_length_and_dec()
it is done on parser level in sql_yacc.yy
so, (arg_count - 2) is safe here.
*/
- max_length= args[0]->max_length * (arg_count - 2);
+ max_result_length= (ulonglong) args[0]->max_length * (arg_count - 2);
for (uint i=1 ; i < arg_count ; i++)
- max_length+=args[i]->max_length;
+ max_result_length+=args[i]->max_length;
- if (max_length > MAX_BLOB_WIDTH)
+ if (max_result_length >= MAX_BLOB_WIDTH)
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_result_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
+ max_length= (ulong) max_result_length;
}
@@ -855,18 +847,19 @@ null:
void Item_func_replace::fix_length_and_dec()
{
- max_length=args[0]->max_length;
+ ulonglong max_result_length= args[0]->max_length;
int diff=(int) (args[2]->max_length - args[1]->max_length);
if (diff > 0 && args[1]->max_length)
 { // Calculate the max number of replacements
- uint max_substrs= max_length/args[1]->max_length;
- max_length+= max_substrs * (uint) diff;
+ ulonglong max_substrs= max_result_length/args[1]->max_length;
+ max_result_length+= max_substrs * (uint) diff;
}
- if (max_length > MAX_BLOB_WIDTH)
+ if (max_result_length >= MAX_BLOB_WIDTH)
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_result_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
+ max_length= (ulong) max_result_length;
if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV))
return;
@@ -914,18 +907,22 @@ null:
void Item_func_insert::fix_length_and_dec()
{
Item *cargs[2];
+ ulonglong max_result_length;
+
cargs[0]= args[0];
cargs[1]= args[3];
if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV))
return;
args[0]= cargs[0];
args[3]= cargs[1];
- max_length=args[0]->max_length+args[3]->max_length;
- if (max_length > MAX_BLOB_WIDTH)
+ max_result_length= ((ulonglong) args[0]->max_length+
+ (ulonglong) args[3]->max_length);
+ if (max_result_length >= MAX_BLOB_WIDTH)
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_result_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
+ max_length= (ulong) max_result_length;
}
@@ -1552,9 +1549,11 @@ Item *Item_func_sysconst::safe_charset_converter(CHARSET_INFO *tocs)
uint conv_errors;
String tmp, cstr, *ostr= val_str(&tmp);
cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
- if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(),
- cstr.charset(),
- collation.derivation)))
+ if (conv_errors ||
+ !(conv= new Item_static_string_func(fully_qualified_func_name(),
+ cstr.ptr(), cstr.length(),
+ cstr.charset(),
+ collation.derivation)))
{
return NULL;
}
@@ -1584,13 +1583,24 @@ String *Item_func_user::val_str(String *str)
DBUG_ASSERT(fixed == 1);
THD *thd=current_thd;
CHARSET_INFO *cs= system_charset_info;
- const char *host= thd->host_or_ip;
+ const char *host, *user;
uint res_length;
+ if (is_current)
+ {
+ user= thd->priv_user;
+ host= thd->priv_host;
+ }
+ else
+ {
+ user= thd->user;
+ host= thd->host_or_ip;
+ }
+
// For system threads (e.g. replication SQL thread) user may be empty
- if (!thd->user)
+ if (!user)
return &my_empty_string;
- res_length= (strlen(thd->user)+strlen(host)+2) * cs->mbmaxlen;
+ res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen;
if (str->alloc(res_length))
{
@@ -1598,12 +1608,13 @@ String *Item_func_user::val_str(String *str)
return 0;
}
res_length=cs->cset->snprintf(cs, (char*)str->ptr(), res_length, "%s@%s",
- thd->user, host);
+ user, host);
str->length(res_length);
str->set_charset(cs);
return str;
}
+
void Item_func_soundex::fix_length_and_dec()
{
collation.set(args[0]->collation);
@@ -1987,17 +1998,19 @@ void Item_func_repeat::fix_length_and_dec()
collation.set(args[0]->collation);
if (args[1]->const_item())
{
- max_length=(long) (args[0]->max_length * args[1]->val_int());
- if (max_length >= MAX_BLOB_WIDTH)
+ ulonglong max_result_length= ((ulonglong) args[0]->max_length *
+ args[1]->val_int());
+ if (max_result_length >= MAX_BLOB_WIDTH)
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_result_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
+ max_length= (ulong) max_result_length;
}
else
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
}
@@ -2052,6 +2065,7 @@ err:
void Item_func_rpad::fix_length_and_dec()
{
Item *cargs[2];
+
cargs[0]= args[0];
cargs[1]= args[2];
if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV))
@@ -2060,18 +2074,19 @@ void Item_func_rpad::fix_length_and_dec()
args[2]= cargs[1];
if (args[1]->const_item())
{
- uint32 length= (uint32) args[1]->val_int() * collation.collation->mbmaxlen;
- max_length=max(args[0]->max_length,length);
- if (max_length >= MAX_BLOB_WIDTH)
+ ulonglong length= ((ulonglong) args[1]->val_int() *
+ collation.collation->mbmaxlen);
+ if (length >= MAX_BLOB_WIDTH)
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
+ max_length= (ulong) length;
}
else
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
}
@@ -2104,7 +2119,7 @@ String *Item_func_rpad::val_str(String *str)
func_name(), current_thd->variables.max_allowed_packet);
goto err;
}
- if(args[2]->null_value || !pad_char_length)
+ if (args[2]->null_value || !pad_char_length)
goto err;
res_byte_length= res->length(); /* Must be done before alloc_buffer */
if (!(res= alloc_buffer(res,str,&tmp_value,byte_count)))
@@ -2146,18 +2161,19 @@ void Item_func_lpad::fix_length_and_dec()
if (args[1]->const_item())
{
- uint32 length= (uint32) args[1]->val_int() * collation.collation->mbmaxlen;
- max_length=max(args[0]->max_length,length);
- if (max_length >= MAX_BLOB_WIDTH)
+ ulonglong length= ((ulonglong) args[1]->val_int() *
+ collation.collation->mbmaxlen);
+ if (length >= MAX_BLOB_WIDTH)
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
+ max_length= (ulong) length;
}
else
{
- max_length=MAX_BLOB_WIDTH;
- maybe_null=1;
+ max_length= MAX_BLOB_WIDTH;
+ maybe_null= 1;
}
}
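The recurring pattern in these item_strfunc.cc hunks is the same overflow fix: the worst-case result length is accumulated in a 64-bit max_result_length, clamped at MAX_BLOB_WIDTH (setting maybe_null, since an over-long result can come back as NULL at run time), and only then narrowed into max_length. A compact standalone sketch of that widen-then-clamp step; the MAX_BLOB_WIDTH value and the function name are illustrative only:

#include <cstdio>

typedef unsigned long      ulong;
typedef unsigned long long ulonglong;

#define MAX_BLOB_WIDTH 16777216ULL    /* illustrative value for the sketch */

/* CONCAT-style length estimate: sum in 64 bits, clamp, then narrow. */
static ulong concat_max_length(const ulong *arg_lengths, int arg_count,
                               bool *maybe_null)
{
  ulonglong max_result_length= 0;
  for (int i= 0; i < arg_count; i++)
    max_result_length+= arg_lengths[i];
  if (max_result_length >= MAX_BLOB_WIDTH)
  {
    max_result_length= MAX_BLOB_WIDTH;
    *maybe_null= true;                /* result may be returned as NULL */
  }
  return (ulong) max_result_length;
}

int main()
{
  /* Three 4GB-ish operands would wrap a 32-bit sum; the 64-bit temporary
     keeps the estimate sane and the clamp caps it. */
  ulong lengths[]= { 4294967295UL, 4294967295UL, 16UL };
  bool maybe_null= false;
  std::printf("%lu (maybe_null=%d)\n",
              concat_max_length(lengths, 3, &maybe_null), (int) maybe_null);
  return 0;
}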
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index d85210984d9..fa098849a43 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -357,8 +357,15 @@ public:
Item_func_sysconst()
{ collation.set(system_charset_info,DERIVATION_SYSCONST); }
Item *safe_charset_converter(CHARSET_INFO *tocs);
+ /*
+ Used to create the correct Item name for the new converted item in
+ safe_charset_converter(); returns the string representation of this
+ function call
+ */
+ virtual const char *fully_qualified_func_name() const = 0;
};
+
class Item_func_database :public Item_func_sysconst
{
public:
@@ -370,18 +377,27 @@ public:
maybe_null=1;
}
const char *func_name() const { return "database"; }
+ const char *fully_qualified_func_name() const { return "database()"; }
};
+
class Item_func_user :public Item_func_sysconst
{
+ bool is_current;
+
public:
- Item_func_user() :Item_func_sysconst() {}
+ Item_func_user(bool is_current_arg)
+ :Item_func_sysconst(), is_current(is_current_arg) {}
String *val_str(String *);
- void fix_length_and_dec()
- {
- max_length= (USERNAME_LENGTH+HOSTNAME_LENGTH+1)*system_charset_info->mbmaxlen;
+ void fix_length_and_dec()
+ {
+ max_length= ((USERNAME_LENGTH + HOSTNAME_LENGTH + 1) *
+ system_charset_info->mbmaxlen);
}
- const char *func_name() const { return "user"; }
+ const char *func_name() const
+ { return is_current ? "current_user" : "user"; }
+ const char *fully_qualified_func_name() const
+ { return is_current ? "current_user()" : "user()"; }
};
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index ad1c9977e5b..53e377339b3 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1070,15 +1070,20 @@ Item_in_subselect::row_value_transformer(JOIN *join)
List_iterator_fast<Item> li(select_lex->item_list);
for (uint i= 0; i < n; i++)
{
+ Item *func;
DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed);
if (select_lex->ref_pointer_array[i]->
check_cols(left_expr->el(i)->cols()))
DBUG_RETURN(RES_ERROR);
- Item *func= new Item_ref_null_helper(&select_lex->context,
- this,
- select_lex->ref_pointer_array+i,
- (char *) "<no matter>",
- (char *) "<list ref>");
+ if (join->having || select_lex->with_sum_func ||
+ select_lex->group_list.elements)
+ func= new Item_ref_null_helper(&select_lex->context,
+ this,
+ select_lex->ref_pointer_array+i,
+ (char *) "<no matter>",
+ (char *) "<list ref>");
+ else
+ func= li++;
func=
eq_creator.create(new Item_direct_ref(&select_lex->context,
(*optimizer->get_cache())->
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index d2f1016891b..2d80a19aa55 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -321,6 +321,22 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table,
field->flags&= ~NOT_NULL_FLAG;
return field;
}
+ /*
+ DATE/TIME fields have STRING_RESULT result types.
+ To preserve the field type, the creation of DATE/TIME fields has to be
+ handled separately.
+ */
+ switch (args[0]->field_type()) {
+ case MYSQL_TYPE_DATE:
+ return new Field_date(maybe_null, name, table, collation.collation);
+ case MYSQL_TYPE_TIME:
+ return new Field_time(maybe_null, name, table, collation.collation);
+ case MYSQL_TYPE_TIMESTAMP:
+ case MYSQL_TYPE_DATETIME:
+ return new Field_datetime(maybe_null, name, table, collation.collation);
+ default:
+ break;
+ }
return Item_sum::create_tmp_field(group, table, convert_blob_length);
}
@@ -2275,7 +2291,8 @@ bool Item_sum_count_distinct::setup(THD *thd)
DBUG_ASSERT(table == 0);
if (!(table= create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1,
0,
- select_lex->options | thd->options,
+ (select_lex->options | thd->options) &
+ ~TMP_TABLE_FORCE_MYISAM,
HA_POS_ERROR, (char*)"")))
return TRUE;
table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows
@@ -2632,7 +2649,11 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1,
the temporary table, not the original field
*/
Field *field= (*field_item)->get_tmp_table_field();
- if (field)
+ /*
+    If field_item is a const item then either get_tmp_table_field returns 0
+ or it is an item over a const table.
+ */
+ if (field && !(*field_item)->const_item())
{
int res;
uint offset= field->offset() - table->s->null_bytes;
@@ -2666,8 +2687,11 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2)
the temporary table, not the original field
*/
Field *field= item->get_tmp_table_field();
- /* If the item is a constant, there is no tmp table field */
- if (field)
+ /*
+    If item is a const item then either get_tmp_table_field returns 0
+ or it is an item over a const table.
+ */
+ if (field && !item->const_item())
{
int res;
uint offset= field->offset() - table->s->null_bytes;
@@ -2976,6 +3000,10 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
maybe_null|= args[i]->maybe_null;
}
+ if (agg_item_charsets(collation, func_name(),
+ args, arg_count, MY_COLL_ALLOW_CONV))
+ return 1;
+
result_field= 0;
null_value= 1;
thd->allow_sum_func= 1;
@@ -3046,7 +3074,8 @@ bool Item_func_group_concat::setup(THD *thd)
*/
if (!(table= create_tmp_table(thd, tmp_table_param, all_fields,
(ORDER*) 0, 0, TRUE,
- select_lex->options | thd->options,
+ (select_lex->options | thd->options) &
+ ~TMP_TABLE_FORCE_MYISAM,
HA_POS_ERROR, (char*) "")))
DBUG_RETURN(TRUE);
table->file->extra(HA_EXTRA_NO_ROWS);
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index cac9613f1ad..a6fbbee6f23 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1070,7 +1070,7 @@ longlong Item_func_year::val_int()
longlong Item_func_unix_timestamp::val_int()
{
TIME ltime;
- bool not_used;
+ my_bool not_used;
DBUG_ASSERT(fixed == 1);
if (arg_count == 0)
@@ -1555,7 +1555,7 @@ void Item_func_date_format::fix_length_and_dec()
{
fixed_length=0;
/* The result is a binary string (no reason to use collation->mbmaxlen */
- max_length=args[1]->max_length*10;
+ max_length=min(args[1]->max_length,MAX_BLOB_WIDTH) * 10;
set_if_smaller(max_length,MAX_BLOB_WIDTH);
}
maybe_null=1; // If wrong date
@@ -1798,7 +1798,6 @@ bool Item_func_convert_tz::get_date(TIME *ltime,
uint fuzzy_date __attribute__((unused)))
{
my_time_t my_time_tmp;
- bool not_used;
String str;
if (!from_tz_cached)
@@ -1824,6 +1823,7 @@ bool Item_func_convert_tz::get_date(TIME *ltime,
ltime->year==TIMESTAMP_MAX_YEAR && ltime->month==1 && ltime->day==1 ||
ltime->year==TIMESTAMP_MIN_YEAR && ltime->month==12 && ltime->day==31)
{
+ my_bool not_used;
my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, &not_used);
if (my_time_tmp >= TIMESTAMP_MIN_VALUE && my_time_tmp <= TIMESTAMP_MAX_VALUE)
to_tz->gmt_sec_to_TIME(ltime, my_time_tmp);
@@ -2524,7 +2524,7 @@ void Item_func_add_time::print(String *str)
}
args[0]->print(str);
str->append(',');
- args[0]->print(str);
+ args[1]->print(str);
str->append(')');
}
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 107d12e6da2..e79bc25030b 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -714,6 +714,12 @@ public:
{
return (new Field_date(maybe_null, name, t_arg, &my_charset_bin));
}
+ void fix_length_and_dec()
+ {
+ collation.set(&my_charset_bin);
+ max_length= 10;
+ maybe_null= 1;
+ }
};
diff --git a/sql/lex.h b/sql/lex.h
index aa10328ced0..122e7040c80 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -497,6 +497,7 @@ static SYMBOL symbols[] = {
{ "TRAILING", SYM(TRAILING)},
{ "TRANSACTION", SYM(TRANSACTION_SYM)},
{ "TRIGGER", SYM(TRIGGER_SYM)},
+ { "TRIGGERS", SYM(TRIGGERS_SYM)},
{ "TRUE", SYM(TRUE_SYM)},
{ "TRUNCATE", SYM(TRUNCATE_SYM)},
{ "TYPE", SYM(TYPE_SYM)},
diff --git a/sql/lock.cc b/sql/lock.cc
index 7f3fe5ac5da..568ca2b68af 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -99,10 +99,15 @@ static void print_lock_error(int error, const char *);
NULL on error.
*/
+static int thr_lock_errno_to_mysql[]=
+{ 0, 1, ER_LOCK_WAIT_TIMEOUT, ER_LOCK_DEADLOCK };
+
MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags)
{
MYSQL_LOCK *sql_lock;
TABLE *write_lock_used;
+ int rc;
+ /* Map the return value of thr_lock to an error from errmsg.txt */
DBUG_ENTER("mysql_lock_tables");
for (;;)
@@ -135,15 +140,24 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags)
{
my_free((gptr) sql_lock,MYF(0));
sql_lock=0;
- thd->proc_info=0;
break;
}
thd->proc_info="Table lock";
thd->locked=1;
- if (thr_multi_lock(sql_lock->locks,sql_lock->lock_count))
+ rc= thr_lock_errno_to_mysql[(int) thr_multi_lock(sql_lock->locks,
+ sql_lock->lock_count,
+ thd->lock_id)];
+ if (rc > 1) /* a timeout or a deadlock */
+ {
+ my_error(rc, MYF(0));
+ my_free((gptr) sql_lock,MYF(0));
+ sql_lock= 0;
+ break;
+ }
+ else if (rc == 1) /* aborted */
{
thd->some_tables_deleted=1; // Try again
- sql_lock->lock_count=0; // Locks are alread freed
+ sql_lock->lock_count= 0; // Locks are already freed
}
else if (!thd->some_tables_deleted || (flags & MYSQL_LOCK_IGNORE_FLUSH))
{
@@ -335,22 +349,37 @@ void mysql_lock_abort(THD *thd, TABLE *table)
}
-/* Abort one thread / table combination */
+/*
+ Abort one thread / table combination
+
+ SYNOPSIS
+ mysql_lock_abort_for_thread()
+ thd Thread handler
+ table Table that should be removed from lock queue
-void mysql_lock_abort_for_thread(THD *thd, TABLE *table)
+ RETURN
+ 0 Table was not locked by another thread
+ 1 Table was locked by at least one other thread
+*/
+
+bool mysql_lock_abort_for_thread(THD *thd, TABLE *table)
{
MYSQL_LOCK *locked;
TABLE *write_lock_used;
+ bool result= FALSE;
DBUG_ENTER("mysql_lock_abort_for_thread");
if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
{
for (uint i=0; i < locked->lock_count; i++)
- thr_abort_locks_for_thread(locked->locks[i]->lock,
- table->in_use->real_id);
+ {
+ if (thr_abort_locks_for_thread(locked->locks[i]->lock,
+ table->in_use->real_id))
+ result= TRUE;
+ }
my_free((gptr) locked,MYF(0));
}
- DBUG_VOID_RETURN;
+ DBUG_RETURN(result);
}
@@ -583,6 +612,7 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list)
DBUG_RETURN(-1);
table->s= &table->share_not_to_be_used;
memcpy((table->s->table_cache_key= (char*) (table+1)), key, key_length);
+ table->s->db= table->s->table_cache_key;
table->s->key_length=key_length;
table->in_use=thd;
table->locked_by_name=1;
@@ -593,9 +623,10 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list)
my_free((gptr) table,MYF(0));
DBUG_RETURN(-1);
}
- if (remove_table_from_cache(thd, db, table_list->table_name, 0))
- DBUG_RETURN(1); // Table is in use
- DBUG_RETURN(0);
+
+ /* Return 1 if table is in use */
+ DBUG_RETURN(test(remove_table_from_cache(thd, db, table_list->table_name,
+ RTFC_NO_FLAG)));
}
@@ -959,7 +990,7 @@ bool make_global_read_lock_block_commit(THD *thd)
make_global_read_lock_block_commit(), do nothing.
*/
if (thd->global_read_lock != GOT_GLOBAL_READ_LOCK)
- DBUG_RETURN(1);
+ DBUG_RETURN(0);
pthread_mutex_lock(&LOCK_global_read_lock);
/* increment this BEFORE waiting on cond (otherwise race cond) */
global_read_lock_blocks_commit++;
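
The lock.cc changes translate the return code of thr_multi_lock() into a server error through a small lookup table, turning the timeout and deadlock cases into immediate errors while keeping the old retry behaviour for aborted locks. A standalone sketch of that table-driven mapping (the enum values follow the indexing implied by the array above, and the error numbers are illustrative placeholders, not taken from errmsg.txt):

    #include <cstdio>

    // Return codes assumed for the lock layer, matching the indexing of the
    // mapping array in the hunk: 0 ok, 1 aborted, 2 timeout, 3 deadlock.
    enum lock_rc { LOCK_OK= 0, LOCK_ABORTED= 1, LOCK_TIMEOUT= 2, LOCK_DEADLOCK= 3 };

    // Illustrative error numbers standing in for ER_LOCK_WAIT_TIMEOUT and
    // ER_LOCK_DEADLOCK.
    static const int DEMO_ER_LOCK_WAIT_TIMEOUT= 1205;
    static const int DEMO_ER_LOCK_DEADLOCK=     1213;

    static const int lock_rc_to_error[]=
    { 0, 1, DEMO_ER_LOCK_WAIT_TIMEOUT, DEMO_ER_LOCK_DEADLOCK };

    static void handle_lock_result(lock_rc rc)
    {
      int mapped= lock_rc_to_error[(int) rc];
      if (mapped > 1)                  // timeout or deadlock: report and give up
        printf("error %d\n", mapped);
      else if (mapped == 1)            // aborted: locks already freed, retry
        printf("retry\n");
      else
        printf("locked\n");
    }

    int main()
    {
      handle_lock_result(LOCK_TIMEOUT);   // error 1205
      handle_lock_result(LOCK_ABORTED);   // retry
      return 0;
    }
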
diff --git a/sql/log.cc b/sql/log.cc
index 44fff28b612..5ad8ec818ef 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -58,7 +58,11 @@ static handlerton binlog_hton = {
binlog_prepare,
NULL, /* recover */
NULL, /* commit_by_xid */
- NULL /* rollback_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
};
/*
@@ -2209,7 +2213,7 @@ bool flush_error_log()
On Windows is necessary a temporary file for to rename
the current error file.
*/
- strmov(strmov(err_temp, err_renamed),"-tmp");
+ strxmov(err_temp, err_renamed,"-tmp",NullS);
(void) my_delete(err_temp, MYF(0));
if (freopen(err_temp,"a+",stdout))
{
@@ -2650,7 +2654,7 @@ int TC_LOG_MMAP::log(THD *thd, my_xid xid)
{ // somebody's syncing. let's wait
p->waiters++;
/*
- note - it must be while(), not do ... while() here
+ note - it must be while (), not do ... while () here
as p->state may be not DIRTY when we come here
*/
while (p->state == DIRTY && syncing)
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 0873ee50743..bdf17ba20e3 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1,15 +1,15 @@
/* Copyright (C) 2000-2004 MySQL AB
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
@@ -242,7 +242,7 @@ static void print_set_option(FILE* file, uint32 bits_changed, uint32 option,
{
if (*need_comma)
fprintf(file,", ");
- fprintf(file,"%s=%d", name, (bool)(flags & option));
+ fprintf(file,"%s=%d", name, test(flags & option));
*need_comma= 1;
}
}
@@ -2774,6 +2774,16 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
*/
handle_dup= DUP_ERROR;
}
+ /*
+ We need to set thd->lex->sql_command and thd->lex->duplicates
+ since InnoDB tests these variables to decide if this is a LOAD
+ DATA ... REPLACE INTO ... statement even though mysql_parse()
+ is not called. This is not needed in 5.0 since there the LOAD
+ DATA ... statement is replicated using mysql_parse(), which
+ sets the thd->lex fields correctly.
+ */
+ thd->lex->sql_command= SQLCOM_LOAD;
+ thd->lex->duplicates= handle_dup;
sql_exchange ex((char*)fname, sql_ex.opt_flags & DUMPFILE_FLAG);
String field_term(sql_ex.field_term,sql_ex.field_term_len,log_cs);
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 66c38d2f24f..dae564a15c0 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -256,6 +256,13 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset;
#define OPTION_WARNINGS (1L << 13) // THD, user
#define OPTION_AUTO_IS_NULL (1L << 14) // THD, user, binlog
#define OPTION_FOUND_COMMENT (1L << 15) // SELECT, intern, parser
+/*
+ Force the used temporary table to be a MyISAM table (because we will use
+  fulltext functions when reading from it). This uses the same constant as
+ OPTION_FOUND_COMMENT because we've run out of bits and these two values
+ are not used together.
+*/
+#define TMP_TABLE_FORCE_MYISAM (1L << 15)
#define OPTION_SAFE_UPDATES (1L << 16) // THD, user
#define OPTION_BUFFER_RESULT (1L << 17) // SELECT, user
#define OPTION_BIN_LOG (1L << 18) // THD, user
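
TMP_TABLE_FORCE_MYISAM deliberately aliases bit 15 with OPTION_FOUND_COMMENT; that is only safe because the two flags are never meaningful on the same option word, and code that creates internal temporary tables masks the bit off explicitly (as the item_sum.cc hunks earlier in this patch do). A standalone sketch of the convention (simplified names, not the server's option machinery):

    #include <cassert>

    typedef unsigned long option_word;

    // Two flags that intentionally share bit 15; they are never meaningful in
    // the same option word at the same time.
    static const option_word OPTION_FOUND_COMMENT_BIT=   1UL << 15;
    static const option_word TMP_TABLE_FORCE_MYISAM_BIT= 1UL << 15;

    // When composing the options handed to temporary-table creation, the
    // aliased bit is masked off so a leftover parser flag cannot accidentally
    // force the MyISAM engine.
    static option_word tmp_table_options(option_word select_options,
                                         option_word session_options)
    {
      return (select_options | session_options) & ~TMP_TABLE_FORCE_MYISAM_BIT;
    }

    int main()
    {
      option_word select_options= OPTION_FOUND_COMMENT_BIT;  // set while parsing
      option_word opts= tmp_table_options(select_options, 0);
      assert((opts & TMP_TABLE_FORCE_MYISAM_BIT) == 0);
      return 0;
    }
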
@@ -475,6 +482,11 @@ typedef my_bool (*qc_engine_callback)(THD *thd, char *table_key,
#include "protocol.h"
#include "sql_udf.h"
class user_var_entry;
+enum enum_var_type
+{
+ OPT_DEFAULT= 0, OPT_SESSION, OPT_GLOBAL
+};
+class sys_var;
#include "item.h"
extern my_decimal decimal_zero;
typedef Comp_creator* (*chooser_compare_func_creator)(bool invert);
@@ -520,6 +532,7 @@ struct Query_cache_query_flags
{
unsigned int client_long_flag:1;
unsigned int client_protocol_41:1;
+ unsigned int more_results_exists:1;
uint character_set_client_num;
uint character_set_results_num;
uint collation_connection_num;
@@ -785,9 +798,7 @@ extern char *des_key_file;
extern struct st_des_keyschedule des_keyschedule[10];
extern uint des_default_key;
extern pthread_mutex_t LOCK_des_key_file;
-void init_des_key_file();
bool load_des_key_file(const char *file_name);
-void free_des_key_file();
#endif /* HAVE_OPENSSL */
/* sql_do.cc */
@@ -818,8 +829,6 @@ bool mysqld_show_column_types(THD *thd);
bool mysqld_help (THD *thd, const char *text);
void calc_sum_of_all_status(STATUS_VAR *to);
-
-
/* information schema */
extern LEX_STRING information_schema_name;
LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
@@ -947,8 +956,16 @@ bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db,
const char *table_name);
void remove_db_from_cache(const char *db);
void flush_tables();
+bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
+
+/* bits for last argument to remove_table_from_cache() */
+#define RTFC_NO_FLAG 0x0000
+#define RTFC_OWNED_BY_THD_FLAG 0x0001
+#define RTFC_WAIT_OTHER_THREAD_FLAG 0x0002
+#define RTFC_CHECK_KILLED_FLAG 0x0004
bool remove_table_from_cache(THD *thd, const char *db, const char *table,
- bool return_if_owned_by_thd);
+ uint flags);
+
bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables);
void copy_field_from_tmp_record(Field *field,int offset);
bool fill_record(THD *thd, Field **field, List<Item> &values,
@@ -1076,6 +1093,7 @@ extern const char **errmesg; /* Error messages */
extern const char *myisam_recover_options_str;
extern const char *in_left_expr_name, *in_additional_cond;
extern const char * const triggers_file_ext;
+extern const char * const trigname_file_ext;
extern Eq_creator eq_creator;
extern Ne_creator ne_creator;
extern Gt_creator gt_creator;
@@ -1144,14 +1162,18 @@ extern uint opt_large_page_size;
extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log;
extern FILE *bootstrap_file;
extern int bootstrap_error;
+extern FILE *stderror_file;
extern pthread_key(MEM_ROOT**,THR_MALLOC);
extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open,
LOCK_thread_count,LOCK_mapped_file,LOCK_user_locks, LOCK_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock,
- LOCK_global_system_variables, LOCK_user_conn,
+ LOCK_global_system_variables, LOCK_user_conn,
LOCK_bytes_sent, LOCK_bytes_received;
+#ifdef HAVE_OPENSSL
+extern pthread_mutex_t LOCK_des_key_file;
+#endif
extern rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
extern pthread_cond_t COND_refresh, COND_thread_count, COND_manager;
extern pthread_attr_t connection_attrib;
@@ -1175,6 +1197,7 @@ extern TABLE *unused_tables;
extern I_List<i_string> binlog_do_db, binlog_ignore_db;
extern const char* any_db;
extern struct my_option my_long_options[];
+extern const LEX_STRING view_type;
/* optional things, have_* variables */
@@ -1207,7 +1230,7 @@ void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock);
void mysql_unlock_some_tables(THD *thd, TABLE **table,uint count);
void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table);
void mysql_lock_abort(THD *thd, TABLE *table);
-void mysql_lock_abort_for_thread(THD *thd, TABLE *table);
+bool mysql_lock_abort_for_thread(THD *thd, TABLE *table);
MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b);
bool lock_global_read_lock(THD *thd);
void unlock_global_read_lock(THD *thd);
@@ -1233,10 +1256,13 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
void unireg_init(ulong options);
void unireg_end(void);
bool mysql_create_frm(THD *thd, my_string file_name,
+ const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info,handler *db_type);
-int rea_create_table(THD *thd, my_string file_name,HA_CREATE_INFO *create_info,
+int rea_create_table(THD *thd, my_string file_name,
+ const char *db, const char *table,
+ HA_CREATE_INFO *create_info,
List<create_field> &create_field,
uint key_count,KEY *key_info);
int format_number(uint inputflag,uint max_length,my_string pos,uint length,
@@ -1254,7 +1280,7 @@ ulong convert_period_to_month(ulong period);
ulong convert_month_to_period(ulong month);
void get_date_from_daynr(long daynr,uint *year, uint *month,
uint *day);
-my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist);
+my_time_t TIME_to_timestamp(THD *thd, const TIME *t, my_bool *not_exist);
bool str_to_time_with_warn(const char *str,uint length,TIME *l_time);
timestamp_type str_to_datetime_with_warn(const char *str, uint length,
TIME *l_time, uint flags);
@@ -1303,7 +1329,8 @@ ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames,
const char *newname);
ulong next_io_size(ulong pos);
void append_unescaped(String *res, const char *pos, uint length);
-int create_frm(THD *thd, char *name,uint reclength,uchar *fileinfo,
+int create_frm(THD *thd, char *name, const char *db, const char *table,
+ uint reclength,uchar *fileinfo,
HA_CREATE_INFO *create_info, uint keys);
void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);
int rename_file_ext(const char * from,const char * to,const char * ext);
@@ -1328,12 +1355,9 @@ extern bool sql_cache_init();
extern void sql_cache_free();
extern int sql_cache_hit(THD *thd, char *inBuf, uint length);
-/* item.cc */
+/* item_func.cc */
Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name,
LEX_STRING component);
-Item *get_system_var(THD *thd, enum_var_type var_type, const char *var_name,
- uint length, const char *item_name);
-/* item_func.cc */
int get_var_with_binlog(THD *thd, LEX_STRING &name,
user_var_entry **out_entry);
/* log.cc */
@@ -1432,6 +1456,12 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr)
table->status= STATUS_NO_RECORD;
table->keys_in_use_for_query= table->s->keys_in_use;
table->maybe_null= table_list->outer_join;
+ TABLE_LIST *embedding= table_list->embedding;
+ while (!table->maybe_null && embedding)
+ {
+ table->maybe_null= embedding->outer_join;
+ embedding= embedding->embedding;
+ }
table->tablenr= tablenr;
table->map= (table_map) 1 << tablenr;
table->force_index= table_list->force_index;
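
The loop added to setup_table_map() walks up the table's chain of enclosing join nests: even if the table itself is not directly outer-joined, it must be marked maybe_null when any nest containing it sits on the inner side of an outer join. A standalone sketch of the same walk with simplified stand-in structures (JoinNest and TableRef below are illustrative, not the real TABLE_LIST):

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-ins: a join nest knows whether it is on the inner side
    // of an outer join and which nest (if any) embeds it; a table reference
    // carries the same two pieces of information.
    struct JoinNest
    {
      bool outer_join;
      JoinNest *embedding;
    };

    struct TableRef
    {
      bool outer_join;
      JoinNest *embedding;
    };

    // A table is nullable if it is outer-joined directly, or if any enclosing
    // nest is outer-joined; this mirrors the walk added to setup_table_map().
    static bool table_maybe_null(const TableRef &t)
    {
      bool maybe_null= t.outer_join;
      for (JoinNest *nest= t.embedding; !maybe_null && nest; nest= nest->embedding)
        maybe_null= nest->outer_join;
      return maybe_null;
    }

    int main()
    {
      // t sits inside nest2 inside nest1; only nest1 is on the inner side of an
      // outer join, yet t must still be treated as nullable.
      JoinNest nest1= { true,  NULL };
      JoinNest nest2= { false, &nest1 };
      TableRef t=     { false, &nest2 };
      assert(table_maybe_null(t));
      return 0;
    }
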
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 78d9af387da..f8bfcb75be2 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -220,21 +220,58 @@ extern "C" int gethostname(char *name, int namelen);
/* Constants */
const char *show_comp_option_name[]= {"YES", "NO", "DISABLED"};
-static const char *sql_mode_names[] =
+static const char *sql_mode_names[]=
{
"REAL_AS_FLOAT", "PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE",
"?", "ONLY_FULL_GROUP_BY", "NO_UNSIGNED_SUBTRACTION",
"NO_DIR_IN_CREATE",
"POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS",
"NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "MYSQL323", "MYSQL40", "ANSI",
- "NO_AUTO_VALUE_ON_ZERO", "NO_BACKSLASH_ESCAPES", "STRICT_TRANS_TABLES", "STRICT_ALL_TABLES",
- "NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ALLOW_INVALID_DATES", "ERROR_FOR_DIVISION_BY_ZERO",
+ "NO_AUTO_VALUE_ON_ZERO", "NO_BACKSLASH_ESCAPES", "STRICT_TRANS_TABLES",
+ "STRICT_ALL_TABLES",
+ "NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ALLOW_INVALID_DATES",
+ "ERROR_FOR_DIVISION_BY_ZERO",
"TRADITIONAL", "NO_AUTO_CREATE_USER", "HIGH_NOT_PRECEDENCE",
"NO_ENGINE_SUBSTITUTION",
NullS
};
+static const unsigned int sql_mode_names_len[]=
+{
+ /*REAL_AS_FLOAT*/ 13,
+ /*PIPES_AS_CONCAT*/ 15,
+ /*ANSI_QUOTES*/ 11,
+ /*IGNORE_SPACE*/ 12,
+ /*?*/ 1,
+ /*ONLY_FULL_GROUP_BY*/ 18,
+ /*NO_UNSIGNED_SUBTRACTION*/ 23,
+ /*NO_DIR_IN_CREATE*/ 16,
+ /*POSTGRESQL*/ 10,
+ /*ORACLE*/ 6,
+ /*MSSQL*/ 5,
+ /*DB2*/ 3,
+ /*MAXDB*/ 5,
+ /*NO_KEY_OPTIONS*/ 14,
+ /*NO_TABLE_OPTIONS*/ 16,
+ /*NO_FIELD_OPTIONS*/ 16,
+ /*MYSQL323*/ 8,
+ /*MYSQL40*/ 7,
+ /*ANSI*/ 4,
+ /*NO_AUTO_VALUE_ON_ZERO*/ 21,
+ /*NO_BACKSLASH_ESCAPES*/ 20,
+ /*STRICT_TRANS_TABLES*/ 19,
+ /*STRICT_ALL_TABLES*/ 17,
+ /*NO_ZERO_IN_DATE*/ 15,
+ /*NO_ZERO_DATE*/ 12,
+ /*ALLOW_INVALID_DATES*/ 19,
+ /*ERROR_FOR_DIVISION_BY_ZERO*/ 26,
+ /*TRADITIONAL*/ 11,
+ /*NO_AUTO_CREATE_USER*/ 19,
+ /*HIGH_NOT_PRECEDENCE*/ 19,
+ /*NO_ENGINE_SUBSTITUTION*/ 22
+};
TYPELIB sql_mode_typelib= { array_elements(sql_mode_names)-1,"",
- sql_mode_names, NULL };
+ sql_mode_names,
+ (unsigned int *)sql_mode_names_len };
static const char *tc_heuristic_recover_names[]=
{
"COMMIT", "ROLLBACK", NullS
@@ -410,6 +447,7 @@ Le_creator le_creator;
FILE *bootstrap_file;
int bootstrap_error;
+FILE *stderror_file=0;
I_List<i_string_pair> replicate_rewrite_db;
I_List<i_string> replicate_do_db, replicate_ignore_db;
@@ -447,6 +485,9 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
LOCK_global_system_variables,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi;
+#ifdef HAVE_OPENSSL
+pthread_mutex_t LOCK_des_key_file;
+#endif
rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
pthread_cond_t COND_refresh,COND_thread_count;
pthread_t signal_thread;
@@ -675,7 +716,11 @@ static void close_connections(void)
end_thr_alarm(0); // Abort old alarms.
end_slave();
- /* First signal all threads that it's time to die */
+ /*
+    First signal all threads that it's time to die.
+ This will give the threads some time to gracefully abort their
+ statements and inform their clients that the server is about to die.
+ */
THD *tmp;
(void) pthread_mutex_lock(&LOCK_thread_count); // For unlink from list
@@ -702,9 +747,13 @@ static void close_connections(void)
(void) pthread_mutex_unlock(&LOCK_thread_count); // For unlink from list
if (thread_count)
- sleep(1); // Give threads time to die
+ sleep(2); // Give threads time to die
- /* Force remaining threads to die by closing the connection to the client */
+ /*
+    Force remaining threads to die by closing the connection to the client.
+ This will ensure that threads that are waiting for a command from the
+ client on a blocking read call are aborted.
+ */
for (;;)
{
@@ -719,8 +768,9 @@ static void close_connections(void)
#ifndef __bsdi__ // Bug in BSDI kernel
if (tmp->vio_ok())
{
- sql_print_error(ER(ER_FORCING_CLOSE),my_progname,
- tmp->thread_id,tmp->user ? tmp->user : "");
+ if (global_system_variables.log_warnings)
+ sql_print_warning(ER(ER_FORCING_CLOSE),my_progname,
+ tmp->thread_id,tmp->user ? tmp->user : "");
close_connection(tmp,0,0);
}
#endif
@@ -882,7 +932,7 @@ static void __cdecl kill_server(int sig_ptr)
unireg_end();
#ifdef __NETWARE__
- if(!event_flag)
+ if (!event_flag)
pthread_join(select_thread, NULL); // wait for main thread
#endif /* __NETWARE__ */
@@ -1036,7 +1086,6 @@ void clean_up(bool print_message)
#ifdef HAVE_OPENSSL
if (ssl_acceptor_fd)
my_free((gptr) ssl_acceptor_fd, MYF(MY_ALLOW_ZERO_PTR));
- free_des_key_file();
#endif /* HAVE_OPENSSL */
#ifdef USE_REGEX
regex_end();
@@ -1108,6 +1157,9 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_bytes_sent);
(void) pthread_mutex_destroy(&LOCK_bytes_received);
(void) pthread_mutex_destroy(&LOCK_user_conn);
+#ifdef HAVE_OPENSSL
+ (void) pthread_mutex_destroy(&LOCK_des_key_file);
+#endif
#ifdef HAVE_REPLICATION
(void) pthread_mutex_destroy(&LOCK_rpl_status);
(void) pthread_cond_destroy(&COND_rpl_status);
@@ -2305,6 +2357,19 @@ static int my_message_sql(uint error, const char *str, myf MyFlags)
DBUG_RETURN(0);
}
+
+static void *my_str_malloc_mysqld(size_t size)
+{
+ return my_malloc(size, MYF(MY_FAE));
+}
+
+
+static void my_str_free_mysqld(void *ptr)
+{
+ my_free((gptr)ptr, MYF(MY_FAE));
+}
+
+
#ifdef __WIN__
struct utsname
@@ -2586,6 +2651,50 @@ static int init_common_variables(const char *conf_file_name, int argc,
if (my_dbopt_init())
return 1;
+ /*
+    Ensure that lower_case_table_names is set on systems where we have case
+    insensitive names. If this is not done the user's MyISAM tables will
+    get corrupted if accessed with names of different case.
+ */
+ DBUG_PRINT("info", ("lower_case_table_names: %d", lower_case_table_names));
+ if (!lower_case_table_names &&
+ (lower_case_file_system=
+ (test_if_case_insensitive(mysql_real_data_home) == 1)))
+ {
+ if (lower_case_table_names_used)
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_warning("\
+You have forced lower_case_table_names to 0 through a command-line \
+option, even though your file system '%s' is case insensitive. This means \
+that you can corrupt a MyISAM table by accessing it with different cases. \
+You should consider changing lower_case_table_names to 1 or 2",
+ mysql_real_data_home);
+ }
+ else
+ {
+ if (global_system_variables.log_warnings)
+ sql_print_warning("Setting lower_case_table_names=2 because file system for %s is case insensitive", mysql_real_data_home);
+ lower_case_table_names= 2;
+ }
+ }
+ else if (lower_case_table_names == 2 &&
+ !(lower_case_file_system=
+ (test_if_case_insensitive(mysql_real_data_home) == 1)))
+ {
+ if (global_system_variables.log_warnings)
+      sql_print_warning("lower_case_table_names was set to 2, even though "
+ "the file system '%s' is case sensitive. Now setting "
+ "lower_case_table_names to 0 to avoid future problems.",
+ mysql_real_data_home);
+ lower_case_table_names= 0;
+ }
+
+ /* Reset table_alias_charset, now that lower_case_table_names is set. */
+ table_alias_charset= (lower_case_table_names ?
+ files_charset_info :
+ &my_charset_bin);
+
return 0;
}
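
The block moved into init_common_variables() reconciles lower_case_table_names with the data directory's file system: on a case-insensitive file system it is raised to 2 unless the user explicitly forced 0 (which only earns a warning), and a configured value of 2 on a case-sensitive file system is dropped back to 0. A standalone sketch of just that decision, with the server warnings reduced to printf() (function and parameter names below are illustrative):

    #include <cstdio>

    // Returns the effective lower_case_table_names value given the configured
    // value, whether the user set it explicitly, and whether the data
    // directory's file system is case insensitive.
    static int adjust_lower_case_table_names(int configured, bool user_forced,
                                             bool fs_case_insensitive)
    {
      if (configured == 0 && fs_case_insensitive)
      {
        if (user_forced)
        {
          printf("warning: 0 forced on a case-insensitive file system\n");
          return 0;                    // respect the explicit setting
        }
        printf("warning: switching to 2 (case-insensitive file system)\n");
        return 2;
      }
      if (configured == 2 && !fs_case_insensitive)
      {
        printf("warning: 2 requested on a case-sensitive file system, using 0\n");
        return 0;
      }
      return configured;
    }

    int main()
    {
      int value= adjust_lower_case_table_names(0, false, true);
      printf("effective value: %d\n", value);   // 2
      return 0;
    }
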
@@ -2611,6 +2720,9 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_read_lock, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST);
+#ifdef HAVE_OPENSSL
+ (void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST);
+#endif
(void) my_rwlock_init(&LOCK_sys_init_connect, NULL);
(void) my_rwlock_init(&LOCK_sys_init_slave, NULL);
(void) my_rwlock_init(&LOCK_grant, NULL);
@@ -2767,7 +2879,7 @@ server.");
#ifndef EMBEDDED_LIBRARY
if (freopen(log_error_file, "a+", stdout))
#endif
- freopen(log_error_file, "a+", stderr);
+ stderror_file= freopen(log_error_file, "a+", stderr);
}
}
@@ -3060,50 +3172,6 @@ int main(int argc, char **argv)
(void) thr_setconcurrency(concurrency); // 10 by default
- /*
- Ensure that lower_case_table_names is set on system where we have case
- insensitive names. If this is not done the users MyISAM tables will
- get corrupted if accesses with names of different case.
- */
- DBUG_PRINT("info", ("lower_case_table_names: %d", lower_case_table_names));
- if (!lower_case_table_names &&
- (lower_case_file_system=
- (test_if_case_insensitive(mysql_real_data_home) == 1)))
- {
- if (lower_case_table_names_used)
- {
- if (global_system_variables.log_warnings)
- sql_print_warning("\
-You have forced lower_case_table_names to 0 through a command-line \
-option, even though your file system '%s' is case insensitive. This means \
-that you can corrupt a MyISAM table by accessing it with different cases. \
-You should consider changing lower_case_table_names to 1 or 2",
- mysql_real_data_home);
- }
- else
- {
- if (global_system_variables.log_warnings)
- sql_print_warning("Setting lower_case_table_names=2 because file system for %s is case insensitive", mysql_real_data_home);
- lower_case_table_names= 2;
- }
- }
- else if (lower_case_table_names == 2 &&
- !(lower_case_file_system=
- (test_if_case_insensitive(mysql_real_data_home) == 1)))
- {
- if (global_system_variables.log_warnings)
- sql_print_warning("lower_case_table_names was set to 2, even though your "
- "the file system '%s' is case sensitive. Now setting "
- "lower_case_table_names to 0 to avoid future problems.",
- mysql_real_data_home);
- lower_case_table_names= 0;
- }
-
- /* Reset table_alias_charset, now that lower_case_table_names is set. */
- table_alias_charset= (lower_case_table_names ?
- files_charset_info :
- &my_charset_bin);
-
select_thread=pthread_self();
select_thread_in_use=1;
init_ssl();
@@ -3159,10 +3227,16 @@ we force server id to 2, but this MySQL server will not act as a slave.");
#endif
/*
+ Initialize my_str_malloc() and my_str_free()
+ */
+ my_str_malloc= &my_str_malloc_mysqld;
+ my_str_free= &my_str_free_mysqld;
+
+ /*
init signals & alarm
After this we can't quit by a simple unireg_abort
*/
- error_handler_hook = my_message_sql;
+ error_handler_hook= my_message_sql;
start_signal_handler(); // Creates pidfile
if (acl_init((THD *)0, opt_noacl) ||
my_tz_init((THD *)0, default_tz_name, opt_bootstrap))
@@ -4336,7 +4410,8 @@ enum options_mysqld
OPT_ENABLE_LARGE_PAGES,
OPT_TIMED_MUTEXES,
OPT_OLD_STYLE_USER_LIMITS,
- OPT_LOG_SLOW_ADMIN_STATEMENTS
+ OPT_LOG_SLOW_ADMIN_STATEMENTS,
+ OPT_TABLE_LOCK_WAIT_TIMEOUT
};
@@ -5589,6 +5664,11 @@ The minimum value for this variable is 4096.",
"The number of open tables for all threads.", (gptr*) &table_cache_size,
(gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L,
0, 1, 0},
+ {"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in "
+ "seconds to wait for a table level lock before returning an error. Used"
+ " only if the connection has active cursors.",
+ (gptr*) &table_lock_wait_timeout, (gptr*) &table_lock_wait_timeout,
+ 0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0},
{"thread_cache_size", OPT_THREAD_CACHE_SIZE,
"How many threads we should keep in a cache for reuse.",
(gptr*) &thread_cache_size, (gptr*) &thread_cache_size, 0, GET_ULONG,
@@ -5722,6 +5802,7 @@ struct show_var_st status_vars[]= {
{"Com_show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS},
{"Com_show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS},
{"Com_show_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS},
+ {"Com_show_triggers", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TRIGGERS]), SHOW_LONG_STATUS},
{"Com_show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS},
{"Com_show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS},
{"Com_slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
@@ -7077,4 +7158,6 @@ template class I_List_iterator<THD>;
template class I_List<i_string>;
template class I_List<i_string_pair>;
template class I_List<NAMED_LIST>;
+template class I_List<Statement>;
+template class I_List_iterator<Statement>;
#endif
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index c2760b08b6e..c1ebfe105b6 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -630,7 +630,7 @@ int imerge_list_or_tree(PARAM *param,
{
SEL_IMERGE *imerge;
List_iterator<SEL_IMERGE> it(*im1);
- while((imerge= it++))
+ while ((imerge= it++))
{
if (imerge->or_sel_tree_with_checks(param, tree))
it.remove();
@@ -990,7 +990,7 @@ int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_RETURN(1);
quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS);
}
- while((quick= quick_it++))
+ while ((quick= quick_it++))
{
if (quick->init_ror_merged_scan(FALSE))
DBUG_RETURN(1);
@@ -3531,7 +3531,8 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond)
if (arg->type() != Item::FUNC_ITEM)
DBUG_RETURN(0);
cond_func= (Item_func*) arg;
- if (cond_func->select_optimize() == Item_func::OPTIMIZE_NONE)
+ if (cond_func->functype() != Item_func::BETWEEN &&
+ cond_func->functype() != Item_func::IN_FUNC)
DBUG_RETURN(0);
inv= TRUE;
}
@@ -6942,7 +6943,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
List_iterator<Item> select_items_it(join->fields_list);
/* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/
- if(join->make_sum_func_list(join->all_fields, join->fields_list, 1))
+ if (join->make_sum_func_list(join->all_fields, join->fields_list, 1))
DBUG_RETURN(NULL);
if (join->sum_funcs[0])
{
@@ -7268,7 +7269,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item,
Item *and_or_arg;
while ((and_or_arg= li++))
{
- if(!check_group_min_max_predicates(and_or_arg, min_max_arg_item,
+ if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item,
image_type))
DBUG_RETURN(FALSE);
}
@@ -7350,7 +7351,7 @@ check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item,
}
else if (cur_arg->type() == Item::FUNC_ITEM)
{
- if(!check_group_min_max_predicates(cur_arg, min_max_arg_item,
+ if (!check_group_min_max_predicates(cur_arg, min_max_arg_item,
image_type))
DBUG_RETURN(FALSE);
}
@@ -7881,19 +7882,19 @@ int QUICK_GROUP_MIN_MAX_SELECT::init()
if (min_max_arg_part)
{
- if(my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16))
+ if (my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16))
return 1;
if (have_min)
{
- if(!(min_functions= new List<Item_sum>))
+ if (!(min_functions= new List<Item_sum>))
return 1;
}
else
min_functions= NULL;
if (have_max)
{
- if(!(max_functions= new List<Item_sum>))
+ if (!(max_functions= new List<Item_sum>))
return 1;
}
else
@@ -7967,7 +7968,7 @@ bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range)
uint range_flag= sel_range->min_flag | sel_range->max_flag;
/* Skip (-inf,+inf) ranges, e.g. (x < 5 or x > 4). */
- if((range_flag & NO_MIN_RANGE) && (range_flag & NO_MAX_RANGE))
+ if ((range_flag & NO_MIN_RANGE) && (range_flag & NO_MAX_RANGE))
return FALSE;
if (!(sel_range->min_flag & NO_MIN_RANGE) &&
diff --git a/sql/parse_file.cc b/sql/parse_file.cc
index 7cc563901d2..82ce2f2d7b5 100644
--- a/sql/parse_file.cc
+++ b/sql/parse_file.cc
@@ -166,6 +166,25 @@ write_parameter(IO_CACHE *file, gptr base, File_option *parameter,
}
break;
}
+ case FILE_OPTIONS_ULLLIST:
+ {
+ List_iterator_fast<ulonglong> it(*((List<ulonglong>*)
+ (base + parameter->offset)));
+ bool first= 1;
+ ulonglong *val;
+ while ((val= it++))
+ {
+ num.set(*val, &my_charset_bin);
+ // We need ' ' after string to detect list continuation
+ if ((!first && my_b_append(file, (const byte *)" ", 1)) ||
+ my_b_append(file, (const byte *)num.ptr(), num.length()))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ first= 0;
+ }
+ break;
+ }
default:
DBUG_ASSERT(0); // never should happened
}
@@ -615,6 +634,8 @@ File_parser::parse(gptr base, MEM_ROOT *mem_root,
char *eol;
LEX_STRING *str;
List<LEX_STRING> *list;
+ ulonglong *num;
+ List<ulonglong> *nlist;
DBUG_ENTER("File_parser::parse");
while (ptr < end && found < required)
@@ -719,7 +740,7 @@ File_parser::parse(gptr base, MEM_ROOT *mem_root,
case FILE_OPTIONS_STRLIST:
{
list= (List<LEX_STRING>*)(base + parameter->offset);
-
+
list->empty();
// list parsing
while (ptr < end)
@@ -728,7 +749,7 @@ File_parser::parse(gptr base, MEM_ROOT *mem_root,
sizeof(LEX_STRING))) ||
list->push_back(str, mem_root))
goto list_err;
- if(!(ptr= parse_quoted_escaped_string(ptr, end, mem_root, str)))
+ if (!(ptr= parse_quoted_escaped_string(ptr, end, mem_root, str)))
goto list_err_w_message;
switch (*ptr) {
case '\n':
@@ -741,17 +762,56 @@ File_parser::parse(gptr base, MEM_ROOT *mem_root,
goto list_err_w_message;
}
}
- end_of_list:
+
+end_of_list:
if (*(ptr++) != '\n')
goto list_err;
break;
- list_err_w_message:
+list_err_w_message:
my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0),
parameter->name.str, line);
- list_err:
+list_err:
DBUG_RETURN(TRUE);
}
+ case FILE_OPTIONS_ULLLIST:
+ {
+ nlist= (List<ulonglong>*)(base + parameter->offset);
+ nlist->empty();
+ // list parsing
+ while (ptr < end)
+ {
+ int not_used;
+ char *num_end= end;
+ if (!(num= (ulonglong*)alloc_root(mem_root, sizeof(ulonglong))) ||
+ nlist->push_back(num, mem_root))
+ goto nlist_err;
+ *num= my_strtoll10(ptr, &num_end, &not_used);
+ ptr= num_end;
+ switch (*ptr) {
+ case '\n':
+ goto end_of_nlist;
+ case ' ':
+        // we can't go over buffer bounds, because we have \0 at the end
+ ptr++;
+ break;
+ default:
+ goto nlist_err_w_message;
+ }
+ }
+
+end_of_nlist:
+ if (*(ptr++) != '\n')
+ goto nlist_err;
+ break;
+
+nlist_err_w_message:
+ my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0),
+ parameter->name.str, line);
+nlist_err:
+ DBUG_RETURN(TRUE);
+
+ }
default:
DBUG_ASSERT(0); // never should happened
}
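
The new FILE_OPTIONS_ULLLIST option type stores a list of ulonglong values as space-separated decimal numbers terminated by a newline; the writer emits a space only between values, and the parser treats a space as list continuation and a newline as the terminator. A standalone round-trip sketch of the same text format (plain C++ containers instead of the server's List and IO_CACHE types):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>
    #include <sstream>
    #include <string>
    #include <vector>

    // Serialize: decimal values separated by single spaces and terminated by a
    // newline, the shape write_parameter() emits for FILE_OPTIONS_ULLLIST.
    static std::string write_ull_list(const std::vector<uint64_t> &vals)
    {
      std::ostringstream out;
      for (size_t i= 0; i < vals.size(); i++)
      {
        if (i) out << ' ';             // ' ' marks list continuation
        out << vals[i];
      }
      out << '\n';                     // '\n' terminates the list
      return out.str();
    }

    // Parse: numbers until the newline; anything else is a format error.
    static bool parse_ull_list(const std::string &text, std::vector<uint64_t> *out)
    {
      size_t pos= 0;
      while (pos < text.size() && text[pos] != '\n')
      {
        char *end= 0;
        uint64_t val= strtoull(text.c_str() + pos, &end, 10);
        size_t consumed= (size_t) (end - (text.c_str() + pos));
        if (consumed == 0)
          return false;
        out->push_back(val);
        pos+= consumed;
        if (pos < text.size() && text[pos] == ' ')
          pos++;                       // continuation
      }
      return pos < text.size() && text[pos] == '\n';
    }

    int main()
    {
      std::vector<uint64_t> in, back;
      in.push_back(1);
      in.push_back(18446744073709551615ULL);
      assert(parse_ull_list(write_ull_list(in), &back) && back == in);
      return 0;
    }
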
diff --git a/sql/parse_file.h b/sql/parse_file.h
index 82a89dffd18..cc0aa6556f6 100644
--- a/sql/parse_file.h
+++ b/sql/parse_file.h
@@ -27,8 +27,10 @@ enum file_opt_type {
FILE_OPTIONS_REV, /* Revision version number (ulonglong) */
FILE_OPTIONS_TIMESTAMP, /* timestamp (LEX_STRING have to be
allocated with length 20 (19+1) */
- FILE_OPTIONS_STRLIST /* list of escaped strings
+ FILE_OPTIONS_STRLIST, /* list of escaped strings
(List<LEX_STRING>) */
+ FILE_OPTIONS_ULLLIST /* list of ulonglong values
+ (List<ulonglong>) */
};
struct File_option
diff --git a/sql/protocol_cursor.cc b/sql/protocol_cursor.cc
index ed2d0b583d0..093a2bf2b90 100644
--- a/sql/protocol_cursor.cc
+++ b/sql/protocol_cursor.cc
@@ -114,8 +114,7 @@ bool Protocol_cursor::write()
for (; cur_field < fields_end; cur_field++, data_tmp++)
{
- if ((len= net_field_length((uchar **)&cp)) == 0 ||
- len == NULL_LENGTH)
+ if ((len= net_field_length((uchar **)&cp)) == NULL_LENGTH)
{
*data_tmp= 0;
}
diff --git a/sql/set_var.cc b/sql/set_var.cc
index ae7e4bd844b..637b33f18d2 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -375,6 +375,8 @@ sys_var_thd_ulong sys_sync_replication_timeout(
sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm);
sys_var_long_ptr sys_table_cache_size("table_cache",
&table_cache_size);
+sys_var_long_ptr sys_table_lock_wait_timeout("table_lock_wait_timeout",
+ &table_lock_wait_timeout);
sys_var_long_ptr sys_thread_cache_size("thread_cache_size",
&thread_cache_size);
sys_var_thd_enum sys_tx_isolation("tx_isolation",
@@ -682,6 +684,7 @@ sys_var *sys_variables[]=
#endif
&sys_sync_frm,
&sys_table_cache_size,
+ &sys_table_lock_wait_timeout,
&sys_table_type,
&sys_thread_cache_size,
&sys_time_format,
@@ -972,6 +975,7 @@ struct show_var_st init_vars[]= {
{"system_time_zone", system_time_zone, SHOW_CHAR},
#endif
{"table_cache", (char*) &table_cache_size, SHOW_LONG},
+ {"table_lock_wait_timeout", (char*) &table_lock_wait_timeout, SHOW_LONG },
{sys_table_type.name, (char*) &sys_table_type, SHOW_SYS},
{sys_thread_cache_size.name,(char*) &sys_thread_cache_size, SHOW_SYS},
#ifdef HAVE_THR_SETCONCURRENCY
@@ -1422,6 +1426,12 @@ bool sys_var_thd_ulong::update(THD *thd, set_var *var)
if ((ulong) tmp > max_system_variables.*offset)
tmp= max_system_variables.*offset;
+#if SIZEOF_LONG == 4
+ /* Avoid overflows on 32 bit systems */
+ if (tmp > (ulonglong) ~(ulong) 0)
+ tmp= ((ulonglong) ~(ulong) 0);
+#endif
+
if (option_limits)
tmp= (ulong) getopt_ull_limit_value(tmp, option_limits);
if (var->type == OPT_GLOBAL)
@@ -1652,15 +1662,7 @@ err:
/*
Return an Item for a variable. Used with @@[global.]variable_name
-
If type is not given, return local value if exists, else global
-
- We have to use netprintf() instead of my_error() here as this is
- called on the parsing stage.
-
- TODO:
- With prepared statements/stored procedures this has to be fixed
- to create an item that gets the current value at fix_fields() stage.
*/
Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base)
@@ -1683,7 +1685,7 @@ Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base)
pthread_mutex_lock(&LOCK_global_system_variables);
value= *(uint*) value_ptr(thd, var_type, base);
pthread_mutex_unlock(&LOCK_global_system_variables);
- return new Item_uint((int32) value);
+ return new Item_uint((ulonglong) value);
}
case SHOW_LONG:
{
@@ -1691,7 +1693,7 @@ Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base)
pthread_mutex_lock(&LOCK_global_system_variables);
value= *(ulong*) value_ptr(thd, var_type, base);
pthread_mutex_unlock(&LOCK_global_system_variables);
- return new Item_uint((int32) value);
+ return new Item_uint((ulonglong) value);
}
case SHOW_LONGLONG:
{
@@ -2291,7 +2293,12 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var)
if (!tmp) // Zero size means delete
{
if (key_cache == dflt_key_cache)
+ {
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARN_CANT_DROP_DEFAULT_KEYCACHE,
+ ER(ER_WARN_CANT_DROP_DEFAULT_KEYCACHE));
goto end; // Ignore default key cache
+ }
if (key_cache->key_cache_inited) // If initied
{
@@ -3200,27 +3207,50 @@ bool sys_var_thd_table_type::update(THD *thd, set_var *var)
Functions to handle sql_mode
****************************************************************************/
-byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type,
- LEX_STRING *base)
+/*
+ Make string representation of mode
+
+ SYNOPSIS
+ thd in thread handler
+  SYNOPSIS
+    symbolic_mode_representation()
+      thd   in   thread handler
+      val   in   sql_mode value
+      len   out  pointer to the length of the resulting string
+ pointer to string with sql_mode representation
+*/
+
+byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, ulong val,
+ ulong *len)
{
- ulong val;
char buff[256];
String tmp(buff, sizeof(buff), &my_charset_latin1);
+ ulong length;
tmp.length(0);
- val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
- thd->variables.*offset);
for (uint i= 0; val; val>>= 1, i++)
{
if (val & 1)
{
- tmp.append(enum_names->type_names[i]);
+ tmp.append(sql_mode_typelib.type_names[i],
+ sql_mode_typelib.type_lengths[i]);
tmp.append(',');
}
}
- if (tmp.length())
- tmp.length(tmp.length() - 1);
- return (byte*) thd->strmake(tmp.ptr(), tmp.length());
+
+ if ((length= tmp.length()))
+ length--;
+ *len= length;
+ return (byte*) thd->strmake(tmp.ptr(), length);
+}
+
+
+byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type,
+ LEX_STRING *base)
+{
+ ulong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
+ thd->variables.*offset);
+ ulong length_unused;
+ return symbolic_mode_representation(thd, val, &length_unused);
}
@@ -3232,6 +3262,7 @@ void sys_var_thd_sql_mode::set_default(THD *thd, enum_var_type type)
thd->variables.*offset= global_system_variables.*offset;
}
+
void fix_sql_mode_var(THD *thd, enum_var_type type)
{
if (type == OPT_GLOBAL)
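
symbolic_mode_representation() builds the textual form of an sql_mode value by appending the name of every set bit and then trimming the trailing comma; making it a static member lets sp_head::show_create_procedure() (later in this patch) print a routine's saved sql_mode without temporarily switching the session variable. A standalone sketch of the bits-to-names conversion (illustrative subset of mode names, std::string instead of the server's String):

    #include <cstdio>
    #include <string>

    // Names indexed by bit position; only an illustrative subset of the real
    // sql_mode names (the server's table covers every defined bit).
    static const char *mode_names[]=
    { "REAL_AS_FLOAT", "PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE" };

    // Build "NAME,NAME,..." for the bits set in val, trimming the last comma.
    static std::string symbolic_mode(unsigned long val)
    {
      std::string out;
      for (unsigned int i= 0; val; val>>= 1, i++)
      {
        if (val & 1)
        {
          out+= mode_names[i];
          out+= ',';
        }
      }
      if (!out.empty())
        out.erase(out.size() - 1);     // drop the trailing ','
      return out;
    }

    int main()
    {
      printf("%s\n", symbolic_mode((1UL << 0) | (1UL << 2)).c_str());
      // prints: REAL_AS_FLOAT,ANSI_QUOTES
      return 0;
    }
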
diff --git a/sql/set_var.h b/sql/set_var.h
index a6532323b34..c8b075ddd35 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -30,11 +30,6 @@ class set_var;
typedef struct system_variables SV;
extern TYPELIB bool_typelib, delay_key_write_typelib, sql_mode_typelib;
-enum enum_var_type
-{
- OPT_DEFAULT= 0, OPT_SESSION, OPT_GLOBAL
-};
-
typedef int (*sys_check_func)(THD *, set_var *);
typedef bool (*sys_update_func)(THD *, set_var *);
typedef void (*sys_after_update_func)(THD *,enum_var_type);
@@ -366,6 +361,8 @@ public:
}
void set_default(THD *thd, enum_var_type type);
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
+ static byte *symbolic_mode_representation(THD *thd, ulong sql_mode,
+ ulong *length);
};
diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml
index a6bb4bad99b..32fd1618a8b 100644
--- a/sql/share/charsets/Index.xml
+++ b/sql/share/charsets/Index.xml
@@ -106,7 +106,7 @@ To make maintaining easier please:
<charset name="latin1">
<family>Western</family>
- <description>ISO 8859-1 West European</description>
+ <description>cp1252 West European</description>
<alias>csisolatin1</alias>
<alias>iso-8859-1</alias>
<alias>iso-ir-100</alias>
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index f999f17aedf..2d46a2192b9 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -4094,20 +4094,20 @@ ER_ERROR_DURING_CHECKPOINT
swe "Fick fel %d vid CHECKPOINT"
ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT"
ER_NEW_ABORTING_CONNECTION 08S01
- cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: `%-.64s' (%-.64s) bylo pøeru¹eno"
- dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: `%-.64s' (%-.64s)"
- nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: `%-.64s' (%-.64s)"
- eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)"
- est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: `%-.64s' (%-.64s)"
- fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: `%-.64s' (%-.64s)"
- ger "Verbindungsabbruch %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: `%-.64s' (%-.64s)"
+ cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo pøeru¹eno"
+ dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: '%-.64s' (%-.64s)"
+ nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)"
+ eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)"
+ est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)"
+ fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: '%-.64s' (%-.64s)"
+ ger "Verbindungsabbruch %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)"
ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)"
- por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' `%-.64s' ('%-.64s')"
- rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ `%-.64s' (%-.64s)"
- serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: `%-.64s' (%-.64s)"
- spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: `%-.64s' (%-.64s)"
+ por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' '%-.64s' ('%-.64s')"
+ rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ '%-.64s' (%-.64s)"
+ serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)"
+ spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)"
swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)"
- ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: `%-.64s' (%-.64s)"
+ ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: '%-.64s' (%-.64s)"
ER_DUMP_NOT_IMPLEMENTED
cze "Handler tabulky nepodporuje bin-Bární dump"
dan "Denne tabeltype unserstøtter ikke binært tabeldump"
@@ -5370,3 +5370,26 @@ ER_SCALE_BIGGER_THAN_PRECISION 42000 S1009
eng "Scale may not be larger than the precision (column '%-.64s')."
ER_WRONG_LOCK_OF_SYSTEM_TABLE
eng "You can't combine write-locking of system '%-.64s.%-.64s' table with other tables"
+ER_CONNECT_TO_FOREIGN_DATA_SOURCE
+ eng "Unable to connect to foreign data source - database '%s'!"
+ER_QUERY_ON_FOREIGN_DATA_SOURCE
+ eng "There was a problem processing the query on the foreign data source. Data source error: '%-.64s'"
+ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST
+ eng "The foreign data source you are trying to reference does not exist. Data source error : '%-.64s'"
+ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE
+ eng "Can't create federated table. The data source connection string '%-.64s' is not in the correct format"
+ER_FOREIGN_DATA_STRING_INVALID
+ eng "The data source connection string '%-.64s' is not in the correct format"
+ER_CANT_CREATE_FEDERATED_TABLE
+ eng "Can't create federated table. Foreign data src error : '%-.64s'"
+ER_TRG_IN_WRONG_SCHEMA
+ eng "Trigger in wrong schema"
+ER_STACK_OVERRUN_NEED_MORE
+ eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack."
+ER_TOO_LONG_BODY 42000 S1009
+ eng "Routine body for '%-.100s' is too long"
+ER_WARN_CANT_DROP_DEFAULT_KEYCACHE
+ eng "Cannot drop default keycache"
+ ger "Der Default-Keycache kann nicht gelöscht werden"
+ER_TOO_BIG_DISPLAYWIDTH 42000 S1009
+ eng "Display width out of range for column '%-.64s' (max = %d)"
diff --git a/sql/slave.cc b/sql/slave.cc
index c6c0de7160b..c09753d805b 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -2396,7 +2396,8 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
&my_charset_bin);
protocol->store((ulonglong) mi->rli.group_relay_log_pos);
protocol->store(mi->rli.group_master_log_name, &my_charset_bin);
- protocol->store(mi->slave_running ? "Yes":"No", &my_charset_bin);
+ protocol->store(mi->slave_running == MYSQL_SLAVE_RUN_CONNECT
+ ? "Yes":"No", &my_charset_bin);
protocol->store(mi->rli.slave_running ? "Yes":"No", &my_charset_bin);
protocol->store(&replicate_do_db);
protocol->store(&replicate_ignore_db);
@@ -2753,7 +2754,7 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
else
pthread_cond_wait(&data_cond, &data_lock);
DBUG_PRINT("info",("Got signal of master update or timed out"));
- if (error == ETIMEDOUT || error == ETIME)
+ if (error == ETIMEDOUT)
{
error= -1;
break;
@@ -3637,6 +3638,7 @@ err:
mi->abort_slave = 0; // TODO: check if this is needed
DBUG_ASSERT(thd->net.buff != 0);
net_end(&thd->net); // destructor will not free it, because net.vio is 0
+ close_thread_tables(thd, 0);
pthread_mutex_lock(&LOCK_thread_count);
THD_CHECK_SENTRY(thd);
delete thd;
diff --git a/sql/sp.cc b/sql/sp.cc
index 5c95d12872b..0eee0ac209c 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -506,6 +506,11 @@ db_create_routine(THD *thd, int type, sp_head *sp)
ret= SP_BAD_IDENTIFIER;
goto done;
}
+ if (sp->m_body.length > table->field[MYSQL_PROC_FIELD_BODY]->field_length)
+ {
+ ret= SP_BODY_TOO_LONG;
+ goto done;
+ }
table->field[MYSQL_PROC_FIELD_DB]->
store(sp->m_db.str, sp->m_db.length, system_charset_info);
table->field[MYSQL_PROC_FIELD_NAME]->
@@ -1179,6 +1184,43 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first)
/*
+ Check if
+ - current statement (the one in thd->lex) needs table prelocking
+ - first routine in thd->lex->sroutines_list needs to execute its body in
+ prelocked mode.
+
+ SYNOPSIS
+ sp_get_prelocking_info()
+ thd Current thread, thd->lex is the statement to be
+ checked.
+ need_prelocking OUT TRUE - prelocked mode should be activated
+ before executing the statement
+ FALSE - Don't activate prelocking
+      first_no_prelocking OUT TRUE - Tables used by the first routine in
+                                     thd->lex->sroutines_list should not be
+                                     prelocked here (the routine does its own
+                                     prelocking when it is executed).
+                              FALSE - Otherwise.
+ NOTES
+ This function assumes that for any "CALL proc(...)" statement routines_list
+ will have 'proc' as first element (it may have several, consider e.g.
+  "proc(sp_func(...))"). This property is currently guaranteed by the parser.
+*/
+
+void sp_get_prelocking_info(THD *thd, bool *need_prelocking,
+ bool *first_no_prelocking)
+{
+ Sroutine_hash_entry *routine;
+ routine= (Sroutine_hash_entry*)thd->lex->sroutines_list.first;
+
+ DBUG_ASSERT(routine);
+ bool first_is_procedure= (routine->key.str[0] == TYPE_ENUM_PROCEDURE);
+
+ *first_no_prelocking= first_is_procedure;
+ *need_prelocking= !first_is_procedure || test(routine->next);
+}
+
+
+/*
Auxilary function that adds new element to the set of stored routines
used by statement.
@@ -1315,11 +1357,13 @@ static void sp_update_stmt_used_routines(THD *thd, LEX *lex, HASH *src)
SYNOPSIS
sp_cache_routines_and_add_tables_aux()
- thd - thread context
- lex - LEX representing statement
- start - first routine from the list of routines to be cached
- (this list defines mentioned sub-set).
-
+ thd - thread context
+ lex - LEX representing statement
+ start - first routine from the list of routines to be cached
+ (this list defines mentioned sub-set).
+ first_no_prelock - If true, don't add tables or cache routines used by
+                       the body of the first routine (i.e. *start); it
+                       will be executed in non-prelocked mode.
NOTE
If some function is missing this won't be reported here.
Instead this fact will be discovered during query execution.
@@ -1331,10 +1375,11 @@ static void sp_update_stmt_used_routines(THD *thd, LEX *lex, HASH *src)
static bool
sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
- Sroutine_hash_entry *start)
+ Sroutine_hash_entry *start,
+ bool first_no_prelock)
{
bool result= FALSE;
-
+ bool first= TRUE;
DBUG_ENTER("sp_cache_routines_and_add_tables_aux");
for (Sroutine_hash_entry *rt= start; rt; rt= rt->next)
@@ -1370,9 +1415,13 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
}
if (sp)
{
- sp_update_stmt_used_routines(thd, lex, &sp->m_sroutines);
- result|= sp->add_used_tables_to_table_list(thd, &lex->query_tables_last);
+ if (!(first && first_no_prelock))
+ {
+ sp_update_stmt_used_routines(thd, lex, &sp->m_sroutines);
+ result|= sp->add_used_tables_to_table_list(thd, &lex->query_tables_last);
+ }
}
+ first= FALSE;
}
DBUG_RETURN(result);
}
@@ -1385,20 +1434,22 @@ sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex,
SYNOPSIS
sp_cache_routines_and_add_tables()
- thd - thread context
- lex - LEX representing statement
-
+ thd - thread context
+ lex - LEX representing statement
+ first_no_prelock - If true, don't add tables or cache routines used by
+ the body of the first routine (i.e. *start)
+
RETURN VALUE
TRUE - some tables were added
FALSE - no tables were added.
*/
bool
-sp_cache_routines_and_add_tables(THD *thd, LEX *lex)
+sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock)
{
-
return sp_cache_routines_and_add_tables_aux(thd, lex,
- (Sroutine_hash_entry *)lex->sroutines_list.first);
+ (Sroutine_hash_entry *)lex->sroutines_list.first,
+ first_no_prelock);
}
@@ -1420,8 +1471,8 @@ sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex, LEX *aux_lex)
Sroutine_hash_entry **last_cached_routine_ptr=
(Sroutine_hash_entry **)lex->sroutines_list.next;
sp_update_stmt_used_routines(thd, lex, &aux_lex->sroutines);
- (void)sp_cache_routines_and_add_tables_aux(thd, lex,
- *last_cached_routine_ptr);
+ (void)sp_cache_routines_and_add_tables_aux(thd, lex,
+ *last_cached_routine_ptr, FALSE);
}
@@ -1445,8 +1496,10 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
{
Sroutine_hash_entry **last_cached_routine_ptr=
(Sroutine_hash_entry **)lex->sroutines_list.next;
- for (int i= 0; i < 3; i++)
- for (int j= 0; j < 2; j++)
+ for (int i= 0; i < (int)TRG_EVENT_MAX; i++)
+ {
+ for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
+ {
if (triggers->bodies[i][j])
{
(void)triggers->bodies[i][j]->add_used_tables_to_table_list(thd,
@@ -1454,9 +1507,11 @@ sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
sp_update_stmt_used_routines(thd, lex,
&triggers->bodies[i][j]->m_sroutines);
}
-
+ }
+ }
(void)sp_cache_routines_and_add_tables_aux(thd, lex,
- *last_cached_routine_ptr);
+ *last_cached_routine_ptr,
+ FALSE);
}
}
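
sp_get_prelocking_info() encodes a simple rule: when the statement is a plain CALL (the first routine in sroutines_list is a procedure), that procedure enters prelocked mode itself, so the CALL statement only needs prelocking if the first routine is a function or further routines (for example functions evaluating the CALL arguments) follow it. A standalone sketch of that decision, relying on the same parser guarantee that the called procedure is the first list element (RoutineRef below is an illustrative stand-in for Sroutine_hash_entry):

    #include <cassert>

    // One entry per routine referenced by the statement, in parser order;
    // an illustrative stand-in for Sroutine_hash_entry.
    struct RoutineRef
    {
      bool is_procedure;     // true for a stored procedure, false for a function
      RoutineRef *next;
    };

    static void get_prelocking_info(const RoutineRef *first,
                                    bool *need_prelocking,
                                    bool *first_no_prelocking)
    {
      bool first_is_procedure= first->is_procedure;
      // The procedure enters prelocked mode itself when executed, so its tables
      // are not prelocked as part of the CALL statement.
      *first_no_prelocking= first_is_procedure;
      // Prelock now only if the first routine is a function, or more routines
      // (for example functions evaluating the CALL arguments) follow it.
      *need_prelocking= !first_is_procedure || first->next != 0;
    }

    int main()
    {
      bool need, first_no;

      RoutineRef proc= { true, 0 };
      get_prelocking_info(&proc, &need, &first_no);
      assert(!need && first_no);           // plain CALL p(): no prelocking here

      RoutineRef func=  { false, 0 };
      RoutineRef proc2= { true, &func };   // CALL p(f()): prelock for f()
      get_prelocking_info(&proc2, &need, &first_no);
      assert(need && first_no);
      return 0;
    }
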
diff --git a/sql/sp.h b/sql/sp.h
index 29ba5625e37..9f110f87807 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -29,6 +29,7 @@
#define SP_INTERNAL_ERROR -7
#define SP_NO_DB_ERROR -8
#define SP_BAD_IDENTIFIER -9
+#define SP_BODY_TOO_LONG -10
/* Drop all routines in database 'db' */
int
@@ -79,10 +80,13 @@ sp_show_status_function(THD *thd, const char *wild);
Procedures for pre-caching of stored routines and building table list
for prelocking.
*/
+void sp_get_prelocking_info(THD *thd, bool *need_prelocking,
+ bool *first_no_prelocking);
void sp_add_used_routine(LEX *lex, Query_arena *arena,
sp_name *rt, char rt_type);
void sp_update_sp_used_routines(HASH *dst, HASH *src);
-bool sp_cache_routines_and_add_tables(THD *thd, LEX *lex);
+bool sp_cache_routines_and_add_tables(THD *thd, LEX *lex,
+ bool first_no_prelock);
void sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex,
LEX *aux_lex);
void sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex,
diff --git a/sql/sp_cache.h b/sql/sp_cache.h
index e9efe5b2a8c..14b2db97f5f 100644
--- a/sql/sp_cache.h
+++ b/sql/sp_cache.h
@@ -22,6 +22,13 @@
#pragma interface /* gcc class implementation */
#endif
+/*
+ Stored procedures/functions cache. This is used as follows:
+ * Each thread has its own cache.
+ * Each sp_head object is put into its thread cache before it is used, and
+ then remains in the cache until deleted.
+*/
+
class sp_head;
class sp_cache;
@@ -31,16 +38,20 @@ void sp_cache_init();
/* Clear the cache *cp and set *cp to NULL */
void sp_cache_clear(sp_cache **cp);
-/* Insert an SP to cache. If 'cp' points to NULL, it's set to a new cache */
+/* Insert an SP into cache. If 'cp' points to NULL, it's set to a new cache */
void sp_cache_insert(sp_cache **cp, sp_head *sp);
/* Lookup an SP in cache */
sp_head *sp_cache_lookup(sp_cache **cp, sp_name *name);
-/* Remove an SP from cache. Returns true if something was removed */
+/*
+ Remove an SP from cache, and also bump the Cversion number so all other
+ caches are invalidated.
+ Returns true if something was removed.
+*/
bool sp_cache_remove(sp_cache **cp, sp_name *name);
-/* Invalidate a cache */
+/* Invalidate all existing SP caches by bumping Cversion number. */
void sp_cache_invalidate();
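
The reworked comments describe the sp_cache invalidation scheme: each per-thread cache remembers the global Cversion it was filled under, and sp_cache_remove()/sp_cache_invalidate() bump that counter so every other cache becomes stale without being touched directly. A standalone, single-threaded sketch of the version-counter technique (std::map instead of the real HASH, and a plain int standing in for sp_head):

    #include <cassert>
    #include <map>
    #include <string>

    static unsigned long g_version= 0;         // stand-in for the Cversion counter

    // Simplified, single-threaded cache.
    struct RoutineCache
    {
      unsigned long version;
      std::map<std::string, int> entries;
      RoutineCache() : version(g_version) {}

      // A lookup in a cache filled under an older version is a miss; the stale
      // contents are dropped and the caller reloads the routine from disk.
      const int *lookup(const std::string &name)
      {
        if (version < g_version)
        {
          entries.clear();
          version= g_version;
          return 0;
        }
        std::map<std::string, int>::const_iterator it= entries.find(name);
        return it == entries.end() ? 0 : &it->second;
      }

      void insert(const std::string &name, int body) { entries[name]= body; }
    };

    // Invalidate every cache at once by bumping the global counter.
    static void cache_invalidate() { g_version++; }

    int main()
    {
      RoutineCache cache;
      cache.insert("db.p1", 42);
      assert(cache.lookup("db.p1") != 0);
      cache_invalidate();                      // e.g. after DROP/ALTER PROCEDURE
      assert(cache.lookup("db.p1") == 0);      // the stale entry is gone
      return 0;
    }
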
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 272456d8c8e..3a386356335 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -574,6 +574,7 @@ sp_head::execute(THD *thd)
sp_rcontext *ctx;
int ret= 0;
uint ip= 0;
+ ulong save_sql_mode;
Query_arena *old_arena;
query_id_t old_query_id;
TABLE *old_derived_tables;
@@ -626,6 +627,8 @@ sp_head::execute(THD *thd)
old_query_id= thd->query_id;
old_derived_tables= thd->derived_tables;
thd->derived_tables= 0;
+ save_sql_mode= thd->variables.sql_mode;
+ thd->variables.sql_mode= m_sql_mode;
/*
It is also more efficient to save/restore current thd->lex once when
do it in each instruction
@@ -715,6 +718,7 @@ sp_head::execute(THD *thd)
thd->query_id= old_query_id;
DBUG_ASSERT(!thd->derived_tables);
thd->derived_tables= old_derived_tables;
+ thd->variables.sql_mode= save_sql_mode;
thd->current_arena= old_arena;
state= EXECUTED;
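
The hunks above save the caller's sql_mode, run the routine under its own m_sql_mode, and restore the old value on exit. A scope-guard formulation of the same save/restore idea, shown only as an illustration (the patch itself restores the value manually, and the names below are hypothetical):

struct SqlModeGuardSketch
{
  unsigned long &slot;            /* e.g. thd->variables.sql_mode */
  unsigned long saved;

  SqlModeGuardSketch(unsigned long &mode_slot, unsigned long routine_mode)
    : slot(mode_slot), saved(mode_slot)
  {
    slot= routine_mode;           /* execute under the routine's sql_mode */
  }
  ~SqlModeGuardSketch()
  {
    slot= saved;                  /* restore the caller's sql_mode on any exit */
  }
};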
@@ -879,7 +883,10 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
octx= new sp_rcontext(csize, hmax, cmax);
tmp_octx= TRUE;
}
+
+ /* Evaluate SP arguments (i.e. get the values passed as parameters) */
// QQ: Should do type checking?
+ DBUG_PRINT("info",(" %.*s: eval args", m_name.length, m_name.str));
for (i = 0 ; (it= li++) && i < params ; i++)
{
sp_pvar_t *pvar= m_pcont->find_pvar(i);
@@ -916,6 +923,15 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
}
}
+ /*
+ Okay, got values for all arguments. Close tables that might be used by
+ argument evaluation. If argument evaluation required prelocking mode,
+ we leave that mode here.
+ */
+ if (!thd->in_sub_stmt)
+ close_thread_tables(thd, 0, 0, 0);
+
+ DBUG_PRINT("info",(" %.*s: eval args done", m_name.length, m_name.str));
// The rest of the frame are local variables which are all IN.
// Default all variables to null (those with default clauses will
// be set by an set instruction).
@@ -1023,6 +1039,7 @@ sp_head::reset_lex(THD *thd)
DBUG_ENTER("sp_head::reset_lex");
LEX *sublex;
LEX *oldlex= thd->lex;
+ my_lex_states state= oldlex->next_state; // Keep original next_state
(void)m_lex.push_front(oldlex);
thd->lex= sublex= new st_lex;
@@ -1030,6 +1047,11 @@ sp_head::reset_lex(THD *thd)
/* Reset most stuff. The length arguments doesn't matter here. */
lex_start(thd, oldlex->buf, (ulong) (oldlex->end_of_query - oldlex->ptr));
+ /*
+ * next_state is normally the same (0), but it happens that we swap lex in
+ * "mid-sentence", so we must restore it.
+ */
+ sublex->next_state= state;
/* We must reset ptr and end_of_query again */
sublex->ptr= oldlex->ptr;
sublex->end_of_query= oldlex->end_of_query;
@@ -1239,8 +1261,6 @@ sp_head::show_create_procedure(THD *thd)
String buffer(buff, sizeof(buff), system_charset_info);
int res;
List<Item> field_list;
- ulong old_sql_mode;
- sys_var *sql_mode_var;
byte *sql_mode_str;
ulong sql_mode_len;
bool full_access;
@@ -1252,19 +1272,13 @@ sp_head::show_create_procedure(THD *thd)
if (check_show_routine_access(thd, this, &full_access))
return 1;
-
- old_sql_mode= thd->variables.sql_mode;
- thd->variables.sql_mode= m_sql_mode;
- sql_mode_var= find_sys_var("SQL_MODE", 8);
- if (sql_mode_var)
- {
- sql_mode_str= sql_mode_var->value_ptr(thd, OPT_SESSION, 0);
- sql_mode_len= strlen((char*) sql_mode_str);
- }
+ sql_mode_str=
+ sys_var_thd_sql_mode::symbolic_mode_representation(thd,
+ m_sql_mode,
+ &sql_mode_len);
field_list.push_back(new Item_empty_string("Procedure", NAME_LEN));
- if (sql_mode_var)
- field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len));
+ field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len));
// 1024 is to avoid confusing old clients
field_list.push_back(new Item_empty_string("Create Procedure",
max(buffer.length(), 1024)));
@@ -1276,15 +1290,13 @@ sp_head::show_create_procedure(THD *thd)
}
protocol->prepare_for_resend();
protocol->store(m_name.str, m_name.length, system_charset_info);
- if (sql_mode_var)
- protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info);
+ protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info);
if (full_access)
protocol->store(m_defstr.str, m_defstr.length, system_charset_info);
res= protocol->write();
send_eof(thd);
done:
- thd->variables.sql_mode= old_sql_mode;
DBUG_RETURN(res);
}
@@ -1320,8 +1332,6 @@ sp_head::show_create_function(THD *thd)
String buffer(buff, sizeof(buff), system_charset_info);
int res;
List<Item> field_list;
- ulong old_sql_mode;
- sys_var *sql_mode_var;
byte *sql_mode_str;
ulong sql_mode_len;
bool full_access;
@@ -1333,18 +1343,12 @@ sp_head::show_create_function(THD *thd)
if (check_show_routine_access(thd, this, &full_access))
return 1;
- old_sql_mode= thd->variables.sql_mode;
- thd->variables.sql_mode= m_sql_mode;
- sql_mode_var= find_sys_var("SQL_MODE", 8);
- if (sql_mode_var)
- {
- sql_mode_str= sql_mode_var->value_ptr(thd, OPT_SESSION, 0);
- sql_mode_len= strlen((char*) sql_mode_str);
- }
-
+ sql_mode_str=
+ sys_var_thd_sql_mode::symbolic_mode_representation(thd,
+ m_sql_mode,
+ &sql_mode_len);
field_list.push_back(new Item_empty_string("Function",NAME_LEN));
- if (sql_mode_var)
- field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len));
+ field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len));
field_list.push_back(new Item_empty_string("Create Function",
max(buffer.length(),1024)));
if (protocol->send_fields(&field_list,
@@ -1355,15 +1359,13 @@ sp_head::show_create_function(THD *thd)
}
protocol->prepare_for_resend();
protocol->store(m_name.str, m_name.length, system_charset_info);
- if (sql_mode_var)
- protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info);
+ protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info);
if (full_access)
protocol->store(m_defstr.str, m_defstr.length, system_charset_info);
res= protocol->write();
send_eof(thd);
done:
- thd->variables.sql_mode= old_sql_mode;
DBUG_RETURN(res);
}
@@ -1474,8 +1476,27 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
implemented at the same time as ability not to store LEX for
instruction if it is not really used.
*/
- reinit_stmt_before_use(thd, m_lex);
+ if (thd->prelocked_mode == NON_PRELOCKED)
+ {
+ /*
+ This statement will enter/leave prelocked mode on its own.
+ Entering prelocked mode changes table list and related members
+ of LEX, so we'll need to restore them.
+ */
+ if (lex_query_tables_own_last)
+ {
+ /*
+ We've already entered/left prelocked mode with this statement.
+ Attach the list of tables that need to be prelocked and mark m_lex
+ as having such a list attached.
+ */
+ *lex_query_tables_own_last= prelocking_tables;
+ m_lex->mark_as_requiring_prelocking(lex_query_tables_own_last);
+ }
+ }
+
+ reinit_stmt_before_use(thd, m_lex);
/*
If requested, check whether we have access to the tables in LEX's table list
and open and lock them before executing the instruction's core function.
@@ -1493,6 +1514,26 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
thd->proc_info="closing tables";
close_thread_tables(thd);
+ if (m_lex->query_tables_own_last)
+ {
+ /*
+ We've entered and left prelocking mode when executing the statement
+ stored in m_lex.
+ m_lex->query_tables(->next_global)* list now has a 'tail' - a list
+ of tables that are added for prelocking. (If this is the first
+ execution, the 'tail' was added by open_tables(), otherwise we've
+ attached it above in this function).
+ Now we'll save the 'tail', and detach it.
+ */
+ DBUG_ASSERT(!lex_query_tables_own_last ||
+ lex_query_tables_own_last == m_lex->query_tables_own_last &&
+ prelocking_tables == *(m_lex->query_tables_own_last));
+
+ lex_query_tables_own_last= m_lex->query_tables_own_last;
+ prelocking_tables= *lex_query_tables_own_last;
+ *lex_query_tables_own_last= NULL;
+ m_lex->mark_as_requiring_prelocking(NULL);
+ }
thd->rollback_item_tree_changes();
/*
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 32dc4449174..e15b68be158 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -121,7 +121,7 @@ public:
uchar *m_tmp_query; // Temporary pointer to sub query string
uint m_old_cmq; // Old CLIENT_MULTI_QUERIES value
st_sp_chistics *m_chistics;
- ulong m_sql_mode; // For SHOW CREATE
+ ulong m_sql_mode; // For SHOW CREATE and execution
LEX_STRING m_qname; // db.name
LEX_STRING m_db;
LEX_STRING m_name;
@@ -282,6 +282,10 @@ private:
/*
Multi-set representing optimized list of tables to be locked by this
routine. Does not include tables which are used by invoked routines.
+
+ Note: for prelocking-free SPs this multiset is constructed too.
+ We do so because the same instance of sp_head may be called both
+ in prelocked mode and in non-prelocked mode.
*/
HASH m_sptabs;
@@ -383,7 +387,8 @@ class sp_lex_keeper
public:
sp_lex_keeper(LEX *lex, bool lex_resp)
- : m_lex(lex), m_lex_resp(lex_resp)
+ : m_lex(lex), m_lex_resp(lex_resp),
+ lex_query_tables_own_last(NULL)
{
lex->sp_lex_in_use= TRUE;
}
@@ -418,6 +423,25 @@ private:
for LEX deletion.
*/
bool m_lex_resp;
+
+ /*
+ Support for being able to execute this statement in two modes:
+ a) inside prelocked mode set by the calling procedure or its ancestor.
+ b) outside of prelocked mode, when this statement enters/leaves
+ prelocked mode itself.
+ */
+
+ /*
+ List of additional tables this statement needs to lock when it
+ enters/leaves prelocked mode on its own.
+ */
+ TABLE_LIST *prelocking_tables;
+
+ /*
+ The value m_lex->query_tables_own_last should be set to this when the
+ statement enters/leaves prelocked mode on its own.
+ */
+ TABLE_LIST **lex_query_tables_own_last;
};
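
The two new members above carry the 'prelocking tail' of the table list between executions of one instruction. A simplified standalone sketch of that attach/detach cycle (hypothetical types; mark_as_requiring_prelocking() is reduced to a plain pointer assignment):

#include <cstddef>

struct TableRefSketch { TableRefSketch *next_global; };

struct LexSketch
{
  TableRefSketch  *query_tables;           /* head of the global table list */
  TableRefSketch **query_tables_own_last;  /* set when prelocking is required */
};

struct LexKeeperSketch
{
  TableRefSketch  *prelocking_tables= nullptr;        /* saved tail */
  TableRefSketch **lex_query_tables_own_last= nullptr;

  /* Before re-execution: re-attach the tail saved by a previous run. */
  void attach(LexSketch *lex)
  {
    if (lex_query_tables_own_last)
    {
      *lex_query_tables_own_last= prelocking_tables;
      lex->query_tables_own_last= lex_query_tables_own_last;
    }
  }

  /* After execution: remember where the prelocking tail starts, save it,
     and detach it so the statement's own table list is restored. */
  void detach(LexSketch *lex)
  {
    if (lex->query_tables_own_last)
    {
      lex_query_tables_own_last= lex->query_tables_own_last;
      prelocking_tables= *lex_query_tables_own_last;
      *lex_query_tables_own_last= nullptr;
      lex->query_tables_own_last= nullptr;
    }
  }
};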
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 04666469e9c..1b3d8cc914a 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -3547,9 +3547,9 @@ bool check_grant_db(THD *thd,const char *db)
{
char helping [NAME_LEN+USERNAME_LENGTH+2];
uint len;
- bool error=1;
+ bool error= 1;
- len = (uint) (strmov(strmov(helping,thd->priv_user)+1,db)-helping)+ 1;
+ len= (uint) (strmov(strmov(helping,thd->priv_user)+1,db)-helping)+ 1;
rw_rdlock(&LOCK_grant);
for (uint idx=0 ; idx < column_priv_hash.records ; idx++)
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 3a650b50b23..0895901508d 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -292,7 +292,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
bool found=0;
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
- if (remove_table_from_cache(thd, table->db, table->table_name, 1))
+ if (remove_table_from_cache(thd, table->db, table->table_name,
+ RTFC_OWNED_BY_THD_FLAG))
found=1;
}
if (!found)
@@ -390,6 +391,8 @@ static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
LOCK_open
skip_derived Set to 1 (0 = default) if we should not free derived
tables.
+ stopper When closing tables from thd->open_tables(->next)*,
+ don't close/remove tables starting from stopper.
IMPLEMENTATION
Unlocks tables and frees derived tables.
@@ -472,6 +475,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
We are in prelocked mode, so we have to leave it now with doing
implicit UNLOCK TABLES if need.
*/
+ DBUG_PRINT("info",("thd->prelocked_mode= NON_PRELOCKED"));
thd->prelocked_mode= NON_PRELOCKED;
if (prelocked_mode == PRELOCKED_UNDER_LOCK_TABLES)
@@ -1040,19 +1044,47 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
if (thd->locked_tables || thd->prelocked_mode)
{ // Using table locks
+ TABLE *best_table= 0;
+ int best_distance= INT_MIN;
for (table=thd->open_tables; table ; table=table->next)
{
if (table->s->key_length == key_length &&
!memcmp(table->s->table_cache_key, key, key_length) &&
!my_strcasecmp(system_charset_info, table->alias, alias) &&
- table->query_id != thd->query_id && /* skip tables already used by this query */
+ table->query_id != thd->query_id && /* skip tables already used */
!(thd->prelocked_mode && table->query_id))
{
- table->query_id= thd->query_id;
- DBUG_PRINT("info",("Using locked table"));
- goto reset;
+ int distance= ((int) table->reginfo.lock_type -
+ (int) table_list->lock_type);
+ /*
+ Find a table that either has the exact lock type requested,
+ or has the best suitable lock. In case there is no locked
+ table that has an equal or higher lock than requested,
+ we use the closest matching lock so that we can produce an error
+ message about the wrong lock mode on the table. The best_table is changed
+ if bd < 0 <= d, or bd < d < 0, or 0 <= d < bd (bd= best_distance, d= distance).
+
+ distance < 0 - No suitable lock found
+ distance > 0 - we have a lock mode higher than we require
+ distance == 0 - we have lock mode exactly which we need
+ */
+ if (best_distance < 0 && distance > best_distance ||
+ distance >= 0 && distance < best_distance)
+ {
+ best_distance= distance;
+ best_table= table;
+ if (best_distance == 0) // Found perfect lock
+ break;
+ }
}
}
+ if (best_table)
+ {
+ table= best_table;
+ table->query_id= thd->query_id;
+ DBUG_PRINT("info",("Using locked table"));
+ goto reset;
+ }
/*
is it a view?
(it is a workaround to allow opening a view with locked tables,
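
A standalone illustration of the 'distance' rule in the comment above: prefer an exact match (distance 0), otherwise the weakest lock that is still sufficient (smallest distance >= 0), and fall back to the strongest insufficient lock only so a sensible error can be produced later. Hypothetical helper, not the server code:

#include <climits>
#include <cstddef>
#include <vector>

/* held[i] is the lock strength already taken on candidate i; wanted is the
   strength we need. Returns the index of the best candidate, or -1. */
static int pick_best_locked_table(const std::vector<int> &held, int wanted)
{
  int best= -1, best_distance= INT_MIN;
  for (size_t i= 0; i < held.size(); i++)
  {
    int distance= held[i] - wanted;      /* >= 0: sufficient, < 0: too weak */
    if ((best_distance < 0 && distance > best_distance) ||
        (distance >= 0 && distance < best_distance))
    {
      best_distance= distance;
      best= (int) i;
      if (best_distance == 0)            /* exact match, cannot do better */
        break;
    }
  }
  return best;
}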
@@ -1711,7 +1743,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
!my_strcasecmp(system_charset_info, name, "proc"))
entry->s->system_table= 1;
- if (Table_triggers_list::check_n_load(thd, db, name, entry))
+ if (Table_triggers_list::check_n_load(thd, db, name, entry, 0))
goto err;
/*
@@ -1762,6 +1794,7 @@ err:
DBUG_RETURN(1);
}
+
/*
Open all tables in list
@@ -1816,10 +1849,6 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
statement for which table list for prelocking is already built, let
us cache routines and try to build such table list.
- NOTE: If we want queries with functions to work under explicit
- LOCK TABLES we have to additionaly lock mysql.proc table in it.
- At least until Monty will fix SP loading :)
-
NOTE: We can't delay prelocking until we meet some sub-statement
which really uses tables, since this would imply that we have to restore
its table list to be able to execute it in some other context.
@@ -1833,16 +1862,23 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
mode we will have some locked tables, because queries which use only
derived/information schema tables and views are possible. Thus "counter"
may still be zero for a prelocked statement...
+
+ NOTE: The above notes may be out of date. Please wait for psergey to
+ document new prelocked behavior.
*/
- if (!thd->prelocked_mode && !thd->lex->requires_prelocking() &&
- thd->lex->sroutines.records)
+
+ if (!thd->prelocked_mode && !thd->lex->requires_prelocking() &&
+ thd->lex->sroutines_list.elements)
{
+ bool first_no_prelocking, need_prelocking;
TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last;
DBUG_ASSERT(thd->lex->query_tables == *start);
+ sp_get_prelocking_info(thd, &need_prelocking, &first_no_prelocking);
- if (sp_cache_routines_and_add_tables(thd, thd->lex) ||
- *start)
+ if ((sp_cache_routines_and_add_tables(thd, thd->lex,
+ first_no_prelocking) ||
+ *start) && need_prelocking)
{
query_tables_last_own= save_query_tables_last;
*start= thd->lex->query_tables;
@@ -1864,14 +1900,32 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
DBUG_RETURN(-1);
}
(*counter)++;
+
if (!tables->table &&
!(tables->table= open_table(thd, tables, &new_frm_mem, &refresh, flags)))
{
free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC));
+
if (tables->view)
{
/* VIEW placeholder */
(*counter)--;
+
+ /*
+ tables->next_global list consists of two parts:
+ 1) Query tables and underlying tables of views.
+ 2) Tables used by all stored routines that this statement invokes on
+ execution.
+ We need to know where the boundary between these two parts is. If we've
+ just opened a view, which was the last table in part #1, and it
+ has added its base tables after itself, adjust the boundary pointer
+ accordingly.
+ */
+ if (query_tables_last_own &&
+ query_tables_last_own == &(tables->next_global) &&
+ tables->view->query_tables)
+ query_tables_last_own= tables->view->query_tables_last;
+
/*
Again if needed we have to get cache all routines used by this view
and add tables used by them to table list.
@@ -2300,6 +2354,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count)
and was marked as occupied during open_tables() as free for reuse.
*/
mark_real_tables_as_free_for_reuse(first_not_own);
+ DBUG_PRINT("info",("prelocked_mode= PRELOCKED"));
thd->prelocked_mode= PRELOCKED;
}
}
@@ -2323,6 +2378,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count)
if (thd->lex->requires_prelocking())
{
mark_real_tables_as_free_for_reuse(first_not_own);
+ DBUG_PRINT("info", ("thd->prelocked_mode= PRELOCKED_UNDER_LOCK_TABLES"));
thd->prelocked_mode= PRELOCKED_UNDER_LOCK_TABLES;
}
}
@@ -4059,6 +4115,9 @@ void flush_tables()
The table will be closed (not stored in cache) by the current thread when
close_thread_tables() is called.
+ PREREQUISITES
+ Lock on LOCK_open()
+
RETURN
0 This thread now have exclusive access to this table and no other thread
can access the table until close_thread_tables() is called.
@@ -4066,62 +4125,97 @@ void flush_tables()
*/
bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
- bool return_if_owned_by_thd)
+ uint flags)
{
char key[MAX_DBKEY_LENGTH];
uint key_length;
TABLE *table;
- bool result=0;
+ bool result=0, signalled= 0;
DBUG_ENTER("remove_table_from_cache");
+
key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1;
- for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ;
- table;
- table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length))
+ for (;;)
{
- THD *in_use;
- table->s->version= 0L; /* Free when thread is ready */
- if (!(in_use=table->in_use))
- {
- DBUG_PRINT("info",("Table was not in use"));
- relink_unused(table);
- }
- else if (in_use != thd)
+ result= signalled= 0;
+
+ for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ;
+ table;
+ table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length))
{
- in_use->some_tables_deleted=1;
- if (table->db_stat)
- result=1;
- /* Kill delayed insert threads */
- if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
- ! in_use->killed)
+ THD *in_use;
+ table->s->version=0L; /* Free when thread is ready */
+ if (!(in_use=table->in_use))
{
- in_use->killed= THD::KILL_CONNECTION;
- pthread_mutex_lock(&in_use->mysys_var->mutex);
- if (in_use->mysys_var->current_cond)
- {
- pthread_mutex_lock(in_use->mysys_var->current_mutex);
- pthread_cond_broadcast(in_use->mysys_var->current_cond);
- pthread_mutex_unlock(in_use->mysys_var->current_mutex);
- }
- pthread_mutex_unlock(&in_use->mysys_var->mutex);
+ DBUG_PRINT("info",("Table was not in use"));
+ relink_unused(table);
}
- /*
- Now we must abort all tables locks used by this thread
- as the thread may be waiting to get a lock for another table
- */
- for (TABLE *thd_table= in_use->open_tables;
- thd_table ;
- thd_table= thd_table->next)
+ else if (in_use != thd)
{
- if (thd_table->db_stat) // If table is open
- mysql_lock_abort_for_thread(thd, thd_table);
+ in_use->some_tables_deleted=1;
+ if (table->db_stat)
+ result=1;
+ /* Kill delayed insert threads */
+ if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
+ ! in_use->killed)
+ {
+ in_use->killed= THD::KILL_CONNECTION;
+ pthread_mutex_lock(&in_use->mysys_var->mutex);
+ if (in_use->mysys_var->current_cond)
+ {
+ pthread_mutex_lock(in_use->mysys_var->current_mutex);
+ signalled= 1;
+ pthread_cond_broadcast(in_use->mysys_var->current_cond);
+ pthread_mutex_unlock(in_use->mysys_var->current_mutex);
+ }
+ pthread_mutex_unlock(&in_use->mysys_var->mutex);
+ }
+ /*
+ Now we must abort all tables locks used by this thread
+ as the thread may be waiting to get a lock for another table
+ */
+ for (TABLE *thd_table= in_use->open_tables;
+ thd_table ;
+ thd_table= thd_table->next)
+ {
+ if (thd_table->db_stat) // If table is open
+ signalled|= mysql_lock_abort_for_thread(thd, thd_table);
+ }
}
+ else
+ result= result || (flags & RTFC_OWNED_BY_THD_FLAG);
}
- else
- result= result || return_if_owned_by_thd;
+ while (unused_tables && !unused_tables->s->version)
+ VOID(hash_delete(&open_cache,(byte*) unused_tables));
+ if (result && (flags & RTFC_WAIT_OTHER_THREAD_FLAG))
+ {
+ if (!(flags & RTFC_CHECK_KILLED_FLAG) || !thd->killed)
+ {
+ dropping_tables++;
+ if (likely(signalled))
+ (void) pthread_cond_wait(&COND_refresh, &LOCK_open);
+ else
+ {
+ struct timespec abstime;
+ /*
+ It can happen that another thread has opened the
+ table but has not yet locked any table at all. Since
+ it may be waiting for a lock on a table that our thread
+ has done LOCK TABLE x WRITE on previously, we need to
+ ensure that the thread actually hears our signal
+ before we go to sleep. Thus we wait for a short time
+ and then we retry another loop in the
+ remove_table_from_cache routine.
+ */
+ set_timespec(abstime, 10);
+ pthread_cond_timedwait(&COND_refresh, &LOCK_open, &abstime);
+ }
+ dropping_tables--;
+ continue;
+ }
+ }
+ break;
}
- while (unused_tables && !unused_tables->s->version)
- VOID(hash_delete(&open_cache,(byte*) unused_tables));
DBUG_RETURN(result);
}
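
The retry loop above blocks on COND_refresh when it knows the other thread was signalled, but otherwise uses a short timed wait so a thread that has not yet reached its own wait cannot leave us sleeping forever. A minimal standalone sketch of that wait-or-poll pattern (POSIX threads, hypothetical predicate):

#include <pthread.h>
#include <time.h>

/* Wait until *done becomes true under the given mutex. */
static void wait_for_flag(pthread_mutex_t *mutex, pthread_cond_t *cond,
                          const bool *done, bool signalled)
{
  pthread_mutex_lock(mutex);
  while (!*done)
  {
    if (signalled)
      pthread_cond_wait(cond, mutex);    /* we know a wakeup is coming */
    else
    {
      struct timespec abstime;
      clock_gettime(CLOCK_REALTIME, &abstime);
      abstime.tv_sec+= 10;               /* poll again after 10 seconds */
      pthread_cond_timedwait(cond, mutex, &abstime);
    }
  }
  pthread_mutex_unlock(mutex);
}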
@@ -4197,7 +4291,7 @@ open_new_frm(const char *path, const char *alias,
if ((parser= sql_parse_prepare(&pathstr, mem_root, 1)))
{
- if (!strncmp("VIEW", parser->type()->str, parser->type()->length))
+ if (is_equal(&view_type, parser->type()))
{
if (table_desc == 0 || table_desc->required_type == FRMTYPE_TABLE)
{
@@ -4220,3 +4314,9 @@ err:
bzero(outparam, sizeof(TABLE)); // do not run repair
DBUG_RETURN(1);
}
+
+
+bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
+{
+ return a->length == b->length && !strncmp(a->str, b->str, a->length);
+}
diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h
index bc1484b4fb0..0f5b6dcd35e 100644
--- a/sql/sql_bitmap.h
+++ b/sql/sql_bitmap.h
@@ -51,6 +51,14 @@ public:
bitmap_init(&map2, (uchar *)&map2buff, sizeof(ulonglong)*8, 0);
bitmap_intersect(&map, &map2);
}
+ /* Use highest bit for all bits above sizeof(ulonglong)*8. */
+ void intersect_extended(ulonglong map2buff)
+ {
+ intersect(map2buff);
+ if (map.bitmap_size > sizeof(ulonglong))
+ bitmap_set_above(&map, sizeof(ulonglong),
+ test(map2buff & (LL(1) << (sizeof(ulonglong) * 8 - 1))));
+ }
void subtract(Bitmap& map2) { bitmap_subtract(&map, &map2.map); }
void merge(Bitmap& map2) { bitmap_union(&map, &map2.map); }
my_bool is_set(uint n) const { return bitmap_is_set(&map, n); }
@@ -116,6 +124,7 @@ public:
void clear_all() { map=(ulonglong)0; }
void intersect(Bitmap<64>& map2) { map&= map2.map; }
void intersect(ulonglong map2) { map&= map2; }
+ void intersect_extended(ulonglong map2) { map&= map2; }
void subtract(Bitmap<64>& map2) { map&= ~map2.map; }
void merge(Bitmap<64>& map2) { map|= map2.map; }
my_bool is_set(uint n) const { return test(map & (((ulonglong)1) << n)); }
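
The new intersect_extended() makes wide bitmaps behave like the Bitmap<64> specialization: bits beyond the 64 covered by the mask survive only if the mask's highest bit is set. A rough standalone model of that semantics (hypothetical, using std::vector<bool> instead of MY_BITMAP):

#include <cstddef>
#include <cstdint>
#include <vector>

static void intersect_extended_model(std::vector<bool> &bits, uint64_t mask)
{
  bool keep_high= (mask >> 63) & 1;      /* highest bit extends upwards */
  for (size_t i= 0; i < bits.size(); i++)
  {
    bool mask_bit= (i < 64) ? ((mask >> i) & 1) : keep_high;
    bits[i]= bits[i] && mask_bit;
  }
}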
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index e0a15b7d449..15d4f699bc9 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -774,10 +774,11 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
Query_cache_query_flags flags;
// fill all gaps between fields with 0 to get repeatable key
bzero(&flags, QUERY_CACHE_FLAGS_SIZE);
- flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ?
- 1 : 0);
- flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ?
- 1 : 0);
+ flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG);
+ flags.client_protocol_41= test(thd->client_capabilities &
+ CLIENT_PROTOCOL_41);
+ flags.more_results_exists= test(thd->server_status &
+ SERVER_MORE_RESULTS_EXISTS);
flags.character_set_client_num=
thd->variables.character_set_client->number;
flags.character_set_results_num=
@@ -791,6 +792,20 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
flags.sql_mode= thd->variables.sql_mode;
flags.max_sort_length= thd->variables.max_sort_length;
flags.group_concat_max_len= thd->variables.group_concat_max_len;
+ DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, \
+CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
+sql mode: 0x%lx, sort len: %lu, concat len: %lu",
+ (int)flags.client_long_flag,
+ (int)flags.client_protocol_41,
+ (int)flags.more_results_exists,
+ flags.character_set_client_num,
+ flags.character_set_results_num,
+ flags.collation_connection_num,
+ flags.limit,
+ (ulong)flags.time_zone,
+ flags.sql_mode,
+ flags.max_sort_length,
+ flags.group_concat_max_len));
STRUCT_LOCK(&structure_guard_mutex);
if (query_cache_size == 0)
@@ -973,10 +988,11 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
// fill all gaps between fields with 0 to get repeatable key
bzero(&flags, QUERY_CACHE_FLAGS_SIZE);
- flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ?
- 1 : 0);
- flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ?
- 1 : 0);
+ flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG);
+ flags.client_protocol_41= test(thd->client_capabilities &
+ CLIENT_PROTOCOL_41);
+ flags.more_results_exists= test(thd->server_status &
+ SERVER_MORE_RESULTS_EXISTS);
flags.character_set_client_num= thd->variables.character_set_client->number;
flags.character_set_results_num=
(thd->variables.character_set_results ?
@@ -988,6 +1004,20 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
flags.sql_mode= thd->variables.sql_mode;
flags.max_sort_length= thd->variables.max_sort_length;
flags.group_concat_max_len= thd->variables.group_concat_max_len;
+ DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, \
+CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
+sql mode: 0x%lx, sort len: %lu, concat len: %lu",
+ (int)flags.client_long_flag,
+ (int)flags.client_protocol_41,
+ (int)flags.more_results_exists,
+ flags.character_set_client_num,
+ flags.character_set_results_num,
+ flags.collation_connection_num,
+ flags.limit,
+ (ulong)flags.time_zone,
+ flags.sql_mode,
+ flags.max_sort_length,
+ flags.group_concat_max_len));
memcpy((void *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)),
&flags, QUERY_CACHE_FLAGS_SIZE);
query_block = (Query_cache_block *) hash_search(&queries, (byte*) sql,
@@ -2051,7 +2081,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block,
*/
data_len= len - new_block->length;
prev_block= new_block;
- } while(1);
+ } while (1);
DBUG_RETURN(TRUE);
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 16959fdd033..21df4640f3f 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -173,7 +173,8 @@ Open_tables_state::Open_tables_state(ulong version_arg)
THD::THD()
:Statement(CONVENTIONAL_EXECUTION, 0, ALLOC_ROOT_MIN_BLOCK_SIZE, 0),
Open_tables_state(refresh_version),
- user_time(0), global_read_lock(0), is_fatal_error(0),
+ lock_id(&main_lock_id),
+ user_time(0), in_sub_stmt(FALSE), global_read_lock(0), is_fatal_error(0),
rand_used(0), time_zone_used(0),
last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0),
in_lock_tables(0), bootstrap(0), derived_tables_processing(FALSE),
@@ -265,6 +266,8 @@ THD::THD()
tablespace_op=FALSE;
ulong tmp=sql_rnd_with_mutex();
randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id);
+ thr_lock_info_init(&lock_info); /* safety: will be reset after start */
+ thr_lock_owner_init(&main_lock_id, &lock_info);
}
@@ -406,6 +409,8 @@ THD::~THD()
net_end(&net);
}
#endif
+ stmt_map.destroy(); /* close all prepared statements */
+ DBUG_ASSERT(lock_info.n_cursors == 0);
if (!cleanup_done)
cleanup();
@@ -518,6 +523,11 @@ bool THD::store_globals()
if this is the slave SQL thread.
*/
variables.pseudo_thread_id= thread_id;
+ /*
+ We have to call thr_lock_info_init() again here as THD may have been
+ created in another thread
+ */
+ thr_lock_info_init(&lock_info);
return 0;
}
@@ -1563,6 +1573,12 @@ void Statement::restore_backup_statement(Statement *stmt, Statement *backup)
}
+void Statement::close_cursor()
+{
+ DBUG_ASSERT("Statement::close_cursor()" == "not implemented");
+}
+
+
void THD::end_statement()
{
/* Cleanup SQL processing state to reuse this statement in the next query. */
@@ -1683,6 +1699,14 @@ int Statement_map::insert(Statement *statement)
}
+void Statement_map::close_transient_cursors()
+{
+ Statement *stmt;
+ while ((stmt= transient_cursor_list.head()))
+ stmt->close_cursor(); /* deletes itself from the list */
+}
+
+
bool select_dumpvar::send_data(List<Item> &items)
{
List_iterator_fast<Item_func_set_user_var> li(vars);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 12c0f384046..3900b25626f 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -756,7 +756,7 @@ class Cursor;
be used explicitly.
*/
-class Statement: public Query_arena
+class Statement: public ilink, public Query_arena
{
Statement(const Statement &rhs); /* not implemented: */
Statement &operator=(const Statement &rhs); /* non-copyable */
@@ -833,6 +833,8 @@ public:
void restore_backup_statement(Statement *stmt, Statement *backup);
/* return class type */
virtual Type type() const;
+ /* Close the cursor open for this statement, if there is one */
+ virtual void close_cursor();
};
@@ -884,15 +886,25 @@ public:
}
hash_delete(&st_hash, (byte *) statement);
}
+ void add_transient_cursor(Statement *stmt)
+ { transient_cursor_list.append(stmt); }
+ void erase_transient_cursor(Statement *stmt) { stmt->unlink(); }
+ /*
+ Close all cursors of this connection that use tables of a storage
+ engine that has transaction-specific state and therefore can not
+ survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed.
+ */
+ void close_transient_cursors();
/* Erase all statements (calls Statement destructor) */
void reset()
{
my_hash_reset(&names_hash);
my_hash_reset(&st_hash);
+ transient_cursor_list.empty();
last_found_statement= 0;
}
- ~Statement_map()
+ void destroy()
{
hash_free(&names_hash);
hash_free(&st_hash);
@@ -900,6 +912,7 @@ public:
private:
HASH st_hash;
HASH names_hash;
+ I_List<Statement> transient_cursor_list;
Statement *last_found_statement;
};
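
The close_transient_cursors() loop added here relies on each close_cursor() call unlinking its statement from the intrusive list, so the loop simply keeps taking the current head until the list is empty. A tiny standalone sketch of that idiom (hypothetical registry type, std::list instead of I_List):

#include <list>

struct CursorSketch;

struct CursorRegistrySketch
{
  std::list<CursorSketch*> open_cursors;
  void close_all();
};

struct CursorSketch
{
  CursorRegistrySketch *registry;
  std::list<CursorSketch*>::iterator pos;

  void close()
  {
    /* release per-cursor resources here, then unlink from the registry,
       like Statement::close_cursor() removes itself from the list */
    registry->open_cursors.erase(pos);
  }
};

void CursorRegistrySketch::close_all()
{
  /* each close() shrinks the list, so keep taking the current head */
  while (!open_cursors.empty())
    open_cursors.front()->close();
}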
@@ -1023,8 +1036,7 @@ public:
a thread/connection descriptor
*/
-class THD :public ilink,
- public Statement,
+class THD :public Statement,
public Open_tables_state
{
public:
@@ -1050,6 +1062,10 @@ public:
struct rand_struct rand; // used for authentication
struct system_variables variables; // Changeable local variables
struct system_status_var status_var; // Per thread statistic vars
+ THR_LOCK_INFO lock_info; // Locking info of this thread
+ THR_LOCK_OWNER main_lock_id; // To use for conventional queries
+ THR_LOCK_OWNER *lock_id; // If not main_lock_id, points to
+ // the lock_id of a cursor.
pthread_mutex_t LOCK_delete; // Locked before thd is deleted
/* all prepared statements and cursors of this connection */
Statement_map stmt_map;
@@ -1122,6 +1138,10 @@ public:
thr_lock_type update_lock_default;
delayed_insert *di;
my_bool tablespace_op; /* This is TRUE in DISCARD/IMPORT TABLESPACE */
+
+ /* TRUE if we are inside of trigger or stored function. */
+ bool in_sub_stmt;
+
/* container for handler's private per-connection data */
void *ha_data[MAX_HA];
struct st_transactions {
@@ -1129,8 +1149,6 @@ public:
THD_TRANS all; // Trans since BEGIN WORK
THD_TRANS stmt; // Trans for current statement
bool on; // see ha_enable_transaction()
- /* TRUE if we are inside of trigger or stored function. */
- bool in_sub_stmt;
XID xid; // transaction identifier
enum xa_states xa_state; // used by external XA only
/*
@@ -1168,12 +1186,22 @@ public:
This is to track items changed during execution of a prepared
statement/stored procedure. It's created by
register_item_tree_change() in memory root of THD, and freed in
- rollback_item_tree_changes(). For conventional execution it's always 0.
+ rollback_item_tree_changes(). For conventional execution it's always
+ empty.
*/
Item_change_list change_list;
/*
- Current prepared Query_arena if there one, or 0
+ A permanent memory area of the statement. For conventional
+ execution, the parsed tree and execution runtime reside in the same
+ memory root. In this case current_arena points to THD. In case of
+ a prepared statement or a stored procedure statement, thd->mem_root
+ conventionally points to runtime memory, and thd->current_arena
+ points to the memory of the PS/SP, where the parsed tree of the
+ statement resides. Whenever you need to perform a permanent
+ transformation of a parsed tree, you should allocate new memory in
+ current_arena, to allow correct re-execution of PS/SP.
+ Note: in the parser, current_arena == thd, even for PS/SP.
*/
Query_arena *current_arena;
/*
@@ -1462,6 +1490,8 @@ public:
(variables.sql_mode & MODE_STRICT_ALL_TABLES)));
}
void set_status_var_init();
+ bool is_context_analysis_only()
+ { return current_arena->is_stmt_prepare() || lex->view_prepare_mode; }
void reset_n_backup_open_tables_state(Open_tables_state *backup);
void restore_backup_open_tables_state(Open_tables_state *backup);
};
@@ -1687,8 +1717,8 @@ public:
TMP_TABLE_PARAM()
:copy_field(0), group_parts(0),
- group_length(0), group_null_parts(0), convert_blob_length(0),
- schema_table(0)
+ group_length(0), group_null_parts(0), convert_blob_length(0),
+ schema_table(0)
{}
~TMP_TABLE_PARAM()
{
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index fc9d15e94c4..afcf7dbd93f 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -142,7 +142,8 @@ int mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list)
unit->types, (ORDER*) 0,
FALSE, 1,
(first_select->options | thd->options |
- TMP_TABLE_ALL_COLUMNS),
+ TMP_TABLE_ALL_COLUMNS) &
+ ~TMP_TABLE_FORCE_MYISAM,
HA_POS_ERROR,
orig_table_list->alias)))
{
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 5d30cad2926..27342287fcd 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1088,7 +1088,9 @@ ok_or_after_trg_err:
err:
info->last_errno= error;
- thd->lex->current_select->no_error= 0; // Give error
+ /* current_select is NULL if this is a delayed insert */
+ if (thd->lex->current_select)
+ thd->lex->current_select->no_error= 0; // Give error
table->file->print_error(error,MYF(0));
before_trg_err:
@@ -1730,7 +1732,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg)
#endif
if (thd->killed || di->status)
break;
- if (error == ETIME || error == ETIMEDOUT)
+ if (error == ETIMEDOUT)
{
thd->killed= THD::KILL_CONNECTION;
break;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 218410eed81..674d9302c72 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -556,6 +556,15 @@ int yylex(void *arg, void *yythd)
lex->next_state= MY_LEX_START; // Allow signed numbers
if (c == ',')
lex->tok_start=lex->ptr; // Let tok_start point at next item
+ /*
+ Check for a placeholder: it should not precede a possible identifier
+ because of binlogging: when a placeholder is replaced with
+ its value in a query for the binlog, the query must stay
+ grammatically correct.
+ */
+ else if (c == '?' && ((THD*) yythd)->command == COM_STMT_PREPARE &&
+ !ident_map[yyPeek()])
+ return(PARAM_MARKER);
return((int) c);
case MY_LEX_IDENT_OR_NCHAR:
@@ -1999,6 +2008,7 @@ void st_lex::cleanup_after_one_table_open()
time_zone_tables_used= 0;
if (sroutines.records)
my_hash_reset(&sroutines);
+ sroutines_list.empty();
}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 45c8182a29c..4bba0c432c7 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -57,6 +57,7 @@ enum enum_sql_command {
SQLCOM_SHOW_PROCESSLIST, SQLCOM_SHOW_MASTER_STAT, SQLCOM_SHOW_SLAVE_STAT,
SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, SQLCOM_SHOW_CHARSETS,
SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, SQLCOM_SHOW_TABLE_STATUS,
+ SQLCOM_SHOW_TRIGGERS,
SQLCOM_LOAD,SQLCOM_SET_OPTION,SQLCOM_LOCK_TABLES,SQLCOM_UNLOCK_TABLES,
SQLCOM_GRANT,
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 2e5cab4bb1c..c1d4fae5253 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -27,6 +27,7 @@
#include "sp_head.h"
#include "sp.h"
+#include "sp_cache.h"
#ifdef HAVE_OPENSSL
/*
@@ -124,7 +125,7 @@ static bool end_active_trans(THD *thd)
{
int error=0;
DBUG_ENTER("end_active_trans");
- if (unlikely(thd->transaction.in_sub_stmt))
+ if (unlikely(thd->in_sub_stmt))
{
my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
DBUG_RETURN(1);
@@ -147,11 +148,7 @@ static bool end_active_trans(THD *thd)
static bool begin_trans(THD *thd)
{
int error=0;
- /*
- QQ: May be it is better to simply prohibit COMMIT and ROLLBACK in
- stored routines as SQL2003 suggests?
- */
- if (unlikely(thd->transaction.in_sub_stmt))
+ if (unlikely(thd->in_sub_stmt))
{
my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
return 1;
@@ -193,7 +190,7 @@ static int get_or_create_user_conn(THD *thd, const char *user,
const char *host,
USER_RESOURCES *mqh)
{
- int return_val=0;
+ int return_val= 0;
uint temp_len, user_len;
char temp_user[USERNAME_LENGTH+HOSTNAME_LENGTH+2];
struct user_conn *uc;
@@ -201,7 +198,7 @@ static int get_or_create_user_conn(THD *thd, const char *user,
DBUG_ASSERT(user != 0);
DBUG_ASSERT(host != 0);
- user_len=strlen(user);
+ user_len= strlen(user);
temp_len= (strmov(strmov(temp_user, user)+1, host) - temp_user)+1;
(void) pthread_mutex_lock(&LOCK_user_conn);
if (!(uc = (struct user_conn *) hash_search(&hash_user_connections,
@@ -213,21 +210,21 @@ static int get_or_create_user_conn(THD *thd, const char *user,
MYF(MY_WME)))))
{
net_send_error(thd, 0, NullS); // Out of memory
- return_val=1;
+ return_val= 1;
goto end;
}
uc->user=(char*) (uc+1);
memcpy(uc->user,temp_user,temp_len+1);
uc->host= uc->user + user_len + 1;
- uc->len = temp_len;
+ uc->len= temp_len;
uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0;
- uc->user_resources=*mqh;
- uc->intime=thd->thr_create_time;
+ uc->user_resources= *mqh;
+ uc->intime= thd->thr_create_time;
if (my_hash_insert(&hash_user_connections, (byte*) uc))
{
my_free((char*) uc,0);
net_send_error(thd, 0, NullS); // Out of memory
- return_val=1;
+ return_val= 1;
goto end;
}
}
@@ -775,29 +772,19 @@ static int check_connection(THD *thd)
return (ER_OUT_OF_RESOURCES);
thd->host_or_ip= thd->ip;
vio_in_addr(net->vio,&thd->remote.sin_addr);
-#if !defined(HAVE_SYS_UN_H) || defined(HAVE_mit_thread)
- /* Fast local hostname resolve for Win32 */
- if (!strcmp(thd->ip,"127.0.0.1"))
- {
- thd->host= (char*) my_localhost;
- thd->host_or_ip= my_localhost;
- }
- else
-#endif
+ if (!(specialflag & SPECIAL_NO_RESOLVE))
{
- if (!(specialflag & SPECIAL_NO_RESOLVE))
+ vio_in_addr(net->vio,&thd->remote.sin_addr);
+ thd->host=ip_to_hostname(&thd->remote.sin_addr,&connect_errors);
+ /* Cut very long hostnames to avoid possible overflows */
+ if (thd->host)
{
- vio_in_addr(net->vio,&thd->remote.sin_addr);
- thd->host=ip_to_hostname(&thd->remote.sin_addr,&connect_errors);
- /* Cut very long hostnames to avoid possible overflows */
- if (thd->host)
- {
- thd->host[min(strlen(thd->host), HOSTNAME_LENGTH)]= 0;
- thd->host_or_ip= thd->host;
- }
- if (connect_errors > max_connect_errors)
- return(ER_HOST_IS_BLOCKED);
+ if (thd->host != my_localhost)
+ thd->host[min(strlen(thd->host), HOSTNAME_LENGTH)]= 0;
+ thd->host_or_ip= thd->host;
}
+ if (connect_errors > max_connect_errors)
+ return(ER_HOST_IS_BLOCKED);
}
DBUG_PRINT("info",("Host: %s ip: %s",
thd->host ? thd->host : "unknown host",
@@ -1350,11 +1337,7 @@ int end_trans(THD *thd, enum enum_mysql_completiontype completion)
int res= 0;
DBUG_ENTER("end_trans");
- /*
- QQ: May be it is better to simply prohibit COMMIT and ROLLBACK in
- stored routines as SQL2003 suggests?
- */
- if (unlikely(thd->transaction.in_sub_stmt))
+ if (unlikely(thd->in_sub_stmt))
{
my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
DBUG_RETURN(1);
@@ -2104,6 +2087,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
case SCH_TABLE_NAMES:
case SCH_TABLES:
case SCH_VIEWS:
+ case SCH_TRIGGERS:
#ifdef DONT_ALLOW_SHOW_COMMANDS
my_message(ER_NOT_ALLOWED_COMMAND,
ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */
@@ -2151,7 +2135,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
TABLE_LIST **query_tables_last= lex->query_tables_last;
sel= new SELECT_LEX();
sel->init_query();
- if(!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ,
+ if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ,
(List<String> *) 0, (List<String> *) 0))
DBUG_RETURN(1);
lex->query_tables_last= query_tables_last;
@@ -2304,7 +2288,8 @@ mysql_execute_command(THD *thd)
Don't reset warnings when executing a stored routine.
*/
if ((all_tables || &lex->select_lex != lex->all_selects_list ||
- lex->sroutines.records) && !thd->spcont)
+ lex->sroutines.records) && !thd->spcont ||
+ lex->time_zone_tables_used)
mysql_reset_errors(thd, 0);
#ifdef HAVE_REPLICATION
@@ -2385,10 +2370,12 @@ mysql_execute_command(THD *thd)
select_result *result=lex->result;
if (all_tables)
{
- res= check_table_access(thd,
- lex->exchange ? SELECT_ACL | FILE_ACL :
- SELECT_ACL,
- all_tables, 0);
+ if (lex->orig_sql_command != SQLCOM_SHOW_STATUS_PROC &&
+ lex->orig_sql_command != SQLCOM_SHOW_STATUS_FUNC)
+ res= check_table_access(thd,
+ lex->exchange ? SELECT_ACL | FILE_ACL :
+ SELECT_ACL,
+ all_tables, 0);
}
else
res= check_access(thd,
@@ -3651,6 +3638,7 @@ end_with_restore_list:
if (!(res = mysql_create_function(thd, &lex->udf)))
send_ok(thd);
#else
+ net_printf_error(thd, ER_CANT_OPEN_LIBRARY, lex->udf.dl, 0, "feature disabled");
res= TRUE;
#endif
break;
@@ -4112,6 +4100,12 @@ end_with_restore_list:
delete lex->sphead;
lex->sphead= 0;
goto error;
+ case SP_BODY_TOO_LONG:
+ my_error(ER_TOO_LONG_BODY, MYF(0), name);
+ lex->unit.cleanup();
+ delete lex->sphead;
+ lex->sphead= 0;
+ goto error;
default:
my_error(ER_SP_STORE_FAILED, MYF(0), SP_TYPE_STRING(lex), name);
lex->unit.cleanup();
@@ -4134,9 +4128,8 @@ end_with_restore_list:
goto error;
/*
- By this moment all needed SPs should be in cache so no need
- to look into DB. Moreover we may be unable to do it becuase
- we may don't have read lock on mysql.proc
+ By this moment all needed SPs should be in cache so no need to look
+ into DB.
*/
if (!(sp= sp_find_procedure(thd, lex->spname, TRUE)))
{
@@ -4201,7 +4194,7 @@ end_with_restore_list:
select_limit= thd->variables.select_limit;
thd->variables.select_limit= HA_POS_ERROR;
- thd->row_count_func= 0;
+ thd->row_count_func= 0;
tmp_disable_binlog(thd); /* don't binlog the substatements */
res= sp->execute_procedure(thd, &lex->value_list);
reenable_binlog(thd);
@@ -5093,8 +5086,9 @@ bool check_stack_overrun(THD *thd, long margin,
if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >=
(long) (thread_stack - margin))
{
- sprintf(errbuff[0],ER(ER_STACK_OVERRUN),stack_used,thread_stack);
- my_message(ER_STACK_OVERRUN,errbuff[0],MYF(0));
+ sprintf(errbuff[0],ER(ER_STACK_OVERRUN_NEED_MORE),
+ stack_used,thread_stack,margin);
+ my_message(ER_STACK_OVERRUN_NEED_MORE,errbuff[0],MYF(0));
thd->fatal_error();
return 1;
}
@@ -5309,6 +5303,8 @@ void create_select_for_variable(const char *var_name)
THD *thd;
LEX *lex;
LEX_STRING tmp, null_lex_string;
+ Item *var;
+ char buff[MAX_SYS_VAR_LENGTH*2+4+8], *end;
DBUG_ENTER("create_select_for_variable");
thd= current_thd;
@@ -5318,8 +5314,14 @@ void create_select_for_variable(const char *var_name)
tmp.str= (char*) var_name;
tmp.length=strlen(var_name);
bzero((char*) &null_lex_string.str, sizeof(null_lex_string));
- add_item_to_list(thd, get_system_var(thd, OPT_SESSION, tmp,
- null_lex_string));
+ /*
+ We set the name of the Item to @@session.var_name because it is then used
+ as the column name in the output.
+ */
+ var= get_system_var(thd, OPT_SESSION, tmp, null_lex_string);
+ end= strxmov(buff, "@@session.", var_name, NullS);
+ var->set_name(buff, end-buff, system_charset_info);
+ add_item_to_list(thd, var);
DBUG_VOID_RETURN;
}
@@ -5817,7 +5819,7 @@ new_create_field(THD *thd, char *field_name, enum_field_types type,
new_field->length= 1;
if (new_field->length > MAX_BIT_FIELD_LENGTH)
{
- my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), field_name,
+ my_error(ER_TOO_BIG_DISPLAYWIDTH, MYF(0), field_name,
MAX_BIT_FIELD_LENGTH);
DBUG_RETURN(NULL);
}
@@ -5836,7 +5838,10 @@ new_create_field(THD *thd, char *field_name, enum_field_types type,
type != MYSQL_TYPE_STRING &&
type != MYSQL_TYPE_VARCHAR && type != FIELD_TYPE_GEOMETRY)))
{
- my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0),
+ my_error((type == MYSQL_TYPE_VAR_STRING || type == MYSQL_TYPE_VARCHAR ||
+ type == MYSQL_TYPE_STRING) ? ER_TOO_BIG_FIELDLENGTH :
+ ER_TOO_BIG_DISPLAYWIDTH,
+ MYF(0),
field_name, max_field_charlength); /* purecov: inspected */
DBUG_RETURN(NULL);
}
@@ -6448,6 +6453,23 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
if ((options & REFRESH_READ_LOCK) && thd)
{
/*
+ We must not try to aspire a global read lock if we have a write
+ locked table. This would lead to a deadlock when trying to
+ reopen (and re-lock) the table after the flush.
+ */
+ if (thd->locked_tables)
+ {
+ THR_LOCK_DATA **lock_p= thd->locked_tables->locks;
+ THR_LOCK_DATA **end_p= lock_p + thd->locked_tables->lock_count;
+
+ for (; lock_p < end_p; lock_p++)
+ if ((*lock_p)->type == TL_WRITE)
+ {
+ my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
+ return 1;
+ }
+ }
+ /*
Writing to the binlog could cause deadlocks, as we don't log
UNLOCK TABLES
*/
@@ -6843,7 +6865,7 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
/*
Is there tables of subqueries?
*/
- if (&lex->select_lex != lex->all_selects_list)
+ if (&lex->select_lex != lex->all_selects_list || lex->time_zone_tables_used)
{
DBUG_PRINT("info",("Checking sub query list"));
for (table= tables; table; table= table->next_global)
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 413ee486e55..c196bf76dda 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -88,6 +88,7 @@ class Prepared_statement: public Statement
{
public:
THD *thd;
+ Protocol *protocol;
Item_param **param_array;
uint param_count;
uint last_errno;
@@ -313,24 +314,28 @@ static void set_param_int64(Item_param *param, uchar **pos, ulong len)
static void set_param_float(Item_param *param, uchar **pos, ulong len)
{
+ float data;
#ifndef EMBEDDED_LIBRARY
if (len < 4)
return;
-#endif
- float data;
float4get(data,*pos);
+#else
+ floatget(data, *pos);
+#endif
param->set_double((double) data);
*pos+= 4;
}
static void set_param_double(Item_param *param, uchar **pos, ulong len)
{
+ double data;
#ifndef EMBEDDED_LIBRARY
if (len < 8)
return;
-#endif
- double data;
float8get(data,*pos);
+#else
+ doubleget(data, *pos);
+#endif
param->set_double((double) data);
*pos+= 8;
}
@@ -597,10 +602,8 @@ static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array,
Item_param **begin= stmt->param_array;
Item_param **end= begin + stmt->param_count;
uint32 length= 0;
-
String str;
const String *res;
-
DBUG_ENTER("insert_params_withlog");
if (query->copy(stmt->query, stmt->query_length, default_charset_info))
@@ -1680,10 +1683,12 @@ static bool init_param_array(Prepared_statement *stmt)
static void cleanup_stmt_and_thd_after_use(Statement *stmt, THD *thd)
{
+ DBUG_ENTER("cleanup_stmt_and_thd_after_use");
stmt->lex->unit.cleanup();
cleanup_items(stmt->free_list);
thd->rollback_item_tree_changes();
thd->cleanup_after_query();
+ DBUG_VOID_RETURN;
}
/*
@@ -1753,6 +1758,10 @@ bool mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
DBUG_RETURN(TRUE);
}
+ /*
+ alloc_query() uses thd->mem_root and thd->query, so we have to call
+ both backup_statement() and backup_item_area() here.
+ */
thd->set_n_backup_statement(stmt, &stmt_backup);
thd->set_n_backup_item_arena(stmt, &stmt_backup);
@@ -1976,7 +1985,8 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_execute")))
DBUG_VOID_RETURN;
- DBUG_PRINT("exec_query:", ("%s", stmt->query));
+ DBUG_PRINT("exec_query", ("%s", stmt->query));
+ DBUG_PRINT("info",("stmt: %p", stmt));
/* Check if we got an error when sending long data */
if (stmt->state == Query_arena::ERROR)
@@ -2012,6 +2022,8 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
DBUG_VOID_RETURN;
/* If lex->result is set, mysql_execute_command will use it */
stmt->lex->result= &cursor->result;
+ stmt->protocol= &cursor->protocol;
+ thd->lock_id= &cursor->lock_id;
}
}
#ifndef EMBEDDED_LIBRARY
@@ -2045,7 +2057,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
}
mysql_log.write(thd, thd->command, "[%lu] %s", stmt->id, thd->query);
- thd->protocol= &thd->protocol_prep; // Switch to binary protocol
+ thd->protocol= stmt->protocol; // Switch to binary protocol
if (!(specialflag & SPECIAL_NO_PRIOR))
my_pthread_setprio(pthread_self(),QUERY_PRIOR);
mysql_execute_command(thd);
@@ -2061,6 +2073,9 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
Cursor::open is buried deep in JOIN::exec of the top level join.
*/
cursor->init_from_thd(thd);
+
+ if (cursor->close_at_commit)
+ thd->stmt_map.add_transient_cursor(stmt);
}
else
{
@@ -2069,7 +2084,12 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
reset_stmt_params(stmt);
}
+ log_slow_statement(thd);
+ /* Prevent from second logging in the end of dispatch_command */
+ thd->enable_slow_log= FALSE;
+
thd->set_statement(&stmt_backup);
+ thd->lock_id= &thd->main_lock_id;
thd->current_arena= thd;
DBUG_VOID_RETURN;
@@ -2114,6 +2134,8 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name)
DBUG_VOID_RETURN;
}
+ DBUG_PRINT("info",("stmt: %p", stmt));
+
/* Must go before setting variables, as it clears thd->user_var_events */
mysql_reset_thd_for_next_command(thd);
thd->set_n_backup_statement(stmt, &stmt_backup);
@@ -2227,7 +2249,7 @@ void mysql_stmt_fetch(THD *thd, char *packet, uint packet_length)
if (!(specialflag & SPECIAL_NO_PRIOR))
my_pthread_setprio(pthread_self(), QUERY_PRIOR);
- thd->protocol= &thd->protocol_prep; // Switch to binary protocol
+ thd->protocol= stmt->protocol; // Switch to binary protocol
cursor->fetch(num_rows);
thd->protocol= &thd->protocol_simple; // Use normal protocol
@@ -2240,10 +2262,12 @@ void mysql_stmt_fetch(THD *thd, char *packet, uint packet_length)
cleanup_stmt_and_thd_after_use(stmt, thd);
reset_stmt_params(stmt);
/*
- Must be the last, as some momory is still needed for
+ Must be the last, as some memory is still needed for
the previous calls.
*/
free_root(cursor->mem_root, MYF(0));
+ if (cursor->close_at_commit)
+ thd->stmt_map.erase_transient_cursor(stmt);
}
thd->restore_backup_statement(stmt, &stmt_backup);
@@ -2283,14 +2307,6 @@ void mysql_stmt_reset(THD *thd, char *packet)
DBUG_VOID_RETURN;
stmt->close_cursor(); /* will reset statement params */
- cursor= stmt->cursor;
- if (cursor && cursor->is_open())
- {
- thd->change_list= cursor->change_list;
- cursor->close(FALSE);
- cleanup_stmt_and_thd_after_use(stmt, thd);
- free_root(cursor->mem_root, MYF(0));
- }
stmt->state= Query_arena::PREPARED;
@@ -2405,6 +2421,7 @@ Prepared_statement::Prepared_statement(THD *thd_arg)
thd_arg->variables.query_alloc_block_size,
thd_arg->variables.query_prealloc_size),
thd(thd_arg),
+ protocol(&thd_arg->protocol_prep),
param_array(0),
param_count(0),
last_errno(0)
@@ -2440,19 +2457,26 @@ void Prepared_statement::setup_set_params()
Prepared_statement::~Prepared_statement()
{
+ DBUG_ENTER("Prepared_statement::~Prepared_statement");
+ DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor));
if (cursor)
{
if (cursor->is_open())
{
cursor->close(FALSE);
- free_items();
+ cleanup_items(free_list);
+ thd->rollback_item_tree_changes();
free_root(cursor->mem_root, MYF(0));
}
cursor->Cursor::~Cursor();
}
- else
- free_items();
+ /*
+ We have to call free_items() even if cleanup_items() was called, as some
+ items, like Item_param, don't free everything until free_items()
+ */
+ free_items();
delete lex->result;
+ DBUG_VOID_RETURN;
}
@@ -2464,16 +2488,22 @@ Query_arena::Type Prepared_statement::type() const
void Prepared_statement::close_cursor()
{
+ DBUG_ENTER("Prepared_statement::close_cursor");
+ DBUG_PRINT("enter",("stmt: %p", this));
+
if (cursor && cursor->is_open())
{
thd->change_list= cursor->change_list;
cursor->close(FALSE);
cleanup_stmt_and_thd_after_use(this, thd);
free_root(cursor->mem_root, MYF(0));
+ if (cursor->close_at_commit)
+ thd->stmt_map.erase_transient_cursor(this);
}
/*
Clear parameters from data which could be set by
mysql_stmt_send_long_data() call.
*/
reset_stmt_params(this);
+ DBUG_VOID_RETURN;
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 87bf5463ffa..32a8378d41d 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1526,7 +1526,7 @@ bool show_binlogs(THD* thd)
else
{
/* this is an old log, open it and find the size */
- if ((file= my_open(fname+dir_len, O_RDONLY | O_SHARE | O_BINARY,
+ if ((file= my_open(fname, O_RDONLY | O_SHARE | O_BINARY,
MYF(0))) >= 0)
{
file_length= (ulonglong) my_seek(file, 0L, MY_SEEK_END, MYF(0));
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 524fc784422..fb0407e1405 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1016,7 +1016,7 @@ JOIN::optimize()
group_list : (ORDER*) 0),
group_list ? 0 : select_distinct,
group_list && simple_group,
- select_options,
+ select_options & ~TMP_TABLE_FORCE_MYISAM,
(order == 0 || skip_sort_order) ? select_limit :
HA_POS_ERROR,
(char *) "")))
@@ -1198,7 +1198,14 @@ JOIN::exec()
{
result->send_fields(fields_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
- if (!having || having->val_int())
+ /*
+ We have to test for 'conds' here as the WHERE may not be constant
+ even if we don't have any tables: for prepared statements, or if
+ conds uses something like 'rand()'.
+ */
+ if (cond_value != Item::COND_FALSE &&
+ (!conds || conds->val_int()) &&
+ (!having || having->val_int()))
{
if (do_send_rows && (procedure ? (procedure->send_row(fields_list) ||
procedure->end_of_records())
@@ -1207,14 +1214,18 @@ JOIN::exec()
else
{
error= (int) result->send_eof();
- send_records=1;
+ send_records= ((select_options & OPTION_FOUND_ROWS) ? 1 :
+ thd->sent_row_count);
}
}
else
+ {
error=(int) result->send_eof();
+ send_records= 0;
+ }
}
- /* Single select (without union and limit) always returns 1 row */
- thd->limit_found_rows= 1;
+ /* Single select (without union) always returns 0 or 1 row */
+ thd->limit_found_rows= send_records;
thd->examined_row_count= 0;
DBUG_VOID_RETURN;
}
@@ -1385,7 +1396,8 @@ JOIN::exec()
(ORDER*) 0,
curr_join->select_distinct &&
!curr_join->group_list,
- 1, curr_join->select_options,
+ 1, curr_join->select_options
+ & ~TMP_TABLE_FORCE_MYISAM,
HA_POS_ERROR,
(char *) "")))
DBUG_VOID_RETURN;
@@ -1702,16 +1714,21 @@ JOIN::destroy()
Cursor::Cursor(THD *thd)
:Query_arena(&main_mem_root, INITIALIZED),
- join(0), unit(0)
+ join(0), unit(0),
+ protocol(thd),
+ close_at_commit(FALSE)
{
/* We will overwrite it at open anyway. */
init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
+ thr_lock_owner_init(&lock_id, &thd->lock_info);
+ bzero((void*) ht_info, sizeof(ht_info));
}
void
Cursor::init_from_thd(THD *thd)
{
+ Engine_info *info;
/*
We need to save and reset thd->mem_root, otherwise it'll be freed
later in mysql_parse.
@@ -1739,6 +1756,22 @@ Cursor::init_from_thd(THD *thd)
free_list= thd->free_list;
change_list= thd->change_list;
reset_thd(thd);
+ /* Now we have an active cursor and can cause a deadlock */
+ thd->lock_info.n_cursors++;
+
+ close_at_commit= FALSE; /* reset in case we're reusing the cursor */
+ info= &ht_info[0];
+ for (handlerton **pht= thd->transaction.stmt.ht; *pht; pht++)
+ {
+ const handlerton *ht= *pht;
+ close_at_commit|= test(ht->flags & HTON_CLOSE_CURSORS_AT_COMMIT);
+ if (ht->create_cursor_read_view)
+ {
+ info->ht= ht;
+ info->read_view= (ht->create_cursor_read_view)();
+ ++info;
+ }
+ }
/*
XXX: thd->locked_tables is not changed.
What problems can we have with it if cursor is open?
@@ -1818,6 +1851,7 @@ Cursor::fetch(ulong num_rows)
JOIN_TAB *join_tab= join->join_tab + join->const_tables;
enum_nested_loop_state error= NESTED_LOOP_OK;
Query_arena backup_arena;
+ Engine_info *info;
DBUG_ENTER("Cursor::fetch");
DBUG_PRINT("enter",("rows: %lu", num_rows));
@@ -1832,6 +1866,9 @@ Cursor::fetch(ulong num_rows)
/* save references to memory, allocated during fetch */
thd->set_n_backup_item_arena(this, &backup_arena);
+ for (info= ht_info; info->read_view ; info++)
+ (info->ht->set_cursor_read_view)(info->read_view);
+
join->fetch_limit+= num_rows;
error= sub_select(join, join_tab, 0);
@@ -1848,6 +1885,9 @@ Cursor::fetch(ulong num_rows)
/* Grab free_list here to correctly free it in close */
thd->restore_backup_item_arena(this, &backup_arena);
+ for (info= ht_info; info->read_view; info++)
+ (info->ht->set_cursor_read_view)(0);
+
if (error == NESTED_LOOP_CURSOR_LIMIT)
{
/* Fetch limit worked, possibly more rows are there */
@@ -1888,6 +1928,13 @@ Cursor::close(bool is_active)
else
(void) join->select_lex->cleanup();
+ for (Engine_info *info= ht_info; info->read_view; info++)
+ {
+ (info->ht->close_cursor_read_view)(info->read_view);
+ info->read_view= 0;
+ info->ht= 0;
+ }
+
if (is_active)
close_thread_tables(thd);
else
@@ -1907,6 +1954,7 @@ Cursor::close(bool is_active)
thd->derived_tables= tmp_derived_tables;
thd->lock= tmp_lock;
}
+ thd->lock_info.n_cursors--; /* Decrease the number of active cursors */
join= 0;
unit= 0;
free_items();
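
The Engine_info bookkeeping in the hunks above follows a simple per-engine lifecycle: create a read view when the cursor is materialized, install it around every fetch, and release it on close. A condensed standalone sketch of that protocol (hypothetical handlerton-like interface, not the actual handlerton struct):

struct EngineHooksSketch
{
  void *(*create_read_view)();
  void  (*set_read_view)(void *view);    /* null restores the default view */
  void  (*close_read_view)(void *view);
};

struct CursorViewSketch
{
  EngineHooksSketch *ht= nullptr;
  void *read_view= nullptr;

  void open(EngineHooksSketch *engine)   /* at cursor materialization */
  {
    if (engine->create_read_view)
    {
      ht= engine;
      read_view= engine->create_read_view();
    }
  }
  void fetch_batch()                     /* wraps the actual row fetching */
  {
    if (read_view) ht->set_read_view(read_view);
    /* ... fetch a batch of rows here ... */
    if (read_view) ht->set_read_view(nullptr);
  }
  void close()
  {
    if (read_view)
    {
      ht->close_read_view(read_view);
      read_view= nullptr;
      ht= nullptr;
    }
  }
};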
@@ -2712,9 +2760,9 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond,
We use null_rejecting in add_not_null_conds() to add
'othertbl.field IS NOT NULL' to tab->select_cond.
*/
- (*key_fields)->null_rejecting= (cond->functype() == Item_func::EQ_FUNC) &&
- ((*value)->type() == Item::FIELD_ITEM) &&
- ((Item_field*)*value)->field->maybe_null();
+ (*key_fields)->null_rejecting= ((cond->functype() == Item_func::EQ_FUNC) &&
+ ((*value)->type() == Item::FIELD_ITEM) &&
+ ((Item_field*)*value)->field->maybe_null());
(*key_fields)++;
}
@@ -2834,11 +2882,11 @@ add_key_fields(KEY_FIELD **key_fields,uint *and_level,
cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM &&
!(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT))
values--;
+ DBUG_ASSERT(cond_func->functype() != Item_func::IN_FUNC ||
+ cond_func->argument_count() != 2);
add_key_equal_fields(key_fields, *and_level, cond_func,
(Item_field*) (cond_func->key_item()->real_item()),
- cond_func->argument_count() == 2 &&
- cond_func->functype() == Item_func::IN_FUNC,
- values,
+ 0, values,
cond_func->argument_count()-1,
usable_tables);
}
@@ -5138,7 +5186,23 @@ inline void add_cond_and_fix(Item **e1, Item *e2)
(where othertbl is a non-const table and othertbl.field may be NULL)
and add them to conditions on correspoding tables (othertbl in this
example).
-
+
+  An exception to this is the case when referred_tab->join != join,
+  i.e. we do not add NOT NULL constraints from any embedded subquery.
+ Consider this query:
+ SELECT A.f2 FROM t1 LEFT JOIN t2 A ON A.f2 = f1
+ WHERE A.f3=(SELECT MIN(f3) FROM t2 C WHERE A.f4 = C.f4) OR A.f3 IS NULL;
+ Here condition A.f3 IS NOT NULL is going to be added to the WHERE
+ condition of the embedding query.
+ Another example:
+ SELECT * FROM t10, t11 WHERE (t10.a < 10 OR t10.a IS NULL)
+ AND t11.b <=> t10.b AND (t11.a = (SELECT MAX(a) FROM t12
+ WHERE t12.b = t10.a ));
+ Here condition t10.a IS NOT NULL is going to be added.
+  In both cases adding the NOT NULL condition would erroneously reject
+  some rows of the result set; the referred_tab->join != join check
+  disallows such additions.
+
This optimization doesn't affect the choices that ref, range, or join
optimizer make. This was intentional because this was added after 4.1
was GA.
@@ -5166,14 +5230,27 @@ static void add_not_null_conds(JOIN *join)
if (tab->ref.null_rejecting & (1 << keypart))
{
Item *item= tab->ref.items[keypart];
+ Item *notnull;
DBUG_ASSERT(item->type() == Item::FIELD_ITEM);
Item_field *not_null_item= (Item_field*)item;
JOIN_TAB *referred_tab= not_null_item->field->table->reginfo.join_tab;
- Item_func_isnotnull *notnull;
+ /*
+ For UPDATE queries such as:
+ UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1);
+          not_null_item is t1.f1, but its referred_tab is 0.
+ */
+ if (!referred_tab || referred_tab->join != join)
+ continue;
if (!(notnull= new Item_func_isnotnull(not_null_item)))
DBUG_VOID_RETURN;
-
- notnull->quick_fix_field();
+ /*
+          We need to do a full fix_fields() call here in order to have a
+          correct notnull->const_item(). This is needed e.g. by test_quick_select
+ when it is called from make_join_select after this function is
+ called.
+ */
+ if (notnull->fix_fields(join->thd, &notnull))
+ DBUG_VOID_RETURN;
DBUG_EXECUTE("where",print_where(notnull,
referred_tab->table->alias););
add_cond_and_fix(&referred_tab->select_cond, notnull);
@@ -6691,7 +6768,7 @@ static COND *build_equal_items_for_cond(COND *cond,
of the condition expression.
*/
li.rewind();
- while((item= li++))
+ while ((item= li++))
{
Item *new_item;
if ((new_item = build_equal_items_for_cond(item, inherited))!= item)
@@ -7500,7 +7577,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
/* Flatten nested joins that can be flattened. */
li.rewind();
- while((table= li++))
+ while ((table= li++))
{
nested_join= table->nested_join;
if (nested_join && !table->on_expr)
@@ -7869,7 +7946,15 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
item->name, table, item->unsigned_flag);
break;
case STRING_RESULT:
- if (item->max_length > 255 && convert_blob_length)
+ enum enum_field_types type;
+ /*
+        DATE/TIME fields have STRING_RESULT result type. To preserve
+        their type they need to be handled separately.
+ */
+ if ((type= item->field_type()) == MYSQL_TYPE_DATETIME ||
+ type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE)
+ new_field= item->tmp_table_field_from_field_type(table);
+ else if (item->max_length > 255 && convert_blob_length)
new_field= new Field_varstring(convert_blob_length, maybe_null,
item->name, table,
item->collation.collation);
@@ -7959,6 +8044,14 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
bool table_cant_handle_bit_fields,
uint convert_blob_length)
{
+ if (type != Item::FIELD_ITEM &&
+ item->real_item()->type() == Item::FIELD_ITEM &&
+ (item->type() != Item::REF_ITEM ||
+ !((Item_ref *) item)->depended_from))
+ {
+ item= item->real_item();
+ type= Item::FIELD_ITEM;
+ }
switch (type) {
case Item::SUM_FUNC_ITEM:
{
@@ -7972,30 +8065,31 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
case Item::DEFAULT_VALUE_ITEM:
{
Item_field *field= (Item_field*) item;
- if (table_cant_handle_bit_fields && field->field->type() == FIELD_TYPE_BIT)
+ /*
+        If the item must be able to store NULLs but the underlying field
+        can't, create_tmp_field_from_field() can't be used for tmp field
+        creation.
+ */
+ if (field->maybe_null && !field->field->maybe_null())
+ {
+ Field *res= create_tmp_field_from_item(thd, item, table, NULL,
+ modify_item, convert_blob_length);
+ *from_field= field->field;
+ if (res && modify_item)
+ ((Item_field*)item)->result_field= res;
+ return res;
+ }
+
+ if (table_cant_handle_bit_fields &&
+ field->field->type() == FIELD_TYPE_BIT)
return create_tmp_field_from_item(thd, item, table, copy_func,
modify_item, convert_blob_length);
return create_tmp_field_from_field(thd, (*from_field= field->field),
item->name, table,
- modify_item ? (Item_field*) item : NULL,
+ modify_item ? (Item_field*) item :
+ NULL,
convert_blob_length);
}
- case Item::REF_ITEM:
- {
- Item *tmp_item;
- if ((tmp_item= item->real_item())->type() == Item::FIELD_ITEM)
- {
- Item_field *field= (Item_field*) tmp_item;
- Field *new_field= create_tmp_field_from_field(thd,
- (*from_field= field->field),
- item->name, table,
- NULL,
- convert_blob_length);
- if (modify_item)
- item->set_result_field(new_field);
- return new_field;
- }
- }
+ /* Fall through */
case Item::FUNC_ITEM:
case Item::COND_ITEM:
case Item::FIELD_AVG_ITEM:
@@ -8007,6 +8101,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
case Item::REAL_ITEM:
case Item::DECIMAL_ITEM:
case Item::STRING_ITEM:
+ case Item::REF_ITEM:
case Item::NULL_ITEM:
case Item::VARBIN_ITEM:
return create_tmp_field_from_item(thd, item, table, copy_func, modify_item,
@@ -8288,7 +8383,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
/* If result table is small; use a heap */
if (blob_count || using_unique_constraint ||
(select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) ==
- OPTION_BIG_TABLES)
+ OPTION_BIG_TABLES ||(select_options & TMP_TABLE_FORCE_MYISAM))
{
table->file=get_new_handler(table,table->s->db_type= DB_TYPE_MYISAM);
if (group &&
@@ -9651,7 +9746,13 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
table->file->extra(HA_EXTRA_KEYREAD);
tab->index= tab->ref.key;
}
- if ((error=join_read_const(tab)))
+ error=join_read_const(tab);
+ if (table->key_read)
+ {
+ table->key_read=0;
+ table->file->extra(HA_EXTRA_NO_KEYREAD);
+ }
+ if (error)
{
tab->info="unique row not found";
/* Mark for EXPLAIN that the row was not found */
@@ -9659,11 +9760,6 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
- if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
}
if (*tab->on_expr_ref && !table->null_row)
{
@@ -9784,6 +9880,11 @@ join_read_always_key(JOIN_TAB *tab)
int error;
TABLE *table= tab->table;
+ for (uint i= 0 ; i < tab->ref.key_parts ; i++)
+ {
+ if ((tab->ref.null_rejecting & 1 << i) && tab->ref.items[i]->is_null())
+ return -1;
+ }
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key);
if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
@@ -10837,13 +10938,13 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
usable_keys.set_all();
for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next)
{
- if ((*tmp_order->item)->type() != Item::FIELD_ITEM)
+ Item *item= (*tmp_order->item)->real_item();
+ if (item->type() != Item::FIELD_ITEM)
{
usable_keys.clear_all();
DBUG_RETURN(0);
}
- usable_keys.intersect(((Item_field*) (*tmp_order->item))->
- field->part_of_sortkey);
+ usable_keys.intersect(((Item_field*) item)->field->part_of_sortkey);
if (usable_keys.is_clear_all())
DBUG_RETURN(0); // No usable keys
}
@@ -12096,7 +12197,6 @@ create_distinct_group(THD *thd, Item **ref_pointer_array,
List_iterator<Item> li(fields);
Item *item;
ORDER *order,*group,**prev;
- uint index= 0;
*all_order_by_fields_used= 1;
while ((item=li++))
@@ -12133,12 +12233,12 @@ create_distinct_group(THD *thd, Item **ref_pointer_array,
simple indexing of ref_pointer_array (order in the array and in the
list are same)
*/
- ord->item= ref_pointer_array + index;
+ ord->item= ref_pointer_array;
ord->asc=1;
*prev=ord;
prev= &ord->next;
}
- index++;
+ ref_pointer_array++;
}
*prev=0;
return group;
@@ -12967,7 +13067,7 @@ static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list,
if (item->eq(*group_tmp->item,0))
{
Item *new_item;
- if(!(new_item= new Item_ref(context, group_tmp->item, 0,
+ if (!(new_item= new Item_ref(context, group_tmp->item, 0,
item->name)))
return 1; // fatal_error is set
thd->change_item_tree(arg, new_item);
@@ -13037,7 +13137,7 @@ bool JOIN::rollup_init()
ORDER *group_tmp;
for (group_tmp= group_list; group_tmp; group_tmp= group_tmp->next)
{
- if (item->eq(*group_tmp->item,0))
+ if (*group_tmp->item == item)
item->maybe_null= 1;
}
if (item->type() == Item::FUNC_ITEM)
@@ -13157,7 +13257,7 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields,
for (group_tmp= start_group, i= pos ;
group_tmp ; group_tmp= group_tmp->next, i++)
{
- if (item->eq(*group_tmp->item,0))
+ if (*group_tmp->item == item)
{
/*
This is an element that is used by the GROUP BY and should be
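
The create_tmp_field_from_item() change above keeps DATE/TIME/DATETIME
columns typed as such when they are materialised into a temporary table,
instead of falling back to a generic string field. A minimal sketch of the
class of statement affected (table and column names are assumed, not taken
from the patch):

    CREATE TABLE t_dates (d DATE);
    INSERT INTO t_dates VALUES ('2005-01-01'), ('2005-01-01'), ('2005-02-01');
    -- DISTINCT goes through a temporary table; with the special case above
    -- the result column keeps the DATE type instead of becoming a string.
    SELECT DISTINCT d FROM t_dates;
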
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 9285e33be33..c950444e1c6 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -389,9 +389,18 @@ class Cursor: public Sql_alloc, public Query_arena
TABLE *derived_tables;
/* List of items created during execution */
query_id_t query_id;
+ struct Engine_info
+ {
+ const handlerton *ht;
+ void *read_view;
+ };
+ Engine_info ht_info[MAX_HA];
public:
+ Protocol_prep protocol;
Item_change_list change_list;
select_send result;
+ THR_LOCK_OWNER lock_id;
+ my_bool close_at_commit;
/* Temporary implementation as now we replace THD state by value */
/* Save THD state into cursor */
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index d3861be721c..1c0519475b4 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -22,6 +22,7 @@
#include "repl_failsafe.h"
#include "sp.h"
#include "sp_head.h"
+#include "sql_trigger.h"
#include <my_dir.h>
#ifdef HAVE_BERKELEY_DB
@@ -1697,6 +1698,7 @@ void get_index_field_values(LEX *lex, INDEX_FIELD_VALUES *index_field_values)
break;
case SQLCOM_SHOW_TABLES:
case SQLCOM_SHOW_TABLE_STATUS:
+ case SQLCOM_SHOW_TRIGGERS:
index_field_values->db_value= lex->current_select->db;
index_field_values->table_value= wild;
break;
@@ -1719,7 +1721,7 @@ int make_table_list(THD *thd, SELECT_LEX *sel,
ident_table.length= strlen(table);
table_ident= new Table_ident(thd, ident_db, ident_table, 1);
sel->init_query();
- if(!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ,
+ if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ,
(List<String> *) 0, (List<String> *) 0))
return 1;
return 0;
@@ -2388,6 +2390,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
{
const char *tmp_buff;
byte *pos;
+ bool is_blob;
uint flags=field->flags;
char tmp[MAX_FIELD_WIDTH];
char tmp1[MAX_FIELD_WIDTH];
@@ -2466,12 +2469,14 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
"NO" : "YES");
table->field[6]->store((const char*) pos,
strlen((const char*) pos), cs);
- if (field->has_charset())
+ is_blob= (field->type() == FIELD_TYPE_BLOB);
+ if (field->has_charset() || is_blob)
{
- table->field[8]->store((longlong) field->field_length/
- field->charset()->mbmaxlen);
+ longlong c_octet_len= is_blob ? (longlong) field->max_length() :
+ (longlong) field->max_length()/field->charset()->mbmaxlen;
+ table->field[8]->store(c_octet_len);
table->field[8]->set_notnull();
- table->field[9]->store((longlong) field->field_length);
+ table->field[9]->store((longlong) field->max_length());
table->field[9]->set_notnull();
}
@@ -2499,6 +2504,17 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
case FIELD_TYPE_LONG:
case FIELD_TYPE_LONGLONG:
case FIELD_TYPE_INT24:
+ {
+ table->field[10]->store((longlong) field->max_length() - 1);
+ table->field[10]->set_notnull();
+ break;
+ }
+ case FIELD_TYPE_BIT:
+ {
+ table->field[10]->store((longlong) field->max_length());
+ table->field[10]->set_notnull();
+ break;
+ }
case FIELD_TYPE_FLOAT:
case FIELD_TYPE_DOUBLE:
{
@@ -2976,6 +2992,86 @@ static int get_schema_constraints_record(THD *thd, struct st_table_list *tables,
}
+static bool store_trigger(THD *thd, TABLE *table, const char *db,
+ const char *tname, LEX_STRING *trigger_name,
+ enum trg_event_type event,
+ enum trg_action_time_type timing,
+ LEX_STRING *trigger_stmt,
+ ulong sql_mode)
+{
+ CHARSET_INFO *cs= system_charset_info;
+ byte *sql_mode_str;
+ ulong sql_mode_len;
+
+ restore_record(table, s->default_values);
+ table->field[1]->store(db, strlen(db), cs);
+ table->field[2]->store(trigger_name->str, trigger_name->length, cs);
+ table->field[3]->store(trg_event_type_names[event].str,
+ trg_event_type_names[event].length, cs);
+ table->field[5]->store(db, strlen(db), cs);
+ table->field[6]->store(tname, strlen(tname), cs);
+ table->field[9]->store(trigger_stmt->str, trigger_stmt->length, cs);
+ table->field[10]->store("ROW", 3, cs);
+ table->field[11]->store(trg_action_time_type_names[timing].str,
+ trg_action_time_type_names[timing].length, cs);
+ table->field[14]->store("OLD", 3, cs);
+ table->field[15]->store("NEW", 3, cs);
+
+ sql_mode_str=
+ sys_var_thd_sql_mode::symbolic_mode_representation(thd,
+ sql_mode,
+ &sql_mode_len);
+ table->field[17]->store((const char*)sql_mode_str, sql_mode_len, cs);
+ return schema_table_store_record(thd, table);
+}
+
+
+static int get_schema_triggers_record(THD *thd, struct st_table_list *tables,
+ TABLE *table, bool res,
+ const char *base_name,
+ const char *file_name)
+{
+ DBUG_ENTER("get_schema_triggers_record");
+ /*
+    res can be non-zero when the processed table is a view or an error
+    happened while opening the processed table.
+ */
+ if (res)
+ {
+ if (!tables->view)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->net.last_errno, thd->net.last_error);
+ thd->clear_error();
+ DBUG_RETURN(0);
+ }
+ if (!tables->view && tables->table->triggers)
+ {
+ Table_triggers_list *triggers= tables->table->triggers;
+ int event, timing;
+ for (event= 0; event < (int)TRG_EVENT_MAX; event++)
+ {
+ for (timing= 0; timing < (int)TRG_ACTION_MAX; timing++)
+ {
+ LEX_STRING trigger_name;
+ LEX_STRING trigger_stmt;
+ ulong sql_mode;
+ if (triggers->get_trigger_info(thd, (enum trg_event_type) event,
+ (enum trg_action_time_type)timing,
+ &trigger_name, &trigger_stmt,
+ &sql_mode))
+ continue;
+ if (store_trigger(thd, table, base_name, file_name, &trigger_name,
+ (enum trg_event_type) event,
+ (enum trg_action_time_type) timing, &trigger_stmt,
+ sql_mode))
+ DBUG_RETURN(1);
+ }
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
void store_key_column_usage(TABLE *table, const char*db, const char *tname,
const char *key_name, uint key_len,
const char *con_type, uint con_len, longlong idx)
@@ -3236,7 +3332,8 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
if (!(table= create_tmp_table(thd, tmp_table_param,
field_list, (ORDER*) 0, 0, 0,
(select_lex->options | thd->options |
- TMP_TABLE_ALL_COLUMNS),
+ TMP_TABLE_ALL_COLUMNS) &
+ ~TMP_TABLE_FORCE_MYISAM,
HA_POS_ERROR, table_list->alias)))
DBUG_RETURN(0);
table_list->schema_table_param= tmp_table_param;
@@ -3473,9 +3570,8 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list)
if (table_list->schema_table_reformed) // show command
{
SELECT_LEX *sel= lex->current_select;
- uint i= 0;
Item *item;
- Field_translator *transl;
+ Field_translator *transl, *org_transl;
if (table_list->field_translation)
{
@@ -3496,16 +3592,17 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list)
{
DBUG_RETURN(1);
}
- while ((item= it++))
+ for (org_transl= transl; (item= it++); transl++)
{
- char *name= item->name;
- transl[i].item= item;
- if (!item->fixed && item->fix_fields(thd, &transl[i].item))
+ transl->item= item;
+ transl->name= item->name;
+ if (!item->fixed && item->fix_fields(thd, &transl->item))
+ {
DBUG_RETURN(1);
- transl[i++].name= name;
+ }
}
- table_list->field_translation= transl;
- table_list->field_translation_end= transl + sel->item_list.elements;
+ table_list->field_translation= org_transl;
+ table_list->field_translation_end= transl;
}
DBUG_RETURN(0);
@@ -3848,6 +3945,30 @@ ST_FIELD_INFO open_tables_fields_info[]=
};
+ST_FIELD_INFO triggers_fields_info[]=
+{
+ {"TRIGGER_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"TRIGGER_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"TRIGGER_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Trigger"},
+ {"EVENT_MANIPULATION", 6, MYSQL_TYPE_STRING, 0, 0, "Event"},
+ {"EVENT_OBJECT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"EVENT_OBJECT_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"EVENT_OBJECT_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"},
+ {"ACTION_ORDER", 4, MYSQL_TYPE_LONG, 0, 0, 0},
+ {"ACTION_CONDITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"ACTION_STATEMENT", 65535, MYSQL_TYPE_STRING, 0, 0, "Statement"},
+ {"ACTION_ORIENTATION", 9, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"ACTION_TIMING", 6, MYSQL_TYPE_STRING, 0, 0, "Timing"},
+ {"ACTION_REFERENCE_OLD_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"ACTION_REFERENCE_NEW_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
+ {"ACTION_REFERENCE_OLD_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"ACTION_REFERENCE_NEW_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0},
+ {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Created"},
+ {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, "sql_mode"},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
ST_FIELD_INFO variables_fields_info[]=
{
{"Variable_name", 80, MYSQL_TYPE_STRING, 0, 0, "Variable_name"},
@@ -3862,44 +3983,46 @@ ST_FIELD_INFO variables_fields_info[]=
ST_SCHEMA_TABLE schema_tables[]=
{
- {"SCHEMATA", schema_fields_info, create_schema_table,
- fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0},
- {"TABLES", tables_fields_info, create_schema_table,
- get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0},
- {"COLUMNS", columns_fields_info, create_schema_table,
- get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0},
{"CHARACTER_SETS", charsets_fields_info, create_schema_table,
fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0},
{"COLLATIONS", collation_fields_info, create_schema_table,
fill_schema_collation, make_old_format, 0, -1, -1, 0},
{"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info,
create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1, 0},
- {"ROUTINES", proc_fields_info, create_schema_table,
- fill_schema_proc, make_proc_old_format, 0, -1, -1, 0},
- {"STATISTICS", stat_fields_info, create_schema_table,
- get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0},
- {"VIEWS", view_fields_info, create_schema_table,
- get_all_tables, 0, get_schema_views_record, 1, 2, 0},
- {"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table,
- fill_schema_user_privileges, 0, 0, -1, -1, 0},
- {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table,
- fill_schema_schema_privileges, 0, 0, -1, -1, 0},
- {"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table,
- fill_schema_table_privileges, 0, 0, -1, -1, 0},
+ {"COLUMNS", columns_fields_info, create_schema_table,
+ get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0},
{"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table,
fill_schema_column_privileges, 0, 0, -1, -1, 0},
- {"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table,
- get_all_tables, 0, get_schema_constraints_record, 3, 4, 0},
{"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table,
get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0},
- {"TABLE_NAMES", table_names_fields_info, create_schema_table,
- get_all_tables, make_table_names_old_format, 0, 1, 2, 1},
{"OPEN_TABLES", open_tables_fields_info, create_schema_table,
fill_open_tables, make_old_format, 0, -1, -1, 1},
+ {"ROUTINES", proc_fields_info, create_schema_table,
+ fill_schema_proc, make_proc_old_format, 0, -1, -1, 0},
+ {"SCHEMATA", schema_fields_info, create_schema_table,
+ fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0},
+ {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table,
+ fill_schema_schema_privileges, 0, 0, -1, -1, 0},
+ {"STATISTICS", stat_fields_info, create_schema_table,
+ get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0},
{"STATUS", variables_fields_info, create_schema_table, fill_status,
make_old_format, 0, -1, -1, 1},
+ {"TABLES", tables_fields_info, create_schema_table,
+ get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0},
+ {"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table,
+ get_all_tables, 0, get_schema_constraints_record, 3, 4, 0},
+ {"TABLE_NAMES", table_names_fields_info, create_schema_table,
+ get_all_tables, make_table_names_old_format, 0, 1, 2, 1},
+ {"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table,
+ fill_schema_table_privileges, 0, 0, -1, -1, 0},
+ {"TRIGGERS", triggers_fields_info, create_schema_table,
+ get_all_tables, make_old_format, get_schema_triggers_record, 5, 6, 0},
{"VARIABLES", variables_fields_info, create_schema_table, fill_variables,
make_old_format, 0, -1, -1, 1},
+ {"VIEWS", view_fields_info, create_schema_table,
+ get_all_tables, 0, get_schema_views_record, 1, 2, 0},
+ {"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table,
+ fill_schema_user_privileges, 0, 0, -1, -1, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0}
};
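
The new TRIGGERS schema table registered above backs both
INFORMATION_SCHEMA.TRIGGERS and the SHOW TRIGGERS statement added in the
parser part of this patch. A usage sketch; the trigger, table and variable
names are invented for illustration:

    CREATE TRIGGER t1_bi BEFORE INSERT ON t1
    FOR EACH ROW SET @insert_count := @insert_count + 1;

    SHOW TRIGGERS FROM test LIKE 't1';
    SELECT TRIGGER_NAME, EVENT_MANIPULATION, ACTION_TIMING, SQL_MODE
      FROM INFORMATION_SCHEMA.TRIGGERS
     WHERE EVENT_OBJECT_SCHEMA = 'test' AND EVENT_OBJECT_TABLE = 't1';
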
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 89282d9fcb9..8874a70327e 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -23,6 +23,8 @@
#include <hash.h>
#include <myisam.h>
#include <my_dir.h>
+#include "sp_head.h"
+#include "sql_trigger.h"
#ifdef __WIN__
#include <io.h>
@@ -237,15 +239,11 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
error=0;
if (!drop_temporary)
{
- abort_locked_tables(thd,db,table->table_name);
- while (remove_table_from_cache(thd, db, table->table_name, 0) &&
- !thd->killed)
- {
- dropping_tables++;
- (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
- dropping_tables--;
- }
- drop_locked_tables(thd,db,table->table_name);
+ abort_locked_tables(thd, db, table->table_name);
+ remove_table_from_cache(thd, db, table->table_name,
+ RTFC_WAIT_OTHER_THREAD_FLAG |
+ RTFC_CHECK_KILLED_FLAG);
+ drop_locked_tables(thd, db, table->table_name);
if (thd->killed)
{
thd->no_warnings_for_error= 0;
@@ -290,16 +288,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
if (!(new_error=my_delete(path,MYF(MY_WME))))
{
some_tables_deleted=1;
- /*
- Destroy triggers for this table if there are any.
-
- We won't need this as soon as we will have new .FRM format,
- in which we will store trigger definitions in the same .FRM
- files as table descriptions.
- */
- strmov(end, triggers_file_ext);
- if (!access(path, F_OK))
- new_error= my_delete(path, MYF(MY_WME));
+ new_error= Table_triggers_list::drop_all_triggers(thd, db,
+ table->table_name);
}
error|= new_error;
}
@@ -1630,12 +1620,10 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
create_info->data_file_name= create_info->index_file_name= 0;
create_info->table_options=db_options;
- if (rea_create_table(thd, path, create_info, fields, key_count,
+ if (rea_create_table(thd, path, db, table_name,
+ create_info, fields, key_count,
key_info_buffer))
- {
- /* my_error(ER_CANT_CREATE_TABLE,MYF(0),table_name,my_errno); */
goto end;
- }
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
/* Open table and put in temporary table list */
@@ -1876,12 +1864,8 @@ static void wait_while_table_is_used(THD *thd,TABLE *table,
mysql_lock_abort(thd, table); // end threads waiting on lock
/* Wait until all there are no other threads that has this table open */
- while (remove_table_from_cache(thd, table->s->db, table->s->table_name, 0))
- {
- dropping_tables++;
- (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
- dropping_tables--;
- }
+ remove_table_from_cache(thd, table->s->db,
+ table->s->table_name, RTFC_WAIT_OTHER_THREAD_FLAG);
DBUG_VOID_RETURN;
}
@@ -2268,14 +2252,10 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
"Waiting to get writelock");
mysql_lock_abort(thd,table->table);
- while (remove_table_from_cache(thd, table->table->s->db,
- table->table->s->table_name, 0) &&
- ! thd->killed)
- {
- dropping_tables++;
- (void) pthread_cond_wait(&COND_refresh,&LOCK_open);
- dropping_tables--;
- }
+ remove_table_from_cache(thd, table->table->s->db,
+ table->table->s->table_name,
+ RTFC_WAIT_OTHER_THREAD_FLAG |
+ RTFC_CHECK_KILLED_FLAG);
thd->exit_cond(old_message);
if (thd->killed)
goto err;
@@ -2420,7 +2400,7 @@ send_result_message:
{
pthread_mutex_lock(&LOCK_open);
remove_table_from_cache(thd, table->table->s->db,
- table->table->s->table_name, 0);
+ table->table->s->table_name, RTFC_NO_FLAG);
pthread_mutex_unlock(&LOCK_open);
/* May be something modified consequently we have to invalidate cache */
query_cache_invalidate3(thd, table->table, 0);
@@ -2649,6 +2629,15 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
}
}
+ /*
+    CREATE TABLE ... LIKE should not be allowed for views, triggers, ...
+ */
+ if (mysql_frm_type(src_path) != FRMTYPE_TABLE)
+ {
+ my_error(ER_WRONG_OBJECT, MYF(0), src_db, src_table, "BASE TABLE");
+ goto err;
+ }
+
/*
Validate the destination table
@@ -2678,8 +2667,14 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
/*
Create a new table by copying from source table
*/
- if (my_copy(src_path, dst_path, MYF(MY_WME|MY_DONT_OVERWRITE_FILE)))
+ if (my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE)))
+ {
+ if (my_errno == ENOENT)
+ my_error(ER_BAD_DB_ERROR,MYF(0),db);
+ else
+ my_error(ER_CANT_CREATE_FILE,MYF(0),dst_path,my_errno);
goto err;
+ }
/*
As mysql_truncate don't work on a new table at this stage of
@@ -3747,8 +3742,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (table)
{
VOID(table->file->extra(HA_EXTRA_FORCE_REOPEN)); // Use new file
- remove_table_from_cache(thd,db,table_name, 0); // Mark in-use copies old
- mysql_lock_abort(thd,table); // end threads waiting on lock
+ /* Mark in-use copies old */
+ remove_table_from_cache(thd,db,table_name,RTFC_NO_FLAG);
+ /* end threads waiting on lock */
+ mysql_lock_abort(thd,table);
}
VOID(quick_rm_table(old_db_type,db,old_name));
if (close_data_tables(thd,db,table_name) ||
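
The mysql_frm_type() check added to mysql_create_like_table() rejects
source objects whose .frm does not describe a base table. A rough sketch of
the new behaviour (object names are assumed):

    CREATE VIEW v1 AS SELECT 1 AS a;
    CREATE TABLE t2 LIKE v1;   -- now fails with ER_WRONG_OBJECT instead of
                               -- silently copying the view's .frm file
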
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index fd79fc8b878..09eeff02de6 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -32,13 +32,56 @@ const char * const triggers_file_ext= ".TRG";
*/
static File_option triggers_file_parameters[]=
{
- {{(char*)"triggers", 8}, offsetof(class Table_triggers_list, definitions_list),
- FILE_OPTIONS_STRLIST},
+ {{(char*)"triggers", 8},
+ offsetof(class Table_triggers_list, definitions_list),
+ FILE_OPTIONS_STRLIST},
+ {{(char*)"sql_modes", 13},
+ offsetof(class Table_triggers_list, definition_modes_list),
+ FILE_OPTIONS_ULLLIST},
{{0, 0}, 0, FILE_OPTIONS_STRING}
};
/*
+  Structure representing the contents of a .TRN file, which is used to
+  support a database-wide trigger namespace.
+*/
+
+struct st_trigname
+{
+ LEX_STRING trigger_table;
+};
+
+static const LEX_STRING trigname_file_type= {(char *)"TRIGGERNAME", 11};
+
+const char * const trigname_file_ext= ".TRN";
+
+static File_option trigname_file_parameters[]=
+{
+ {{(char*)"trigger_table", 15}, offsetof(struct st_trigname, trigger_table),
+ FILE_OPTIONS_ESTRING},
+ {{0, 0}, 0, FILE_OPTIONS_STRING}
+};
+
+
+const LEX_STRING trg_action_time_type_names[]=
+{
+ { (char *) STRING_WITH_LEN("BEFORE") },
+ { (char *) STRING_WITH_LEN("AFTER") }
+};
+
+const LEX_STRING trg_event_type_names[]=
+{
+ { (char *) STRING_WITH_LEN("INSERT") },
+ { (char *) STRING_WITH_LEN("UPDATE") },
+ { (char *) STRING_WITH_LEN("DELETE") }
+};
+
+
+static TABLE_LIST *add_table_for_trigger(THD *thd, sp_name *trig);
+
+
+/*
Create or drop trigger for table.
SYNOPSIS
@@ -69,6 +112,10 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
But do we want this ?
*/
+ if (!create &&
+ !(tables= add_table_for_trigger(thd, thd->lex->spname)))
+ DBUG_RETURN(TRUE);
+
/* We should have only one table in table list. */
DBUG_ASSERT(tables->next_global == 0);
@@ -84,12 +131,13 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
DBUG_RETURN(TRUE);
/*
- We do not allow creation of triggers on views or temporary tables.
- We have to do this check here and not in
- Table_triggers_list::create_trigger() because we want to avoid messing
- with table cash for views and temporary tables.
+    We do not allow creation of triggers on temporary tables. We also don't
+    allow creation of triggers on views, but fulfilment of this restriction
+    is guaranteed by open_ltable(). It is better to have this check here
+    than to do it in Table_triggers_list::create_trigger() and mess with the
+    table cache.
*/
- if (tables->view || table->s->tmp_table != NO_TMP_TABLE)
+ if (table->s->tmp_table != NO_TMP_TABLE)
{
my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias);
DBUG_RETURN(TRUE);
@@ -174,28 +222,29 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables)
{
LEX *lex= thd->lex;
TABLE *table= tables->table;
- char dir_buff[FN_REFLEN], file_buff[FN_REFLEN];
- LEX_STRING dir, file;
+ char dir_buff[FN_REFLEN], file_buff[FN_REFLEN], trigname_buff[FN_REFLEN],
+ trigname_path[FN_REFLEN];
+ LEX_STRING dir, file, trigname_file;
LEX_STRING *trg_def, *name;
+ ulonglong *trg_sql_mode;
Item_trigger_field *trg_field;
- List_iterator_fast<LEX_STRING> it(names_list);
+ struct st_trigname trigname;
- /* We don't allow creation of several triggers of the same type yet */
- if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time])
+
+ /* Trigger must be in the same schema as target table. */
+ if (my_strcasecmp(table_alias_charset, table->s->db,
+ lex->spname->m_db.str ? lex->spname->m_db.str :
+ thd->db))
{
- my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0));
+ my_error(ER_TRG_IN_WRONG_SCHEMA, MYF(0));
return 1;
}
- /* Let us check if trigger with the same name exists */
- while ((name= it++))
+ /* We don't allow creation of several triggers of the same type yet */
+ if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time])
{
- if (my_strcasecmp(system_charset_info, lex->ident.str,
- name->str) == 0)
- {
- my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0));
- return 1;
- }
+ my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0));
+ return 1;
}
/*
@@ -234,6 +283,25 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables)
file.length= strxnmov(file_buff, FN_REFLEN, tables->table_name,
triggers_file_ext, NullS) - file_buff;
file.str= file_buff;
+ trigname_file.length= strxnmov(trigname_buff, FN_REFLEN,
+ lex->spname->m_name.str,
+ trigname_file_ext, NullS) - trigname_buff;
+ trigname_file.str= trigname_buff;
+ strxnmov(trigname_path, FN_REFLEN, dir_buff, trigname_buff, NullS);
+
+ /* Use the filesystem to enforce trigger namespace constraints. */
+ if (!access(trigname_path, F_OK))
+ {
+ my_error(ER_TRG_ALREADY_EXISTS, MYF(0));
+ return 1;
+ }
+
+ trigname.trigger_table.str= tables->table_name;
+ trigname.trigger_table.length= tables->table_name_length;
+
+ if (sql_create_definition_file(&dir, &trigname_file, &trigname_file_type,
+ (gptr)&trigname, trigname_file_parameters, 0))
+ return 1;
/*
Soon we will invalidate table object and thus Table_triggers_list object
@@ -245,14 +313,71 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables)
*/
if (!(trg_def= (LEX_STRING *)alloc_root(&table->mem_root,
sizeof(LEX_STRING))) ||
- definitions_list.push_back(trg_def, &table->mem_root))
- return 1;
+ definitions_list.push_back(trg_def, &table->mem_root) ||
+ !(trg_sql_mode= (ulonglong*)alloc_root(&table->mem_root,
+ sizeof(ulonglong))) ||
+ definition_modes_list.push_back(trg_sql_mode, &table->mem_root))
+ goto err_with_cleanup;
trg_def->str= thd->query;
trg_def->length= thd->query_length;
+ *trg_sql_mode= thd->variables.sql_mode;
+
+ if (!sql_create_definition_file(&dir, &file, &triggers_file_type,
+ (gptr)this, triggers_file_parameters, 3))
+ return 0;
+
+err_with_cleanup:
+ my_delete(trigname_path, MYF(MY_WME));
+ return 1;
+}
- return sql_create_definition_file(&dir, &file, &triggers_file_type,
- (gptr)this, triggers_file_parameters, 3);
+
+/*
+ Deletes the .TRG file for a table
+
+ SYNOPSIS
+ rm_trigger_file()
+ path - char buffer of size FN_REFLEN to be used
+ for constructing path to .TRG file.
+ db - table's database name
+ table_name - table's name
+
+ RETURN VALUE
+ False - success
+ True - error
+*/
+
+static bool rm_trigger_file(char *path, char *db, char *table_name)
+{
+ strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", table_name,
+ triggers_file_ext, NullS);
+ unpack_filename(path, path);
+ return my_delete(path, MYF(MY_WME));
+}
+
+
+/*
+ Deletes the .TRN file for a trigger
+
+ SYNOPSIS
+ rm_trigname_file()
+ path - char buffer of size FN_REFLEN to be used
+ for constructing path to .TRN file.
+ db - trigger's database name
+      trigger_name - trigger's name
+
+ RETURN VALUE
+ False - success
+ True - error
+*/
+
+static bool rm_trigname_file(char *path, char *db, char *trigger_name)
+{
+ strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", trigger_name,
+ trigname_file_ext, NullS);
+ unpack_filename(path, path);
+ return my_delete(path, MYF(MY_WME));
}
@@ -275,12 +400,15 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables)
LEX_STRING *name;
List_iterator_fast<LEX_STRING> it_name(names_list);
List_iterator<LEX_STRING> it_def(definitions_list);
+ List_iterator<ulonglong> it_mod(definition_modes_list);
+ char path[FN_REFLEN];
while ((name= it_name++))
{
it_def++;
+ it_mod++;
- if (my_strcasecmp(system_charset_info, lex->ident.str,
+ if (my_strcasecmp(table_alias_charset, lex->spname->m_name.str,
name->str) == 0)
{
/*
@@ -288,21 +416,18 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables)
clean trigger removing since table will be reopened anyway.
*/
it_def.remove();
+ it_mod.remove();
if (definitions_list.is_empty())
{
- char path[FN_REFLEN];
-
/*
TODO: Probably instead of removing .TRG file we should move
to archive directory but this should be done as part of
parse_file.cc functionality (because we will need it
elsewhere).
*/
- strxnmov(path, FN_REFLEN, mysql_data_home, "/", tables->db, "/",
- tables->table_name, triggers_file_ext, NullS);
- unpack_filename(path, path);
- return my_delete(path, MYF(MY_WME));
+ if (rm_trigger_file(path, tables->db, tables->table_name))
+ return 1;
}
else
{
@@ -317,10 +442,15 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables)
triggers_file_ext, NullS) - file_buff;
file.str= file_buff;
- return sql_create_definition_file(&dir, &file, &triggers_file_type,
- (gptr)this,
- triggers_file_parameters, 3);
+ if (sql_create_definition_file(&dir, &file, &triggers_file_type,
+ (gptr)this, triggers_file_parameters,
+ 3))
+ return 1;
}
+
+ if (rm_trigname_file(path, tables->db, lex->spname->m_name.str))
+ return 1;
+ return 0;
}
}
@@ -331,8 +461,8 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables)
Table_triggers_list::~Table_triggers_list()
{
- for (int i= 0; i < 3; i++)
- for (int j= 0; j < 2; j++)
+ for (int i= 0; i < (int)TRG_EVENT_MAX; i++)
+ for (int j= 0; j < (int)TRG_ACTION_MAX; j++)
delete bodies[i][j];
if (record1_field)
@@ -389,17 +519,21 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
db - table's database name
table_name - table's name
table - pointer to table object
+ names_only - stop after loading trigger names
RETURN VALUE
False - success
True - error
*/
+
bool Table_triggers_list::check_n_load(THD *thd, const char *db,
- const char *table_name, TABLE *table)
+ const char *table_name, TABLE *table,
+ bool names_only)
{
char path_buff[FN_REFLEN];
LEX_STRING path;
File_parser *parser;
+ LEX_STRING save_db;
DBUG_ENTER("Table_triggers_list::check_n_load");
@@ -420,8 +554,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
if ((parser= sql_parse_prepare(&path, &table->mem_root, 1)))
{
- if (!strncmp(triggers_file_type.str, parser->type()->str,
- parser->type()->length))
+ if (is_equal(&triggers_file_type, parser->type()))
{
Table_triggers_list *triggers=
new (&table->mem_root) Table_triggers_list(table);
@@ -429,10 +562,48 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
if (!triggers)
DBUG_RETURN(1);
+ /*
+        Old versions of the .TRG file do not contain sql_modes, so we
+        initialize the list for safety.
+ */
+ triggers->definition_modes_list.empty();
+
if (parser->parse((gptr)triggers, &table->mem_root,
- triggers_file_parameters, 1))
+ triggers_file_parameters, 2))
DBUG_RETURN(1);
+ List_iterator_fast<LEX_STRING> it(triggers->definitions_list);
+ LEX_STRING *trg_create_str, *trg_name_str;
+ ulonglong *trg_sql_mode;
+
+ if (triggers->definition_modes_list.is_empty() &&
+ !triggers->definitions_list.is_empty())
+ {
+ /*
+          This is the old file format => we should fill the list of sql_modes.
+
+          We use one mode (the current global one) for all triggers, because
+          the old format carries no information about the mode.
+ */
+ if (!(trg_sql_mode= (ulonglong*)alloc_root(&table->mem_root,
+ sizeof(ulonglong))))
+ {
+ DBUG_RETURN(1); // EOM
+ }
+ *trg_sql_mode= global_system_variables.sql_mode;
+ while ((trg_create_str= it++))
+ {
+ if (triggers->definition_modes_list.push_back(trg_sql_mode,
+ &table->mem_root))
+ {
+ DBUG_RETURN(1); // EOM
+ }
+ }
+ it.rewind();
+ }
+
+ DBUG_ASSERT(triggers->definition_modes_list.elements ==
+ triggers->definitions_list.elements);
table->triggers= triggers;
/*
@@ -444,25 +615,30 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
alloc_root(&table->mem_root, triggers->sroutines_key.length)))
DBUG_RETURN(1);
triggers->sroutines_key.str[0]= TYPE_ENUM_TRIGGER;
- strmov(strmov(strmov(triggers->sroutines_key.str+1, db), "."),
- table_name);
+ strxmov(triggers->sroutines_key.str+1, db, ".", table_name, NullS);
/*
TODO: This could be avoided if there is no triggers
for UPDATE and DELETE.
*/
- if (triggers->prepare_record1_accessors(table))
+ if (!names_only && triggers->prepare_record1_accessors(table))
DBUG_RETURN(1);
- List_iterator_fast<LEX_STRING> it(triggers->definitions_list);
- LEX_STRING *trg_create_str, *trg_name_str;
char *trg_name_buff;
+ List_iterator_fast<ulonglong> itm(triggers->definition_modes_list);
LEX *old_lex= thd->lex, lex;
+ ulong save_sql_mode= thd->variables.sql_mode;
thd->lex= &lex;
+ save_db.str= thd->db;
+ save_db.length= thd->db_length;
+ thd->db_length= strlen(db);
+ thd->db= (char *) db;
while ((trg_create_str= it++))
{
+ trg_sql_mode= itm++;
+ thd->variables.sql_mode= (ulong)*trg_sql_mode;
lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length);
if (yyparse((void *)thd) || thd->is_fatal_error)
@@ -471,40 +647,31 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
Free lex associated resources
QQ: Do we really need all this stuff here ?
*/
- if (lex.sphead)
- {
- delete lex.sphead;
- lex.sphead= 0;
- }
+ delete lex.sphead;
goto err_with_lex_cleanup;
}
+ lex.sphead->m_sql_mode= *trg_sql_mode;
triggers->bodies[lex.trg_chistics.event]
[lex.trg_chistics.action_time]= lex.sphead;
- lex.sphead= 0;
-
- if (!(trg_name_buff= alloc_root(&table->mem_root,
- sizeof(LEX_STRING) +
- lex.ident.length + 1)))
- goto err_with_lex_cleanup;
-
- trg_name_str= (LEX_STRING *)trg_name_buff;
- trg_name_buff+= sizeof(LEX_STRING);
- memcpy(trg_name_buff, lex.ident.str,
- lex.ident.length + 1);
- trg_name_str->str= trg_name_buff;
- trg_name_str->length= lex.ident.length;
+ if (triggers->names_list.push_back(&lex.sphead->m_name,
+ &table->mem_root))
+ goto err_with_lex_cleanup;
- if (triggers->names_list.push_back(trg_name_str, &table->mem_root))
- goto err_with_lex_cleanup;
+ if (names_only)
+ {
+ lex_end(&lex);
+ continue;
+ }
/*
Let us bind Item_trigger_field objects representing access to fields
in old/new versions of row in trigger to Field objects in table being
opened.
- We ignore errors here, because if even something is wrong we still will
- be willing to open table to perform some operations (e.g. SELECT)...
+        We ignore errors here, because even if something is wrong we still
+        want to be able to open the table to perform some operations (e.g.
+        SELECT)...
Anyway some things can be checked only during trigger execution.
*/
for (Item_trigger_field *trg_field=
@@ -515,7 +682,10 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
lex_end(&lex);
}
+ thd->db= save_db.str;
+ thd->db_length= save_db.length;
thd->lex= old_lex;
+ thd->variables.sql_mode= save_sql_mode;
DBUG_RETURN(0);
@@ -523,6 +693,9 @@ err_with_lex_cleanup:
// QQ: anything else ?
lex_end(&lex);
thd->lex= old_lex;
+ thd->variables.sql_mode= save_sql_mode;
+ thd->db= save_db.str;
+ thd->db_length= save_db.length;
DBUG_RETURN(1);
}
@@ -531,9 +704,168 @@ err_with_lex_cleanup:
be merged into .FRM anyway.
*/
my_error(ER_WRONG_OBJECT, MYF(0),
- table_name, triggers_file_ext, "TRIGGER");
+ table_name, triggers_file_ext+1, "TRIGGER");
DBUG_RETURN(1);
}
DBUG_RETURN(1);
}
+
+
+/*
+ Obtains and returns trigger metadata
+
+ SYNOPSIS
+ get_trigger_info()
+ thd - current thread context
+ event - trigger event type
+ time_type - trigger action time
+      trigger_name - returns name of trigger
+      trigger_stmt - returns statement of trigger
+      sql_mode - returns sql_mode of trigger
+
+ RETURN VALUE
+ False - success
+ True - error
+*/
+
+bool Table_triggers_list::get_trigger_info(THD *thd, trg_event_type event,
+ trg_action_time_type time_type,
+ LEX_STRING *trigger_name,
+ LEX_STRING *trigger_stmt,
+ ulong *sql_mode)
+{
+ sp_head *body;
+ DBUG_ENTER("get_trigger_info");
+ if ((body= bodies[event][time_type]))
+ {
+ *trigger_name= body->m_name;
+ *trigger_stmt= body->m_body;
+ *sql_mode= body->m_sql_mode;
+ DBUG_RETURN(0);
+ }
+ DBUG_RETURN(1);
+}
+
+
+/*
+ Find trigger's table from trigger identifier and add it to
+ the statement table list.
+
+ SYNOPSIS
+    add_table_for_trigger()
+ thd - current thread context
+ trig - identifier for trigger
+
+ RETURN VALUE
+ 0 - error
+ # - pointer to TABLE_LIST object for the table
+*/
+
+static TABLE_LIST *add_table_for_trigger(THD *thd, sp_name *trig)
+{
+ const char *db= !trig->m_db.str ? thd->db : trig->m_db.str;
+ LEX *lex= thd->lex;
+ char path_buff[FN_REFLEN];
+ LEX_STRING path;
+ File_parser *parser;
+ struct st_trigname trigname;
+ DBUG_ENTER("add_table_for_trigger");
+
+ strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", db, "/",
+ trig->m_name.str, trigname_file_ext, NullS);
+ path.length= unpack_filename(path_buff, path_buff);
+ path.str= path_buff;
+
+ if (access(path_buff, F_OK))
+ {
+ my_error(ER_TRG_DOES_NOT_EXIST, MYF(0));
+ DBUG_RETURN(0);
+ }
+
+ if (!(parser= sql_parse_prepare(&path, thd->mem_root, 1)))
+ DBUG_RETURN(0);
+
+ if (!is_equal(&trigname_file_type, parser->type()))
+ {
+ my_error(ER_WRONG_OBJECT, MYF(0), trig->m_name.str, trigname_file_ext+1,
+ "TRIGGERNAME");
+ DBUG_RETURN(0);
+ }
+
+ if (parser->parse((gptr)&trigname, thd->mem_root,
+ trigname_file_parameters, 1))
+ DBUG_RETURN(0);
+
+ /* We need to reset statement table list to be PS/SP friendly. */
+ lex->query_tables= 0;
+ lex->query_tables_last= &lex->query_tables;
+ DBUG_RETURN(sp_add_to_query_tables(thd, lex, db,
+ trigname.trigger_table.str, TL_WRITE));
+}
+
+
+/*
+ Drop all triggers for table.
+
+ SYNOPSIS
+ drop_all_triggers()
+ thd - current thread context
+ db - schema for table
+ name - name for table
+
+ NOTE
+    The calling thread should hold the LOCK_open mutex.
+
+ RETURN VALUE
+ False - success
+ True - error
+*/
+
+bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name)
+{
+ TABLE table;
+ char path[FN_REFLEN];
+ bool result= 0;
+ DBUG_ENTER("drop_all_triggers");
+
+ bzero(&table, sizeof(table));
+ init_alloc_root(&table.mem_root, 8192, 0);
+
+ safe_mutex_assert_owner(&LOCK_open);
+
+ if (Table_triggers_list::check_n_load(thd, db, name, &table, 1))
+ {
+ result= 1;
+ goto end;
+ }
+ if (table.triggers)
+ {
+ LEX_STRING *trigger;
+ List_iterator_fast<LEX_STRING> it_name(table.triggers->names_list);
+
+ while ((trigger= it_name++))
+ {
+ if (rm_trigname_file(path, db, trigger->str))
+ {
+ /*
+          Instead of immediately bailing out with an error if we were unable
+          to remove a .TRN file, we keep trying to drop the other files.
+ */
+ result= 1;
+ continue;
+ }
+ }
+
+ if (rm_trigger_file(path, db, name))
+ {
+ result= 1;
+ goto end;
+ }
+ }
+end:
+ if (table.triggers)
+ delete table.triggers;
+ free_root(&table.mem_root, MYF(0));
+ DBUG_RETURN(result);
+}
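
Taken together, the .TRN companion files and the schema check above give
triggers a database-wide namespace and let DROP TRIGGER locate the subject
table from the trigger name alone. A sketch of the resulting behaviour; all
object names are assumed:

    CREATE TRIGGER trg1 BEFORE INSERT ON t1 FOR EACH ROW SET @a := 1;
    -- Refused: test/trg1.TRN already exists in the schema directory
    -- (ER_TRG_ALREADY_EXISTS).
    CREATE TRIGGER trg1 BEFORE DELETE ON t2 FOR EACH ROW SET @a := 2;
    -- Refused: a trigger may not live in a schema other than its table's
    -- (ER_TRG_IN_WRONG_SCHEMA).
    CREATE TRIGGER other_db.trg2 BEFORE INSERT ON t1 FOR EACH ROW SET @a := 3;
    -- DROP TRIGGER now takes the trigger name; the .TRN file maps it back
    -- to table t1.
    DROP TRIGGER trg1;
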
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index 044219d5ac9..e2ed4bcc0f4 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -23,7 +23,7 @@
class Table_triggers_list: public Sql_alloc
{
/* Triggers as SPs grouped by event, action_time */
- sp_head *bodies[3][2];
+ sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX];
/*
Copy of TABLE::Field array with field pointers set to TABLE::record[1]
buffer instead of TABLE::record[0] (used for OLD values in on UPDATE
@@ -60,6 +60,10 @@ public:
It have to be public because we are using it directly from parser.
*/
List<LEX_STRING> definitions_list;
+ /*
+ List of sql modes for triggers
+ */
+ List<ulonglong> definition_modes_list;
Table_triggers_list(TABLE *table_arg):
record1_field(0), table(table_arg)
@@ -78,7 +82,7 @@ public:
if (bodies[event][time_type])
{
- bool save_in_sub_stmt= thd->transaction.in_sub_stmt;
+ bool save_in_sub_stmt= thd->in_sub_stmt;
#ifndef EMBEDDED_LIBRARY
/* Surpress OK packets in case if we will execute statements */
my_bool nsok= thd->net.no_send_ok;
@@ -107,11 +111,11 @@ public:
does NOT go into binlog.
*/
tmp_disable_binlog(thd);
- thd->transaction.in_sub_stmt= TRUE;
+ thd->in_sub_stmt= TRUE;
res= bodies[event][time_type]->execute_function(thd, 0, 0, 0);
- thd->transaction.in_sub_stmt= save_in_sub_stmt;
+ thd->in_sub_stmt= save_in_sub_stmt;
reenable_binlog(thd);
#ifndef EMBEDDED_LIBRARY
@@ -121,9 +125,14 @@ public:
return res;
}
+ bool get_trigger_info(THD *thd, trg_event_type event,
+ trg_action_time_type time_type,
+ LEX_STRING *trigger_name, LEX_STRING *trigger_stmt,
+ ulong *sql_mode);
static bool check_n_load(THD *thd, const char *db, const char *table_name,
- TABLE *table);
+ TABLE *table, bool names_only);
+ static bool drop_all_triggers(THD *thd, char *db, char *table_name);
bool has_delete_triggers()
{
@@ -143,3 +152,6 @@ public:
private:
bool prepare_record1_accessors(TABLE *table);
};
+
+extern const LEX_STRING trg_action_time_type_names[];
+extern const LEX_STRING trg_event_type_names[];
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index f2b637dc5f4..cdbe8a986b2 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -119,8 +119,6 @@ void
st_select_lex_unit::init_prepare_fake_select_lex(THD *thd)
{
thd->lex->current_select= fake_select_lex;
- fake_select_lex->ftfunc_list_alloc.empty();
- fake_select_lex->ftfunc_list= &fake_select_lex->ftfunc_list_alloc;
fake_select_lex->table_list.link_in_list((byte *)&result_table_list,
(byte **)
&result_table_list.next_local);
@@ -235,7 +233,13 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
if ((res= (res || thd_arg->is_fatal_error)))
goto err;
- if (sl == first_select)
+ /*
+    Use the item list of the underlying select for derived tables to
+    preserve information about field lengths and exact types.
+ */
+ if (!is_union)
+ types= first_select_in_union()->item_list;
+ else if (sl == first_select)
{
/*
We need to create an empty table object. It is used
@@ -295,15 +299,24 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
goto err;
}
}
+
+ ulong create_options= (first_select_in_union()->options | thd_arg->options |
+ TMP_TABLE_ALL_COLUMNS) & ~TMP_TABLE_FORCE_MYISAM;
+ /*
+ Force the temporary table to be a MyISAM table if we're going to use
+    fulltext functions (MATCH ... AGAINST ... IN BOOLEAN MODE) when reading
+ from it (this should be removed in 5.2 when fulltext search is moved
+ out of MyISAM).
+ */
+ if (global_parameters->ftfunc_list->elements)
+ create_options= create_options | TMP_TABLE_FORCE_MYISAM;
union_result->tmp_table_param.field_count= types.elements;
if (!(table= create_tmp_table(thd_arg,
&union_result->tmp_table_param, types,
(ORDER*) 0, (bool) union_distinct, 1,
- (first_select_in_union()->options |
- thd_arg->options |
- TMP_TABLE_ALL_COLUMNS),
- HA_POS_ERROR, (char *) tmp_table_alias)))
+ create_options, HA_POS_ERROR,
+ (char *) tmp_table_alias)))
goto err;
table->file->extra(HA_EXTRA_WRITE_CACHE);
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
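
The TMP_TABLE_FORCE_MYISAM flag introduced above targets statements where a
fulltext function has to be evaluated against the materialised UNION result,
which a HEAP temporary table cannot support. A hedged sketch of the kind of
query involved (table, column and search term are assumed):

    SELECT a, b FROM t1 WHERE MATCH (b) AGAINST ('word' IN BOOLEAN MODE)
    UNION
    SELECT a, b FROM t2 WHERE MATCH (b) AGAINST ('word' IN BOOLEAN MODE)
    ORDER BY MATCH (b) AGAINST ('word' IN BOOLEAN MODE) DESC;
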
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index a60bf80a6d8..aedff648e5c 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -24,6 +24,8 @@
#define MD5_BUFF_LENGTH 33
+const LEX_STRING view_type= { (char*) STRING_WITH_LEN("VIEW") };
+
static int mysql_register_view(THD *thd, TABLE_LIST *view,
enum_view_create_mode mode);
@@ -431,7 +433,7 @@ static File_option view_parameters[]=
FILE_OPTIONS_STRING}
};
-static LEX_STRING view_file_type[]= {{(char*)"VIEW", 4}};
+static LEX_STRING view_file_type[]= {{(char*) STRING_WITH_LEN("VIEW") }};
/*
@@ -470,7 +472,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
thd->variables.sql_mode|= sql_mode;
}
str.append('\0');
- DBUG_PRINT("VIEW", ("View: %s", str.ptr()));
+ DBUG_PRINT("info", ("View: %s", str.ptr()));
/* print file name */
(void) my_snprintf(dir_buff, FN_REFLEN, "%s/%s/",
@@ -507,8 +509,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
if (!(parser= sql_parse_prepare(&path, thd->mem_root, 0)))
DBUG_RETURN(1);
- if (!parser->ok() ||
- strncmp("VIEW", parser->type()->str, parser->type()->length))
+ if (!parser->ok() || !is_equal(&view_type, parser->type()))
{
my_error(ER_WRONG_OBJECT, MYF(0),
(view->db ? view->db : thd->db), view->table_name, "VIEW");
@@ -691,7 +692,7 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table)
view_select= &lex->select_lex;
view_select->select_number= ++thd->select_number;
{
- ulong options= thd->options;
+ ulong save_mode= thd->variables.sql_mode;
/* switch off modes which can prevent normal parsing of VIEW
- MODE_REAL_AS_FLOAT affect only CREATE TABLE parsing
+ MODE_PIPES_AS_CONCAT affect expression parsing
@@ -716,13 +717,13 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table)
? MODE_NO_AUTO_VALUE_ON_ZERO affect UPDATEs
+ MODE_NO_BACKSLASH_ESCAPES affect expression parsing
*/
- thd->options&= ~(MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
- MODE_IGNORE_SPACE | MODE_NO_BACKSLASH_ESCAPES);
+ thd->variables.sql_mode&= ~(MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES |
+ MODE_IGNORE_SPACE | MODE_NO_BACKSLASH_ESCAPES);
CHARSET_INFO *save_cs= thd->variables.character_set_client;
thd->variables.character_set_client= system_charset_info;
res= yyparse((void *)thd);
thd->variables.character_set_client= save_cs;
- thd->options= options;
+ thd->variables.sql_mode= save_mode;
}
if (!res && !thd->is_fatal_error)
{
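
The switch from thd->options to thd->variables.sql_mode above matters
because the masked flags (PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE,
NO_BACKSLASH_ESCAPES) are sql_mode bits, so clearing them in options had no
effect on re-parsing the stored view definition. A sketch with an assumed
view name:

    SET sql_mode = 'ANSI_QUOTES';
    CREATE VIEW "v_demo" AS SELECT 1 AS a;
    SET sql_mode = '';
    -- Opening the view re-parses its stored text; the conflicting modes are
    -- now removed from sql_mode for the duration of that parse.
    SELECT * FROM v_demo;
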
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 84de06d5604..b680787b9a3 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -45,7 +45,7 @@ int yylex(void *yylval, void *yythd);
const LEX_STRING null_lex_str={0,0};
-#define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if(my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }}
+#define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if (my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }}
#define WARN_DEPRECATED(A,B) \
push_warning_printf(((THD *)yythd), MYSQL_ERROR::WARN_LEVEL_WARN, \
@@ -465,6 +465,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token PACK_KEYS_SYM
%token PARTIAL
%token PASSWORD
+%token PARAM_MARKER
%token PHASE_SYM
%token POINTFROMTEXT
%token POINT_SYM
@@ -598,6 +599,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token TRAILING
%token TRANSACTION_SYM
%token TRIGGER_SYM
+%token TRIGGERS_SYM
%token TRIM
%token TRUE_SYM
%token TRUNCATE_SYM
@@ -1265,7 +1267,7 @@ create:
}
opt_view_list AS select_init check_option
{}
- | CREATE TRIGGER_SYM ident trg_action_time trg_event
+ | CREATE TRIGGER_SYM sp_name trg_action_time trg_event
ON table_ident FOR_SYM EACH_SYM ROW_SYM
{
LEX *lex= Lex;
@@ -1284,6 +1286,7 @@ create:
sp->m_type= TYPE_ENUM_TRIGGER;
lex->sphead= sp;
+ lex->spname= $3;
/*
We have to turn of CLIENT_MULTI_QUERIES while parsing a
stored procedure, otherwise yylex will chop it into pieces
@@ -1294,7 +1297,7 @@ create:
bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics));
lex->sphead->m_chistics= &lex->sp_chistics;
- lex->sphead->m_body_begin= lex->tok_start;
+ lex->sphead->m_body_begin= lex->ptr;
}
sp_proc_stmt
{
@@ -1302,14 +1305,12 @@ create:
sp_head *sp= lex->sphead;
lex->sql_command= SQLCOM_CREATE_TRIGGER;
- sp->init_strings(YYTHD, lex, NULL);
+ sp->init_strings(YYTHD, lex, $3);
/* Restore flag if it was cleared above */
if (sp->m_old_cmq)
YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES;
sp->restore_thd_mem_root(YYTHD);
- lex->ident= $3;
-
/*
We have to do it after parsing trigger body, because some of
sp_proc_stmt alternatives are not saving/restoring LEX, so
@@ -4025,7 +4026,7 @@ select_into:
select_from:
FROM join_table_list where_clause group_clause having_clause
opt_order_clause opt_limit_clause procedure_clause
- | FROM DUAL_SYM opt_limit_clause
+ | FROM DUAL_SYM where_clause opt_limit_clause
/* oracle compatibility: oracle always requires FROM clause,
and DUAL is system table without fields.
Is "SELECT 1 FROM DUAL" any better than "SELECT 1" ?
@@ -4226,9 +4227,25 @@ bool_pri:
predicate:
bit_expr IN_SYM '(' expr_list ')'
- { $4->push_front($1); $$= new Item_func_in(*$4); }
+ {
+ if ($4->elements == 1)
+ $$= new Item_func_eq($1, $4->head());
+ else
+ {
+ $4->push_front($1);
+ $$= new Item_func_in(*$4);
+ }
+ }
| bit_expr not IN_SYM '(' expr_list ')'
- { $5->push_front($1); $$= negate_expression(YYTHD, new Item_func_in(*$5)); }
+ {
+ if ($5->elements == 1)
+ $$= new Item_func_ne($1, $5->head());
+ else
+ {
+ $5->push_front($1);
+ $$= negate_expression(YYTHD, new Item_func_in(*$5));
+ }
+ }
| bit_expr IN_SYM in_subselect
{ $$= new Item_in_subselect($1, $3); }
| bit_expr not IN_SYM in_subselect
@@ -4805,7 +4822,7 @@ simple_expr:
| UNIX_TIMESTAMP '(' expr ')'
{ $$= new Item_func_unix_timestamp($3); }
| USER '(' ')'
- { $$= new Item_func_user(); Lex->safe_to_cache_query=0; }
+ { $$= new Item_func_user(FALSE); Lex->safe_to_cache_query=0; }
| UTC_DATE_SYM optional_braces
{ $$= new Item_func_curdate_utc(); Lex->safe_to_cache_query=0;}
| UTC_TIME_SYM optional_braces
@@ -5902,19 +5919,11 @@ drop:
lex->sql_command= SQLCOM_DROP_VIEW;
lex->drop_if_exists= $3;
}
- | DROP TRIGGER_SYM ident '.' ident
+ | DROP TRIGGER_SYM sp_name
{
LEX *lex= Lex;
-
lex->sql_command= SQLCOM_DROP_TRIGGER;
- /* QQ: Could we loosen lock type in certain cases ? */
- if (!lex->select_lex.add_table_to_list(YYTHD,
- new Table_ident($3),
- (LEX_STRING*) 0,
- TL_OPTION_UPDATING,
- TL_WRITE))
- YYABORT;
- lex->ident= $5;
+ lex->spname= $3;
}
;
@@ -6279,6 +6288,15 @@ show_param:
if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_NAMES))
YYABORT;
}
+ | opt_full TRIGGERS_SYM opt_db wild_and_where
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SELECT;
+ lex->orig_sql_command= SQLCOM_SHOW_TRIGGERS;
+ lex->select_lex.db= $3;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_TRIGGERS))
+ YYABORT;
+ }
| TABLE_SYM STATUS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
@@ -6933,23 +6951,15 @@ text_string:
;
param_marker:
- '?'
+ PARAM_MARKER
{
THD *thd=YYTHD;
LEX *lex= thd->lex;
- if (thd->command == COM_STMT_PREPARE)
+ Item_param *item= new Item_param((uint) (lex->tok_start -
+ (uchar *) thd->query));
+ if (!($$= item) || lex->param_list.push_back(item))
{
- Item_param *item= new Item_param((uint) (lex->tok_start -
- (uchar *) thd->query));
- if (!($$= item) || lex->param_list.push_back(item))
- {
- my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
- YYABORT;
- }
- }
- else
- {
- yyerror(ER(ER_SYNTAX_ERROR));
+ my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
YYABORT;
}
}
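
With the lexer now producing PARAM_MARKER only where a placeholder is legal, the action above unconditionally creates an Item_param that records the marker's byte offset in the statement text (lex->tok_start - thd->query), so later prepared-statement code can refer back to each '?' by position. A self-contained sketch of that bookkeeping; the scanner and its quoting rules are simplified assumptions:

    #include <string>
    #include <vector>

    // Collect the byte offsets of '?' placeholders, ignoring markers that
    // appear inside single-quoted string literals.
    std::vector<std::string::size_type> param_offsets(const std::string &query)
    {
      std::vector<std::string::size_type> offsets;
      bool in_string= false;
      for (std::string::size_type i= 0; i < query.size(); i++)
      {
        if (query[i] == '\'')
          in_string= !in_string;
        else if (query[i] == '?' && !in_string)
          offsets.push_back(i);
      }
      return offsets;
    }
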
@@ -7581,6 +7591,7 @@ keyword_sp:
| TEMPTABLE_SYM {}
| TEXT_SYM {}
| TRANSACTION_SYM {}
+ | TRIGGERS_SYM {}
| TIMESTAMP {}
| TIMESTAMP_ADD {}
| TIMESTAMP_DIFF {}
diff --git a/sql/table.cc b/sql/table.cc
index 220aba27d5b..0324e29abf3 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -702,10 +702,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
key_part->key_part_flag|= HA_BIT_PART;
if (i == 0 && key != primary_key)
- field->flags |=
- ((keyinfo->flags & HA_NOSAME) &&
- field->key_length() ==
- keyinfo->key_length ? UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG);
+ field->flags |= ((keyinfo->flags & HA_NOSAME) &&
+ (keyinfo->key_parts == 1)) ?
+ UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG;
if (i == 0)
field->key_start.set_bit(key);
if (field->key_length() == key_part->length &&
@@ -1343,8 +1342,9 @@ void append_unescaped(String *res, const char *pos, uint length)
/* Create a .frm file */
-File create_frm(THD *thd, register my_string name, uint reclength,
- uchar *fileinfo, HA_CREATE_INFO *create_info, uint keys)
+File create_frm(THD *thd, my_string name, const char *db,
+ const char *table, uint reclength, uchar *fileinfo,
+ HA_CREATE_INFO *create_info, uint keys)
{
register File file;
ulong length;
@@ -1367,7 +1367,7 @@ File create_frm(THD *thd, register my_string name, uint reclength,
*/
set_if_smaller(create_info->raid_chunks, 255);
- if ((file= my_create(name, CREATE_MODE, create_flags, MYF(MY_WME))) >= 0)
+ if ((file= my_create(name, CREATE_MODE, create_flags, MYF(0))) >= 0)
{
uint key_length, tmp_key_length;
uint tmp;
@@ -1414,6 +1414,13 @@ File create_frm(THD *thd, register my_string name, uint reclength,
}
}
}
+ else
+ {
+ if (my_errno == ENOENT)
+ my_error(ER_BAD_DB_ERROR,MYF(0),db);
+ else
+ my_error(ER_CANT_CREATE_TABLE,MYF(0),table,my_errno);
+ }
return (file);
} /* create_frm */
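
The table.cc hunks above change create_frm to take the database and table names and to report its own errors: my_create is called with MYF(0) so it stays silent, and create_frm maps ENOENT (missing database directory) to ER_BAD_DB_ERROR and anything else to ER_CANT_CREATE_TABLE. A minimal sketch of that mapping; the messages are approximations and the helper is not server code:

    #include <cerrno>
    #include <cstdio>

    // Choose an error message for a failed .frm creation, based on errno.
    void report_create_frm_error(int sys_errno, const char *db,
                                 const char *table)
    {
      if (sys_errno == ENOENT)
        std::fprintf(stderr, "Unknown database '%s'\n", db);
      else
        std::fprintf(stderr, "Can't create table '%s' (errno: %d)\n",
                     table, sys_errno);
    }
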
diff --git a/sql/table.h b/sql/table.h
index e5653a1f213..22a35e29e87 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -278,11 +278,26 @@ typedef struct st_foreign_key_info
enum enum_schema_tables
{
- SCH_SCHEMATA= 0, SCH_TABLES, SCH_COLUMNS, SCH_CHARSETS, SCH_COLLATIONS,
- SCH_COLLATION_CHARACTER_SET_APPLICABILITY, SCH_PROCEDURES, SCH_STATISTICS,
- SCH_VIEWS, SCH_USER_PRIVILEGES, SCH_SCHEMA_PRIVILEGES, SCH_TABLE_PRIVILEGES,
- SCH_COLUMN_PRIVILEGES, SCH_TABLE_CONSTRAINTS, SCH_KEY_COLUMN_USAGE,
- SCH_TABLE_NAMES, SCH_OPEN_TABLES, SCH_STATUS, SCH_VARIABLES
+ SCH_CHARSETS= 0,
+ SCH_COLLATIONS,
+ SCH_COLLATION_CHARACTER_SET_APPLICABILITY,
+ SCH_COLUMNS,
+ SCH_COLUMN_PRIVILEGES,
+ SCH_KEY_COLUMN_USAGE,
+ SCH_OPEN_TABLES,
+ SCH_PROCEDURES,
+ SCH_SCHEMATA,
+ SCH_SCHEMA_PRIVILEGES,
+ SCH_STATISTICS,
+ SCH_STATUS,
+ SCH_TABLES,
+ SCH_TABLE_CONSTRAINTS,
+ SCH_TABLE_NAMES,
+ SCH_TABLE_PRIVILEGES,
+ SCH_TRIGGERS,
+ SCH_VARIABLES,
+ SCH_VIEWS,
+ SCH_USER_PRIVILEGES
};
diff --git a/sql/time.cc b/sql/time.cc
index a3ec2283860..5069031081d 100644
--- a/sql/time.cc
+++ b/sql/time.cc
@@ -223,7 +223,7 @@ str_to_datetime_with_warn(const char *str, uint length, TIME *l_time,
    0 - t contains a datetime value which is out of the TIMESTAMP range.
*/
-my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *in_dst_time_gap)
+my_time_t TIME_to_timestamp(THD *thd, const TIME *t, my_bool *in_dst_time_gap)
{
my_time_t timestamp;
diff --git a/sql/tztime.cc b/sql/tztime.cc
index f5111459da2..5a907f0d170 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -880,12 +880,12 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec)
0 in case of error.
*/
static my_time_t
-TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap)
+TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp,
+ my_bool *in_dst_time_gap)
{
my_time_t local_t;
uint saved_seconds;
uint i;
-
DBUG_ENTER("TIME_to_gmt_sec");
/* We need this for correct leap seconds handling */
@@ -962,7 +962,7 @@ class Time_zone_system : public Time_zone
{
public:
virtual my_time_t TIME_to_gmt_sec(const TIME *t,
- bool *in_dst_time_gap) const;
+ my_bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
};
@@ -994,7 +994,7 @@ public:
Corresponding my_time_t value or 0 in case of error
*/
my_time_t
-Time_zone_system::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
+Time_zone_system::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const
{
long not_used;
return my_system_gmt_sec(t, &not_used, in_dst_time_gap);
@@ -1055,7 +1055,7 @@ class Time_zone_utc : public Time_zone
{
public:
virtual my_time_t TIME_to_gmt_sec(const TIME *t,
- bool *in_dst_time_gap) const;
+ my_bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
};
@@ -1081,7 +1081,7 @@ public:
0
*/
my_time_t
-Time_zone_utc::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
+Time_zone_utc::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const
{
  /* Should never be called */
DBUG_ASSERT(0);
@@ -1144,7 +1144,7 @@ class Time_zone_db : public Time_zone
public:
Time_zone_db(TIME_ZONE_INFO *tz_info_arg, const String * tz_name_arg);
virtual my_time_t TIME_to_gmt_sec(const TIME *t,
- bool *in_dst_time_gap) const;
+ my_bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
private:
@@ -1193,7 +1193,7 @@ Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg,
Corresponding my_time_t value or 0 in case of error
*/
my_time_t
-Time_zone_db::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
+Time_zone_db::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const
{
return ::TIME_to_gmt_sec(t, tz_info, in_dst_time_gap);
}
@@ -1240,7 +1240,7 @@ class Time_zone_offset : public Time_zone
public:
Time_zone_offset(long tz_offset_arg);
virtual my_time_t TIME_to_gmt_sec(const TIME *t,
- bool *in_dst_time_gap) const;
+ my_bool *in_dst_time_gap) const;
virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const;
virtual const String * get_name() const;
/*
@@ -1292,7 +1292,7 @@ Time_zone_offset::Time_zone_offset(long tz_offset_arg):
Corresponding my_time_t value or 0 in case of error
*/
my_time_t
-Time_zone_offset::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const
+Time_zone_offset::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const
{
return sec_since_epoch(t->year, t->month, t->day,
t->hour, t->minute, t->second) -
@@ -2549,8 +2549,6 @@ main(int argc, char **argv)
time_t t, t1, t2;
char fullname[FN_REFLEN+1];
char *str_end;
- long not_used;
- bool not_used_2;
MEM_ROOT tz_storage;
MY_INIT(argv[0]);
@@ -2660,14 +2658,21 @@ main(int argc, char **argv)
dates.
*/
for (time_tmp.year= 1980; time_tmp.year < 2010; time_tmp.year++)
+ {
for (time_tmp.month= 1; time_tmp.month < 13; time_tmp.month++)
+ {
for (time_tmp.day= 1;
time_tmp.day < mon_lengths[isleap(time_tmp.year)][time_tmp.month-1];
time_tmp.day++)
+ {
for (time_tmp.hour= 0; time_tmp.hour < 24; time_tmp.hour++)
+ {
for (time_tmp.minute= 0; time_tmp.minute < 60; time_tmp.minute+= 5)
+ {
for (time_tmp.second=0; time_tmp.second<60; time_tmp.second+=25)
{
+ long not_used;
+ my_bool not_used_2;
t= (time_t)my_system_gmt_sec(&time_tmp, &not_used, &not_used_2);
t1= (time_t)TIME_to_gmt_sec(&time_tmp, &tz_info, &not_used_2);
if (t != t1)
@@ -2699,6 +2704,11 @@ main(int argc, char **argv)
return 1;
}
}
+ }
+ }
+ }
+ }
+ }
printf("TIME_to_gmt_sec = my_system_gmt_sec for test range\n");
diff --git a/sql/tztime.h b/sql/tztime.h
index cbf359e8961..a168fe4fb73 100644
--- a/sql/tztime.h
+++ b/sql/tztime.h
@@ -37,7 +37,7 @@ public:
    falls into the spring time-gap (or leaves it untouched otherwise).
*/
virtual my_time_t TIME_to_gmt_sec(const TIME *t,
- bool *in_dst_time_gap) const = 0;
+ my_bool *in_dst_time_gap) const = 0;
/*
Converts time in my_time_t representation to local time in
broken down TIME representation.
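
The tztime changes above switch the in_dst_time_gap out-parameter from bool to my_bool throughout the Time_zone hierarchy, presumably so the C++ code matches the C-style my_bool used elsewhere (my_system_gmt_sec and friends) instead of mixing it with the implementation-defined C++ bool. A tiny sketch of the distinction; the typedef is an assumption based on the historical definition in my_global.h:

    // Assumption: my_bool has historically been a plain char.
    typedef char my_bool;

    // Writing through a my_bool* touches exactly one byte; a C++ bool* is not
    // guaranteed to have the same size or representation, so the two pointer
    // types should not be mixed across function boundaries.
    void mark_dst_gap(my_bool *in_dst_time_gap)
    {
      *in_dst_time_gap= 1;
    }
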
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 7f170b3ef87..a89d89426a6 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -55,6 +55,8 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
mysql_create_frm()
thd Thread handler
file_name Name of file (including database and .frm)
+ db Name of database
+ table Name of table
create_info create info parameters
create_fields Fields to create
keys number of keys to create
@@ -67,6 +69,7 @@ static bool make_empty_rec(THD *thd, int file, enum db_type table_type,
*/
bool mysql_create_frm(THD *thd, my_string file_name,
+ const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
uint keys, KEY *key_info,
@@ -113,7 +116,7 @@ bool mysql_create_frm(THD *thd, my_string file_name,
}
reclength=uint2korr(forminfo+266);
- if ((file=create_frm(thd, file_name, reclength, fileinfo,
+ if ((file=create_frm(thd, file_name, db, table, reclength, fileinfo,
create_info, keys)) < 0)
{
my_free((gptr) screen_buff,MYF(0));
@@ -211,9 +214,11 @@ err3:
Create a frm (table definition) file and the tables
SYNOPSIS
- mysql_create_frm()
+ rea_create_table()
thd Thread handler
file_name Name of file (including database and .frm)
+ db Name of database
+ table Name of table
create_info create info parameters
create_fields Fields to create
keys number of keys to create
@@ -226,13 +231,14 @@ err3:
*/
int rea_create_table(THD *thd, my_string file_name,
+ const char *db, const char *table,
HA_CREATE_INFO *create_info,
List<create_field> &create_fields,
uint keys, KEY *key_info)
{
DBUG_ENTER("rea_create_table");
- if (mysql_create_frm(thd, file_name, create_info,
+ if (mysql_create_frm(thd, file_name, db, table, create_info,
create_fields, keys, key_info, NULL))
DBUG_RETURN(1);
if (!create_info->frm_only && ha_create_table(file_name,create_info,0))
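
The unireg.cc changes thread the database and table names down the whole .frm creation chain (rea_create_table -> mysql_create_frm -> create_frm) so the lowest layer can report which database or table a failure concerns. Illustrative stubs only, to make the data flow explicit; these are not the server's signatures:

    #include <cstdio>

    static int create_frm_stub(const char *name, const char *db,
                               const char *table)
    {
      std::printf("create_frm(%s) for %s.%s\n", name, db, table);
      return 0;                              // would return the file descriptor
    }

    static int mysql_create_frm_stub(const char *name, const char *db,
                                     const char *table)
    {
      return create_frm_stub(name, db, table);
    }

    int rea_create_table_stub(const char *name, const char *db,
                              const char *table)
    {
      return mysql_create_frm_stub(name, db, table);
    }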