author     unknown <msvensson@neptunus.(none)>  2007-02-06 15:46:17 +0100
committer  unknown <msvensson@neptunus.(none)>  2007-02-06 15:46:17 +0100
commit     8c6fe3587cc534bf084181d6c2308edfab913190 (patch)
tree       fc2a41854c7dea15d518e7c0aeaed54f30988795 /sql
parent     aff6c2e72318b959899b5257cdc08b8f0d4f71bb (diff)
parent     c52165de13ca62c2a596ea05c01e07807432d605 (diff)
download   mariadb-git-8c6fe3587cc534bf084181d6c2308edfab913190.tar.gz
Merge neptunus.(none):/home/msvensson/mysql/mysql-5.1
into neptunus.(none):/home/msvensson/mysql/mysql-5.1-maint

BitKeeper/etc/ignore: auto-union
Makefile.am: Auto merged
cmd-line-utils/readline/display.c: Auto merged
configure.in: Auto merged
extra/yassl/include/buffer.hpp: Auto merged
extra/yassl/include/crypto_wrapper.hpp: Auto merged
extra/yassl/include/yassl_imp.hpp: Auto merged
extra/yassl/include/yassl_int.hpp: Auto merged
extra/yassl/src/crypto_wrapper.cpp: Auto merged
extra/yassl/taocrypt/include/algebra.hpp: Auto merged
extra/yassl/taocrypt/include/des.hpp: Auto merged
extra/yassl/taocrypt/include/hash.hpp: Auto merged
extra/yassl/taocrypt/include/hmac.hpp: Auto merged
extra/yassl/taocrypt/include/modarith.hpp: Auto merged
extra/yassl/taocrypt/include/modes.hpp: Auto merged
extra/yassl/taocrypt/include/rsa.hpp: Auto merged
extra/yassl/taocrypt/include/type_traits.hpp: Auto merged
extra/yassl/taocrypt/mySTL/list.hpp: Auto merged
extra/yassl/taocrypt/src/aes.cpp: Auto merged
extra/yassl/taocrypt/src/algebra.cpp: Auto merged
extra/yassl/taocrypt/src/asn.cpp: Auto merged
extra/yassl/testsuite/testsuite.cpp: Auto merged
mysql-test/mysql-test-run.pl: Auto merged
mysql-test/r/mysqltest.result: Auto merged
mysql-test/t/mysqltest.test: Auto merged
mysys/default.c: Auto merged
sql/field.cc: Auto merged
sql/item.cc: Auto merged
sql/item_cmpfunc.cc: Auto merged
sql/item_cmpfunc.h: Auto merged
sql/item_sum.cc: Auto merged
sql/mysql_priv.h: Auto merged
sql/net_serv.cc: Auto merged
sql/repl_failsafe.cc: Auto merged
sql/set_var.cc: Auto merged
sql/set_var.h: Auto merged
sql/slave.cc: Auto merged
sql/sql_parse.cc: Auto merged
sql-common/client.c: Auto merged
sql/sql_select.cc: Auto merged
extra/yassl/taocrypt/test/test.cpp: Manual merge
mysql-test/r/grant.result: Manual merge
mysql-test/r/select.result: Manual merge
mysql-test/t/grant.test: Manual merge
mysql-test/t/select.test: Manual merge
sql/field.h: Manual merge
sql/mysqld.cc: Manual merge
Diffstat (limited to 'sql')
-rw-r--r--  sql/event_data_objects.cc  1
-rw-r--r--  sql/event_db_repository.cc  1
-rw-r--r--  sql/event_queue.cc  2
-rw-r--r--  sql/field.cc  201
-rw-r--r--  sql/field.h  54
-rw-r--r--  sql/filesort.cc  12
-rw-r--r--  sql/gen_lex_hash.cc  22
-rw-r--r--  sql/ha_ndbcluster.cc  407
-rw-r--r--  sql/ha_ndbcluster_binlog.cc  2
-rw-r--r--  sql/ha_partition.cc  7
-rw-r--r--  sql/handler.cc  13
-rw-r--r--  sql/item.cc  67
-rw-r--r--  sql/item.h  36
-rw-r--r--  sql/item_cmpfunc.cc  84
-rw-r--r--  sql/item_cmpfunc.h  28
-rw-r--r--  sql/item_func.cc  38
-rw-r--r--  sql/item_geofunc.cc  5
-rw-r--r--  sql/item_row.h  2
-rw-r--r--  sql/item_strfunc.cc  15
-rw-r--r--  sql/item_subselect.cc  52
-rw-r--r--  sql/item_subselect.h  6
-rw-r--r--  sql/item_sum.cc  25
-rw-r--r--  sql/item_timefunc.cc  19
-rw-r--r--  sql/item_xmlfunc.cc  4
-rw-r--r--  sql/log.cc  84
-rw-r--r--  sql/log.h  8
-rw-r--r--  sql/log_event.cc  25
-rw-r--r--  sql/mysql_priv.h  11
-rw-r--r--  sql/mysqld.cc  162
-rw-r--r--  sql/net_serv.cc  10
-rw-r--r--  sql/opt_range.cc  173
-rw-r--r--  sql/opt_range.h  2
-rw-r--r--  sql/opt_sum.cc  10
-rw-r--r--  sql/partition_info.cc  1
-rw-r--r--  sql/password.c  7
-rw-r--r--  sql/protocol.cc  8
-rw-r--r--  sql/repl_failsafe.cc  12
-rw-r--r--  sql/set_var.cc  11
-rw-r--r--  sql/set_var.h  92
-rw-r--r--  sql/slave.cc  26
-rw-r--r--  sql/slave.h  2
-rw-r--r--  sql/sp.cc  7
-rw-r--r--  sql/sp_head.cc  11
-rw-r--r--  sql/sp_head.h  9
-rw-r--r--  sql/spatial.cc  26
-rw-r--r--  sql/spatial.h  12
-rw-r--r--  sql/sql_base.cc  58
-rw-r--r--  sql/sql_cache.h  6
-rw-r--r--  sql/sql_class.cc  25
-rw-r--r--  sql/sql_class.h  15
-rw-r--r--  sql/sql_delete.cc  8
-rw-r--r--  sql/sql_derived.cc  2
-rw-r--r--  sql/sql_insert.cc  27
-rw-r--r--  sql/sql_lex.cc  37
-rw-r--r--  sql/sql_lex.h  18
-rw-r--r--  sql/sql_load.cc  6
-rw-r--r--  sql/sql_parse.cc  175
-rw-r--r--  sql/sql_partition.cc  5
-rw-r--r--  sql/sql_plugin.cc  11
-rw-r--r--  sql/sql_prepare.cc  13
-rw-r--r--  sql/sql_select.cc  176
-rw-r--r--  sql/sql_servers.cc  11
-rw-r--r--  sql/sql_servers.h  1
-rw-r--r--  sql/sql_show.cc  48
-rw-r--r--  sql/sql_string.cc  19
-rw-r--r--  sql/sql_string.h  3
-rw-r--r--  sql/sql_table.cc  93
-rw-r--r--  sql/sql_trigger.cc  26
-rw-r--r--  sql/sql_trigger.h  5
-rw-r--r--  sql/sql_union.cc  89
-rw-r--r--  sql/sql_update.cc  1
-rw-r--r--  sql/sql_view.cc  8
-rw-r--r--  sql/sql_yacc.yy  84
-rw-r--r--  sql/table.cc  67
-rw-r--r--  sql/table.h  23
-rw-r--r--  sql/tztime.cc  4
-rw-r--r--  sql/unireg.cc  10
77 files changed, 1507 insertions, 1379 deletions
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index 54b043bd916..07575a6d33a 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -1560,7 +1560,6 @@ done:
int
Event_timed::get_create_event(THD *thd, String *buf)
{
- int multipl= 0;
char tmp_buf[2 * STRING_BUFFER_USUAL_SIZE];
String expr_buf(tmp_buf, sizeof(tmp_buf), system_charset_info);
expr_buf.length(0);
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index bcc7d476fff..940930ec4c6 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -518,7 +518,6 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
my_bool create_if_not)
{
int ret= 0;
- CHARSET_INFO *scs= system_charset_info;
TABLE *table= NULL;
char old_db_buf[NAME_LEN+1];
LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
index 45d354ea9b6..068abbe3408 100644
--- a/sql/event_queue.cc
+++ b/sql/event_queue.cc
@@ -139,8 +139,6 @@ bool
Event_queue::init_queue(THD *thd, Event_db_repository *db_repo)
{
bool res;
- struct event_queue_param *event_queue_param_value= NULL;
-
DBUG_ENTER("Event_queue::init_queue");
DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
diff --git a/sql/field.cc b/sql/field.cc
index 02f0990c307..867edc6f9dd 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1415,12 +1415,12 @@ my_decimal* Field_num::val_decimal(my_decimal *decimal_value)
Field_str::Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
- const char *field_name_arg, CHARSET_INFO *charset)
+ const char *field_name_arg, CHARSET_INFO *charset_arg)
:Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg)
{
- field_charset=charset;
- if (charset->state & MY_CS_BINSORT)
+ field_charset= charset_arg;
+ if (charset_arg->state & MY_CS_BINSORT)
flags|=BINARY_FLAG;
field_derivation= DERIVATION_IMPLICIT;
}
@@ -1527,7 +1527,7 @@ bool Field::get_time(TIME *ltime)
Needs to be changed if/when we want to support different time formats
*/
-int Field::store_time(TIME *ltime, timestamp_type type)
+int Field::store_time(TIME *ltime, timestamp_type type_arg)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
char buff[MAX_DATE_STRING_REP_LENGTH];
@@ -2242,12 +2242,12 @@ Field_new_decimal::Field_new_decimal(char *ptr_arg,
Field_new_decimal::Field_new_decimal(uint32 len_arg,
- bool maybe_null,
+ bool maybe_null_arg,
const char *name,
uint8 dec_arg,
bool unsigned_arg)
:Field_num((char*) 0, len_arg,
- maybe_null ? (uchar*) "": 0, 0,
+ maybe_null_arg ? (uchar*) "": 0, 0,
NONE, name, dec_arg, 0, unsigned_arg)
{
precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg);
@@ -2351,7 +2351,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
int Field_new_decimal::store(const char *from, uint length,
- CHARSET_INFO *charset)
+ CHARSET_INFO *charset_arg)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
int err;
@@ -2360,7 +2360,7 @@ int Field_new_decimal::store(const char *from, uint length,
if ((err= str2my_decimal(E_DEC_FATAL_ERROR &
~(E_DEC_OVERFLOW | E_DEC_BAD_NUM),
- from, length, charset, &decimal_value)) &&
+ from, length, charset_arg, &decimal_value)) &&
table->in_use->abort_on_warning)
{
/* Because "from" is not NUL-terminated and we use %s in the ER() */
@@ -2556,7 +2556,7 @@ uint Field_new_decimal::is_equal(create_field *new_field)
(uint) (flags & UNSIGNED_FLAG)) &&
((new_field->flags & AUTO_INCREMENT_FLAG) ==
(uint) (flags & AUTO_INCREMENT_FLAG)) &&
- (new_field->length == max_length()) &&
+ (new_field->length == max_display_length()) &&
(new_field->decimals == dec));
}
@@ -3282,25 +3282,6 @@ void Field_medium::sql_type(String &res) const
** long int
****************************************************************************/
-/*
- A helper function to check whether the next character
- in the string "s" is MINUS SIGN.
-*/
-#ifdef HAVE_CHARSET_ucs2
-static bool test_if_minus(CHARSET_INFO *cs,
- const char *s, const char *e)
-{
- my_wc_t wc;
- return cs->cset->mb_wc(cs, &wc, (uchar*) s, (uchar*) e) > 0 && wc == '-';
-}
-#else
-/*
- If not UCS2 support is compiled then it is easier
-*/
-#define test_if_minus(cs, s, e) (*s == '-')
-#endif
-
-
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
@@ -4912,7 +4893,7 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs)
}
-int Field_time::store_time(TIME *ltime, timestamp_type type)
+int Field_time::store_time(TIME *ltime, timestamp_type time_type)
{
long tmp= ((ltime->month ? 0 : ltime->day * 24L) + ltime->hour) * 10000L +
(ltime->minute * 100 + ltime->second);
@@ -5523,12 +5504,13 @@ int Field_newdate::store(longlong nr, bool unsigned_val)
}
-int Field_newdate::store_time(TIME *ltime,timestamp_type type)
+int Field_newdate::store_time(TIME *ltime,timestamp_type time_type)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
long tmp;
int error= 0;
- if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
+ if (time_type == MYSQL_TIMESTAMP_DATE ||
+ time_type == MYSQL_TIMESTAMP_DATETIME)
{
tmp=ltime->year*16*32+ltime->month*32+ltime->day;
if (check_date(ltime, tmp != 0,
@@ -5748,7 +5730,7 @@ int Field_datetime::store(longlong nr, bool unsigned_val)
}
-int Field_datetime::store_time(TIME *ltime,timestamp_type type)
+int Field_datetime::store_time(TIME *ltime,timestamp_type time_type)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
longlong tmp;
@@ -5757,7 +5739,8 @@ int Field_datetime::store_time(TIME *ltime,timestamp_type type)
We don't perform range checking here since values stored in TIME
structure always fit into DATETIME range.
*/
- if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
+ if (time_type == MYSQL_TIMESTAMP_DATE ||
+ time_type == MYSQL_TIMESTAMP_DATETIME)
{
tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+
(ltime->hour*10000L+ltime->minute*100+ltime->second));
@@ -6136,32 +6119,32 @@ int Field_str::store(double nr)
char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
uint length;
bool use_scientific_notation= TRUE;
- uint char_length= field_length / charset()->mbmaxlen;
+ uint local_char_length= field_length / charset()->mbmaxlen;
/*
Check fabs(nr) against longest value that can be stored in field,
which depends on whether the value is < 1 or not, and negative or not
*/
double anr= fabs(nr);
int neg= (nr < 0.0) ? 1 : 0;
- if (char_length > 4 && char_length < 32 &&
- (anr < 1.0 ? anr > 1/(log_10[max(0,(int) char_length-neg-2)]) /* -2 for "0." */
- : anr < log_10[char_length-neg]-1))
+ if (local_char_length > 4 && local_char_length < 32 &&
+ (anr < 1.0 ? anr > 1/(log_10[max(0,(int) local_char_length-neg-2)]) /* -2 for "0." */
+ : anr < log_10[local_char_length-neg]-1))
use_scientific_notation= FALSE;
length= (uint) my_sprintf(buff, (buff, "%-.*g",
(use_scientific_notation ?
- max(0, (int)char_length-neg-5) :
- char_length),
+ max(0, (int)local_char_length-neg-5) :
+ local_char_length),
nr));
/*
+1 below is because "precision" in %g above means the
max. number of significant digits, not the output width.
Thus the width can be larger than number of significant digits by 1
(for decimal point)
- the test for char_length < 5 is for extreme cases,
+ the test for local_char_length < 5 is for extreme cases,
like inserting 500.0 in char(1)
*/
- DBUG_ASSERT(char_length < 5 || length <= char_length+1);
+ DBUG_ASSERT(local_char_length < 5 || length <= local_char_length+1);
return store((const char *) buff, length, charset());
}
@@ -6182,7 +6165,7 @@ uint Field_str::is_equal(create_field *new_field)
return ((new_field->sql_type == real_type()) &&
new_field->charset == field_charset &&
- new_field->length == max_length());
+ new_field->length == max_display_length());
}
@@ -6344,10 +6327,11 @@ void Field_string::sql_type(String &res) const
char *Field_string::pack(char *to, const char *from, uint max_length)
{
uint length= min(field_length,max_length);
- uint char_length= max_length/field_charset->mbmaxlen;
- if (length > char_length)
- char_length= my_charpos(field_charset, from, from+length, char_length);
- set_if_smaller(length, char_length);
+ uint local_char_length= max_length/field_charset->mbmaxlen;
+ if (length > local_char_length)
+ local_char_length= my_charpos(field_charset, from, from+length,
+ local_char_length);
+ set_if_smaller(length, local_char_length);
while (length && from[length-1] == ' ')
length--;
*to++= (char) (uchar) length;
@@ -6431,15 +6415,15 @@ int Field_string::pack_cmp(const char *a, const char *b, uint length,
int Field_string::pack_cmp(const char *key, uint length,
my_bool insert_or_update)
{
- uint row_length, key_length;
+ uint row_length, local_key_length;
char *end;
if (length > 255)
{
- key_length= uint2korr(key);
+ local_key_length= uint2korr(key);
key+= 2;
}
else
- key_length= (uint) (uchar) *key++;
+ local_key_length= (uint) (uchar) *key++;
/* Only use 'length' of key, not field_length */
end= ptr + length;
@@ -6449,7 +6433,7 @@ int Field_string::pack_cmp(const char *key, uint length,
return field_charset->coll->strnncollsp(field_charset,
(const uchar*) ptr, row_length,
- (const uchar*) key, key_length,
+ (const uchar*) key, local_key_length,
insert_or_update);
}
@@ -6641,11 +6625,11 @@ int Field_varstring::cmp_max(const char *a_ptr, const char *b_ptr,
int Field_varstring::key_cmp(const byte *key_ptr, uint max_key_length)
{
uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
- uint char_length= max_key_length / field_charset->mbmaxlen;
+ uint local_char_length= max_key_length / field_charset->mbmaxlen;
- char_length= my_charpos(field_charset, ptr + length_bytes,
- ptr + length_bytes + length, char_length);
- set_if_smaller(length, char_length);
+ local_char_length= my_charpos(field_charset, ptr + length_bytes,
+ ptr + length_bytes + length, local_char_length);
+ set_if_smaller(length, local_char_length);
return field_charset->coll->strnncollsp(field_charset,
(const uchar*) ptr + length_bytes,
length,
@@ -6755,13 +6739,14 @@ char *Field_varstring::pack(char *to, const char *from, uint max_length)
char *Field_varstring::pack_key(char *to, const char *key, uint max_length)
{
uint length= length_bytes == 1 ? (uint) (uchar) *key : uint2korr(key);
- uint char_length= ((field_charset->mbmaxlen > 1) ?
+ uint local_char_length= ((field_charset->mbmaxlen > 1) ?
max_length/field_charset->mbmaxlen : max_length);
key+= length_bytes;
- if (length > char_length)
+ if (length > local_char_length)
{
- char_length= my_charpos(field_charset, key, key+length, char_length);
- set_if_smaller(length, char_length);
+ local_char_length= my_charpos(field_charset, key, key+length,
+ local_char_length);
+ set_if_smaller(length, local_char_length);
}
*to++= (char) (length & 255);
if (max_length > 255)
@@ -6857,11 +6842,12 @@ const char *Field_varstring::unpack(char *to, const char *from)
}
-int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length,
+int Field_varstring::pack_cmp(const char *a, const char *b,
+ uint key_length_arg,
my_bool insert_or_update)
{
uint a_length, b_length;
- if (key_length > 255)
+ if (key_length_arg > 255)
{
a_length=uint2korr(a); a+= 2;
b_length=uint2korr(b); b+= 2;
@@ -6878,26 +6864,28 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length,
}
-int Field_varstring::pack_cmp(const char *b, uint key_length,
+int Field_varstring::pack_cmp(const char *b, uint key_length_arg,
my_bool insert_or_update)
{
char *a= ptr+ length_bytes;
uint a_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
uint b_length;
- uint char_length= ((field_charset->mbmaxlen > 1) ?
- key_length / field_charset->mbmaxlen : key_length);
+ uint local_char_length= ((field_charset->mbmaxlen > 1) ?
+ key_length_arg / field_charset->mbmaxlen :
+ key_length_arg);
- if (key_length > 255)
+ if (key_length_arg > 255)
{
b_length=uint2korr(b); b+= HA_KEY_BLOB_LENGTH;
}
else
b_length= (uint) (uchar) *b++;
- if (a_length > char_length)
+ if (a_length > local_char_length)
{
- char_length= my_charpos(field_charset, a, a+a_length, char_length);
- set_if_smaller(a_length, char_length);
+ local_char_length= my_charpos(field_charset, a, a+a_length,
+ local_char_length);
+ set_if_smaller(a_length, local_char_length);
}
return field_charset->coll->strnncollsp(field_charset,
@@ -6922,13 +6910,15 @@ uint Field_varstring::max_packed_col_length(uint max_length)
}
-void Field_varstring::get_key_image(char *buff, uint length, imagetype type)
+void Field_varstring::get_key_image(char *buff, uint length,
+ imagetype type_arg)
{
uint f_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
- uint char_length= length / field_charset->mbmaxlen;
+ uint local_char_length= length / field_charset->mbmaxlen;
char *pos= ptr+length_bytes;
- char_length= my_charpos(field_charset, pos, pos + f_length, char_length);
- set_if_smaller(f_length, char_length);
+ local_char_length= my_charpos(field_charset, pos, pos + f_length,
+ local_char_length);
+ set_if_smaller(f_length, local_char_length);
/* Key is always stored with 2 bytes */
int2store(buff,f_length);
memcpy(buff+HA_KEY_BLOB_LENGTH, pos, f_length);
@@ -7009,11 +6999,11 @@ uint Field_varstring::is_equal(create_field *new_field)
if (new_field->sql_type == real_type() &&
new_field->charset == field_charset)
{
- if (new_field->length == max_length())
+ if (new_field->length == max_display_length())
return IS_EQUAL_YES;
- if (new_field->length > max_length() &&
- ((new_field->length <= 255 && max_length() <= 255) ||
- (new_field->length > 255 && max_length() > 255)))
+ if (new_field->length > max_display_length() &&
+ ((new_field->length <= 255 && max_display_length() <= 255) ||
+ (new_field->length > 255 && max_display_length() > 255)))
return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer variable length
}
return IS_EQUAL_NO;
@@ -7344,13 +7334,13 @@ int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr,
/* The following is used only when comparing a key */
-void Field_blob::get_key_image(char *buff, uint length, imagetype type)
+void Field_blob::get_key_image(char *buff, uint length, imagetype type_arg)
{
uint32 blob_length= get_length(ptr);
char *blob;
#ifdef HAVE_SPATIAL
- if (type == itMBR)
+ if (type_arg == itMBR)
{
const char *dummy;
MBR mbr;
@@ -7378,10 +7368,10 @@ void Field_blob::get_key_image(char *buff, uint length, imagetype type)
#endif /*HAVE_SPATIAL*/
get_ptr(&blob);
- uint char_length= length / field_charset->mbmaxlen;
- char_length= my_charpos(field_charset, blob, blob + blob_length,
- char_length);
- set_if_smaller(blob_length, char_length);
+ uint local_char_length= length / field_charset->mbmaxlen;
+ local_char_length= my_charpos(field_charset, blob, blob + blob_length,
+ local_char_length);
+ set_if_smaller(blob_length, local_char_length);
if ((uint32) length > blob_length)
{
@@ -7410,9 +7400,10 @@ int Field_blob::key_cmp(const byte *key_ptr, uint max_key_length)
uint blob_length=get_length(ptr);
memcpy_fixed(&blob1,ptr+packlength,sizeof(char*));
CHARSET_INFO *cs= charset();
- uint char_length= max_key_length / cs->mbmaxlen;
- char_length= my_charpos(cs, blob1, blob1+blob_length, char_length);
- set_if_smaller(blob_length, char_length);
+ uint local_char_length= max_key_length / cs->mbmaxlen;
+ local_char_length= my_charpos(cs, blob1, blob1+blob_length,
+ local_char_length);
+ set_if_smaller(blob_length, local_char_length);
return Field_blob::cmp(blob1, blob_length,
(char*) key_ptr+HA_KEY_BLOB_LENGTH,
uint2korr(key_ptr));
@@ -7534,11 +7525,11 @@ const char *Field_blob::unpack(char *to, const char *from)
/* Keys for blobs are like keys on varchars */
-int Field_blob::pack_cmp(const char *a, const char *b, uint key_length,
+int Field_blob::pack_cmp(const char *a, const char *b, uint key_length_arg,
my_bool insert_or_update)
{
uint a_length, b_length;
- if (key_length > 255)
+ if (key_length_arg > 255)
{
a_length=uint2korr(a); a+=2;
b_length=uint2korr(b); b+=2;
@@ -7555,19 +7546,19 @@ int Field_blob::pack_cmp(const char *a, const char *b, uint key_length,
}
-int Field_blob::pack_cmp(const char *b, uint key_length,
+int Field_blob::pack_cmp(const char *b, uint key_length_arg,
my_bool insert_or_update)
{
char *a;
+ uint a_length, b_length;
memcpy_fixed(&a,ptr+packlength,sizeof(char*));
if (!a)
- return key_length > 0 ? -1 : 0;
- uint a_length=get_length(ptr);
- uint b_length;
+ return key_length_arg > 0 ? -1 : 0;
- if (key_length > 255)
+ a_length= get_length(ptr);
+ if (key_length_arg > 255)
{
- b_length=uint2korr(b); b+=2;
+ b_length= uint2korr(b); b+=2;
}
else
b_length= (uint) (uchar) *b++;
@@ -7584,13 +7575,14 @@ char *Field_blob::pack_key(char *to, const char *from, uint max_length)
char *save=ptr;
ptr=(char*) from;
uint32 length=get_length(); // Length of from string
- uint char_length= ((field_charset->mbmaxlen > 1) ?
+ uint local_char_length= ((field_charset->mbmaxlen > 1) ?
max_length/field_charset->mbmaxlen : max_length);
if (length)
get_ptr((char**) &from);
- if (length > char_length)
- char_length= my_charpos(field_charset, from, from+length, char_length);
- set_if_smaller(length, char_length);
+ if (length > local_char_length)
+ local_char_length= my_charpos(field_charset, from, from+length,
+ local_char_length);
+ set_if_smaller(length, local_char_length);
*to++= (uchar) length;
if (max_length > 255) // 2 byte length
*to++= (uchar) (length >> 8);
@@ -7677,7 +7669,7 @@ uint Field_blob::max_packed_col_length(uint max_length)
#ifdef HAVE_SPATIAL
-void Field_geom::get_key_image(char *buff, uint length, imagetype type)
+void Field_geom::get_key_image(char *buff, uint length, imagetype type_arg)
{
char *blob;
const char *dummy;
@@ -7775,7 +7767,7 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs)
goto err;
wkb_type= uint4korr(from + SRID_SIZE + 1);
if (wkb_type < (uint32) Geometry::wkb_point ||
- wkb_type > (uint32) Geometry::wkb_end)
+ wkb_type > (uint32) Geometry::wkb_last)
goto err;
Field_blob::store_length(length);
if (table->copy_blobs || length <= MAX_FIELD_WIDTH)
@@ -8204,7 +8196,7 @@ uint Field_num::is_equal(create_field *new_field)
UNSIGNED_FLAG)) &&
((new_field->flags & AUTO_INCREMENT_FLAG) ==
(uint) (flags & AUTO_INCREMENT_FLAG)) &&
- (new_field->length <= max_length()));
+ (new_field->length <= max_display_length()));
}
@@ -8485,7 +8477,7 @@ int Field_bit::cmp_offset(uint row_offset)
}
-void Field_bit::get_key_image(char *buff, uint length, imagetype type)
+void Field_bit::get_key_image(char *buff, uint length, imagetype type_arg)
{
if (bit_len)
{
@@ -8659,7 +8651,7 @@ void create_field::create_length_to_internal_length(void)
void create_field::init_for_tmp_table(enum_field_types sql_type_arg,
- uint32 length_arg, uint32 decimals,
+ uint32 length_arg, uint32 decimals_arg,
bool maybe_null, bool is_unsigned)
{
field_name= "";
@@ -8670,7 +8662,7 @@ void create_field::init_for_tmp_table(enum_field_types sql_type_arg,
charset= &my_charset_bin;
geom_type= Field::GEOM_GEOMETRY;
pack_flag= (FIELDFLAG_NUMBER |
- ((decimals & FIELDFLAG_MAX_DEC) << FIELDFLAG_DEC_SHIFT) |
+ ((decimals_arg & FIELDFLAG_MAX_DEC) << FIELDFLAG_DEC_SHIFT) |
(maybe_null ? FIELDFLAG_MAYBE_NULL : 0) |
(is_unsigned ? 0 : FIELDFLAG_DECIMAL));
}
@@ -9378,12 +9370,13 @@ create_field::create_field(Field *old_field,Field *orig_field)
maximum possible display length for blob
SYNOPSIS
- Field_blob::max_length()
+ Field_blob::max_display_length()
RETURN
length
*/
-uint32 Field_blob::max_length()
+
+uint32 Field_blob::max_display_length()
{
switch (packlength)
{
diff --git a/sql/field.h b/sql/field.h
index 37f15db6fe2..581e99bd1bc 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -158,12 +158,12 @@ public:
virtual void reset_fields() {}
virtual void set_default()
{
- my_ptrdiff_t offset = (my_ptrdiff_t) (table->s->default_values -
+ my_ptrdiff_t l_offset= (my_ptrdiff_t) (table->s->default_values -
table->record[0]);
- memcpy(ptr, ptr + offset, pack_length());
+ memcpy(ptr, ptr + l_offset, pack_length());
if (null_ptr)
*null_ptr= ((*null_ptr & (uchar) ~null_bit) |
- null_ptr[offset] & null_bit);
+ null_ptr[l_offset] & null_bit);
}
virtual bool binary() const { return 1; }
virtual bool zero_pack() const { return 1; }
@@ -276,7 +276,7 @@ public:
{ memcpy(buff,ptr,length); }
inline void set_image(char *buff,uint length, CHARSET_INFO *cs)
{ memcpy(ptr,buff,length); }
- virtual void get_key_image(char *buff, uint length, imagetype type)
+ virtual void get_key_image(char *buff, uint length, imagetype type_arg)
{ get_image(buff,length, &my_charset_bin); }
virtual void set_key_image(char *buff,uint length)
{ set_image(buff,length, &my_charset_bin); }
@@ -352,10 +352,10 @@ public:
virtual CHARSET_INFO *charset(void) const { return &my_charset_bin; }
virtual CHARSET_INFO *sort_charset(void) const { return charset(); }
virtual bool has_charset(void) const { return FALSE; }
- virtual void set_charset(CHARSET_INFO *charset) { }
+ virtual void set_charset(CHARSET_INFO *charset_arg) { }
virtual enum Derivation derivation(void) const
{ return DERIVATION_IMPLICIT; }
- virtual void set_derivation(enum Derivation derivation) { }
+ virtual void set_derivation(enum Derivation derivation_arg) { }
bool set_warning(MYSQL_ERROR::enum_warning_level, unsigned int code,
int cuted_increment);
bool check_int(const char *str, int length, const char *int_end,
@@ -380,7 +380,7 @@ public:
}
/* maximum possible display length */
- virtual uint32 max_length()= 0;
+ virtual uint32 max_display_length()= 0;
virtual uint is_equal(create_field *new_field);
/* convert decimal to longlong with overflow check */
@@ -464,12 +464,12 @@ public:
int store(const char *to,uint length,CHARSET_INFO *cs)=0;
uint size_of() const { return sizeof(*this); }
CHARSET_INFO *charset(void) const { return field_charset; }
- void set_charset(CHARSET_INFO *charset) { field_charset=charset; }
+ void set_charset(CHARSET_INFO *charset_arg) { field_charset= charset_arg; }
enum Derivation derivation(void) const { return field_derivation; }
virtual void set_derivation(enum Derivation derivation_arg)
{ field_derivation= derivation_arg; }
bool binary() const { return field_charset == &my_charset_bin; }
- uint32 max_length() { return field_length; }
+ uint32 max_display_length() { return field_length; }
friend class create_field;
my_decimal *val_decimal(my_decimal *);
virtual bool str_needs_quotes() { return TRUE; }
@@ -484,9 +484,9 @@ class Field_longstr :public Field_str
public:
Field_longstr(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
- const char *field_name_arg, CHARSET_INFO *charset)
+ const char *field_name_arg, CHARSET_INFO *charset_arg)
:Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg,
- field_name_arg, charset)
+ field_name_arg, charset_arg)
{}
int store_decimal(const my_decimal *d);
@@ -532,7 +532,7 @@ public:
void overflow(bool negative);
bool zero_pack() const { return 0; }
void sql_type(String &str) const;
- uint32 max_length() { return field_length; }
+ uint32 max_display_length() { return field_length; }
};
@@ -574,7 +574,7 @@ public:
void sort_string(char *buff, uint length);
bool zero_pack() const { return 0; }
void sql_type(String &str) const;
- uint32 max_length() { return field_length; }
+ uint32 max_display_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
uint32 pack_length() const { return (uint32) bin_size; }
uint is_equal(create_field *new_field);
@@ -607,7 +607,7 @@ public:
void sort_string(char *buff,uint length);
uint32 pack_length() const { return 1; }
void sql_type(String &str) const;
- uint32 max_length() { return 4; }
+ uint32 max_display_length() { return 4; }
};
@@ -642,7 +642,7 @@ public:
void sort_string(char *buff,uint length);
uint32 pack_length() const { return 2; }
void sql_type(String &str) const;
- uint32 max_length() { return 6; }
+ uint32 max_display_length() { return 6; }
};
@@ -672,7 +672,7 @@ public:
void sort_string(char *buff,uint length);
uint32 pack_length() const { return 3; }
void sql_type(String &str) const;
- uint32 max_length() { return 8; }
+ uint32 max_display_length() { return 8; }
};
@@ -707,7 +707,7 @@ public:
void sort_string(char *buff,uint length);
uint32 pack_length() const { return 4; }
void sql_type(String &str) const;
- uint32 max_length() { return 11; }
+ uint32 max_display_length() { return 11; }
};
@@ -749,7 +749,7 @@ public:
uint32 pack_length() const { return 8; }
void sql_type(String &str) const;
bool can_be_compared_as_longlong() const { return TRUE; }
- uint32 max_length() { return 20; }
+ uint32 max_display_length() { return 20; }
};
#endif
@@ -783,7 +783,7 @@ public:
void sort_string(char *buff,uint length);
uint32 pack_length() const { return sizeof(float); }
void sql_type(String &str) const;
- uint32 max_length() { return 24; }
+ uint32 max_display_length() { return 24; }
};
@@ -825,8 +825,8 @@ public:
void sort_string(char *buff,uint length);
uint32 pack_length() const { return sizeof(double); }
void sql_type(String &str) const;
- uint32 max_length() { return 53; }
uint size_of() const { return sizeof(*this); }
+ uint32 max_display_length() { return 53; }
};
@@ -858,7 +858,7 @@ public:
uint32 pack_length() const { return 0; }
void sql_type(String &str) const;
uint size_of() const { return sizeof(*this); }
- uint32 max_length() { return 4; }
+ uint32 max_display_length() { return 4; }
};
@@ -1239,10 +1239,10 @@ public:
packlength= 4;
if (set_packlength)
{
- uint32 char_length= len_arg/cs->mbmaxlen;
- packlength= char_length <= 255 ? 1 :
- char_length <= 65535 ? 2 :
- char_length <= 16777215 ? 3 : 4;
+ uint32 l_char_length= len_arg/cs->mbmaxlen;
+ packlength= l_char_length <= 255 ? 1 :
+ l_char_length <= 65535 ? 2 :
+ l_char_length <= 16777215 ? 3 : 4;
}
}
enum_field_types type() const { return MYSQL_TYPE_BLOB;}
@@ -1326,7 +1326,7 @@ public:
uint size_of() const { return sizeof(*this); }
bool has_charset(void) const
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
- uint32 max_length();
+ uint32 max_display_length();
};
@@ -1455,7 +1455,7 @@ public:
enum_field_types type() const { return MYSQL_TYPE_BIT; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; }
uint32 key_length() const { return (uint32) (field_length + 7) / 8; }
- uint32 max_length() { return field_length; }
+ uint32 max_display_length() { return field_length; }
uint size_of() const { return sizeof(*this); }
Item_result result_type () const { return INT_RESULT; }
int reset(void) { bzero(ptr, bytes_in_rec); return 0; }
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 46ef9c9a553..2f9a96472ca 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -104,7 +104,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
uint maxbuffer;
BUFFPEK *buffpek;
ha_rows records= HA_POS_ERROR;
- uchar **sort_keys;
+ uchar **sort_keys= 0;
IO_CACHE tempfile, buffpek_pointers, *selected_records_file, *outfile;
SORTPARAM param;
bool multi_byte_charset;
@@ -434,7 +434,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
byte *ref_pos,*next_pos,ref_buff[MAX_REFLENGTH];
my_off_t record;
TABLE *sort_form;
- volatile THD::killed_state *killed= &current_thd->killed;
+ THD *thd= current_thd;
+ volatile THD::killed_state *killed= &thd->killed;
handler *file;
MY_BITMAP *save_read_set, *save_write_set;
DBUG_ENTER("find_all_keys");
@@ -547,6 +548,9 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
}
else
file->unlock_row();
+ /* It does not make sense to read more keys in case of a fatal error */
+ if (thd->net.report_error)
+ DBUG_RETURN(HA_POS_ERROR);
}
if (quick_select)
{
@@ -886,12 +890,14 @@ static void make_sortkey(register SORTPARAM *param,
}
else
{
- uchar *end= (uchar*) field->pack((char *) to, field->ptr);
#ifdef HAVE_purify
+ uchar *end= (uchar*) field->pack((char *) to, field->ptr);
uint length= (uint) ((to + addonf->length) - end);
DBUG_ASSERT((int) length >= 0);
if (length)
bzero(end, length);
+#else
+ (void) field->pack((char *) to, field->ptr);
#endif
}
to+= addonf->length;
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 7abdb5f488c..2d78999017a 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -445,10 +445,24 @@ int main(int argc,char **argv)
/* Broken up to indicate that it's not advice to you, gentle reader. */
printf("/*\n\n Do " "not " "edit " "this " "file " "directly!\n\n*/\n");
- printf("/* Copyright (C) 2001-2004 MySQL AB\n\
- This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\
- and you are welcome to modify and redistribute it under the GPL license\n\
- \n*/\n\n");
+ printf("\
+/* Copyright (C) 2001-2004 MySQL AB\n\
+\n\
+ This program is free software; you can redistribute it and/or modify\n\
+ it under the terms of the GNU General Public License as published by\n\
+ the Free Software Foundation; version 2 of the License.\n\
+\n\
+ This program is distributed in the hope that it will be useful,\n\
+ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\
+ GNU General Public License for more details.\n\
+\n\
+ You should have received a copy of the GNU General Public License\n\
+ along with this program; see the file COPYING. If not, write to the\n\
+ Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston\n\
+ MA 02110-1301 USA. */\n\
+\n\
+");
/* Broken up to indicate that it's not advice to you, gentle reader. */
printf("/* Do " "not " "edit " "this " "file! This is generated by "
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 21697be83aa..36fe6457167 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -150,7 +150,6 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
#ifdef HAVE_NDB_BINLOG
static int rename_share(NDB_SHARE *share, const char *new_key);
#endif
-static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len);
static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*, const NDBTAB *,
struct Ndb_statistics *);
@@ -443,15 +442,15 @@ ha_rows ha_ndbcluster::records()
{
ha_rows retval;
DBUG_ENTER("ha_ndbcluster::records");
- struct Ndb_local_table_statistics *info= m_table_info;
+ struct Ndb_local_table_statistics *local_info= m_table_info;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
- info->no_uncommitted_rows_count));
+ local_info->no_uncommitted_rows_count));
Ndb *ndb= get_ndb();
ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
- if (ndb_get_table_statistics(this, true, ndb, m_table, &stat) == 0)
+ if (ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat) == 0)
{
retval= stat.row_count;
}
@@ -462,9 +461,9 @@ ha_rows ha_ndbcluster::records()
THD *thd= current_thd;
if (get_thd_ndb(thd)->error)
- info->no_uncommitted_rows_count= 0;
+ local_info->no_uncommitted_rows_count= 0;
- DBUG_RETURN(retval + info->no_uncommitted_rows_count);
+ DBUG_RETURN(retval + local_info->no_uncommitted_rows_count);
}
int ha_ndbcluster::records_update()
@@ -474,30 +473,29 @@ int ha_ndbcluster::records_update()
DBUG_ENTER("ha_ndbcluster::records_update");
int result= 0;
- struct Ndb_local_table_statistics *info= m_table_info;
+ struct Ndb_local_table_statistics *local_info= m_table_info;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
- info->no_uncommitted_rows_count));
- // if (info->records == ~(ha_rows)0)
+ local_info->no_uncommitted_rows_count));
{
Ndb *ndb= get_ndb();
struct Ndb_statistics stat;
ndb->setDatabaseName(m_dbname);
- result= ndb_get_table_statistics(this, true, ndb, m_table, &stat);
+ result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat);
if (result == 0)
{
stats.mean_rec_length= stat.row_size;
stats.data_file_length= stat.fragment_memory;
- info->records= stat.row_count;
+ local_info->records= stat.row_count;
}
}
{
THD *thd= current_thd;
if (get_thd_ndb(thd)->error)
- info->no_uncommitted_rows_count= 0;
+ local_info->no_uncommitted_rows_count= 0;
}
- if(result==0)
- stats.records= info->records+ info->no_uncommitted_rows_count;
+ if (result == 0)
+ stats.records= local_info->records+ local_info->no_uncommitted_rows_count;
DBUG_RETURN(result);
}
@@ -515,11 +513,11 @@ void ha_ndbcluster::no_uncommitted_rows_update(int c)
if (m_ha_not_exact_count)
return;
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update");
- struct Ndb_local_table_statistics *info= m_table_info;
- info->no_uncommitted_rows_count+= c;
+ struct Ndb_local_table_statistics *local_info= m_table_info;
+ local_info->no_uncommitted_rows_count+= c;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
- info->no_uncommitted_rows_count));
+ local_info->no_uncommitted_rows_count));
DBUG_VOID_RETURN;
}
@@ -956,7 +954,6 @@ int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op)
bool ha_ndbcluster::uses_blob_value()
{
- uint blob_fields;
MY_BITMAP *bitmap;
uint *blob_index, *blob_index_end;
if (table_share->blob_fields == 0)
@@ -1106,7 +1103,6 @@ int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab)
const char *index_name;
KEY* key_info= tab->key_info;
const char **key_name= tab->s->keynames.type_names;
- NDBDICT *dict= ndb->getDictionary();
DBUG_ENTER("ha_ndbcluster::create_indexes");
for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
@@ -1244,7 +1240,6 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
int error= 0;
THD *thd=current_thd;
NDBDICT *dict= ndb->getDictionary();
- const char *index_name;
KEY* key_info= tab->key_info;
const char **key_name= tab->s->keynames.type_names;
DBUG_ENTER("ha_ndbcluster::open_indexes");
@@ -1256,9 +1251,9 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
m_index[i].index= m_index[i].unique_index= NULL;
else
break;
- m_index[i].null_in_unique_index= false;
+ m_index[i].null_in_unique_index= FALSE;
if (check_index_fields_not_null(key_info))
- m_index[i].null_in_unique_index= true;
+ m_index[i].null_in_unique_index= TRUE;
}
if (error && !ignore_error)
@@ -1294,7 +1289,6 @@ void ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab)
const char *index_name;
KEY* key_info= tab->key_info;
const char **key_name= tab->s->keynames.type_names;
- NDBDICT *dict= ndb->getDictionary();
DBUG_ENTER("ha_ndbcluster::renumber_indexes");
for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
@@ -1411,10 +1405,10 @@ bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info)
{
Field* field= key_part->field;
if (field->maybe_null())
- DBUG_RETURN(true);
+ DBUG_RETURN(TRUE);
}
- DBUG_RETURN(false);
+ DBUG_RETURN(FALSE);
}
void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb)
@@ -1732,7 +1726,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
ERR_RETURN(trans->getNdbError());
}
- if (execute_no_commit_ie(this,trans,false) != 0)
+ if (execute_no_commit_ie(this,trans,FALSE) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
@@ -1797,7 +1791,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
}
}
- if (execute_no_commit(this,trans,false) != 0)
+ if (execute_no_commit(this,trans,FALSE) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
@@ -1843,7 +1837,7 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans,
if (err.status != NdbError::Success)
{
if (ndb_to_mysql_error(&err) != (int) errcode)
- DBUG_RETURN(false);
+ DBUG_RETURN(FALSE);
if (op == last) break;
op= trans->getNextCompletedOperation(op);
}
@@ -1874,10 +1868,10 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans,
if (errcode == HA_ERR_KEY_NOT_FOUND)
m_dupkey= table->s->primary_key;
}
- DBUG_RETURN(false);
+ DBUG_RETURN(FALSE);
}
}
- DBUG_RETURN(true);
+ DBUG_RETURN(TRUE);
}
@@ -1955,7 +1949,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record,
}
last= trans->getLastDefinedOperation();
if (first)
- res= execute_no_commit_ie(this,trans,false);
+ res= execute_no_commit_ie(this,trans,FALSE);
else
{
// Table has no keys
@@ -2004,7 +1998,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
if ((res= define_read_attrs(buf, op)))
DBUG_RETURN(res);
- if (execute_no_commit_ie(this,trans,false) != 0)
+ if (execute_no_commit_ie(this,trans,FALSE) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
@@ -2018,7 +2012,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
{
DBUG_ENTER("fetch_next");
- int check;
+ int local_check;
NdbTransaction *trans= m_active_trans;
if (m_lock_tuple)
@@ -2029,19 +2023,21 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
LOCK WITH SHARE MODE) and row was not explictly unlocked
with unlock_row() call
*/
- NdbConnection *trans= m_active_trans;
+ NdbConnection *con_trans= m_active_trans;
NdbOperation *op;
// Lock row
DBUG_PRINT("info", ("Keeping lock on scanned row"));
if (!(op= m_active_cursor->lockCurrentTuple()))
{
- m_lock_tuple= false;
- ERR_RETURN(trans->getNdbError());
+ /* purecov: begin inspected */
+ m_lock_tuple= FALSE;
+ ERR_RETURN(con_trans->getNdbError());
+ /* purecov: end */
}
m_ops_pending++;
}
- m_lock_tuple= false;
+ m_lock_tuple= FALSE;
bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE &&
m_lock.type != TL_READ_WITH_SHARED_LOCKS;;
@@ -2052,13 +2048,13 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
*/
if (m_ops_pending && m_blobs_pending)
{
- if (execute_no_commit(this,trans,false) != 0)
+ if (execute_no_commit(this,trans,FALSE) != 0)
DBUG_RETURN(ndb_err(trans));
m_ops_pending= 0;
m_blobs_pending= FALSE;
}
- if ((check= cursor->nextResult(contact_ndb, m_force_send)) == 0)
+ if ((local_check= cursor->nextResult(contact_ndb, m_force_send)) == 0)
{
/*
Explicitly lock tuple if "select for update" or
@@ -2069,7 +2065,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
m_lock.type == TL_READ_WITH_SHARED_LOCKS);
DBUG_RETURN(0);
}
- else if (check == 1 || check == 2)
+ else if (local_check == 1 || local_check == 2)
{
// 1: No more records
// 2: No more cached records
@@ -2084,7 +2080,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
{
if (m_transaction_on)
{
- if (execute_no_commit(this,trans,false) != 0)
+ if (execute_no_commit(this,trans,FALSE) != 0)
DBUG_RETURN(-1);
}
else
@@ -2099,13 +2095,13 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
}
m_ops_pending= 0;
}
- contact_ndb= (check == 2);
+ contact_ndb= (local_check == 2);
}
else
{
DBUG_RETURN(-1);
}
- } while (check == 2);
+ } while (local_check == 2);
DBUG_RETURN(1);
}
@@ -2365,7 +2361,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
bool need_pk = (lm == NdbOperation::LM_Read);
if (!(op= trans->getNdbIndexScanOperation(m_index[active_index].index,
m_table)) ||
- op->readTuples(lm, 0, parallelism, sorted, descending, false, need_pk))
+ op->readTuples(lm, 0, parallelism, sorted, descending, FALSE, need_pk))
ERR_RETURN(trans->getNdbError());
if (m_use_partition_function && part_spec != NULL &&
part_spec->start_part == part_spec->end_part)
@@ -2387,7 +2383,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
{
const key_range *keys[2]= { start_key, end_key };
- res= set_bounds(op, active_index, false, keys);
+ res= set_bounds(op, active_index, FALSE, keys);
if (res)
DBUG_RETURN(res);
}
@@ -2411,7 +2407,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
ERR_RETURN(trans->getNdbError());
}
- if (execute_no_commit(this,trans,false) != 0)
+ if (execute_no_commit(this,trans,FALSE) != 0)
DBUG_RETURN(ndb_err(trans));
DBUG_RETURN(next_result(buf));
@@ -2506,7 +2502,7 @@ int ha_ndbcluster::unique_index_scan(const KEY* key_info,
if ((res= define_read_attrs(buf, op)))
DBUG_RETURN(res);
- if (execute_no_commit(this,trans,false) != 0)
+ if (execute_no_commit(this,trans,FALSE) != 0)
DBUG_RETURN(ndb_err(trans));
DBUG_PRINT("exit", ("Scan started successfully"));
DBUG_RETURN(next_result(buf));
@@ -2575,7 +2571,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
if ((res= define_read_attrs(buf, op)))
DBUG_RETURN(res);
- if (execute_no_commit(this,trans,false) != 0)
+ if (execute_no_commit(this,trans,FALSE) != 0)
DBUG_RETURN(ndb_err(trans));
DBUG_PRINT("exit", ("Scan started successfully"));
DBUG_RETURN(next_result(buf));
@@ -2591,7 +2587,7 @@ int ha_ndbcluster::write_row(byte *record)
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
int res;
- THD *thd= current_thd;
+ THD *thd= table->in_use;
longlong func_value= 0;
DBUG_ENTER("ha_ndbcluster::write_row");
@@ -2604,7 +2600,6 @@ int ha_ndbcluster::write_row(byte *record)
*/
if (has_auto_increment)
{
- THD *thd= table->in_use;
int error;
m_skip_auto_increment= FALSE;
@@ -2624,7 +2619,7 @@ int ha_ndbcluster::write_row(byte *record)
start_bulk_insert will set parameters to ensure that each
write_row is committed individually
*/
- int peek_res= peek_indexed_rows(record, true);
+ int peek_res= peek_indexed_rows(record, TRUE);
if (!peek_res)
{
@@ -2743,7 +2738,7 @@ int ha_ndbcluster::write_row(byte *record)
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
{
- if (execute_no_commit(this,trans,false) != 0)
+ if (execute_no_commit(this,trans,FALSE) != 0)
{
m_skip_auto_increment= TRUE;
no_uncommitted_rows_execute_failure();
@@ -2934,7 +2929,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
DBUG_PRINT("info", ("Calling updateTuple on cursor"));
if (!(op= cursor->updateCurrentTuple()))
ERR_RETURN(trans->getNdbError());
- m_lock_tuple= false;
+ m_lock_tuple= FALSE;
m_ops_pending++;
if (uses_blob_value())
m_blobs_pending= TRUE;
@@ -2997,7 +2992,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
op->setValue(no_fields, part_func_value);
}
// Execute update operation
- if (!cursor && execute_no_commit(this,trans,false) != 0) {
+ if (!cursor && execute_no_commit(this,trans,FALSE) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -3043,7 +3038,7 @@ int ha_ndbcluster::delete_row(const byte *record)
DBUG_PRINT("info", ("Calling deleteTuple on cursor"));
if (cursor->deleteCurrentTuple() != 0)
ERR_RETURN(trans->getNdbError());
- m_lock_tuple= false;
+ m_lock_tuple= FALSE;
m_ops_pending++;
if (m_use_partition_function)
@@ -3083,7 +3078,7 @@ int ha_ndbcluster::delete_row(const byte *record)
}
// Execute delete operation
- if (execute_no_commit(this,trans,false) != 0) {
+ if (execute_no_commit(this,trans,FALSE) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -3311,8 +3306,7 @@ int ha_ndbcluster::index_init(uint index, bool sorted)
unless m_lock.type == TL_READ_HIGH_PRIORITY
and no sub-sequent call to unlock_row()
*/
- m_lock_tuple= false;
- m_lock_tuple= false;
+ m_lock_tuple= FALSE;
DBUG_RETURN(0);
}
@@ -3334,7 +3328,6 @@ check_null_in_key(const KEY* key_info, const byte *key, uint key_len)
const byte* end_ptr= key + key_len;
curr_part= key_info->key_part;
end_part= curr_part + key_info->key_parts;
-
for (; curr_part != end_part && key < end_ptr; curr_part++)
{
@@ -3572,12 +3565,12 @@ int ha_ndbcluster::close_scan()
if (!(op= cursor->lockCurrentTuple()))
{
- m_lock_tuple= false;
+ m_lock_tuple= FALSE;
ERR_RETURN(trans->getNdbError());
}
m_ops_pending++;
}
- m_lock_tuple= false;
+ m_lock_tuple= FALSE;
if (m_ops_pending)
{
/*
@@ -3585,7 +3578,7 @@ int ha_ndbcluster::close_scan()
deleteing/updating transaction before closing the scan
*/
DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending));
- if (execute_no_commit(this,trans,false) != 0) {
+ if (execute_no_commit(this,trans,FALSE) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -3790,7 +3783,7 @@ int ha_ndbcluster::info(uint flag)
struct Ndb_statistics stat;
ndb->setDatabaseName(m_dbname);
if (current_thd->variables.ndb_use_exact_count &&
- (result= ndb_get_table_statistics(this, true, ndb, m_table, &stat))
+ (result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat))
== 0)
{
stats.mean_rec_length= stat.row_size;
@@ -3991,7 +3984,7 @@ int ha_ndbcluster::end_bulk_insert()
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
{
- if (execute_no_commit(this, trans,false) != 0)
+ if (execute_no_commit(this, trans,FALSE) != 0)
{
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
@@ -4316,7 +4309,7 @@ void ha_ndbcluster::unlock_row()
DBUG_ENTER("unlock_row");
DBUG_PRINT("info", ("Unlocking row"));
- m_lock_tuple= false;
+ m_lock_tuple= FALSE;
DBUG_VOID_RETURN;
}
@@ -4743,14 +4736,14 @@ static int create_ndb_column(NDBCOL &col,
int ha_ndbcluster::create(const char *name,
TABLE *form,
- HA_CREATE_INFO *info)
+ HA_CREATE_INFO *create_info)
{
THD *thd= current_thd;
NDBTAB tab;
NDBCOL col;
uint pack_length, length, i, pk_length= 0;
const void *data, *pack_data;
- bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
+ bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE);
char tablespace[FN_LEN];
@@ -4774,7 +4767,7 @@ int ha_ndbcluster::create(const char *name,
if (!(m_table= ndbtab_g.get_table()))
ERR_RETURN(dict->getNdbError());
if ((get_tablespace_name(thd, tablespace, FN_LEN)))
- info->tablespace= tablespace;
+ create_info->tablespace= tablespace;
m_table= NULL;
}
DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE"));
@@ -4815,7 +4808,7 @@ int ha_ndbcluster::create(const char *name,
DBUG_PRINT("table", ("name: %s", m_tabname));
tab.setName(m_tabname);
- tab.setLogging(!(info->options & HA_LEX_CREATE_TMP_TABLE));
+ tab.setLogging(!(create_info->options & HA_LEX_CREATE_TMP_TABLE));
// Save frm data for this table
if (readfrm(name, &data, &length))
@@ -4836,10 +4829,10 @@ int ha_ndbcluster::create(const char *name,
DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d",
field->field_name, field->real_type(),
field->pack_length()));
- if ((my_errno= create_ndb_column(col, field, info)))
+ if ((my_errno= create_ndb_column(col, field, create_info)))
DBUG_RETURN(my_errno);
- if (info->storage_media == HA_SM_DISK)
+ if (create_info->storage_media == HA_SM_DISK)
col.setStorageType(NdbDictionary::Column::StorageTypeDisk);
else
col.setStorageType(NdbDictionary::Column::StorageTypeMemory);
@@ -4859,16 +4852,16 @@ int ha_ndbcluster::create(const char *name,
NdbDictionary::Column::StorageTypeMemory);
}
- if (info->storage_media == HA_SM_DISK)
+ if (create_info->storage_media == HA_SM_DISK)
{
- if (info->tablespace)
- tab.setTablespaceName(info->tablespace);
+ if (create_info->tablespace)
+ tab.setTablespaceName(create_info->tablespace);
else
tab.setTablespaceName("DEFAULT-TS");
}
- else if (info->tablespace)
+ else if (create_info->tablespace)
{
- if (info->storage_media == HA_SM_MEMORY)
+ if (create_info->storage_media == HA_SM_MEMORY)
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_ILLEGAL_HA_CREATE_OPTION,
@@ -4878,8 +4871,8 @@ int ha_ndbcluster::create(const char *name,
"STORAGE DISK");
DBUG_RETURN(HA_ERR_UNSUPPORTED);
}
- tab.setTablespaceName(info->tablespace);
- info->storage_media = HA_SM_DISK; //if use tablespace, that also means store on disk
+ tab.setTablespaceName(create_info->tablespace);
+ create_info->storage_media = HA_SM_DISK; //if use tablespace, that also means store on disk
}
// No primary key, create shadow key as 64 bit, auto increment
@@ -4910,13 +4903,13 @@ int ha_ndbcluster::create(const char *name,
case MYSQL_TYPE_MEDIUM_BLOB:
case MYSQL_TYPE_LONG_BLOB:
{
- NdbDictionary::Column * col= tab.getColumn(i);
- int size= pk_length + (col->getPartSize()+3)/4 + 7;
+ NdbDictionary::Column * column= tab.getColumn(i);
+ int size= pk_length + (column->getPartSize()+3)/4 + 7;
if (size > NDB_MAX_TUPLE_SIZE_IN_WORDS &&
(pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS)
{
size= NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7;
- col->setPartSize(4*size);
+ column->setPartSize(4*size);
}
/**
* If size > NDB_MAX and pk_length+7 >= NDB_MAX
@@ -5009,7 +5002,7 @@ int ha_ndbcluster::create(const char *name,
get a new share
*/
- if (!(share= get_share(name, form, true, true)))
+ if (!(share= get_share(name, form, TRUE, TRUE)))
{
sql_print_error("NDB: allocating table share for %s failed", name);
/* my_errno is set */
@@ -5071,10 +5064,8 @@ int ha_ndbcluster::create(const char *name,
int ha_ndbcluster::create_handler_files(const char *file,
const char *old_name,
int action_flag,
- HA_CREATE_INFO *info)
+ HA_CREATE_INFO *create_info)
{
- char path[FN_REFLEN];
- const char *name;
Ndb* ndb;
const NDBTAB *tab;
const void *data, *pack_data;
@@ -5092,7 +5083,7 @@ int ha_ndbcluster::create_handler_files(const char *file,
DBUG_RETURN(HA_ERR_NO_CONNECTION);
NDBDICT *dict= ndb->getDictionary();
- if (!info->frm_only)
+ if (!create_info->frm_only)
DBUG_RETURN(0); // Must be a create, ignore since frm is saved in create
// TODO handle this
@@ -5374,7 +5365,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
int ndb_table_id= orig_tab->getObjectId();
int ndb_table_version= orig_tab->getObjectVersion();
- NDB_SHARE *share= get_share(from, 0, false);
+ NDB_SHARE *share= get_share(from, 0, FALSE);
if (share)
{
int r= rename_share(share, to);
@@ -5528,7 +5519,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
DBUG_PRINT("info", ("Schema distribution table not setup"));
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
- NDB_SHARE *share= get_share(path, 0, false);
+ NDB_SHARE *share= get_share(path, 0, FALSE);
#endif
/* Drop the table from NDB */
@@ -5919,7 +5910,7 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
Ndb *ndb= get_ndb();
ndb->setDatabaseName(m_dbname);
struct Ndb_statistics stat;
- res= ndb_get_table_statistics(NULL, false, ndb, m_table, &stat);
+ res= ndb_get_table_statistics(NULL, FALSE, ndb, m_table, &stat);
stats.mean_rec_length= stat.row_size;
stats.data_file_length= stat.fragment_memory;
stats.records= stat.row_count;
@@ -6074,7 +6065,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db,
ndb->setDatabaseName(db);
NDBDICT* dict= ndb->getDictionary();
build_table_filename(key, sizeof(key), db, name, "", 0);
- NDB_SHARE *share= get_share(key, 0, false);
+ NDB_SHARE *share= get_share(key, 0, FALSE);
if (share && get_ndb_share_state(share) == NSS_ALTERED)
{
// Frm has been altered on disk, but not yet written to ndb
@@ -6241,7 +6232,6 @@ int ndbcluster_drop_database_impl(const char *path)
static void ndbcluster_drop_database(handlerton *hton, char *path)
{
- THD *thd= current_thd;
DBUG_ENTER("ndbcluster_drop_database");
#ifdef HAVE_NDB_BINLOG
/*
@@ -6258,6 +6248,7 @@ static void ndbcluster_drop_database(handlerton *hton, char *path)
ndbcluster_drop_database_impl(path);
#ifdef HAVE_NDB_BINLOG
char db[FN_REFLEN];
+ THD *thd= current_thd;
ha_ndbcluster::set_dbname(path, db);
ndbcluster_log_schema_op(thd, 0,
thd->query, thd->query_length,
@@ -6283,16 +6274,17 @@ int ndb_create_table_from_engine(THD *thd, const char *db,
*/
int ndbcluster_find_all_files(THD *thd)
{
- DBUG_ENTER("ndbcluster_find_all_files");
Ndb* ndb;
char key[FN_REFLEN];
+ NDBDICT *dict;
+ int unhandled, retries= 5, skipped;
+ DBUG_ENTER("ndbcluster_find_all_files");
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
- NDBDICT *dict= ndb->getDictionary();
+ dict= ndb->getDictionary();
- int unhandled, retries= 5, skipped;
LINT_INIT(unhandled);
LINT_INIT(skipped);
do
@@ -6362,7 +6354,7 @@ int ndbcluster_find_all_files(THD *thd)
}
else if (cmp_frm(ndbtab, pack_data, pack_length))
{
- NDB_SHARE *share= get_share(key, 0, false);
+ NDB_SHARE *share= get_share(key, 0, FALSE);
if (!share || get_ndb_share_state(share) != NSS_ALTERED)
{
discover= 1;
@@ -6476,12 +6468,12 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
List<char> delete_list;
while ((file_name=it++))
{
- bool file_on_disk= false;
+ bool file_on_disk= FALSE;
DBUG_PRINT("info", ("%s", file_name));
if (hash_search(&ndb_tables, file_name, strlen(file_name)))
{
DBUG_PRINT("info", ("%s existed in NDB _and_ on disk ", file_name));
- file_on_disk= true;
+ file_on_disk= TRUE;
}
// Check for .ndb file with this name
@@ -7034,19 +7026,19 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
{
// We must provide approx table rows
Uint64 table_rows=0;
- Ndb_local_table_statistics *info= m_table_info;
- if (info->records != ~(ha_rows)0 && info->records != 0)
+ Ndb_local_table_statistics *ndb_info= m_table_info;
+ if (ndb_info->records != ~(ha_rows)0 && ndb_info->records != 0)
{
- table_rows = info->records;
- DBUG_PRINT("info", ("use info->records: %llu", table_rows));
+ table_rows = ndb_info->records;
+ DBUG_PRINT("info", ("use info->records: %lu", (ulong) table_rows));
}
else
{
Ndb_statistics stat;
- if ((res=ndb_get_table_statistics(this, true, ndb, m_table, &stat)) != 0)
+ if ((res=ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat)))
break;
table_rows=stat.row_count;
- DBUG_PRINT("info", ("use db row_count: %llu", table_rows));
+ DBUG_PRINT("info", ("use db row_count: %lu", (ulong) table_rows));
if (table_rows == 0) {
// Problem if autocommit=0
#ifdef ndb_get_table_statistics_uses_active_trans
@@ -7069,7 +7061,7 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
if ((op->readTuples(NdbOperation::LM_CommittedRead)) == -1)
ERR_BREAK(op->getNdbError(), res);
const key_range *keys[2]={ min_key, max_key };
- if ((res=set_bounds(op, inx, true, keys)) != 0)
+ if ((res=set_bounds(op, inx, TRUE, keys)) != 0)
break;
// Decide if db should be contacted
@@ -7204,7 +7196,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
{
Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
if (ndbtab_g.get_table() == 0
- || ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat))
+ || ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat))
{
free_share(&share);
DBUG_RETURN(1);
@@ -7383,9 +7375,9 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
static void print_share(const char* where, NDB_SHARE* share)
{
fprintf(DBUG_FILE,
- "%s %s.%s: use_count: %u, commit_count: %llu\n",
+ "%s %s.%s: use_count: %u, commit_count: %lu\n",
where, share->db, share->table_name, share->use_count,
- (long long unsigned int) share->commit_count);
+ (ulong) share->commit_count);
fprintf(DBUG_FILE,
" - key: %s, key_length: %d\n",
share->key, share->key_length);
@@ -7622,7 +7614,6 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
bool create_if_not_exists,
bool have_lock)
{
- THD *thd= current_thd;
NDB_SHARE *share;
uint length= (uint) strlen(key);
DBUG_ENTER("ndbcluster_get_share");
@@ -7774,7 +7765,6 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const
Uint64 sum_row_size= 0;
Uint64 sum_mem= 0;
NdbScanOperation*pOp;
- NdbResultSet *rs;
int check;
if ((pTrans= ndb->startTransaction()) == NULL)
@@ -7953,10 +7943,10 @@ ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges,
const byte *key= range->start_key.key;
uint key_len= range->start_key.length;
if (check_null_in_key(key_info, key, key_len))
- DBUG_RETURN(true);
+ DBUG_RETURN(TRUE);
curr += reclength;
}
- DBUG_RETURN(false);
+ DBUG_RETURN(FALSE);
}
int
@@ -7966,21 +7956,20 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
bool sorted,
HANDLER_BUFFER *buffer)
{
- DBUG_ENTER("ha_ndbcluster::read_multi_range_first");
m_write_op= FALSE;
-
int res;
KEY* key_info= table->key_info + active_index;
- NDB_INDEX_TYPE index_type= get_index_type(active_index);
+ NDB_INDEX_TYPE cur_index_type= get_index_type(active_index);
ulong reclength= table_share->reclength;
NdbOperation* op;
Thd_ndb *thd_ndb= get_thd_ndb(current_thd);
+ DBUG_ENTER("ha_ndbcluster::read_multi_range_first");
/**
* blobs and unique hash index with NULL can't be batched currently
*/
if (uses_blob_value() ||
- (index_type == UNIQUE_INDEX &&
+ (cur_index_type == UNIQUE_INDEX &&
has_null_in_unique_index(active_index) &&
null_value_index_search(ranges, ranges+range_count, buffer)))
{
@@ -8055,7 +8044,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
continue;
}
}
- switch(index_type){
+ switch (cur_index_type) {
case PRIMARY_KEY_ORDERED_INDEX:
if (!(multi_range_curr->start_key.length == key_info->key_length &&
multi_range_curr->start_key.flag == HA_READ_KEY_EXACT))
@@ -8070,7 +8059,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
!define_read_attrs(curr, op) &&
(op->setAbortOption(AO_IgnoreError), TRUE) &&
(!m_use_partition_function ||
- (op->setPartitionId(part_spec.start_part), true)))
+ (op->setPartitionId(part_spec.start_part), TRUE)))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
@@ -8131,7 +8120,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
const key_range *keys[2]= { &multi_range_curr->start_key,
&multi_range_curr->end_key };
- if ((res= set_bounds(scanOp, active_index, false, keys,
+ if ((res= set_bounds(scanOp, active_index, FALSE, keys,
multi_range_curr-ranges)))
DBUG_RETURN(res);
break;
@@ -8253,7 +8242,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
DBUG_MULTI_RANGE(6);
// First fetch from cursor
DBUG_ASSERT(range_no == -1);
- if ((res= m_multi_cursor->nextResult(true)))
+ if ((res= m_multi_cursor->nextResult(TRUE)))
{
DBUG_MULTI_RANGE(15);
goto close_scan;
@@ -8375,7 +8364,6 @@ ha_ndbcluster::update_table_comment(
}
ndb->setDatabaseName(m_dbname);
- NDBDICT* dict= ndb->getDictionary();
const NDBTAB* tab= m_table;
DBUG_ASSERT(tab != NULL);
@@ -8570,7 +8558,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndb->setDatabaseName(share->db);
Ndb_table_guard ndbtab_g(ndb->getDictionary(), share->table_name);
if (ndbtab_g.get_table() &&
- ndb_get_table_statistics(NULL, false, ndb,
+ ndb_get_table_statistics(NULL, FALSE, ndb,
ndbtab_g.get_table(), &stat) == 0)
{
char buff[22], buff2[22];
@@ -8751,14 +8739,14 @@ void ndb_serialize_cond(const Item *item, void *arg)
if (context->supported)
{
- Ndb_rewrite_context *rewrite_context= context->rewrite_stack;
- const Item_func *func_item;
+ Ndb_rewrite_context *rewrite_context2= context->rewrite_stack;
+ const Item_func *rewrite_func_item;
// Check if we are rewriting some unsupported function call
- if (rewrite_context &&
- (func_item= rewrite_context->func_item) &&
- rewrite_context->count++ == 0)
+ if (rewrite_context2 &&
+ (rewrite_func_item= rewrite_context2->func_item) &&
+ rewrite_context2->count++ == 0)
{
- switch (func_item->functype()) {
+ switch (rewrite_func_item->functype()) {
case Item_func::BETWEEN:
/*
Rewrite
@@ -8785,7 +8773,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
if (context->expecting(item->type()))
{
// This is the <field>|<const> item, save it in the rewrite context
- rewrite_context->left_hand_item= item;
+ rewrite_context2->left_hand_item= item;
if (item->type() == Item::FUNC_ITEM)
{
Item_func *func_item= (Item_func *) item;
@@ -8950,7 +8938,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
type == MYSQL_TYPE_DATETIME)
? (context->expecting_field_result(STRING_RESULT) ||
context->expecting_field_result(INT_RESULT))
- : true)) &&
+ : TRUE)) &&
// Bit fields not yet supported in scan filter
type != MYSQL_TYPE_BIT &&
// No BLOB support in scan filter
@@ -9610,25 +9598,24 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
break;
Ndb_item *a= cond->next->ndb_item;
Ndb_item *b, *field, *value= NULL;
- LINT_INIT(field);
switch (cond->ndb_item->argument_count()) {
case 1:
- field=
- (a->type == NDB_FIELD)? a : NULL;
+ field= (a->type == NDB_FIELD)? a : NULL;
break;
case 2:
if (!cond->next->next)
+ {
+ field= NULL;
break;
+ }
b= cond->next->next->ndb_item;
- value=
- (a->type == NDB_VALUE)? a
- : (b->type == NDB_VALUE)? b
- : NULL;
- field=
- (a->type == NDB_FIELD)? a
- : (b->type == NDB_FIELD)? b
- : NULL;
+ value= ((a->type == NDB_VALUE) ? a :
+ (b->type == NDB_VALUE) ? b :
+ NULL);
+ field= ((a->type == NDB_FIELD) ? a :
+ (b->type == NDB_FIELD) ? b :
+ NULL);
break;
default:
field= NULL; //Keep compiler happy
@@ -9838,6 +9825,7 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
DBUG_RETURN(1);
}
+
int
ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
{
@@ -9911,6 +9899,7 @@ ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter)
DBUG_RETURN(0);
}
+
int
ha_ndbcluster::build_scan_filter(Ndb_cond * &cond, NdbScanFilter *filter)
{
@@ -9961,14 +9950,14 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack,
DBUG_RETURN(0);
}
+
int
ha_ndbcluster::generate_scan_filter_from_cond(Ndb_cond_stack *ndb_cond_stack,
NdbScanFilter& filter)
{
- DBUG_ENTER("generate_scan_filter_from_cond");
bool multiple_cond= FALSE;
-
- DBUG_PRINT("info", ("Generating scan filter"));
+ DBUG_ENTER("generate_scan_filter_from_cond");
+
// Wrap an AND group around multiple conditions
if (ndb_cond_stack->next)
{
@@ -9994,6 +9983,7 @@ ha_ndbcluster::generate_scan_filter_from_cond(Ndb_cond_stack *ndb_cond_stack,
DBUG_RETURN(0);
}
+
int ha_ndbcluster::generate_scan_filter_from_key(NdbScanOperation *op,
const KEY* key_info,
const byte *key,
@@ -10004,15 +9994,14 @@ int ha_ndbcluster::generate_scan_filter_from_key(NdbScanOperation *op,
KEY_PART_INFO* end= key_part+key_info->key_parts;
NdbScanFilter filter(op);
int res;
-
DBUG_ENTER("generate_scan_filter_from_key");
+
filter.begin(NdbScanFilter::AND);
for (; key_part != end; key_part++)
{
Field* field= key_part->field;
uint32 pack_len= field->pack_length();
const byte* ptr= key;
- char buf[256];
DBUG_PRINT("info", ("Filtering value for %s", field->field_name));
DBUG_DUMP("key", (char*)ptr, pack_len);
if (key_part->null_bit)
@@ -10190,13 +10179,13 @@ static bool adjusted_frag_count(uint no_fragments, uint no_nodes,
return (reported_frags < no_fragments);
}
-int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *info)
+int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *create_info)
{
ha_rows max_rows, min_rows;
- if (info)
+ if (create_info)
{
- max_rows= info->max_rows;
- min_rows= info->min_rows;
+ max_rows= create_info->max_rows;
+ min_rows= create_info->min_rows;
}
else
{
@@ -10347,15 +10336,14 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
{
uint16 frag_data[MAX_PARTITIONS];
char *ts_names[MAX_PARTITIONS];
- ulong ts_index= 0, fd_index= 0, i, j;
+ ulong fd_index= 0, i, j;
NDBTAB *tab= (NDBTAB*)tab_par;
NDBTAB::FragmentType ftype= NDBTAB::UserDefined;
partition_element *part_elem;
bool first= TRUE;
- uint ts_id, ts_version, part_count= 0, tot_ts_name_len;
+ uint tot_ts_name_len;
List_iterator<partition_element> part_it(part_info->partitions);
int error;
- char *name_ptr;
DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
if (part_info->part_type == HASH_PARTITION &&
@@ -10469,7 +10457,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
}
-bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
+bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{
DBUG_ENTER("ha_ndbcluster::check_if_incompatible_data");
@@ -10527,76 +10515,78 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
}
/* Check that auto_increment value was not changed */
- if ((info->used_fields & HA_CREATE_USED_AUTO) &&
- info->auto_increment_value != 0)
+ if ((create_info->used_fields & HA_CREATE_USED_AUTO) &&
+ create_info->auto_increment_value != 0)
DBUG_RETURN(COMPATIBLE_DATA_NO);
/* Check that row format didn't change */
- if ((info->used_fields & HA_CREATE_USED_AUTO) &&
- get_row_type() != info->row_type)
+ if ((create_info->used_fields & HA_CREATE_USED_AUTO) &&
+ get_row_type() != create_info->row_type)
DBUG_RETURN(COMPATIBLE_DATA_NO);
DBUG_RETURN(COMPATIBLE_DATA_YES);
}
-bool set_up_tablespace(st_alter_tablespace *info,
+bool set_up_tablespace(st_alter_tablespace *alter_info,
NdbDictionary::Tablespace *ndb_ts)
{
- ndb_ts->setName(info->tablespace_name);
- ndb_ts->setExtentSize(info->extent_size);
- ndb_ts->setDefaultLogfileGroup(info->logfile_group_name);
- return false;
+ ndb_ts->setName(alter_info->tablespace_name);
+ ndb_ts->setExtentSize(alter_info->extent_size);
+ ndb_ts->setDefaultLogfileGroup(alter_info->logfile_group_name);
+ return FALSE;
}
-bool set_up_datafile(st_alter_tablespace *info,
+bool set_up_datafile(st_alter_tablespace *alter_info,
NdbDictionary::Datafile *ndb_df)
{
- if (info->max_size > 0)
+ if (alter_info->max_size > 0)
{
my_error(ER_TABLESPACE_AUTO_EXTEND_ERROR, MYF(0));
- return true;
+ return TRUE;
}
- ndb_df->setPath(info->data_file_name);
- ndb_df->setSize(info->initial_size);
- ndb_df->setTablespace(info->tablespace_name);
- return false;
+ ndb_df->setPath(alter_info->data_file_name);
+ ndb_df->setSize(alter_info->initial_size);
+ ndb_df->setTablespace(alter_info->tablespace_name);
+ return FALSE;
}
-bool set_up_logfile_group(st_alter_tablespace *info,
+bool set_up_logfile_group(st_alter_tablespace *alter_info,
NdbDictionary::LogfileGroup *ndb_lg)
{
- ndb_lg->setName(info->logfile_group_name);
- ndb_lg->setUndoBufferSize(info->undo_buffer_size);
- return false;
+ ndb_lg->setName(alter_info->logfile_group_name);
+ ndb_lg->setUndoBufferSize(alter_info->undo_buffer_size);
+ return FALSE;
}
-bool set_up_undofile(st_alter_tablespace *info,
+bool set_up_undofile(st_alter_tablespace *alter_info,
NdbDictionary::Undofile *ndb_uf)
{
- ndb_uf->setPath(info->undo_file_name);
- ndb_uf->setSize(info->initial_size);
- ndb_uf->setLogfileGroup(info->logfile_group_name);
- return false;
+ ndb_uf->setPath(alter_info->undo_file_name);
+ ndb_uf->setSize(alter_info->initial_size);
+ ndb_uf->setLogfileGroup(alter_info->logfile_group_name);
+ return FALSE;
}
-int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info)
+int ndbcluster_alter_tablespace(handlerton *hton,
+ THD* thd, st_alter_tablespace *alter_info)
{
+ int is_tablespace= 0;
+ NdbError err;
+ NDBDICT *dict;
+ int error;
+ const char *errmsg;
+ Ndb *ndb;
DBUG_ENTER("ha_ndbcluster::alter_tablespace");
+ LINT_INIT(errmsg);
- int is_tablespace= 0;
- Ndb *ndb= check_ndb_in_thd(thd);
+ ndb= check_ndb_in_thd(thd);
if (ndb == NULL)
{
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
+ dict= ndb->getDictionary();
- NdbError err;
- NDBDICT *dict= ndb->getDictionary();
- int error;
- const char * errmsg;
- LINT_INIT(errmsg);
-
- switch (info->ts_cmd_type){
+ switch (alter_info->ts_cmd_type){
case (CREATE_TABLESPACE):
{
error= ER_CREATE_FILEGROUP_FAILED;
@@ -10604,11 +10594,11 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
NdbDictionary::Tablespace ndb_ts;
NdbDictionary::Datafile ndb_df;
NdbDictionary::ObjectId objid;
- if (set_up_tablespace(info, &ndb_ts))
+ if (set_up_tablespace(alter_info, &ndb_ts))
{
DBUG_RETURN(1);
}
- if (set_up_datafile(info, &ndb_df))
+ if (set_up_datafile(alter_info, &ndb_df))
{
DBUG_RETURN(1);
}
@@ -10618,7 +10608,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
DBUG_PRINT("error", ("createTablespace returned %d", error));
goto ndberror;
}
- DBUG_PRINT("info", ("Successfully created Tablespace"));
+ DBUG_PRINT("alter_info", ("Successfully created Tablespace"));
errmsg= "DATAFILE";
if (dict->createDatafile(ndb_df))
{
@@ -10640,10 +10630,10 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
case (ALTER_TABLESPACE):
{
error= ER_ALTER_FILEGROUP_FAILED;
- if (info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE)
+ if (alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE)
{
NdbDictionary::Datafile ndb_df;
- if (set_up_datafile(info, &ndb_df))
+ if (set_up_datafile(alter_info, &ndb_df))
{
DBUG_RETURN(1);
}
@@ -10653,14 +10643,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
goto ndberror;
}
}
- else if(info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
+ else if(alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
{
- NdbDictionary::Tablespace ts= dict->getTablespace(info->tablespace_name);
- NdbDictionary::Datafile df= dict->getDatafile(0, info->data_file_name);
+ NdbDictionary::Tablespace ts= dict->getTablespace(alter_info->tablespace_name);
+ NdbDictionary::Datafile df= dict->getDatafile(0, alter_info->data_file_name);
NdbDictionary::ObjectId objid;
df.getTablespaceId(&objid);
if (ts.getObjectId() == objid.getObjectId() &&
- strcmp(df.getPath(), info->data_file_name) == 0)
+ strcmp(df.getPath(), alter_info->data_file_name) == 0)
{
errmsg= " DROP DATAFILE";
if (dict->dropDatafile(df))
@@ -10678,7 +10668,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
else
{
DBUG_PRINT("error", ("Unsupported alter tablespace: %d",
- info->ts_alter_tablespace_type));
+ alter_info->ts_alter_tablespace_type));
DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
}
is_tablespace= 1;
@@ -10690,14 +10680,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
NdbDictionary::LogfileGroup ndb_lg;
NdbDictionary::Undofile ndb_uf;
NdbDictionary::ObjectId objid;
- if (info->undo_file_name == NULL)
+ if (alter_info->undo_file_name == NULL)
{
/*
REDO files in LOGFILE GROUP not supported yet
*/
DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
}
- if (set_up_logfile_group(info, &ndb_lg))
+ if (set_up_logfile_group(alter_info, &ndb_lg))
{
DBUG_RETURN(1);
}
@@ -10706,8 +10696,8 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
{
goto ndberror;
}
- DBUG_PRINT("info", ("Successfully created Logfile Group"));
- if (set_up_undofile(info, &ndb_uf))
+ DBUG_PRINT("alter_info", ("Successfully created Logfile Group"));
+ if (set_up_undofile(alter_info, &ndb_uf))
{
DBUG_RETURN(1);
}
@@ -10729,7 +10719,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
case (ALTER_LOGFILE_GROUP):
{
error= ER_ALTER_FILEGROUP_FAILED;
- if (info->undo_file_name == NULL)
+ if (alter_info->undo_file_name == NULL)
{
/*
REDO files in LOGFILE GROUP not supported yet
@@ -10737,7 +10727,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
}
NdbDictionary::Undofile ndb_uf;
- if (set_up_undofile(info, &ndb_uf))
+ if (set_up_undofile(alter_info, &ndb_uf))
{
DBUG_RETURN(1);
}
@@ -10752,7 +10742,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
{
error= ER_DROP_FILEGROUP_FAILED;
errmsg= "TABLESPACE";
- if (dict->dropTablespace(dict->getTablespace(info->tablespace_name)))
+ if (dict->dropTablespace(dict->getTablespace(alter_info->tablespace_name)))
{
goto ndberror;
}
@@ -10763,7 +10753,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
{
error= ER_DROP_FILEGROUP_FAILED;
errmsg= "LOGFILE GROUP";
- if (dict->dropLogfileGroup(dict->getLogfileGroup(info->logfile_group_name)))
+ if (dict->dropLogfileGroup(dict->getLogfileGroup(alter_info->logfile_group_name)))
{
goto ndberror;
}
@@ -10786,13 +10776,13 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
if (is_tablespace)
ndbcluster_log_schema_op(thd, 0,
thd->query, thd->query_length,
- "", info->tablespace_name,
+ "", alter_info->tablespace_name,
0, 0,
SOT_TABLESPACE, 0, 0, 0);
else
ndbcluster_log_schema_op(thd, 0,
thd->query, thd->query_length,
- "", info->logfile_group_name,
+ "", alter_info->logfile_group_name,
0, 0,
SOT_LOGFILE_GROUP, 0, 0, 0);
#endif
@@ -10813,7 +10803,6 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
{
Ndb *ndb;
NDBDICT *dict;
- const NDBTAB *tab;
int err;
DBUG_ENTER("ha_ndbcluster::get_no_parts");
LINT_INIT(err);
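
Most of the sql/ha_ndbcluster.cc hunks above are warning cleanup: parameters named info become create_info or alter_info so they no longer hide handler::info(), locals named type become ref_type or cur_index_type, bare true/false become the TRUE/FALSE macros, unused locals are dropped, DBUG_ENTER moves below the variable declarations, and 64-bit counters are cast to ulong before being printed with %lu. A minimal, self-contained sketch of the shadowing problem behind the renames (all names below are illustrative stand-ins, not the real server classes):

#include <cstdio>

struct create_info_demo { unsigned long max_rows; };

struct handler_demo
{
  int info(unsigned flag) const { return (int) flag; }   // member named "info"

  // Before the renames a parameter here was also called "info", hiding the
  // member above: an unqualified call to info() inside the function stopped
  // compiling and shadow warnings were raised.
  unsigned long get_default_no_partitions(const create_info_demo *create_info) const
  {
    return create_info ? create_info->max_rows : 0;
  }
};

int main()
{
  handler_demo h;
  create_info_demo ci = { 1000 };
  std::printf("%lu %d\n", h.get_default_no_partitions(&ci), h.info(2));
  return 0;
}
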
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 38b640d5f55..fc7d933be7d 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -1131,7 +1131,7 @@ ndbcluster_update_slock(THD *thd,
ndb_error= this_error;
break;
}
-end:
+
if (ndb_error)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 9d4cd69be12..87d24207dcd 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -584,7 +584,6 @@ int ha_partition::drop_partitions(const char *path)
List_iterator<partition_element> part_it(m_part_info->partitions);
char part_name_buff[FN_REFLEN];
uint no_parts= m_part_info->partitions.elements;
- uint part_count= 0;
uint no_subparts= m_part_info->no_subparts;
uint i= 0;
uint name_variant;
@@ -1075,7 +1074,6 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
uint no_parts= m_part_info->no_parts;
uint no_subparts= m_part_info->no_subparts;
uint i= 0;
- LEX *lex= thd->lex;
int error;
DBUG_ENTER("ha_partition::handle_opt_partitions");
DBUG_PRINT("enter", ("all_parts %u, flag= %u", all_parts, flag));
@@ -1087,11 +1085,9 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
{
if (m_is_sub_partitioned)
{
- List_iterator<partition_element> sub_it(part_elem->subpartitions);
uint j= 0, part;
do
{
- partition_element *sub_elem= sub_it++;
part= i * no_subparts + j;
DBUG_PRINT("info", ("Optimize subpartition %u",
part));
@@ -1136,7 +1132,6 @@ int ha_partition::prepare_new_partition(TABLE *table,
{
int error;
bool create_flag= FALSE;
- bool open_flag= FALSE;
DBUG_ENTER("prepare_new_partition");
if ((error= set_up_table_before_create(table, part_name, create_info,
@@ -1245,7 +1240,6 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
handler **new_file_array;
int error= 1;
bool first;
- bool copy_parts= FALSE;
uint temp_partitions= m_part_info->temp_partitions.elements;
THD *thd= current_thd;
DBUG_ENTER("ha_partition::change_partitions");
@@ -2061,7 +2055,6 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root)
partition_element *part_elem;
uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
List_iterator_fast <partition_element> part_it(m_part_info->partitions);
- THD *thd= current_thd;
DBUG_ENTER("ha_partition::new_handlers_from_part_info");
if (!(m_file= (handler **) alloc_root(mem_root, alloc_len)))
diff --git a/sql/handler.cc b/sql/handler.cc
index bdfaeaf2672..8e7206aade9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -47,12 +47,6 @@ static handlerton *installed_htons[128];
KEY_CREATE_INFO default_key_create_info= { HA_KEY_ALG_UNDEF, 0, {NullS,0} };
-/* static functions defined in this file */
-
-static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root);
-
-static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES;
-
/* number of entries in handlertons[] */
ulong total_ha= 0;
/* number of storage engines (from handlertons[]) that support 2pc */
@@ -167,11 +161,13 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type)
}
+#ifdef NOT_USED
static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
handlerton *hton= ha_default_handlerton(current_thd);
return (hton && hton->create) ? hton->create(hton, table, mem_root) : NULL;
}
+#endif
handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
@@ -726,7 +722,7 @@ int ha_commit_trans(THD *thd, bool all)
}
DBUG_EXECUTE_IF("crash_commit_after_prepare", abort(););
if (error || (is_real_trans && xid &&
- (error= !(cookie= tc_log->log(thd, xid)))))
+ (error= !(cookie= tc_log->log_xid(thd, xid)))))
{
ha_rollback_trans(thd, all);
error= 1;
@@ -734,7 +730,7 @@ int ha_commit_trans(THD *thd, bool all)
}
DBUG_EXECUTE_IF("crash_commit_after_log", abort(););
}
- error=ha_commit_one_phase(thd, all) ? cookie ? 2 : 1 : 0;
+ error=ha_commit_one_phase(thd, all) ? (cookie ? 2 : 1) : 0;
DBUG_EXECUTE_IF("crash_commit_before_unlog", abort(););
if (cookie)
tc_log->unlog(cookie, xid);
@@ -3345,7 +3341,6 @@ static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin,
TYPELIB *ha_known_exts(void)
{
- MEM_ROOT *mem_root= current_thd->mem_root;
if (!known_extensions.type_names || mysys_usage_id != known_extensions_id)
{
List<char> found_exts;
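
In sql/handler.cc the now-unreferenced static helper create_default() is kept but fenced with #ifdef NOT_USED, the two-phase-commit call is renamed from tc_log->log() to tc_log->log_xid(), and the nested ternary gains explicit parentheses. A small sketch of the NOT_USED idiom, which keeps reference code in the file without drawing "defined but not used" warnings (names are illustrative):

#include <cstdio>

#ifdef NOT_USED
// Compiled only if NOT_USED is defined explicitly; otherwise the compiler
// never sees the helper and cannot warn about an unused static function.
static int create_default_demo() { return 42; }
#endif

int main()
{
#ifdef NOT_USED
  std::printf("helper available: %d\n", create_default_demo());
#else
  std::printf("helper compiled out\n");
#endif
  return 0;
}
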
diff --git a/sql/item.cc b/sql/item.cc
index 0f28572163e..b087ddb3abb 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -25,10 +25,6 @@
#include "sql_trigger.h"
#include "sql_select.h"
-static void mark_as_dependent(THD *thd,
- SELECT_LEX *last, SELECT_LEX *current,
- Item_ident *item);
-
const String my_null_string("NULL", 4, default_charset_info);
/****************************************************************************/
@@ -1270,7 +1266,10 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array,
if (type() == SUM_FUNC_ITEM && skip_registered &&
((Item_sum *) this)->ref_by)
return;
- if (type() != SUM_FUNC_ITEM && with_sum_func)
+ if ((type() != SUM_FUNC_ITEM && with_sum_func) ||
+ (type() == FUNC_ITEM &&
+ (((Item_func *) this)->functype() == Item_func::ISNOTNULLTEST_FUNC ||
+ ((Item_func *) this)->functype() == Item_func::TRIG_COND_FUNC)))
{
/* Will split complicated items and ignore simple ones */
split_sum_func(thd, ref_pointer_array, fields);
@@ -1715,7 +1714,7 @@ void Item_field::set_field(Field *field_par)
field=result_field=field_par; // for easy coding with fields
maybe_null=field->maybe_null();
decimals= field->decimals();
- max_length= field_par->max_length();
+ max_length= field_par->max_display_length();
table_name= *field_par->table_name;
field_name= field_par->field_name;
db_name= field_par->table->s->db.str;
@@ -2424,21 +2423,22 @@ void Item_param::set_decimal(const char *str, ulong length)
the fact that even wrong value sent over binary protocol fits into
MAX_DATE_STRING_REP_LENGTH buffer.
*/
-void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg)
+void Item_param::set_time(TIME *tm, timestamp_type time_type,
+ uint32 max_length_arg)
{
DBUG_ENTER("Item_param::set_time");
value.time= *tm;
- value.time.time_type= type;
+ value.time.time_type= time_type;
if (value.time.year > 9999 || value.time.month > 12 ||
value.time.day > 31 ||
- type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23 ||
+ time_type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23 ||
value.time.minute > 59 || value.time.second > 59)
{
char buff[MAX_DATE_STRING_REP_LENGTH];
uint length= my_TIME_to_str(&value.time, buff);
- make_truncated_value_warning(current_thd, buff, length, type, 0);
+ make_truncated_value_warning(current_thd, buff, length, time_type, 0);
set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR);
}
@@ -2904,7 +2904,7 @@ bool Item_param::basic_const_item() const
Item *
-Item_param::new_item()
+Item_param::clone_item()
{
/* see comments in the header file */
switch (state) {
@@ -3521,28 +3521,29 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
thd->lex->in_sum_func->nest_level ==
thd->lex->current_select->nest_level)
{
- Item::Type type= (*reference)->type();
+ Item::Type ref_type= (*reference)->type();
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
select->nest_level);
set_field(*from_field);
fixed= 1;
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex, this,
- ((type == REF_ITEM || type == FIELD_ITEM) ?
+ ((ref_type == REF_ITEM ||
+ ref_type == FIELD_ITEM) ?
(Item_ident*) (*reference) : 0));
return 0;
}
}
else
{
- Item::Type type= (*reference)->type();
+ Item::Type ref_type= (*reference)->type();
prev_subselect_item->used_tables_cache|=
(*reference)->used_tables();
prev_subselect_item->const_item_cache&=
(*reference)->const_item();
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex, this,
- ((type == REF_ITEM || type == FIELD_ITEM) ?
+ ((ref_type == REF_ITEM || ref_type == FIELD_ITEM) ?
(Item_ident*) (*reference) :
0));
/*
@@ -4085,7 +4086,7 @@ Item *Item_field::replace_equal_field(byte *arg)
void Item::init_make_field(Send_field *tmp_field,
- enum enum_field_types field_type)
+ enum enum_field_types field_type_arg)
{
char *empty_name= (char*) "";
tmp_field->db_name= empty_name;
@@ -4097,7 +4098,7 @@ void Item::init_make_field(Send_field *tmp_field,
tmp_field->flags= (maybe_null ? 0 : NOT_NULL_FLAG) |
(my_binary_compare(collation.collation) ?
BINARY_FLAG : 0);
- tmp_field->type=field_type;
+ tmp_field->type= field_type_arg;
tmp_field->length=max_length;
tmp_field->decimals=decimals;
if (unsigned_flag)
@@ -4112,12 +4113,12 @@ void Item::make_field(Send_field *tmp_field)
enum_field_types Item::string_field_type() const
{
- enum_field_types type= MYSQL_TYPE_VAR_STRING;
+ enum_field_types f_type= MYSQL_TYPE_VAR_STRING;
if (max_length >= 16777216)
- type= MYSQL_TYPE_LONG_BLOB;
+ f_type= MYSQL_TYPE_LONG_BLOB;
else if (max_length >= 65536)
- type= MYSQL_TYPE_MEDIUM_BLOB;
- return type;
+ f_type= MYSQL_TYPE_MEDIUM_BLOB;
+ return f_type;
}
@@ -4487,7 +4488,7 @@ bool Item_int::eq(const Item *arg, bool binary_cmp) const
}
-Item *Item_int_with_ref::new_item()
+Item *Item_int_with_ref::clone_item()
{
DBUG_ASSERT(ref->const_item());
/*
@@ -4765,10 +4766,10 @@ bool Item_null::send(Protocol *protocol, String *packet)
bool Item::send(Protocol *protocol, String *buffer)
{
bool result;
- enum_field_types type;
+ enum_field_types f_type;
LINT_INIT(result); // Will be set if null_value == 0
- switch ((type=field_type())) {
+ switch ((f_type=field_type())) {
default:
case MYSQL_TYPE_NULL:
case MYSQL_TYPE_DECIMAL:
@@ -4847,7 +4848,7 @@ bool Item::send(Protocol *protocol, String *buffer)
get_date(&tm, TIME_FUZZY_DATE);
if (!null_value)
{
- if (type == MYSQL_TYPE_DATE)
+ if (f_type == MYSQL_TYPE_DATE)
return protocol->store_date(&tm);
else
result= protocol->store(&tm);
@@ -5060,7 +5061,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
goto error;
if (from_field == view_ref_found)
{
- Item::Type type= (*reference)->type();
+ Item::Type refer_type= (*reference)->type();
prev_subselect_item->used_tables_cache|=
(*reference)->used_tables();
prev_subselect_item->const_item_cache&=
@@ -5068,7 +5069,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
DBUG_ASSERT((*reference)->type() == REF_ITEM);
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex, this,
- ((type == REF_ITEM || type == FIELD_ITEM) ?
+ ((refer_type == REF_ITEM ||
+ refer_type == FIELD_ITEM) ?
(Item_ident*) (*reference) :
0));
/*
@@ -5814,8 +5816,8 @@ bool Item_trigger_field::fix_fields(THD *thd, Item **items)
{
table_grants->want_privilege= want_privilege;
- if (check_grant_column(thd, table_grants, triggers->table->s->db.str,
- triggers->table->s->table_name.str, field_name,
+ if (check_grant_column(thd, table_grants, triggers->trigger_table->s->db.str,
+ triggers->trigger_table->s->table_name.str, field_name,
strlen(field_name), thd->security_ctx))
return TRUE;
}
@@ -5928,7 +5930,8 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item)
DBUG_ASSERT(item_row->cols() == comp_item_row->cols());
col= item_row->cols();
while (col-- > 0)
- resolve_const_item(thd, item_row->addr(col), comp_item_row->el(col));
+ resolve_const_item(thd, item_row->addr(col),
+ comp_item_row->element_index(col));
break;
}
/* Fallthrough */
@@ -6196,7 +6199,7 @@ bool Item_cache_row::setup(Item * item)
return 1;
for (uint i= 0; i < item_count; i++)
{
- Item *el= item->el(i);
+ Item *el= item->element_index(i);
Item_cache *tmp;
if (!(tmp= values[i]= Item_cache::get_cache(el->result_type())))
return 1;
@@ -6212,7 +6215,7 @@ void Item_cache_row::store(Item * item)
item->bring_value();
for (uint i= 0; i < item_count; i++)
{
- values[i]->store(item->el(i));
+ values[i]->store(item->element_index(i));
null_value|= values[i]->null_value;
}
}
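
The sql/item.cc hunks follow the same warning-cleanup pattern: locals and parameters named type become ref_type, refer_type, f_type or time_type so they no longer hide Item::type(), callers of el() switch to element_index(), and new_item() becomes clone_item(). A compact sketch of why a parameter named after a member function is worth renaming (simplified stand-in classes, not the real Item hierarchy):

#include <cstdio>

struct Item_demo
{
  int type() const { return 7; }                 // member named "type"

  // With the old signature "void set_time(int type)" the parameter hid the
  // member, so an unqualified call to type() no longer compiled.
  void set_time(int time_type) const
  {
    std::printf("arg=%d member=%d\n", time_type, type());
  }
};

int main()
{
  Item_demo item;
  item.set_time(3);
  return 0;
}
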
diff --git a/sql/item.h b/sql/item.h
index 2ab716872f0..09c118a2e14 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -687,7 +687,7 @@ public:
*/
virtual bool basic_const_item() const { return 0; }
/* cloning of constant items (0 if it is not const) */
- virtual Item *new_item() { return 0; }
+ virtual Item *clone_item() { return 0; }
virtual cond_result eq_cmp_result() const { return COND_OK; }
inline uint float_length(uint decimals_par) const
{ return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;}
@@ -818,6 +818,7 @@ public:
virtual bool collect_item_field_processor(byte * arg) { return 0; }
virtual bool find_item_in_field_list_processor(byte *arg) { return 0; }
virtual bool change_context_processor(byte *context) { return 0; }
+ virtual bool reset_query_id_processor(byte *query_id_arg) { return 0; }
virtual bool is_expensive_processor(byte *arg) { return 0; }
virtual bool register_field_in_read_map(byte *arg) { return 0; }
/*
@@ -895,11 +896,11 @@ public:
For SP local variable returns address of pointer to Item representing its
current value and pointer passed via parameter otherwise.
*/
- virtual Item **this_item_addr(THD *thd, Item **addr) { return addr; }
+ virtual Item **this_item_addr(THD *thd, Item **addr_arg) { return addr_arg; }
// Row emulation
virtual uint cols() { return 1; }
- virtual Item* el(uint i) { return this; }
+ virtual Item* element_index(uint i) { return this; }
virtual Item** addr(uint i) { return 0; }
virtual bool check_cols(uint c);
// It is not row => null inside is impossible
@@ -1165,7 +1166,8 @@ class Item_name_const : public Item
Item *value_item;
Item *name_item;
public:
- Item_name_const(Item *name, Item *val): value_item(val), name_item(name)
+ Item_name_const(Item *name_arg, Item *val):
+ value_item(val), name_item(name_arg)
{
Item::maybe_null= TRUE;
}
@@ -1385,7 +1387,7 @@ public:
Item *equal_fields_propagator(byte *arg);
bool set_no_const_sub(byte *arg);
Item *replace_equal_field(byte *arg);
- inline uint32 max_disp_length() { return field->max_length(); }
+ inline uint32 max_disp_length() { return field->max_display_length(); }
Item_field *filed_for_view_update() { return this; }
Item *safe_charset_converter(CHARSET_INFO *tocs);
int fix_outer_field(THD *thd, Field **field, Item **reference);
@@ -1419,7 +1421,7 @@ public:
/* to prevent drop fixed flag (no need parent cleanup call) */
void cleanup() {}
bool basic_const_item() const { return 1; }
- Item *new_item() { return new Item_null(name); }
+ Item *clone_item() { return new Item_null(name); }
bool is_null() { return 1; }
void print(String *str) { str->append(STRING_WITH_LEN("NULL")); }
Item *safe_charset_converter(CHARSET_INFO *tocs);
@@ -1568,7 +1570,7 @@ public:
basic_const_item returned TRUE.
*/
Item *safe_charset_converter(CHARSET_INFO *tocs);
- Item *new_item();
+ Item *clone_item();
/*
Implement by-value equality evaluation if parameter value
is set and is a basic constant (integer, real or string).
@@ -1600,7 +1602,7 @@ public:
String *val_str(String*);
int save_in_field(Field *field, bool no_conversions);
bool basic_const_item() const { return 1; }
- Item *new_item() { return new Item_int(name,value,max_length); }
+ Item *clone_item() { return new Item_int(name,value,max_length); }
// to prevent drop fixed flag (no need parent cleanup call)
void cleanup() {}
void print(String *str);
@@ -1620,7 +1622,7 @@ public:
double val_real()
{ DBUG_ASSERT(fixed == 1); return ulonglong2double((ulonglong)value); }
String *val_str(String*);
- Item *new_item() { return new Item_uint(name,max_length); }
+ Item *clone_item() { return new Item_uint(name,max_length); }
int save_in_field(Field *field, bool no_conversions);
void print(String *str);
Item_num *neg ();
@@ -1651,7 +1653,7 @@ public:
my_decimal *val_decimal(my_decimal *val) { return &decimal_value; }
int save_in_field(Field *field, bool no_conversions);
bool basic_const_item() const { return 1; }
- Item *new_item()
+ Item *clone_item()
{
return new Item_decimal(name, &decimal_value, decimals, max_length);
}
@@ -1709,7 +1711,7 @@ public:
bool basic_const_item() const { return 1; }
// to prevent drop fixed flag (no need parent cleanup call)
void cleanup() {}
- Item *new_item()
+ Item *clone_item()
{ return new Item_float(name, value, decimals, max_length); }
Item_num *neg() { value= -value; return this; }
void print(String *str);
@@ -1795,7 +1797,7 @@ public:
enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
bool basic_const_item() const { return 1; }
bool eq(const Item *item, bool binary_cmp) const;
- Item *new_item()
+ Item *clone_item()
{
return new Item_string(name, str_value.ptr(),
str_value.length(), collation.collation);
@@ -1849,9 +1851,9 @@ class Item_return_int :public Item_int
{
enum_field_types int_field_type;
public:
- Item_return_int(const char *name, uint length,
+ Item_return_int(const char *name_arg, uint length,
enum_field_types field_type_arg)
- :Item_int(name, 0, length), int_field_type(field_type_arg)
+ :Item_int(name_arg, 0, length), int_field_type(field_type_arg)
{
unsigned_flag=1;
}
@@ -2115,7 +2117,7 @@ public:
{
return ref->save_in_field(field, no_conversions);
}
- Item *new_item();
+ Item *clone_item();
virtual Item *real_item() { return ref; }
bool check_partition_func_processor(byte *int_arg) {return TRUE;}
};
@@ -2553,8 +2555,8 @@ public:
enum Item_result result_type() const { return ROW_RESULT; }
uint cols() { return item_count; }
- Item* el(uint i) { return values[i]; }
- Item** addr(uint i) { return (Item **) (values + i); }
+ Item *element_index(uint i) { return values[i]; }
+ Item **addr(uint i) { return (Item **) (values + i); }
bool check_cols(uint c);
bool null_inside();
void bring_value();
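
sql/item.h renames the row-emulation accessor el() to element_index() and the constant-cloning hook new_item() to clone_item() across the Item hierarchy; the default behaviour is unchanged (a scalar Item reports one column and returns itself, and clone_item() returns 0 for non-constants). A minimal sketch of that interface with simplified stand-in classes:

#include <cstdio>
#include <vector>

struct Item_demo
{
  virtual ~Item_demo() {}
  // Row emulation: a scalar item behaves as a one-column row of itself.
  virtual unsigned cols() { return 1; }
  virtual Item_demo *element_index(unsigned) { return this; }
  // Cloning of constant items; 0 means "not a constant".
  virtual Item_demo *clone_item() { return 0; }
};

struct Item_row_demo : Item_demo
{
  std::vector<Item_demo *> items;
  virtual unsigned cols() { return (unsigned) items.size(); }
  virtual Item_demo *element_index(unsigned i) { return items[i]; }
};

int main()
{
  Item_demo scalar;
  Item_row_demo row;
  row.items.push_back(&scalar);
  row.items.push_back(&scalar);
  std::printf("scalar cols=%u row cols=%u\n", scalar.cols(), row.cols());
  return 0;
}
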
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index bccb184a6ef..89f57846f9b 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -328,7 +328,7 @@ static bool convert_constant_item(THD *thd, Field *field, Item **item)
void Item_bool_func2::fix_length_and_dec()
{
max_length= 1; // Function returns 0 or 1
- THD *thd= current_thd;
+ THD *thd;
/*
As some compare functions are generated after sql_yacc,
@@ -366,12 +366,13 @@ void Item_bool_func2::fix_length_and_dec()
return;
}
+ thd= current_thd;
if (!thd->is_context_analysis_only())
{
- Item *real_item= args[0]->real_item();
- if (real_item->type() == FIELD_ITEM)
+ Item *arg_real_item= args[0]->real_item();
+ if (arg_real_item->type() == FIELD_ITEM)
{
- Field *field=((Item_field*) real_item)->field;
+ Field *field=((Item_field*) arg_real_item)->field;
if (field->can_be_compared_as_longlong())
{
if (convert_constant_item(thd, field,&args[1]))
@@ -383,10 +384,10 @@ void Item_bool_func2::fix_length_and_dec()
}
}
}
- real_item= args[1]->real_item();
- if (real_item->type() == FIELD_ITEM /* && !real_item->const_item() */)
+ arg_real_item= args[1]->real_item();
+ if (arg_real_item->type() == FIELD_ITEM)
{
- Field *field=((Item_field*) real_item)->field;
+ Field *field=((Item_field*) arg_real_item)->field;
if (field->can_be_compared_as_longlong())
{
if (convert_constant_item(thd, field,&args[0]))
@@ -422,9 +423,9 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type)
return 1;
for (uint i=0; i < n; i++)
{
- if ((*a)->el(i)->cols() != (*b)->el(i)->cols())
+ if ((*a)->element_index(i)->cols() != (*b)->element_index(i)->cols())
{
- my_error(ER_OPERAND_COLUMNS, MYF(0), (*a)->el(i)->cols());
+ my_error(ER_OPERAND_COLUMNS, MYF(0), (*a)->element_index(i)->cols());
return 1;
}
comparators[i].set_cmp_func(owner, (*a)->addr(i), (*b)->addr(i));
@@ -858,10 +859,10 @@ bool Item_in_optimizer::fix_left(THD *thd, Item **ref)
uint n= cache->cols();
for (uint i= 0; i < n; i++)
{
- if (args[0]->el(i)->used_tables())
- ((Item_cache *)cache->el(i))->set_used_tables(OUTER_REF_TABLE_BIT);
+ if (args[0]->element_index(i)->used_tables())
+ ((Item_cache *)cache->element_index(i))->set_used_tables(OUTER_REF_TABLE_BIT);
else
- ((Item_cache *)cache->el(i))->set_used_tables(0);
+ ((Item_cache *)cache->element_index(i))->set_used_tables(0);
}
used_tables_cache= args[0]->used_tables();
}
@@ -936,7 +937,7 @@ longlong Item_in_optimizer::val_int()
if (cache->cols() == 1)
{
item_subs->set_cond_guard_var(0, FALSE);
- longlong tmp= args[1]->val_bool_result();
+ (void) args[1]->val_bool_result();
result_for_null_param= null_value= !item_subs->engine->no_rows();
item_subs->set_cond_guard_var(0, TRUE);
}
@@ -950,11 +951,11 @@ longlong Item_in_optimizer::val_int()
*/
for (i= 0; i < ncols; i++)
{
- if (cache->el(i)->null_value)
+ if (cache->element_index(i)->null_value)
item_subs->set_cond_guard_var(i, FALSE);
}
- longlong tmp= args[1]->val_bool_result();
+ (void) args[1]->val_bool_result();
result_for_null_param= null_value= !item_subs->engine->no_rows();
/* Turn all predicates back on */
@@ -1074,15 +1075,17 @@ longlong Item_func_strcmp::val_int()
void Item_func_interval::fix_length_and_dec()
{
- use_decimal_comparison= (row->el(0)->result_type() == DECIMAL_RESULT) ||
- (row->el(0)->result_type() == INT_RESULT);
+ use_decimal_comparison= ((row->element_index(0)->result_type() ==
+ DECIMAL_RESULT) ||
+ (row->element_index(0)->result_type() ==
+ INT_RESULT));
if (row->cols() > 8)
{
bool consts=1;
for (uint i=1 ; consts && i < row->cols() ; i++)
{
- consts&= row->el(i)->const_item();
+ consts&= row->element_index(i)->const_item();
}
if (consts &&
@@ -1093,7 +1096,7 @@ void Item_func_interval::fix_length_and_dec()
{
for (uint i=1 ; i < row->cols(); i++)
{
- Item *el= row->el(i);
+ Item *el= row->element_index(i);
interval_range *range= intervals + (i-1);
if ((el->result_type() == DECIMAL_RESULT) ||
(el->result_type() == INT_RESULT))
@@ -1118,7 +1121,7 @@ void Item_func_interval::fix_length_and_dec()
{
for (uint i=1 ; i < row->cols(); i++)
{
- intervals[i-1].dbl= row->el(i)->val_real();
+ intervals[i-1].dbl= row->element_index(i)->val_real();
}
}
}
@@ -1158,15 +1161,15 @@ longlong Item_func_interval::val_int()
if (use_decimal_comparison)
{
- dec= row->el(0)->val_decimal(&dec_buf);
- if (row->el(0)->null_value)
+ dec= row->element_index(0)->val_decimal(&dec_buf);
+ if (row->element_index(0)->null_value)
return -1;
my_decimal2double(E_DEC_FATAL_ERROR, dec, &value);
}
else
{
- value= row->el(0)->val_real();
- if (row->el(0)->null_value)
+ value= row->element_index(0)->val_real();
+ if (row->element_index(0)->null_value)
return -1;
}
@@ -1202,16 +1205,16 @@ longlong Item_func_interval::val_int()
for (i=1 ; i < row->cols() ; i++)
{
- Item *el= row->el(i);
+ Item *el= row->element_index(i);
if (use_decimal_comparison &&
((el->result_type() == DECIMAL_RESULT) ||
(el->result_type() == INT_RESULT)))
{
- my_decimal e_dec_buf, *e_dec= row->el(i)->val_decimal(&e_dec_buf);
+ my_decimal e_dec_buf, *e_dec= row->element_index(i)->val_decimal(&e_dec_buf);
if (my_decimal_cmp(e_dec, dec) > 0)
return i-1;
}
- else if (row->el(i)->val_real() > value)
+ else if (row->element_index(i)->val_real() > value)
return i-1;
}
return i-1;
@@ -1905,7 +1908,9 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref)
buff should match stack usage from
Item_func_case::val_int() -> Item_func_case::find_item()
*/
+#ifndef EMBEDDED_LIBRARY
char buff[MAX_FIELD_WIDTH*2+sizeof(String)*2+sizeof(String*)*2+sizeof(double)*2+sizeof(longlong)*2];
+#endif
bool res= Item_func::fix_fields(thd, ref);
/*
Call check_stack_overrun after fix_fields to be sure that stack variable
@@ -1922,7 +1927,6 @@ void Item_func_case::fix_length_and_dec()
{
Item **agg;
uint nagg;
- THD *thd= current_thd;
uint found_types= 0;
if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1))))
return;
@@ -2383,11 +2387,11 @@ void cmp_item_row::store_value(Item *item)
{
if (!comparators[i])
if (!(comparators[i]=
- cmp_item::get_comparator(item->el(i)->result_type(),
- item->el(i)->collation.collation)))
+ cmp_item::get_comparator(item->element_index(i)->result_type(),
+ item->element_index(i)->collation.collation)))
break; // new failed
- comparators[i]->store_value(item->el(i));
- item->null_value|= item->el(i)->null_value;
+ comparators[i]->store_value(item->element_index(i));
+ item->null_value|= item->element_index(i)->null_value;
}
}
DBUG_VOID_RETURN;
@@ -2412,8 +2416,8 @@ void cmp_item_row::store_value_by_template(cmp_item *t, Item *item)
if (!(comparators[i]= tmpl->comparators[i]->make_same()))
break; // new failed
comparators[i]->store_value_by_template(tmpl->comparators[i],
- item->el(i));
- item->null_value|= item->el(i)->null_value;
+ item->element_index(i));
+ item->null_value|= item->element_index(i)->null_value;
}
}
}
@@ -2431,9 +2435,9 @@ int cmp_item_row::cmp(Item *arg)
arg->bring_value();
for (uint i=0; i < n; i++)
{
- if (comparators[i]->cmp(arg->el(i)))
+ if (comparators[i]->cmp(arg->element_index(i)))
{
- if (!arg->el(i)->null_value)
+ if (!arg->element_index(i)->null_value)
return 1;
was_null= 1;
}
@@ -2444,11 +2448,11 @@ int cmp_item_row::cmp(Item *arg)
int cmp_item_row::compare(cmp_item *c)
{
- cmp_item_row *cmp= (cmp_item_row *) c;
+ cmp_item_row *l_cmp= (cmp_item_row *) c;
for (uint i=0; i < n; i++)
{
int res;
- if ((res= comparators[i]->compare(cmp->comparators[i])))
+ if ((res= comparators[i]->compare(l_cmp->comparators[i])))
return res;
}
return 0;
@@ -2475,8 +2479,8 @@ int cmp_item_decimal::cmp(Item *arg)
int cmp_item_decimal::compare(cmp_item *arg)
{
- cmp_item_decimal *cmp= (cmp_item_decimal*) arg;
- return my_decimal_cmp(&value, &cmp->value);
+ cmp_item_decimal *l_cmp= (cmp_item_decimal*) arg;
+ return my_decimal_cmp(&value, &l_cmp->value);
}
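
Beyond the element_index() and l_cmp renames, Item_bool_func2::fix_length_and_dec() above now leaves thd unset until after the early return for comparators generated past the parser stage, so the current_thd lookup happens only on the path that actually uses it. A tiny sketch of that deferral pattern (get_context() stands in for current_thd; all names are hypothetical):

#include <cstdio>

struct Context_demo { int id; };

static Context_demo *get_context()         // imagine a thread-local lookup
{
  static Context_demo ctx = { 1 };
  return &ctx;
}

static int fix_length_and_dec_demo(bool generated_after_parse)
{
  Context_demo *ctx;                       // declared up front, fetched late
  if (generated_after_parse)
    return 0;                              // early exit: no lookup performed
  ctx = get_context();
  return ctx->id;
}

int main()
{
  std::printf("%d %d\n", fix_length_and_dec_demo(true),
              fix_length_and_dec_demo(false));
  return 0;
}
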
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 00de34b02fc..4a1519a5498 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -818,10 +818,10 @@ public:
return (value_res ? (res ? sortcmp(value_res, res, cmp_charset) : 1) :
(res ? -1 : 0));
}
- int compare(cmp_item *c)
+ int compare(cmp_item *ci)
{
- cmp_item_string *cmp= (cmp_item_string *)c;
- return sortcmp(value_res, cmp->value_res, cmp_charset);
+ cmp_item_string *l_cmp= (cmp_item_string *) ci;
+ return sortcmp(value_res, l_cmp->value_res, cmp_charset);
}
cmp_item *make_same();
void set_charset(CHARSET_INFO *cs)
@@ -844,10 +844,10 @@ public:
{
return value != arg->val_int();
}
- int compare(cmp_item *c)
+ int compare(cmp_item *ci)
{
- cmp_item_int *cmp= (cmp_item_int *)c;
- return (value < cmp->value) ? -1 : ((value == cmp->value) ? 0 : 1);
+ cmp_item_int *l_cmp= (cmp_item_int *)ci;
+ return (value < l_cmp->value) ? -1 : ((value == l_cmp->value) ? 0 : 1);
}
cmp_item *make_same();
};
@@ -865,10 +865,10 @@ public:
{
return value != arg->val_real();
}
- int compare(cmp_item *c)
+ int compare(cmp_item *ci)
{
- cmp_item_real *cmp= (cmp_item_real *)c;
- return (value < cmp->value)? -1 : ((value == cmp->value) ? 0 : 1);
+ cmp_item_real *l_cmp= (cmp_item_real *) ci;
+ return (value < l_cmp->value)? -1 : ((value == l_cmp->value) ? 0 : 1);
}
cmp_item *make_same();
};
@@ -934,10 +934,10 @@ public:
DBUG_ASSERT(0);
return 1;
}
- int compare(cmp_item *c)
+ int compare(cmp_item *ci)
{
- cmp_item_string *cmp= (cmp_item_string *)c;
- return sortcmp(value_res, cmp->value_res, cmp_charset);
+ cmp_item_string *l_cmp= (cmp_item_string *) ci;
+ return sortcmp(value_res, l_cmp->value_res, cmp_charset);
}
cmp_item *make_same()
{
@@ -1455,7 +1455,7 @@ public:
Item_cond_and() :Item_cond() {}
Item_cond_and(Item *i1,Item *i2) :Item_cond(i1,i2) {}
Item_cond_and(THD *thd, Item_cond_and *item) :Item_cond(thd, item) {}
- Item_cond_and(List<Item> &list): Item_cond(list) {}
+ Item_cond_and(List<Item> &list_arg): Item_cond(list_arg) {}
enum Functype functype() const { return COND_AND_FUNC; }
longlong val_int();
const char *func_name() const { return "and"; }
@@ -1477,7 +1477,7 @@ public:
Item_cond_or() :Item_cond() {}
Item_cond_or(Item *i1,Item *i2) :Item_cond(i1,i2) {}
Item_cond_or(THD *thd, Item_cond_or *item) :Item_cond(thd, item) {}
- Item_cond_or(List<Item> &list): Item_cond(list) {}
+ Item_cond_or(List<Item> &list_arg): Item_cond(list_arg) {}
enum Functype functype() const { return COND_OR_FUNC; }
longlong val_int();
const char *func_name() const { return "or"; }
diff --git a/sql/item_func.cc b/sql/item_func.cc
index fe3a2c393fb..82e6196183b 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -997,7 +997,7 @@ String *Item_decimal_typecast::val_str(String *str)
my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
if (null_value)
return NULL;
- my_decimal2string(E_DEC_FATAL_ERROR, &tmp_buf, 0, 0, 0, str);
+ my_decimal2string(E_DEC_FATAL_ERROR, tmp, 0, 0, 0, str);
return str;
}
@@ -2375,7 +2375,7 @@ longlong Item_func_locate::val_int()
b->ptr(), b->length(),
&match, 1))
return 0;
- return (longlong) match.mblen + start0 + 1;
+ return (longlong) match.mb_len + start0 + 1;
}
@@ -3085,7 +3085,13 @@ public:
int count;
bool locked;
pthread_cond_t cond;
+#ifndef EMBEDDED_LIBRARY
pthread_t thread;
+ void set_thread(THD *thd) { thread= thd->real_id; }
+#else
+ THD *thread;
+ void set_thread(THD *thd) { thread= thd; }
+#endif /*EMBEDDED_LIBRARY*/
ulong thread_id;
User_level_lock(const char *key_arg,uint length, ulong id)
@@ -3173,9 +3179,9 @@ longlong Item_master_pos_wait::val_int()
null_value = 1;
return 0;
}
+#ifdef HAVE_REPLICATION
longlong pos = (ulong)args[1]->val_int();
longlong timeout = (arg_count==3) ? args[2]->val_int() : 0 ;
-#ifdef HAVE_REPLICATION
if ((event_count = active_mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2)
{
null_value = 1;
@@ -3239,7 +3245,7 @@ void debug_sync_point(const char* lock_name, uint lock_timeout)
else
{
ull->locked=1;
- ull->thread=thd->real_id;
+ ull->set_thread(thd);
thd->ull=ull;
}
pthread_mutex_unlock(&LOCK_user_locks);
@@ -3314,7 +3320,7 @@ longlong Item_func_get_lock::val_int()
null_value=1; // Probably out of memory
return 0;
}
- ull->thread=thd->real_id;
+ ull->set_thread(thd);
thd->ull=ull;
pthread_mutex_unlock(&LOCK_user_locks);
return 1; // Got new lock
@@ -3355,7 +3361,7 @@ longlong Item_func_get_lock::val_int()
else // We got the lock
{
ull->locked=1;
- ull->thread=thd->real_id;
+ ull->set_thread(thd);
ull->thread_id= thd->thread_id;
thd->ull=ull;
error=0;
@@ -3404,7 +3410,7 @@ longlong Item_func_release_lock::val_int()
else
{
#ifdef EMBEDDED_LIBRARY
- if (ull->locked && pthread_equal(current_thd->real_id,ull->thread))
+ if (ull->locked && (current_thd == ull->thread))
#else
if (ull->locked && pthread_equal(pthread_self(),ull->thread))
#endif
@@ -3711,7 +3717,8 @@ update_hash(user_var_entry *entry, bool set_null, void *ptr, uint length,
bool
-Item_func_set_user_var::update_hash(void *ptr, uint length, Item_result type,
+Item_func_set_user_var::update_hash(void *ptr, uint length,
+ Item_result res_type,
CHARSET_INFO *cs, Derivation dv,
bool unsigned_arg)
{
@@ -3720,9 +3727,9 @@ Item_func_set_user_var::update_hash(void *ptr, uint length, Item_result type,
result type of the variable
*/
if ((null_value= args[0]->null_value) && null_item)
- type= entry->type; // Don't change type of item
+ res_type= entry->type; // Don't change type of item
if (::update_hash(entry, (null_value= args[0]->null_value),
- ptr, length, type, cs, dv, unsigned_arg))
+ ptr, length, res_type, cs, dv, unsigned_arg))
{
current_thd->fatal_error(); // Probably end of memory
null_value= 1;
@@ -4860,7 +4867,7 @@ longlong Item_func_bit_xor::val_int()
thd Thread handler
var_type global / session
name Name of base or system variable
- component Component.
+ component Component
NOTES
If component.str = 0 then the variable name is in 'name'
@@ -4972,8 +4979,9 @@ longlong Item_func_row_count::val_int()
}
-Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name)
- :Item_func(), context(context_arg), m_name(name), m_sp(NULL),
+Item_func_sp::Item_func_sp(Name_resolution_context *context_arg,
+ sp_name *name_arg)
+ :Item_func(), context(context_arg), m_name(name_arg), m_sp(NULL),
result_field(NULL)
{
maybe_null= 1;
@@ -4984,8 +4992,8 @@ Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, sp_name *name)
Item_func_sp::Item_func_sp(Name_resolution_context *context_arg,
- sp_name *name, List<Item> &list)
- :Item_func(list), context(context_arg), m_name(name), m_sp(NULL),
+ sp_name *name_arg, List<Item> &list)
+ :Item_func(list), context(context_arg), m_name(name_arg), m_sp(NULL),
result_field(NULL)
{
maybe_null= 1;
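
The User_level_lock change in sql/item_func.cc above replaces direct assignments of thd->real_id with a small set_thread() accessor, because the lock owner is a pthread_t in a normal server build but a THD pointer in the embedded library. A self-contained sketch of that wrapper (the _demo types are simplified stand-ins and EMBEDDED_LIBRARY_DEMO mimics the real EMBEDDED_LIBRARY switch):

#include <pthread.h>
#include <cstdio>

struct THD_demo { pthread_t real_id; };

struct User_level_lock_demo
{
#ifndef EMBEDDED_LIBRARY_DEMO
  pthread_t thread;
  void set_thread(THD_demo *thd) { thread = thd->real_id; }
#else
  THD_demo *thread;
  void set_thread(THD_demo *thd) { thread = thd; }
#endif
};

int main()
{
  THD_demo thd;
  thd.real_id = pthread_self();
  User_level_lock_demo ull;
  ull.set_thread(&thd);              // call sites stay identical in both builds
  std::printf("lock owner recorded\n");
  return 0;
}
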
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 1b8c8d6a161..35a9f026b1d 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -701,8 +701,9 @@ double Item_func_glength::val_real()
null_value= (!swkb ||
!(geom= Geometry::construct(&buffer,
- swkb->ptr(), swkb->length())) ||
- geom->length(&res));
+ swkb->ptr(),
+ swkb->length())) ||
+ geom->geom_length(&res));
return res;
}
diff --git a/sql/item_row.h b/sql/item_row.h
index 503e48ca16b..d55d3ae223f 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -71,7 +71,7 @@ public:
Item *transform(Item_transformer transformer, byte *arg);
uint cols() { return arg_count; }
- Item* el(uint i) { return items[i]; }
+ Item* element_index(uint i) { return items[i]; }
Item** addr(uint i) { return items + i; }
bool check_cols(uint c);
bool null_inside() { return with_null; };
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 89a85a19f56..faea5380a66 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -37,15 +37,6 @@ C_MODE_END
String my_empty_string("",default_charset_info);
-static void my_coll_agg_error(DTCollation &c1, DTCollation &c2,
- const char *fname)
-{
- my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0),
- c1.collation->name, c1.derivation_name(),
- c2.collation->name, c2.derivation_name(),
- fname);
-}
-
String *Item_str_func::check_well_formed_result(String *str)
{
@@ -3273,15 +3264,17 @@ String *Item_func_uuid::val_str(String *str)
int i;
if (my_gethwaddr(mac))
{
+ /* purecov: begin inspected */
/*
generating random "hardware addr"
and because specs explicitly specify that it should NOT correlate
with a clock_seq value (initialized random below), we use a separate
randominit() here
*/
- randominit(&uuid_rand, tmp + (ulong) thd, tmp + (ulong)query_id);
+ randominit(&uuid_rand, tmp + (ulong) thd, tmp + (ulong)global_query_id);
for (i=0; i < (int)sizeof(mac); i++)
mac[i]=(uchar)(my_rnd(&uuid_rand)*255);
+ /* purecov: end */
}
s=clock_seq_and_node_str+sizeof(clock_seq_and_node_str)-1;
for (i=sizeof(mac)-1 ; i>=0 ; i--)
@@ -3289,7 +3282,7 @@ String *Item_func_uuid::val_str(String *str)
*--s=_dig_vec_lower[mac[i] & 15];
*--s=_dig_vec_lower[mac[i] >> 4];
}
- randominit(&uuid_rand, tmp + (ulong)start_time,
+ randominit(&uuid_rand, tmp + (ulong) server_start_time,
tmp + thd->status_var.bytes_sent);
set_clock_seq_str();
}
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index e131072d9bf..de67a314631 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -238,6 +238,10 @@ bool Item_subselect::exec()
{
int res;
+ if (thd->net.report_error)
+ /* Do not execute subselect in case of a fatal error */
+ return 1;
+
res= engine->exec();
if (engine_changed)
@@ -271,11 +275,11 @@ bool Item_subselect::const_item() const
return const_item_cache;
}
-Item *Item_subselect::get_tmp_table_item(THD *thd)
+Item *Item_subselect::get_tmp_table_item(THD *thd_arg)
{
if (!with_sum_func && !const_item())
return new Item_field(result_field);
- return copy_or_same(thd);
+ return copy_or_same(thd_arg);
}
void Item_subselect::update_used_tables()
@@ -590,13 +594,13 @@ void Item_exists_subselect::print(String *str)
}
-bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit)
+bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit_arg)
{
- if (unit->fake_select_lex &&
- unit->fake_select_lex->test_limit())
+ if (unit_arg->fake_select_lex &&
+ unit_arg->fake_select_lex->test_limit())
return(1);
- SELECT_LEX *sl= unit->first_select();
+ SELECT_LEX *sl= unit_arg->first_select();
for (; sl; sl= sl->next_select())
{
if (sl->test_limit())
@@ -868,7 +872,6 @@ Item_subselect::trans_res
Item_in_subselect::single_value_transformer(JOIN *join,
Comp_creator *func)
{
- Item_subselect::trans_res result= RES_ERROR;
SELECT_LEX *select_lex= join->select_lex;
DBUG_ENTER("Item_in_subselect::single_value_transformer");
@@ -966,7 +969,7 @@ Item_in_subselect::single_value_transformer(JOIN *join,
if (!substitution)
{
/* We're invoked for the 1st (or the only) SELECT in the subquery UNION */
- SELECT_LEX_UNIT *unit= select_lex->master_unit();
+ SELECT_LEX_UNIT *master_unit= select_lex->master_unit();
substitution= optimizer;
SELECT_LEX *current= thd->lex->current_select, *up;
@@ -989,7 +992,7 @@ Item_in_subselect::single_value_transformer(JOIN *join,
(char *)"<no matter>",
(char *)in_left_expr_name);
- unit->uncacheable|= UNCACHEABLE_DEPENDENT;
+ master_unit->uncacheable|= UNCACHEABLE_DEPENDENT;
}
if (!abort_on_null && left_expr->maybe_null && !pushed_cond_guards)
{
@@ -1188,7 +1191,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
if (!substitution)
{
//first call for this unit
- SELECT_LEX_UNIT *unit= select_lex->master_unit();
+ SELECT_LEX_UNIT *master_unit= select_lex->master_unit();
substitution= optimizer;
SELECT_LEX *current= thd->lex->current_select, *up;
@@ -1204,7 +1207,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
optimizer->keep_top_level_cache();
thd->lex->current_select= current;
- unit->uncacheable|= UNCACHEABLE_DEPENDENT;
+ master_unit->uncacheable|= UNCACHEABLE_DEPENDENT;
if (!abort_on_null && left_expr->maybe_null && !pushed_cond_guards)
{
@@ -1237,7 +1240,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
{
DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed);
if (select_lex->ref_pointer_array[i]->
- check_cols(left_expr->el(i)->cols()))
+ check_cols(left_expr->element_index(i)->cols()))
DBUG_RETURN(RES_ERROR);
Item *item_eq=
new Item_func_eq(new
@@ -1260,7 +1263,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
(char *)"<list ref>")
);
Item *col_item= new Item_cond_or(item_eq, item_isnull);
- if (!abort_on_null && left_expr->el(i)->maybe_null)
+ if (!abort_on_null && left_expr->element_index(i)->maybe_null)
{
if (!(col_item= new Item_func_trig_cond(col_item, get_cond_guard(i))))
DBUG_RETURN(RES_ERROR);
@@ -1274,7 +1277,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
ref_pointer_array + i,
(char *)"<no matter>",
(char *)"<list ref>"));
- if (!abort_on_null && left_expr->el(i)->maybe_null)
+ if (!abort_on_null && left_expr->element_index(i)->maybe_null)
{
if (!(item_nnull_test=
new Item_func_trig_cond(item_nnull_test, get_cond_guard(i))))
@@ -1311,7 +1314,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
Item *item, *item_isnull;
DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed);
if (select_lex->ref_pointer_array[i]->
- check_cols(left_expr->el(i)->cols()))
+ check_cols(left_expr->element_index(i)->cols()))
DBUG_RETURN(RES_ERROR);
item=
new Item_func_eq(new
@@ -1351,7 +1354,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
TODO: why we create the above for cases where the right part
cant be NULL?
*/
- if (left_expr->el(i)->maybe_null)
+ if (left_expr->element_index(i)->maybe_null)
{
if (!(item= new Item_func_trig_cond(item, get_cond_guard(i))))
DBUG_RETURN(RES_ERROR);
@@ -1513,14 +1516,14 @@ void Item_in_subselect::print(String *str)
}
-bool Item_in_subselect::fix_fields(THD *thd, Item **ref)
+bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref)
{
bool result = 0;
- if(thd->lex->view_prepare_mode && left_expr && !left_expr->fixed)
- result = left_expr->fix_fields(thd, &left_expr);
+ if (thd_arg->lex->view_prepare_mode && left_expr && !left_expr->fixed)
+ result = left_expr->fix_fields(thd_arg, &left_expr);
- return result || Item_subselect::fix_fields(thd, ref);
+ return result || Item_subselect::fix_fields(thd_arg, ref);
}
@@ -1559,13 +1562,13 @@ void subselect_engine::set_thd(THD *thd_arg)
subselect_single_select_engine::
subselect_single_select_engine(st_select_lex *select,
- select_subselect *result,
- Item_subselect *item)
- :subselect_engine(item, result),
+ select_subselect *result_arg,
+ Item_subselect *item_arg)
+ :subselect_engine(item_arg, result_arg),
prepared(0), optimized(0), executed(0),
select_lex(select), join(0)
{
- select_lex->master_unit()->item= item;
+ select_lex->master_unit()->item= item_arg;
}
@@ -1802,7 +1805,6 @@ int subselect_single_select_engine::exec()
if (!executed)
{
item->reset_value_registration();
- bool have_changed_access= FALSE;
JOIN_TAB *changed_tabs[MAX_TABLES];
JOIN_TAB **last_changed_tab= changed_tabs;
if (item->have_guarded_conds())
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index aa7b6a3aeb4..fdf3708cabb 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -164,7 +164,7 @@ public:
void fix_length_and_dec();
uint cols();
- Item* el(uint i) { return my_reinterpret_cast(Item*)(row[i]); }
+ Item* element_index(uint i) { return my_reinterpret_cast(Item*)(row[i]); }
Item** addr(uint i) { return (Item**)row + i; }
bool check_cols(uint c);
bool null_inside();
@@ -532,10 +532,10 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
public:
// constructor can assign THD because it will be called after JOIN::prepare
- subselect_indexsubquery_engine(THD *thd, st_join_table *tab_arg,
+ subselect_indexsubquery_engine(THD *thd_arg, st_join_table *tab_arg,
Item_subselect *subs, Item *where,
Item *having_arg, bool chk_null)
- :subselect_uniquesubquery_engine(thd, tab_arg, subs, where),
+ :subselect_uniquesubquery_engine(thd_arg, tab_arg, subs, where),
check_null(chk_null),
having(having_arg)
{}
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 0471968b701..f34fc008186 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -1172,7 +1172,7 @@ double Item_sum_avg::val_real()
my_decimal *Item_sum_avg::val_decimal(my_decimal *val)
{
- my_decimal sum, cnt;
+ my_decimal sum_buff, cnt;
const my_decimal *sum_dec;
DBUG_ASSERT(fixed == 1);
if (!count)
@@ -1180,7 +1180,7 @@ my_decimal *Item_sum_avg::val_decimal(my_decimal *val)
null_value=1;
return NULL;
}
- sum_dec= Item_sum_sum::val_decimal(&sum);
+ sum_dec= Item_sum_sum::val_decimal(&sum_buff);
int2my_decimal(E_DEC_FATAL_ERROR, count, 0, &cnt);
my_decimal_div(E_DEC_FATAL_ERROR, val, sum_dec, &cnt, prec_increment);
return val;
@@ -1627,7 +1627,7 @@ bool Item_sum_min::add()
break;
case DECIMAL_RESULT:
{
- my_decimal value, *val= args[0]->val_decimal(&value);
+ my_decimal value_buff, *val= args[0]->val_decimal(&value_buff);
if (!args[0]->null_value &&
(null_value || (my_decimal_cmp(&sum_dec, val) > 0)))
{
@@ -1691,7 +1691,7 @@ bool Item_sum_max::add()
break;
case DECIMAL_RESULT:
{
- my_decimal value, *val= args[0]->val_decimal(&value);
+ my_decimal value_buff, *val= args[0]->val_decimal(&value_buff);
if (!args[0]->null_value &&
(null_value || (my_decimal_cmp(val, &sum_dec) > 0)))
{
@@ -1856,7 +1856,7 @@ void Item_sum_hybrid::reset_field()
}
case DECIMAL_RESULT:
{
- my_decimal value, *arg_dec= args[0]->val_decimal(&value);
+ my_decimal value_buff, *arg_dec= args[0]->val_decimal(&value_buff);
if (maybe_null)
{
@@ -2484,11 +2484,11 @@ bool Item_sum_count_distinct::setup(THD *thd)
for (tree_key_length= 0; field < field_end; ++field)
{
Field *f= *field;
- enum enum_field_types type= f->type();
+ enum enum_field_types f_type= f->type();
tree_key_length+= f->pack_length();
- if ((type == MYSQL_TYPE_VARCHAR) ||
- !f->binary() && (type == MYSQL_TYPE_STRING ||
- type == MYSQL_TYPE_VAR_STRING))
+ if ((f_type == MYSQL_TYPE_VARCHAR) ||
+ !f->binary() && (f_type == MYSQL_TYPE_STRING ||
+ f_type == MYSQL_TYPE_VAR_STRING))
{
all_binary= FALSE;
break;
@@ -3073,8 +3073,6 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
void Item_func_group_concat::cleanup()
{
- THD *thd= current_thd;
-
DBUG_ENTER("Item_func_group_concat::cleanup");
Item_sum::cleanup();
@@ -3083,7 +3081,7 @@ void Item_func_group_concat::cleanup()
{
char warn_buff[MYSQL_ERRMSG_SIZE];
sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values);
- warning->set_msg(thd, warn_buff);
+ warning->set_msg(current_thd, warn_buff);
warning= 0;
}
@@ -3113,8 +3111,7 @@ void Item_func_group_concat::cleanup()
warning= 0;
}
}
- DBUG_ASSERT(tree == 0);
- DBUG_ASSERT(warning == 0);
+ DBUG_ASSERT(tree == 0 && warning == 0);
}
DBUG_VOID_RETURN;
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index faeef7fd5db..bcfdd403acf 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -1007,7 +1007,8 @@ longlong Item_func_quarter::val_int()
{
DBUG_ASSERT(fixed == 1);
TIME ltime;
- (void) get_arg0_date(&ltime, TIME_FUZZY_DATE);
+ if (get_arg0_date(&ltime, TIME_FUZZY_DATE))
+ return 0;
return (longlong) ((ltime.month+2)/3);
}
@@ -1119,14 +1120,14 @@ String* Item_func_dayname::val_str(String* str)
{
DBUG_ASSERT(fixed == 1);
uint weekday=(uint) val_int(); // Always Item_func_daynr()
- const char *name;
+ const char *day_name;
THD *thd= current_thd;
if (null_value)
return (String*) 0;
- name= thd->variables.lc_time_names->day_names->type_names[weekday];
- str->set(name, strlen(name), system_charset_info);
+ day_name= thd->variables.lc_time_names->day_names->type_names[weekday];
+ str->set(day_name, strlen(day_name), system_charset_info);
return str;
}
@@ -1647,6 +1648,7 @@ String *Item_func_sec_to_time::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
TIME ltime;
+ longlong arg_val= args[0]->val_int();
if ((null_value=args[0]->null_value) || str->alloc(19))
{
@@ -1654,7 +1656,7 @@ String *Item_func_sec_to_time::val_str(String *str)
return (String*) 0;
}
- sec_to_time(args[0]->val_int(), args[0]->unsigned_flag, &ltime);
+ sec_to_time(arg_val, args[0]->unsigned_flag, &ltime);
make_time((DATE_TIME_FORMAT *) 0, &ltime, str);
return str;
@@ -1665,11 +1667,12 @@ longlong Item_func_sec_to_time::val_int()
{
DBUG_ASSERT(fixed == 1);
TIME ltime;
+ longlong arg_val= args[0]->val_int();
if ((null_value=args[0]->null_value))
return 0;
- sec_to_time(args[0]->val_int(), args[0]->unsigned_flag, &ltime);
+ sec_to_time(arg_val, args[0]->unsigned_flag, &ltime);
return (ltime.neg ? -1 : 1) *
((ltime.hour)*10000 + ltime.minute*100 + ltime.second);
@@ -3168,10 +3171,10 @@ bool Item_func_str_to_date::get_date(TIME *ltime, uint fuzzy_date)
{
DATE_TIME_FORMAT date_time_format;
char val_buff[64], format_buff[64];
- String val_str(val_buff, sizeof(val_buff), &my_charset_bin), *val;
+ String val_string(val_buff, sizeof(val_buff), &my_charset_bin), *val;
String format_str(format_buff, sizeof(format_buff), &my_charset_bin), *format;
- val= args[0]->val_str(&val_str);
+ val= args[0]->val_str(&val_string);
format= args[1]->val_str(&format_str);
if (args[0]->null_value || args[1]->null_value)
goto null_date;
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 3da68cf43c2..9321992e566 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -577,7 +577,6 @@ String * Item_nodeset_func_union::val_nodeset(String *nodeset)
both_str.alloc(numnodes);
char *both= (char*) both_str.ptr();
bzero((void*)both, numnodes);
- uint pos= 0;
MY_XPATH_FLT *flt;
fltbeg= (MY_XPATH_FLT*) s0->ptr();
@@ -1484,7 +1483,6 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath)
static int my_xpath_parse_LocationPath(MY_XPATH *xpath);
static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath);
static int my_xpath_parse_RelativeLocationPath(MY_XPATH *xpath);
-static int my_xpath_parse_AbbreviatedAbsoluteLocationPath(MY_XPATH *xpath);
static int my_xpath_parse_AbbreviatedStep(MY_XPATH *xpath);
static int my_xpath_parse_Step(MY_XPATH *xpath);
static int my_xpath_parse_AxisSpecifier(MY_XPATH *xpath);
@@ -1503,7 +1501,6 @@ static int my_xpath_parse_RelationalExpr(MY_XPATH *xpath);
static int my_xpath_parse_AndExpr(MY_XPATH *xpath);
static int my_xpath_parse_EqualityExpr(MY_XPATH *xpath);
static int my_xpath_parse_VariableReference(MY_XPATH *xpath);
-static int my_xpath_parse_slash_opt_slash(MY_XPATH *xpath);
/*
@@ -2699,7 +2696,6 @@ String *Item_func_xml_update::val_str(String *str)
}
MY_XML_NODE *nodebeg= (MY_XML_NODE*) pxml.ptr();
- MY_XML_NODE *nodeend= (MY_XML_NODE*) pxml.ptr() + pxml.length();
MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) nodeset->ptr();
MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (nodeset->ptr() + nodeset->length());
diff --git a/sql/log.cc b/sql/log.cc
index 1b432ca15c0..5e9ebfcb902 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -147,8 +147,7 @@ public:
*/
void truncate(my_off_t pos)
{
- DBUG_PRINT("info", ("truncating to position %lu", pos));
- DBUG_PRINT("info", ("before_stmt_pos=%lu", pos));
+ DBUG_PRINT("info", ("truncating to position %lu", (ulong) pos));
delete pending();
set_pending(0);
reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0);
@@ -909,7 +908,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
my_time_t current_time;
Security_context *sctx= thd->security_ctx;
- uint message_buff_len= 0, user_host_len= 0;
+ uint user_host_len= 0;
longlong query_time= 0, lock_time= 0;
/*
@@ -1544,23 +1543,21 @@ static int binlog_prepare(handlerton *hton, THD *thd, bool all)
do nothing.
just pretend we can do 2pc, so that MySQL won't
switch to 1pc.
- real work will be done in MYSQL_BIN_LOG::log()
+ real work will be done in MYSQL_BIN_LOG::log_xid()
*/
return 0;
}
static int binlog_commit(handlerton *hton, THD *thd, bool all)
{
- int error= 0;
DBUG_ENTER("binlog_commit");
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- IO_CACHE *trans_log= &trx_data->trans_log;
DBUG_ASSERT(mysql_bin_log.is_open());
if (all && trx_data->empty())
{
- // we're here because trans_log was flushed in MYSQL_BIN_LOG::log()
+ // we're here because trans_log was flushed in MYSQL_BIN_LOG::log_xid()
trx_data->reset();
DBUG_RETURN(0);
}
@@ -1584,7 +1581,6 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all)
int error=0;
binlog_trx_data *const trx_data=
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- IO_CACHE *trans_log= &trx_data->trans_log;
DBUG_ASSERT(mysql_bin_log.is_open());
if (trx_data->empty()) {
@@ -1647,9 +1643,6 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv)
static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
{
DBUG_ENTER("binlog_savepoint_rollback");
- binlog_trx_data *const trx_data=
- (binlog_trx_data*) thd->ha_data[binlog_hton->slot];
- IO_CACHE *trans_log= &trx_data->trans_log;
DBUG_ASSERT(mysql_bin_log.is_open());
/*
@@ -1660,7 +1653,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
if (unlikely(thd->options &
(OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG)))
{
- int const error=
+ int error=
thd->binlog_query(THD::STMT_QUERY_TYPE,
thd->query, thd->query_length, TRUE, FALSE);
DBUG_RETURN(error);
@@ -1669,6 +1662,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
DBUG_RETURN(0);
}
+
int check_binlog_magic(IO_CACHE* log, const char** errmsg)
{
char magic[4];
@@ -1689,6 +1683,7 @@ int check_binlog_magic(IO_CACHE* log, const char** errmsg)
return 0;
}
+
File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg)
{
File file;
@@ -2195,7 +2190,6 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
{
- Security_context *sctx= thd->security_ctx;
if (current_time != last_time)
{
last_time= current_time;
@@ -2434,7 +2428,6 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
bool null_created_arg)
{
File file= -1;
- int open_flags = O_CREAT | O_BINARY;
DBUG_ENTER("MYSQL_BIN_LOG::open");
DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg));
@@ -2596,6 +2589,8 @@ int MYSQL_BIN_LOG::raw_get_current_log(LOG_INFO* linfo)
0 ok
*/
+#ifdef HAVE_REPLICATION
+
static bool copy_up_file_and_fill(IO_CACHE *index_file, my_off_t offset)
{
int bytes_read;
@@ -2629,6 +2624,7 @@ err:
DBUG_RETURN(1);
}
+#endif /* HAVE_REPLICATION */
/*
Find the position in the log-index-file for the given log name
@@ -3121,8 +3117,6 @@ err:
pthread_mutex_unlock(&LOCK_index);
DBUG_RETURN(error);
}
-
-
#endif /* HAVE_REPLICATION */
@@ -3244,7 +3238,6 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock)
We log the whole file name for log file as the user may decide
to change base names at some point.
*/
- THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */
Rotate_log_event r(new_name+dirname_length(new_name),
0, LOG_EVENT_OFFSET, 0);
r.write(&log_file);
@@ -3480,10 +3473,10 @@ int THD::binlog_flush_transaction_cache()
{
DBUG_ENTER("binlog_flush_transaction_cache");
binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
- DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data));
+ DBUG_PRINT("enter", ("trx_data: 0x%lx", (ulong) trx_data));
if (trx_data)
- DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u",
- trx_data->before_stmt_pos));
+ DBUG_PRINT("enter", ("trx_data->before_stmt_pos: %lu",
+ (ulong) trx_data->before_stmt_pos));
/*
Write the transaction cache to the binary log. We don't flush and
@@ -3698,14 +3691,14 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
*/
if (likely(is_open()))
{
- const char *local_db= event_info->get_db();
IO_CACHE *file= &log_file;
#ifdef HAVE_REPLICATION
/*
- In the future we need to add to the following if tests like
- "do the involved tables match (to be implemented)
- binlog_[wild_]{do|ignore}_table?" (WL#1049)"
+ In the future we need to add to the following if tests like
+ "do the involved tables match (to be implemented)
+ binlog_[wild_]{do|ignore}_table?" (WL#1049)"
*/
+ const char *local_db= event_info->get_db();
if ((thd && !(thd->options & OPTION_BIN_LOG)) ||
(!binlog_filter->db_ok(local_db)))
{
@@ -3981,8 +3974,6 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
if (likely(is_open())) // Should always be true
{
- uint length;
-
/*
We only bother to write to the binary log if there is anything
to write.
@@ -4022,9 +4013,6 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
if (commit_event && commit_event->write(&log_file))
goto err;
-#ifndef DBUG_OFF
- DBUG_skip_commit:
-#endif
if (flush_and_sync())
goto err;
DBUG_EXECUTE_IF("half_binlogged_transaction", abort(););
@@ -4684,21 +4672,34 @@ int TC_LOG_MMAP::overflow()
}
/*
- all access to active page is serialized but it's not a problem, as
- we're assuming that fsync() will be a main bottleneck.
- That is, parallelizing writes to log pages we'll decrease number of
- threads waiting for a page, but then all these threads will be waiting
- for a fsync() anyway
+ Record that transaction XID is committed on the persistent storage
+
+ NOTES
+ This function is called in the middle of two-phase commit:
+ First all resources prepare the transaction, then tc_log->log() is called,
+ then all resources commit the transaction, then tc_log->unlog() is called.
+
+ All access to active page is serialized but it's not a problem, as
+ we're assuming that fsync() will be a main bottleneck.
+ That is, parallelizing writes to log pages we'll decrease number of
+ threads waiting for a page, but then all these threads will be waiting
+ for a fsync() anyway
+
+ IMPLEMENTATION
+ If tc_log == MYSQL_LOG then tc_log writes transaction to binlog and
+ records XID in a special Xid_log_event.
+ If tc_log = TC_LOG_MMAP then xid is written in a special memory-mapped
+ log.
RETURN
- 0 - error
- otherwise - "cookie", a number that will be passed as an argument
- to unlog() call. tc_log can define it any way it wants,
- and use for whatever purposes. TC_LOG_MMAP sets it
- to the position in memory where xid was logged to.
+ 0 Error
+ # "cookie", a number that will be passed as an argument
+ to unlog() call. tc_log can define it any way it wants,
+ and use for whatever purposes. TC_LOG_MMAP sets it
+ to the position in memory where xid was logged to.
*/
-int TC_LOG_MMAP::log(THD *thd, my_xid xid)
+int TC_LOG_MMAP::log_xid(THD *thd, my_xid xid)
{
int err;
PAGE *p;
@@ -4807,6 +4808,7 @@ int TC_LOG_MMAP::sync()
erase xid from the page, update page free space counters/pointers.
cookie points directly to the memory where xid was logged
*/
+
void TC_LOG_MMAP::unlog(ulong cookie, my_xid xid)
{
PAGE *p=pages+(cookie/tc_log_page_size);
@@ -5049,7 +5051,7 @@ void TC_LOG_BINLOG::close()
0 - error
1 - success
*/
-int TC_LOG_BINLOG::log(THD *thd, my_xid xid)
+int TC_LOG_BINLOG::log_xid(THD *thd, my_xid xid)
{
DBUG_ENTER("TC_LOG_BINLOG::log");
Xid_log_event xle(thd, xid);
diff --git a/sql/log.h b/sql/log.h
index 61db7052f75..80aa4b20ee6 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -33,7 +33,7 @@ class TC_LOG
virtual int open(const char *opt_name)=0;
virtual void close()=0;
- virtual int log(THD *thd, my_xid xid)=0;
+ virtual int log_xid(THD *thd, my_xid xid)=0;
virtual void unlog(ulong cookie, my_xid xid)=0;
};
@@ -43,7 +43,7 @@ public:
TC_LOG_DUMMY() {}
int open(const char *opt_name) { return 0; }
void close() { }
- int log(THD *thd, my_xid xid) { return 1; }
+ int log_xid(THD *thd, my_xid xid) { return 1; }
void unlog(ulong cookie, my_xid xid) { }
};
@@ -88,7 +88,7 @@ class TC_LOG_MMAP: public TC_LOG
TC_LOG_MMAP(): inited(0) {}
int open(const char *opt_name);
void close();
- int log(THD *thd, my_xid xid);
+ int log_xid(THD *thd, my_xid xid);
void unlog(ulong cookie, my_xid xid);
int recover();
@@ -287,7 +287,7 @@ public:
int open(const char *opt_name);
void close();
- int log(THD *thd, my_xid xid);
+ int log_xid(THD *thd, my_xid xid);
void unlog(ulong cookie, my_xid xid);
int recover(IO_CACHE *log, Format_description_log_event *fdle);
#if !defined(MYSQL_CLIENT)
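
The comments added in log.cc above describe the two-phase commit choreography around the renamed log_xid() method: every resource prepares the transaction, the coordinator logs the XID and hands back a non-zero cookie, the resources commit, and the cookie is finally passed to unlog(). The stand-alone sketch below mirrors that call order only; the Engine type, the in-memory TC_LOG_MEM coordinator and the single-argument log_xid() signature are simplifications for illustration, not the server's real classes.

// Minimal sketch of the log_xid()/unlog() protocol described above.
#include <cstdio>
#include <map>

typedef unsigned long my_xid;

struct TC_LOG
{
  virtual ~TC_LOG() {}
  virtual int  log_xid(my_xid xid) = 0;             // 0 = error, else a cookie
  virtual void unlog(unsigned long cookie, my_xid xid) = 0;
};

// Toy stand-in for TC_LOG_MMAP / TC_LOG_BINLOG: records xids in memory.
struct TC_LOG_MEM : TC_LOG
{
  std::map<unsigned long, my_xid> slots;
  unsigned long next_cookie;
  TC_LOG_MEM() : next_cookie(1) {}
  int log_xid(my_xid xid) { slots[next_cookie]= xid; return (int) next_cookie++; }
  void unlog(unsigned long cookie, my_xid xid) { slots.erase(cookie); (void) xid; }
};

struct Engine { const char *name; };                // hypothetical resource

// Two-phase commit driver following the documented sequence:
// prepare all engines, log the xid, commit all engines, then unlog.
static bool commit_transaction(TC_LOG *tc_log, Engine *engines, int n, my_xid xid)
{
  for (int i= 0; i < n; i++)
    printf("prepare %s\n", engines[i].name);        // ha_prepare() step
  int cookie= tc_log->log_xid(xid);
  if (!cookie)
    return false;                                   // would roll back instead
  for (int i= 0; i < n; i++)
    printf("commit %s\n", engines[i].name);         // commit step
  tc_log->unlog(cookie, xid);
  return true;
}

int main()
{
  Engine engines[]= { {"innodb"}, {"binlog"} };
  TC_LOG_MEM log;
  return commit_transaction(&log, engines, 2, 42) ? 0 : 1;
}
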
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 82fb64bfe15..82cfc0cd3a2 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -139,22 +139,6 @@ static void pretty_print_str(IO_CACHE* cache, char* str, int len)
}
#endif /* MYSQL_CLIENT */
-#ifdef HAVE_purify
-static void
-valgrind_check_mem(void *ptr, size_t len)
-{
- static volatile uchar dummy;
- for (volatile uchar *p= (uchar*) ptr ; p != (uchar*) ptr + len ; ++p)
- {
- int const c = *p;
- if (c < 128)
- dummy= c + 1;
- else
- dummy = c - 1;
- }
-}
-#endif
-
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
static void clear_all_errors(THD *thd, struct st_relay_log_info *rli)
@@ -381,12 +365,14 @@ append_query_string(CHARSET_INFO *csinfo,
}
#endif
+
/*
Prints a "session_var=value" string. Used by mysqlbinlog to print some SET
commands just before it prints a query.
*/
#ifdef MYSQL_CLIENT
+
static void print_set_option(IO_CACHE* file, uint32 bits_changed,
uint32 option, uint32 flags, const char* name,
bool* need_comma)
@@ -3269,7 +3255,6 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables);
set_fields(tables.db, field_list, &thd->main_lex.select_lex.context);
thd->variables.pseudo_thread_id= thread_id;
- List<Item> set_fields;
if (net)
{
// mysql_load will use thd->net to read the file
@@ -3280,10 +3265,11 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
thd->net.pkt_nr = net->pkt_nr;
}
/*
- It is safe to use set_fields twice because we are not going to
+ It is safe to use tmp_list twice because we are not going to
update it inside mysql_load().
*/
- if (mysql_load(thd, &ex, &tables, field_list, set_fields, set_fields,
+ List<Item> tmp_list;
+ if (mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list,
handle_dup, ignore, net != 0))
thd->query_error= 1;
if (thd->cuted_fields)
@@ -5481,7 +5467,6 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
if (static_cast<my_size_t>(m_rows_end - m_rows_cur) < length)
{
my_size_t const block_size= 1024;
- my_ptrdiff_t const old_alloc= m_rows_end - m_rows_buf;
my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf;
my_ptrdiff_t const new_alloc=
block_size * ((cur_size + length) / block_size + block_size - 1);
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index fee891913ca..d9026af7b99 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -57,10 +57,10 @@ typedef ulonglong nested_join_map;
/* query_id */
typedef ulonglong query_id_t;
-extern query_id_t query_id;
+extern query_id_t global_query_id;
/* increment query_id and return it. */
-inline query_id_t next_query_id() { return query_id++; }
+inline query_id_t next_query_id() { return global_query_id++; }
/* useful constants */
extern const key_map key_map_empty;
@@ -170,7 +170,7 @@ MY_LOCALE *my_locale_by_number(uint number);
Feel free to raise this by the smallest amount you can to get the
"execution_constants" test to pass.
*/
-#define STACK_MIN_SIZE 10788 // Abort if less stack during eval.
+#define STACK_MIN_SIZE 12000 // Abort if less stack during eval.
#define STACK_MIN_SIZE_FOR_OPEN 1024*80
#define STACK_BUFF_ALLOC 256 // For stack overrun checks
@@ -1155,7 +1155,8 @@ bool push_new_name_resolution_context(THD *thd,
TABLE_LIST *left_op,
TABLE_LIST *right_op);
void add_join_on(TABLE_LIST *b,Item *expr);
-void add_join_natural(TABLE_LIST *a,TABLE_LIST *b,List<String> *using_fields);
+void add_join_natural(TABLE_LIST *a,TABLE_LIST *b,List<String> *using_fields,
+ SELECT_LEX *lex);
bool add_proc_to_list(THD *thd, Item *item);
TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find);
void update_non_unique_table_error(TABLE_LIST *update,
@@ -1526,7 +1527,7 @@ extern int creating_table; // How many mysql_create_table() are running
External variables
*/
-extern time_t start_time;
+extern time_t server_start_time;
extern char *mysql_data_home,server_version[SERVER_VERSION_LENGTH],
mysql_real_data_home[], *opt_mysql_tmpdir, mysql_charsets_dir[],
def_ft_boolean_syntax[sizeof(ft_boolean_syntax)];
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 0fc79331163..fe7162e986e 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -199,12 +199,6 @@ inline void reset_floating_point_exceptions()
} /* cplusplus */
-
-#if defined(HAVE_LINUXTHREADS)
-#define THR_KILL_SIGNAL SIGINT
-#else
-#define THR_KILL_SIGNAL SIGUSR2 // Can't use this with LinuxThreads
-#endif
#define MYSQL_KILL_SIGNAL SIGTERM
#ifdef HAVE_GLIBC2_STYLE_GETHOSTBYNAME_R
@@ -470,7 +464,7 @@ ulong slave_net_timeout, slave_trans_retries;
ulong thread_cache_size=0, binlog_cache_size=0, max_binlog_cache_size=0;
ulong query_cache_size=0;
ulong refresh_version, flush_version; /* Increments on each reload */
-query_id_t query_id;
+query_id_t global_query_id;
ulong aborted_threads, aborted_connects;
ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size;
ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use;
@@ -503,7 +497,7 @@ const char *log_output_str= "TABLE";
double log_10[32]; /* 10 potences */
double log_01[32];
-time_t start_time;
+time_t server_start_time;
char mysql_home[FN_REFLEN], pidfile_name[FN_REFLEN], system_time_zone[30];
char *default_tz_name;
@@ -519,7 +513,6 @@ key_map key_map_full(0); // Will be initialized later
const char *opt_date_time_formats[3];
char mysql_data_home_buff[2], *mysql_data_home=mysql_real_data_home;
-struct passwd *user_info;
char server_version[SERVER_VERSION_LENGTH];
char *mysqld_unix_port, *opt_mysql_tmpdir;
const char **errmesg; /* Error messages */
@@ -541,7 +534,6 @@ Lt_creator lt_creator;
Ge_creator ge_creator;
Le_creator le_creator;
-
FILE *bootstrap_file;
int bootstrap_error;
FILE *stderror_file=0;
@@ -596,6 +588,7 @@ pthread_mutex_t LOCK_server_started;
pthread_cond_t COND_server_started;
int mysqld_server_started= 0;
+static uint thr_kill_signal;
File_parser_dummy_hook file_parser_dummy_hook;
@@ -624,9 +617,13 @@ static char **defaults_argv;
static char *opt_bin_logname;
static my_socket unix_sock,ip_sock;
-static pthread_t select_thread;
struct rand_struct sql_rand; // used by sql_class.cc:THD::THD()
+#ifndef EMBEDDED_LIBRARY
+struct passwd *user_info;
+static pthread_t select_thread;
+#endif
+
/* OS specific variables */
#ifdef __WIN__
@@ -703,7 +700,6 @@ struct st_VioSSLFd *ssl_acceptor_fd;
/* Function declarations */
-static void start_signal_handler(void);
pthread_handler_t signal_hand(void *arg);
static void mysql_init_variables(void);
static void get_options(int argc,char **argv);
@@ -714,7 +710,6 @@ static void fix_paths(void);
pthread_handler_t handle_connections_sockets(void *arg);
pthread_handler_t kill_server_thread(void *arg);
static void bootstrap(FILE *file);
-static void close_server_sock();
static bool read_init_file(char *file_name);
#ifdef __NT__
pthread_handler_t handle_connections_namedpipes(void *arg);
@@ -725,11 +720,17 @@ pthread_handler_t handle_connections_shared_memory(void *arg);
pthread_handler_t handle_slave(void *arg);
static ulong find_bit_type(const char *x, TYPELIB *bit_lib);
static void clean_up(bool print_message);
+static int test_if_case_insensitive(const char *dir_name);
+
+#ifndef EMBEDDED_LIBRARY
+static void start_signal_handler(void);
+static void close_server_sock();
static void clean_up_mutexes(void);
static void wait_for_signal_thread_to_end(void);
-static int test_if_case_insensitive(const char *dir_name);
static void create_pid_file();
static void end_ssl();
+#endif
+
#ifndef EMBEDDED_LIBRARY
/****************************************************************************
@@ -769,7 +770,7 @@ static void close_connections(void)
DBUG_PRINT("info",("Waiting for select thread"));
#ifndef DONT_USE_THR_ALARM
- if (pthread_kill(select_thread,THR_CLIENT_ALARM))
+ if (pthread_kill(select_thread, thr_client_alarm))
break; // allready dead
#endif
set_timespec(abstime, 2);
@@ -919,7 +920,6 @@ static void close_connections(void)
DBUG_PRINT("quit",("close_connections thread"));
DBUG_VOID_RETURN;
}
-#endif /*EMBEDDED_LIBRARY*/
static void close_server_sock()
@@ -962,12 +962,14 @@ static void close_server_sock()
#endif
}
+#endif /*EMBEDDED_LIBRARY*/
+
void kill_mysql(void)
{
DBUG_ENTER("kill_mysql");
-#ifdef SIGNALS_DONT_BREAK_READ
+#if defined(SIGNALS_DONT_BREAK_READ) && !defined(EMBEDDED_LIBRARY)
abort_loop=1; // Break connection loops
close_server_sock(); // Force accept to wake up
#endif
@@ -1045,7 +1047,7 @@ static void __cdecl kill_server(int sig_ptr)
kill_in_progress=TRUE;
abort_loop=1; // This should be set
if (sig != 0) // 0 is not a valid signal number
- my_sigset(sig,SIG_IGN);
+ my_sigset(sig, SIG_IGN); /* purify inspected */
if (sig == MYSQL_KILL_SIGNAL || sig == 0)
sql_print_information(ER(ER_NORMAL_SHUTDOWN),my_progname);
else
@@ -1233,7 +1235,9 @@ void clean_up(bool print_message)
#endif
delete binlog_filter;
delete rpl_filter;
+#ifndef EMBEDDED_LIBRARY
end_ssl();
+#endif
vio_end();
#ifdef USE_REGEX
my_regex_end();
@@ -1266,6 +1270,8 @@ void clean_up(bool print_message)
} /* clean_up */
+#ifndef EMBEDDED_LIBRARY
+
/*
This is mainly needed when running with purify, but it's still nice to
know that all child threads have died when mysqld exits
@@ -1336,6 +1342,9 @@ static void clean_up_mutexes()
(void) pthread_cond_destroy(&COND_manager);
}
+#endif /*EMBEDDED_LIBRARY*/
+
+
/****************************************************************************
** Init IP and UNIX socket
****************************************************************************/
@@ -1370,7 +1379,7 @@ static void set_ports()
static struct passwd *check_user(const char *user)
{
#if !defined(__WIN__) && !defined(__NETWARE__)
- struct passwd *user_info;
+ struct passwd *tmp_user_info;
uid_t user_id= geteuid();
// Don't bother if we aren't superuser
@@ -1378,12 +1387,14 @@ static struct passwd *check_user(const char *user)
{
if (user)
{
- // Don't give a warning, if real user is same as given with --user
- user_info= getpwnam(user);
- if ((!user_info || user_id != user_info->pw_uid) &&
+ /* Don't give a warning, if real user is same as given with --user */
+ /* purecov: begin tested */
+ tmp_user_info= getpwnam(user);
+ if ((!tmp_user_info || user_id != tmp_user_info->pw_uid) &&
global_system_variables.log_warnings)
sql_print_warning(
"One can only use the --user switch if running as root\n");
+ /* purecov: end */
}
return NULL;
}
@@ -1396,23 +1407,22 @@ static struct passwd *check_user(const char *user)
}
return NULL;
}
+ /* purecov: begin tested */
if (!strcmp(user,"root"))
return NULL; // Avoid problem with dynamic libraries
- if (!(user_info= getpwnam(user)))
+ if (!(tmp_user_info= getpwnam(user)))
{
// Allow a numeric uid to be used
const char *pos;
for (pos= user; my_isdigit(mysqld_charset,*pos); pos++) ;
if (*pos) // Not numeric id
goto err;
- if (!(user_info= getpwuid(atoi(user))))
+ if (!(tmp_user_info= getpwuid(atoi(user))))
goto err;
- else
- return user_info;
}
- else
- return user_info;
+ return tmp_user_info;
+ /* purecov: end */
err:
sql_print_error("Fatal error: Can't change to run as user '%s' ; Please check that the user exists!\n",user);
@@ -1421,10 +1431,11 @@ err:
return NULL;
}
-static void set_user(const char *user, struct passwd *user_info)
+static void set_user(const char *user, struct passwd *user_info_arg)
{
+ /* purecov: begin tested */
#if !defined(__WIN__) && !defined(__NETWARE__)
- DBUG_ASSERT(user_info != 0);
+ DBUG_ASSERT(user_info_arg != 0);
#ifdef HAVE_INITGROUPS
/*
We can get a SIGSEGV when calling initgroups() on some systems when NSS
@@ -1433,33 +1444,34 @@ static void set_user(const char *user, struct passwd *user_info)
output a specific message to help the user resolve this problem.
*/
calling_initgroups= TRUE;
- initgroups((char*) user, user_info->pw_gid);
+ initgroups((char*) user, user_info_arg->pw_gid);
calling_initgroups= FALSE;
#endif
- if (setgid(user_info->pw_gid) == -1)
+ if (setgid(user_info_arg->pw_gid) == -1)
{
sql_perror("setgid");
unireg_abort(1);
}
- if (setuid(user_info->pw_uid) == -1)
+ if (setuid(user_info_arg->pw_uid) == -1)
{
sql_perror("setuid");
unireg_abort(1);
}
#endif
+ /* purecov: end */
}
-static void set_effective_user(struct passwd *user_info)
+static void set_effective_user(struct passwd *user_info_arg)
{
#if !defined(__WIN__) && !defined(__NETWARE__)
- DBUG_ASSERT(user_info != 0);
- if (setregid((gid_t)-1, user_info->pw_gid) == -1)
+ DBUG_ASSERT(user_info_arg != 0);
+ if (setregid((gid_t)-1, user_info_arg->pw_gid) == -1)
{
sql_perror("setregid");
unireg_abort(1);
}
- if (setreuid((uid_t)-1, user_info->pw_uid) == -1)
+ if (setreuid((uid_t)-1, user_info_arg->pw_uid) == -1)
{
sql_perror("setreuid");
unireg_abort(1);
@@ -1800,6 +1812,7 @@ extern "C" sig_handler abort_thread(int sig __attribute__((unused)))
}
#endif
+
/******************************************************************************
Setup a signal thread with handles all signals.
Because Linux doesn't support schemas use a mutex to check that
@@ -1819,6 +1832,7 @@ static void init_signals(void)
#endif
}
+
static void start_signal_handler(void)
{
// Save vm id of this process
@@ -1826,6 +1840,7 @@ static void start_signal_handler(void)
create_pid_file();
}
+
static void check_data_home(const char *path)
{}
@@ -2046,6 +2061,7 @@ static void init_signals(void)
}
+
static void start_signal_handler(void)
{
// Save vm id of this process
@@ -2182,6 +2198,8 @@ bugs.\n");
#define SA_NODEFER 0
#endif
+#ifndef EMBEDDED_LIBRARY
+
static void init_signals(void)
{
sigset_t set;
@@ -2189,7 +2207,9 @@ static void init_signals(void)
DBUG_ENTER("init_signals");
if (test_flags & TEST_SIGINT)
- my_sigset(THR_KILL_SIGNAL,end_thread_signal);
+ {
+ my_sigset(thr_kill_signal, end_thread_signal);
+ }
my_sigset(THR_SERVER_ALARM,print_signal_warning); // Should never be called!
if (!(test_flags & TEST_NO_STACKTRACE) || (test_flags & TEST_CORE_ON_SIGNAL))
@@ -2246,15 +2266,18 @@ static void init_signals(void)
#endif
sigaddset(&set,THR_SERVER_ALARM);
if (test_flags & TEST_SIGINT)
- sigdelset(&set,THR_KILL_SIGNAL); // May be SIGINT
- sigdelset(&set,THR_CLIENT_ALARM); // For alarms
+ {
+ // May be SIGINT
+ sigdelset(&set, thr_kill_signal);
+ }
+ // For alarms
+ sigdelset(&set, thr_client_alarm);
sigprocmask(SIG_SETMASK,&set,NULL);
pthread_sigmask(SIG_SETMASK,&set,NULL);
DBUG_VOID_RETURN;
}
-#ifndef EMBEDDED_LIBRARY
static void start_signal_handler(void)
{
int error;
@@ -2311,24 +2334,20 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
*/
init_thr_alarm(max_connections +
global_system_variables.max_insert_delayed_threads + 10);
-#if SIGINT != THR_KILL_SIGNAL
- if (test_flags & TEST_SIGINT)
+ if (thd_lib_detected != THD_LIB_LT && (test_flags & TEST_SIGINT))
{
(void) sigemptyset(&set); // Setup up SIGINT for debug
(void) sigaddset(&set,SIGINT); // For debugging
(void) pthread_sigmask(SIG_UNBLOCK,&set,NULL);
}
-#endif
(void) sigemptyset(&set); // Setup up SIGINT for debug
#ifdef USE_ONE_SIGNAL_HAND
(void) sigaddset(&set,THR_SERVER_ALARM); // For alarms
#endif
#ifndef IGNORE_SIGHUP_SIGQUIT
(void) sigaddset(&set,SIGQUIT);
-#if THR_CLIENT_ALARM != SIGHUP
(void) sigaddset(&set,SIGHUP);
#endif
-#endif
(void) sigaddset(&set,SIGTERM);
(void) sigaddset(&set,SIGTSTP);
@@ -2428,11 +2447,11 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
}
return(0); /* purecov: deadcode */
}
-#endif /*!EMBEDDED_LIBRARY*/
static void check_data_home(const char *path)
{}
+#endif /*!EMBEDDED_LIBRARY*/
#endif /* __WIN__*/
@@ -2497,6 +2516,7 @@ static int my_message_sql(uint error, const char *str, myf MyFlags)
}
+#ifndef EMBEDDED_LIBRARY
static void *my_str_malloc_mysqld(size_t size)
{
return my_malloc(size, MYF(MY_FAE));
@@ -2507,6 +2527,7 @@ static void my_str_free_mysqld(void *ptr)
{
my_free((gptr)ptr, MYF(MY_FAE));
}
+#endif /* EMBEDDED_LIBRARY */
#ifdef __WIN__
@@ -2613,7 +2634,15 @@ static int init_common_variables(const char *conf_file_name, int argc,
tzset(); // Set tzname
max_system_variables.pseudo_thread_id= (ulong)~0;
- start_time=time((time_t*) 0);
+ server_start_time= time((time_t*) 0);
+ rpl_filter= new Rpl_filter;
+ binlog_filter= new Rpl_filter;
+ if (!rpl_filter || !binlog_filter)
+ {
+ sql_perror("Could not allocate replication and binlog filters");
+ exit(1);
+ }
+
if (init_thread_environment())
return 1;
mysql_init_variables();
@@ -2621,7 +2650,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
#ifdef HAVE_TZNAME
{
struct tm tm_tmp;
- localtime_r(&start_time,&tm_tmp);
+ localtime_r(&server_start_time,&tm_tmp);
strmake(system_time_zone, tzname[tm_tmp.tm_isdst != 0 ? 1 : 0],
sizeof(system_time_zone)-1);
@@ -3011,6 +3040,8 @@ static void openssl_lock(int mode, openssl_lock_t *lock, const char *file,
#endif /* HAVE_OPENSSL */
+#ifndef EMBEDDED_LIBRARY
+
static void init_ssl()
{
#ifdef HAVE_OPENSSL
@@ -3048,6 +3079,8 @@ static void end_ssl()
#endif /* HAVE_OPENSSL */
}
+#endif /* EMBEDDED_LIBRARY */
+
static int init_server_components()
{
@@ -3063,7 +3096,7 @@ static int init_server_components()
query_cache_set_min_res_unit(query_cache_min_res_unit);
query_cache_init();
query_cache_resize(query_cache_size);
- randominit(&sql_rand,(ulong) start_time,(ulong) start_time/2);
+ randominit(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2);
reset_floating_point_exceptions();
init_thr_lock();
#ifdef HAVE_REPLICATION
@@ -3366,6 +3399,8 @@ server.");
}
+#ifndef EMBEDDED_LIBRARY
+
static void create_maintenance_thread()
{
if (flush_time && flush_time != ~(ulong) 0L)
@@ -3379,7 +3414,6 @@ static void create_maintenance_thread()
static void create_shutdown_thread()
{
-#if !defined(EMBEDDED_LIBRARY)
#ifdef __WIN__
hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name);
pthread_t hThread;
@@ -3388,10 +3422,11 @@ static void create_shutdown_thread()
// On "Stop Service" we have to do regular shutdown
Service.SetShutdownEvent(hEventShutdown);
-#endif
-#endif // EMBEDDED_LIBRARY
+#endif /* __WIN__ */
}
+#endif /* EMBEDDED_LIBRARY */
+
#if (defined(__NT__) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
static void handle_connections_methods()
@@ -3474,13 +3509,12 @@ int main(int argc, char **argv)
MY_INIT(argv[0]); // init my_sys library & pthreads
/* nothing should come before this line ^^^ */
- rpl_filter= new Rpl_filter;
- binlog_filter= new Rpl_filter;
- if (!rpl_filter || !binlog_filter)
- {
- sql_perror("Could not allocate replication and binlog filters");
- exit(1);
- }
+ /* Set signal used to kill MySQL */
+#if defined(SIGUSR2)
+ thr_kill_signal= thd_lib_detected == THD_LIB_LT ? SIGINT : SIGUSR2;
+#else
+ thr_kill_signal= SIGINT;
+#endif
/*
Perform basic logger initialization logger. Should be called after
@@ -6302,7 +6336,7 @@ static int show_starttime(THD *thd, SHOW_VAR *var, char *buff)
{
var->type= SHOW_LONG;
var->value= buff;
- *((long *)buff)= (long) (thd->query_start() - start_time);
+ *((long *)buff)= (long) (thd->query_start() - server_start_time);
return 0;
}
@@ -6986,7 +7020,7 @@ static void mysql_init_variables(void)
protocol_version= PROTOCOL_VERSION;
what_to_log= ~ (1L << (uint) COM_TIME);
refresh_version= flush_version= 1L; /* Increments on each reload */
- query_id= thread_id= 1L;
+ global_query_id= thread_id= 1L;
strmov(server_version, MYSQL_SERVER_VERSION);
myisam_recover_options_str= sql_mode_str= "OFF";
myisam_stats_method_str= "nulls_unequal";
@@ -8060,6 +8094,8 @@ static int test_if_case_insensitive(const char *dir_name)
/* Create file to store pid number */
+#ifndef EMBEDDED_LIBRARY
+
static void create_pid_file()
{
File file;
@@ -8079,7 +8115,7 @@ static void create_pid_file()
sql_perror("Can't start server: can't create PID file");
exit(1);
}
-
+#endif /* EMBEDDED_LIBRARY */
/* Clear most status variables */
void refresh_status(THD *thd)
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index ceaac26742b..e9533495650 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -220,6 +220,8 @@ my_bool net_realloc(NET *net, ulong length)
-1 Don't know if data is ready or not
*/
+#if !defined(EMBEDDED_LIBRARY)
+
static int net_data_is_ready(my_socket sd)
{
#ifdef HAVE_POLL
@@ -254,9 +256,10 @@ static int net_data_is_ready(my_socket sd)
return 0;
else
return test(res ? FD_ISSET(sd, &sfds) : 0);
-#endif
+#endif /* HAVE_POLL */
}
+#endif /* EMBEDDED_LIBRARY */
/*
Remove unwanted characters from connection
@@ -282,8 +285,11 @@ static int net_data_is_ready(my_socket sd)
void net_clear(NET *net, my_bool clear_buffer)
{
+#if !defined(EMBEDDED_LIBRARY)
int count, ready;
+#endif
DBUG_ENTER("net_clear");
+
#if !defined(EMBEDDED_LIBRARY)
if (clear_buffer)
{
@@ -295,7 +301,7 @@ void net_clear(NET *net, my_bool clear_buffer)
{
DBUG_PRINT("info",("skipped %d bytes from file: %s",
count, vio_description(net->vio)));
-#ifdef EXTRA_DEBUG
+#if defined(EXTRA_DEBUG) && (MYSQL_VERSION_ID < 51000)
fprintf(stderr,"Error: net_clear() skipped %d bytes from file: %s\n",
count, vio_description(net->vio));
#endif
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 628b07631c1..f613b1b9f02 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -219,6 +219,8 @@ public:
}
inline void merge_flags(SEL_ARG *arg) { maybe_flag|=arg->maybe_flag; }
inline void maybe_smaller() { maybe_flag=1; }
+ /* Return true iff it's a single-point null interval */
+ inline bool is_null_interval() { return maybe_null && max_value[0] == 1; }
inline int cmp_min_to_min(SEL_ARG* arg)
{
return sel_cmp(field,min_value, arg->min_value, min_flag, arg->min_flag);
@@ -560,6 +562,7 @@ public:
bool is_ror_scan;
/* Number of ranges in the last checked tree->key */
uint n_ranges;
+ uint8 first_null_comp; /* first null component if any, 0 - otherwise */
};
class TABLE_READ_PLAN;
@@ -606,9 +609,6 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
double read_time);
static
TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree);
-static int get_index_merge_params(PARAM *param, key_map& needed_reg,
- SEL_IMERGE *imerge, double *read_time,
- ha_rows* imerge_rows);
static double get_index_only_read_time(const PARAM* param, ha_rows records,
int keynr);
@@ -618,7 +618,6 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
static void print_ror_scans_arr(TABLE *table, const char *msg,
struct st_ror_scan_info **start,
struct st_ror_scan_info **end);
-static void print_rowid(byte* val, int len);
static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg);
#endif
@@ -930,7 +929,7 @@ QUICK_SELECT_I::QUICK_SELECT_I()
QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bool no_alloc, MEM_ROOT *parent_alloc)
- :dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),range(0)
+ :dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),last_range(0)
{
my_bitmap_map *bitmap;
DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");
@@ -1390,13 +1389,12 @@ int QUICK_ROR_UNION_SELECT::queue_cmp(void *arg, byte *val1, byte *val2)
int QUICK_ROR_UNION_SELECT::reset()
{
- QUICK_SELECT_I* quick;
+ QUICK_SELECT_I *quick;
int error;
DBUG_ENTER("QUICK_ROR_UNION_SELECT::reset");
have_prev_rowid= FALSE;
if (!scans_inited)
{
- QUICK_SELECT_I *quick;
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
while ((quick= it++))
{
@@ -2425,8 +2423,6 @@ static int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar,
static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
List<SEL_IMERGE> &merges);
static void mark_all_partitions_as_used(partition_info *part_info);
-static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par,
- uint32 num);
#ifndef DBUG_OFF
static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end);
@@ -4687,8 +4683,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
param->table->key_info[keynr].name, found_read_time,
read_time));
- if (read_time > found_read_time && found_records != HA_POS_ERROR
- /*|| read_time == DBL_MAX*/ )
+ if (read_time > found_read_time && found_records != HA_POS_ERROR)
{
read_time= found_read_time;
best_records= found_records;
@@ -7016,6 +7011,7 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree, bool update_tbl_stats)
DBUG_ENTER("check_quick_select");
param->is_ror_scan= FALSE;
+ param->first_null_comp= 0;
if (!tree)
DBUG_RETURN(HA_POS_ERROR); // Can't use it
@@ -7116,6 +7112,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
ha_rows records=0, tmp;
uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length;
char *tmp_min_key, *tmp_max_key;
+ uint8 save_first_null_comp= param->first_null_comp;
param->max_key_part=max(param->max_key_part,key_tree->part);
if (key_tree->left != &null_element)
@@ -7153,6 +7150,9 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
param->is_ror_scan= FALSE;
}
+ if (!param->first_null_comp && key_tree->is_null_interval())
+ param->first_null_comp= key_tree->part+1;
+
if (key_tree->next_key_part &&
key_tree->next_key_part->part == key_tree->part+1 &&
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE)
@@ -7196,7 +7196,8 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
(param->table->key_info[keynr].flags & (HA_NOSAME | HA_END_SPACE_KEY)) ==
HA_NOSAME &&
min_key_length == max_key_length &&
- !memcmp(param->min_key,param->max_key,min_key_length))
+ !memcmp(param->min_key,param->max_key,min_key_length) &&
+ !param->first_null_comp)
{
tmp=1; // Max one record
param->n_ranges++;
@@ -7271,6 +7272,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
return tmp;
records+=tmp;
}
+ param->first_null_comp= save_first_null_comp;
return records;
}
@@ -7988,7 +7990,7 @@ int QUICK_RANGE_SELECT::reset()
byte *mrange_buff;
DBUG_ENTER("QUICK_RANGE_SELECT::reset");
next=0;
- range= NULL;
+ last_range= NULL;
in_range= FALSE;
cur_range= (QUICK_RANGE**) ranges.buffer;
@@ -8122,23 +8124,23 @@ int QUICK_RANGE_SELECT::get_next()
{
start_key= &mrange_slot->start_key;
end_key= &mrange_slot->end_key;
- range= *(cur_range++);
+ last_range= *(cur_range++);
- start_key->key= (const byte*) range->min_key;
- start_key->length= range->min_length;
- start_key->flag= ((range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
- (range->flag & EQ_RANGE) ?
+ start_key->key= (const byte*) last_range->min_key;
+ start_key->length= last_range->min_length;
+ start_key->flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
+ (last_range->flag & EQ_RANGE) ?
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
- end_key->key= (const byte*) range->max_key;
- end_key->length= range->max_length;
+ end_key->key= (const byte*) last_range->max_key;
+ end_key->length= last_range->max_length;
/*
We use HA_READ_AFTER_KEY here because if we are reading on a key
prefix. We want to find all keys with this prefix.
*/
- end_key->flag= (range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
+ end_key->flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
HA_READ_AFTER_KEY);
- mrange_slot->range_flag= range->flag;
+ mrange_slot->range_flag= last_range->flag;
}
result= file->read_multi_range_first(&mrange, multi_range, count,
@@ -8194,7 +8196,7 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix)
{
int result;
key_range start_key, end_key;
- if (range)
+ if (last_range)
{
/* Read the next record in the same range with prefix after cur_prefix. */
DBUG_ASSERT(cur_prefix != 0);
@@ -8208,35 +8210,35 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix)
if (count == 0)
{
/* Ranges have already been used up before. None is left for read. */
- range= 0;
+ last_range= 0;
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
- range= *(cur_range++);
+ last_range= *(cur_range++);
- start_key.key= (const byte*) range->min_key;
- start_key.length= min(range->min_length, prefix_length);
- start_key.flag= ((range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
- (range->flag & EQ_RANGE) ?
+ start_key.key= (const byte*) last_range->min_key;
+ start_key.length= min(last_range->min_length, prefix_length);
+ start_key.flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
+ (last_range->flag & EQ_RANGE) ?
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
- end_key.key= (const byte*) range->max_key;
- end_key.length= min(range->max_length, prefix_length);
+ end_key.key= (const byte*) last_range->max_key;
+ end_key.length= min(last_range->max_length, prefix_length);
/*
We use READ_AFTER_KEY here because if we are reading on a key
prefix we want to find all keys with this prefix
*/
- end_key.flag= (range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
+ end_key.flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
HA_READ_AFTER_KEY);
- result= file->read_range_first(range->min_length ? &start_key : 0,
- range->max_length ? &end_key : 0,
- test(range->flag & EQ_RANGE),
+ result= file->read_range_first(last_range->min_length ? &start_key : 0,
+ last_range->max_length ? &end_key : 0,
+ test(last_range->flag & EQ_RANGE),
sorted);
- if (range->flag == (UNIQUE_RANGE | EQ_RANGE))
- range=0; // Stop searching
+ if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE))
+ last_range= 0; // Stop searching
if (result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
- range=0; // No matching rows; go to next range
+ last_range= 0; // No matching rows; go to next range
}
}
@@ -8250,11 +8252,11 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
for (;;)
{
int result;
- if (range)
+ if (last_range)
{
// Already read through key
- result= file->index_next_same(record, (byte*) range->min_key,
- range->min_length);
+ result= file->index_next_same(record, (byte*) last_range->min_key,
+ last_range->min_length);
if (result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
}
@@ -8263,18 +8265,18 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
if (count == 0)
{
/* Ranges have already been used up before. None is left for read. */
- range= 0;
+ last_range= 0;
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
- range= *(cur_range++);
+ last_range= *(cur_range++);
result= file->index_read(record,
- (byte*) range->min_key,
- range->min_length,
- (ha_rkey_function)(range->flag ^ GEOM_FLAG));
+ (byte*) last_range->min_key,
+ last_range->min_length,
+ (ha_rkey_function)(last_range->flag ^ GEOM_FLAG));
if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
- range=0; // Not found, to next range
+ last_range= 0; // Not found, to next range
}
}
@@ -8299,7 +8301,7 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
bool QUICK_RANGE_SELECT::row_in_ranges()
{
- QUICK_RANGE *range;
+ QUICK_RANGE *res;
uint min= 0;
uint max= ranges.elements - 1;
uint mid= (max + min)/2;
@@ -8315,8 +8317,8 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
max= mid;
mid= (min + max) / 2;
}
- range= *(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid);
- return (!cmp_next(range) && !cmp_prev(range));
+ res= *(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid);
+ return (!cmp_next(res) && !cmp_prev(res));
}
/*
@@ -8330,14 +8332,14 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
*/
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
- uint used_key_parts)
+ uint used_key_parts_arg)
:QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
{
QUICK_RANGE *r;
QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer;
- QUICK_RANGE **last_range= pr + ranges.elements;
- for (; pr!=last_range; pr++)
+ QUICK_RANGE **end_range= pr + ranges.elements;
+ for (; pr!=end_range; pr++)
rev_ranges.push_front(*pr);
/* Remove EQ_RANGE flag for keys that are not using the full key */
@@ -8371,11 +8373,11 @@ int QUICK_SELECT_DESC::get_next()
for (;;)
{
int result;
- if (range)
+ if (last_range)
{ // Already read through key
- result = ((range->flag & EQ_RANGE)
- ? file->index_next_same(record, (byte*) range->min_key,
- range->min_length) :
+ result = ((last_range->flag & EQ_RANGE)
+ ? file->index_next_same(record, (byte*) last_range->min_key,
+ last_range->min_length) :
file->index_prev(record));
if (!result)
{
@@ -8386,47 +8388,49 @@ int QUICK_SELECT_DESC::get_next()
DBUG_RETURN(result);
}
- if (!(range=rev_it++))
+ if (!(last_range= rev_it++))
DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used
- if (range->flag & NO_MAX_RANGE) // Read last record
+ if (last_range->flag & NO_MAX_RANGE) // Read last record
{
int local_error;
if ((local_error=file->index_last(record)))
DBUG_RETURN(local_error); // Empty table
- if (cmp_prev(range) == 0)
+ if (cmp_prev(last_range) == 0)
DBUG_RETURN(0);
- range=0; // No matching records; go to next range
+ last_range= 0; // No match; go to next range
continue;
}
- if (range->flag & EQ_RANGE)
+ if (last_range->flag & EQ_RANGE)
{
- result = file->index_read(record, (byte*) range->max_key,
- range->max_length, HA_READ_KEY_EXACT);
+ result= file->index_read(record, (byte*) last_range->max_key,
+ last_range->max_length, HA_READ_KEY_EXACT);
}
else
{
- DBUG_ASSERT(range->flag & NEAR_MAX || range_reads_after_key(range));
- result=file->index_read(record, (byte*) range->max_key,
- range->max_length,
- ((range->flag & NEAR_MAX) ?
- HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV));
+ DBUG_ASSERT(last_range->flag & NEAR_MAX ||
+ range_reads_after_key(last_range));
+ result=file->index_read(record, (byte*) last_range->max_key,
+ last_range->max_length,
+ ((last_range->flag & NEAR_MAX) ?
+ HA_READ_BEFORE_KEY :
+ HA_READ_PREFIX_LAST_OR_PREV));
}
if (result)
{
if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
- range=0; // Not found, to next range
+ last_range= 0; // Not found, to next range
continue;
}
- if (cmp_prev(range) == 0)
+ if (cmp_prev(last_range) == 0)
{
- if (range->flag == (UNIQUE_RANGE | EQ_RANGE))
- range = 0; // Stop searching
+ if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE))
+ last_range= 0; // Stop searching
DBUG_RETURN(0); // Found key is in range
}
- range = 0; // To next range
+ last_range= 0; // To next range
}
}
@@ -10918,23 +10922,9 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
}
-static void print_rowid(byte* val, int len)
-{
- byte *pb;
- DBUG_LOCK_FILE;
- fputc('\"', DBUG_FILE);
- for (pb= val; pb!= val + len; ++pb)
- fprintf(DBUG_FILE, "%c", *pb);
- fprintf(DBUG_FILE, "\", hex: ");
-
- for (pb= val; pb!= val + len; ++pb)
- fprintf(DBUG_FILE, "%x ", *pb);
- fputc('\n', DBUG_FILE);
- DBUG_UNLOCK_FILE;
-}
-
void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose)
{
+ /* purecov: begin inspected */
fprintf(DBUG_FILE, "%*squick range select, key %s, length: %d\n",
indent, "", head->key_info[index].name, max_used_key_length);
@@ -10942,8 +10932,8 @@ void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose)
{
QUICK_RANGE *range;
QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer;
- QUICK_RANGE **last_range= pr + ranges.elements;
- for (; pr!=last_range; ++pr)
+ QUICK_RANGE **end_range= pr + ranges.elements;
+ for (; pr != end_range; ++pr)
{
fprintf(DBUG_FILE, "%*s", indent + 2, "");
range= *pr;
@@ -10968,6 +10958,7 @@ void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose)
fputs("\n",DBUG_FILE);
}
}
+ /* purecov: end */
}
void QUICK_INDEX_MERGE_SELECT::dbug_dump(int indent, bool verbose)
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 525a0adcff7..d82e1dc459e 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -301,7 +301,7 @@ protected:
DYNAMIC_ARRAY ranges; /* ordered array of range ptrs */
QUICK_RANGE **cur_range; /* current element in ranges */
- QUICK_RANGE *range;
+ QUICK_RANGE *last_range;
KEY_PART *key_parts;
KEY_PART_INFO *key_part_info;
int cmp_next(QUICK_RANGE *range);
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 90a3ff66a22..5bd5ec4b42d 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -97,9 +97,9 @@ static ulonglong get_exact_record_count(TABLE_LIST *tables)
GROUP BY part.
RETURN VALUES
- 0 No errors
- 1 if all items were resolved
- -1 on impossible conditions
+ 0 no errors
+ 1 if all items were resolved
+ HA_ERR_KEY_NOT_FOUND on impossible conditions
OR an error number from my_base.h HA_ERR_... if a deadlock or a lock
wait timeout happens, for example
*/
@@ -267,7 +267,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
- return -1; // No rows matching WHERE
+ return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE
/* HA_ERR_LOCK_DEADLOCK or some other error */
table->file->print_error(error, MYF(0));
return(error);
@@ -354,7 +354,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
- return -1; // No rows matching WHERE
+ return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE
/* HA_ERR_LOCK_DEADLOCK or some other error */
table->file->print_error(error, MYF(0));
return(error);
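
A small sketch of how a caller could dispatch on the return codes documented in the comment above; opt_sum_query_stub() is a hypothetical stand-in for the real function, and HA_ERR_KEY_NOT_FOUND carries its usual value from include/my_base.h.

#include <cstdio>

static const int HA_ERR_KEY_NOT_FOUND= 120;   // value from include/my_base.h

// Hypothetical stand-in: the real function inspects MIN()/MAX()/COUNT() items.
static int opt_sum_query_stub() { return 1; }

int main()
{
  int res= opt_sum_query_stub();
  if (res == 0)
    printf("aggregates not fully resolved; run the normal join\n");
  else if (res == 1)
    printf("all MIN/MAX/COUNT resolved to constants; no table scan needed\n");
  else if (res == HA_ERR_KEY_NOT_FOUND)
    printf("impossible WHERE; return an empty result\n");
  else
    printf("handler error %d (e.g. deadlock or lock wait timeout)\n", res);
  return 0;
}
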
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 76630e8530b..a7f9bd413c6 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -920,7 +920,6 @@ bool partition_info::set_up_charset_field_preps()
if (field_is_partition_charset(field))
{
char *field_buf;
- CHARSET_INFO *cs= ((Field_str*)field)->charset();
size= field->pack_length();
if (!(field_buf= sql_calloc(size)))
goto error;
diff --git a/sql/password.c b/sql/password.c
index 0e4bd6347e2..1ff67888ea4 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -472,7 +472,7 @@ scramble(char *to, const char *message, const char *password)
*/
my_bool
-check_scramble(const char *scramble, const char *message,
+check_scramble(const char *scramble_arg, const char *message,
const uint8 *hash_stage2)
{
SHA1_CONTEXT sha1_context;
@@ -485,7 +485,7 @@ check_scramble(const char *scramble, const char *message,
mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE);
mysql_sha1_result(&sha1_context, buf);
/* encrypt scramble */
- my_crypt((char *) buf, buf, (const uchar *) scramble, SCRAMBLE_LENGTH);
+ my_crypt((char *) buf, buf, (const uchar *) scramble_arg, SCRAMBLE_LENGTH);
/* now buf supposedly contains hash_stage1: so we can get hash_stage2 */
mysql_sha1_reset(&sha1_context);
mysql_sha1_input(&sha1_context, buf, SHA1_HASH_SIZE);
@@ -495,7 +495,8 @@ check_scramble(const char *scramble, const char *message,
/*
- Convert scrambled password from asciiz hex string to binary form.
+ Convert scrambled password from asciiz hex string to binary form.
+
SYNOPSIS
get_salt_from_password()
res OUT buf to hold password. Must be at least SHA1_HASH_SIZE
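
The password.c hunks only rename a shadowing parameter and tidy the comment; the challenge/response flow itself is unchanged. A rough round-trip sketch using just the two signatures visible in this diff, scramble(to, message, password) and check_scramble(scramble_arg, message, hash_stage2); the challenge generation, the stored-hash lookup and the assumption that a zero return means "match" are placeholders, not taken from the patch.

    /* Hypothetical sketch, not server code: glue around the two functions
       shown in this file.  make_random_challenge() and load_stored_hash()
       are invented placeholders. */
    static my_bool password_matches(void)
    {
      char challenge[SCRAMBLE_LENGTH + 1];   /* random, NUL-terminated challenge */
      char reply[SCRAMBLE_LENGTH];           /* filled in by the client          */
      uint8 stored_stage2[SHA1_HASH_SIZE];   /* SHA1(SHA1(password)) from store  */

      make_random_challenge(challenge);               /* placeholder */
      load_stored_hash(stored_stage2);                /* placeholder */
      scramble(reply, challenge, "client_password");  /* client side */
      /* assumed: check_scramble() returns 0 when the reply matches */
      return check_scramble(reply, challenge, stored_stage2) == 0;
    }
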
diff --git a/sql/protocol.cc b/sql/protocol.cc
index da46405dea2..05e98c68e4e 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -27,8 +27,10 @@
#include <stdarg.h>
static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024;
-static void write_eof_packet(THD *thd, NET *net);
void net_send_error_packet(THD *thd, uint sql_errno, const char *err);
+#ifndef EMBEDDED_LIBRARY
+static void write_eof_packet(THD *thd, NET *net);
+#endif
#ifndef EMBEDDED_LIBRARY
bool Protocol::net_store_data(const char *from, uint length)
@@ -935,15 +937,15 @@ bool Protocol_simple::store(Field *field)
char buff[MAX_FIELD_WIDTH];
String str(buff,sizeof(buff), &my_charset_bin);
CHARSET_INFO *tocs= this->thd->variables.character_set_results;
+#ifndef DBUG_OFF
TABLE *table= field->table;
-#ifdef DBUG_OFF
my_bitmap_map *old_map= 0;
if (table->file)
old_map= dbug_tmp_use_all_columns(table, table->read_set);
#endif
field->val_str(&str);
-#ifdef DBUG_OFF
+#ifndef DBUG_OFF
if (old_map)
dbug_tmp_restore_column_map(table->read_set, old_map);
#endif
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 1b272361a19..16b00cab516 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -530,11 +530,11 @@ HOSTS";
while ((row= mysql_fetch_row(res)))
{
- uint32 server_id;
+ uint32 log_server_id;
SLAVE_INFO* si, *old_si;
- server_id = atoi(row[0]);
+ log_server_id = atoi(row[0]);
if ((old_si= (SLAVE_INFO*)hash_search(&slave_list,
- (byte*)&server_id,4)))
+ (byte*)&log_server_id,4)))
si = old_si;
else
{
@@ -544,7 +544,7 @@ HOSTS";
pthread_mutex_unlock(&LOCK_slave_list);
goto err;
}
- si->server_id = server_id;
+ si->server_id = log_server_id;
my_hash_insert(&slave_list, (byte*)si);
}
strmake(si->host, row[1], sizeof(si->host)-1);
@@ -918,14 +918,14 @@ bool load_master_data(THD* thd)
setting active_mi, because init_master_info() sets active_mi with
defaults.
*/
- int error;
+ int error_2;
if (init_master_info(active_mi, master_info_file, relay_log_info_file,
0, (SLAVE_IO | SLAVE_SQL)))
my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0));
strmake(active_mi->master_log_name, row[0],
sizeof(active_mi->master_log_name));
- active_mi->master_log_pos= my_strtoll10(row[1], (char**) 0, &error);
+ active_mi->master_log_pos= my_strtoll10(row[1], (char**) 0, &error_2);
/* at least in recent versions, the condition below should be false */
if (active_mi->master_log_pos < BIN_LOG_HEADER_SIZE)
active_mi->master_log_pos = BIN_LOG_HEADER_SIZE;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 7425e7c352d..55fbeac5622 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -116,9 +116,6 @@ TYPELIB delay_key_write_typelib=
delay_key_write_type_names, NULL
};
-static int sys_check_charset(THD *thd, set_var *var);
-static bool sys_update_charset(THD *thd, set_var *var);
-static void sys_set_default_charset(THD *thd, enum_var_type type);
static int sys_check_ftb_syntax(THD *thd, set_var *var);
static bool sys_update_ftb_syntax(THD *thd, set_var * var);
static void sys_default_ftb_syntax(THD *thd, enum_var_type type);
@@ -1414,9 +1411,9 @@ static void fix_server_id(THD *thd, enum_var_type type)
sys_var_long_ptr::
-sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
+sys_var_long_ptr(const char *name_arg, ulong *value_ptr_arg,
sys_after_update_func after_update_arg)
- :sys_var_long_ptr_global(name_arg, value_ptr,
+ :sys_var_long_ptr_global(name_arg, value_ptr_arg,
&LOCK_global_system_variables, after_update_arg)
{}
@@ -1766,7 +1763,7 @@ Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base)
/* As there was no local variable, return the global value */
var_type= OPT_GLOBAL;
}
- switch (type()) {
+ switch (show_type()) {
case SHOW_INT:
{
uint value;
@@ -3999,8 +3996,6 @@ sys_var_event_scheduler::update(THD *thd, set_var *var)
DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value));
- Item_result var_type= var->value->result_type();
-
if (var->save_result.ulong_value == Events::EVENTS_ON)
res= Events::get_instance()->start_execution_of_events();
else if (var->save_result.ulong_value == Events::EVENTS_OFF)
diff --git a/sql/set_var.h b/sql/set_var.h
index 69f759a91bb..338ec5513b0 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -64,8 +64,8 @@ public:
bool check_enum(THD *thd, set_var *var, TYPELIB *enum_names);
bool check_set(THD *thd, set_var *var, TYPELIB *enum_names);
virtual bool update(THD *thd, set_var *var)=0;
- virtual void set_default(THD *thd, enum_var_type type) {}
- virtual SHOW_TYPE type() { return SHOW_UNDEF; }
+ virtual void set_default(THD *thd_arg, enum_var_type type) {}
+ virtual SHOW_TYPE show_type() { return SHOW_UNDEF; }
virtual byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{ return 0; }
virtual bool check_type(enum_var_type type)
@@ -105,14 +105,16 @@ class sys_var_long_ptr_global: public sys_var_global
{
public:
ulong *value;
- sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr,
+ sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr_arg,
pthread_mutex_t *guard_arg,
sys_after_update_func after_update_arg= NULL)
- :sys_var_global(name_arg, after_update_arg, guard_arg), value(value_ptr) {}
+ :sys_var_global(name_arg, after_update_arg, guard_arg),
+ value(value_ptr_arg)
+ {}
bool check(THD *thd, set_var *var);
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_LONG; }
+ SHOW_TYPE show_type() { return SHOW_LONG; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{ return (byte*) value; }
};
@@ -134,14 +136,14 @@ class sys_var_ulonglong_ptr :public sys_var
{
public:
ulonglong *value;
- sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr)
- :sys_var(name_arg),value(value_ptr) {}
- sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr,
+ sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr_arg)
+ :sys_var(name_arg),value(value_ptr_arg) {}
+ sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr_arg,
sys_after_update_func func)
- :sys_var(name_arg,func), value(value_ptr) {}
+ :sys_var(name_arg,func), value(value_ptr_arg) {}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_LONGLONG; }
+ SHOW_TYPE show_type() { return SHOW_LONGLONG; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{ return (byte*) value; }
};
@@ -160,7 +162,7 @@ public:
}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_MY_BOOL; }
+ SHOW_TYPE show_type() { return SHOW_MY_BOOL; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{ return (byte*) value; }
bool check_update_type(Item_result type) { return 0; }
@@ -192,7 +194,7 @@ public:
{
(*set_default_func)(thd, type);
}
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{ return (byte*) value; }
bool check_update_type(Item_result type)
@@ -218,7 +220,7 @@ public:
{
return 1;
}
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{
return (byte*) value;
@@ -247,7 +249,7 @@ public:
{
return 1;
}
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
{
return (byte*) *value;
@@ -275,7 +277,7 @@ public:
return check_enum(thd, var, enum_names);
}
bool update(THD *thd, set_var *var);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
bool check_update_type(Item_result type) { return 0; }
};
@@ -310,7 +312,7 @@ public:
bool check(THD *thd, set_var *var);
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_LONG; }
+ SHOW_TYPE show_type() { return SHOW_LONG; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
@@ -328,7 +330,7 @@ public:
{}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_HA_ROWS; }
+ SHOW_TYPE show_type() { return SHOW_HA_ROWS; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
@@ -348,7 +350,7 @@ public:
{}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_LONGLONG; }
+ SHOW_TYPE show_type() { return SHOW_LONGLONG; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
bool check_default(enum_var_type type)
{
@@ -374,7 +376,7 @@ public:
{}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_MY_BOOL; }
+ SHOW_TYPE show_type() { return SHOW_MY_BOOL; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
bool check(THD *thd, set_var *var)
{
@@ -417,7 +419,7 @@ public:
}
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
bool check_update_type(Item_result type) { return 0; }
};
@@ -452,7 +454,7 @@ public:
:sys_var_thd(name_arg), offset(offset_arg)
{}
bool check(THD *thd, set_var *var);
-SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool check_update_type(Item_result type)
{
return type != STRING_RESULT; /* Only accept strings */
@@ -490,7 +492,7 @@ public:
bool update(THD *thd, set_var *var);
bool check_update_type(Item_result type) { return 0; }
bool check_type(enum_var_type type) { return type == OPT_GLOBAL; }
- SHOW_TYPE type() { return SHOW_MY_BOOL; }
+ SHOW_TYPE show_type() { return SHOW_MY_BOOL; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
@@ -500,7 +502,7 @@ public:
sys_var_thd_dbug(const char *name_arg) :sys_var_thd(name_arg) {}
bool check_update_type(Item_result type) { return type != STRING_RESULT; }
bool check(THD *thd, set_var *var);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type) { DBUG_POP(); }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *b);
@@ -518,7 +520,7 @@ public:
void set_default(THD *thd, enum_var_type type);
bool check_type(enum_var_type type) { return type == OPT_GLOBAL; }
bool check_default(enum_var_type type) { return 0; }
- SHOW_TYPE type() { return SHOW_LONG; }
+ SHOW_TYPE show_type() { return SHOW_LONG; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
@@ -529,7 +531,7 @@ public:
sys_var_last_insert_id(const char *name_arg) :sys_var(name_arg) {}
bool update(THD *thd, set_var *var);
bool check_type(enum_var_type type) { return type == OPT_GLOBAL; }
- SHOW_TYPE type() { return SHOW_LONGLONG; }
+ SHOW_TYPE show_type() { return SHOW_LONGLONG; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
@@ -540,7 +542,7 @@ public:
sys_var_insert_id(const char *name_arg) :sys_var(name_arg) {}
bool update(THD *thd, set_var *var);
bool check_type(enum_var_type type) { return type == OPT_GLOBAL; }
- SHOW_TYPE type() { return SHOW_LONGLONG; }
+ SHOW_TYPE show_type() { return SHOW_LONGLONG; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
@@ -555,15 +557,15 @@ public:
bool check_type(enum_var_type type) { return type != OPT_GLOBAL; }
/*
We can't retrieve the value of this, so we don't have to define
- type() or value_ptr()
+ show_type() or value_ptr()
*/
};
class sys_var_sync_binlog_period :public sys_var_long_ptr
{
public:
- sys_var_sync_binlog_period(const char *name_arg, ulong *value_ptr)
- :sys_var_long_ptr(name_arg,value_ptr) {}
+ sys_var_sync_binlog_period(const char *name_arg, ulong *value_ptr_arg)
+ :sys_var_long_ptr(name_arg,value_ptr_arg) {}
bool update(THD *thd, set_var *var);
};
#endif
@@ -593,7 +595,7 @@ public:
no_support_one_shot= 0;
}
bool check(THD *thd, set_var *var);
-SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool check_update_type(Item_result type)
{
return ((type != STRING_RESULT) && (type != INT_RESULT));
@@ -617,7 +619,7 @@ public:
no_support_one_shot= 0;
}
bool check(THD *thd, set_var *var);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool check_update_type(Item_result type)
{
return ((type != STRING_RESULT) && (type != INT_RESULT));
@@ -733,7 +735,7 @@ public:
:sys_var_key_cache_param(name_arg, offsetof(KEY_CACHE, param_buff_size))
{}
bool update(THD *thd, set_var *var);
- SHOW_TYPE type() { return SHOW_LONGLONG; }
+ SHOW_TYPE show_type() { return SHOW_LONGLONG; }
};
@@ -744,7 +746,7 @@ public:
:sys_var_key_cache_param(name_arg, offset_arg)
{}
bool update(THD *thd, set_var *var);
- SHOW_TYPE type() { return SHOW_LONG; }
+ SHOW_TYPE show_type() { return SHOW_LONG; }
};
@@ -759,7 +761,7 @@ public:
:sys_var_thd(name_arg), offset(offset_arg),
date_time_type(date_time_type_arg)
{}
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool check_update_type(Item_result type)
{
return type != STRING_RESULT; /* Only accept strings */
@@ -801,7 +803,7 @@ public:
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
bool check_update_type(Item_result type) { return 0; }
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
};
@@ -811,13 +813,13 @@ class sys_var_readonly: public sys_var
{
public:
enum_var_type var_type;
- SHOW_TYPE show_type;
+ SHOW_TYPE show_type_value;
sys_value_ptr_func value_ptr_func;
sys_var_readonly(const char *name_arg, enum_var_type type,
SHOW_TYPE show_type_arg,
sys_value_ptr_func value_ptr_func_arg)
:sys_var(name_arg), var_type(type),
- show_type(show_type_arg), value_ptr_func(value_ptr_func_arg)
+ show_type_value(show_type_arg), value_ptr_func(value_ptr_func_arg)
{}
bool update(THD *thd, set_var *var) { return 1; }
bool check_default(enum_var_type type) { return 1; }
@@ -827,7 +829,7 @@ public:
{
return (*value_ptr_func)(thd);
}
- SHOW_TYPE type() { return show_type; }
+ SHOW_TYPE show_type() { return show_type_value; }
bool is_readonly() const { return 1; }
};
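
The accessor rename in this hunk, together with the show_type to show_type_value member rename just above it, works around a plain C++ rule: a class cannot hold a data member and a member function under the same name, so once the virtual accessor becomes show_type(), sys_var_readonly's stored SHOW_TYPE has to move to show_type_value. A minimal illustration of that constraint, reduced to plain ints; nothing here is server code.

    struct sys_var_readonly_like
    {
      /* int show_type;                // would now clash with the accessor below */
      int show_type_value;             /* what the patch renames the member to    */
      virtual int show_type() { return show_type_value; }
      virtual ~sys_var_readonly_like() {}
    };
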
@@ -850,7 +852,7 @@ public:
bool check_default(enum_var_type type) { return 1; }
bool check_type(enum_var_type type) { return type != OPT_GLOBAL; }
bool check_update_type(Item_result type) { return 1; }
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool is_readonly() const { return 1; }
};
@@ -864,7 +866,7 @@ public:
no_support_one_shot= 0;
}
bool check(THD *thd, set_var *var);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool check_update_type(Item_result type)
{
return type != STRING_RESULT; /* Only accept strings */
@@ -888,7 +890,7 @@ public:
return type != OPT_GLOBAL || !option_limits;
}
void set_default(THD *thd, enum_var_type type);
- SHOW_TYPE type() { return SHOW_INT; }
+ SHOW_TYPE show_type() { return SHOW_INT; }
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
@@ -929,7 +931,7 @@ public:
#endif
}
bool check(THD *thd, set_var *var);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool check_update_type(Item_result type)
{
return ((type != STRING_RESULT) && (type != INT_RESULT));
@@ -949,7 +951,7 @@ public:
sys_var_long_ptr(name_arg, NULL, NULL) {};
bool update(THD *thd, set_var *var);
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
- SHOW_TYPE type() { return SHOW_CHAR; }
+ SHOW_TYPE show_type() { return SHOW_CHAR; }
bool check(THD *thd, set_var *var);
bool check_update_type(Item_result type)
{
@@ -1008,8 +1010,8 @@ public:
} save_result;
LEX_STRING base; /* for structs */
- set_var(enum_var_type type_arg, sys_var *var_arg, const LEX_STRING *base_name_arg,
- Item *value_arg)
+ set_var(enum_var_type type_arg, sys_var *var_arg,
+ const LEX_STRING *base_name_arg, Item *value_arg)
:var(var_arg), type(type_arg), base(*base_name_arg)
{
/*
diff --git a/sql/slave.cc b/sql/slave.cc
index 4c471df71f8..6f3c5926023 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -73,6 +73,7 @@ static int request_table_dump(MYSQL* mysql, const char* db, const char* table);
static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
const char* table_name, bool overwrite);
static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi);
+static Log_event* next_event(RELAY_LOG_INFO* rli);
/*
Find out which replications threads are running
@@ -1326,12 +1327,12 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
if ((mi->slave_running == MYSQL_SLAVE_RUN_CONNECT) &&
mi->rli.slave_running)
{
- long tmp= (long)((time_t)time((time_t*) 0)
- - mi->rli.last_master_timestamp)
- - mi->clock_diff_with_master;
+ long time_diff= ((long)((time_t)time((time_t*) 0)
+ - mi->rli.last_master_timestamp)
+ - mi->clock_diff_with_master);
/*
- Apparently on some systems tmp can be <0. Here are possible reasons
- related to MySQL:
+ Apparently on some systems time_diff can be <0. Here are possible
+ reasons related to MySQL:
- the master is itself a slave of another master whose time is ahead.
- somebody used an explicit SET TIMESTAMP on the master.
Possible reason related to granularity-to-second of time functions
@@ -1349,8 +1350,8 @@ bool show_master_info(THD* thd, MASTER_INFO* mi)
last_master_timestamp == 0 (an "impossible" timestamp 1970) is a
special marker to say "consider we have caught up".
*/
- protocol->store((longlong)(mi->rli.last_master_timestamp ? max(0, tmp)
- : 0));
+ protocol->store((longlong)(mi->rli.last_master_timestamp ?
+ max(0, time_diff) : 0));
}
else
protocol->store_null();
@@ -2051,15 +2052,16 @@ after reconnect");
while (!io_slave_killed(thd,mi))
{
- bool suppress_warnings= 0;
+ ulong event_len;
+ suppress_warnings= 0;
/*
We say "waiting" because read_event() will wait if there's nothing to
read. But if there's something to read, it will not wait. The
important thing is to not confuse users by saying "reading" whereas
we're in fact receiving nothing.
*/
- thd->proc_info = "Waiting for master to send event";
- ulong event_len = read_event(mysql, mi, &suppress_warnings);
+ thd->proc_info= "Waiting for master to send event";
+ event_len= read_event(mysql, mi, &suppress_warnings);
if (io_slave_killed(thd,mi))
{
if (global_system_variables.log_warnings)
@@ -2860,6 +2862,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
pthread_mutex_t *log_lock= rli->relay_log.get_log_lock();
DBUG_ENTER("queue_event");
+ LINT_INIT(inc_pos);
+
if (mi->rli.relay_log.description_event_for_queue->binlog_version<4 &&
buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */)
DBUG_RETURN(queue_old_event(mi,buf,event_len));
@@ -3001,7 +3005,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len)
err:
pthread_mutex_unlock(&mi->data_lock);
- DBUG_PRINT("info", ("error=%d", error));
+ DBUG_PRINT("info", ("error: %d", error));
DBUG_RETURN(error);
}
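
The time_diff rename earlier in these slave.cc hunks feeds Seconds_Behind_Master, and the surrounding comment spells out the contract: last_master_timestamp == 0 is the "consider we have caught up" marker, and a negative difference (master clock ahead, explicit SET TIMESTAMP, second-granularity rounding) is clamped to zero via max(). A standalone restatement of that arithmetic, with the MASTER_INFO plumbing replaced by plain arguments:

    #include <algorithm>
    #include <ctime>

    /* Sketch only; mirrors the expression shown in the hunk above. */
    long seconds_behind_master(time_t now,
                               time_t last_master_timestamp,
                               long clock_diff_with_master)
    {
      if (last_master_timestamp == 0)
        return 0;                                  /* special marker: caught up */
      long time_diff= (long) (now - last_master_timestamp) - clock_diff_with_master;
      return std::max(0L, time_diff);              /* never report negative lag */
    }
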
diff --git a/sql/slave.h b/sql/slave.h
index 43eb71be601..bc039f6eb75 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -111,8 +111,6 @@ extern ulonglong relay_log_space_limit;
#define MYSQL_SLAVE_RUN_NOT_CONNECT 1
#define MYSQL_SLAVE_RUN_CONNECT 2
-static Log_event* next_event(RELAY_LOG_INFO* rli);
-
#define RPL_LOG_NAME (rli->group_master_log_name[0] ? rli->group_master_log_name :\
"FIRST")
#define IO_RPL_LOG_NAME (mi->master_log_name[0] ? mi->master_log_name :\
diff --git a/sql/sp.cc b/sql/sp.cc
index 14703e3aa42..3a7bea6a4b1 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -494,8 +494,6 @@ db_create_routine(THD *thd, int type, sp_head *sp)
int ret;
TABLE *table;
char definer[USER_HOST_BUFF_SIZE];
- char old_db_buf[NAME_LEN+1];
- LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
DBUG_ENTER("db_create_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length,
sp->m_name.str));
@@ -990,7 +988,7 @@ sp_find_routine(THD *thd, int type, sp_name *name, sp_cache **cp,
DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp));
if (sp->m_first_free_instance)
{
- DBUG_PRINT("info", ("first free: 0x%lx, level: %lu, flags %x",
+ DBUG_PRINT("info", ("first free: 0x%lx level: %lu flags %x",
(ulong)sp->m_first_free_instance,
sp->m_first_free_instance->m_recursion_level,
sp->m_first_free_instance->m_flags));
@@ -1839,9 +1837,7 @@ create_string(THD *thd, String *buf,
SYNOPSIS
sp_use_new_db()
thd thread handle
-
new_db new database name (a string and its length)
-
old_db [IN] str points to a buffer where to store the old
database, length contains the size of the buffer
[OUT] if old db was not NULL, its name is copied
@@ -1849,7 +1845,6 @@ create_string(THD *thd, String *buf,
accordingly. Otherwise str[0] is set to '\0' and length
is set to 0. The out parameter should be used only if
the database name has been changed (see dbchangedp).
-
dbchangedp [OUT] is set to TRUE if the current database is changed,
FALSE otherwise. A database is not changed if the old
name is the same as the new one, both names are empty,
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 7c110185a95..b77d0cc9a0c 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -94,8 +94,6 @@ sp_map_item_type(enum enum_field_types type)
static String *
sp_get_item_value(THD *thd, Item *item, String *str)
{
- Item_result result_type= item->result_type();
-
switch (item->result_type()) {
case REAL_RESULT:
case INT_RESULT:
@@ -1369,7 +1367,6 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
MEM_ROOT call_mem_root;
Query_arena call_arena(&call_mem_root, Query_arena::INITIALIZED_FOR_SP);
Query_arena backup_arena;
-
DBUG_ENTER("sp_head::execute_function");
DBUG_PRINT("info", ("function %s", m_name.str));
@@ -1777,7 +1774,7 @@ sp_head::reset_lex(THD *thd)
DBUG_ENTER("sp_head::reset_lex");
LEX *sublex;
LEX *oldlex= thd->lex;
- my_lex_states state= oldlex->next_state; // Keep original next_state
+ my_lex_states org_next_state= oldlex->next_state;
(void)m_lex.push_front(oldlex);
thd->lex= sublex= new st_lex;
@@ -1786,10 +1783,10 @@ sp_head::reset_lex(THD *thd)
lex_start(thd, oldlex->buf, (ulong) (oldlex->end_of_query - oldlex->ptr));
/*
- * next_state is normally the same (0), but it happens that we swap lex in
- * "mid-sentence", so we must restore it.
+ next_state is normally the same (0), but it happens that we swap lex in
+ "mid-sentence", so we must restore it.
*/
- sublex->next_state= state;
+ sublex->next_state= org_next_state;
/* We must reset ptr and end_of_query again */
sublex->ptr= oldlex->ptr;
sublex->end_of_query= oldlex->end_of_query;
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 385d90bb5b8..be6eefa2ea4 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -653,9 +653,9 @@ class sp_instr_set : public sp_instr
public:
sp_instr_set(uint ip, sp_pcontext *ctx,
- uint offset, Item *val, enum enum_field_types type,
+ uint offset, Item *val, enum enum_field_types type_arg,
LEX *lex, bool lex_resp)
- : sp_instr(ip, ctx), m_offset(offset), m_value(val), m_type(type),
+ : sp_instr(ip, ctx), m_offset(offset), m_value(val), m_type(type_arg),
m_lex_keeper(lex, lex_resp)
{}
@@ -853,8 +853,9 @@ class sp_instr_freturn : public sp_instr
public:
sp_instr_freturn(uint ip, sp_pcontext *ctx,
- Item *val, enum enum_field_types type, LEX *lex)
- : sp_instr(ip, ctx), m_value(val), m_type(type), m_lex_keeper(lex, TRUE)
+ Item *val, enum enum_field_types type_arg, LEX *lex)
+ : sp_instr(ip, ctx), m_value(val), m_type(type_arg),
+ m_lex_keeper(lex, TRUE)
{}
virtual ~sp_instr_freturn()
diff --git a/sql/spatial.cc b/sql/spatial.cc
index 22df69a978b..6cadb0f3aad 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -23,13 +23,13 @@
String Geometry::bad_geometry_data("Bad object", &my_charset_bin);
-Geometry::Class_info *Geometry::ci_collection[Geometry::wkb_end+1]=
+Geometry::Class_info *Geometry::ci_collection[Geometry::wkb_last+1]=
{
NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
static Geometry::Class_info **ci_collection_end=
- Geometry::ci_collection+Geometry::wkb_end + 1;
+ Geometry::ci_collection+Geometry::wkb_last + 1;
Geometry::Class_info::Class_info(const char *name, int type_id,
void(*create_func)(void *)):
@@ -552,7 +552,7 @@ bool Gis_line_string::get_mbr(MBR *mbr, const char **end) const
}
-int Gis_line_string::length(double *len) const
+int Gis_line_string::geom_length(double *len) const
{
uint32 n_points;
double prev_x, prev_y;
@@ -928,6 +928,8 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
n_linear_rings= uint4korr(data);
data+= 4;
+ DBUG_ASSERT(n_linear_rings > 0);
+
while (n_linear_rings--)
{
uint32 n_points, org_n_points;
@@ -947,14 +949,14 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
while (--n_points) // One point is already read
{
- double x, y;
- get_point(&x, &y, data);
+ double tmp_x, tmp_y;
+ get_point(&tmp_x, &tmp_y, data);
data+= (SIZEOF_STORED_DOUBLE*2);
- cur_area+= (prev_x + x) * (prev_y - y);
- cur_cx+= x;
- cur_cy+= y;
- prev_x= x;
- prev_y= y;
+ cur_area+= (prev_x + tmp_x) * (prev_y - tmp_y);
+ cur_cx+= tmp_x;
+ cur_cy+= tmp_y;
+ prev_x= tmp_x;
+ prev_y= tmp_y;
}
cur_area= fabs(cur_area) / 2;
cur_cx= cur_cx / (org_n_points - 1);
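
The centroid_xy() loop above accumulates (prev_x + x) * (prev_y - y), i.e. the trapezoid form of the shoelace formula, so the running sum is twice the signed area, and the centroid is approximated as the average of the ring's distinct vertices. A standalone check of that accumulation on a unit square, assuming (as WKB linear rings do) that the first point is repeated as the last one:

    #include <cmath>
    #include <cstdio>

    int main()
    {
      /* Unit square as a closed ring: first point repeated at the end. */
      double px[]= {0, 1, 1, 0, 0};
      double py[]= {0, 0, 1, 1, 0};
      const int n_points= 5;

      double area2= 0, cx= 0, cy= 0;
      double prev_x= px[0], prev_y= py[0];
      for (int i= 1; i < n_points; i++)      /* mirrors "while (--n_points)" */
      {
        area2+= (prev_x + px[i]) * (prev_y - py[i]);
        cx+= px[i];
        cy+= py[i];
        prev_x= px[i];
        prev_y= py[i];
      }
      double area= std::fabs(area2) / 2;     /* 1.0 for the unit square */
      cx/= n_points - 1;                     /* 0.5                     */
      cy/= n_points - 1;                     /* 0.5                     */
      std::printf("area=%g centroid=(%g,%g)\n", area, cx, cy);
      return 0;
    }
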
@@ -1298,7 +1300,7 @@ int Gis_multi_line_string::geometry_n(uint32 num, String *result) const
}
-int Gis_multi_line_string::length(double *len) const
+int Gis_multi_line_string::geom_length(double *len) const
{
uint32 n_line_strings;
const char *data= m_data;
@@ -1315,7 +1317,7 @@ int Gis_multi_line_string::length(double *len) const
Gis_line_string ls;
data+= WKB_HEADER_SIZE;
ls.set_data_ptr(data, (uint32) (m_data_end - data));
- if (ls.length(&ls_len))
+ if (ls.geom_length(&ls_len))
return 1;
*len+= ls_len;
/*
diff --git a/sql/spatial.h b/sql/spatial.h
index afce9b2d98f..f0c8b7bba28 100644
--- a/sql/spatial.h
+++ b/sql/spatial.h
@@ -188,7 +188,7 @@ public:
wkb_multilinestring= 5,
wkb_multipolygon= 6,
wkb_geometrycollection= 7,
- wkb_end=7
+ wkb_last=7
};
enum wkbByteOrder
{
@@ -217,7 +217,7 @@ public:
virtual bool dimension(uint32 *dim, const char **end) const=0;
virtual int get_x(double *x) const { return -1; }
virtual int get_y(double *y) const { return -1; }
- virtual int length(double *len) const { return -1; }
+ virtual int geom_length(double *len) const { return -1; }
virtual int area(double *ar, const char **end) const { return -1;}
virtual int is_closed(int *closed) const { return -1; }
virtual int num_interior_ring(uint32 *n_int_rings) const { return -1; }
@@ -273,12 +273,12 @@ public:
}
bool envelope(String *result) const;
- static Class_info *ci_collection[wkb_end+1];
+ static Class_info *ci_collection[wkb_last+1];
protected:
static Class_info *find_class(int type_id)
{
- return ((type_id < wkb_point) || (type_id > wkb_end)) ?
+ return ((type_id < wkb_point) || (type_id > wkb_last)) ?
NULL : ci_collection[type_id];
}
static Class_info *find_class(const char *name, uint32 len);
@@ -359,7 +359,7 @@ public:
uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res);
bool get_data_as_wkt(String *txt, const char **end) const;
bool get_mbr(MBR *mbr, const char **end) const;
- int length(double *len) const;
+ int geom_length(double *len) const;
int is_closed(int *closed) const;
int num_points(uint32 *n_points) const;
int start_point(String *point) const;
@@ -441,7 +441,7 @@ public:
bool get_mbr(MBR *mbr, const char **end) const;
int num_geometries(uint32 *num) const;
int geometry_n(uint32 num, String *result) const;
- int length(double *len) const;
+ int geom_length(double *len) const;
int is_closed(int *closed) const;
bool dimension(uint32 *dim, const char **end) const
{
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index c03910a7829..fd2e8445132 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3764,7 +3764,7 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
{
List_iterator_fast<Natural_join_column>
field_it(*(table_ref->join_columns));
- Natural_join_column *nj_col;
+ Natural_join_column *nj_col, *curr_nj_col;
Field *found_field;
Query_arena *arena, backup;
DBUG_ENTER("find_field_in_natural_join");
@@ -3776,14 +3776,21 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
LINT_INIT(arena);
LINT_INIT(found_field);
- for (;;)
+ for (nj_col= NULL, curr_nj_col= field_it++; curr_nj_col;
+ curr_nj_col= field_it++)
{
- if (!(nj_col= field_it++))
- DBUG_RETURN(NULL);
-
- if (!my_strcasecmp(system_charset_info, nj_col->name(), name))
- break;
+ if (!my_strcasecmp(system_charset_info, curr_nj_col->name(), name))
+ {
+ if (nj_col)
+ {
+ my_error(ER_NON_UNIQ_ERROR, MYF(0), name, thd->where);
+ DBUG_RETURN(NULL);
+ }
+ nj_col= curr_nj_col;
+ }
}
+ if (!nj_col)
+ DBUG_RETURN(NULL);
if (nj_col->view_field)
{
@@ -4684,9 +4691,16 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
{
bool found= FALSE;
const char *field_name_1;
+ /* true if field_name_1 is a member of using_fields */
+ bool is_using_column_1;
if (!(nj_col_1= it_1.get_or_create_column_ref(leaf_1)))
goto err;
field_name_1= nj_col_1->name();
+ is_using_column_1= using_fields &&
+ test_if_string_in_list(field_name_1, using_fields);
+ DBUG_PRINT ("info", ("field_name_1=%s.%s",
+ nj_col_1->table_name() ? nj_col_1->table_name() : "",
+ field_name_1));
/*
Find a field with the same name in table_ref_2.
@@ -4703,6 +4717,10 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
if (!(cur_nj_col_2= it_2.get_or_create_column_ref(leaf_2)))
goto err;
cur_field_name_2= cur_nj_col_2->name();
+ DBUG_PRINT ("info", ("cur_field_name_2=%s.%s",
+ cur_nj_col_2->table_name() ?
+ cur_nj_col_2->table_name() : "",
+ cur_field_name_2));
/*
Compare the two columns and check for duplicate common fields.
@@ -4710,10 +4728,16 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
table_ref_2 (then found == TRUE), or if a field in table_ref_2
was already matched by some previous field in table_ref_1
(then cur_nj_col_2->is_common == TRUE).
+ Note that it is too early to check the columns outside of the
+ USING list for ambiguity because they are not actually "referenced"
+ here. These columns must be checked only on unqualified reference
+ by name (e.g. in SELECT list).
*/
if (!my_strcasecmp(system_charset_info, field_name_1, cur_field_name_2))
{
- if (found || cur_nj_col_2->is_common)
+ DBUG_PRINT ("info", ("match c1.is_common=%d", nj_col_1->is_common));
+ if (cur_nj_col_2->is_common ||
+ (found && (!using_fields || is_using_column_1)))
{
my_error(ER_NON_UNIQ_ERROR, MYF(0), field_name_1, thd->where);
goto err;
@@ -4739,9 +4763,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
clause (if present), mark them as common fields, and add a new
equi-join condition to the ON clause.
*/
- if (nj_col_2 &&
- (!using_fields ||
- test_if_string_in_list(field_name_1, using_fields)))
+ if (nj_col_2 && (!using_fields ||is_using_column_1))
{
Item *item_1= nj_col_1->create_item(thd);
Item *item_2= nj_col_2->create_item(thd);
@@ -4796,6 +4818,13 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
eq_cond);
nj_col_1->is_common= nj_col_2->is_common= TRUE;
+ DBUG_PRINT ("info", ("%s.%s and %s.%s are common",
+ nj_col_1->table_name() ?
+ nj_col_1->table_name() : "",
+ nj_col_1->name(),
+ nj_col_2->table_name() ?
+ nj_col_2->table_name() : "",
+ nj_col_2->name()));
if (field_1)
{
@@ -5462,7 +5491,12 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
get_key_map_from_key_list(&map, table, table_list->use_index);
if (map.is_set_all())
DBUG_RETURN(1);
- table->keys_in_use_for_query=map;
+ /*
+ Don't introduce keys in keys_in_use_for_query that weren't there
+ before. FORCE/USE INDEX should not add keys, it should only remove
+ all keys except the key(s) specified in the hint.
+ */
+ table->keys_in_use_for_query.intersect(map);
}
if (table_list->ignore_index)
{
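
The last sql_base.cc hunk replaces a plain assignment with key_map::intersect(), for the reason the new comment gives: an index hint must only be able to disable keys, never re-enable keys that were already excluded for the query. key_map is the server's own bitmap class, but the difference is the same as for a std::bitset, which the standalone snippet below uses to keep the example self-contained:

    #include <bitset>
    #include <cassert>

    int main()
    {
      std::bitset<8> keys_in_use_for_query("00000011"); /* keys 0 and 1 usable  */
      std::bitset<8> hint("00000110");                   /* USE INDEX names 1, 2 */

      std::bitset<8> old_behaviour= hint;                          /* assignment   */
      std::bitset<8> new_behaviour= keys_in_use_for_query & hint;  /* intersection */

      assert(old_behaviour.test(2));   /* assignment re-enabled key 2              */
      assert(!new_behaviour.test(2));  /* intersect keeps it disabled              */
      assert(new_behaviour.test(1));   /* hinted and already usable: stays usable  */
      return 0;
    }
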
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 0fbc06ce919..bc00f7ea629 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -127,7 +127,7 @@ struct Query_cache_query
inline void tables_type(uint8 type) { tbls_type= type; }
inline ulong length() { return len; }
inline ulong add(ulong packet_len) { return(len+= packet_len); }
- inline void length(ulong length) { len= length; }
+ inline void length(ulong length_arg) { len= length_arg; }
inline gptr query()
{
return (gptr)(((byte*)this)+
@@ -155,7 +155,7 @@ struct Query_cache_table
inline char *db() { return (char *) data(); }
inline char *table() { return tbl; }
- inline void table(char *table) { tbl= table; }
+ inline void table(char *table_arg) { tbl= table_arg; }
inline uint32 key_length() { return key_len; }
inline void key_length(uint32 len) { key_len= len; }
inline uint8 type() { return table_type; }
@@ -163,7 +163,7 @@ struct Query_cache_table
inline qc_engine_callback callback() { return callback_func; }
inline void callback(qc_engine_callback fn){ callback_func= fn; }
inline ulonglong engine_data() { return engine_data_buff; }
- inline void engine_data(ulonglong data) { engine_data_buff= data; }
+ inline void engine_data(ulonglong data_arg){ engine_data_buff= data_arg; }
inline gptr data()
{
return (gptr)(((byte*)this)+
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index cbe033f99b8..d5f81168be3 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -214,6 +214,8 @@ THD::THD()
stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
spcont(NULL)
{
+ ulong tmp;
+
stmt_arena= this;
thread_stack= 0;
db= 0;
@@ -305,8 +307,8 @@ THD::THD()
protocol_prep.init(this);
tablespace_op=FALSE;
- ulong tmp=sql_rnd_with_mutex();
- randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id);
+ tmp= sql_rnd_with_mutex();
+ randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id);
substitute_null_with_insert_id = FALSE;
thr_lock_info_init(&lock_info); /* safety: will be reset after start */
thr_lock_owner_init(&main_lock_id, &lock_info);
@@ -1532,7 +1534,7 @@ bool select_max_min_finder_subselect::send_data(List<Item> &items)
bool select_max_min_finder_subselect::cmp_real()
{
- Item *maxmin= ((Item_singlerow_subselect *)item)->el(0);
+ Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
double val1= cache->val_real(), val2= maxmin->val_real();
if (fmax)
return (cache->null_value && !maxmin->null_value) ||
@@ -1545,7 +1547,7 @@ bool select_max_min_finder_subselect::cmp_real()
bool select_max_min_finder_subselect::cmp_int()
{
- Item *maxmin= ((Item_singlerow_subselect *)item)->el(0);
+ Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
longlong val1= cache->val_int(), val2= maxmin->val_int();
if (fmax)
return (cache->null_value && !maxmin->null_value) ||
@@ -1558,7 +1560,7 @@ bool select_max_min_finder_subselect::cmp_int()
bool select_max_min_finder_subselect::cmp_decimal()
{
- Item *maxmin= ((Item_singlerow_subselect *)item)->el(0);
+ Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
my_decimal cval, *cvalue= cache->val_decimal(&cval);
my_decimal mval, *mvalue= maxmin->val_decimal(&mval);
if (fmax)
@@ -1573,7 +1575,7 @@ bool select_max_min_finder_subselect::cmp_decimal()
bool select_max_min_finder_subselect::cmp_str()
{
String *val1, *val2, buf1, buf2;
- Item *maxmin= ((Item_singlerow_subselect *)item)->el(0);
+ Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
/*
    since both operands are Item_cache, buf1 & buf2 will not be used,
    but they are added for safety
@@ -2407,11 +2409,12 @@ THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
my_size_t colcnt, my_size_t, bool,
Update_rows_log_event *);
#endif
+
+#ifdef NOT_USED
static char const*
field_type_name(enum_field_types type)
{
- switch (type)
- {
+ switch (type) {
case MYSQL_TYPE_DECIMAL:
return "MYSQL_TYPE_DECIMAL";
case MYSQL_TYPE_TINY:
@@ -2469,6 +2472,7 @@ field_type_name(enum_field_types type)
}
return "Unknown";
}
+#endif
my_size_t THD::max_row_length_blob(TABLE *table, const byte *data) const
@@ -2649,8 +2653,6 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
Pack records into format for transfer. We are allocating more
memory than needed, but that doesn't matter.
*/
- int error= 0;
-
Row_data_memory memory(table, max_row_length(table, record));
if (!memory.has_memory())
return HA_ERR_OUT_OF_MEM;
@@ -2677,7 +2679,6 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
{
DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());
- int error= 0;
my_size_t const before_maxlen = max_row_length(table, before_record);
my_size_t const after_maxlen = max_row_length(table, after_record);
@@ -2727,8 +2728,6 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
Pack records into format for transfer. We are allocating more
memory than needed, but that doesn't matter.
*/
- int error= 0;
-
Row_data_memory memory(table, max_row_length(table, record));
if (unlikely(!memory.has_memory()))
return HA_ERR_OUT_OF_MEM;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 588d936fd57..7babe1eda24 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1811,13 +1811,13 @@ class select_create: public select_insert {
HA_CREATE_INFO *create_info;
Field **field;
public:
- select_create (TABLE_LIST *table,
+ select_create (TABLE_LIST *table_arg,
HA_CREATE_INFO *create_info_par,
List<create_field> &fields_par,
List<Key> &keys_par,
List<Item> &select_fields,enum_duplicates duplic, bool ignore)
:select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore),
- create_table(table), extra_fields(&fields_par),keys(&keys_par),
+ create_table(table_arg), extra_fields(&fields_par),keys(&keys_par),
create_info(create_info_par)
{}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
@@ -1929,7 +1929,9 @@ public:
class select_singlerow_subselect :public select_subselect
{
public:
- select_singlerow_subselect(Item_subselect *item):select_subselect(item){}
+ select_singlerow_subselect(Item_subselect *item_arg)
+ :select_subselect(item_arg)
+ {}
bool send_data(List<Item> &items);
};
@@ -1940,8 +1942,8 @@ class select_max_min_finder_subselect :public select_subselect
bool (select_max_min_finder_subselect::*op)();
bool fmax;
public:
- select_max_min_finder_subselect(Item_subselect *item, bool mx)
- :select_subselect(item), cache(0), fmax(mx)
+ select_max_min_finder_subselect(Item_subselect *item_arg, bool mx)
+ :select_subselect(item_arg), cache(0), fmax(mx)
{}
void cleanup();
bool send_data(List<Item> &items);
@@ -1955,7 +1957,8 @@ public:
class select_exists_subselect :public select_subselect
{
public:
- select_exists_subselect(Item_subselect *item):select_subselect(item){}
+ select_exists_subselect(Item_subselect *item_arg)
+ :select_subselect(item_arg){}
bool send_data(List<Item> &items);
};
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index e3405d9c9d9..ea8c0e2d83e 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -69,13 +69,13 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
Test if the user wants to delete all rows and deletion doesn't have
any side-effects (because of triggers), so we can use optimized
handler::delete_all_rows() method.
-
- If row-based replication is used, we also delete the table row by
- row.
+ We implement fast TRUNCATE for InnoDB even if triggers are present.
+ TRUNCATE ignores triggers.
*/
if (!using_limit && const_cond && (!conds || conds->val_int()) &&
!(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) &&
- !(table->triggers && table->triggers->has_delete_triggers()) &&
+ (thd->lex->sql_command == SQLCOM_TRUNCATE ||
+ !(table->triggers && table->triggers->has_delete_triggers())) &&
!thd->current_stmt_binlog_row_based)
{
/* Update the table->file->stats.records number */
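
The new comment and condition above state exactly when the optimized handler::delete_all_rows() path may be taken: the statement must effectively be "delete everything", and delete triggers only block the fast path for DELETE, not for TRUNCATE, because TRUNCATE never fires triggers. A condensed, standalone restatement of that predicate, with the THD/TABLE fields reduced to plain booleans:

    /* Sketch only; each flag stands in for the corresponding test in the hunk. */
    bool can_use_delete_all_rows(bool using_limit, bool cond_is_true,
                                 bool safe_mode, bool is_truncate,
                                 bool has_delete_triggers, bool row_based_binlog)
    {
      return !using_limit && cond_is_true && !safe_mode &&
             (is_truncate || !has_delete_triggers) &&  /* TRUNCATE ignores triggers */
             !row_based_binlog;
    }
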
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 92a6e24bc80..5712fbceac8 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -109,8 +109,6 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list)
SELECT_LEX *first_select= unit->first_select();
TABLE *table= 0;
select_union *derived_result;
- bool is_union= first_select->next_select() &&
- first_select->next_select()->linkage == UNION_TYPE;
/* prevent name resolving out of derived table */
for (SELECT_LEX *sl= first_select; sl; sl= sl->next_select())
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 4ad8e5ee128..aaffa09b978 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -60,7 +60,6 @@
#include "sql_select.h"
#include "sql_show.h"
-static int check_null_fields(THD *thd,TABLE *entry);
#ifndef EMBEDDED_LIBRARY
static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list);
static int write_delayed(THD *thd, TABLE *table, enum_duplicates dup,
@@ -190,11 +189,11 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (grant_option)
{
- Field_iterator_table fields;
- fields.set_table(table);
+ Field_iterator_table field_it;
+ field_it.set_table(table);
if (check_grant_all_columns(thd, INSERT_ACL, &table->grant,
table->s->db.str, table->s->table_name.str,
- &fields))
+ &field_it))
return -1;
}
#endif
@@ -351,13 +350,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
bool ignore)
{
int error, res;
- /*
- log_on is about delayed inserts only.
- By default, both logs are enabled (this won't cause problems if the server
- runs without --log-update or --log-bin).
- */
- bool log_on= ((thd->options & OPTION_BIN_LOG) ||
- (!(thd->security_ctx->master_access & SUPER_ACL)));
bool transactional_table, joins_freed= FALSE;
bool changed;
uint value_count;
@@ -371,6 +363,13 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
Name_resolution_context_state ctx_state;
#ifndef EMBEDDED_LIBRARY
char *query= thd->query;
+ /*
+ log_on is about delayed inserts only.
+ By default, both logs are enabled (this won't cause problems if the server
+ runs without --log-update or --log-bin).
+ */
+ bool log_on= ((thd->options & OPTION_BIN_LOG) ||
+ (!(thd->security_ctx->master_access & SUPER_ACL)));
#endif
thr_lock_type lock_type = table_list->lock_type;
Item *unused_conds= 0;
@@ -791,7 +790,6 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
Field_translator *trans_start= view->field_translation,
*trans_end= trans_start + num;
Field_translator *trans;
- Field **field_ptr= table->field;
uint used_fields_buff_size= bitmap_buffer_size(table->s->fields);
uint32 *used_fields_buff= (uint32*)thd->alloc(used_fields_buff_size);
MY_BITMAP used_fields;
@@ -3145,8 +3143,9 @@ void select_create::send_error(uint errcode,const char *err)
("Current statement %s row-based",
thd->current_stmt_binlog_row_based ? "is" : "is NOT"));
DBUG_PRINT("info",
- ("Current table (at 0x%lu) %s a temporary (or non-existant) table",
- table,
+ ("Current table (at 0x%lx) %s a temporary (or non-existing) "
+ "table",
+ (ulong) table,
table && !table->s->tmp_table ? "is NOT" : "is"));
DBUG_PRINT("info",
("Table %s prior to executing this statement",
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index db2a6182caa..35db3e40930 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -317,13 +317,15 @@ static char *get_text(LEX *lex)
{
c = yyGet();
#ifdef USE_MB
- int l;
- if (use_mb(cs) &&
- (l = my_ismbchar(cs,
- (const char *)lex->ptr-1,
- (const char *)lex->end_of_query))) {
+ {
+ int l;
+ if (use_mb(cs) &&
+ (l = my_ismbchar(cs,
+ (const char *)lex->ptr-1,
+ (const char *)lex->end_of_query))) {
lex->ptr += l-1;
continue;
+ }
}
#endif
if (c == '\\' &&
@@ -791,8 +793,8 @@ int MYSQLlex(void *arg, void *yythd)
lex->tok_start=lex->ptr; // Skip first `
while ((c=yyGet()))
{
- int length;
- if ((length= my_mbcharlen(cs, c)) == 1)
+ int var_length;
+ if ((var_length= my_mbcharlen(cs, c)) == 1)
{
if (c == quote_char)
{
@@ -804,9 +806,9 @@ int MYSQLlex(void *arg, void *yythd)
}
}
#ifdef USE_MB
- else if (length < 1)
+ else if (var_length < 1)
break; // Error
- lex->ptr+= length-1;
+ lex->ptr+= var_length-1;
#endif
}
if (double_quotes)
@@ -1755,13 +1757,14 @@ bool st_lex::can_be_merged()
bool selects_allow_merge= select_lex.next_select() == 0;
if (selects_allow_merge)
{
- for (SELECT_LEX_UNIT *unit= select_lex.first_inner_unit();
- unit;
- unit= unit->next_unit())
+ for (SELECT_LEX_UNIT *tmp_unit= select_lex.first_inner_unit();
+ tmp_unit;
+ tmp_unit= tmp_unit->next_unit())
{
- if (unit->first_select()->parent_lex == this &&
- (unit->item == 0 ||
- (unit->item->place() != IN_WHERE && unit->item->place() != IN_ON)))
+ if (tmp_unit->first_select()->parent_lex == this &&
+ (tmp_unit->item == 0 ||
+ (tmp_unit->item->place() != IN_WHERE &&
+ tmp_unit->item->place() != IN_ON)))
{
selects_allow_merge= 0;
break;
@@ -2059,12 +2062,12 @@ void st_lex::first_lists_tables_same()
FALSE - success
*/
-bool st_lex::add_time_zone_tables_to_query_tables(THD *thd)
+bool st_lex::add_time_zone_tables_to_query_tables(THD *thd_arg)
{
/* We should not add these tables twice */
if (!time_zone_tables_used)
{
- time_zone_tables_used= my_tz_get_table_list(thd, &query_tables_last);
+ time_zone_tables_used= my_tz_get_table_list(thd_arg, &query_tables_last);
if (time_zone_tables_used == &fake_time_zone_tables_list)
return TRUE;
}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 8632ab0f675..821af3f946d 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -432,7 +432,8 @@ protected:
select_result *result;
ulonglong found_rows_for_union;
- bool res;
+ bool saved_error;
+
public:
bool prepared, // prepare phase already performed for UNION (unit)
optimized, // optimize phase already performed for UNION (unit)
@@ -620,6 +621,21 @@ public:
/* index in the select list of the expression currently being fixed */
int cur_pos_in_select_list;
+ List<udf_func> udf_list; /* udf function calls stack */
+ /*
+ This is a copy of the original JOIN USING list that comes from
+ the parser. The parser :
+ 1. Sets the natural_join of the second TABLE_LIST in the join
+ and the st_select_lex::prev_join_using.
+ 2. Makes a parent TABLE_LIST and sets its is_natural_join/
+ join_using_fields members.
+ 3. Uses the wrapper TABLE_LIST as a table in the upper level.
+ We cannot assign directly to join_using_fields in the parser because
+ at stage (1.) the parent TABLE_LIST is not constructed yet and
+ the assignment will override the JOIN USING fields of the lower level
+ joins on the right.
+ */
+ List<String> *prev_join_using;
void init_query();
void init_select();
st_select_lex_unit* master_unit();
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index c1c775f95b7..364fda2c94b 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -82,10 +82,11 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
List<Item> &set_values, READ_INFO &read_info,
String &enclosed, ulong skip_lines,
bool ignore_check_option_errors);
+#ifndef EMBEDDED_LIBRARY
static bool write_execute_load_query_log_event(THD *thd,
bool duplicates, bool ignore,
bool transactional_table);
-
+#endif /* EMBEDDED_LIBRARY */
/*
Execute LOAD DATA query
@@ -505,6 +506,8 @@ err:
}
+#ifndef EMBEDDED_LIBRARY
+
/* Not a very useful function; just to avoid duplication of code */
static bool write_execute_load_query_log_event(THD *thd,
bool duplicates, bool ignore,
@@ -521,6 +524,7 @@ static bool write_execute_load_query_log_event(THD *thd,
return mysql_bin_log.write(&e);
}
+#endif
/****************************************************************************
** Read of rows of fixed size + optional garbage + optional newline
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 8a4eea6819b..b45859c4e28 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -56,12 +56,11 @@
(LP)->sql_command == SQLCOM_DROP_FUNCTION ? \
"FUNCTION" : "PROCEDURE")
-static void time_out_user_resource_limits(THD *thd, USER_CONN *uc);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
+static void time_out_user_resource_limits(THD *thd, USER_CONN *uc);
static int check_for_max_user_connections(THD *thd, USER_CONN *uc);
static void decrease_user_connections(USER_CONN *uc);
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
-static bool check_multi_update_lock(THD *thd);
static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables);
const char *any_db="*any*"; // Special symbol for check_access
@@ -414,7 +413,11 @@ int check_user(THD *thd, enum enum_server_command command,
}
}
- /* Why logging is performed before all checks've passed? */
+ /*
+ Log the command before authentication checks, so that the user can
+    check the log for the attempted login and also to detect
+ break-in attempts.
+ */
general_log_print(thd, command,
(thd->main_security_ctx.priv_user ==
thd->main_security_ctx.user ?
@@ -712,6 +715,8 @@ bool is_update_query(enum enum_sql_command command)
safe to test and modify members of the USER_CONN structure.
*/
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+
static void time_out_user_resource_limits(THD *thd, USER_CONN *uc)
{
time_t check_time = thd->start_time ? thd->start_time : time(NULL);
@@ -729,7 +734,6 @@ static void time_out_user_resource_limits(THD *thd, USER_CONN *uc)
DBUG_VOID_RETURN;
}
-
/*
Check if maximum queries per hour limit has been reached
returns 0 if OK.
@@ -737,7 +741,6 @@ static void time_out_user_resource_limits(THD *thd, USER_CONN *uc)
static bool check_mqh(THD *thd, uint check_command)
{
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
bool error= 0;
USER_CONN *uc=thd->user_connect;
DBUG_ENTER("check_mqh");
@@ -772,11 +775,10 @@ static bool check_mqh(THD *thd, uint check_command)
end:
(void) pthread_mutex_unlock(&LOCK_user_conn);
DBUG_RETURN(error);
-#else
- return (0);
-#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
+#endif /* NO_EMBEDDED_ACCESS_CHECKS */
+
static void reset_mqh(LEX_USER *lu, bool get_them= 0)
{
@@ -1676,7 +1678,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */
thd->set_time();
VOID(pthread_mutex_lock(&LOCK_thread_count));
- thd->query_id=query_id;
+ thd->query_id= global_query_id;
if (command != COM_STATISTICS && command != COM_PING)
next_query_id();
thread_running++;
@@ -1870,7 +1872,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
while (!thd->killed && thd->lex->found_semicolon && !thd->net.report_error)
{
- char *packet= thd->lex->found_semicolon;
+ char *next_packet= thd->lex->found_semicolon;
net->no_send_error= 0;
/*
      Multiple queries exist, execute them individually
@@ -1878,24 +1880,24 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (thd->lock || thd->open_tables || thd->derived_tables ||
thd->prelocked_mode)
close_thread_tables(thd);
- ulong length= (ulong)(packet_end-packet);
+ ulong length= (ulong)(packet_end - next_packet);
log_slow_statement(thd);
/* Remove garbage at start of query */
- while (my_isspace(thd->charset(), *packet) && length > 0)
+ while (my_isspace(thd->charset(), *next_packet) && length > 0)
{
- packet++;
+ next_packet++;
length--;
}
VOID(pthread_mutex_lock(&LOCK_thread_count));
thd->query_length= length;
- thd->query= packet;
+ thd->query= next_packet;
thd->query_id= next_query_id();
thd->set_time(); /* Reset the query start time. */
/* TODO: set thd->lex->sql_command to SQLCOM_END here */
VOID(pthread_mutex_unlock(&LOCK_thread_count));
- mysql_parse(thd, packet, length);
+ mysql_parse(thd, next_packet, length);
}
if (!(specialflag & SPECIAL_NO_PRIOR))
@@ -1912,11 +1914,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
{
char *fields, *packet_end= packet + packet_length - 1, *arg_end;
/* Locked closure of all tables */
- TABLE_LIST *locked_tables= NULL;
TABLE_LIST table_list;
LEX_STRING conv_name;
- /* Saved variable value */
- my_bool old_innodb_table_locks= thd->variables.innodb_table_locks;
uint dummy;
    /* used as fields initializer */
@@ -2130,7 +2129,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS],
&LOCK_status);
calc_sum_of_all_status(&current_global_status_var);
- uptime= (ulong) (thd->start_time - start_time);
+ uptime= (ulong) (thd->start_time - server_start_time);
length= my_snprintf((char*) buff, buff_len - 1,
"Uptime: %lu Threads: %d Questions: %lu "
"Slow queries: %lu Opens: %lu Flush tables: %lu "
@@ -2185,13 +2184,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
{
statistic_increment(thd->status_var.com_stat[SQLCOM_SET_OPTION],
&LOCK_status);
- enum_mysql_set_option command= (enum_mysql_set_option) uint2korr(packet);
- switch (command) {
- case MYSQL_OPTION_MULTI_STATEMENTS_ON:
+ uint opt_command= uint2korr(packet);
+
+ switch (opt_command) {
+ case (int) MYSQL_OPTION_MULTI_STATEMENTS_ON:
thd->client_capabilities|= CLIENT_MULTI_STATEMENTS;
send_eof(thd);
break;
- case MYSQL_OPTION_MULTI_STATEMENTS_OFF:
+ case (int) MYSQL_OPTION_MULTI_STATEMENTS_OFF:
thd->client_capabilities&= ~CLIENT_MULTI_STATEMENTS;
send_eof(thd);
break;
@@ -2533,7 +2533,7 @@ mysql_execute_command(THD *thd)
{
bool res= FALSE;
bool need_start_waiting= FALSE; // have protection against global read lock
- int result= 0;
+ int up_result= 0;
LEX *lex= thd->lex;
/* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
SELECT_LEX *select_lex= &lex->select_lex;
@@ -3389,22 +3389,23 @@ end_with_restore_list:
break;
DBUG_ASSERT(select_lex->offset_limit == 0);
unit->set_limit(select_lex);
- res= (result= mysql_update(thd, all_tables,
- select_lex->item_list,
- lex->value_list,
- select_lex->where,
- select_lex->order_list.elements,
- (ORDER *) select_lex->order_list.first,
- unit->select_limit_cnt,
- lex->duplicates, lex->ignore));
+ res= (up_result= mysql_update(thd, all_tables,
+ select_lex->item_list,
+ lex->value_list,
+ select_lex->where,
+ select_lex->order_list.elements,
+ (ORDER *) select_lex->order_list.first,
+ unit->select_limit_cnt,
+ lex->duplicates, lex->ignore));
/* mysql_update return 2 if we need to switch to multi-update */
- if (result != 2)
+ if (up_result != 2)
break;
+ /* Fall through */
case SQLCOM_UPDATE_MULTI:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
/* if we switched from normal update, rights are checked */
- if (result != 2)
+ if (up_result != 2)
{
if ((res= multi_update_precheck(thd, all_tables)))
break;
@@ -3489,7 +3490,7 @@ end_with_restore_list:
case SQLCOM_REPLACE_SELECT:
case SQLCOM_INSERT_SELECT:
{
- select_result *result;
+ select_result *sel_result;
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if ((res= insert_precheck(thd, all_tables)))
break;
@@ -3518,13 +3519,15 @@ end_with_restore_list:
select_lex->context.table_list=
select_lex->context.first_name_resolution_table= second_table;
res= mysql_insert_select_prepare(thd);
- if (!res && (result= new select_insert(first_table, first_table->table,
- &lex->field_list,
- &lex->update_list,
- &lex->value_list,
- lex->duplicates, lex->ignore)))
+ if (!res && (sel_result= new select_insert(first_table,
+ first_table->table,
+ &lex->field_list,
+ &lex->update_list,
+ &lex->value_list,
+ lex->duplicates,
+ lex->ignore)))
{
- res= handle_select(thd, lex, result, OPTION_SETUP_TABLES_DONE);
+ res= handle_select(thd, lex, sel_result, OPTION_SETUP_TABLES_DONE);
/*
Invalidate the table in the query cache if something changed
after unlocking when changes become visible.
@@ -3542,7 +3545,7 @@ end_with_restore_list:
first_table->next_local= save_table;
thd->lock=0;
}
- delete result;
+ delete sel_result;
}
/* revert changes for SP */
select_lex->table_list.first= (byte*) first_table;
@@ -3567,7 +3570,7 @@ end_with_restore_list:
break;
}
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (check_one_table_access(thd, DELETE_ACL, all_tables))
+ if (check_one_table_access(thd, DROP_ACL, all_tables))
goto error;
/*
Don't allow this within a transaction because we want to use
@@ -3608,7 +3611,7 @@ end_with_restore_list:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
TABLE_LIST *aux_tables=
(TABLE_LIST *)thd->lex->auxiliary_table_list.first;
- multi_delete *result;
+ multi_delete *del_result;
if (!thd->locked_tables &&
!(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
@@ -3633,8 +3636,8 @@ end_with_restore_list:
if ((res= mysql_multi_delete_prepare(thd)))
goto error;
- if (!thd->is_fatal_error && (result= new multi_delete(aux_tables,
- lex->table_count)))
+ if (!thd->is_fatal_error &&
+ (del_result= new multi_delete(aux_tables, lex->table_count)))
{
res= mysql_select(thd, &select_lex->ref_pointer_array,
select_lex->get_table_list(),
@@ -3646,8 +3649,8 @@ end_with_restore_list:
select_lex->options | thd->options |
SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
OPTION_SETUP_TABLES_DONE,
- result, unit, select_lex);
- delete result;
+ del_result, unit, select_lex);
+ delete del_result;
}
else
res= TRUE; // Error
@@ -4055,7 +4058,6 @@ end_with_restore_list:
lex->spname->m_name);
else
{
- uint affected= 1;
if (!(res= Events::get_instance()->drop_event(thd,
lex->spname->m_db,
lex->spname->m_name,
@@ -4454,7 +4456,7 @@ end_with_restore_list:
{
uint namelen;
char *name;
- int result= SP_INTERNAL_ERROR;
+ int sp_result= SP_INTERNAL_ERROR;
DBUG_ASSERT(lex->sphead != 0);
DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */
@@ -4527,18 +4529,18 @@ end_with_restore_list:
if (!lex->definer)
{
- bool res= FALSE;
+ bool local_res= FALSE;
Query_arena original_arena;
Query_arena *ps_arena = thd->activate_stmt_arena_if_needed(&original_arena);
if (!(lex->definer= create_default_definer(thd)))
- res= TRUE;
+ local_res= TRUE;
if (ps_arena)
thd->restore_active_arena(ps_arena, &original_arena);
/* Error has been already reported. */
- if (res)
+ if (local_res)
goto create_sp_error;
if (thd->slave_thread)
@@ -4578,8 +4580,8 @@ end_with_restore_list:
}
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
- res= (result= lex->sphead->create(thd));
- switch (result) {
+ res= (sp_result= lex->sphead->create(thd));
+ switch (sp_result) {
case SP_OK:
#ifndef NO_EMBEDDED_ACCESS_CHECKS
      /* only add privileges if really necessary */
@@ -4619,7 +4621,7 @@ create_sp_error:
lex->unit.cleanup();
delete lex->sphead;
lex->sphead= 0;
- if (result != SP_OK )
+ if (sp_result != SP_OK )
goto error;
send_ok(thd);
break; /* break super switch */
@@ -4734,7 +4736,7 @@ create_sp_error:
case SQLCOM_ALTER_PROCEDURE:
case SQLCOM_ALTER_FUNCTION:
{
- int result;
+ int sp_result;
sp_head *sp;
st_sp_chistics chistics;
@@ -4749,7 +4751,7 @@ create_sp_error:
if (! sp)
{
if (lex->spname->m_db.str)
- result= SP_KEY_NOT_FOUND;
+ sp_result= SP_KEY_NOT_FOUND;
else
{
my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
@@ -4774,7 +4776,7 @@ create_sp_error:
{
my_message(ER_BINLOG_UNSAFE_ROUTINE,
ER(ER_BINLOG_UNSAFE_ROUTINE), MYF(0));
- result= SP_INTERNAL_ERROR;
+ sp_result= SP_INTERNAL_ERROR;
}
else
{
@@ -4784,15 +4786,15 @@ create_sp_error:
follow the restrictions that log-bin-trust-function-creators=0
already puts on CREATE FUNCTION.
*/
+ /* Conditionally writes to binlog */
if (lex->sql_command == SQLCOM_ALTER_PROCEDURE)
- /* Conditionally writes to binlog */
- result= sp_update_procedure(thd, lex->spname, &lex->sp_chistics);
+ sp_result= sp_update_procedure(thd, lex->spname,
+ &lex->sp_chistics);
else
- /* Conditionally writes to binlog */
- result= sp_update_function(thd, lex->spname, &lex->sp_chistics);
+ sp_result= sp_update_function(thd, lex->spname, &lex->sp_chistics);
}
}
- switch (result)
+ switch (sp_result)
{
case SP_OK:
send_ok(thd);
@@ -4811,13 +4813,13 @@ create_sp_error:
case SQLCOM_DROP_PROCEDURE:
case SQLCOM_DROP_FUNCTION:
{
- int result;
+ int sp_result;
int type= (lex->sql_command == SQLCOM_DROP_PROCEDURE ?
TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION);
- result= sp_routine_exists_in_table(thd, type, lex->spname);
+ sp_result= sp_routine_exists_in_table(thd, type, lex->spname);
mysql_reset_errors(thd, 0);
- if (result == SP_OK)
+ if (sp_result == SP_OK)
{
char *db= lex->spname->m_db.str;
char *name= lex->spname->m_name.str;
@@ -4838,12 +4840,11 @@ create_sp_error:
ER(ER_PROC_AUTO_REVOKE_FAIL));
}
#endif
- if (lex->sql_command == SQLCOM_DROP_PROCEDURE)
/* Conditionally writes to binlog */
- result= sp_drop_procedure(thd, lex->spname); /* Conditionally writes to binlog */
+ if (lex->sql_command == SQLCOM_DROP_PROCEDURE)
+ sp_result= sp_drop_procedure(thd, lex->spname);
else
- /* Conditionally writes to binlog */
- result= sp_drop_function(thd, lex->spname); /* Conditionally writes to binlog */
+ sp_result= sp_drop_function(thd, lex->spname);
}
else
{
@@ -4867,16 +4868,15 @@ create_sp_error:
}
#endif
if (lex->spname->m_db.str)
- result= SP_KEY_NOT_FOUND;
+ sp_result= SP_KEY_NOT_FOUND;
else
{
my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
goto error;
}
}
- res= result;
- switch (result)
- {
+ res= sp_result;
+ switch (sp_result) {
case SP_OK:
send_ok(thd);
break;
@@ -5644,7 +5644,9 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
{
uint found=0;
ulong found_access=0;
+#ifndef EMBEDDED_LIBRARY
TABLE_LIST *org_tables= tables;
+#endif
TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table();
Security_context *sctx= thd->security_ctx, *backup_ctx= thd->security_ctx;
/*
@@ -6678,11 +6680,8 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd)
If this is a JOIN ... USING, move the list of joined fields to the
table reference that describes the join.
*/
- if (table->join_using_fields)
- {
- ptr->join_using_fields= table->join_using_fields;
- table->join_using_fields= NULL;
- }
+ if (prev_join_using)
+ ptr->join_using_fields= prev_join_using;
}
}
join_list->push_front(ptr);
@@ -6818,18 +6817,18 @@ void st_select_lex::set_lock_for_tables(thr_lock_type lock_type)
0 on success
*/
-bool st_select_lex_unit::add_fake_select_lex(THD *thd)
+bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
{
SELECT_LEX *first_sl= first_select();
DBUG_ENTER("add_fake_select_lex");
DBUG_ASSERT(!fake_select_lex);
- if (!(fake_select_lex= new (thd->mem_root) SELECT_LEX()))
+ if (!(fake_select_lex= new (thd_arg->mem_root) SELECT_LEX()))
DBUG_RETURN(1);
fake_select_lex->include_standalone(this,
(SELECT_LEX_NODE**)&fake_select_lex);
fake_select_lex->select_number= INT_MAX;
- fake_select_lex->parent_lex= thd->lex; /* Used in init_query. */
+ fake_select_lex->parent_lex= thd_arg->lex; /* Used in init_query. */
fake_select_lex->make_empty_select();
fake_select_lex->linkage= GLOBAL_OPTIONS_TYPE;
fake_select_lex->select_limit= 0;
@@ -6849,9 +6848,9 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd)
*/
global_parameters= fake_select_lex;
fake_select_lex->no_table_names_allowed= 1;
- thd->lex->current_select= fake_select_lex;
+ thd_arg->lex->current_select= fake_select_lex;
}
- thd->lex->pop_context();
+ thd_arg->lex->pop_context();
DBUG_RETURN(0);
}
@@ -6938,6 +6937,7 @@ void add_join_on(TABLE_LIST *b, Item *expr)
a Left join argument
b Right join argument
using_fields Field names from USING clause
+ lex The current st_select_lex
IMPLEMENTATION
This function marks that table b should be joined with a either via
@@ -6966,10 +6966,11 @@ void add_join_on(TABLE_LIST *b, Item *expr)
None
*/
-void add_join_natural(TABLE_LIST *a, TABLE_LIST *b, List<String> *using_fields)
+void add_join_natural(TABLE_LIST *a, TABLE_LIST *b, List<String> *using_fields,
+ SELECT_LEX *lex)
{
b->natural_join= a;
- b->join_using_fields= using_fields;
+ lex->prev_join_using= using_fields;
}
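Taken together with the nest_last_join() hunk earlier in this file, the change turns the USING-list handling into a two-step handoff: the parser action only records the list on the current st_select_lex, and the code that closes the join nest later attaches it to the nested-join table reference. A simplified stand-alone sketch of that handoff (UsingList, SelectCtx and JoinNest are invented stand-ins for the real List<String>, st_select_lex and TABLE_LIST):

    #include <string>
    #include <vector>

    struct UsingList { std::vector<std::string> fields; };
    struct SelectCtx { UsingList *prev_join_using= nullptr; };   /* st_select_lex stand-in */
    struct JoinNest  { UsingList *join_using_fields= nullptr; }; /* TABLE_LIST stand-in */

    /* add_join_natural() side: only remember the USING list. */
    static void on_using_clause(SelectCtx *sel, UsingList *fields)
    {
      sel->prev_join_using= fields;
    }

    /* nest_last_join() side: attach the remembered list to the closed nest. */
    static void on_nest_closed(SelectCtx *sel, JoinNest *nest)
    {
      if (sel->prev_join_using)
        nest->join_using_fields= sel->prev_join_using;
    }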
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 0b6d841a337..923e851c0ff 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -2002,7 +2002,6 @@ char *generate_partition_syntax(partition_info *part_info,
{
uint i,j, tot_no_parts, no_subparts;
partition_element *part_elem;
- partition_element *save_part_elem= NULL;
ulonglong buffer_length;
char path[FN_REFLEN];
int err= 0;
@@ -5369,7 +5368,6 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt,
List_iterator<partition_element> temp_it(part_info->temp_partitions);
uint no_temp_partitions= part_info->temp_partitions.elements;
uint no_elements= part_info->partitions.elements;
- uint i= 0;
DBUG_ENTER("write_log_dropped_partitions");
ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION;
@@ -5742,7 +5740,6 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt,
bool dont_crash)
{
partition_info *part_info= lpt->part_info;
- uint count_loop= 0;
DDL_LOG_MEMORY_ENTRY *log_entry= part_info->exec_log_entry;
DBUG_ENTER("write_log_completed");
@@ -6016,8 +6013,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
uint fast_alter_partition)
{
/* Set-up struct used to write frm files */
- ulonglong copied= 0;
- ulonglong deleted= 0;
partition_info *part_info= table->part_info;
ALTER_PARTITION_PARAM_TYPE lpt_obj;
ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj;
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 901fe3a1aa8..e3e24c1f375 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -53,12 +53,15 @@ plugin_type_init plugin_type_deinitialize[MYSQL_MAX_PLUGIN_TYPE_NUM]=
0,ha_finalize_handlerton,0,0,finalize_schema_table
};
+#ifdef HAVE_DLOPEN
static const char *plugin_interface_version_sym=
"_mysql_plugin_interface_version_";
static const char *sizeof_st_plugin_sym=
"_mysql_sizeof_struct_st_plugin_";
static const char *plugin_declarations_sym= "_mysql_plugin_declarations_";
static int min_plugin_interface_version= MYSQL_PLUGIN_INTERFACE_VERSION & ~0xFF;
+#endif
+
/* Note that 'int version' must be the first field of every plugin
sub-structure (plugin->info).
*/
@@ -91,6 +94,8 @@ static int plugin_array_version=0;
my_bool plugin_register_builtin(struct st_mysql_plugin *plugin);
void plugin_load(void);
+#ifdef HAVE_DLOPEN
+
static struct st_plugin_dl *plugin_dl_find(const LEX_STRING *dl)
{
uint i;
@@ -128,6 +133,8 @@ static st_plugin_dl *plugin_dl_insert_or_reuse(struct st_plugin_dl *plugin_dl)
DBUG_RETURN(dynamic_element(&plugin_dl_array, plugin_dl_array.elements - 1,
struct st_plugin_dl *));
}
+#endif /* HAVE_DLOPEN */
+
static inline void free_plugin_mem(struct st_plugin_dl *p)
{
@@ -545,6 +552,8 @@ static void plugin_del(struct st_plugin_int *plugin)
DBUG_VOID_RETURN;
}
+#ifdef NOT_USED
+
static void plugin_del(const LEX_STRING *name)
{
struct st_plugin_int *plugin;
@@ -554,6 +563,8 @@ static void plugin_del(const LEX_STRING *name)
DBUG_VOID_RETURN;
}
+#endif
+
void plugin_unlock(struct st_plugin_int *plugin)
{
DBUG_ENTER("plugin_unlock");
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 07f852282b7..11d04905f64 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -976,19 +976,19 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt,
/* Insert @'escaped-varname' instead of parameter in the query */
if (entry)
{
- char *begin, *ptr;
+ char *start, *ptr;
buf.length(0);
if (buf.reserve(entry->name.length*2+3))
DBUG_RETURN(1);
- begin= ptr= buf.c_ptr_quick();
+ start= ptr= buf.c_ptr_quick();
*ptr++= '@';
*ptr++= '\'';
ptr+= escape_string_for_mysql(&my_charset_utf8_general_ci,
ptr, 0, entry->name.str,
entry->name.length);
*ptr++= '\'';
- buf.length(ptr - begin);
+ buf.length(ptr - start);
val= &buf;
}
else
@@ -1026,7 +1026,6 @@ static bool mysql_test_insert(Prepared_statement *stmt,
enum_duplicates duplic)
{
THD *thd= stmt->thd;
- LEX *lex= stmt->lex;
List_iterator_fast<List_item> its(values_list);
List_item *values;
DBUG_ENTER("mysql_test_insert");
@@ -2566,7 +2565,9 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
uint param_number;
Prepared_statement *stmt;
Item_param *param;
+#ifndef EMBEDDED_LIBRARY
char *packet_end= packet + packet_length - 1;
+#endif
DBUG_ENTER("mysql_stmt_get_longdata");
statistic_increment(thd->status_var.com_stmt_send_long_data, &LOCK_status);
@@ -2620,8 +2621,8 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
Select_fetch_protocol_prep
****************************************************************************/
-Select_fetch_protocol_prep::Select_fetch_protocol_prep(THD *thd)
- :protocol(thd)
+Select_fetch_protocol_prep::Select_fetch_protocol_prep(THD *thd_arg)
+ :protocol(thd_arg)
{}
bool Select_fetch_protocol_prep::send_fields(List<Item> &list, uint flags)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index ab919f19dff..495151ddfe3 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -109,7 +109,6 @@ static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
static COND *optimize_cond(JOIN *join, COND *conds,
List<TABLE_LIST> *join_list,
Item::cond_result *cond_value);
-static bool resolve_nested_join (TABLE_LIST *table);
static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item);
static bool open_tmp_table(TABLE *table);
static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
@@ -717,11 +716,20 @@ JOIN::optimize()
{
int res;
/*
- opt_sum_query() returns -1 if no rows match to the WHERE conditions,
- or 1 if all items were resolved, or 0, or an error number HA_ERR_...
+      opt_sum_query() returns HA_ERR_KEY_NOT_FOUND if no rows
+      match the WHERE conditions,
+ or 1 if all items were resolved,
+ or 0, or an error number HA_ERR_...
*/
if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds)))
{
+ if (res == HA_ERR_KEY_NOT_FOUND)
+ {
+ DBUG_PRINT("info",("No matching min/max row"));
+ zero_result_cause= "No matching min/max row";
+ error=0;
+ DBUG_RETURN(0);
+ }
if (res > 1)
{
thd->fatal_error();
@@ -729,13 +737,6 @@ JOIN::optimize()
DBUG_PRINT("error",("Error from opt_sum_query"));
DBUG_RETURN(1);
}
- if (res < 0)
- {
- DBUG_PRINT("info",("No matching min/max row"));
- zero_result_cause= "No matching min/max row";
- error=0;
- DBUG_RETURN(0);
- }
DBUG_PRINT("info",("Select tables optimized away"));
zero_result_cause= "Select tables optimized away";
tables_list= 0; // All tables resolved
@@ -865,6 +866,13 @@ JOIN::optimize()
{
ORDER *org_order= order;
order=remove_const(this, order,conds,1, &simple_order);
+ if (thd->net.report_error)
+ {
+ error= 1;
+ DBUG_PRINT("error",("Error from remove_const"));
+ DBUG_RETURN(1);
+ }
+
/*
If we are using ORDER BY NULL or ORDER BY const_expression,
return result in any order (even if we are using a GROUP BY)
@@ -874,10 +882,11 @@ JOIN::optimize()
}
/*
Check if we can optimize away GROUP BY/DISTINCT.
- We can do that if there are no aggregate functions and the
+ We can do that if there are no aggregate functions, the
fields in DISTINCT clause (if present) and/or columns in GROUP BY
(if present) contain direct references to all key parts of
- an unique index (in whatever order).
+ an unique index (in whatever order) and if the key parts of the
+ unique index cannot contain NULLs.
Note that the unique keys for DISTINCT and GROUP BY should not
be the same (as long as they are unique).
@@ -972,6 +981,12 @@ JOIN::optimize()
group_list= remove_const(this, (old_group_list= group_list), conds,
rollup.state == ROLLUP::STATE_NONE,
&simple_group);
+ if (thd->net.report_error)
+ {
+ error= 1;
+ DBUG_PRINT("error",("Error from remove_const"));
+ DBUG_RETURN(1);
+ }
if (old_group_list && !group_list)
select_distinct= 0;
}
@@ -988,6 +1003,12 @@ JOIN::optimize()
{
group_list= procedure->group= remove_const(this, procedure->group, conds,
1, &simple_group);
+ if (thd->net.report_error)
+ {
+ error= 1;
+ DBUG_PRINT("error",("Error from remove_const"));
+ DBUG_RETURN(1);
+ }
calc_group_buffer(this, group_list);
}
@@ -2469,14 +2490,14 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
for( ; sargables->field ; sargables++)
{
Field *field= sargables->field;
- JOIN_TAB *stat= field->table->reginfo.join_tab;
+ JOIN_TAB *join_tab= field->table->reginfo.join_tab;
key_map possible_keys= field->key_start;
possible_keys.intersect(field->table->keys_in_use_for_query);
bool is_const= 1;
- for (uint i=0; i< sargables->num_values; i++)
- is_const&= sargables->arg_value[i]->const_item();
+ for (uint j=0; j < sargables->num_values; j++)
+ is_const&= sargables->arg_value[j]->const_item();
if (is_const)
- stat[0].const_keys.merge(possible_keys);
+ join_tab[0].const_keys.merge(possible_keys);
}
}
@@ -3472,16 +3493,16 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
*/
if (keyuse->elements)
{
- KEYUSE end,*prev,*save_pos,*use;
+ KEYUSE key_end,*prev,*save_pos,*use;
qsort(keyuse->buffer,keyuse->elements,sizeof(KEYUSE),
(qsort_cmp) sort_keyuse);
- bzero((char*) &end,sizeof(end)); /* Add for easy testing */
- VOID(insert_dynamic(keyuse,(gptr) &end));
+ bzero((char*) &key_end,sizeof(key_end)); /* Add for easy testing */
+ VOID(insert_dynamic(keyuse,(gptr) &key_end));
use=save_pos=dynamic_element(keyuse,0,KEYUSE*);
- prev=&end;
+ prev= &key_end;
found_eq_constant=0;
for (i=0 ; i < keyuse->elements-1 ; i++,use++)
{
@@ -3509,7 +3530,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
save_pos++;
}
i=(uint) (save_pos-(KEYUSE*) keyuse->buffer);
- VOID(set_dynamic(keyuse,(gptr) &end,i));
+ VOID(set_dynamic(keyuse,(gptr) &key_end,i));
keyuse->elements=i;
}
return FALSE;
@@ -3694,7 +3715,6 @@ best_access_path(JOIN *join,
table_map best_ref_depends_map= 0;
double tmp;
ha_rows rec;
-
DBUG_ENTER("best_access_path");
if (s->keyuse)
@@ -3742,12 +3762,12 @@ best_access_path(JOIN *join,
if (!(keyuse->used_tables & ~join->const_table_map))
const_part|= keyuse->keypart_map;
- double tmp= prev_record_reads(join, idx, (found_ref |
+ double tmp2= prev_record_reads(join, idx, (found_ref |
keyuse->used_tables));
- if (tmp < best_prev_record_reads)
+ if (tmp2 < best_prev_record_reads)
{
best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
- best_prev_record_reads= tmp;
+ best_prev_record_reads= tmp2;
}
if (rec > keyuse->ref_table_rows)
rec= keyuse->ref_table_rows;
@@ -5917,37 +5937,42 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*/
COND *on_expr= *first_inner_tab->on_expr_ref;
- table_map used_tables= join->const_table_map |
- OUTER_REF_TABLE_BIT | RAND_TABLE_BIT;
+ table_map used_tables2= (join->const_table_map |
+ OUTER_REF_TABLE_BIT | RAND_TABLE_BIT);
for (tab= join->join_tab+join->const_tables; tab <= last_tab ; tab++)
{
current_map= tab->table->map;
- used_tables|= current_map;
- COND *tmp= make_cond_for_table(on_expr, used_tables, current_map);
- if (tmp)
+ used_tables2|= current_map;
+ COND *tmp_cond= make_cond_for_table(on_expr, used_tables2,
+ current_map);
+ if (tmp_cond)
{
JOIN_TAB *cond_tab= tab < first_inner_tab ? first_inner_tab : tab;
/*
First add the guards for match variables of
all embedding outer join operations.
*/
- if (!(tmp= add_found_match_trig_cond(cond_tab->first_inner,
- tmp, first_inner_tab)))
+ if (!(tmp_cond= add_found_match_trig_cond(cond_tab->first_inner,
+ tmp_cond,
+ first_inner_tab)))
DBUG_RETURN(1);
/*
Now add the guard turning the predicate off for
the null complemented row.
*/
DBUG_PRINT("info", ("Item_func_trig_cond"));
- tmp= new Item_func_trig_cond(tmp,
- &first_inner_tab->not_null_compl);
- DBUG_PRINT("info", ("Item_func_trig_cond 0x%lx", (ulong) tmp));
- if (tmp)
- tmp->quick_fix_field();
+ tmp_cond= new Item_func_trig_cond(tmp_cond,
+ &first_inner_tab->
+ not_null_compl);
+ DBUG_PRINT("info", ("Item_func_trig_cond 0x%lx",
+ (ulong) tmp_cond));
+ if (tmp_cond)
+ tmp_cond->quick_fix_field();
/* Add the predicate to other pushed down predicates */
DBUG_PRINT("info", ("Item_cond_and"));
- cond_tab->select_cond= !cond_tab->select_cond ? tmp :
- new Item_cond_and(cond_tab->select_cond,tmp);
+ cond_tab->select_cond= !cond_tab->select_cond ? tmp_cond :
+ new Item_cond_and(cond_tab->select_cond,
+ tmp_cond);
DBUG_PRINT("info", ("Item_cond_and 0x%lx",
(ulong)cond_tab->select_cond));
if (!cond_tab->select_cond)
@@ -6266,7 +6291,7 @@ void JOIN_TAB::cleanup()
void JOIN::join_free()
{
- SELECT_LEX_UNIT *unit;
+ SELECT_LEX_UNIT *tmp_unit;
SELECT_LEX *sl;
/*
Optimization: if not EXPLAIN and we are done with the JOIN,
@@ -6278,8 +6303,10 @@ void JOIN::join_free()
cleanup(full);
- for (unit= select_lex->first_inner_unit(); unit; unit= unit->next_unit())
- for (sl= unit->first_select(); sl; sl= sl->next_select())
+ for (tmp_unit= select_lex->first_inner_unit();
+ tmp_unit;
+ tmp_unit= tmp_unit->next_unit())
+ for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
{
Item_subselect *subselect= sl->master_unit()->item;
bool full_local= full && (!subselect || subselect->is_evaluated());
@@ -6578,6 +6605,8 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
*simple_order=0; // Must do a temp table to sort
else if (!(order_tables & not_const_tables))
{
+ if (order->item[0]->with_subselect)
+ order->item[0]->val_str(&order->item[0]->str_value);
DBUG_PRINT("info",("removing: %s", order->item[0]->full_name()));
continue; // skip const item
}
@@ -6915,9 +6944,9 @@ static bool check_simple_equality(Item *left_item, Item *right_item,
else
{
/* None of the fields was found in multiple equalities */
- Item_equal *item= new Item_equal((Item_field *) left_item,
- (Item_field *) right_item);
- cond_equal->current_level.push_back(item);
+ Item_equal *item_equal= new Item_equal((Item_field *) left_item,
+ (Item_field *) right_item);
+ cond_equal->current_level.push_back(item_equal);
}
}
return TRUE;
@@ -7024,8 +7053,8 @@ static bool check_row_equality(Item *left_row, Item_row *right_row,
for (uint i= 0 ; i < n; i++)
{
bool is_converted;
- Item *left_item= left_row->el(i);
- Item *right_item= right_row->el(i);
+ Item *left_item= left_row->element_index(i);
+ Item *right_item= right_row->element_index(i);
if (left_item->type() == Item::ROW_ITEM &&
right_item->type() == Item::ROW_ITEM)
is_converted= check_row_equality((Item_row *) left_item,
@@ -7415,14 +7444,15 @@ static COND *build_equal_items(THD *thd, COND *cond,
{
if (table->on_expr)
{
- List<TABLE_LIST> *join_list= table->nested_join ?
- &table->nested_join->join_list : NULL;
+ List<TABLE_LIST> *nested_join_list= table->nested_join ?
+ &table->nested_join->join_list : NULL;
/*
We can modify table->on_expr because its old value will
be restored before re-execution of PS/SP.
*/
table->on_expr= build_equal_items(thd, table->on_expr, inherited,
- join_list, &table->cond_equal);
+ nested_join_list,
+ &table->cond_equal);
}
}
}
@@ -7795,7 +7825,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
value->result_type() != STRING_RESULT ||
left_item->collation.collation == value->collation.collation))
{
- Item *tmp=value->new_item();
+ Item *tmp=value->clone_item();
tmp->collation.set(right_item->collation);
if (tmp)
@@ -7819,7 +7849,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
value->result_type() != STRING_RESULT ||
right_item->collation.collation == value->collation.collation))
{
- Item *tmp=value->new_item();
+ Item *tmp= value->clone_item();
tmp->collation.set(left_item->collation);
if (tmp)
@@ -8440,7 +8470,6 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
Item::cond_result *cond_value)
{
THD *thd= join->thd;
- SELECT_LEX *select= thd->lex->current_select;
DBUG_ENTER("optimize_cond");
if (!conds)
@@ -12109,7 +12138,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
/*
- Check if GROUP BY/DISTINCT can be optimized away because the set is
+ Check if GROUP BY/DISTINCT can be optimized away because the set is
already known to be distinct.
SYNOPSIS
@@ -12117,7 +12146,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
table The table to operate on.
find_func function to iterate over the list and search
for a field
-
+
DESCRIPTION
Used in removing the GROUP BY/DISTINCT of the following types of
statements:
@@ -12128,12 +12157,13 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
then <any combination of a,b,c>,{whatever} is also distinct
This function checks if all the key parts of any of the unique keys
- of the table are referenced by a list : either the select list
+ of the table are referenced by a list : either the select list
through find_field_in_item_list or GROUP BY list through
find_field_in_order_list.
- If the above holds then we can safely remove the GROUP BY/DISTINCT,
+ If the above holds and the key parts cannot contain NULLs then we
+ can safely remove the GROUP BY/DISTINCT,
as no result set can be more distinct than an unique key.
-
+
RETURN VALUE
1 found
0 not found.
@@ -12156,7 +12186,8 @@ list_contains_unique_index(TABLE *table,
key_part < key_part_end;
key_part++)
{
- if (!find_func(key_part->field, data))
+ if (key_part->field->maybe_null() ||
+ !find_func(key_part->field, data))
break;
}
if (key_part == key_part_end)
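The added maybe_null() test enforces the NULL restriction described in the comment above: GROUP BY/DISTINCT may only be dropped when every key part of some unique key is referenced by the list and none of those parts can be NULL. A simplified stand-alone sketch of the whole rule (Key, KeyPart and the function name are invented for the illustration):

    #include <set>
    #include <vector>

    struct KeyPart { int field_id; bool nullable; };
    struct Key     { bool unique; std::vector<KeyPart> parts; };

    /* True if some unique key has all parts referenced and none nullable,
       i.e. the rows are already distinct on the referenced columns. */
    static bool covers_unique_not_null_key(const std::vector<Key> &keys,
                                           const std::set<int> &referenced)
    {
      for (const Key &key : keys)
      {
        if (!key.unique)
          continue;
        bool all_parts_ok= true;
        for (const KeyPart &part : key.parts)
        {
          if (part.nullable || !referenced.count(part.field_id))
          {
            all_parts_ok= false;
            break;
          }
        }
        if (all_parts_ok)
          return true;
      }
      return false;
    }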
@@ -12264,13 +12295,14 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
DBUG_ENTER("test_if_skip_sort_order");
LINT_INIT(ref_key_parts);
+ /* Check which keys can be used to resolve ORDER BY. */
+ usable_keys= table->keys_in_use_for_query;
+
/*
- Check which keys can be used to resolve ORDER BY.
- We must not try to use disabled keys.
+ Keys disabled by ALTER TABLE ... DISABLE KEYS should have already
+ been taken into account.
*/
- usable_keys= table->s->keys_in_use;
- /* we must not consider keys that are disabled by IGNORE INDEX */
- usable_keys.intersect(table->keys_in_use_for_query);
+ DBUG_ASSERT(usable_keys.is_subset(table->s->keys_in_use));
for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next)
{
@@ -14826,7 +14858,7 @@ int JOIN::rollup_send_data(uint idx)
1 if write_data_failed()
*/
-int JOIN::rollup_write_data(uint idx, TABLE *table)
+int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
{
uint i;
for (i= send_group_parts ; i-- > idx ; )
@@ -14837,7 +14869,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table)
ref_pointer_array_size);
if ((!having || having->val_int()))
{
- int error;
+ int write_error;
Item *item;
List_iterator_fast<Item> it(rollup.fields[i]);
while ((item= it++))
@@ -14846,10 +14878,10 @@ int JOIN::rollup_write_data(uint idx, TABLE *table)
item->save_in_result_field(1);
}
copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
- if ((error= table->file->write_row(table->record[0])))
+ if ((write_error= table_arg->file->write_row(table_arg->record[0])))
{
- if (create_myisam_from_heap(thd, table, &tmp_table_param,
- error, 0))
+ if (create_myisam_from_heap(thd, table_arg, &tmp_table_param,
+ write_error, 0))
return 1;
}
}
@@ -15045,9 +15077,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
}
else
{
- TABLE_LIST *tab=table->pos_in_table_list;
- item_list.push_back(new Item_string(tab->alias,
- strlen(tab->alias),
+ TABLE_LIST *real_table= table->pos_in_table_list;
+ item_list.push_back(new Item_string(real_table->alias,
+ strlen(real_table->alias),
cs));
}
/* "partitions" column */
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index 0ec7c54487a..5fa97dc5c2b 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -32,7 +32,6 @@ uint servers_cache_initialised=FALSE;
static uint servers_version=0;
static MEM_ROOT mem;
static rw_lock_t THR_LOCK_servers;
-static bool initialized=0;
static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length,
my_bool not_used __attribute__((unused)))
@@ -329,24 +328,22 @@ my_bool get_server_from_table_to_cache(TABLE *table)
my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options)
{
- byte server_key[MAX_KEY_LENGTH];
int result= 1;
int error= 0;
TABLE_LIST tables;
TABLE *table;
-
DBUG_ENTER("server_exists");
bzero((char*) &tables, sizeof(tables));
tables.db= (char*) "mysql";
tables.alias= tables.table_name= (char*) "servers";
- table->use_all_columns();
-
/* need to open before acquiring THR_LOCK_plugin or it will deadlock */
if (! (table= open_ltable(thd, &tables, TL_WRITE)))
DBUG_RETURN(TRUE);
+ table->use_all_columns();
+
rw_wrlock(&THR_LOCK_servers);
VOID(pthread_mutex_lock(&servers_cache_mutex));
@@ -393,7 +390,6 @@ my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options)
int insert_server(THD *thd, FOREIGN_SERVER *server)
{
- byte server_key[MAX_KEY_LENGTH];
int error= 0;
TABLE_LIST tables;
TABLE *table;
@@ -608,7 +604,6 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server)
int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
{
- byte server_key[MAX_KEY_LENGTH];
int error= 0;
TABLE_LIST tables;
TABLE *table;
@@ -1208,7 +1203,7 @@ void servers_free(bool end)
FOREIGN_SERVER *get_server_by_name(const char *server_name)
{
ulong error_num=0;
- uint i, server_name_length;
+ uint server_name_length;
FOREIGN_SERVER *server= 0;
DBUG_ENTER("get_server_by_name");
DBUG_PRINT("info", ("server_name %s", server_name));
diff --git a/sql/sql_servers.h b/sql/sql_servers.h
index 36fb4d07d1b..23b8cefd5bb 100644
--- a/sql/sql_servers.h
+++ b/sql/sql_servers.h
@@ -26,7 +26,6 @@ typedef struct st_federated_server
/* cache handlers */
my_bool servers_init(bool dont_read_server_table);
-static my_bool servers_load(THD *thd, TABLE_LIST *tables);
my_bool servers_reload(THD *thd);
my_bool get_server_from_table_to_cache(TABLE *table);
void servers_free(bool end=0);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index d8ae85ca440..2ca64b9d5ed 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -32,7 +32,6 @@
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
#endif
-
enum enum_i_s_events_fields
{
ISE_EVENT_CATALOG= 0,
@@ -57,11 +56,11 @@ enum enum_i_s_events_fields
};
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
static const char *grant_names[]={
"select","insert","update","delete","create","drop","reload","shutdown",
"process","file","grant","references","index","alter"};
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
"grant_types",
grant_names, NULL};
@@ -140,7 +139,6 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin,
{
TABLE *table= (TABLE*) arg;
struct st_mysql_plugin *plug= plugin->plugin;
- Protocol *protocol= thd->protocol;
CHARSET_INFO *cs= system_charset_info;
char version_buf[20];
@@ -153,8 +151,7 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin,
cs);
- switch (plugin->state)
- {
+ switch (plugin->state) {
/* case PLUGIN_IS_FREED: does not happen */
case PLUGIN_IS_DELETED:
table->field[2]->store(STRING_WITH_LEN("DELETED"), cs);
@@ -688,10 +685,10 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
bool mysqld_show_create_db(THD *thd, char *dbname,
HA_CREATE_INFO *create_info)
{
- Security_context *sctx= thd->security_ctx;
char buff[2048];
String buffer(buff, sizeof(buff), system_charset_info);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ Security_context *sctx= thd->security_ctx;
uint db_access;
#endif
HA_CREATE_INFO create;
@@ -1020,7 +1017,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
HA_CREATE_INFO *create_info_arg)
{
List<Item> field_list;
- char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end;
+ char tmp[MAX_FIELD_WIDTH], *for_str, buff[128];
const char *alias;
String type(tmp, sizeof(tmp), system_charset_info);
Field **ptr,*field;
@@ -1307,8 +1304,9 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
but may extrapolate its existence from that of an AUTO_INCREMENT column.
*/
- if(create_info.auto_increment_value > 1)
+ if (create_info.auto_increment_value > 1)
{
+ char *end;
packet->append(STRING_WITH_LEN(" AUTO_INCREMENT="));
end= longlong10_to_str(create_info.auto_increment_value, buff,10);
packet->append(buff, (uint) (end - buff));
@@ -1338,6 +1336,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
if (share->min_rows)
{
+ char *end;
packet->append(STRING_WITH_LEN(" MIN_ROWS="));
end= longlong10_to_str(share->min_rows, buff, 10);
packet->append(buff, (uint) (end- buff));
@@ -1345,6 +1344,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
if (share->max_rows && !table_list->schema_table)
{
+ char *end;
packet->append(STRING_WITH_LEN(" MAX_ROWS="));
end= longlong10_to_str(share->max_rows, buff, 10);
packet->append(buff, (uint) (end - buff));
@@ -1352,6 +1352,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
if (share->avg_row_length)
{
+ char *end;
packet->append(STRING_WITH_LEN(" AVG_ROW_LENGTH="));
end= longlong10_to_str(share->avg_row_length, buff,10);
packet->append(buff, (uint) (end - buff));
@@ -1372,6 +1373,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
}
if (table->s->key_block_size)
{
+ char *end;
packet->append(STRING_WITH_LEN(" KEY_BLOCK_SIZE="));
end= longlong10_to_str(table->s->key_block_size, buff, 10);
packet->append(buff, (uint) (end - buff));
@@ -2067,7 +2069,7 @@ static bool show_status_array(THD *thd, const char *wild,
if (show_type == SHOW_SYS)
{
- show_type= ((sys_var*) value)->type();
+ show_type= ((sys_var*) value)->show_type();
value= (char*) ((sys_var*) value)->value_ptr(thd, value_type,
&null_lex_str);
}
@@ -2120,7 +2122,7 @@ static bool show_status_array(THD *thd, const char *wild,
end= strend(pos);
break;
}
- case SHOW_CHAR_PTR:
+ case SHOW_CHAR_PTR:
{
if (!(pos= *(char**) value))
pos= "";
@@ -2541,19 +2543,21 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
List<char> bases;
List_iterator_fast<char> it(bases);
COND *partial_cond;
- Security_context *sctx= thd->security_ctx;
uint derived_tables= lex->derived_tables;
int error= 1;
enum legacy_db_type not_used;
Open_tables_state open_tables_state_backup;
bool save_view_prepare_mode= lex->view_prepare_mode;
Query_tables_list query_tables_list_backup;
- lex->view_prepare_mode= TRUE;
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ Security_context *sctx= thd->security_ctx;
+#endif
DBUG_ENTER("get_all_tables");
LINT_INIT(end);
LINT_INIT(len);
+ lex->view_prepare_mode= TRUE;
lex->reset_n_backup_query_tables_list(&query_tables_list_backup);
/*
@@ -2792,7 +2796,9 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond)
bool with_i_schema;
HA_CREATE_INFO create;
TABLE *table= tables->table;
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *sctx= thd->security_ctx;
+#endif
DBUG_ENTER("fill_schema_shemata");
if (make_db_list(thd, &files, &idx_field_vals,
@@ -3147,7 +3153,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
field->real_type() == MYSQL_TYPE_VARCHAR || // For varbinary type
field->real_type() == MYSQL_TYPE_STRING) // For binary type
{
- uint32 octet_max_length= field->max_length();
+ uint32 octet_max_length= field->max_display_length();
if (is_blob && octet_max_length != (uint32) 4294967295U)
octet_max_length /= field->charset()->mbmaxlen;
longlong char_max_len= is_blob ?
@@ -3177,10 +3183,10 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
case MYSQL_TYPE_LONG:
case MYSQL_TYPE_LONGLONG:
case MYSQL_TYPE_INT24:
- field_length= field->max_length() - 1;
+ field_length= field->max_display_length() - 1;
break;
case MYSQL_TYPE_BIT:
- field_length= field->max_length();
+ field_length= field->max_display_length();
decimals= -1; // return NULL
break;
case MYSQL_TYPE_FLOAT:
@@ -3899,8 +3905,8 @@ static int get_schema_key_column_usage_record(THD *thd,
show_table->file->get_foreign_key_list(thd, &f_key_list);
FOREIGN_KEY_INFO *f_key_info;
- List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
- while ((f_key_info= it++))
+ List_iterator_fast<FOREIGN_KEY_INFO> fkey_it(f_key_list);
+ while ((f_key_info= fkey_it++))
{
LEX_STRING *f_info;
LEX_STRING *r_info;
@@ -4064,7 +4070,6 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
partition_element *part_elem;
List_iterator<partition_element> part_it(part_info->partitions);
uint part_pos= 0, part_id= 0;
- uint no_parts= part_info->no_parts;
restore_record(table, s->default_values);
table->field[1]->store(base_name, strlen(base_name), cs);
@@ -4234,6 +4239,7 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
}
+#ifdef NOT_USED
static interval_type get_real_interval_type(interval_type i_type)
{
switch (i_type) {
@@ -4277,6 +4283,8 @@ static interval_type get_real_interval_type(interval_type i_type)
return INTERVAL_SECOND;
}
+#endif
+
/*
  Loads an event from mysql.event and copies its data to a row of
@@ -5077,6 +5085,7 @@ bool get_schema_tables_result(JOIN *join)
if (is_subselect) // is subselect
{
+ table_list->table->file->extra(HA_EXTRA_NO_CACHE);
table_list->table->file->extra(HA_EXTRA_RESET_STATE);
table_list->table->file->delete_all_rows();
free_io_cache(table_list->table);
@@ -5120,7 +5129,6 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin,
int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond)
{
- TABLE *table= tables->table;
DBUG_ENTER("fill_schema_files");
struct run_hton_fill_schema_files_args args;
@@ -5178,7 +5186,7 @@ int fill_schema_status(THD *thd, SHOW_VAR *variables,
if (show_type == SHOW_SYS)
{
- show_type= ((sys_var*) value)->type();
+ show_type= ((sys_var*) value)->show_type();
value= (char*) ((sys_var*) value)->value_ptr(thd, OPT_GLOBAL,
&null_lex_str);
}
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 2aaab83796f..52af2f2dd90 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -607,27 +607,26 @@ skip:
}
/*
-** replace substring with string
-** If wrong parameter or not enough memory, do nothing
+ Replace substring with string
+ If wrong parameter or not enough memory, do nothing
*/
-
bool String::replace(uint32 offset,uint32 arg_length,const String &to)
{
return replace(offset,arg_length,to.ptr(),to.length());
}
bool String::replace(uint32 offset,uint32 arg_length,
- const char *to,uint32 length)
+ const char *to, uint32 to_length)
{
- long diff = (long) length-(long) arg_length;
+ long diff = (long) to_length-(long) arg_length;
if (offset+arg_length <= str_length)
{
if (diff < 0)
{
- if (length)
- memcpy(Ptr+offset,to,length);
- bmove(Ptr+offset+length,Ptr+offset+arg_length,
+ if (to_length)
+ memcpy(Ptr+offset,to,to_length);
+ bmove(Ptr+offset+to_length,Ptr+offset+arg_length,
str_length-offset-arg_length);
}
else
@@ -639,8 +638,8 @@ bool String::replace(uint32 offset,uint32 arg_length,
bmove_upp(Ptr+str_length+diff,Ptr+str_length,
str_length-offset-arg_length);
}
- if (length)
- memcpy(Ptr+offset,to,length);
+ if (to_length)
+ memcpy(Ptr+offset,to,to_length);
}
str_length+=(uint32) diff;
}
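Renaming the parameter from length to to_length only removes the ambiguity between the replaced span and the replacement; the move-then-copy logic is unchanged. A stand-alone sketch of the same in-place replace on a plain buffer (replace_span is invented for the illustration; memmove covers both the shrink and the grow case, and unlike the real String::replace it does not reallocate, so the caller must ensure the buffer can hold a grown result):

    #include <cstddef>
    #include <cstring>

    /* Replace buf[offset .. offset+span_len) with to[0 .. to_len) in place
       and return the new logical length. */
    static size_t replace_span(char *buf, size_t buf_len,
                               size_t offset, size_t span_len,
                               const char *to, size_t to_len)
    {
      /* Shift the tail so it ends up directly after the replacement. */
      memmove(buf + offset + to_len, buf + offset + span_len,
              buf_len - offset - span_len);
      /* Then copy the replacement itself. */
      if (to_len)
        memcpy(buf + offset, to, to_len);
      return buf_len + to_len - span_len;
    }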
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 08c3a4cb60d..128ed749b5f 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -84,7 +84,8 @@ public:
{ /* never called */ }
~String() { free(); }
- inline void set_charset(CHARSET_INFO *charset) { str_charset= charset; }
+ inline void set_charset(CHARSET_INFO *charset_arg)
+ { str_charset= charset_arg; }
inline CHARSET_INFO *charset() const { return str_charset; }
inline uint32 length() const { return str_length;}
inline uint32 alloced_length() const { return Alloced_length;}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index c8dc2f2e942..bbf510a7437 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -598,7 +598,6 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
static bool init_ddl_log()
{
- bool error= FALSE;
char file_name[FN_REFLEN];
DBUG_ENTER("init_ddl_log");
@@ -2296,14 +2295,14 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
interval= sql_field->interval= typelib(stmt_root,
sql_field->interval_list);
- List_iterator<String> it(sql_field->interval_list);
+ List_iterator<String> int_it(sql_field->interval_list);
String conv, *tmp;
char comma_buf[2];
int comma_length= cs->cset->wc_mb(cs, ',', (uchar*) comma_buf,
(uchar*) comma_buf +
sizeof(comma_buf));
DBUG_ASSERT(comma_length > 0);
- for (uint i= 0; (tmp= it++); i++)
+ for (uint i= 0; (tmp= int_it++); i++)
{
uint lengthsp;
if (String::needs_conversion(tmp->length(), tmp->charset(),
@@ -4037,7 +4036,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
HA_CHECK_OPT *),
int (view_operator_func)(THD *, TABLE_LIST*))
{
- TABLE_LIST *table, *save_next_global, *save_next_local;
+ TABLE_LIST *table;
SELECT_LEX *select= &thd->lex->select_lex;
List<Item> field_list;
Item *item;
@@ -4071,46 +4070,48 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
thd->open_options|= extra_open_options;
table->lock_type= lock_type;
/* open only one table from local list of command */
- save_next_global= table->next_global;
- table->next_global= 0;
- save_next_local= table->next_local;
- table->next_local= 0;
- select->table_list.first= (byte*)table;
- /*
-    Time zone tables and SP tables can be added to the lex->query_tables list,
-    so it has to be prepared.
- TODO: Investigate if we can put extra tables into argument instead of
- using lex->query_tables
- */
- lex->query_tables= table;
- lex->query_tables_last= &table->next_global;
- lex->query_tables_own_last= 0;
- thd->no_warnings_for_error= no_warnings_for_error;
- if (view_operator_func == NULL)
- table->required_type=FRMTYPE_TABLE;
-
- /*
- If we want to perform an admin operation on the log table
- (E.g. rename) and lock_type >= TL_READ_NO_INSERT disable
- log tables
- */
-
- if (check_if_log_table(table->db_length, table->db,
- table->table_name_length,
- table->table_name, 1) &&
- lock_type >= TL_READ_NO_INSERT)
{
- disable_logs= 1;
- logger.lock();
- logger.tmp_close_log_tables(thd);
- }
+ TABLE_LIST *save_next_global, *save_next_local;
+ save_next_global= table->next_global;
+ table->next_global= 0;
+ save_next_local= table->next_local;
+ table->next_local= 0;
+ select->table_list.first= (byte*)table;
+ /*
+        Time zone tables and SP tables can be added to the lex->query_tables list,
+        so it has to be prepared.
+ TODO: Investigate if we can put extra tables into argument instead of
+ using lex->query_tables
+ */
+ lex->query_tables= table;
+ lex->query_tables_last= &table->next_global;
+ lex->query_tables_own_last= 0;
+ thd->no_warnings_for_error= no_warnings_for_error;
+ if (view_operator_func == NULL)
+ table->required_type=FRMTYPE_TABLE;
+
+ /*
+ If we want to perform an admin operation on the log table
+ (E.g. rename) and lock_type >= TL_READ_NO_INSERT disable
+ log tables
+ */
- open_and_lock_tables(thd, table);
- thd->no_warnings_for_error= 0;
- table->next_global= save_next_global;
- table->next_local= save_next_local;
- thd->open_options&= ~extra_open_options;
+ if (check_if_log_table(table->db_length, table->db,
+ table->table_name_length,
+ table->table_name, 1) &&
+ lock_type >= TL_READ_NO_INSERT)
+ {
+ disable_logs= 1;
+ logger.lock();
+ logger.tmp_close_log_tables(thd);
+ }
+ open_and_lock_tables(thd, table);
+ thd->no_warnings_for_error= 0;
+ table->next_global= save_next_global;
+ table->next_local= save_next_local;
+ thd->open_options&= ~extra_open_options;
+ }
if (prepare_func)
{
switch ((*prepare_func)(thd, table, check_opt)) {
@@ -5303,7 +5304,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
ALTER_INFO *alter_info, bool do_send_ok)
{
TABLE *table,*new_table=0;
- int error;
+ int error= 0;
char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN];
char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias;
char index_file[FN_REFLEN], data_file[FN_REFLEN], tablespace[FN_LEN];
@@ -5413,11 +5414,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
VOID(pthread_mutex_lock(&LOCK_open));
if (lock_table_names(thd, table_list))
{
- error= TRUE;
+ error= 1;
goto view_err;
}
- error= FALSE;
if (!do_rename(thd, table_list, new_db, new_name, new_name, 1))
{
if (mysql_bin_log.is_open())
@@ -5544,7 +5544,6 @@ view_err:
{
switch (alter_info->keys_onoff) {
case LEAVE_AS_IS:
- error= 0;
break;
case ENABLE:
/*
@@ -5576,10 +5575,10 @@ view_err:
}
if (error == HA_ERR_WRONG_COMMAND)
{
+ error= 0;
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
table->alias);
- error= 0;
}
VOID(pthread_mutex_lock(&LOCK_open));
@@ -5620,10 +5619,10 @@ view_err:
if (error == HA_ERR_WRONG_COMMAND)
{
+ error= 0;
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
table->alias);
- error= 0;
}
if (!error)
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 3f9058f74c2..df363c3c21c 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -781,7 +781,7 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
table == (*fld)->table)))
return 1;
(*old_fld)->move_field_offset((my_ptrdiff_t)(table->record[1] -
- table->record[0]));
+ table->record[0]));
}
*old_fld= 0;
@@ -799,8 +799,8 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table)
void Table_triggers_list::set_table(TABLE *new_table)
{
- table= new_table;
- for (Field **field= table->triggers->record1_field ; *field ; field++)
+ trigger_table= new_table;
+ for (Field **field= new_table->triggers->record1_field ; *field ; field++)
{
(*field)->table= (*field)->orig_table= new_table;
(*field)->table_name= &new_table->alias;
@@ -1363,7 +1363,8 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
It is OK to allocate some memory on table's MEM_ROOT since this
table instance will be thrown out at the end of rename anyway.
*/
- new_def.str= memdup_root(&table->mem_root, buff.ptr(), buff.length());
+ new_def.str= memdup_root(&trigger_table->mem_root, buff.ptr(),
+ buff.length());
new_def.length= buff.length();
on_table_name->str= new_def.str + before_on_len;
on_table_name->length= on_q_table_name_len;
@@ -1541,12 +1542,12 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
if (old_row_is_record1)
{
old_field= record1_field;
- new_field= table->field;
+ new_field= trigger_table->field;
}
else
{
new_field= record1_field;
- old_field= table->field;
+ old_field= trigger_table->field;
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
Security_context *save_ctx;
@@ -1562,7 +1563,8 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
fill_effective_table_privileges(thd,
&subject_table_grants[event][time_type],
- table->s->db.str, table->s->table_name.str);
+ trigger_table->s->db.str,
+ trigger_table->s->table_name.str);
/* Check that the definer has TRIGGER privilege on the subject table. */
@@ -1573,7 +1575,7 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), priv_desc,
thd->security_ctx->priv_user, thd->security_ctx->host_or_ip,
- table->s->table_name.str);
+ trigger_table->s->table_name.str);
sp_restore_security_context(thd, save_ctx);
return TRUE;
@@ -1582,7 +1584,7 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
thd->reset_sub_statement_state(&statement_state, SUB_STMT_TRIGGER);
err_status= sp_trigger->execute_trigger
- (thd, table->s->db.str, table->s->table_name.str,
+ (thd, trigger_table->s->db.str, trigger_table->s->table_name.str,
&subject_table_grants[event][time_type]);
thd->restore_sub_statement_state(&statement_state);
@@ -1623,13 +1625,13 @@ void Table_triggers_list::mark_fields_used(trg_event_type event)
/* We cannot mark fields which does not present in table. */
if (trg_field->field_idx != (uint)-1)
{
- bitmap_set_bit(table->read_set, trg_field->field_idx);
+ bitmap_set_bit(trigger_table->read_set, trg_field->field_idx);
if (trg_field->get_settable_routine_parameter())
- bitmap_set_bit(table->write_set, trg_field->field_idx);
+ bitmap_set_bit(trigger_table->write_set, trg_field->field_idx);
}
}
}
- table->file->column_bitmaps_signal();
+ trigger_table->file->column_bitmaps_signal();
}
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index 3892e964aa7..75dda6be1cf 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -42,8 +42,9 @@ class Table_triggers_list: public Sql_alloc
*/
Field **new_field;
Field **old_field;
+
/* TABLE instance for which this triggers list object was created */
- TABLE *table;
+ TABLE *trigger_table;
/*
Names of triggers.
Should correspond to order of triggers on definitions_list,
@@ -83,7 +84,7 @@ public:
List<LEX_STRING> definers_list;
Table_triggers_list(TABLE *table_arg):
- record1_field(0), table(table_arg)
+ record1_field(0), trigger_table(table_arg)
{
bzero((char *)bodies, sizeof(bodies));
bzero((char *)trigger_fields, sizeof(trigger_fields));
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 985982d48de..9fe20c502d6 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -111,7 +111,7 @@ bool select_union::flush()
*/
bool
-select_union::create_result_table(THD *thd, List<Item> *column_types,
+select_union::create_result_table(THD *thd_arg, List<Item> *column_types,
bool is_union_distinct, ulonglong options,
const char *alias)
{
@@ -119,7 +119,7 @@ select_union::create_result_table(THD *thd, List<Item> *column_types,
tmp_table_param.init();
tmp_table_param.field_count= column_types->elements;
- if (! (table= create_tmp_table(thd, &tmp_table_param, *column_types,
+ if (! (table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
(ORDER*) 0, is_union_distinct, 1,
options, HA_POS_ERROR, (char*) alias)))
return TRUE;
@@ -141,9 +141,9 @@ select_union::create_result_table(THD *thd, List<Item> *column_types,
*/
void
-st_select_lex_unit::init_prepare_fake_select_lex(THD *thd)
+st_select_lex_unit::init_prepare_fake_select_lex(THD *thd_arg)
{
- thd->lex->current_select= fake_select_lex;
+ thd_arg->lex->current_select= fake_select_lex;
fake_select_lex->table_list.link_in_list((byte *)&result_table_list,
(byte **)
&result_table_list.next_local);
@@ -197,7 +197,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
DBUG_RETURN(FALSE);
}
prepared= 1;
- res= FALSE;
+ saved_error= FALSE;
thd_arg->lex->current_select= sl= first_sl;
found_rows_for_union= first_sl->options & OPTION_FOUND_ROWS;
@@ -238,23 +238,25 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
can_skip_order_by= is_union && !(sl->braces && sl->explicit_limit);
- res= join->prepare(&sl->ref_pointer_array,
- (TABLE_LIST*) sl->table_list.first, sl->with_wild,
- sl->where,
- (can_skip_order_by ? 0 : sl->order_list.elements) +
- sl->group_list.elements,
- can_skip_order_by ?
- (ORDER*) 0 : (ORDER *)sl->order_list.first,
- (ORDER*) sl->group_list.first,
- sl->having,
- (is_union ? (ORDER*) 0 :
- (ORDER*) thd_arg->lex->proc_list.first),
- sl, this);
+ saved_error= join->prepare(&sl->ref_pointer_array,
+ (TABLE_LIST*) sl->table_list.first,
+ sl->with_wild,
+ sl->where,
+ (can_skip_order_by ? 0 :
+ sl->order_list.elements) +
+ sl->group_list.elements,
+ can_skip_order_by ?
+ (ORDER*) 0 : (ORDER *)sl->order_list.first,
+ (ORDER*) sl->group_list.first,
+ sl->having,
+ (is_union ? (ORDER*) 0 :
+ (ORDER*) thd_arg->lex->proc_list.first),
+ sl, this);
/* There are no * in the statement anymore (for PS) */
sl->with_wild= 0;
last_procedure= join->procedure;
- if ((res= (res || thd_arg->is_fatal_error)))
+ if (saved_error || (saved_error= thd_arg->is_fatal_error))
goto err;
/*
      Use the item list of the underlying select for derived tables to preserve
@@ -349,12 +351,12 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
arena= thd->activate_stmt_arena_if_needed(&backup_arena);
- res= table->fill_item_list(&item_list);
+ saved_error= table->fill_item_list(&item_list);
if (arena)
thd->restore_active_arena(arena, &backup_arena);
- if (res)
+ if (saved_error)
goto err;
if (thd->stmt_arena->is_stmt_prepare())
@@ -373,7 +375,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
fake_select_lex->item_list= item_list;
thd_arg->lex->current_select= fake_select_lex;
- res= fake_select_lex->join->
+ saved_error= fake_select_lex->join->
prepare(&fake_select_lex->ref_pointer_array,
(TABLE_LIST*) fake_select_lex->table_list.first,
0, 0,
@@ -398,7 +400,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
thd_arg->lex->current_select= lex_select_save;
- DBUG_RETURN(res || thd_arg->is_fatal_error);
+ DBUG_RETURN(saved_error || thd_arg->is_fatal_error);
err:
thd_arg->lex->current_select= lex_select_save;
@@ -442,7 +444,7 @@ bool st_select_lex_unit::exec()
thd->lex->current_select= sl;
if (optimized)
- res= sl->join->reinit();
+ saved_error= sl->join->reinit();
else
{
set_limit(sl);
@@ -465,9 +467,9 @@ bool st_select_lex_unit::exec()
sl->join->select_options=
(select_limit_cnt == HA_POS_ERROR || sl->braces) ?
sl->options & ~OPTION_FOUND_ROWS : sl->options | found_rows_for_union;
- res= sl->join->optimize();
+ saved_error= sl->join->optimize();
}
- if (!res)
+ if (!saved_error)
{
records_at_start= table->file->stats.records;
sl->join->exec();
@@ -477,11 +479,11 @@ bool st_select_lex_unit::exec()
DBUG_RETURN(TRUE);
table->no_keyread=1;
}
- res= sl->join->error;
+ saved_error= sl->join->error;
offset_limit_cnt= (ha_rows)(sl->offset_limit ?
sl->offset_limit->val_uint() :
0);
- if (!res)
+ if (!saved_error)
{
examined_rows+= thd->examined_row_count;
if (union_result->flush())
@@ -491,10 +493,10 @@ bool st_select_lex_unit::exec()
}
}
}
- if (res)
+ if (saved_error)
{
thd->lex->current_select= lex_select_save;
- DBUG_RETURN(res);
+ DBUG_RETURN(saved_error);
}
/* Needed for the following test and for records_at_start in next loop */
int error= table->file->info(HA_STATUS_VARIABLE);
@@ -520,7 +522,7 @@ bool st_select_lex_unit::exec()
optimized= 1;
/* Send result to 'result' */
- res= TRUE;
+ saved_error= TRUE;
{
List<Item_func_match> empty_list;
empty_list.empty();
@@ -561,17 +563,17 @@ bool st_select_lex_unit::exec()
}
join->init(thd, item_list, fake_select_lex->options, result);
}
- res= mysql_select(thd, &fake_select_lex->ref_pointer_array,
- &result_table_list,
- 0, item_list, NULL,
- global_parameters->order_list.elements,
- (ORDER*)global_parameters->order_list.first,
- (ORDER*) NULL, NULL, (ORDER*) NULL,
- fake_select_lex->options | SELECT_NO_UNLOCK,
- result, this, fake_select_lex);
+ saved_error= mysql_select(thd, &fake_select_lex->ref_pointer_array,
+ &result_table_list,
+ 0, item_list, NULL,
+ global_parameters->order_list.elements,
+ (ORDER*)global_parameters->order_list.first,
+ (ORDER*) NULL, NULL, (ORDER*) NULL,
+ fake_select_lex->options | SELECT_NO_UNLOCK,
+ result, this, fake_select_lex);
fake_select_lex->table_list.empty();
- if (!res)
+ if (!saved_error)
{
thd->limit_found_rows = (ulonglong)table->file->stats.records + add_rows;
thd->examined_row_count+= examined_rows;
@@ -583,7 +585,7 @@ bool st_select_lex_unit::exec()
}
}
thd->lex->current_select= lex_select_save;
- DBUG_RETURN(res);
+ DBUG_RETURN(saved_error);
}
@@ -660,18 +662,18 @@ void st_select_lex_unit::reinit_exec_mechanism()
TRUE - error
*/
-bool st_select_lex_unit::change_result(select_subselect *result,
+bool st_select_lex_unit::change_result(select_subselect *new_result,
select_subselect *old_result)
{
bool res= FALSE;
for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
{
if (sl->join && sl->join->result == old_result)
- if (sl->join->change_result(result))
+ if (sl->join->change_result(new_result))
return TRUE;
}
if (fake_select_lex && fake_select_lex->join)
- res= fake_select_lex->join->change_result(result);
+ res= fake_select_lex->join->change_result(new_result);
return (res);
}
@@ -751,4 +753,3 @@ void st_select_lex::cleanup_all_joins(bool full)
for (sl= unit->first_select(); sl; sl= sl->next_select())
sl->cleanup_all_joins(full);
}
-
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 8881f2ee5ab..baccb3358f7 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -64,7 +64,6 @@ static bool check_fields(THD *thd, List<Item> &items)
List_iterator<Item> it(items);
Item *item;
Item_field *field;
- Name_resolution_context *context= &thd->lex->select_lex.context;
while ((item= it++))
{
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index a57d21ba7b2..eb3de565d9f 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -862,7 +862,8 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
Query_arena *arena, backup;
TABLE_LIST *top_view= table->top_table();
int res;
- bool result;
+ bool result, view_is_mergeable;
+ TABLE_LIST *view_main_select_tables;
DBUG_ENTER("mysql_make_view");
DBUG_PRINT("info", ("table: 0x%lx (%s)", (ulong) table, table->table_name));
@@ -1095,9 +1096,8 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
*/
if (lex->binlog_row_based_if_mixed)
old_lex->binlog_row_based_if_mixed= TRUE;
- bool view_is_mergeable= (table->algorithm != VIEW_ALGORITHM_TMPTABLE &&
- lex->can_be_merged());
- TABLE_LIST *view_main_select_tables;
+ view_is_mergeable= (table->algorithm != VIEW_ALGORITHM_TMPTABLE &&
+ lex->can_be_merged());
LINT_INIT(view_main_select_tables);
if (view_is_mergeable)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index eb8641f011f..424fd98c4fc 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1222,7 +1222,7 @@ statement:
deallocate:
deallocate_or_drop PREPARE_SYM ident
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
if (lex->stmt_prepare_mode)
{
@@ -1242,7 +1242,7 @@ deallocate_or_drop:
prepare:
PREPARE_SYM ident FROM prepare_src
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
if (lex->stmt_prepare_mode)
{
@@ -1256,14 +1256,14 @@ prepare:
prepare_src:
TEXT_STRING_sys
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
lex->prepared_stmt_code= $1;
lex->prepared_stmt_code_is_varref= FALSE;
}
| '@' ident_or_text
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
lex->prepared_stmt_code= $2;
lex->prepared_stmt_code_is_varref= TRUE;
@@ -1272,7 +1272,7 @@ prepare_src:
execute:
EXECUTE_SYM ident
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
if (lex->stmt_prepare_mode)
{
@@ -1433,7 +1433,7 @@ create:
CREATE opt_table_options TABLE_SYM opt_if_not_exists table_ident
{
THD *thd= YYTHD;
- LEX *lex=Lex;
+ LEX *lex= thd->lex;
lex->sql_command= SQLCOM_CREATE_TABLE;
if (!lex->select_lex.add_table_to_list(thd, $5, NULL,
TL_OPTION_UPDATING,
@@ -1811,7 +1811,7 @@ create_function_tail:
RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys
{
THD *thd= YYTHD;
- LEX *lex=Lex;
+ LEX *lex= thd->lex;
if (lex->definer != NULL)
{
/*
@@ -1918,7 +1918,7 @@ create_function_tail:
sp_proc_stmt
{
THD *thd= YYTHD;
- LEX *lex= Lex;
+ LEX *lex= thd->lex;
sp_head *sp= lex->sphead;
if (sp->is_not_allowed_in_function("function"))
@@ -3458,8 +3458,8 @@ create2:
create3 {}
| LIKE table_ident
{
- LEX *lex=Lex;
- THD *thd= lex->thd;
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
if (!(lex->like_name= $2))
YYABORT;
if ($2->db.str == NULL &&
@@ -3470,8 +3470,8 @@ create2:
}
| '(' LIKE table_ident ')'
{
- LEX *lex=Lex;
- THD *thd= lex->thd;
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
if (!(lex->like_name= $3))
YYABORT;
if ($3->db.str == NULL &&
@@ -3760,7 +3760,6 @@ part_definition:
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
partition_element *p_elem= new partition_element();
- uint part_id= part_info->partitions.elements;
if (!p_elem || part_info->partitions.push_back(p_elem))
{
@@ -3912,9 +3911,8 @@ part_bit_expr:
bit_expr
{
Item *part_expr= $1;
- int part_expression_ok= 1;
- LEX *lex= Lex;
THD *thd= YYTHD;
+ LEX *lex= thd->lex;
Name_resolution_context *context= &lex->current_select->context;
TABLE_LIST *save_list= context->table_list;
const char *save_where= thd->where;
@@ -4961,8 +4959,8 @@ alter:
}
opt_create_database_options
{
- LEX *lex=Lex;
- THD *thd= Lex->thd;
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
lex->sql_command=SQLCOM_ALTER_DB;
lex->name= $3;
if (lex->name.str == NULL &&
@@ -5093,9 +5091,9 @@ alter:
| ALTER SERVER_SYM ident_or_text OPTIONS_SYM '(' server_options_list ')'
{
LEX *lex= Lex;
- Lex->sql_command= SQLCOM_ALTER_SERVER;
- Lex->server_options.server_name= $3.str;
- Lex->server_options.server_name_length= $3.length;
+ lex->sql_command= SQLCOM_ALTER_SERVER;
+ lex->server_options.server_name= $3.str;
+ lex->server_options.server_name_length= $3.length;
}
;
@@ -5399,8 +5397,8 @@ alter_list_item:
}
| RENAME opt_to table_ident
{
- LEX *lex=Lex;
- THD *thd= lex->thd;
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
uint dummy;
lex->select_lex.db=$3->db.str;
if (lex->select_lex.db == NULL &&
@@ -5767,9 +5765,9 @@ db_to_db:
ident TO_SYM ident
{
LEX *lex=Lex;
- if (Lex->db_list.push_back((LEX_STRING*)
+ if (lex->db_list.push_back((LEX_STRING*)
sql_memdup(&$1, sizeof(LEX_STRING))) ||
- Lex->db_list.push_back((LEX_STRING*)
+ lex->db_list.push_back((LEX_STRING*)
sql_memdup(&$3, sizeof(LEX_STRING))))
YYABORT;
};
@@ -6695,7 +6693,6 @@ function_call_generic:
udf_expr_list ')'
{
THD *thd= YYTHD;
- LEX *lex= Lex;
Create_func *builder;
Item *item= NULL;
@@ -6718,7 +6715,6 @@ function_call_generic:
#ifdef HAVE_DLOPEN
/* Retrieving the result of find_udf */
udf_func *udf= $<udf>3;
- LEX *lex= Lex;
if (udf)
{
@@ -7112,15 +7108,14 @@ join_table:
| table_ref normal_join table_ref
USING
{
- SELECT_LEX *sel= Select;
YYERROR_UNLESS($1 && $3);
}
'(' using_list ')'
- { add_join_natural($1,$3,$7); $$=$3; }
+ { add_join_natural($1,$3,$7,Select); $$=$3; }
| table_ref NATURAL JOIN_SYM table_factor
{
YYERROR_UNLESS($1 && ($$=$4));
- add_join_natural($1,$4,NULL);
+ add_join_natural($1,$4,NULL,Select);
}
/* LEFT JOIN variants */
@@ -7143,15 +7138,18 @@ join_table:
}
| table_ref LEFT opt_outer JOIN_SYM table_factor
{
- SELECT_LEX *sel= Select;
YYERROR_UNLESS($1 && $5);
}
USING '(' using_list ')'
- { add_join_natural($1,$5,$9); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; }
+ {
+ add_join_natural($1,$5,$9,Select);
+ $5->outer_join|=JOIN_TYPE_LEFT;
+ $$=$5;
+ }
| table_ref NATURAL LEFT opt_outer JOIN_SYM table_factor
{
YYERROR_UNLESS($1 && $6);
- add_join_natural($1,$6,NULL);
+ add_join_natural($1,$6,NULL,Select);
$6->outer_join|=JOIN_TYPE_LEFT;
$$=$6;
}
@@ -7177,7 +7175,6 @@ join_table:
}
| table_ref RIGHT opt_outer JOIN_SYM table_factor
{
- SELECT_LEX *sel= Select;
YYERROR_UNLESS($1 && $5);
}
USING '(' using_list ')'
@@ -7185,12 +7182,12 @@ join_table:
LEX *lex= Lex;
if (!($$= lex->current_select->convert_right_join()))
YYABORT;
- add_join_natural($$,$5,$9);
+ add_join_natural($$,$5,$9,Select);
}
| table_ref NATURAL RIGHT opt_outer JOIN_SYM table_factor
{
YYERROR_UNLESS($1 && $6);
- add_join_natural($6,$1,NULL);
+ add_join_natural($6,$1,NULL,Select);
LEX *lex= Lex;
if (!($$= lex->current_select->convert_right_join()))
YYABORT;
@@ -9128,7 +9125,7 @@ text_string:
param_marker:
PARAM_MARKER
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
Item_param *item;
if (! lex->parsing_options.allows_variable)
@@ -10116,7 +10113,7 @@ option_value:
| charset old_or_new_charset_name_or_default
{
THD *thd= YYTHD;
- LEX *lex= Lex;
+ LEX *lex= thd->lex;
$2= $2 ? $2: global_system_variables.character_set_client;
lex->var_list.push_back(new set_var_collation_client($2,thd->variables.collation_database,$2));
}
@@ -10150,9 +10147,9 @@ option_value:
}
| PASSWORD equal text_or_password
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
LEX_USER *user;
- LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
LEX_STRING pw;
@@ -10633,8 +10630,8 @@ require_list_element:
grant_ident:
'*'
{
- LEX *lex= Lex;
- THD *thd= lex->thd;
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
uint dummy;
if (thd->copy_db_to(&lex->current_select->db, &dummy))
YYABORT;
@@ -10997,7 +10994,6 @@ subselect:
}
| '(' subselect_start subselect ')'
{
- LEX *lex= Lex;
THD *thd= YYTHD;
/*
note that a local variable can't be used for
@@ -11197,7 +11193,7 @@ view_select:
view_select_aux:
SELECT_SYM remember_name select_init2
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
char *stmt_beg= (lex->sphead ?
(char *)lex->sphead->m_tmp_query :
@@ -11206,7 +11202,7 @@ view_select_aux:
}
| '(' remember_name select_paren ')' union_opt
{
- THD *thd=YYTHD;
+ THD *thd= YYTHD;
LEX *lex= thd->lex;
char *stmt_beg= (lex->sphead ?
(char *)lex->sphead->m_tmp_query :
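
The sql_yacc.yy hunks follow the same cleanup pattern: grammar actions fetch the THD once via YYTHD and derive the LEX from it, locals that were unused or shadowed an outer name (sel, the inner lex in function_call_generic, part_id, part_expression_ok) are removed, and add_join_natural() now receives the current SELECT_LEX as an explicit argument instead of relying on a sel local declared in the preceding action block. A small sketch of that last change, using hypothetical types and a stand-in function name rather than the real parser interfaces:

#include <cstdio>

struct SELECT_LEX { int nest_level; };
struct TABLE_LIST { int outer_join; };

// New shape: the helper takes the select context it operates on directly,
// so callers no longer declare a local just to have it in scope.
static void add_join_natural_sketch(TABLE_LIST *a, TABLE_LIST *b,
                                    SELECT_LEX *sel) {
  // ... record that a and b are naturally joined within sel ...
  std::printf("joining in select at nest level %d\n", sel->nest_level);
  (void) a;
  (void) b;
}

int main() {
  SELECT_LEX current_select = { 1 };
  TABLE_LIST t1 = { 0 }, t2 = { 0 };
  add_join_natural_sketch(&t1, &t2, &current_select);  // context passed explicitly
  return 0;
}

Passing the context explicitly both documents the dependency and removes the otherwise-unused local that the compiler would flag.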
diff --git a/sql/table.cc b/sql/table.cc
index cf2eb1705a5..fca9711115c 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1237,17 +1237,17 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (share->blob_fields)
{
Field **ptr;
- uint i, *save;
+ uint k, *save;
/* Store offsets to blob fields to find them fast */
if (!(share->blob_field= save=
(uint*) alloc_root(&share->mem_root,
(uint) (share->blob_fields* sizeof(uint)))))
goto err;
- for (i=0, ptr= share->field ; *ptr ; ptr++, i++)
+ for (k=0, ptr= share->field ; *ptr ; ptr++, k++)
{
if ((*ptr)->flags & BLOB_FLAG)
- (*save++)= i;
+ (*save++)= k;
}
}
@@ -1263,7 +1263,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
share->column_bitmap_size= bitmap_buffer_size(share->fields);
if (!(bitmaps= (my_bitmap_map*) alloc_root(&share->mem_root,
- share->column_bitmap_size)))
+ share->column_bitmap_size)))
goto err;
bitmap_init(&share->all_set, bitmaps, share->fields, FALSE);
bitmap_set_all(&share->all_set);
@@ -2972,19 +2972,17 @@ int st_table_list::view_check_option(THD *thd, bool ignore_failure)
{
if (check_option && check_option->val_int() == 0)
{
- TABLE_LIST *view= top_table();
+ TABLE_LIST *main_view= top_table();
if (ignore_failure)
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
ER_VIEW_CHECK_FAILED, ER(ER_VIEW_CHECK_FAILED),
- view->view_db.str, view->view_name.str);
+ main_view->view_db.str, main_view->view_name.str);
return(VIEW_CHECK_SKIP);
}
- else
- {
- my_error(ER_VIEW_CHECK_FAILED, MYF(0), view->view_db.str, view->view_name.str);
- return(VIEW_CHECK_ERROR);
- }
+ my_error(ER_VIEW_CHECK_FAILED, MYF(0), main_view->view_db.str,
+ main_view->view_name.str);
+ return(VIEW_CHECK_ERROR);
}
return(VIEW_CHECK_OK);
}
@@ -2996,19 +2994,20 @@ int st_table_list::view_check_option(THD *thd, bool ignore_failure)
SYNOPSIS
st_table_list::check_single_table()
- table reference on variable where to store found table
+ table_arg reference on variable where to store found table
(should be 0 on call, to find table, or point to table for
unique test)
map bit mask of tables
- view view for which we are looking table
+ view_arg view for which we are looking table
RETURN
FALSE table not found or found only one
TRUE found several tables
*/
-bool st_table_list::check_single_table(st_table_list **table, table_map map,
- st_table_list *view)
+bool st_table_list::check_single_table(st_table_list **table_arg,
+ table_map map,
+ st_table_list *view_arg)
{
for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
{
@@ -3016,13 +3015,13 @@ bool st_table_list::check_single_table(st_table_list **table, table_map map,
{
if (tbl->table->map & map)
{
- if (*table)
+ if (*table_arg)
return TRUE;
- *table= tbl;
- tbl->check_option= view->check_option;
+ *table_arg= tbl;
+ tbl->check_option= view_arg->check_option;
}
}
- else if (tbl->check_single_table(table, map, view))
+ else if (tbl->check_single_table(table_arg, map, view_arg))
return TRUE;
}
return FALSE;
@@ -3347,18 +3346,19 @@ bool st_table_list::prepare_security(THD *thd)
while ((tbl= tb++))
{
DBUG_ASSERT(tbl->referencing_view);
- char *db, *table_name;
+ char *local_db, *local_table_name;
if (tbl->view)
{
- db= tbl->view_db.str;
- table_name= tbl->view_name.str;
+ local_db= tbl->view_db.str;
+ local_table_name= tbl->view_name.str;
}
else
{
- db= tbl->db;
- table_name= tbl->table_name;
+ local_db= tbl->db;
+ local_table_name= tbl->table_name;
}
- fill_effective_table_privileges(thd, &tbl->grant, db, table_name);
+ fill_effective_table_privileges(thd, &tbl->grant, local_db,
+ local_table_name);
if (tbl->table)
tbl->table->grant= grant;
}
@@ -3430,6 +3430,7 @@ Field *Natural_join_column::field()
const char *Natural_join_column::table_name()
{
+ DBUG_ASSERT(table_ref);
return table_ref->alias;
}
@@ -3717,13 +3718,13 @@ Field_iterator_table_ref::get_or_create_column_ref(TABLE_LIST *parent_table_ref)
uint field_count;
TABLE_LIST *add_table_ref= parent_table_ref ?
parent_table_ref : table_ref;
-
LINT_INIT(field_count);
+
if (field_it == &table_field_it)
{
/* The field belongs to a stored table. */
- Field *field= table_field_it.field();
- nj_col= new Natural_join_column(field, table_ref);
+ Field *tmp_field= table_field_it.field();
+ nj_col= new Natural_join_column(tmp_field, table_ref);
field_count= table_ref->table->s->fields;
}
else if (field_it == &view_field_it)
@@ -4088,16 +4089,16 @@ void st_table_list::reinit_before_use(THD *thd)
is_schema_table_processed= FALSE;
TABLE_LIST *embedded; /* The table at the current level of nesting. */
- TABLE_LIST *embedding= this; /* The parent nested table reference. */
+ TABLE_LIST *parent_embedding= this; /* The parent nested table reference. */
do
{
- embedded= embedding;
+ embedded= parent_embedding;
if (embedded->prep_on_expr)
embedded->on_expr= embedded->prep_on_expr->copy_andor_structure(thd);
- embedding= embedded->embedding;
+ parent_embedding= embedded->embedding;
}
- while (embedding &&
- embedding->nested_join->join_list.head() == embedded);
+ while (parent_embedding &&
+ parent_embedding->nested_join->join_list.head() == embedded);
}
/*
diff --git a/sql/table.h b/sql/table.h
index 82083d79570..2923eb1db7b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -158,7 +158,12 @@ typedef struct st_table_share
LEX_STRING path; /* Path to .frm file (from datadir) */
LEX_STRING normalized_path; /* unpack_filename(path) */
LEX_STRING connect_string;
- key_map keys_in_use; /* Keys in use for table */
+
+ /*
+ Set of keys in use, implemented as a Bitmap.
+ Excludes keys disabled by ALTER TABLE ... DISABLE KEYS.
+ */
+ key_map keys_in_use;
key_map keys_for_keyread;
ha_rows min_rows, max_rows; /* create information */
ulong avg_row_length; /* create information */
@@ -313,7 +318,21 @@ struct st_table {
byte *write_row_record; /* Used as optimisation in
THD::write_row */
byte *insert_values; /* used by INSERT ... UPDATE */
- key_map quick_keys, used_keys, keys_in_use_for_query, merge_keys;
+ key_map quick_keys, used_keys;
+
+ /*
+ A set of keys that can be used in the query that references this
+ table.
+
+ All indexes disabled on the table's TABLE_SHARE (see TABLE::s) will be
+ subtracted from this set upon instantiation. Thus for any TABLE t it holds
+ that t.keys_in_use_for_query is a subset of t.s.keys_in_use. Generally we
+ must not introduce any new keys here (see setup_tables).
+
+ The set is implemented as a bitmap.
+ */
+ key_map keys_in_use_for_query;
+ key_map merge_keys;
KEY *key_info; /* data of keys in database */
Field *next_number_field; /* Set if next_number is activated */
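
The table.h hunks only add documentation: keys_in_use lives on the share and excludes keys disabled with ALTER TABLE ... DISABLE KEYS, while keys_in_use_for_query is per TABLE instance and, after the disabled keys are subtracted at instantiation, is always a subset of the share's set. The sketch below illustrates that invariant; key_map is MySQL's own bitmap class, and std::bitset<64> merely stands in for it here.

#include <bitset>
#include <cassert>
#include <cstdio>
#include <string>

typedef std::bitset<64> key_map_sketch;

int main() {
  // Share-level set: keys physically usable on the table, minus those
  // disabled with ALTER TABLE ... DISABLE KEYS.
  key_map_sketch share_keys_in_use;
  share_keys_in_use.set(0);
  share_keys_in_use.set(2);

  // Per-instance set: starts from what the query might use ...
  key_map_sketch keys_in_use_for_query;
  keys_in_use_for_query.set(0);
  keys_in_use_for_query.set(1);            // key 1 is disabled on the share

  // ... and is intersected with the share's set on instantiation, so it can
  // only ever be a subset of it (t.keys_in_use_for_query <= t.s->keys_in_use).
  keys_in_use_for_query &= share_keys_in_use;
  assert((keys_in_use_for_query & ~share_keys_in_use).none());

  std::printf("usable for this query: %s\n",
              keys_in_use_for_query.to_string().c_str());
  return 0;
}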
diff --git a/sql/tztime.cc b/sql/tztime.cc
index fe91aa71272..2cdc863565a 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1769,8 +1769,8 @@ end_with_setting_default_tz:
/* If we have default time zone try to load it */
if (default_tzname)
{
- String tmp_tzname(default_tzname, &my_charset_latin1);
- if (!(global_system_variables.time_zone= my_tz_find(&tmp_tzname, tables)))
+ String tmp_tzname2(default_tzname, &my_charset_latin1);
+ if (!(global_system_variables.time_zone= my_tz_find(&tmp_tzname2, tables)))
{
sql_print_error("Fatal error: Illegal or unknown default time zone '%s'",
default_tzname);
diff --git a/sql/unireg.cc b/sql/unireg.cc
index b1c29c885d9..5faacb02d5f 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -82,7 +82,7 @@ bool mysql_create_frm(THD *thd, const char *file_name,
uchar fileinfo[64],forminfo[288],*keybuff;
TYPELIB formnames;
uchar *screen_buff;
- char buff[32];
+ char buff[128];
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *part_info= thd->work_part_info;
#endif
@@ -175,7 +175,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
create_info->comment.length, 60);
if (tmp_len < create_info->comment.length)
{
- char buff[128];
(void) my_snprintf(buff, sizeof(buff), "Too long comment for table '%s'",
table);
if ((thd->variables.sql_mode &
@@ -549,11 +548,11 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
create_field *field;
while ((field=it++))
{
-
uint tmp_len= system_charset_info->cset->charpos(system_charset_info,
field->comment.str,
field->comment.str +
- field->comment.length, 255);
+ field->comment.length,
+ 255);
if (tmp_len < field->comment.length)
{
char buff[128];
@@ -622,8 +621,9 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
for (uint pos= 0; pos < field->interval->count; pos++)
{
char *dst;
- uint length= field->save_interval->type_lengths[pos], hex_length;
const char *src= field->save_interval->type_names[pos];
+ uint hex_length;
+ length= field->save_interval->type_lengths[pos];
hex_length= length * 2;
field->interval->type_lengths[pos]= hex_length;
field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1);