summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorhery.ramilison@oracle.com <>2011-11-17 09:00:58 +0100
committerBuild Team <MYSQL-RE_WW@oracle.com>2011-11-17 09:00:58 +0100
commit85f07c7d7de623b5c9de188e99445fb96212957d (patch)
tree1dd794717469322a1b041143778258b1fc84fbff /sql
parent6c1fa38c50341a7558aa402ba3704926fbe5426a (diff)
parent4ee0f595650f9400887ccdc89f20860fa03417f5 (diff)
downloadmariadb-git-85f07c7d7de623b5c9de188e99445fb96212957d.tar.gz
Merge from mysql-5.5.18-release
Diffstat (limited to 'sql')
-rw-r--r--sql/field.cc3
-rw-r--r--sql/ha_partition.cc202
-rw-r--r--sql/handler.cc8
-rw-r--r--sql/handler.h6
-rw-r--r--sql/item.h2
-rw-r--r--sql/item_cmpfunc.cc46
-rw-r--r--sql/item_cmpfunc.h2
-rw-r--r--sql/item_strfunc.cc15
-rw-r--r--sql/my_decimal.h38
-rw-r--r--sql/opt_range.cc12
-rw-r--r--sql/share/errmsg-utf8.txt2
-rw-r--r--sql/sp_head.cc18
-rw-r--r--sql/sql_acl.cc145
-rw-r--r--sql/sql_cache.cc34
-rw-r--r--sql/sql_lex.cc59
-rw-r--r--sql/sql_parse.cc31
-rw-r--r--sql/sql_reload.cc32
-rw-r--r--sql/sql_repl.cc10
-rw-r--r--sql/sql_view.cc38
-rw-r--r--sql/sql_yacc.yy3
20 files changed, 571 insertions, 135 deletions
diff --git a/sql/field.cc b/sql/field.cc
index e1a24e82718..ef66c1ba9bb 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -9159,8 +9159,9 @@ void Create_field::init_for_tmp_table(enum_field_types sql_type_arg,
pack_flag= FIELDFLAG_INTERVAL;
break;
- case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_NEWDECIMAL:
+ DBUG_ASSERT(decimals_arg <= DECIMAL_MAX_SCALE);
+ case MYSQL_TYPE_DECIMAL:
case MYSQL_TYPE_FLOAT:
case MYSQL_TYPE_DOUBLE:
pack_flag= FIELDFLAG_NUMBER |
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 82bd39220a9..fdaa1b0cda6 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -6663,49 +6663,81 @@ bool ha_partition::check_if_incompatible_data(HA_CREATE_INFO *create_info,
/**
+ Helper class for [final_]add_index, see handler.h
+*/
+
+class ha_partition_add_index : public handler_add_index
+{
+public:
+ handler_add_index **add_array;
+ ha_partition_add_index(TABLE* table_arg, KEY* key_info_arg,
+ uint num_of_keys_arg)
+ : handler_add_index(table_arg, key_info_arg, num_of_keys_arg)
+ {}
+ ~ha_partition_add_index() {}
+};
+
+
+/**
Support of in-place add/drop index
+
+ @param table_arg Table to add index to
+ @param key_info Struct over the new keys to add
+ @param num_of_keys Number of keys to add
+ @param[out] add Data to be submitted with final_add_index
+
+ @return Operation status
+ @retval 0 Success
+ @retval != 0 Failure (error code returned, and all operations rolled back)
*/
+
int ha_partition::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
handler_add_index **add)
{
- handler **file;
+ uint i;
int ret= 0;
+ THD *thd= ha_thd();
+ ha_partition_add_index *part_add_index;
DBUG_ENTER("ha_partition::add_index");
- *add= new handler_add_index(table, key_info, num_of_keys);
/*
There has already been a check in fix_partition_func in mysql_alter_table
before this call, which checks for unique/primary key violations of the
partitioning function. So no need for extra check here.
*/
- for (file= m_file; *file; file++)
+
+ /*
+ This will be freed at the end of the statement.
+ And destroyed at final_add_index. (Sql_alloc does not free in delete).
+ */
+ part_add_index= new (thd->mem_root)
+ ha_partition_add_index(table_arg, key_info, num_of_keys);
+ if (!part_add_index)
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ part_add_index->add_array= (handler_add_index **)
+ thd->alloc(sizeof(void *) * m_tot_parts);
+ if (!part_add_index->add_array)
{
- handler_add_index *add_index;
- if ((ret= (*file)->add_index(table_arg, key_info, num_of_keys, &add_index)))
- goto err;
- if ((ret= (*file)->final_add_index(add_index, true)))
+ delete part_add_index;
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+
+ for (i= 0; i < m_tot_parts; i++)
+ {
+ if ((ret= m_file[i]->add_index(table_arg, key_info, num_of_keys,
+ &part_add_index->add_array[i])))
goto err;
}
+ *add= part_add_index;
DBUG_RETURN(ret);
err:
- if (file > m_file)
+ /* Rollback all prepared partitions. i - 1 .. 0 */
+ while (i)
{
- uint *key_numbers= (uint*) ha_thd()->alloc(sizeof(uint) * num_of_keys);
- uint old_num_of_keys= table_arg->s->keys;
- uint i;
- /* The newly created keys have the last id's */
- for (i= 0; i < num_of_keys; i++)
- key_numbers[i]= i + old_num_of_keys;
- if (!table_arg->key_info)
- table_arg->key_info= key_info;
- while (--file >= m_file)
- {
- (void) (*file)->prepare_drop_index(table_arg, key_numbers, num_of_keys);
- (void) (*file)->final_drop_index(table_arg);
- }
- if (table_arg->key_info == key_info)
- table_arg->key_info= NULL;
+ i--;
+ (void) m_file[i]->final_add_index(part_add_index->add_array[i], false);
}
+ delete part_add_index;
DBUG_RETURN(ret);
}
@@ -6713,37 +6745,119 @@ err:
/**
Second phase of in-place add index.
+ @param add Info from add_index
+ @param commit Should we commit or rollback the add_index operation
+
+ @return Operation status
+ @retval 0 Success
+ @retval != 0 Failure (error code returned)
+
@note If commit is false, index changes are rolled back by dropping the
added indexes. If commit is true, nothing is done as the indexes
were already made active in ::add_index()
- */
+*/
int ha_partition::final_add_index(handler_add_index *add, bool commit)
{
+ ha_partition_add_index *part_add_index;
+ uint i;
+ int ret= 0;
+
DBUG_ENTER("ha_partition::final_add_index");
- // Rollback by dropping indexes.
- if (!commit)
- {
- TABLE *table_arg= add->table;
- uint num_of_keys= add->num_of_keys;
- handler **file;
- uint *key_numbers= (uint*) ha_thd()->alloc(sizeof(uint) * num_of_keys);
- uint old_num_of_keys= table_arg->s->keys;
- uint i;
- /* The newly created keys have the last id's */
- for (i= 0; i < num_of_keys; i++)
- key_numbers[i]= i + old_num_of_keys;
- if (!table_arg->key_info)
- table_arg->key_info= add->key_info;
- for (file= m_file; *file; file++)
+
+ if (!add)
+ {
+ DBUG_ASSERT(!commit);
+ DBUG_RETURN(0);
+ }
+ part_add_index= static_cast<class ha_partition_add_index*>(add);
+
+ for (i= 0; i < m_tot_parts; i++)
+ {
+ if ((ret= m_file[i]->final_add_index(part_add_index->add_array[i], commit)))
+ goto err;
+ DBUG_EXECUTE_IF("ha_partition_fail_final_add_index", {
+ /* Simulate a failure by rollback the second partition */
+ if (m_tot_parts > 1)
+ {
+ i++;
+ m_file[i]->final_add_index(part_add_index->add_array[i], false);
+ /* Set an error that is specific to ha_partition. */
+ ret= HA_ERR_NO_PARTITION_FOUND;
+ goto err;
+ }
+ });
+ }
+ delete part_add_index;
+ DBUG_RETURN(ret);
+err:
+ uint j;
+ uint *key_numbers= NULL;
+ KEY *old_key_info= NULL;
+ uint num_of_keys= 0;
+ int error;
+
+ /* How could this happen? Needed to create a covering test case :) */
+ DBUG_ASSERT(ret == HA_ERR_NO_PARTITION_FOUND);
+
+ if (i > 0)
+ {
+ num_of_keys= part_add_index->num_of_keys;
+ key_numbers= (uint*) ha_thd()->alloc(sizeof(uint) * num_of_keys);
+ if (!key_numbers)
+ {
+ sql_print_error("Failed with error handling of adding index:\n"
+ "committing index failed, and when trying to revert "
+ "already committed partitions we failed allocating\n"
+ "memory for the index for table '%s'",
+ table_share->table_name.str);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+ old_key_info= table->key_info;
+ /*
+ Use the newly added key_info as table->key_info to remove them.
+ Note that this requires the subhandlers to use name lookup of the
+ index. They must use given table->key_info[key_number], they cannot
+ use their local view of the keys, since table->key_info only include
+ the indexes to be removed here.
+ */
+ for (j= 0; j < num_of_keys; j++)
+ key_numbers[j]= j;
+ table->key_info= part_add_index->key_info;
+ }
+
+ for (j= 0; j < m_tot_parts; j++)
+ {
+ if (j < i)
{
- (void) (*file)->prepare_drop_index(table_arg, key_numbers, num_of_keys);
- (void) (*file)->final_drop_index(table_arg);
+ /* Remove the newly added index */
+ error= m_file[j]->prepare_drop_index(table, key_numbers, num_of_keys);
+ if (error || m_file[j]->final_drop_index(table))
+ {
+ sql_print_error("Failed with error handling of adding index:\n"
+ "committing index failed, and when trying to revert "
+ "already committed partitions we failed removing\n"
+ "the index for table '%s' partition nr %d",
+ table_share->table_name.str, j);
+ }
+ }
+ else if (j > i)
+ {
+ /* Rollback non finished partitions */
+ if (m_file[j]->final_add_index(part_add_index->add_array[j], false))
+ {
+ /* How could this happen? */
+ sql_print_error("Failed with error handling of adding index:\n"
+ "Rollback of add_index failed for table\n"
+ "'%s' partition nr %d",
+ table_share->table_name.str, j);
+ }
}
- if (table_arg->key_info == add->key_info)
- table_arg->key_info= NULL;
}
- DBUG_RETURN(0);
+ if (i > 0)
+ table->key_info= old_key_info;
+ delete part_add_index;
+ DBUG_RETURN(ret);
}
int ha_partition::prepare_drop_index(TABLE *table_arg, uint *key_num,
diff --git a/sql/handler.cc b/sql/handler.cc
index 6f7cf2c3456..b6df46ed48d 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2738,7 +2738,13 @@ void handler::print_error(int error, myf errflag)
char key[MAX_KEY_LENGTH];
String str(key,sizeof(key),system_charset_info);
/* Table is opened and defined at this point */
- key_unpack(&str,table,(uint) key_nr);
+ key_unpack(&str,table,0 /* Use 0 instead of key_nr because key_nr
+ is a key number in the child FK table, not in our 'table'. See
+ Bug#12661768 UPDATE IGNORE CRASHES SERVER IF TABLE IS INNODB
+ AND IT IS PARENT FOR OTHER ONE
+ This bug gets a better fix in MySQL 5.6, but it is too risky
+ to get that in 5.1 and 5.5 (extending the handler interface
+ and adding new error message codes) */);
max_length= (MYSQL_ERRMSG_SIZE-
(uint) strlen(ER(ER_FOREIGN_DUPLICATE_KEY)));
if (str.length() >= max_length)
diff --git a/sql/handler.h b/sql/handler.h
index d1222cb56a6..cbdfc231ed5 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1163,10 +1163,12 @@ uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map);
/**
Index creation context.
- Created by handler::add_index() and freed by handler::final_add_index().
+ Created by handler::add_index() and destroyed by handler::final_add_index().
+ And finally freed at the end of the statement.
+ (Sql_alloc does not free in delete).
*/
-class handler_add_index
+class handler_add_index : public Sql_alloc
{
public:
/* Table where the indexes are added */
diff --git a/sql/item.h b/sql/item.h
index 5a40aaf2a93..223a519fa33 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1371,7 +1371,7 @@ class Item_splocal :public Item_sp_variable,
enum_field_types m_field_type;
public:
/*
- Is this variable a parameter in LIMIT clause.
+ If this variable is a parameter in LIMIT clause.
Used only during NAME_CONST substitution, to not append
NAME_CONST to the resulting query and thus not break
the slave.
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index e28221109d9..f30b6adcb93 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -2625,37 +2625,43 @@ Item_func_if::fix_fields(THD *thd, Item **ref)
}
-void
-Item_func_if::fix_length_and_dec()
+void Item_func_if::cache_type_info(Item *source)
{
- maybe_null=args[1]->maybe_null || args[2]->maybe_null;
- decimals= max(args[1]->decimals, args[2]->decimals);
- unsigned_flag=args[1]->unsigned_flag && args[2]->unsigned_flag;
+ collation.set(source->collation);
+ cached_field_type= source->field_type();
+ cached_result_type= source->result_type();
+ decimals= source->decimals;
+ max_length= source->max_length;
+ maybe_null= source->maybe_null;
+ unsigned_flag= source->unsigned_flag;
+}
- enum Item_result arg1_type=args[1]->result_type();
- enum Item_result arg2_type=args[2]->result_type();
- bool null1=args[1]->const_item() && args[1]->null_value;
- bool null2=args[2]->const_item() && args[2]->null_value;
- if (null1)
+void
+Item_func_if::fix_length_and_dec()
+{
+ // Let IF(cond, expr, NULL) and IF(cond, NULL, expr) inherit type from expr.
+ if (args[1]->type() == NULL_ITEM)
{
- cached_result_type= arg2_type;
- collation.set(args[2]->collation);
- cached_field_type= args[2]->field_type();
- max_length= args[2]->max_length;
+ cache_type_info(args[2]);
+ maybe_null= true;
+ // If both arguments are NULL, make resulting type BINARY(0).
+ if (args[2]->type() == NULL_ITEM)
+ cached_field_type= MYSQL_TYPE_STRING;
return;
}
-
- if (null2)
+ if (args[2]->type() == NULL_ITEM)
{
- cached_result_type= arg1_type;
- collation.set(args[1]->collation);
- cached_field_type= args[1]->field_type();
- max_length= args[1]->max_length;
+ cache_type_info(args[1]);
+ maybe_null= true;
return;
}
agg_result_type(&cached_result_type, args + 1, 2);
+ maybe_null= args[1]->maybe_null || args[2]->maybe_null;
+ decimals= max(args[1]->decimals, args[2]->decimals);
+ unsigned_flag=args[1]->unsigned_flag && args[2]->unsigned_flag;
+
if (cached_result_type == STRING_RESULT)
{
if (agg_arg_charsets_for_string_result(collation, args + 1, 2))
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index b6928d63ea0..a735f01fb39 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -750,6 +750,8 @@ public:
void fix_length_and_dec();
uint decimal_precision() const;
const char *func_name() const { return "if"; }
+private:
+ void cache_type_info(Item *source);
};
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 55087879b98..a0fdb3cf811 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -2952,9 +2952,12 @@ String *Item_func_conv::val_str(String *str)
from_base, &endptr, &err);
}
- ptr= longlong2str(dec, ans, to_base);
- if (str->copy(ans, (uint32) (ptr-ans), default_charset()))
- return make_empty_result();
+ if (!(ptr= longlong2str(dec, ans, to_base)) ||
+ str->copy(ans, (uint32) (ptr - ans), default_charset()))
+ {
+ null_value= 1;
+ return NULL;
+ }
return str;
}
@@ -3113,8 +3116,10 @@ String *Item_func_hex::val_str_ascii(String *str)
if ((null_value= args[0]->null_value))
return 0;
- ptr= longlong2str(dec,ans,16);
- if (str->copy(ans,(uint32) (ptr-ans), &my_charset_numeric))
+
+ if (!(ptr= longlong2str(dec, ans, 16)) ||
+ str->copy(ans,(uint32) (ptr - ans),
+ &my_charset_numeric))
return make_empty_result(); // End of memory
return str;
}
diff --git a/sql/my_decimal.h b/sql/my_decimal.h
index 548d5ea3a53..64afb9a096e 100644
--- a/sql/my_decimal.h
+++ b/sql/my_decimal.h
@@ -98,12 +98,31 @@ inline int my_decimal_int_part(uint precision, uint decimals)
class my_decimal :public decimal_t
{
+ /*
+ Several of the routines in strings/decimal.c have had buffer
+ overrun/underrun problems. These are *not* caught by valgrind.
+ To catch them, we allocate dummy fields around the buffer,
+ and test that their values do not change.
+ */
+#if !defined(DBUG_OFF)
+ int foo1;
+#endif
+
decimal_digit_t buffer[DECIMAL_BUFF_LENGTH];
+#if !defined(DBUG_OFF)
+ int foo2;
+ static const int test_value= 123;
+#endif
+
public:
my_decimal(const my_decimal &rhs) : decimal_t(rhs)
{
+#if !defined(DBUG_OFF)
+ foo1= test_value;
+ foo2= test_value;
+#endif
for (uint i= 0; i < DECIMAL_BUFF_LENGTH; i++)
buffer[i]= rhs.buffer[i];
fix_buffer_pointer();
@@ -111,6 +130,10 @@ public:
my_decimal& operator=(const my_decimal &rhs)
{
+#if !defined(DBUG_OFF)
+ foo1= test_value;
+ foo2= test_value;
+#endif
if (this == &rhs)
return *this;
decimal_t::operator=(rhs);
@@ -122,6 +145,10 @@ public:
void init()
{
+#if !defined(DBUG_OFF)
+ foo1= test_value;
+ foo2= test_value;
+#endif
len= DECIMAL_BUFF_LENGTH;
buf= buffer;
}
@@ -130,6 +157,17 @@ public:
{
init();
}
+ ~my_decimal()
+ {
+ sanity_check();
+ }
+
+ void sanity_check()
+ {
+ DBUG_ASSERT(foo1 == test_value);
+ DBUG_ASSERT(foo2 == test_value);
+ }
+
void fix_buffer_pointer() { buf= buffer; }
bool sign() const { return decimal_t::sign; }
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 730024f4389..8a6607cf343 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -9318,6 +9318,11 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
except MIN and MAX. For queries with DISTINCT, aggregate functions
are allowed.
SA5. The select list in DISTINCT queries should not contain expressions.
+ SA6. Clustered index can not be used by GROUP_MIN_MAX quick select
+ for AGG_FUNC(DISTINCT ...) optimization because cursor position is
+ never stored after a unique key lookup in the clustered index and
+ furhter index_next/prev calls can not be used. So loose index scan
+ optimization can not be used in this case.
GA1. If Q has a GROUP BY clause, then GA is a prefix of I. That is, if
G_i = A_j => i = j.
GA2. If Q has a DISTINCT clause, then there is a permutation of SA that
@@ -9804,6 +9809,13 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
Field::itMBR : Field::itRAW))
DBUG_RETURN(NULL);
+ /*
+ Check (SA6) if clustered key is used
+ */
+ if (is_agg_distinct && index == table->s->primary_key &&
+ table->file->primary_key_is_clustered())
+ DBUG_RETURN(NULL);
+
/* The query passes all tests, so construct a new TRP object. */
read_plan= new (param->mem_root)
TRP_GROUP_MIN_MAX(have_min, have_max, is_agg_distinct,
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index ec867b43ff1..5507b633f8f 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -6371,7 +6371,7 @@ ER_DATA_OUT_OF_RANGE 22003
eng "%s value is out of range in '%s'"
ER_WRONG_SPVAR_TYPE_IN_LIMIT
- eng "A variable of a non-integer type in LIMIT clause"
+ eng "A variable of a non-integer based type in LIMIT clause"
ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE
eng "Mixing self-logging and non-self-logging engines in a statement is unsafe."
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index ce713504a38..eb29590b700 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -1005,6 +1005,8 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
if ((*splocal)->limit_clause_param)
{
res|= qbuf.append_ulonglong((*splocal)->val_uint());
+ if (res)
+ break;
continue;
}
@@ -1029,19 +1031,29 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
thd->query_name_consts++;
}
- res|= qbuf.append(cur + prev_pos, query_str->length - prev_pos);
- if (res)
+ if (res ||
+ qbuf.append(cur + prev_pos, query_str->length - prev_pos))
DBUG_RETURN(TRUE);
/*
Allocate additional space at the end of the new query string for the
query_cache_send_result_to_client function.
+
+ The query buffer layout is:
+ buffer :==
+ <statement> The input statement(s)
+ '\0' Terminating null char
+ <length> Length of following current database name (size_t)
+ <db_name> Name of current database
+ <flags> Flags struct
*/
- buf_len= qbuf.length() + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE + 1;
+ buf_len= qbuf.length() + 1 + sizeof(size_t) + thd->db_length +
+ QUERY_CACHE_FLAGS_SIZE + 1;
if ((pbuf= (char *) alloc_root(thd->mem_root, buf_len)))
{
memcpy(pbuf, qbuf.ptr(), qbuf.length());
pbuf[qbuf.length()]= 0;
+ memcpy(pbuf+qbuf.length()+1, (char *) &thd->db_length, sizeof(size_t));
}
else
DBUG_RETURN(TRUE);
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 3f236dd672f..3438e60683f 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -520,23 +520,9 @@ static uchar* acl_entry_get_key(acl_entry *entry, size_t *length,
#define ACL_KEY_LENGTH (IP_ADDR_STRLEN + 1 + NAME_LEN + \
1 + USERNAME_LENGTH + 1)
-#if defined(HAVE_OPENSSL)
-/*
- Without SSL the handshake consists of one packet. This packet
- has both client capabilities and scrambled password.
- With SSL the handshake might consist of two packets. If the first
- packet (client capabilities) has CLIENT_SSL flag set, we have to
- switch to SSL and read the second packet. The scrambled password
- is in the second packet and client_capabilities field will be ignored.
- Maybe it is better to accept flags other than CLIENT_SSL from the
- second packet?
-*/
-#define SSL_HANDSHAKE_SIZE 2
-#define NORMAL_HANDSHAKE_SIZE 6
-#define MIN_HANDSHAKE_SIZE 2
-#else
-#define MIN_HANDSHAKE_SIZE 6
-#endif /* HAVE_OPENSSL && !EMBEDDED_LIBRARY */
+/** Size of the header fields of an authentication packet. */
+#define AUTH_PACKET_HEADER_SIZE_PROTO_41 32
+#define AUTH_PACKET_HEADER_SIZE_PROTO_40 5
static DYNAMIC_ARRAY acl_hosts, acl_users, acl_dbs, acl_proxy_users;
static MEM_ROOT mem, memex;
@@ -8552,37 +8538,92 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
#ifndef EMBEDDED_LIBRARY
NET *net= mpvio->net;
char *end;
-
+ bool packet_has_required_size= false;
DBUG_ASSERT(mpvio->status == MPVIO_EXT::FAILURE);
- if (pkt_len < MIN_HANDSHAKE_SIZE)
- return packet_error;
-
if (mpvio->connect_errors)
reset_host_errors(mpvio->ip);
- ulong client_capabilities= uint2korr(net->read_pos);
- if (client_capabilities & CLIENT_PROTOCOL_41)
+ uint charset_code= 0;
+ end= (char *)net->read_pos;
+ /*
+ In order to safely scan a head for '\0' string terminators
+ we must keep track of how many bytes remain in the allocated
+ buffer or we might read past the end of the buffer.
+ */
+ size_t bytes_remaining_in_packet= pkt_len;
+
+ /*
+ Peek ahead on the client capability packet and determine which version of
+ the protocol should be used.
+ */
+ if (bytes_remaining_in_packet < 2)
+ return packet_error;
+
+ mpvio->client_capabilities= uint2korr(end);
+
+ /*
+ JConnector only sends server capabilities before starting SSL
+ negotiation. The below code is patch for this.
+ */
+ if (bytes_remaining_in_packet == 4 &&
+ mpvio->client_capabilities & CLIENT_SSL)
{
- client_capabilities|= ((ulong) uint2korr(net->read_pos + 2)) << 16;
- mpvio->max_client_packet_length= uint4korr(net->read_pos + 4);
- DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8]));
- if (mpvio->charset_adapter->init_client_charset((uint) net->read_pos[8]))
+ mpvio->client_capabilities= uint4korr(end);
+ mpvio->max_client_packet_length= 0xfffff;
+ charset_code= default_charset_info->number;
+ if (mpvio->charset_adapter->init_client_charset(charset_code))
return packet_error;
- end= (char*) net->read_pos + 32;
+ goto skip_to_ssl;
}
+
+ if (mpvio->client_capabilities & CLIENT_PROTOCOL_41)
+ packet_has_required_size= bytes_remaining_in_packet >=
+ AUTH_PACKET_HEADER_SIZE_PROTO_41;
else
+ packet_has_required_size= bytes_remaining_in_packet >=
+ AUTH_PACKET_HEADER_SIZE_PROTO_40;
+
+ if (!packet_has_required_size)
+ return packet_error;
+
+ if (mpvio->client_capabilities & CLIENT_PROTOCOL_41)
{
- mpvio->max_client_packet_length= uint3korr(net->read_pos + 2);
- end= (char*) net->read_pos + 5;
+ mpvio->client_capabilities= uint4korr(end);
+ mpvio->max_client_packet_length= uint4korr(end + 4);
+ charset_code= (uint)(uchar)*(end + 8);
+ /*
+ Skip 23 remaining filler bytes which have no particular meaning.
+ */
+ end+= AUTH_PACKET_HEADER_SIZE_PROTO_41;
+ bytes_remaining_in_packet-= AUTH_PACKET_HEADER_SIZE_PROTO_41;
+ }
+ else
+ {
+ mpvio->client_capabilities= uint2korr(end);
+ mpvio->max_client_packet_length= uint3korr(end + 2);
+ end+= AUTH_PACKET_HEADER_SIZE_PROTO_40;
+ bytes_remaining_in_packet-= AUTH_PACKET_HEADER_SIZE_PROTO_40;
+ /**
+ Old clients didn't have their own charset. Instead the assumption
+ was that they used what ever the server used.
+ */
+ charset_code= default_charset_info->number;
}
- /* Disable those bits which are not supported by the client. */
- mpvio->client_capabilities&= client_capabilities;
-
+ DBUG_PRINT("info", ("client_character_set: %u", charset_code));
+ if (mpvio->charset_adapter->init_client_charset(charset_code))
+ return packet_error;
+skip_to_ssl:
#if defined(HAVE_OPENSSL)
DBUG_PRINT("info", ("client capabilities: %lu", mpvio->client_capabilities));
+
+ /*
+ If client requested SSL then we must stop parsing, try to switch to SSL,
+ and wait for the client to send a new handshake packet.
+ The client isn't expected to send any more bytes until SSL is initialized.
+ */
if (mpvio->client_capabilities & CLIENT_SSL)
{
unsigned long errptr;
@@ -8599,18 +8640,42 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
}
DBUG_PRINT("info", ("Reading user information over SSL layer"));
- pkt_len= my_net_read(net);
- if (pkt_len == packet_error || pkt_len < NORMAL_HANDSHAKE_SIZE)
+ if ((pkt_len= my_net_read(net)) == packet_error)
{
DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)",
pkt_len));
return packet_error;
}
- }
-#endif
+ /*
+ A new packet was read and the statistics reflecting the remaining bytes
+ in the packet must be updated.
+ */
+ bytes_remaining_in_packet= pkt_len;
- if (end > (char *)net->read_pos + pkt_len)
- return packet_error;
+ /*
+ After the SSL handshake is performed the client resends the handshake
+ packet but because of legacy reasons we chose not to parse the packet
+ fields a second time and instead only assert the length of the packet.
+ */
+ if (mpvio->client_capabilities & CLIENT_PROTOCOL_41)
+ {
+ packet_has_required_size= bytes_remaining_in_packet >=
+ AUTH_PACKET_HEADER_SIZE_PROTO_41;
+ end= (char *)net->read_pos + AUTH_PACKET_HEADER_SIZE_PROTO_41;
+ bytes_remaining_in_packet -= AUTH_PACKET_HEADER_SIZE_PROTO_41;
+ }
+ else
+ {
+ packet_has_required_size= bytes_remaining_in_packet >=
+ AUTH_PACKET_HEADER_SIZE_PROTO_40;
+ end= (char *)net->read_pos + AUTH_PACKET_HEADER_SIZE_PROTO_40;
+ bytes_remaining_in_packet -= AUTH_PACKET_HEADER_SIZE_PROTO_40;
+ }
+
+ if (!packet_has_required_size)
+ return packet_error;
+ }
+#endif /* HAVE_OPENSSL */
if ((mpvio->client_capabilities & CLIENT_TRANSACTIONS) &&
opt_using_transactions)
@@ -8634,7 +8699,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
we must keep track of how many bytes remain in the allocated
buffer or we might read past the end of the buffer.
*/
- size_t bytes_remaining_in_packet= pkt_len - (end - (char *)net->read_pos);
+ bytes_remaining_in_packet= pkt_len - (end - (char *)net->read_pos);
size_t user_len;
char *user= get_string(&end, &bytes_remaining_in_packet, &user_len);
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 47628f1c590..56ce6607c85 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1278,8 +1278,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
/* Key is query + database + flag */
if (thd->db_length)
{
- memcpy(thd->query() + thd->query_length() + 1, thd->db,
- thd->db_length);
+ memcpy(thd->query() + thd->query_length() + 1 + sizeof(size_t),
+ thd->db, thd->db_length);
DBUG_PRINT("qcache", ("database: %s length: %u",
thd->db, (unsigned) thd->db_length));
}
@@ -1288,7 +1288,7 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
DBUG_PRINT("qcache", ("No active database"));
}
tot_length= thd->query_length() + thd->db_length + 1 +
- QUERY_CACHE_FLAGS_SIZE;
+ sizeof(size_t) + QUERY_CACHE_FLAGS_SIZE;
/*
We should only copy structure (don't use it location directly)
because of alignment issue
@@ -1506,7 +1506,29 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
goto err;
}
}
+ {
+ /*
+ We have allocated buffer space (in alloc_query) to hold the
+ SQL statement(s) + the current database name + a flags struct.
+ If the database name has changed during execution, which might
+ happen if there are multiple statements, we need to make
+ sure the new current database has a name with the same length
+ as the previous one.
+ */
+ size_t db_len;
+ memcpy((char *) &db_len, (sql + query_length + 1), sizeof(size_t));
+ if (thd->db_length != db_len)
+ {
+ /*
+ We should probably reallocate the buffer in this case,
+ but for now we just leave it uncached
+ */
+ DBUG_PRINT("qcache",
+ ("Current database has changed since start of query"));
+ goto err;
+ }
+ }
/*
Try to obtain an exclusive lock on the query cache. If the cache is
disabled or if a full cache flush is in progress, the attempt to
@@ -1522,10 +1544,12 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
Query_cache_block *query_block;
- tot_length= query_length + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE;
+ tot_length= query_length + 1 + sizeof(size_t) +
+ thd->db_length + QUERY_CACHE_FLAGS_SIZE;
+
if (thd->db_length)
{
- memcpy(sql+query_length+1, thd->db, thd->db_length);
+ memcpy(sql + query_length + 1 + sizeof(size_t), thd->db, thd->db_length);
DBUG_PRINT("qcache", ("database: '%s' length: %u",
thd->db, (unsigned)thd->db_length));
}
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index df27362633f..869a5916339 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2606,7 +2606,47 @@ void st_select_lex_unit::set_limit(st_select_lex *sl)
ulonglong val;
DBUG_ASSERT(! thd->stmt_arena->is_stmt_prepare());
- val= sl->select_limit ? sl->select_limit->val_uint() : HA_POS_ERROR;
+ if (sl->select_limit)
+ {
+ Item *item = sl->select_limit;
+ /*
+ fix_fields() has not been called for sl->select_limit. That's due to the
+ historical reasons -- this item could be only of type Item_int, and
+ Item_int does not require fix_fields(). Thus, fix_fields() was never
+ called for sl->select_limit.
+
+ Some time ago, Item_splocal was also allowed for LIMIT / OFFSET clauses.
+ However, the fix_fields() behavior was not updated, which led to a crash
+ in some cases.
+
+ There is no single place where to call fix_fields() for LIMIT / OFFSET
+ items during the fix-fields-phase. Thus, for the sake of readability,
+ it was decided to do it here, on the evaluation phase (which is a
+ violation of design, but we chose the lesser of two evils).
+
+ We can call fix_fields() here, because sl->select_limit can be of two
+ types only: Item_int and Item_splocal. Item_int::fix_fields() is trivial,
+ and Item_splocal::fix_fields() (or rather Item_sp_variable::fix_fields())
+ has the following specific:
+ 1) it does not affect other items;
+ 2) it does not fail.
+
+ Nevertheless DBUG_ASSERT was added to catch future changes in
+ fix_fields() implementation. Also added runtime check against a result
+ of fix_fields() in order to handle error condition in non-debug build.
+ */
+ bool fix_fields_successful= true;
+ if (!item->fixed)
+ {
+ fix_fields_successful= !item->fix_fields(thd, NULL);
+
+ DBUG_ASSERT(fix_fields_successful);
+ }
+ val= fix_fields_successful ? item->val_uint() : HA_POS_ERROR;
+ }
+ else
+ val= HA_POS_ERROR;
+
select_limit_val= (ha_rows)val;
#ifndef BIG_TABLES
/*
@@ -2616,7 +2656,22 @@ void st_select_lex_unit::set_limit(st_select_lex *sl)
if (val != (ulonglong)select_limit_val)
select_limit_val= HA_POS_ERROR;
#endif
- val= sl->offset_limit ? sl->offset_limit->val_uint() : ULL(0);
+ if (sl->offset_limit)
+ {
+ Item *item = sl->offset_limit;
+ // see comment for sl->select_limit branch.
+ bool fix_fields_successful= true;
+ if (!item->fixed)
+ {
+ fix_fields_successful= !item->fix_fields(thd, NULL);
+
+ DBUG_ASSERT(fix_fields_successful);
+ }
+ val= fix_fields_successful ? item->val_uint() : HA_POS_ERROR;
+ }
+ else
+ val= ULL(0);
+
offset_limit_cnt= (ha_rows)val;
#ifndef BIG_TABLES
/* Check for truncation. */
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index e3da697ec78..32ccb8f2c5f 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -536,6 +536,8 @@ static void handle_bootstrap_impl(THD *thd)
query= (char *) thd->memdup_w_gap(buff, length + 1,
thd->db_length + 1 +
QUERY_CACHE_FLAGS_SIZE);
+ size_t db_len= 0;
+ memcpy(query + length + 1, (char *) &db_len, sizeof(size_t));
thd->set_query_and_id(query, length, thd->charset(), next_query_id());
DBUG_PRINT("query",("%-.4096s",thd->query()));
#if defined(ENABLED_PROFILING)
@@ -1218,6 +1220,14 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
case COM_REFRESH:
{
int not_used;
+
+ /*
+ Initialize thd->lex since it's used in many base functions, such as
+ open_tables(). Otherwise, it remains uninitialized and may cause crash
+ during execution of COM_REFRESH.
+ */
+ lex_start(thd);
+
status_var_increment(thd->status_var.com_stat[SQLCOM_FLUSH]);
ulong options= (ulong) (uchar) packet[0];
if (trans_commit_implicit(thd))
@@ -1629,13 +1639,30 @@ bool alloc_query(THD *thd, const char *packet, uint packet_length)
pos--;
packet_length--;
}
- /* We must allocate some extra memory for query cache */
+ /* We must allocate some extra memory for query cache
+
+ The query buffer layout is:
+ buffer :==
+ <statement> The input statement(s)
+ '\0' Terminating null char (1 byte)
+ <length> Length of following current database name (size_t)
+ <db_name> Name of current database
+ <flags> Flags struct
+ */
if (! (query= (char*) thd->memdup_w_gap(packet,
packet_length,
- 1 + thd->db_length +
+ 1 + sizeof(size_t) + thd->db_length +
QUERY_CACHE_FLAGS_SIZE)))
return TRUE;
query[packet_length]= '\0';
+ /*
+ Space to hold the name of the current database is allocated. We
+ also store this length, in case current database is changed during
+ execution. We might need to reallocate the 'query' buffer
+ */
+ char *len_pos = (query + packet_length + 1);
+ memcpy(len_pos, (char *) &thd->db_length, sizeof(size_t));
+
thd->set_query(query, packet_length);
/* Reclaim some memory */
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 089077bb89c..b0665a9ea6b 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -118,7 +118,14 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
if (options & REFRESH_ERROR_LOG)
if (flush_error_log())
+ {
+ /*
+ When flush_error_log() failed, my_error() has not been called.
+ So, we have to do it here to keep the protocol.
+ */
+ my_error(ER_UNKNOWN_ERROR, MYF(0));
result= 1;
+ }
if ((options & REFRESH_SLOW_LOG) && opt_slow_log)
logger.flush_slow_log();
@@ -200,7 +207,13 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
if (close_cached_tables(thd, tables,
((options & REFRESH_FAST) ? FALSE : TRUE),
thd->variables.lock_wait_timeout))
+ {
+ /*
+ NOTE: my_error() has been already called by reopen_tables() within
+ close_cached_tables().
+ */
result= 1;
+ }
if (thd->global_read_lock.make_global_read_lock_block_commit(thd)) // Killed
{
@@ -256,7 +269,13 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
((options & REFRESH_FAST) ? FALSE : TRUE),
(thd ? thd->variables.lock_wait_timeout :
LONG_TIMEOUT)))
+ {
+ /*
+ NOTE: my_error() has been already called by reopen_tables() within
+ close_cached_tables().
+ */
result= 1;
+ }
}
my_dbopt_cleanup();
}
@@ -273,7 +292,8 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
tmp_write_to_binlog= 0;
if (reset_master(thd))
{
- result=1;
+ /* NOTE: my_error() has been already called by reset_master(). */
+ result= 1;
}
}
#endif
@@ -281,7 +301,10 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
if (options & REFRESH_DES_KEY_FILE)
{
if (des_key_file && load_des_key_file(des_key_file))
- result= 1;
+ {
+ /* NOTE: my_error() has been already called by load_des_key_file(). */
+ result= 1;
+ }
}
#endif
#ifdef HAVE_REPLICATION
@@ -290,7 +313,10 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
tmp_write_to_binlog= 0;
mysql_mutex_lock(&LOCK_active_mi);
if (reset_slave(thd, active_mi))
- result=1;
+ {
+ /* NOTE: my_error() has been already called by reset_slave(). */
+ result= 1;
+ }
mysql_mutex_unlock(&LOCK_active_mi);
}
#endif
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index ef7d35ecfaa..00c85d8eb43 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -731,7 +731,15 @@ impossible position";
/Alfranio
*/
if (binlog_can_be_corrupted)
- sql_print_information("The binlog may be corrupted.");
+ {
+ /*
+ Don't try to print out warning messages because this generates
+ erroneous messages in the error log and causes performance
+ problems.
+
+ /Alfranio
+ */
+ }
pos = my_b_tell(&log);
if (RUN_HOOK(binlog_transmit, before_send_event,
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 6051aa028c7..1d26d2b113c 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1281,9 +1281,41 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
if (!table->prelocking_placeholder &&
(old_lex->sql_command == SQLCOM_SELECT && old_lex->describe))
{
- if (check_table_access(thd, SELECT_ACL, view_tables, FALSE,
- UINT_MAX, TRUE) &&
- check_table_access(thd, SHOW_VIEW_ACL, table, FALSE, UINT_MAX, TRUE))
+ /*
+ The user we run EXPLAIN as (either the connected user who issued
+ the EXPLAIN statement, or the definer of a SUID stored routine
+ which contains the EXPLAIN) should have both SHOW_VIEW_ACL and
+ SELECT_ACL on the view being opened as well as on all underlying
+ views since EXPLAIN will disclose their structure. This user also
+ should have SELECT_ACL on all underlying tables of the view since
+ this EXPLAIN will disclose information about the number of rows in it.
+
+ To perform this privilege check we create auxiliary TABLE_LIST object
+ for the view in order a) to avoid trashing "table->grant" member for
+      original table list element, whose contents can be important at a later
+ stage for column-level privilege checking b) get TABLE_LIST object
+ with "security_ctx" member set to 0, i.e. forcing check_table_access()
+ to use active user's security context.
+
+ There is no need for creating similar copies of TABLE_LIST elements
+ for underlying tables since they just have been constructed and thus
+ have TABLE_LIST::security_ctx == 0 and fresh TABLE_LIST::grant member.
+
+ Finally at this point making sure we have SHOW_VIEW_ACL on the views
+ will suffice as we implicitly require SELECT_ACL anyway.
+ */
+
+ TABLE_LIST view_no_suid;
+ bzero(static_cast<void *>(&view_no_suid), sizeof(TABLE_LIST));
+ view_no_suid.db= table->db;
+ view_no_suid.table_name= table->table_name;
+
+ DBUG_ASSERT(view_tables == NULL || view_tables->security_ctx == NULL);
+
+ if (check_table_access(thd, SELECT_ACL, view_tables,
+ FALSE, UINT_MAX, TRUE) ||
+ check_table_access(thd, SHOW_VIEW_ACL, &view_no_suid,
+ FALSE, UINT_MAX, TRUE))
{
my_message(ER_VIEW_NO_EXPLAIN, ER(ER_VIEW_NO_EXPLAIN), MYF(0));
goto err;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 145c6c86714..87c3ae5b129 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -9965,7 +9965,8 @@ limit_option:
}
splocal->limit_clause_param= TRUE;
$$= splocal;
- } | param_marker
+ }
+ | param_marker
{
((Item_param *) $1)->limit_clause_param= TRUE;
}