path: root/sql
Diffstat (limited to 'sql')
-rw-r--r--  sql/examples/ha_archive.cc | 3
-rw-r--r--  sql/field.cc | 65
-rw-r--r--  sql/field.h | 1
-rw-r--r--  sql/ha_innodb.cc | 63
-rw-r--r--  sql/ha_myisam.h | 1
-rw-r--r--  sql/ha_ndbcluster.cc | 107
-rw-r--r--  sql/ha_ndbcluster.h | 2
-rw-r--r--  sql/handler.cc | 4
-rw-r--r--  sql/handler.h | 1
-rw-r--r--  sql/item.h | 6
-rw-r--r--  sql/item_cmpfunc.cc | 62
-rw-r--r--  sql/item_cmpfunc.h | 24
-rw-r--r--  sql/item_func.cc | 4
-rw-r--r--  sql/item_strfunc.cc | 11
-rw-r--r--  sql/item_subselect.cc | 42
-rw-r--r--  sql/item_subselect.h | 20
-rw-r--r--  sql/item_sum.cc | 15
-rw-r--r--  sql/item_sum.h | 8
-rw-r--r--  sql/lock.cc | 65
-rw-r--r--  sql/log.cc | 14
-rw-r--r--  sql/log_event.cc | 30
-rw-r--r--  sql/log_event.h | 20
-rw-r--r--  sql/mysql_priv.h | 20
-rw-r--r--  sql/mysqld.cc | 92
-rw-r--r--  sql/net_serv.cc | 4
-rw-r--r--  sql/opt_range.h | 2
-rw-r--r--  sql/set_var.cc | 9
-rw-r--r--  sql/slave.cc | 5
-rw-r--r--  sql/slave.h | 2
-rw-r--r--  sql/sql_acl.cc | 4
-rw-r--r--  sql/sql_base.cc | 4
-rw-r--r--  sql/sql_class.cc | 3
-rw-r--r--  sql/sql_class.h | 12
-rw-r--r--  sql/sql_db.cc | 44
-rw-r--r--  sql/sql_delete.cc | 6
-rw-r--r--  sql/sql_insert.cc | 63
-rw-r--r--  sql/sql_lex.cc | 3
-rw-r--r--  sql/sql_lex.h | 2
-rw-r--r--  sql/sql_parse.cc | 165
-rw-r--r--  sql/sql_prepare.cc | 13
-rw-r--r--  sql/sql_rename.cc | 2
-rw-r--r--  sql/sql_select.cc | 7
-rw-r--r--  sql/sql_show.cc | 9
-rw-r--r--  sql/sql_table.cc | 159
-rw-r--r--  sql/sql_union.cc | 2
-rw-r--r--  sql/sql_update.cc | 6
-rw-r--r--  sql/sql_yacc.yy | 31
-rw-r--r--  sql/strfunc.cc | 20
-rw-r--r--  sql/table.cc | 26
-rw-r--r--  sql/unireg.cc | 22
50 files changed, 937 insertions, 368 deletions
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index 6fbfb3f9f9d..771bf91d118 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -528,6 +528,7 @@ error:
int ha_archive::write_row(byte * buf)
{
z_off_t written;
+ Field_blob **field;
DBUG_ENTER("ha_archive::write_row");
statistic_increment(ha_write_count,&LOCK_status);
@@ -543,7 +544,7 @@ int ha_archive::write_row(byte * buf)
We should probably mark the table as damaged if the record is written
but the blob fails.
*/
- for (Field_blob **field=table->blob_field ; *field ; field++)
+ for (field= table->blob_field ; *field ; field++)
{
char *ptr;
uint32 size= (*field)->get_length();
diff --git a/sql/field.cc b/sql/field.cc
index 72c27b6adf9..b27a319b00e 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1860,9 +1860,9 @@ int Field_long::store(double nr)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
- else if (nr > (double) (ulong) ~0L)
+ else if (nr > (double) UINT_MAX32)
{
- res=(int32) (uint32) ~0L;
+ res= UINT_MAX32;
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
@@ -2145,7 +2145,7 @@ int Field_longlong::store(double nr)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
- else if (nr >= (double) LONGLONG_MAX)
+ else if (nr >= (double) (ulonglong) LONGLONG_MAX)
{
res=(longlong) LONGLONG_MAX;
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
@@ -5529,8 +5529,7 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
}
/* Remove end space */
- while (length > 0 && my_isspace(system_charset_info,from[length-1]))
- length--;
+ length= field_charset->cset->lengthsp(field_charset, from, length);
uint tmp=find_type2(typelib, from, length, field_charset);
if (!tmp)
{
@@ -5632,7 +5631,7 @@ String *Field_enum::val_str(String *val_buffer __attribute__((unused)),
val_ptr->set("", 0, field_charset);
else
val_ptr->set((const char*) typelib->type_names[tmp-1],
- (uint) strlen(typelib->type_names[tmp-1]),
+ typelib->type_lengths[tmp-1],
field_charset);
return val_ptr;
}
@@ -5669,13 +5668,14 @@ void Field_enum::sql_type(String &res) const
res.append("enum(");
bool flag=0;
- for (const char **pos= typelib->type_names; *pos; pos++)
+ uint *len= typelib->type_lengths;
+ for (const char **pos= typelib->type_names; *pos; pos++, len++)
{
uint dummy_errors;
if (flag)
res.append(',');
/* convert to res.charset() == utf8, then quote */
- enum_item.copy(*pos, strlen(*pos), charset(), res.charset(), &dummy_errors);
+ enum_item.copy(*pos, *len, charset(), res.charset(), &dummy_errors);
append_unescaped(&res, enum_item.ptr(), enum_item.length());
flag= 1;
}
@@ -5754,14 +5754,15 @@ String *Field_set::val_str(String *val_buffer,
uint bitnr=0;
val_buffer->length(0);
+ val_buffer->set_charset(field_charset);
while (tmp && bitnr < (uint) typelib->count)
{
if (tmp & 1)
{
if (val_buffer->length())
- val_buffer->append(field_separator);
+ val_buffer->append(&field_separator, 1, &my_charset_latin1);
String str(typelib->type_names[bitnr],
- (uint) strlen(typelib->type_names[bitnr]),
+ typelib->type_lengths[bitnr],
field_charset);
val_buffer->append(str);
}
@@ -5781,13 +5782,14 @@ void Field_set::sql_type(String &res) const
res.append("set(");
bool flag=0;
- for (const char **pos= typelib->type_names; *pos; pos++)
+ uint *len= typelib->type_lengths;
+ for (const char **pos= typelib->type_names; *pos; pos++, len++)
{
uint dummy_errors;
if (flag)
res.append(',');
/* convert to res.charset() == utf8, then quote */
- set_item.copy(*pos, strlen(*pos), charset(), res.charset(), &dummy_errors);
+ set_item.copy(*pos, *len, charset(), res.charset(), &dummy_errors);
append_unescaped(&res, set_item.ptr(), set_item.length());
flag= 1;
}
@@ -5842,25 +5844,24 @@ bool Field_num::eq_def(Field *field)
void create_field::create_length_to_internal_length(void)
{
- switch (sql_type)
- {
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_VAR_STRING:
- case MYSQL_TYPE_STRING:
- length*= charset->mbmaxlen;
- pack_length= calc_pack_length(sql_type == FIELD_TYPE_VAR_STRING ?
- FIELD_TYPE_STRING : sql_type, length);
- break;
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- length*= charset->mbmaxlen;
- break;
- default:
- /* do nothing */
- break;
+ switch (sql_type) {
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_VAR_STRING:
+ case MYSQL_TYPE_STRING:
+ length*= charset->mbmaxlen;
+ pack_length= calc_pack_length(sql_type == FIELD_TYPE_VAR_STRING ?
+ FIELD_TYPE_STRING : sql_type, length);
+ break;
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
+ length*= charset->mbmaxlen;
+ break;
+ default:
+ /* do nothing */
+ break;
}
}
@@ -6085,6 +6086,8 @@ create_field::create_field(Field *old_field,Field *orig_field)
}
length=(length+charset->mbmaxlen-1)/charset->mbmaxlen; // QQ: Probably not needed
break;
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
case FIELD_TYPE_STRING:
case FIELD_TYPE_VAR_STRING:
length=(length+charset->mbmaxlen-1)/charset->mbmaxlen;
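The length conversions above switch between character counts and byte counts through the charset's mbmaxlen: create_length_to_internal_length() multiplies, while create_field::create_field() divides rounding up. A minimal standalone sketch of that arithmetic (plain C++, not MySQL's CHARSET_INFO API; the utf8 mbmaxlen of 3 matches the 4.1 utf8 implementation):

#include <cstdio>

// Internal (byte) length of a string column declared with char_length
// characters, for a charset whose widest character takes mbmaxlen bytes.
static unsigned internal_length(unsigned char_length, unsigned mbmaxlen)
{
  return char_length * mbmaxlen;               // as in create_length_to_internal_length()
}

// Reverse direction, rounding up, as done in create_field::create_field().
static unsigned char_length_from_bytes(unsigned byte_length, unsigned mbmaxlen)
{
  return (byte_length + mbmaxlen - 1) / mbmaxlen;
}

int main()
{
  printf("%u\n", internal_length(10, 3));        // 30 bytes reserved for 10 utf8 chars
  printf("%u\n", char_length_from_bytes(30, 3)); // back to 10 characters
  return 0;
}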
diff --git a/sql/field.h b/sql/field.h
index 8887da1dc0f..bb999222381 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1198,6 +1198,7 @@ public:
uint decimals,flags,pack_length;
Field::utype unireg_check;
TYPELIB *interval; // Which interval to use
+ List<String> interval_list;
CHARSET_INFO *charset;
Field::geometry_type geom_type;
Field *field; // For alter table
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 2515b4956d0..cc69762cbdb 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -2324,20 +2324,58 @@ ha_innobase::write_row(
position in the source table need not be adjusted after the
intermediate COMMIT, since writes by other transactions are
being blocked by a MySQL table lock TL_WRITE_ALLOW_READ. */
- ut_a(prebuilt->trx->mysql_n_tables_locked == 2);
- ut_a(UT_LIST_GET_LEN(prebuilt->trx->trx_locks) >= 2);
- dict_table_t* table = lock_get_ix_table(
- UT_LIST_GET_FIRST(prebuilt->trx->trx_locks));
+
+ dict_table_t* src_table;
+ ibool mode;
+
num_write_row = 0;
+
/* Commit the transaction. This will release the table
locks, so they have to be acquired again. */
- innobase_commit(user_thd, prebuilt->trx);
- /* Note that this transaction is still active. */
- user_thd->transaction.all.innodb_active_trans = 1;
- /* Re-acquire the IX table lock on the source table. */
- row_lock_table_for_mysql(prebuilt, table);
- /* We will need an IX lock on the destination table. */
- prebuilt->sql_stat_start = TRUE;
+
+ /* Altering an InnoDB table */
+ /* Get the source table. */
+ src_table = lock_get_src_table(
+ prebuilt->trx, prebuilt->table, &mode);
+ if (!src_table) {
+ no_commit:
+ /* Unknown situation: do not commit */
+ /*
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB error: ALTER TABLE is holding lock"
+ " on %lu tables!\n",
+ prebuilt->trx->mysql_n_tables_locked);
+ */
+ ;
+ } else if (src_table == prebuilt->table) {
+ /* Source table is not in InnoDB format:
+ no need to re-acquire locks on it. */
+
+ /* Altering to InnoDB format */
+ innobase_commit(user_thd, prebuilt->trx);
+ /* Note that this transaction is still active. */
+ user_thd->transaction.all.innodb_active_trans = 1;
+ /* We will need an IX lock on the destination table. */
+ prebuilt->sql_stat_start = TRUE;
+ } else {
+ /* Ensure that there are no other table locks than
+ LOCK_IX and LOCK_AUTO_INC on the destination table. */
+ if (!lock_is_table_exclusive(prebuilt->table,
+ prebuilt->trx)) {
+ goto no_commit;
+ }
+
+ /* Commit the transaction. This will release the table
+ locks, so they have to be acquired again. */
+ innobase_commit(user_thd, prebuilt->trx);
+ /* Note that this transaction is still active. */
+ user_thd->transaction.all.innodb_active_trans = 1;
+ /* Re-acquire the table lock on the source table. */
+ row_lock_table_for_mysql(prebuilt, src_table, mode);
+ /* We will need an IX lock on the destination table. */
+ prebuilt->sql_stat_start = TRUE;
+ }
}
num_write_row++;
@@ -5015,7 +5053,8 @@ ha_innobase::external_lock(
if (thd->in_lock_tables &&
thd->variables.innodb_table_locks) {
ulint error;
- error = row_lock_table_for_mysql(prebuilt, 0);
+ error = row_lock_table_for_mysql(prebuilt,
+ NULL, LOCK_TABLE_EXP);
if (error != DB_SUCCESS) {
error = convert_error_code_to_mysql(
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 6fde84d6f6f..972d6b18e19 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -81,7 +81,6 @@ class ha_myisam: public handler
int index_first(byte * buf);
int index_last(byte * buf);
int index_next_same(byte *buf, const byte *key, uint keylen);
- int index_end() { ft_handler=NULL; return 0; }
int ft_init()
{
if (!ft_handler)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index cf7b66c5f03..775ab96d1a1 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1197,9 +1197,10 @@ int ha_ndbcluster::unique_index_read(const byte *key,
for (i= 0; key_part != end; key_part++, i++)
{
- if (set_ndb_key(op, key_part->field, i, key_ptr))
+ if (set_ndb_key(op, key_part->field, i,
+ key_part->null_bit ? key_ptr + 1 : key_ptr))
ERR_RETURN(trans->getNdbError());
- key_ptr+= key_part->length;
+ key_ptr+= key_part->store_length;
}
// Get non-index attribute(s)
@@ -2287,6 +2288,28 @@ int ha_ndbcluster::index_end()
DBUG_RETURN(close_scan());
}
+/**
+ * Check if key contains null
+ */
+static
+int
+check_null_in_key(const KEY* key_info, const byte *key, uint key_len)
+{
+ KEY_PART_INFO *curr_part, *end_part;
+ const byte* end_ptr = key + key_len;
+ curr_part= key_info->key_part;
+ end_part= curr_part + key_info->key_parts;
+
+
+ for (; curr_part != end_part && key < end_ptr; curr_part++)
+ {
+ if(curr_part->null_bit && *key)
+ return 1;
+
+ key += curr_part->store_length;
+ }
+ return 0;
+}
int ha_ndbcluster::index_read(byte *buf,
const byte *key, uint key_len,
@@ -2304,6 +2327,8 @@ int ha_ndbcluster::index_read(byte *buf,
case PRIMARY_KEY_INDEX:
if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len)
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
DBUG_RETURN(pk_read(key, key_len, buf));
}
else if (type == PRIMARY_KEY_INDEX)
@@ -2313,8 +2338,11 @@ int ha_ndbcluster::index_read(byte *buf,
break;
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
- if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len)
+ if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len &&
+ !check_null_in_key(key_info, key, key_len))
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
DBUG_RETURN(unique_index_read(key, key_len, buf));
}
else if (type == UNIQUE_INDEX)
@@ -2418,6 +2446,8 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT)
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
error= pk_read(start_key->key, start_key->length, buf);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
}
@@ -2425,10 +2455,12 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
key_info= table->key_info + active_index;
- if (start_key &&
- start_key->length == key_info->key_length &&
- start_key->flag == HA_READ_KEY_EXACT)
+ if (start_key && start_key->length == key_info->key_length &&
+ start_key->flag == HA_READ_KEY_EXACT &&
+ !check_null_in_key(key_info, start_key->key, start_key->length))
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
error= unique_index_read(start_key->key, start_key->length, buf);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
}
@@ -3595,9 +3627,13 @@ int ha_ndbcluster::create_index(const char *name,
int ha_ndbcluster::rename_table(const char *from, const char *to)
{
+ NDBDICT *dict;
char new_tabname[FN_HEADLEN];
+ const NDBTAB *orig_tab;
+ int result;
DBUG_ENTER("ha_ndbcluster::rename_table");
+ DBUG_PRINT("info", ("Renaming %s to %s", from, to));
set_dbname(from);
set_tabname(from);
set_tabname(to, new_tabname);
@@ -3605,14 +3641,20 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
if (check_ndb_connection())
DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION);
+ dict= m_ndb->getDictionary();
+ if (!(orig_tab= dict->getTable(m_tabname)))
+ ERR_RETURN(dict->getNdbError());
- int result= alter_table_name(m_tabname, new_tabname);
- if (result == 0)
+ m_table= (void *)orig_tab;
+ // Change current database to that of target table
+ set_dbname(to);
+ m_ndb->setDatabaseName(m_dbname);
+ if (!(result= alter_table_name(new_tabname)))
{
- set_tabname(to);
- handler::rename_table(from, to);
+ // Rename .ndb file
+ result= handler::rename_table(from, to);
}
-
+
DBUG_RETURN(result);
}
@@ -3621,19 +3663,16 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
Rename a table in NDB Cluster using alter table
*/
-int ha_ndbcluster::alter_table_name(const char *from, const char *to)
+int ha_ndbcluster::alter_table_name(const char *to)
{
- NDBDICT *dict= m_ndb->getDictionary();
- const NDBTAB *orig_tab;
+ NDBDICT * dict= m_ndb->getDictionary();
+ const NDBTAB *orig_tab= (const NDBTAB *) m_table;
+ int ret;
DBUG_ENTER("alter_table_name_table");
- DBUG_PRINT("enter", ("Renaming %s to %s", from, to));
- if (!(orig_tab= dict->getTable(from)))
- ERR_RETURN(dict->getNdbError());
-
- NdbDictionary::Table copy_tab= dict->getTableForAlteration(from);
- copy_tab.setName(to);
- if (dict->alterTable(copy_tab) != 0)
+ NdbDictionary::Table new_tab= *orig_tab;
+ new_tab.setName(to);
+ if (dict->alterTable(new_tab) != 0)
ERR_RETURN(dict->getNdbError());
m_table= NULL;
@@ -3656,7 +3695,7 @@ int ha_ndbcluster::delete_table(const char *name)
if (check_ndb_connection())
DBUG_RETURN(HA_ERR_NO_CONNECTION);
-
+ // Remove .ndb file
handler::delete_table(name);
DBUG_RETURN(drop_table());
}
@@ -3912,6 +3951,7 @@ Ndb* check_ndb_in_thd(THD* thd)
}
+
int ha_ndbcluster::check_ndb_connection()
{
THD* thd= current_thd;
@@ -4199,7 +4239,7 @@ bool ndbcluster_init()
new Ndb_cluster_connection(ndbcluster_connectstring)) == 0)
{
DBUG_PRINT("error",("Ndb_cluster_connection(%s)",ndbcluster_connectstring));
- DBUG_RETURN(TRUE);
+ goto ndbcluster_init_error;
}
// Create a Ndb object to open the connection to NDB
@@ -4208,25 +4248,33 @@ bool ndbcluster_init()
if (g_ndb->init() != 0)
{
ERR_PRINT (g_ndb->getNdbError());
- DBUG_RETURN(TRUE);
+ goto ndbcluster_init_error;
}
- if ((res= g_ndb_cluster_connection->connect(1)) == 0)
+ if ((res= g_ndb_cluster_connection->connect(0,0,0)) == 0)
{
+ DBUG_PRINT("info",("NDBCLUSTER storage engine at %s on port %d",
+ g_ndb_cluster_connection->get_connected_host(),
+ g_ndb_cluster_connection->get_connected_port()));
g_ndb->waitUntilReady(10);
}
else if(res == 1)
{
if (g_ndb_cluster_connection->start_connect_thread()) {
DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()"));
- DBUG_RETURN(TRUE);
+ goto ndbcluster_init_error;
+ }
+ {
+ char buf[1024];
+ DBUG_PRINT("info",("NDBCLUSTER storage engine not started, will connect using %s",
+ g_ndb_cluster_connection->get_connectstring(buf,sizeof(buf))));
}
}
else
{
DBUG_ASSERT(res == -1);
DBUG_PRINT("error", ("permanent error"));
- DBUG_RETURN(TRUE);
+ goto ndbcluster_init_error;
}
(void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
@@ -4236,9 +4284,12 @@ bool ndbcluster_init()
ndbcluster_inited= 1;
#ifdef USE_DISCOVER_ON_STARTUP
if (ndb_discover_tables() != 0)
- DBUG_RETURN(TRUE);
+ goto ndbcluster_init_error;
#endif
DBUG_RETURN(FALSE);
+ ndbcluster_init_error:
+ ndbcluster_end();
+ DBUG_RETURN(TRUE);
}
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index f6c712620c1..2d7b14b2311 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -148,7 +148,7 @@ class ha_ndbcluster: public handler
uint8 table_cache_type();
private:
- int alter_table_name(const char *from, const char *to);
+ int alter_table_name(const char *to);
int drop_table();
int create_index(const char *name, KEY *key_info, bool unique);
int create_ordered_index(const char *name, KEY *key_info);
diff --git a/sql/handler.cc b/sql/handler.cc
index 7ddd7b80a34..530c5f137ec 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -723,7 +723,7 @@ int ha_rollback_to_savepoint(THD *thd, char *savepoint_name)
if (unlikely((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) &&
my_b_tell(&thd->transaction.trans_log)))
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE);
if (mysql_bin_log.write(&qinfo))
error= 1;
}
@@ -761,7 +761,7 @@ int ha_savepoint(THD *thd, char *savepoint_name)
innobase_savepoint(thd,savepoint_name,
my_b_tell(&thd->transaction.trans_log));
#endif
- Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE);
if (mysql_bin_log.write(&qinfo))
error= 1;
}
diff --git a/sql/handler.h b/sql/handler.h
index 252861e5c37..245defe61e0 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -372,6 +372,7 @@ public:
virtual int read_range_next();
int compare_key(key_range *range);
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
+ void ft_end() { ft_handler=NULL; }
virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key,
uint keylen)
{ return NULL; }
diff --git a/sql/item.h b/sql/item.h
index ccb0fda1c49..3c4f80e3857 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1127,7 +1127,7 @@ class Item_cache_int: public Item_cache
{
longlong value;
public:
- Item_cache_int(): Item_cache() {}
+ Item_cache_int(): Item_cache(), value(0) {}
void store(Item *item);
double val() { DBUG_ASSERT(fixed == 1); return (double) value; }
@@ -1145,7 +1145,7 @@ class Item_cache_real: public Item_cache
{
double value;
public:
- Item_cache_real(): Item_cache() {}
+ Item_cache_real(): Item_cache(), value(0) {}
void store(Item *item);
double val() { DBUG_ASSERT(fixed == 1); return value; }
@@ -1167,7 +1167,7 @@ class Item_cache_str: public Item_cache
char buffer[80];
String *value, value_buff;
public:
- Item_cache_str(): Item_cache() { }
+ Item_cache_str(): Item_cache(), value(0) { }
void store(Item *item);
double val();
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 4970517de87..51212418b09 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -106,7 +106,7 @@ longlong Item_func_not::val_int()
DBUG_ASSERT(fixed == 1);
double value=args[0]->val();
null_value=args[0]->null_value;
- return !null_value && value == 0 ? 1 : 0;
+ return ((!null_value && value == 0) ? 1 : 0);
}
/*
@@ -117,13 +117,23 @@ longlong Item_func_not_all::val_int()
{
DBUG_ASSERT(fixed == 1);
double value= args[0]->val();
- if (abort_on_null)
- {
- null_value= 0;
- return (args[0]->null_value || value == 0) ? 1 : 0;
- }
+
+ /*
+ return TRUE if there were records in the underlying select in max/min
+ optimisation
+ */
+ if (empty_underlying_subquery())
+ return 1;
+
null_value= args[0]->null_value;
- return (!null_value && value == 0) ? 1 : 0;
+ return ((!null_value && value == 0) ? 1 : 0);
+}
+
+
+bool Item_func_not_all::empty_underlying_subquery()
+{
+ return ((test_sum_item && !test_sum_item->any_value()) ||
+ (test_sub_item && !test_sub_item->any_value()));
}
void Item_func_not_all::print(String *str)
@@ -134,6 +144,30 @@ void Item_func_not_all::print(String *str)
args[0]->print(str);
}
+
+/*
+ Special NOP (No OPeration) for ALL subqueries: like Item_func_not_all
+ (returns TRUE if the underlying subquery does not return rows), but if the
+ subquery returns some rows it returns the same value as its argument
+ (TRUE/FALSE).
+*/
+
+longlong Item_func_nop_all::val_int()
+{
+ DBUG_ASSERT(fixed == 1);
+ double value= args[0]->val();
+
+ /*
+ return TRUE if there were records in the underlying select in max/min
+ optimisation
+ */
+ if (empty_underlying_subquery())
+ return 1;
+
+ null_value= args[0]->null_value;
+ return (null_value || value == 0) ? 0 : 1;
+}
+
+
/*
Convert a constant expression or string to an integer.
This is done when comparing DATE's of different formats and
@@ -735,7 +769,7 @@ void Item_func_interval::fix_length_and_dec()
maybe_null= 0;
max_length= 2;
used_tables_cache|= row->used_tables();
- not_null_tables_cache&= row->not_null_tables();
+ not_null_tables_cache= row->not_null_tables();
with_sum_func= with_sum_func || row->with_sum_func;
const_item_cache&= row->const_item();
}
@@ -2365,10 +2399,10 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
}
int error;
if ((error= regcomp(&preg,res->c_ptr(),
- ((cmp_collation.collation->state & MY_CS_BINSORT) ||
- (cmp_collation.collation->state & MY_CS_CSSORT)) ?
+ ((cmp_collation.collation->state &
+ (MY_CS_BINSORT | MY_CS_CSSORT)) ?
REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE,
+ REG_EXTENDED | REG_NOSUB | REG_ICASE),
cmp_collation.collation)))
{
(void) regerror(error,&preg,buff,sizeof(buff));
@@ -2417,10 +2451,10 @@ longlong Item_func_regex::val_int()
regex_compiled=0;
}
if (regcomp(&preg,res2->c_ptr(),
- ((cmp_collation.collation->state & MY_CS_BINSORT) ||
- (cmp_collation.collation->state & MY_CS_CSSORT)) ?
+ ((cmp_collation.collation->state &
+ (MY_CS_BINSORT | MY_CS_CSSORT)) ?
REG_EXTENDED | REG_NOSUB :
- REG_EXTENDED | REG_NOSUB | REG_ICASE,
+ REG_EXTENDED | REG_NOSUB | REG_ICASE),
cmp_collation.collation))
{
null_value=1;
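A standalone model (not the actual Item classes) of the <not>/<nop> truth logic added above: both report TRUE when the underlying max/min subquery produced no rows, since an ALL comparison over an empty set is vacuously true; with rows present, <not> negates its argument while <nop> passes it through.

#include <cassert>

// Minimal model of the val_int() logic in the hunk above. 'arg' is the
// wrapped comparison result, 'arg_is_null' its NULL flag, and
// 'subquery_had_rows' corresponds to !empty_underlying_subquery().
// (The null_value side effect of the real code is not modelled.)
static long not_all(double arg, bool arg_is_null, bool subquery_had_rows)
{
  if (!subquery_had_rows)
    return 1;                                  // empty subquery: ALL is vacuously TRUE
  return (!arg_is_null && arg == 0) ? 1 : 0;   // ordinary logical NOT
}

static long nop_all(double arg, bool arg_is_null, bool subquery_had_rows)
{
  if (!subquery_had_rows)
    return 1;                                  // empty subquery: ALL is vacuously TRUE
  return (arg_is_null || arg == 0) ? 0 : 1;    // pass the argument through
}

int main()
{
  assert(not_all(0, false, true) == 1);   // comparison failed, rows present: NOT gives TRUE
  assert(not_all(1, false, false) == 1);  // no rows: TRUE regardless of the argument
  assert(nop_all(1, false, true) == 1);   // comparison TRUE, rows present
  assert(nop_all(0, false, false) == 1);  // no rows: still TRUE
  return 0;
}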
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 69528099aa1..6834799688d 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -229,21 +229,43 @@ public:
Item *neg_transformer(THD *thd);
};
+class Item_maxmin_subselect;
class Item_func_not_all :public Item_func_not
{
+ /* allow checking the presence of values in max/min optimisation */
+ Item_sum_hybrid *test_sum_item;
+ Item_maxmin_subselect *test_sub_item;
+
bool abort_on_null;
public:
bool show;
- Item_func_not_all(Item *a) :Item_func_not(a), abort_on_null(0), show(0) {}
+ Item_func_not_all(Item *a)
+ :Item_func_not(a), test_sum_item(0), test_sub_item(0), abort_on_null(0),
+ show(0)
+ {}
virtual void top_level_item() { abort_on_null= 1; }
bool top_level() { return abort_on_null; }
longlong val_int();
enum Functype functype() const { return NOT_ALL_FUNC; }
const char *func_name() const { return "<not>"; }
void print(String *str);
+ void set_sum_test(Item_sum_hybrid *item) { test_sum_item= item; };
+ void set_sub_test(Item_maxmin_subselect *item) { test_sub_item= item; };
+ bool empty_underlying_subquery();
};
+
+class Item_func_nop_all :public Item_func_not_all
+{
+public:
+
+ Item_func_nop_all(Item *a) :Item_func_not_all(a) {}
+ longlong val_int();
+ const char *func_name() const { return "<nop>"; }
+};
+
+
class Item_func_eq :public Item_bool_rowready_func2
{
public:
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 32841ba447b..98b204d1809 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -171,7 +171,7 @@ bool Item_func::agg_arg_charsets(DTCollation &coll,
for (arg= args, last= args + nargs; arg < last; arg++)
{
Item* conv;
- uint dummy_offset;
+ uint32 dummy_offset;
if (!String::needs_conversion(0, coll.collation,
(*arg)->collation.collation,
&dummy_offset))
@@ -2010,7 +2010,7 @@ void item_user_lock_release(User_level_lock *ull)
tmp.copy(command, strlen(command), tmp.charset());
tmp.append(ull->key,ull->key_length);
tmp.append("\")", 2);
- Query_log_event qev(current_thd, tmp.ptr(), tmp.length(),1);
+ Query_log_event qev(current_thd, tmp.ptr(), tmp.length(),1, FALSE);
qev.error_code=0; // this query is always safe to run on slave
mysql_bin_log.write(&qev);
}
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index ebd794f1e76..a58e00a1b39 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -2319,17 +2319,6 @@ String *Item_func_hex::val_str(String *str)
return &tmp_value;
}
-inline int hexchar_to_int(char c)
-{
- if (c <= '9' && c >= '0')
- return c-'0';
- c|=32;
- if (c <= 'f' && c >= 'a')
- return c-'a'+10;
- return -1;
-}
-
-
/* Convert given hex string to a binary string */
String *Item_func_unhex::val_str(String *str)
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 62cd016b0df..69941b36ca0 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -155,6 +155,8 @@ bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref)
// did we changed top item of WHERE condition
if (unit->outer_select()->where == (*ref))
unit->outer_select()->where= substitution; // correct WHERE for PS
+ else if (unit->outer_select()->having == (*ref))
+ unit->outer_select()->having= substitution; // correct HAVING for PS
(*ref)= substitution;
substitution->name= name;
@@ -271,7 +273,7 @@ Item_singlerow_subselect::Item_singlerow_subselect(st_select_lex *select_lex)
Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent,
st_select_lex *select_lex,
bool max_arg)
- :Item_singlerow_subselect()
+ :Item_singlerow_subselect(), was_values(TRUE)
{
DBUG_ENTER("Item_maxmin_subselect::Item_maxmin_subselect");
max= max_arg;
@@ -290,12 +292,31 @@ Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent,
DBUG_VOID_RETURN;
}
+void Item_maxmin_subselect::cleanup()
+{
+ DBUG_ENTER("Item_maxmin_subselect::cleanup");
+ Item_singlerow_subselect::cleanup();
+
+ /*
+ By default it is TRUE to avoid TRUE reporting by
+ Item_func_not_all/Item_func_nop_all if this item was never called.
+
+ The engine's exec() sets it to FALSE via a reset_value_registration() call.
+ select_max_min_finder_subselect::send_data() sets it back to TRUE if some
+ value is found.
+ */
+ was_values= TRUE;
+ DBUG_VOID_RETURN;
+}
+
+
void Item_maxmin_subselect::print(String *str)
{
str->append(max?"<max>":"<min>", 5);
Item_singlerow_subselect::print(str);
}
+
void Item_singlerow_subselect::reset()
{
null_value= 1;
@@ -303,6 +324,7 @@ void Item_singlerow_subselect::reset()
value->null_value= 1;
}
+
Item_subselect::trans_res
Item_singlerow_subselect::select_transformer(JOIN *join)
{
@@ -519,7 +541,7 @@ bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit)
Item_in_subselect::Item_in_subselect(Item * left_exp,
st_select_lex *select_lex):
- Item_exists_subselect(), transformed(0), upper_not(0)
+ Item_exists_subselect(), transformed(0), upper_item(0)
{
DBUG_ENTER("Item_in_subselect::Item_in_subselect");
left_expr= left_exp;
@@ -680,7 +702,7 @@ Item_in_subselect::single_value_transformer(JOIN *join,
NULL/IS NOT NULL functions). If so, we rewrite ALL/ANY with NOT EXISTS
later in this method.
*/
- if ((abort_on_null || (upper_not && upper_not->top_level())) &&
+ if ((abort_on_null || (upper_item && upper_item->top_level())) &&
!select_lex->master_unit()->uncacheable && !func->eqne_op())
{
if (substitution)
@@ -694,7 +716,7 @@ Item_in_subselect::single_value_transformer(JOIN *join,
!select_lex->with_sum_func &&
!(select_lex->next_select()))
{
- Item *item;
+ Item_sum_hybrid *item;
if (func->l_op())
{
/*
@@ -711,6 +733,8 @@ Item_in_subselect::single_value_transformer(JOIN *join,
*/
item= new Item_sum_min(*select_lex->ref_pointer_array);
}
+ if (upper_item)
+ upper_item->set_sum_test(item);
*select_lex->ref_pointer_array= item;
{
List_iterator<Item> it(select_lex->item_list);
@@ -731,10 +755,13 @@ Item_in_subselect::single_value_transformer(JOIN *join,
}
else
{
+ Item_maxmin_subselect *item;
// remove LIMIT placed by ALL/ANY subquery
select_lex->master_unit()->global_parameters->select_limit=
HA_POS_ERROR;
- subs= new Item_maxmin_subselect(this, select_lex, func->l_op());
+ subs= item= new Item_maxmin_subselect(this, select_lex, func->l_op());
+ if (upper_item)
+ upper_item->set_sub_test(item);
}
// left expression belong to outer select
SELECT_LEX *current= thd->lex->current_select, *up;
@@ -1041,8 +1068,8 @@ Item_subselect::trans_res
Item_allany_subselect::select_transformer(JOIN *join)
{
transformed= 1;
- if (upper_not)
- upper_not->show= 1;
+ if (upper_item)
+ upper_item->show= 1;
return single_value_transformer(join, func);
}
@@ -1247,6 +1274,7 @@ int subselect_single_select_engine::exec()
}
if (!executed)
{
+ item->reset_value_registration();
join->exec();
executed= 1;
join->thd->where= save_where;
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 764c41f33b4..ab2d441ed7a 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -93,7 +93,7 @@ public:
return null_value;
}
bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref);
- bool exec();
+ virtual bool exec();
virtual void fix_length_and_dec();
table_map used_tables() const;
bool const_item() const;
@@ -109,6 +109,11 @@ public:
engine_changed= 1;
return eng == 0;
}
+ /*
+ Used by max/min subqueries to initialize the value-presence registration
+ mechanism. The engine calls this method before re-executing the query.
+ */
+ virtual void reset_value_registration() {}
friend class select_subselect;
friend class Item_in_optimizer;
@@ -150,13 +155,20 @@ public:
};
/* used in static ALL/ANY optimisation */
+class select_max_min_finder_subselect;
class Item_maxmin_subselect :public Item_singlerow_subselect
{
+protected:
bool max;
+ bool was_values; // Set if we have found at least one row
public:
Item_maxmin_subselect(Item_subselect *parent,
st_select_lex *select_lex, bool max);
void print(String *str);
+ void cleanup();
+ bool any_value() { return was_values; }
+ void register_value() { was_values= TRUE; }
+ void reset_value_registration() { was_values= FALSE; }
};
/* exists subselect */
@@ -204,11 +216,11 @@ protected:
bool abort_on_null;
bool transformed;
public:
- Item_func_not_all *upper_not; // point on NOT before ALL subquery
+ Item_func_not_all *upper_item; // point on NOT/NOP before ALL/SOME subquery
Item_in_subselect(Item * left_expr, st_select_lex *select_lex);
Item_in_subselect()
- :Item_exists_subselect(), abort_on_null(0), transformed(0), upper_not(0)
+ :Item_exists_subselect(), abort_on_null(0), transformed(0), upper_item(0)
{}
@@ -249,7 +261,7 @@ public:
st_select_lex *select_lex, bool all);
// only ALL subquery has upper not
- subs_type substype() { return upper_not?ALL_SUBS:ANY_SUBS; }
+ subs_type substype() { return all?ALL_SUBS:ANY_SUBS; }
trans_res select_transformer(JOIN *join);
void print(String *str);
};
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index c43a7d87f8f..66d4fba205c 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -537,9 +537,24 @@ void Item_sum_hybrid::cleanup()
DBUG_ENTER("Item_sum_hybrid::cleanup");
Item_sum::cleanup();
used_table_cache= ~(table_map) 0;
+
+ /*
+ By default it is TRUE to avoid TRUE reporting by
+ Item_func_not_all/Item_func_nop_all if this item was never called.
+
+ no_rows_in_result() sets it to FALSE if no results were found.
+ If some results are found it is left unchanged.
+ */
+ was_values= TRUE;
DBUG_VOID_RETURN;
}
+void Item_sum_hybrid::no_rows_in_result()
+{
+ Item_sum::no_rows_in_result();
+ was_values= FALSE;
+}
+
Item *Item_sum_min::copy_or_same(THD* thd)
{
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 521c595712b..cec611b8854 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -402,18 +402,20 @@ class Item_sum_hybrid :public Item_sum
enum_field_types hybrid_field_type;
int cmp_sign;
table_map used_table_cache;
+ bool was_values; // Set if we have found at least one row (for max/min only)
public:
Item_sum_hybrid(Item *item_par,int sign)
:Item_sum(item_par), sum(0.0), sum_int(0),
hybrid_type(INT_RESULT), hybrid_field_type(FIELD_TYPE_LONGLONG),
- cmp_sign(sign), used_table_cache(~(table_map) 0)
+ cmp_sign(sign), used_table_cache(~(table_map) 0), was_values(TRUE)
{ collation.set(&my_charset_bin); }
Item_sum_hybrid(THD *thd, Item_sum_hybrid *item):
Item_sum(thd, item), value(item->value),
sum(item->sum), sum_int(item->sum_int), hybrid_type(item->hybrid_type),
hybrid_field_type(item->hybrid_field_type),cmp_sign(item->cmp_sign),
- used_table_cache(item->used_table_cache)
+ used_table_cache(item->used_table_cache),
+ was_values(TRUE)
{ collation.set(item->collation); }
bool fix_fields(THD *, TABLE_LIST *, Item **);
table_map used_tables() const { return used_table_cache; }
@@ -433,6 +435,8 @@ class Item_sum_hybrid :public Item_sum
void min_max_update_real_field();
void min_max_update_int_field();
void cleanup();
+ bool any_value() { return was_values; }
+ void no_rows_in_result();
};
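The cleanup()/reset_value_registration()/register_value()/any_value() comments above describe a small value-presence protocol around was_values. A condensed standalone model of that lifecycle (hypothetical class name, not the real Item hierarchy):

#include <cassert>

// Hypothetical stand-in for the was_values bookkeeping described above.
struct MaxMinValueTracker
{
  bool was_values;

  MaxMinValueTracker() : was_values(true) {}

  // Item_maxmin_subselect::cleanup(): back to TRUE so a never-executed item
  // does not make Item_func_not_all report an empty subquery.
  void cleanup()                  { was_values = true; }

  // subselect_single_select_engine::exec() calls this before re-execution.
  void reset_value_registration() { was_values = false; }

  // select_max_min_finder_subselect::send_data(): a row was found.
  void register_value()           { was_values = true; }

  // Read by Item_func_not_all::empty_underlying_subquery().
  bool any_value() const          { return was_values; }
};

int main()
{
  MaxMinValueTracker t;
  t.reset_value_registration();   // query is about to run
  assert(!t.any_value());         // nothing found yet: NOT ALL / NOP ALL report TRUE
  t.register_value();             // a row arrived
  assert(t.any_value());          // the normal comparison result is used
  return 0;
}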
diff --git a/sql/lock.cc b/sql/lock.cc
index 646babea6a1..7cfa2aebe7b 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -705,15 +705,70 @@ static void print_lock_error(int error)
/****************************************************************************
Handling of global read locks
+ Taking the global read lock is TWO steps (2nd step is optional; without
+ it, COMMIT of existing transactions will be allowed):
+ lock_global_read_lock() THEN make_global_read_lock_block_commit().
+
The global locks are handled through the global variables:
global_read_lock
+ count of threads which have the global read lock (i.e. have completed at
+ least the first step above)
global_read_lock_blocks_commit
- waiting_for_read_lock
+ count of threads which have the global read lock and block
+ commits (i.e. have completed the second step above)
+ waiting_for_read_lock
+ count of threads which want to take a global read lock but cannot
protect_against_global_read_lock
+ count of threads which have set protection against global read lock.
+
+ How blocking of threads by global read lock is achieved: that's
+ advisory. Any piece of code which should be blocked by global read lock must
+ be designed like this:
+ - call to wait_if_global_read_lock(). When this returns 0, no global read
+ lock is owned; if argument abort_on_refresh was 0, none can be obtained.
+ - job
+ - if abort_on_refresh was 0, call to start_waiting_global_read_lock() to
+ allow other threads to get the global read lock. I.e. removal of the
+ protection.
+ (Note: it's a bit like an implementation of rwlock).
+
+ [ I am sorry to mention some SQL syntaxes below I know I shouldn't but found
+ no better descriptive way ]
+
+ Why does FLUSH TABLES WITH READ LOCK need to block COMMIT: because it's used
+ to read a non-moving SHOW MASTER STATUS, and a COMMIT writes to the binary
+ log.
+
+ Why getting the global read lock is two steps and not one. Because FLUSH
+ TABLES WITH READ LOCK needs to insert one other step between the two:
+ flushing tables. So the order is
+ 1) lock_global_read_lock() (prevents any new table write locks, i.e. stalls
+ all new updates)
+ 2) close_cached_tables() (the FLUSH TABLES), which will wait for tables
+ currently opened and being updated to close (so it's possible that there is
+ a moment where all new updates of server are stalled *and* FLUSH TABLES WITH
+ READ LOCK is, too).
+ 3) make_global_read_lock_block_commit().
+ If we had merged 1) and 3) into one step, we would have had this deadlock:
+ imagine thread 1 and 2, in non-autocommit mode, thread 3, and an InnoDB
+ table t.
+ thd1: SELECT * FROM t FOR UPDATE;
+ thd2: UPDATE t SET a=1; # blocked by row-level locks of thd1
+ thd3: FLUSH TABLES WITH READ LOCK; # blocked in close_cached_tables() by the
+ table instance of thd2
+ thd1: COMMIT; # blocked by thd3.
+ thd1 blocks thd2 which blocks thd3 which blocks thd1: deadlock.
+
+ Note that we need to support that one thread does
+ FLUSH TABLES WITH READ LOCK; and then COMMIT;
+ (that's what innobackup does, for some good reason).
+ So in this exceptional case the COMMIT should not be blocked by the FLUSH
+ TABLES WITH READ LOCK.
+
+ TODO in MySQL 5.x: make_global_read_lock_block_commit() should be
+ killable. Normally CPU does not spend a long time in this function (COMMITs
+ are quite fast), but it would still be nice.
- Taking the global read lock is TWO steps (2nd step is optional; without
- it, COMMIT of existing transactions will be allowed):
- lock_global_read_lock() THEN make_global_read_lock_block_commit().
****************************************************************************/
volatile uint global_read_lock=0;
@@ -828,6 +883,8 @@ void start_waiting_global_read_lock(THD *thd)
{
bool tmp;
DBUG_ENTER("start_waiting_global_read_lock");
+ if (unlikely(thd->global_read_lock))
+ DBUG_VOID_RETURN;
(void) pthread_mutex_lock(&LOCK_open);
tmp= (!--protect_against_global_read_lock && waiting_for_read_lock);
(void) pthread_mutex_unlock(&LOCK_open);
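The comment block added to lock.cc above describes an advisory, rwlock-like protocol built from counters. A self-contained model of the first step only (no block-commit stage and no LOCK_open/LOCK_global_read_lock split; the names follow the comment, everything else is assumed for illustration):

#include <mutex>
#include <condition_variable>

// Counters-only model of the advisory protocol documented above.
class GlobalReadLockModel
{
  std::mutex m;
  std::condition_variable cond;
  unsigned global_read_lock = 0;                 // threads holding the read lock
  unsigned protect_against_global_read_lock = 0; // writers inside a protected job

public:
  // wait_if_global_read_lock(): returns once no read lock is owned and
  // registers protection so no new read lock can be taken meanwhile.
  void enter_protected_job()
  {
    std::unique_lock<std::mutex> lk(m);
    cond.wait(lk, [this] { return global_read_lock == 0; });
    ++protect_against_global_read_lock;
  }

  // start_waiting_global_read_lock(): drop the protection.
  void leave_protected_job()
  {
    std::lock_guard<std::mutex> lk(m);
    if (--protect_against_global_read_lock == 0)
      cond.notify_all();                        // wake waiting FLUSH TABLES WITH READ LOCK
  }

  // lock_global_read_lock(): wait until no protected job is running.
  void take_global_read_lock()
  {
    std::unique_lock<std::mutex> lk(m);
    cond.wait(lk, [this] { return protect_against_global_read_lock == 0; });
    ++global_read_lock;
  }

  void release_global_read_lock()
  {
    std::lock_guard<std::mutex> lk(m);
    if (--global_read_lock == 0)
      cond.notify_all();
  }
};

int main()
{
  GlobalReadLockModel gl;
  gl.enter_protected_job();        // writer: wait_if_global_read_lock()
  /* ... update tables ... */
  gl.leave_protected_job();        // writer: start_waiting_global_read_lock()

  gl.take_global_read_lock();      // FLUSH TABLES WITH READ LOCK, step 1
  gl.release_global_read_lock();   // UNLOCK TABLES
  return 0;
}

In the real server, FLUSH TABLES WITH READ LOCK additionally runs close_cached_tables() and, as a third step, make_global_read_lock_block_commit(), which this model omits.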
diff --git a/sql/log.cc b/sql/log.cc
index 83034c79dde..3a420866025 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1273,7 +1273,7 @@ bool MYSQL_LOG::write(Log_event* event_info)
(local_db && !db_ok(local_db, binlog_do_db, binlog_ignore_db)))
{
VOID(pthread_mutex_unlock(&LOCK_log));
- DBUG_PRINT("error",("!db_ok"));
+ DBUG_PRINT("error",("!db_ok('%s')", local_db));
DBUG_RETURN(0);
}
#endif /* HAVE_REPLICATION */
@@ -1316,7 +1316,7 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
(uint) thd->variables.collation_connection->number,
(uint) thd->variables.collation_database->number,
(uint) thd->variables.collation_server->number);
- Query_log_event e(thd, buf, written, 0);
+ Query_log_event e(thd, buf, written, 0, FALSE);
e.set_log_pos(this);
if (e.write(file))
goto err;
@@ -1332,7 +1332,7 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
char *buf_end= strxmov(buf, "SET ONE_SHOT TIME_ZONE='",
thd->variables.time_zone->get_name()->ptr(),
"'", NullS);
- Query_log_event e(thd, buf, buf_end - buf, 0);
+ Query_log_event e(thd, buf, buf_end - buf, 0, FALSE);
e.set_log_pos(this);
if (e.write(file))
goto err;
@@ -1401,7 +1401,7 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS)
{
- Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=0", 24, 0);
+ Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=0", 24, 0, FALSE);
e.set_log_pos(this);
if (e.write(file))
goto err;
@@ -1420,7 +1420,7 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
{
if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS)
{
- Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=1", 24, 0);
+ Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=1", 24, 0, FALSE);
e.set_log_pos(this);
if (e.write(file))
goto err;
@@ -1596,7 +1596,7 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback)
we will add the "COMMIT mark and write the buffer to the binlog.
*/
{
- Query_log_event qinfo(thd, "BEGIN", 5, TRUE);
+ Query_log_event qinfo(thd, "BEGIN", 5, TRUE, FALSE);
/*
Imagine this is rollback due to net timeout, after all statements of
the transaction succeeded. Then we want a zero-error code in BEGIN.
@@ -1637,7 +1637,7 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback)
Query_log_event qinfo(thd,
commit_or_rollback ? "COMMIT" : "ROLLBACK",
commit_or_rollback ? 6 : 8,
- TRUE);
+ TRUE, FALSE);
qinfo.error_code= 0;
qinfo.set_log_pos(this);
if (qinfo.write(&log_file) || flush_io_cache(&log_file) ||
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 2fdc89504d7..7a4d14d101a 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -780,7 +780,8 @@ void Query_log_event::pack_info(Protocol *protocol)
if (!(buf= my_malloc(9 + db_len + q_len, MYF(MY_WME))))
return;
pos= buf;
- if (db && db_len)
+ if (!(flags & LOG_EVENT_SUPPRESS_USE_F)
+ && db && db_len)
{
pos= strmov(buf, "use `");
memcpy(pos, db, db_len);
@@ -872,9 +873,12 @@ int Query_log_event::write_data(IO_CACHE* file)
#ifndef MYSQL_CLIENT
Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
- ulong query_length, bool using_trans)
- :Log_event(thd_arg, !thd_arg->tmp_table_used ?
- 0 : LOG_EVENT_THREAD_SPECIFIC_F, using_trans),
+ ulong query_length, bool using_trans,
+ bool suppress_use)
+ :Log_event(thd_arg,
+ ((thd_arg->tmp_table_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0)
+ | (suppress_use ? LOG_EVENT_SUPPRESS_USE_F : 0)),
+ using_trans),
data_buf(0), query(query_arg),
db(thd_arg->db), q_len((uint32) query_length),
error_code(thd_arg->killed ?
@@ -949,14 +953,20 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db)
bool different_db= 1;
- if (db && last_db)
+ if (!(flags & LOG_EVENT_SUPPRESS_USE_F))
{
- if (different_db= memcmp(last_db, db, db_len + 1))
- memcpy(last_db, db, db_len + 1);
+ if (db && last_db)
+ {
+ if (different_db= memcmp(last_db, db, db_len + 1))
+ memcpy(last_db, db, db_len + 1);
+ }
+
+ if (db && db[0] && different_db)
+ {
+ fprintf(file, "use %s;\n", db);
+ }
}
-
- if (db && db[0] && different_db)
- fprintf(file, "use %s;\n", db);
+
end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10);
*end++=';';
*end++='\n';
diff --git a/sql/log_event.h b/sql/log_event.h
index 1606659e21e..8a2334e8574 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -264,6 +264,19 @@ struct sql_ex_info
*/
#define LOG_EVENT_THREAD_SPECIFIC_F 0x4
+/*
+ Suppress the generation of 'USE' statements before the actual
+ statement. This flag should be set for any event that does not need
+ the current database set to function correctly. The most notable cases
+ are 'CREATE DATABASE' and 'DROP DATABASE'.
+
+ This flag should only be used in exceptional circumstances, since
+ it introduces a significant change in behaviour regarding the
+ replication logic together with the flags --binlog-do-db and
+ --replicate-do-db.
+ */
+#define LOG_EVENT_SUPPRESS_USE_F 0x8
+
enum Log_event_type
{
UNKNOWN_EVENT= 0, START_EVENT= 1, QUERY_EVENT= 2, STOP_EVENT= 3,
@@ -331,8 +344,9 @@ public:
/*
Some 16 flags. Only one is really used now; look above for
- LOG_EVENT_TIME_F, LOG_EVENT_FORCED_ROTATE_F, LOG_EVENT_THREAD_SPECIFIC_F
- for notes.
+ LOG_EVENT_TIME_F, LOG_EVENT_FORCED_ROTATE_F,
+ LOG_EVENT_THREAD_SPECIFIC_F, and LOG_EVENT_SUPPRESS_USE_F for
+ notes.
*/
uint16 flags;
@@ -465,7 +479,7 @@ public:
#ifndef MYSQL_CLIENT
Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length,
- bool using_trans);
+ bool using_trans, bool suppress_use);
const char* get_db() { return db; }
#ifdef HAVE_REPLICATION
void pack_info(Protocol* protocol);
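For illustration, how the new flag combines with the existing thread-specific flag in the Query_log_event constructor and how readers test it; the flag values are taken from log_event.h above, the rest is a standalone sketch.

#include <cstdint>
#include <cassert>

// Flag values as defined in log_event.h (0x4 already existed, 0x8 is new).
static const uint16_t LOG_EVENT_THREAD_SPECIFIC_F = 0x4;
static const uint16_t LOG_EVENT_SUPPRESS_USE_F    = 0x8;

// Mirrors how the Query_log_event constructor now composes its flags.
static uint16_t make_flags(bool tmp_table_used, bool suppress_use)
{
  return (tmp_table_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0)
       | (suppress_use   ? LOG_EVENT_SUPPRESS_USE_F    : 0);
}

int main()
{
  uint16_t flags = make_flags(false, true);  // e.g. a CREATE DATABASE event
  // pack_info()/print() skip the leading "use `db`;" when the flag is set:
  assert(flags & LOG_EVENT_SUPPRESS_USE_F);
  return 0;
}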
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 46f47e51b6d..8350122c4e2 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -689,7 +689,8 @@ bool add_field_to_list(THD *thd, char *field_name, enum enum_field_types type,
uint type_modifier,
Item *default_value, Item *on_update_value,
LEX_STRING *comment,
- char *change, TYPELIB *interval,CHARSET_INFO *cs,
+ char *change, List<String> *interval_list,
+ CHARSET_INFO *cs,
uint uint_geom_type);
void store_position_for_column(const char *name);
bool add_to_list(THD *thd, SQL_LIST &list,Item *group,bool asc=0);
@@ -1201,6 +1202,23 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr)
/*
+ SYNOPSIS
+ hexchar_to_int()
+ convert a hex digit into a number
+*/
+
+inline int hexchar_to_int(char c)
+{
+ if (c <= '9' && c >= '0')
+ return c-'0';
+ c|=32;
+ if (c <= 'f' && c >= 'a')
+ return c-'a'+10;
+ return -1;
+}
+
+
+/*
Some functions that are different in the embedded library and the normal
server
*/
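A standalone usage sketch of the relocated helper; the function body is reproduced verbatim so the example compiles on its own, and the decode loop approximates what Item_func_unhex::val_str() does with it.

#include <cstdio>

// hexchar_to_int() as moved into mysql_priv.h above.
inline int hexchar_to_int(char c)
{
  if (c <= '9' && c >= '0')
    return c-'0';
  c|=32;
  if (c <= 'f' && c >= 'a')
    return c-'a'+10;
  return -1;
}

int main()
{
  // Decode a hex string two digits at a time.
  const char *hex = "4D7953514C";            // "MySQL"
  for (const char *p = hex; p[0] && p[1]; p += 2)
  {
    int hi = hexchar_to_int(p[0]);
    int lo = hexchar_to_int(p[1]);
    if (hi < 0 || lo < 0)
      return 1;                              // not a hex digit
    putchar((hi << 4) | lo);
  }
  putchar('\n');
  return 0;
}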
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index ccb38b40802..9343c79c9f0 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -35,6 +35,28 @@
#ifdef HAVE_NDBCLUSTER_DB
#include "ha_ndbcluster.h"
#endif
+
+#ifdef HAVE_INNOBASE_DB
+#define OPT_INNODB_DEFAULT 1
+#else
+#define OPT_INNODB_DEFAULT 0
+#endif
+#ifdef HAVE_BERKLEY_DB
+#define OPT_BDB_DEFAULT 1
+#else
+#define OPT_BDB_DEFAULT 0
+#endif
+#ifdef HAVE_ISAM_DB
+#define OPT_ISAM_DEFAULT 1
+#else
+#define OPT_ISAM_DEFAULT 0
+#endif
+#ifdef HAVE_NDBCLUSTER_DB
+#define OPT_NDBCLUSTER_DEFAULT 0
+#else
+#define OPT_NDBCLUSTER_DEFAULT 0
+#endif
+
#include <nisam.h>
#include <thr_alarm.h>
#include <ft_global.h>
@@ -145,6 +167,7 @@ static VolumeID_t datavolid;
static event_handle_t eh;
static Report_t ref;
static void *refneb= NULL;
+my_bool event_flag= FALSE;
static int volumeid= -1;
/* NEB event callback */
@@ -814,7 +837,8 @@ static void __cdecl kill_server(int sig_ptr)
else
unireg_end();
#ifdef __NETWARE__
- pthread_join(select_thread, NULL); // wait for main thread
+ if (!event_flag)
+ pthread_join(select_thread, NULL); // wait for main thread
#endif /* __NETWARE__ */
pthread_exit(0); /* purecov: deadcode */
@@ -1524,20 +1548,20 @@ static void check_data_home(const char *path)
// down server event callback
void mysql_down_server_cb(void *, void *)
{
+ event_flag = TRUE;
kill_server(0);
}
// destroy callback resources
void mysql_cb_destroy(void *)
-{
- UnRegisterEventNotification(eh); // cleanup down event notification
+{
+ UnRegisterEventNotification(eh); // cleanup down event notification
NX_UNWRAP_INTERFACE(ref);
-
- /* Deregister NSS volume deactivation event */
- NX_UNWRAP_INTERFACE(refneb);
+ /* Deregister NSS volume deactivation event */
+ NX_UNWRAP_INTERFACE(refneb);
if (neb_consumer_id)
- UnRegisterConsumer(neb_consumer_id, NULL);
+ UnRegisterConsumer(neb_consumer_id, NULL);
}
@@ -1557,7 +1581,7 @@ void mysql_cb_init()
Register for volume deactivation event
Wrap the callback function, as it is called by non-LibC thread
*/
- (void)NX_WRAP_INTERFACE(neb_event_callback, 1, &refneb);
+ (void *) NX_WRAP_INTERFACE(neb_event_callback, 1, &refneb);
registerwithneb();
NXVmRegisterExitHandler(mysql_cb_destroy, NULL); // clean-up
@@ -1654,7 +1678,9 @@ ulong neb_event_callback(struct EventBlock *eblock)
{
consoleprintf("MySQL data volume is deactivated, shutting down MySQL Server \n");
nw_panic = TRUE;
+ event_flag= TRUE;
kill_server(0);
+
}
}
return 0;
@@ -1728,8 +1754,8 @@ static void init_signals(void)
for (uint i=0 ; i < sizeof(signals)/sizeof(int) ; i++)
signal(signals[i], kill_server);
mysql_cb_init(); // initialize callbacks
-}
+}
static void start_signal_handler(void)
{
@@ -2236,7 +2262,13 @@ extern "C" pthread_handler_decl(handle_shutdown,arg)
#endif
-const char *load_default_groups[]= { "mysqld","server",MYSQL_BASE_VERSION,0,0};
+const char *load_default_groups[]= {
+#ifdef HAVE_NDBCLUSTER_DB
+"mysql_cluster",
+#endif
+"mysqld","server",MYSQL_BASE_VERSION,0,0};
+static const int load_default_groups_sz=
+sizeof(load_default_groups)/sizeof(load_default_groups[0]);
bool open_log(MYSQL_LOG *log, const char *hostname,
const char *opt_name, const char *extension,
@@ -2812,6 +2844,7 @@ int win_main(int argc, char **argv)
int main(int argc, char **argv)
#endif
{
+
DEBUGGER_OFF;
MY_INIT(argv[0]); // init my_sys library & pthreads
@@ -3007,7 +3040,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
#endif /* __NT__ */
/* (void) pthread_attr_destroy(&connection_attrib); */
-
+
DBUG_PRINT("quit",("Exiting main thread"));
#ifndef __WIN__
@@ -3057,6 +3090,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
#endif
clean_up_mutexes();
my_end(opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0);
+
exit(0);
return(0); /* purecov: deadcode */
}
@@ -3183,7 +3217,7 @@ int main(int argc, char **argv)
and we are now stuck with it.
*/
if (my_strcasecmp(system_charset_info, argv[1],"mysql"))
- load_default_groups[3]= argv[1];
+ load_default_groups[load_default_groups_sz-2]= argv[1];
start_mode= 1;
Service.Init(argv[1], mysql_service);
return 0;
@@ -3204,7 +3238,7 @@ int main(int argc, char **argv)
opt_argv=argv;
start_mode= 1;
if (my_strcasecmp(system_charset_info, argv[2],"mysql"))
- load_default_groups[3]= argv[2];
+ load_default_groups[load_default_groups_sz-2]= argv[2];
Service.Init(argv[2], mysql_service);
return 0;
}
@@ -4076,7 +4110,7 @@ struct my_option my_long_options[] =
0, 0, 0, 0, 0, 0},
{"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \
Disable with --skip-bdb (will save memory).",
- (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, 1, 0, 0,
+ (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0,
0, 0, 0},
#ifdef HAVE_BERKELEY_DB
{"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home,
@@ -4213,7 +4247,7 @@ Disable with --skip-bdb (will save memory).",
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"innodb", OPT_INNODB, "Enable InnoDB (if this version of MySQL supports it). \
Disable with --skip-innodb (will save memory).",
- (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, 1, 0, 0,
+ (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, OPT_INNODB_DEFAULT, 0, 0,
0, 0, 0},
{"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH,
"Path to individual files and their sizes.",
@@ -4273,7 +4307,7 @@ Disable with --skip-innodb (will save memory).",
#endif /* End HAVE_INNOBASE_DB */
{"isam", OPT_ISAM, "Enable ISAM (if this version of MySQL supports it). \
Disable with --skip-isam.",
- (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 1, 0, 0,
+ (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, OPT_ISAM_DEFAULT, 0, 0,
0, 0, 0},
{"language", 'L',
"Client error messages in given language. May be given as a full path.",
@@ -4400,8 +4434,8 @@ master-ssl",
GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
{"ndbcluster", OPT_NDBCLUSTER, "Enable NDB Cluster (if this version of MySQL supports it). \
Disable with --skip-ndbcluster (will save memory).",
- (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, 1, 0, 0,
- 0, 0, 0},
+ (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG,
+ OPT_NDBCLUSTER_DEFAULT, 0, 0, 0, 0, 0},
#ifdef HAVE_NDBCLUSTER_DB
{"ndb-connectstring", OPT_NDB_CONNECTSTRING,
"Connect string for ndbcluster.",
@@ -4766,7 +4800,7 @@ replicating a LOAD DATA INFILE command.",
"Data file autoextend increment in megabytes",
(gptr*) &srv_auto_extend_increment,
(gptr*) &srv_auto_extend_increment,
- 0, GET_LONG, REQUIRED_ARG, 8L, 1L, ~0L, 0, 1L, 0},
+ 0, GET_LONG, REQUIRED_ARG, 8L, 1L, 1000L, 0, 1L, 0},
{"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
"If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
(gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
@@ -4969,7 +5003,7 @@ The minimum value for this variable is 4096.",
"Default pointer size to be used for MyISAM tables.",
(gptr*) &myisam_data_pointer_size,
(gptr*) &myisam_data_pointer_size, 0, GET_ULONG, REQUIRED_ARG,
- 4, 2, 7, 0, 1, 0},
+ 4, 2, 8, 0, 1, 0},
{"myisam_max_extra_sort_file_size", OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE,
"Used to help MySQL to decide when to use the slow but safe key cache index create method.",
(gptr*) &global_system_variables.myisam_max_extra_sort_file_size,
@@ -6235,6 +6269,24 @@ static void get_options(int argc,char **argv)
if ((ho_error= handle_options(&argc, &argv, my_long_options,
get_one_option)))
exit(ho_error);
+
+#ifndef HAVE_NDBCLUSTER_DB
+ if (opt_ndbcluster)
+ sql_print_warning("this binary does not contain NDBCLUSTER storage engine");
+#endif
+#ifndef HAVE_INNOBASE_DB
+ if (opt_innodb)
+ sql_print_warning("this binary does not contain INNODB storage engine");
+#endif
+#ifndef HAVE_ISAM
+ if (opt_isam)
+ sql_print_warning("this binary does not contain ISAM storage engine");
+#endif
+#ifndef HAVE_BERKELEY_DB
+ if (opt_bdb)
+ sql_print_warning("this binary does not contain BDB storage engine");
+#endif
+
if (argc > 0)
{
fprintf(stderr, "%s: Too many arguments (first extra is '%s').\nUse --help to get a list of available options\n", my_progname, *argv);
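The load_default_groups change above replaces the hard-coded index 3 with an index derived from the array size, so the conditionally added "mysql_cluster" entry does not shift the slot used by the Windows service code. A standalone sketch of the idiom (the "mysqld-4.1" literal stands in for MYSQL_BASE_VERSION):

#include <cstdio>

// Derive the group count from the array itself so conditionally added
// entries do not invalidate a hard-coded index.
static const char *load_default_groups[]= {
  "mysql_cluster",                      // present only with HAVE_NDBCLUSTER_DB
  "mysqld", "server", "mysqld-4.1",     // "mysqld-4.1" stands in for MYSQL_BASE_VERSION
  0, 0
};
static const int load_default_groups_sz=
  sizeof(load_default_groups)/sizeof(load_default_groups[0]);

int main(int argc, char **argv)
{
  if (argc > 1)                         // service path: add an extra group name
    load_default_groups[load_default_groups_sz - 2]= argv[1];
  for (const char **g= load_default_groups; *g; g++)
    printf("option group: %s\n", *g);
  return 0;
}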
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index 5985cf63ed6..1e34ed90fee 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -165,8 +165,8 @@ my_bool net_realloc(NET *net, ulong length)
if (length >= net->max_packet_size)
{
- DBUG_PRINT("error",("Packet too large. Max sixe: %lu",
- net->max_packet_size));
+ DBUG_PRINT("error", ("Packet too large. Max size: %lu",
+ net->max_packet_size));
net->error= 1;
net->report_error= 1;
net->last_errno= ER_NET_PACKET_TOO_LARGE;
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 9b2e9e45bac..5a2044a59f4 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -154,7 +154,7 @@ class FT_SELECT: public QUICK_SELECT {
public:
FT_SELECT(THD *thd, TABLE *table, uint key):
QUICK_SELECT (thd, table, key, 1) { init(); }
-
+ ~FT_SELECT() { file->ft_end(); }
int init() { return error= file->ft_init(); }
int get_next() { return error= file->ft_read(record); }
};
diff --git a/sql/set_var.cc b/sql/set_var.cc
index bbc9cf77c9f..b92ceadf15f 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -2724,24 +2724,23 @@ sys_var *find_sys_var(const char *str, uint length)
int sql_set_variables(THD *thd, List<set_var_base> *var_list)
{
- int error= 0;
+ int error;
List_iterator_fast<set_var_base> it(*var_list);
DBUG_ENTER("sql_set_variables");
set_var_base *var;
while ((var=it++))
{
- if ((error=var->check(thd)))
+ if ((error= var->check(thd)))
goto err;
}
- if (!thd->net.report_error)
+ if (!(error= test(thd->net.report_error)))
{
it.rewind();
while ((var= it++))
error|= var->update(thd); // Returns 0, -1 or 1
}
- else
- error= 1;
+
err:
free_underlaid_joins(thd, &thd->lex->select_lex);
DBUG_RETURN(error);
diff --git a/sql/slave.cc b/sql/slave.cc
index 6d5c997bade..bd9650ed369 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -759,7 +759,7 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len)
1 should be logged/replicated
*/
-int tables_ok(THD* thd, TABLE_LIST* tables)
+bool tables_ok(THD* thd, TABLE_LIST* tables)
{
bool some_tables_updating= 0;
DBUG_ENTER("tables_ok");
@@ -1820,7 +1820,8 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname,
position is at the beginning of the file, and will read the
"signature" and then fast-forward to the last position read.
*/
- if (thread_mask & SLAVE_SQL) {
+ if (thread_mask & SLAVE_SQL)
+ {
my_b_seek(mi->rli.cur_log, (my_off_t) 0);
}
DBUG_RETURN(0);
diff --git a/sql/slave.h b/sql/slave.h
index a4d123329c6..08cf0806717 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -496,7 +496,7 @@ int show_master_info(THD* thd, MASTER_INFO* mi);
int show_binlog_info(THD* thd);
/* See if the query uses any tables that should not be replicated */
-int tables_ok(THD* thd, TABLE_LIST* tables);
+bool tables_ok(THD* thd, TABLE_LIST* tables);
/*
Check to see if the database is ok to operate on with respect to the
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index b880a7b2b65..bfecec90237 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1229,7 +1229,7 @@ bool change_password(THD *thd, const char *host, const char *user,
new_password));
thd->clear_error();
mysql_update_log.write(thd, buff, query_length);
- Query_log_event qinfo(thd, buff, query_length, 0);
+ Query_log_event qinfo(thd, buff, query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
DBUG_RETURN(0);
}
@@ -2034,7 +2034,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
{
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
if (table->file->index_read(table->record[0], (byte*) table->field[0]->ptr,
- table->key_info[0].key_length,
+ key_length,
HA_READ_KEY_EXACT))
goto end;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index a8e1d3020ca..05b11646cd7 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -523,7 +523,7 @@ void close_temporary_tables(THD *thd)
{
/* The -1 is to remove last ',' */
thd->clear_error();
- Query_log_event qinfo(thd, query, (ulong)(end-query)-1, 0);
+ Query_log_event qinfo(thd, query, (ulong)(end-query)-1, 0, FALSE);
/*
Imagine the thread had created a temp table, then was doing a SELECT, and
the SELECT was killed. Then it's not clever to mark the statement above as
@@ -1440,7 +1440,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
{
end = strxmov(strmov(query, "DELETE FROM `"),
db,"`.`",name,"`", NullS);
- Query_log_event qinfo(thd, query, (ulong)(end-query), 0);
+ Query_log_event qinfo(thd, query, (ulong)(end-query), 0, FALSE);
mysql_bin_log.write(&qinfo);
my_free(query, MYF(0));
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 1ba34595dd9..7512e661ea7 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1243,9 +1243,10 @@ bool select_singlerow_subselect::send_data(List<Item> &items)
bool select_max_min_finder_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_max_min_finder_subselect::send_data");
- Item_singlerow_subselect *it= (Item_singlerow_subselect *)item;
+ Item_maxmin_subselect *it= (Item_maxmin_subselect *)item;
List_iterator_fast<Item> li(items);
Item *val_item= li++;
+ it->register_value();
if (it->assigned())
{
cache->store(val_item);
diff --git a/sql/sql_class.h b/sql/sql_class.h
index d0d9afc7746..419c087afbc 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1092,6 +1092,12 @@ public:
void end_statement();
};
+#define tmp_disable_binlog(A) \
+ ulong save_options= (A)->options; \
+ (A)->options&= ~OPTION_BIN_LOG;
+
+#define reenable_binlog(A) (A)->options= save_options;
+
/* Flags for the THD::system_thread (bitmap) variable */
#define SYSTEM_THREAD_DELAYED_INSERT 1
#define SYSTEM_THREAD_SLAVE_IO 2
@@ -1235,6 +1241,7 @@ class select_insert :public select_result_interceptor {
~select_insert();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
bool send_data(List<Item> &items);
+ virtual void store_values(List<Item> &values);
void send_error(uint errcode,const char *err);
bool send_eof();
/* not implemented: select_insert is never re-used in prepared statements */
@@ -1262,7 +1269,8 @@ public:
create_info(create_info_par), lock(0)
{}
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
- bool send_data(List<Item> &values);
+ void store_values(List<Item> &values);
+ void send_error(uint errcode,const char *err);
bool send_eof();
void abort();
};
@@ -1311,7 +1319,7 @@ public:
if (copy_field) /* Fix for Intel compiler */
{
delete [] copy_field;
- copy_field=0;
+ save_copy_field= copy_field= 0;
}
}
};
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index cb360859049..ccf3b55d2fb 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -466,7 +466,29 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
mysql_update_log.write(thd, query, query_length);
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, query, query_length, 0);
+ Query_log_event qinfo(thd, query, query_length, 0,
+ /* suppress_use */ TRUE);
+
+ /*
+      The write should use the database being created as the "current
+      database" and not the thread's current database, which is the
+      default. If we do not change the "current database" to the
+      database being created, the CREATE statement will not be
+      replicated when --binlog-do-db is used to select the databases
+      to be replicated.
+
+ An example (--binlog-do-db=sisyfos):
+
+ CREATE DATABASE bob; # Not replicated
+ USE bob; # 'bob' is the current database
+ CREATE DATABASE sisyfos; # Not replicated since 'bob' is
+ # current database.
+ USE sisyfos; # Will give error on slave since
+ # database does not exist.
+ */
+ qinfo.db = db;
+ qinfo.db_len = strlen(db);
+
mysql_bin_log.write(&qinfo);
}
send_ok(thd, result);
@@ -516,7 +538,15 @@ int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info)
mysql_update_log.write(thd,thd->query, thd->query_length);
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0,
+ /* suppress_use */ TRUE);
+
+      // The write should use the database being altered as the "current
+      // database" and not the thread's current database, which is the
+      // default.
+ qinfo.db = db;
+ qinfo.db_len = strlen(db);
+
thd->clear_error();
mysql_bin_log.write(&qinfo);
}
@@ -624,7 +654,15 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
mysql_update_log.write(thd, query, query_length);
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, query, query_length, 0);
+ Query_log_event qinfo(thd, query, query_length, 0,
+ /* suppress_use */ TRUE);
+
+      // The write should use the database being dropped as the "current
+      // database" and not the thread's current database, which is the
+      // default.
+ qinfo.db = db;
+ qinfo.db_len = strlen(db);
+
thd->clear_error();
mysql_bin_log.write(&qinfo);
}
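For illustration, a minimal standalone sketch of the filtering decision the comments above describe: with --binlog-do-db, the filter matches against the database recorded in the event, so CREATE/ALTER/DROP DATABASE must be logged under the database they operate on rather than the thread's current database. The helper below is a simplified stand-in, not the server's db_ok().

#include <cstdio>
#include <cstring>

/* Simplified stand-in for the --binlog-do-db filter: keep an event only
   if the database recorded in it matches the configured do-db. */
static bool event_passes_do_db(const char *event_db, const char *do_db)
{
  return event_db && strcmp(event_db, do_db) == 0;
}

int main()
{
  const char *do_db= "sisyfos";       /* --binlog-do-db=sisyfos        */
  const char *thread_db= "bob";       /* USE bob; thread's current db  */
  const char *stmt_db= "sisyfos";     /* CREATE DATABASE sisyfos       */

  /* Logged under the thread's current database: the filter drops it. */
  printf("thread db:  %s\n",
         event_passes_do_db(thread_db, do_db) ? "replicated" : "dropped");
  /* Logged with qinfo.db overridden to the database being created: kept. */
  printf("created db: %s\n",
         event_passes_do_db(stmt_db, do_db) ? "replicated" : "dropped");
  return 0;
}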
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 09893970803..29d86a99ff3 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -216,7 +216,7 @@ cleanup:
if (error <= 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- log_delayed);
+ log_delayed, FALSE);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1;
}
@@ -565,7 +565,7 @@ bool multi_delete::send_eof()
if (error <= 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- log_delayed);
+ log_delayed, FALSE);
if (mysql_bin_log.write(&qinfo) && !normal_tables)
local_error=1; // Log write failed: roll back the SQL statement
}
@@ -674,7 +674,7 @@ end:
{
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- thd->tmp_table);
+ thd->tmp_table, FALSE);
mysql_bin_log.write(&qinfo);
}
send_ok(thd); // This should return record count
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f191a4b327a..768acb0cf9e 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -367,7 +367,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
if (error <= 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- log_delayed);
+ log_delayed, FALSE);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1;
}
@@ -1363,7 +1363,7 @@ bool delayed_insert::handle_inserts(void)
mysql_update_log.write(&thd,row->query, row->query_length);
if (row->log_query & DELAYED_LOG_BIN && using_bin_log)
{
- Query_log_event qinfo(&thd, row->query, row->query_length,0);
+ Query_log_event qinfo(&thd, row->query, row->query_length,0, FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -1456,7 +1456,6 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
restore_record(table,default_values); // Get empty record
table->next_number_field=table->found_next_number_field;
- thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
thd->cuted_fields=0;
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
@@ -1486,27 +1485,34 @@ select_insert::~select_insert()
bool select_insert::send_data(List<Item> &values)
{
DBUG_ENTER("select_insert::send_data");
+ bool error=0;
if (unit->offset_limit_cnt)
{ // using limit offset,count
unit->offset_limit_cnt--;
DBUG_RETURN(0);
}
- if (fields->elements)
- fill_record(*fields, values, 1);
- else
- fill_record(table->field, values, 1);
- if (thd->net.report_error || write_record(table,&info))
- DBUG_RETURN(1);
- if (table->next_number_field) // Clear for next record
+ thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
+ store_values(values);
+ error=thd->net.report_error || write_record(table,&info);
+ thd->count_cuted_fields= CHECK_FIELD_IGNORE;
+ if (!error && table->next_number_field) // Clear for next record
{
table->next_number_field->reset();
if (! last_insert_id && thd->insert_id_used)
last_insert_id=thd->insert_id();
}
- DBUG_RETURN(0);
+ DBUG_RETURN(error);
}
+void select_insert::store_values(List<Item> &values)
+{
+ if (fields->elements)
+ fill_record(*fields, values, 1);
+ else
+ fill_record(table->field, values, 1);
+}
+
void select_insert::send_error(uint errcode,const char *err)
{
DBUG_ENTER("select_insert::send_error");
@@ -1538,7 +1544,7 @@ void select_insert::send_error(uint errcode,const char *err)
if (mysql_bin_log.is_open())
{
Query_log_event qinfo(thd, thd->query, thd->query_length,
- table->file->has_transactions());
+ table->file->has_transactions(), FALSE);
mysql_bin_log.write(&qinfo);
}
if (!table->tmp_table)
@@ -1580,7 +1586,7 @@ bool select_insert::send_eof()
if (!error)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- table->file->has_transactions());
+ table->file->has_transactions(), FALSE);
mysql_bin_log.write(&qinfo);
}
if ((error2=ha_autocommit_or_rollback(thd,error)) && ! error)
@@ -1636,7 +1642,6 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table->next_number_field=table->found_next_number_field;
restore_record(table,default_values); // Get empty record
- thd->count_cuted_fields= CHECK_FIELD_WARN; // count warnings
thd->cuted_fields=0;
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
@@ -1646,23 +1651,21 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
}
-bool select_create::send_data(List<Item> &values)
+void select_create::store_values(List<Item> &values)
{
- if (unit->offset_limit_cnt)
- { // using limit offset,count
- unit->offset_limit_cnt--;
- return 0;
- }
fill_record(field, values, 1);
- if (thd->net.report_error ||write_record(table,&info))
- return 1;
- if (table->next_number_field) // Clear for next record
- {
- table->next_number_field->reset();
- if (! last_insert_id && thd->insert_id_used)
- last_insert_id=thd->insert_id();
- }
- return 0;
+}
+
+
+void select_create::send_error(uint errcode,const char *err)
+{
+ /*
+ Disable binlog, because we "roll back" partial inserts in ::abort
+ by removing the table, even for non-transactional tables.
+ */
+ tmp_disable_binlog(thd);
+ select_insert::send_error(errcode, err);
+ reenable_binlog(thd);
}
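The tmp_disable_binlog()/reenable_binlog() pair added to sql_class.h simply saves the session option bits, clears OPTION_BIN_LOG, and restores the saved bits afterwards, so anything executed in between is not binlogged. A self-contained sketch of the same pattern, with a plain flags struct standing in for THD and an illustrative bit value; as with the macros above, save_options is declared in the enclosing scope, so the pair can be used at most once per block.

#include <cstdio>

#define OPTION_BIN_LOG (1UL << 0)          /* illustrative bit value only */

struct Session { unsigned long options; }; /* stand-in for THD */

#define tmp_disable_binlog(A) \
  unsigned long save_options= (A)->options; \
  (A)->options&= ~OPTION_BIN_LOG;

#define reenable_binlog(A) (A)->options= save_options;

int main()
{
  Session s= { OPTION_BIN_LOG };
  tmp_disable_binlog(&s);
  /* Statements executed here would not be written to the binary log. */
  printf("inside: %s\n", (s.options & OPTION_BIN_LOG) ? "binlog on" : "binlog off");
  reenable_binlog(&s);
  printf("after:  %s\n", (s.options & OPTION_BIN_LOG) ? "binlog on" : "binlog off");
  return 0;
}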
@@ -1710,7 +1713,7 @@ void select_create::abort()
enum db_type table_type=table->db_type;
if (!table->tmp_table)
{
- ulong version= table->version;
+ ulong version= table->version;
hash_delete(&open_cache,(byte*) table);
if (!create_info->table_existed)
quick_rm_table(table_type, db, name);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 679ffb2140e..1d9afcc94a4 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -135,7 +135,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list);
lex->select_lex.options= 0;
lex->describe= 0;
- lex->derived_tables= FALSE;
+ lex->subqueries= lex->derived_tables= FALSE;
lex->lock_option= TL_READ;
lex->found_colon= 0;
lex->safe_to_cache_query= 1;
@@ -157,6 +157,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE);
lex->sql_command=SQLCOM_END;
lex->duplicates= DUP_ERROR;
+ lex->proc_list.first= 0;
}
void lex_end(LEX *lex)
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 90c020b3e93..b055a022eb4 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -592,7 +592,6 @@ typedef struct st_lex
List<set_var_base> var_list;
List<Item_param> param_list;
SQL_LIST proc_list, auxilliary_table_list, save_list;
- TYPELIB *interval;
create_field *last_field;
char *savepoint_name; // Transaction savepoint id
udf_func udf;
@@ -619,6 +618,7 @@ typedef struct st_lex
bool in_comment, ignore_space, verbose, no_write_to_binlog;
bool derived_tables;
bool safe_to_cache_query;
+ bool subqueries;
ALTER_INFO alter_info;
/* Prepared statements SQL syntax:*/
LEX_STRING prepared_stmt_name; /* Statement name (in all queries) */
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 3dec17ae8ba..e73c3d95b42 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1101,13 +1101,25 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg)
thd->init_for_queries();
while (fgets(buff, thd->net.max_packet, file))
{
- uint length=(uint) strlen(buff);
- if (buff[length-1]!='\n' && !feof(file))
+ ulong length= (ulong) strlen(buff);
+ while (buff[length-1] != '\n' && !feof(file))
{
- send_error(thd,ER_NET_PACKET_TOO_LARGE, NullS);
- thd->fatal_error();
- break;
+ /*
+          We got only part of the current string. Try to enlarge the
+          net buffer, then read the rest of the current string.
+ */
+ if (net_realloc(&(thd->net), 2 * thd->net.max_packet))
+ {
+ send_error(thd, thd->net.last_errno, NullS);
+ thd->is_fatal_error= 1;
+ break;
+ }
+ buff= (char*) thd->net.buff;
+ fgets(buff + length, thd->net.max_packet - length, file);
+ length+= (ulong) strlen(buff + length);
}
+ if (thd->is_fatal_error)
+ break;
while (length && (my_isspace(thd->charset(), buff[length-1]) ||
buff[length-1] == ';'))
length--;
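The bootstrap loop above now survives lines longer than the initial packet buffer by doubling the buffer and reading the remainder until a newline (or EOF) is seen. A self-contained sketch of the same read-and-grow pattern over a plain malloc'ed buffer; the names are illustrative, not the server's net layer.

#include <cstdio>
#include <cstdlib>
#include <cstring>

/* Read one complete line, growing the buffer as needed.
   'initial_size' must be at least 2.  Returns a malloc'ed string the
   caller frees, or NULL on EOF / allocation failure. */
static char *read_full_line(FILE *file, size_t initial_size)
{
  size_t cap= initial_size;
  char *buff= (char*) malloc(cap);
  if (!buff || !fgets(buff, (int) cap, file))
  {
    free(buff);
    return NULL;
  }
  size_t length= strlen(buff);
  while (buff[length - 1] != '\n' && !feof(file))
  {
    /* Only part of the line fit: double the buffer, then read the rest. */
    char *bigger= (char*) realloc(buff, cap*= 2);
    if (!bigger)
    {
      free(buff);
      return NULL;
    }
    buff= bigger;
    if (!fgets(buff + length, (int) (cap - length), file))
      break;
    length+= strlen(buff + length);
  }
  return buff;
}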
@@ -2587,7 +2599,9 @@ unsent_create_error:
check_access(thd, SELECT_ACL | EXTRA_ACL, tables->db,
&tables->grant.privilege,0,0))
goto error;
- res = mysqld_show_create(thd, tables);
+ if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0))
+ goto error;
+ res= mysqld_show_create(thd, tables);
break;
}
#endif
@@ -2613,7 +2627,7 @@ unsent_create_error:
if (mysql_bin_log.is_open())
{
thd->clear_error(); // No binlog error generated
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -2642,7 +2656,7 @@ unsent_create_error:
if (mysql_bin_log.is_open())
{
thd->clear_error(); // No binlog error generated
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -2665,7 +2679,7 @@ unsent_create_error:
if (mysql_bin_log.is_open())
{
thd->clear_error(); // No binlog error generated
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -3183,9 +3197,15 @@ purposes internal to the MySQL server", MYF(0));
}
case SQLCOM_ALTER_DB:
{
- if (!strip_sp(lex->name) || check_db_name(lex->name))
+ char *db= lex->name ? lex->name : thd->db;
+ if (!db)
{
- net_printf(thd, ER_WRONG_DB_NAME, lex->name);
+ send_error(thd, ER_NO_DB_ERROR);
+ goto error;
+ }
+ if (!strip_sp(db) || check_db_name(db))
+ {
+ net_printf(thd, ER_WRONG_DB_NAME, db);
break;
}
/*
@@ -3197,21 +3217,21 @@ purposes internal to the MySQL server", MYF(0));
*/
#ifdef HAVE_REPLICATION
if (thd->slave_thread &&
- (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) ||
- !db_ok_with_wild_table(lex->name)))
+ (!db_ok(db, replicate_do_db, replicate_ignore_db) ||
+ !db_ok_with_wild_table(db)))
{
my_error(ER_SLAVE_IGNORED_TABLE, MYF(0));
break;
}
#endif
- if (check_access(thd,ALTER_ACL,lex->name,0,1,0))
+ if (check_access(thd, ALTER_ACL, db, 0, 1, 0))
break;
if (thd->locked_tables || thd->active_transaction())
{
send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION);
goto error;
}
- res=mysql_alter_db(thd,lex->name,&lex->create_info);
+ res= mysql_alter_db(thd, db, &lex->create_info);
break;
}
case SQLCOM_SHOW_CREATE_DB:
@@ -3261,7 +3281,7 @@ purposes internal to the MySQL server", MYF(0));
mysql_update_log.write(thd, thd->query, thd->query_length);
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
send_ok(thd);
@@ -3277,7 +3297,7 @@ purposes internal to the MySQL server", MYF(0));
mysql_update_log.write(thd, thd->query, thd->query_length);
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
send_ok(thd);
@@ -3344,7 +3364,7 @@ purposes internal to the MySQL server", MYF(0));
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -3365,7 +3385,7 @@ purposes internal to the MySQL server", MYF(0));
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
if (mqh_used && lex->sql_command == SQLCOM_GRANT)
@@ -3408,7 +3428,7 @@ purposes internal to the MySQL server", MYF(0));
mysql_update_log.write(thd, thd->query, thd->query_length);
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -3939,7 +3959,6 @@ mysql_init_select(LEX *lex)
{
DBUG_ASSERT(lex->result == 0);
lex->exchange= 0;
- lex->proc_list.first= 0;
}
}
@@ -3955,6 +3974,7 @@ mysql_new_select(LEX *lex, bool move_down)
select_lex->init_select();
if (move_down)
{
+ lex->subqueries= TRUE;
/* first select_lex of subselect or derived table */
SELECT_LEX_UNIT *unit;
if (!(unit= new(lex->thd->mem_root) SELECT_LEX_UNIT()))
@@ -4114,31 +4134,6 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length)
#endif
-/*
- Calculate interval lengths.
- Strip trailing spaces from all strings.
- After this function call:
- - ENUM uses max_length
- - SET uses tot_length.
-*/
-void calculate_interval_lengths(THD *thd, TYPELIB *interval,
- uint32 *max_length, uint32 *tot_length)
-{
- const char **pos;
- uint *len;
- CHARSET_INFO *cs= thd->variables.character_set_client;
- *max_length= *tot_length= 0;
- for (pos= interval->type_names, len= interval->type_lengths;
- *pos ; pos++, len++)
- {
- *len= (uint) strip_sp((char*) *pos);
- uint length= cs->cset->numchars(cs, *pos, *pos + *len);
- *tot_length+= length;
- set_if_bigger(*max_length, (uint32)length);
- }
-}
-
-
/*****************************************************************************
** Store field definition for create
** Return 0 if ok
@@ -4149,7 +4144,8 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
uint type_modifier,
Item *default_value, Item *on_update_value,
LEX_STRING *comment,
- char *change, TYPELIB *interval, CHARSET_INFO *cs,
+ char *change,
+ List<String> *interval_list, CHARSET_INFO *cs,
uint uint_geom_type)
{
register create_field *new_field;
@@ -4444,62 +4440,39 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
break;
case FIELD_TYPE_SET:
{
- if (interval->count > sizeof(longlong)*8)
+ if (interval_list->elements > sizeof(longlong)*8)
{
- net_printf(thd,ER_TOO_BIG_SET,field_name); /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
+ net_printf(thd,ER_TOO_BIG_SET,field_name); /* purecov: inspected */
+ DBUG_RETURN(1); /* purecov: inspected */
}
- new_field->pack_length=(interval->count+7)/8;
+ new_field->pack_length= (interval_list->elements + 7) / 8;
if (new_field->pack_length > 4)
- new_field->pack_length=8;
- new_field->interval=interval;
- uint32 dummy_max_length;
- calculate_interval_lengths(thd, interval,
- &dummy_max_length, &new_field->length);
- new_field->length+= (interval->count - 1);
- set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1);
- if (default_value)
- {
- char *not_used;
- uint not_used2;
- bool not_used3;
-
- thd->cuted_fields=0;
- String str,*res;
- res=default_value->val_str(&str);
- (void) find_set(interval, res->ptr(), res->length(),
- &my_charset_bin,
- &not_used, &not_used2, &not_used3);
- if (thd->cuted_fields)
- {
- net_printf(thd,ER_INVALID_DEFAULT,field_name);
- DBUG_RETURN(1);
- }
- }
+ new_field->pack_length=8;
+
+ List_iterator<String> it(*interval_list);
+ String *tmp;
+ while ((tmp= it++))
+ new_field->interval_list.push_back(tmp);
+ /*
+        Set a fake length of 1 to pass the length checks below.
+        The real length will be set in mysql_prepare_table(),
+        once the character set of the column is known.
+ */
+ new_field->length= 1;
}
break;
case FIELD_TYPE_ENUM:
{
- new_field->interval=interval;
- new_field->pack_length=interval->count < 256 ? 1 : 2; // Should be safe
+ // Should be safe
+ new_field->pack_length= interval_list->elements < 256 ? 1 : 2;
- uint32 dummy_tot_length;
- calculate_interval_lengths(thd, interval,
- &new_field->length, &dummy_tot_length);
- set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1);
- if (default_value)
- {
- String str,*res;
- res=default_value->val_str(&str);
- res->strip_sp();
- if (!find_type(interval, res->ptr(), res->length(), 0))
- {
- net_printf(thd,ER_INVALID_DEFAULT,field_name);
- DBUG_RETURN(1);
- }
- }
- break;
+ List_iterator<String> it(*interval_list);
+ String *tmp;
+ while ((tmp= it++))
+ new_field->interval_list.push_back(tmp);
+ new_field->length= 1; // See comment for FIELD_TYPE_SET above.
}
+ break;
}
if ((new_field->length > MAX_FIELD_CHARLENGTH && type != FIELD_TYPE_SET &&
@@ -5113,9 +5086,9 @@ Item * all_any_subquery_creator(Item *left_expr,
Item_allany_subselect *it=
new Item_allany_subselect(left_expr, (*cmp)(all), select_lex, all);
if (all)
- return it->upper_not= new Item_func_not_all(it); /* ALL */
+ return it->upper_item= new Item_func_not_all(it); /* ALL */
- return it; /* ANY/SOME */
+ return it->upper_item= new Item_func_nop_all(it); /* ANY/SOME */
}
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 69e3cddfdde..bcb9d18d827 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -426,8 +426,17 @@ static void set_param_date(Item_param *param, uchar **pos, ulong len)
#else/*!EMBEDDED_LIBRARY*/
void set_param_time(Item_param *param, uchar **pos, ulong len)
{
- MYSQL_TIME *to= (MYSQL_TIME*)*pos;
- param->set_time(to, MYSQL_TIMESTAMP_TIME,
+ MYSQL_TIME tm= *((MYSQL_TIME*)*pos);
+ tm.hour+= tm.day * 24;
+ tm.day= tm.year= tm.month= 0;
+ if (tm.hour > 838)
+ {
+ /* TODO: add warning 'Data truncated' here */
+ tm.hour= 838;
+ tm.minute= 59;
+ tm.second= 59;
+ }
+ param->set_time(&tm, MYSQL_TIMESTAMP_TIME,
MAX_TIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN);
}
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index afaf2ed0923..388034e0f1a 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -84,7 +84,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list)
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
send_ok(thd);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 40dae434c5e..9e23163c35e 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -936,7 +936,7 @@ JOIN::optimize()
}
}
- if (select_lex->master_unit()->uncacheable)
+ if (thd->lex->subqueries)
{
if (!(tmp_join= (JOIN*)thd->alloc(sizeof(JOIN))))
DBUG_RETURN(-1);
@@ -3833,7 +3833,9 @@ JOIN::join_free(bool full)
JOIN_TAB *tab,*end;
DBUG_ENTER("JOIN::join_free");
- full= full || !select_lex->uncacheable;
+ full= full || (!select_lex->uncacheable &&
+ !thd->lex->subqueries &&
+ !thd->lex->describe); // do not cleanup too early on EXPLAIN
if (table)
{
@@ -3862,6 +3864,7 @@ JOIN::join_free(bool full)
for (tab= join_tab, end= tab+tables; tab != end; tab++)
tab->cleanup();
table= 0;
+ tables= 0;
}
else
{
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 4454499c1fc..ba13dd1ff04 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1168,6 +1168,15 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
{
uchar chr= (uchar) *name;
length= my_mbcharlen(system_charset_info, chr);
+ /*
+        my_mbcharlen can return 0 on a wrong multibyte
+        sequence. This can happen when upgrading from 4.0
+        and an identifier contains accented characters.
+        The manual says such identifiers are not supported,
+        so we just set length to 1 to avoid hanging in an
+        endless loop.
+ */
+ if (!length)
+ length= 1;
if (length == 1 && chr == (uchar) quote_char)
packet->append(&quote_char, 1, system_charset_info);
packet->append(name, length, packet->charset());
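A standalone sketch of the quoting loop above, showing why the zero-length guard matters: if the per-character length callback reports 0 for an invalid multibyte sequence, treating it as one byte keeps the loop advancing instead of spinning forever. The char_length() callback here is a hypothetical stand-in for my_mbcharlen(), and the function name is illustrative.

#include <string>
#include <cstddef>

/* Append 'name' to 'out' wrapped in quote_char, doubling any embedded
   quote character.  char_length() reports the byte length of the
   character starting at the given offset, or 0 for an invalid sequence. */
static void quote_identifier(std::string *out, const std::string &name,
                             char quote_char,
                             size_t (*char_length)(const std::string&, size_t))
{
  out->push_back(quote_char);
  for (size_t i= 0; i < name.size(); )
  {
    size_t length= char_length(name, i);
    if (!length)
      length= 1;                  /* invalid sequence: step one byte, never hang */
    if (length == 1 && name[i] == quote_char)
      out->push_back(quote_char); /* escape the quote character by doubling it */
    out->append(name, i, length);
    i+= length;
  }
  out->push_back(quote_char);
}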
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index eedd9388877..c798760cfa8 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -29,12 +29,6 @@
#include <io.h>
#endif
-#define tmp_disable_binlog(A) \
- ulong save_options= (A)->options; \
- (A)->options&= ~OPTION_BIN_LOG;
-
-#define reenable_binlog(A) (A)->options= save_options;
-
const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
@@ -282,7 +276,8 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
if (!error)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- tmp_table_deleted && !some_tables_deleted);
+ tmp_table_deleted && !some_tables_deleted,
+ FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -392,6 +387,41 @@ void check_duplicates_in_interval(const char *set_or_name,
}
}
+
+/*
+ Check TYPELIB (set or enum) max and total lengths
+
+ SYNOPSIS
+ calculate_interval_lengths()
+ cs charset+collation pair of the interval
+ typelib list of values for the column
+ max_length length of the longest item
+ tot_length sum of the item lengths
+
+ DESCRIPTION
+ After this function call:
+ - ENUM uses max_length
+ - SET uses tot_length.
+
+ RETURN VALUES
+ void
+*/
+void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval,
+ uint32 *max_length, uint32 *tot_length)
+{
+ const char **pos;
+ uint *len;
+ *max_length= *tot_length= 0;
+ for (pos= interval->type_names, len= interval->type_lengths;
+ *pos ; pos++, len++)
+ {
+ uint length= cs->cset->numchars(cs, *pos, *pos + *len);
+ *tot_length+= length;
+ set_if_bigger(*max_length, (uint32)length);
+ }
+}
+
+
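As a concrete example of the calculation documented above: for the values 'a', 'bb', 'ccc' in a single-byte character set, max_length ends up as 3 (the longest item, what ENUM needs) and tot_length as 6 (the sum, what SET needs). A stripped-down sketch using byte lengths in place of cs->cset->numchars():

#include <algorithm>
#include <cstring>

/* Byte-length version of the calculation; the server counts characters
   through the column collation instead of bytes. */
static void interval_lengths(const char **names, unsigned *max_length,
                             unsigned *tot_length)
{
  *max_length= *tot_length= 0;
  for (const char **pos= names; *pos; pos++)
  {
    unsigned length= (unsigned) strlen(*pos);
    *tot_length+= length;
    *max_length= std::max(*max_length, length);
  }
}

/*
  const char *names[]= { "a", "bb", "ccc", 0 };
  unsigned m, t;
  interval_lengths(names, &m, &t);   // m == 3, t == 6
*/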
/*
Preparation for table creation
@@ -455,6 +485,93 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(-1);
}
+ if (sql_field->sql_type == FIELD_TYPE_SET ||
+ sql_field->sql_type == FIELD_TYPE_ENUM)
+ {
+ uint32 dummy;
+ CHARSET_INFO *cs= sql_field->charset;
+ TYPELIB *interval= sql_field->interval;
+
+ /*
+          Create a typelib from interval_list and, if necessary,
+          convert the strings from the client character set to the
+          column character set.
+ */
+ if (!interval)
+ {
+ interval= sql_field->interval= typelib(sql_field->interval_list);
+ List_iterator<String> it(sql_field->interval_list);
+ String conv, *tmp;
+ for (uint i= 0; (tmp= it++); i++)
+ {
+ if (String::needs_conversion(tmp->length(), tmp->charset(),
+ cs, &dummy))
+ {
+ uint cnv_errs;
+ conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs);
+ char *buf= (char*) sql_alloc(conv.length()+1);
+ memcpy(buf, conv.ptr(), conv.length());
+ buf[conv.length()]= '\0';
+ interval->type_names[i]= buf;
+ interval->type_lengths[i]= conv.length();
+ }
+
+ // Strip trailing spaces.
+ uint lengthsp= cs->cset->lengthsp(cs, interval->type_names[i],
+ interval->type_lengths[i]);
+ interval->type_lengths[i]= lengthsp;
+ ((uchar *)interval->type_names[i])[lengthsp]= '\0';
+ }
+ sql_field->interval_list.empty(); // Don't need interval_list anymore
+ }
+
+ /*
+ Convert the default value from client character
+ set into the column character set if necessary.
+ */
+ if (sql_field->def)
+ {
+ sql_field->def=
+ sql_field->def->safe_charset_converter(cs);
+ }
+
+ if (sql_field->sql_type == FIELD_TYPE_SET)
+ {
+ if (sql_field->def)
+ {
+ char *not_used;
+ uint not_used2;
+ bool not_found= 0;
+ String str, *def= sql_field->def->val_str(&str);
+ def->length(cs->cset->lengthsp(cs, def->ptr(), def->length()));
+ (void) find_set(interval, def->ptr(), def->length(),
+ cs, &not_used, &not_used2, &not_found);
+ if (not_found)
+ {
+ my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
+ DBUG_RETURN(-1);
+ }
+ }
+ calculate_interval_lengths(cs, interval, &dummy, &sql_field->length);
+ sql_field->length+= (interval->count - 1);
+ }
+ else /* FIELD_TYPE_ENUM */
+ {
+ if (sql_field->def)
+ {
+ String str, *def= sql_field->def->val_str(&str);
+ def->length(cs->cset->lengthsp(cs, def->ptr(), def->length()));
+ if (!find_type2(interval, def->ptr(), def->length(), cs))
+ {
+ my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name);
+ DBUG_RETURN(-1);
+ }
+ }
+ calculate_interval_lengths(cs, interval, &sql_field->length, &dummy);
+ }
+ set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1);
+ }
+
sql_field->create_length_to_internal_length();
/* Don't pack keys in old tables if the user has requested this */
@@ -814,8 +931,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_RETURN(-1);
}
}
- else
- if (key_info->algorithm == HA_KEY_ALG_RTREE)
+ else if (key_info->algorithm == HA_KEY_ALG_RTREE)
{
#ifdef HAVE_RTREE_KEYS
if ((key_info->key_parts & 1) == 1)
@@ -839,6 +955,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
CHARSET_INFO *ft_key_charset=0; // for FULLTEXT
for (uint column_nr=0 ; (column=cols++) ; column_nr++)
{
+ key_part_spec *dup_column;
+
it.rewind();
field=0;
while ((sql_field=it++) &&
@@ -853,9 +971,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
column->field_name);
DBUG_RETURN(-1);
}
- for (uint dup_nr= 0; dup_nr < column_nr; dup_nr++)
+ while ((dup_column= cols2++) != column)
{
- key_part_spec *dup_column= cols2++;
if (!my_strcasecmp(system_charset_info,
column->field_name, dup_column->field_name))
{
@@ -866,12 +983,6 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
cols2.rewind();
- /* for fulltext keys keyseg length is 1 for blobs (it's ignored in
- ft code anyway, and 0 (set to column width later) for char's.
- it has to be correct col width for char's, as char data are not
- prefixed with length (unlike blobs, where ft code takes data length
- from a data prefix, ignoring column->length).
- */
if (key->type == Key::FULLTEXT)
{
if ((sql_field->sql_type != FIELD_TYPE_STRING &&
@@ -1305,7 +1416,8 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
test(create_info->options &
- HA_LEX_CREATE_TMP_TABLE));
+ HA_LEX_CREATE_TMP_TABLE),
+ FALSE);
mysql_bin_log.write(&qinfo);
}
}
@@ -2230,7 +2342,8 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table,
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
test(create_info->options &
- HA_LEX_CREATE_TMP_TABLE));
+ HA_LEX_CREATE_TMP_TABLE),
+ FALSE);
mysql_bin_log.write(&qinfo);
}
res= 0;
@@ -2341,7 +2454,7 @@ mysql_discard_or_import_tablespace(THD *thd,
mysql_update_log.write(thd, thd->query,thd->query_length);
if (mysql_bin_log.is_open())
{
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
err:
@@ -2728,7 +2841,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
if (do_send_ok)
@@ -3123,7 +3236,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
goto end_temporary;
@@ -3258,7 +3371,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (mysql_bin_log.is_open())
{
thd->clear_error();
- Query_log_event qinfo(thd, thd->query, thd->query_length, 0);
+ Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE);
mysql_bin_log.write(&qinfo);
}
VOID(pthread_cond_broadcast(&COND_refresh));
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index e0e8f8d42c5..b35209faeb2 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -394,6 +394,8 @@ int st_select_lex_unit::exec()
if (uncacheable || !item || !item->assigned() || describe)
{
+ if (item)
+ item->reset_value_registration();
if (optimized && item)
{
if (item->assigned())
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 4a225913eaa..6fc68142a12 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -124,7 +124,7 @@ int mysql_update(THD *thd,
/* Check values */
table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
#endif
- if (setup_fields(thd, 0, update_table_list, values, 0, 0, 0))
+ if (setup_fields(thd, 0, update_table_list, values, 1, 0, 0))
{
free_underlaid_joins(thd, &thd->lex->select_lex);
DBUG_RETURN(-1); /* purecov: inspected */
@@ -343,7 +343,7 @@ int mysql_update(THD *thd,
if (error <= 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- log_delayed);
+ log_delayed, FALSE);
if (mysql_bin_log.write(&qinfo) && transactional_table)
error=1; // Rollback update
}
@@ -1220,7 +1220,7 @@ bool multi_update::send_eof()
if (local_error <= 0)
thd->clear_error();
Query_log_event qinfo(thd, thd->query, thd->query_length,
- log_delayed);
+ log_delayed, FALSE);
if (mysql_bin_log.write(&qinfo) && trans_safe)
local_error= 1; // Rollback update
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 03acc81b5ab..1b629e72ecc 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -608,7 +608,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%type <simple_string>
remember_name remember_end opt_ident opt_db text_or_password
- opt_constraint constraint
+ opt_constraint constraint ident_or_empty
%type <string>
text_string opt_gconcat_separator
@@ -1360,7 +1360,7 @@ field_spec:
field_ident
{
LEX *lex=Lex;
- lex->length=lex->dec=0; lex->type=0; lex->interval=0;
+ lex->length=lex->dec=0; lex->type=0;
lex->default_value= lex->on_update_value= 0;
lex->comment=0;
lex->charset=NULL;
@@ -1373,7 +1373,7 @@ field_spec:
lex->length,lex->dec,lex->type,
lex->default_value, lex->on_update_value,
lex->comment,
- lex->change,lex->interval,lex->charset,
+ lex->change,&lex->interval_list,lex->charset,
lex->uint_geom_type))
YYABORT;
};
@@ -1470,17 +1470,9 @@ type:
| FIXED_SYM float_options field_options
{ $$=FIELD_TYPE_DECIMAL;}
| ENUM {Lex->interval_list.empty();} '(' string_list ')' opt_binary
- {
- LEX *lex=Lex;
- lex->interval=typelib(lex->interval_list);
- $$=FIELD_TYPE_ENUM;
- }
+ { $$=FIELD_TYPE_ENUM; }
| SET { Lex->interval_list.empty();} '(' string_list ')' opt_binary
- {
- LEX *lex=Lex;
- lex->interval=typelib(lex->interval_list);
- $$=FIELD_TYPE_SET;
- }
+ { $$=FIELD_TYPE_SET; }
| LONG_SYM opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; }
| SERIAL_SYM
{
@@ -1877,7 +1869,7 @@ alter:
}
alter_list
{}
- | ALTER DATABASE ident
+ | ALTER DATABASE ident_or_empty
{
Lex->create_info.default_table_charset= NULL;
Lex->create_info.used_fields= 0;
@@ -1886,10 +1878,15 @@ alter:
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_ALTER_DB;
- lex->name=$3.str;
+ lex->name= $3;
};
+ident_or_empty:
+ /* empty */ { $$= 0; }
+ | ident { $$= $1.str; };
+
+
alter_list:
| DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
| IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
@@ -1924,7 +1921,7 @@ alter_list_item:
| MODIFY_SYM opt_column field_ident
{
LEX *lex=Lex;
- lex->length=lex->dec=0; lex->type=0; lex->interval=0;
+ lex->length=lex->dec=0; lex->type=0;
lex->default_value= lex->on_update_value= 0;
lex->comment=0;
lex->charset= NULL;
@@ -1939,7 +1936,7 @@ alter_list_item:
lex->length,lex->dec,lex->type,
lex->default_value, lex->on_update_value,
lex->comment,
- $3.str, lex->interval, lex->charset,
+ $3.str, &lex->interval_list, lex->charset,
lex->uint_geom_type))
YYABORT;
}
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index b5255e9be06..8ab6992a63a 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -53,8 +53,22 @@ ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs,
{
const char *pos= start;
uint var_len;
+ int mblen= 1;
- for (; pos != end && *pos != field_separator; pos++) ;
+ if (cs && cs->mbminlen > 1)
+ {
+ for ( ; pos < end; pos+= mblen)
+ {
+ my_wc_t wc;
+ if ((mblen= cs->cset->mb_wc(cs, &wc, (const uchar *) pos,
+ (const uchar *) end)) < 1)
+ mblen= 1; // Not to hang on a wrong multibyte sequence
+ if (wc == (my_wc_t) field_separator)
+ break;
+ }
+ }
+ else
+ for (; pos != end && *pos != field_separator; pos++) ;
var_len= (uint) (pos - start);
uint find= cs ? find_type2(lib, start, var_len, cs) :
find_type(lib, start, var_len, (bool) 0);
@@ -66,9 +80,9 @@ ulonglong find_set(TYPELIB *lib, const char *str, uint length, CHARSET_INFO *cs,
}
else
found|= ((longlong) 1 << (find - 1));
- if (pos == end)
+ if (pos >= end)
break;
- start= pos + 1;
+ start= pos + mblen;
}
}
return found;
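The new branch in find_set() decodes one character at a time so that, for character sets with mbminlen > 1 (UCS-2), the ',' separator is matched as a code point rather than as a raw byte that could fall inside a character. A self-contained sketch of the same scan for a fixed-width big-endian two-byte encoding; the decoder below is a stand-in, not the server's mb_wc.

#include <cstddef>

/* Stand-in decoder for a big-endian two-byte encoding: returns the number
   of bytes consumed (always 2 here) or 0 if fewer than 2 bytes remain. */
static int decode_wc(unsigned *wc, const unsigned char *pos,
                     const unsigned char *end)
{
  if (end - pos < 2)
    return 0;
  *wc= ((unsigned) pos[0] << 8) | pos[1];
  return 2;
}

/* Return the byte offset of the first encoded 'separator', or len if absent. */
static size_t find_separator(const unsigned char *str, size_t len,
                             unsigned separator)
{
  const unsigned char *pos= str, *end= str + len;
  while (pos < end)
  {
    unsigned wc;
    int mblen= decode_wc(&wc, pos, end);
    if (mblen < 1)
      mblen= 1;                 /* broken tail: step one byte so we never hang */
    else if (wc == separator)
      break;
    pos+= mblen;
  }
  return (size_t) (pos - str);
}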
diff --git a/sql/table.cc b/sql/table.cc
index cb565097c0b..992f6df0401 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -485,6 +485,32 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag,
charset= outparam->table_charset;
bzero((char*) &comment, sizeof(comment));
}
+
+ if (interval_nr && charset->mbminlen > 1)
+ {
+ /* Unescape UCS2 intervals from HEX notation */
+ TYPELIB *interval= outparam->intervals + interval_nr - 1;
+ for (uint pos= 0; pos < interval->count; pos++)
+ {
+ char *from, *to;
+ for (from= to= (char*) interval->type_names[pos]; *from; )
+ {
+ /*
+            Note: hexchar_to_int(*from++) doesn't work on some
+            compilers, e.g. IRIX.  It looks like a compiler bug in
+            inline functions combined with arguments that have a
+            side effect.  So we use from[0] and from[1] and
+            increment 'from' by two afterwards.
+ */
+
+ *to++= (char) (hexchar_to_int(from[0]) << 4) +
+ hexchar_to_int(from[1]);
+ from+= 2;
+ }
+ interval->type_lengths[pos] /= 2;
+ }
+ }
+
*field_ptr=reg_field=
make_field(record+recpos,
(uint32) field_length,
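The unescape loop above rebuilds each interval byte from two stored hex digits and halves the recorded length, undoing the escaping applied when the .frm was written (see the matching change in unireg.cc below). A standalone sketch of that decode step with a local hex-digit helper, for illustration only.

#include <cstddef>

/* Value of one uppercase hexadecimal digit ('0'-'9', 'A'-'F'). */
static int hex_to_int(char c)
{
  return (c >= '0' && c <= '9') ? c - '0' : c - 'A' + 10;
}

/* In-place decode of a hex-escaped string ('length' must be even);
   returns the new, halved length. */
static size_t hex_unescape(char *s, size_t length)
{
  char *to= s;
  for (const char *from= s; from < s + length; from+= 2)
    *to++= (char) ((hex_to_int(from[0]) << 4) + hex_to_int(from[1]));
  return length / 2;
}

/* char buf[]= "002E0041"; hex_unescape(buf, 8) leaves the 4 bytes 00 2E 00 41. */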
diff --git a/sql/unireg.cc b/sql/unireg.cc
index c82fcc4abef..6d72c6af135 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -423,6 +423,28 @@ static bool pack_header(uchar *forminfo, enum db_type table_type,
if (field->interval)
{
uint old_int_count=int_count;
+
+ if (field->charset->mbminlen > 1)
+ {
+ /* Escape UCS2 intervals using HEX notation */
+ for (uint pos= 0; pos < field->interval->count; pos++)
+ {
+ char *dst;
+ uint length= field->interval->type_lengths[pos], hex_length;
+ const char *src= field->interval->type_names[pos];
+ const char *srcend= src + length;
+ hex_length= length * 2;
+ field->interval->type_lengths[pos]= hex_length;
+ field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1);
+ for ( ; src < srcend; src++)
+ {
+ *dst++= _dig_vec_upper[((uchar) *src) >> 4];
+ *dst++= _dig_vec_upper[((uchar) *src) & 15];
+ }
+ *dst= '\0';
+ }
+ }
+
field->interval_id=get_interval_id(&int_count,create_fields,field);
if (old_int_count != int_count)
{
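And this is the matching escape step: before a UCS-2 interval is packed into the .frm file, every byte is written as two uppercase hex digits so the value survives being stored as a NUL-terminated single-byte string. A standalone sketch of the encoding, with a local digit table standing in for _dig_vec_upper.

#include <cstdlib>
#include <cstring>

static const char dig_vec_upper[]= "0123456789ABCDEF";

/* Return a malloc'ed, NUL-terminated hex form of 'src' (2 * length bytes). */
static char *hex_escape(const char *src, size_t length)
{
  char *dst= (char*) malloc(length * 2 + 1);
  if (!dst)
    return NULL;
  char *out= dst;
  for (const char *end= src + length; src < end; src++)
  {
    *out++= dig_vec_upper[((unsigned char) *src) >> 4];
    *out++= dig_vec_upper[((unsigned char) *src) & 15];
  }
  *out= '\0';
  return dst;
}

/* hex_escape("\x00\x41", 2) yields "0041"; hex_unescape() above reverses it. */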