Diffstat (limited to 'sql')
-rw-r--r--  sql/item.cc            12
-rw-r--r--  sql/item.h              4
-rw-r--r--  sql/log.cc             42
-rw-r--r--  sql/log.h              10
-rw-r--r--  sql/log_event.h         6
-rw-r--r--  sql/mysql_priv.h        2
-rw-r--r--  sql/mysqld.cc           5
-rw-r--r--  sql/opt_range.cc       93
-rw-r--r--  sql/opt_range.h        83
-rw-r--r--  sql/partition_info.cc   4
-rw-r--r--  sql/slave.cc           31
-rw-r--r--  sql/sql_base.cc        10
-rw-r--r--  sql/sql_class.cc       27
-rw-r--r--  sql/sql_load.cc         2
-rw-r--r--  sql/sql_parse.cc        9
-rw-r--r--  sql/sql_repl.cc        24
-rw-r--r--  sql/sql_select.cc      66
-rw-r--r--  sql/sql_table.cc       14
-rw-r--r--  sql/sql_yacc.yy         2
-rw-r--r--  sql/table.cc           41
-rw-r--r--  sql/table.h            16
21 files changed, 347 insertions, 156 deletions
diff --git a/sql/item.cc b/sql/item.cc
index 3407d2fecd4..2175a579f4a 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -5366,13 +5366,25 @@ inline uint char_val(char X)
X-'a'+10);
}
+Item_hex_string::Item_hex_string()
+{
+ hex_string_init("", 0);
+}
Item_hex_string::Item_hex_string(const char *str, uint str_length)
{
+ hex_string_init(str, str_length);
+}
+
+void Item_hex_string::hex_string_init(const char *str, uint str_length)
+{
max_length=(str_length+1)/2;
char *ptr=(char*) sql_alloc(max_length+1);
if (!ptr)
+ {
+ str_value.set("", 0, &my_charset_bin);
return;
+ }
str_value.set(ptr,max_length,&my_charset_bin);
char *end=ptr+max_length;
if (max_length*2 != str_length)
diff --git a/sql/item.h b/sql/item.h
index d2303853743..174995b43e6 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -2123,7 +2123,7 @@ public:
class Item_hex_string: public Item_basic_constant
{
public:
- Item_hex_string() {}
+ Item_hex_string();
Item_hex_string(const char *str,uint str_length);
enum Type type() const { return VARBIN_ITEM; }
double val_real()
@@ -2143,6 +2143,8 @@ public:
bool eq(const Item *item, bool binary_cmp) const;
virtual Item *safe_charset_converter(CHARSET_INFO *tocs);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+private:
+ void hex_string_init(const char *str, uint str_length);
};
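
The item.cc/item.h hunks above give Item_hex_string a real default constructor and funnel both constructors through a private hex_string_init(), which now also leaves str_value in a defined empty state when sql_alloc() fails. A minimal standalone sketch of the same idiom, using a hypothetical HexLiteral class rather than the server's Item hierarchy (the init-helper pattern is the point; this codebase predates C++11 delegating constructors):

    #include <cstddef>
    #include <string>

    // Both constructors funnel into one private init helper; the helper first
    // establishes a defined (empty) value, mirroring the str_value.set("", 0, ...)
    // call added above for the allocation-failure path.
    class HexLiteral {
    public:
      HexLiteral() { init("", 0); }
      HexLiteral(const char *str, size_t length) { init(str, length); }
      const std::string &value() const { return value_; }
    private:
      void init(const char *str, size_t length) {
        value_.clear();                        // defined empty state up front
        if (str == NULL || length == 0)
          return;
        value_.reserve((length + 1) / 2);      // two hex digits per output byte
        for (size_t i = 0; i + 1 < length; i += 2)   // odd-length handling omitted
          value_.push_back(static_cast<char>(hex_val(str[i]) * 16 +
                                             hex_val(str[i + 1])));
      }
      static int hex_val(char c) {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        return c - 'A' + 10;
      }
      std::string value_;
    };
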
diff --git a/sql/log.cc b/sql/log.cc
index e7090a98fd9..7d820b48c43 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1440,11 +1440,6 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data,
trx_data->has_incident());
trx_data->reset();
- /*
- We need to step the table map version after writing the
- transaction cache to disk.
- */
- mysql_bin_log.update_table_map_version();
statistic_increment(binlog_cache_use, &LOCK_status);
if (trans_log->disk_writes != 0)
{
@@ -1470,13 +1465,6 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data,
}
else // ...statement
trx_data->truncate(trx_data->before_stmt_pos);
-
- /*
- We need to step the table map version on a rollback to ensure
- that a new table map event is generated instead of the one that
- was written to the thrown-away transaction cache.
- */
- mysql_bin_log.update_table_map_version();
}
DBUG_ASSERT(thd->binlog_get_pending_rows_event() == NULL);
@@ -2437,7 +2425,7 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
MYSQL_BIN_LOG::MYSQL_BIN_LOG()
:bytes_written(0), prepared_xids(0), file_id(1), open_count(1),
- need_start_event(TRUE), m_table_map_version(0),
+ need_start_event(TRUE),
is_relay_log(0),
description_event_for_exec(0), description_event_for_queue(0)
{
@@ -4078,7 +4066,6 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
DBUG_RETURN(error);
binlog_table_maps++;
- table->s->table_map_version= mysql_bin_log.table_map_version();
DBUG_RETURN(0);
}
@@ -4169,10 +4156,8 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
file= &trx_data->trans_log;
/*
- If we are writing to the log file directly, we could avoid
- locking the log. This does not work since we need to step the
- m_table_map_version below, and that change has to be protected
- by the LOCK_log mutex.
+ If we are not writing to the log file directly, we could avoid
+ locking the log.
*/
pthread_mutex_lock(&LOCK_log);
@@ -4186,24 +4171,6 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
DBUG_RETURN(1);
}
- /*
- We step the table map version if we are writing an event
- representing the end of a statement. We do this regardless of
- wheather we write to the transaction cache or to directly to the
- file.
-
- In an ideal world, we could avoid stepping the table map version
- if we were writing to a transaction cache, since we could then
- reuse the table map that was written earlier in the transaction
- cache. This does not work since STMT_END_F implies closing all
- table mappings on the slave side.
-
- TODO: Find a solution so that table maps does not have to be
- written several times within a transaction.
- */
- if (pending->get_flags(Rows_log_event::STMT_END_F))
- ++m_table_map_version;
-
delete pending;
if (file == &log_file)
@@ -4417,9 +4384,6 @@ err:
set_write_error(thd);
}
- if (event_info->flags & LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F)
- ++m_table_map_version;
-
pthread_mutex_unlock(&LOCK_log);
DBUG_RETURN(error);
}
diff --git a/sql/log.h b/sql/log.h
index 8b5dfcb3935..5af51e14d80 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -272,8 +272,6 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
*/
bool no_auto_events;
- ulonglong m_table_map_version;
-
int write_to_file(IO_CACHE *cache);
/*
This is used to start writing to a new log file. The difference from
@@ -314,14 +312,6 @@ public:
void unlog(ulong cookie, my_xid xid);
int recover(IO_CACHE *log, Format_description_log_event *fdle);
#if !defined(MYSQL_CLIENT)
- bool is_table_mapped(TABLE *table) const
- {
- return table->s->table_map_version == table_map_version();
- }
-
- ulonglong table_map_version() const { return m_table_map_version; }
- void update_table_map_version() { ++m_table_map_version; }
-
int flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event);
int remove_pending_rows_event(THD *thd);
diff --git a/sql/log_event.h b/sql/log_event.h
index 9b7f000648d..e3ca4ca3321 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -463,10 +463,10 @@ struct sql_ex_info
#define LOG_EVENT_SUPPRESS_USE_F 0x8
/*
- The table map version internal to the log should be increased after
- the event has been written to the binary log.
+ Note: this is a placeholder for the flag
+ LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F (0x10), which is no longer
+ used; please do not reuse this value for other flags.
*/
-#define LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F 0x10
/**
@def LOG_EVENT_ARTIFICIAL_F
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 112a5c98ffd..57875089cd8 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -2269,7 +2269,7 @@ void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form);
int rename_file_ext(const char * from,const char * to,const char * ext);
bool check_db_name(LEX_STRING *db);
bool check_column_name(const char *name);
-bool check_table_name(const char *name, uint length);
+bool check_table_name(const char *name, uint length, bool check_for_path_chars);
char *get_field(MEM_ROOT *mem, Field *field);
bool get_field(MEM_ROOT *mem, Field *field, class String *res);
int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 24614737a59..3664f46995f 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -8796,6 +8796,9 @@ bool is_secure_file_path(char *path)
if (!opt_secure_file_priv)
return TRUE;
+ if (strlen(path) >= FN_REFLEN)
+ return FALSE;
+
if (my_realpath(buff1, path, 0))
{
/*
@@ -8882,6 +8885,8 @@ static int fix_paths(void)
}
else
{
+ if (strlen(opt_secure_file_priv) >= FN_REFLEN)
+ opt_secure_file_priv[FN_REFLEN-1]= '\0';
if (my_realpath(buff, opt_secure_file_priv, 0))
{
sql_print_warning("Failed to normalize the argument for --secure-file-priv.");
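
Both mysqld.cc hunks add a length guard before a path reaches my_realpath(), which writes into an FN_REFLEN-sized buffer: is_secure_file_path() now rejects anything that would not fit, and fix_paths() truncates an overlong --secure-file-priv value instead of resolving it. A hedged sketch of the two guard shapes, with kPathBufLen standing in for the server's FN_REFLEN:

    #include <cstddef>
    #include <cstring>

    // Illustrative constant standing in for FN_REFLEN.
    static const size_t kPathBufLen = 512;

    // Reject a user-supplied path outright if it cannot fit the fixed-size
    // buffer the resolution step writes into (the is_secure_file_path() guard).
    static bool fits_resolve_buffer(const char *path)
    {
      return std::strlen(path) < kPathBufLen;
    }

    // For a value the server owns, clamp it in place rather than resolving an
    // overlong string (the fix_paths() guard).
    static void clamp_to_resolve_buffer(char *path)
    {
      if (std::strlen(path) >= kPathBufLen)
        path[kPathBufLen - 1] = '\0';
    }
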
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 34757a44c2f..5c6cb64c04f 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -8532,8 +8532,6 @@ int QUICK_RANGE_SELECT::get_next()
{
int result;
KEY_MULTI_RANGE *mrange;
- key_range *start_key;
- key_range *end_key;
DBUG_ENTER("QUICK_RANGE_SELECT::get_next");
DBUG_ASSERT(multi_range_length && multi_range &&
(cur_range >= (QUICK_RANGE**) ranges.buffer) &&
@@ -8573,26 +8571,9 @@ int QUICK_RANGE_SELECT::get_next()
mrange_slot < mrange_end;
mrange_slot++)
{
- start_key= &mrange_slot->start_key;
- end_key= &mrange_slot->end_key;
last_range= *(cur_range++);
-
- start_key->key= (const uchar*) last_range->min_key;
- start_key->length= last_range->min_length;
- start_key->flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
- (last_range->flag & EQ_RANGE) ?
- HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
- start_key->keypart_map= last_range->min_keypart_map;
- end_key->key= (const uchar*) last_range->max_key;
- end_key->length= last_range->max_length;
- /*
- We use HA_READ_AFTER_KEY here because if we are reading on a key
- prefix. We want to find all keys with this prefix.
- */
- end_key->flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
- HA_READ_AFTER_KEY);
- end_key->keypart_map= last_range->max_keypart_map;
-
+ last_range->make_min_endpoint(&mrange_slot->start_key);
+ last_range->make_max_endpoint(&mrange_slot->end_key);
mrange_slot->range_flag= last_range->flag;
}
@@ -8616,49 +8597,52 @@ end:
/*
Get the next record with a different prefix.
- SYNOPSIS
- QUICK_RANGE_SELECT::get_next_prefix()
- prefix_length length of cur_prefix
- cur_prefix prefix of a key to be searched for
+ @param prefix_length length of cur_prefix
+ @param group_key_parts The number of key parts in the group prefix
+ @param cur_prefix prefix of a key to be searched for
- DESCRIPTION
- Each subsequent call to the method retrieves the first record that has a
- prefix with length prefix_length different from cur_prefix, such that the
- record with the new prefix is within the ranges described by
- this->ranges. The record found is stored into the buffer pointed by
- this->record.
- The method is useful for GROUP-BY queries with range conditions to
- discover the prefix of the next group that satisfies the range conditions.
+ Each subsequent call to the method retrieves the first record that has a
+ prefix with length prefix_length and which is different from cur_prefix,
+ such that the record with the new prefix is within the ranges described by
+ this->ranges. The record found is stored into the buffer pointed by
+ this->record. The method is useful for GROUP-BY queries with range
+ conditions to discover the prefix of the next group that satisfies the range
+ conditions.
+
+ @todo
- TODO
This method is a modified copy of QUICK_RANGE_SELECT::get_next(), so both
methods should be unified into a more general one to reduce code
duplication.
- RETURN
- 0 on success
- HA_ERR_END_OF_FILE if returned all keys
- other if some error occurred
+ @retval 0 on success
+ @retval HA_ERR_END_OF_FILE if all keys have been returned
+ @retval other if some error occurred
*/
int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length,
- key_part_map keypart_map,
+ uint group_key_parts,
uchar *cur_prefix)
{
DBUG_ENTER("QUICK_RANGE_SELECT::get_next_prefix");
+ const key_part_map keypart_map= make_prev_keypart_map(group_key_parts);
for (;;)
{
int result;
- key_range start_key, end_key;
if (last_range)
{
/* Read the next record in the same range with prefix after cur_prefix. */
- DBUG_ASSERT(cur_prefix != 0);
+ DBUG_ASSERT(cur_prefix != NULL);
result= file->index_read_map(record, cur_prefix, keypart_map,
HA_READ_AFTER_KEY);
- if (result || (file->compare_key(file->end_range) <= 0))
+ if (result || last_range->max_keypart_map == 0)
DBUG_RETURN(result);
+
+ key_range previous_endpoint;
+ last_range->make_max_endpoint(&previous_endpoint, prefix_length, keypart_map);
+ if (file->compare_key(&previous_endpoint) <= 0)
+ DBUG_RETURN(0);
}
uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer);
@@ -8670,21 +8654,9 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length,
}
last_range= *(cur_range++);
- start_key.key= (const uchar*) last_range->min_key;
- start_key.length= min(last_range->min_length, prefix_length);
- start_key.keypart_map= last_range->min_keypart_map & keypart_map;
- start_key.flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
- (last_range->flag & EQ_RANGE) ?
- HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
- end_key.key= (const uchar*) last_range->max_key;
- end_key.length= min(last_range->max_length, prefix_length);
- end_key.keypart_map= last_range->max_keypart_map & keypart_map;
- /*
- We use READ_AFTER_KEY here because if we are reading on a key
- prefix we want to find all keys with this prefix
- */
- end_key.flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
- HA_READ_AFTER_KEY);
+ key_range start_key, end_key;
+ last_range->make_min_endpoint(&start_key, prefix_length, keypart_map);
+ last_range->make_max_endpoint(&end_key, prefix_length, keypart_map);
result= file->read_range_first(last_range->min_keypart_map ? &start_key : 0,
last_range->max_keypart_map ? &end_key : 0,
@@ -8779,9 +8751,9 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
}
/*
- This is a hack: we inherit from QUICK_SELECT so that we can use the
+ This is a hack: we inherit from QUICK_RANGE_SELECT so that we can use the
get_next() interface, but we have to hold a pointer to the original
- QUICK_SELECT because its data are used all over the place. What
+ QUICK_RANGE_SELECT because its data are used all over the place. What
should be done is to factor out the data that is needed into a base
class (QUICK_SELECT), and then have two subclasses (_ASC and _DESC)
which handle the ranges and implement the get_next() function. But
@@ -10903,7 +10875,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
{
uchar *cur_prefix= seen_first_key ? group_prefix : NULL;
if ((result= quick_prefix_select->get_next_prefix(group_prefix_len,
- make_prev_keypart_map(group_key_parts), cur_prefix)))
+ group_key_parts,
+ cur_prefix)))
DBUG_RETURN(result);
seen_first_key= TRUE;
}
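
The get_next_prefix() signature change above moves the keypart-map construction inside the function: the caller in QUICK_GROUP_MIN_MAX_SELECT::next_prefix() now passes the number of group key parts, and the map is derived via make_prev_keypart_map(). Assuming the usual meaning of that macro, a bitmap with the n lowest bits set (an assumption stated here only to make the change concrete):

    #include <cassert>

    // Hypothetical stand-in for make_prev_keypart_map(n): one bit per key part,
    // the n lowest bits set (valid for n < 64).
    static unsigned long long prev_keypart_map(unsigned n)
    {
      return (1ULL << n) - 1;
    }

    int main()
    {
      assert(prev_keypart_map(1) == 0x1);   // covers only the first key part
      assert(prev_keypart_map(3) == 0x7);   // covers key parts 1..3
      return 0;
    }
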
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 8d2ba1bb0a6..e7d8297faf8 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -65,6 +65,85 @@ class QUICK_RANGE :public Sql_alloc {
dummy=0;
#endif
}
+
+ /**
+ Initializes a key_range object for communication with the storage engine.
+
+ This function facilitates communication with the Storage Engine API by
+ translating the minimum endpoint of the interval represented by this
+ QUICK_RANGE into an index range endpoint specifier for the engine.
+
+ @param kr Pointer to an uninitialized key_range C struct.
+
+ @param prefix_length The length of the search key prefix to be used for
+ lookup.
+
+ @param keypart_map A set (bitmap) of keyparts to be used.
+ */
+ void make_min_endpoint(key_range *kr, uint prefix_length,
+ key_part_map keypart_map) {
+ make_min_endpoint(kr);
+ kr->length= min(kr->length, prefix_length);
+ kr->keypart_map&= keypart_map;
+ }
+
+ /**
+ Initializes a key_range object for communication with the storage engine.
+
+ This function facilitates communication with the Storage Engine API by
+ translating the minimum endpoint of the interval represented by this
+ QUICK_RANGE into an index range endpoint specifier for the engine.
+
+ @param kr Pointer to an uninitialized key_range C struct.
+ */
+ void make_min_endpoint(key_range *kr) {
+ kr->key= (const uchar*)min_key;
+ kr->length= min_length;
+ kr->keypart_map= min_keypart_map;
+ kr->flag= ((flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
+ (flag & EQ_RANGE) ? HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
+ }
+
+ /**
+ Initializes a key_range object for communication with the storage engine.
+
+ This function facilitates communication with the Storage Engine API by
+ translating the maximum endpoint of the interval represented by this
+ QUICK_RANGE into an index range endpoint specifier for the engine.
+
+ @param kr Pointer to an uninitialized key_range C struct.
+
+ @param prefix_length The length of the search key prefix to be used for
+ lookup.
+
+ @param keypart_map A set (bitmap) of keyparts to be used.
+ */
+ void make_max_endpoint(key_range *kr, uint prefix_length,
+ key_part_map keypart_map) {
+ make_max_endpoint(kr);
+ kr->length= min(kr->length, prefix_length);
+ kr->keypart_map&= keypart_map;
+ }
+
+ /**
+ Initializes a key_range object for communication with the storage engine.
+
+ This function facilitates communication with the Storage Engine API by
+ translating the maximum endpoint of the interval represented by this
+ QUICK_RANGE into an index range endpoint specifier for the engine.
+
+ @param kr Pointer to an uninitialized key_range C struct.
+ */
+ void make_max_endpoint(key_range *kr) {
+ kr->key= (const uchar*)max_key;
+ kr->length= max_length;
+ kr->keypart_map= max_keypart_map;
+ /*
+ We use READ_AFTER_KEY here because if we are reading on a key
+ prefix we want to find all keys with this prefix
+ */
+ kr->flag= (flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY);
+ }
};
@@ -331,7 +410,7 @@ public:
int reset(void);
int get_next();
void range_end();
- int get_next_prefix(uint prefix_length, key_part_map keypart_map,
+ int get_next_prefix(uint prefix_length, uint group_key_parts,
uchar *cur_prefix);
bool reverse_sorted() { return 0; }
bool unique_key_range();
@@ -611,7 +690,7 @@ private:
uchar *record; /* Buffer where the next record is returned. */
uchar *tmp_record; /* Temporary storage for next_min(), next_max(). */
uchar *group_prefix; /* Key prefix consisting of the GROUP fields. */
- uint group_prefix_len; /* Length of the group prefix. */
+ const uint group_prefix_len; /* Length of the group prefix. */
uint group_key_parts; /* A number of keyparts in the group prefix */
uchar *last_prefix; /* Prefix of the last group for detecting EOF. */
bool have_min; /* Specify whether we are computing */
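
The make_min_endpoint()/make_max_endpoint() helpers above centralize the endpoint-filling code that get_next() and get_next_prefix() previously duplicated, and the prefix-limited overloads are built on top of the full-bound ones. A self-contained model of that layering, using illustrative types rather than the server's key_range/QUICK_RANGE:

    #include <algorithm>

    struct EndpointSketch {                   // reduced key_range stand-in
      const unsigned char *key;
      unsigned length;
      unsigned long long keypart_map;
    };

    struct RangeSketch {                      // reduced QUICK_RANGE stand-in
      const unsigned char *min_key;
      unsigned min_length;
      unsigned long long min_keypart_map;

      // Full-bound variant: describe the stored minimum endpoint as-is.
      void make_min_endpoint(EndpointSketch *kr) const {
        kr->key= min_key;
        kr->length= min_length;
        kr->keypart_map= min_keypart_map;
      }
      // Prefix-limited variant: reuse the full-bound variant, then clip the
      // length and the keypart map to the requested prefix.
      void make_min_endpoint(EndpointSketch *kr, unsigned prefix_length,
                             unsigned long long keypart_map) const {
        make_min_endpoint(kr);
        kr->length= std::min(kr->length, prefix_length);
        kr->keypart_map&= keypart_map;
      }
    };

Keeping the clipping only in the two-argument overloads means any future change to how the bounds or flags are described has to touch just the full-bound variants.
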
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index ba9ea0e876e..6e2f7dfad26 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -972,7 +972,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
part_elem->engine_type= default_engine_type;
}
if (check_table_name(part_elem->partition_name,
- strlen(part_elem->partition_name)))
+ strlen(part_elem->partition_name), FALSE))
{
my_error(ER_WRONG_PARTITION_NAME, MYF(0));
goto end;
@@ -990,7 +990,7 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
{
sub_elem= sub_it++;
if (check_table_name(sub_elem->partition_name,
- strlen(sub_elem->partition_name)))
+ strlen(sub_elem->partition_name), FALSE))
{
my_error(ER_WRONG_PARTITION_NAME, MYF(0));
goto end;
diff --git a/sql/slave.cc b/sql/slave.cc
index e8405ffcd37..2e4642d179e 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -2883,6 +2883,11 @@ pthread_handler_t handle_slave_sql(void *arg)
{
THD *thd; /* needs to be first for thread_stack */
char llbuff[22],llbuff1[22];
+ char saved_log_name[FN_REFLEN];
+ char saved_master_log_name[FN_REFLEN];
+ my_off_t saved_log_pos;
+ my_off_t saved_master_log_pos;
+ my_off_t saved_skip= 0;
Relay_log_info* rli = &((Master_info*)arg)->rli;
const char *errmsg;
@@ -3028,6 +3033,17 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
do not want to wait for next event in this case.
*/
pthread_mutex_lock(&rli->data_lock);
+ if (rli->slave_skip_counter)
+ {
+ char *pos;
+ pos= strmake(saved_log_name, rli->group_relay_log_name, FN_REFLEN - 1);
+ *pos= '\0';
+ pos= strmake(saved_master_log_name, rli->group_master_log_name, FN_REFLEN - 1);
+ *pos= '\0';
+ saved_log_pos= rli->group_relay_log_pos;
+ saved_master_log_pos= rli->group_master_log_pos;
+ saved_skip= rli->slave_skip_counter;
+ }
if (rli->until_condition != Relay_log_info::UNTIL_NONE &&
rli->is_until_satisfied(thd, NULL))
{
@@ -3046,6 +3062,21 @@ log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
thd_proc_info(thd, "Reading event from the relay log");
DBUG_ASSERT(rli->sql_thd == thd);
THD_CHECK_SENTRY(thd);
+
+ if (saved_skip && rli->slave_skip_counter == 0)
+ {
+ sql_print_information("'SQL_SLAVE_SKIP_COUNTER=%ld' executed at "
+ "relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', "
+ "master_log_pos='%ld' and new position at "
+ "relay_log_file='%s', relay_log_pos='%ld', master_log_name='%s', "
+ "master_log_pos='%ld' ",
+ (ulong) saved_skip, saved_log_name, (ulong) saved_log_pos,
+ saved_master_log_name, (ulong) saved_master_log_pos,
+ rli->group_relay_log_name, (ulong) rli->group_relay_log_pos,
+ rli->group_master_log_name, (ulong) rli->group_master_log_pos);
+ saved_skip= 0;
+ }
+
if (exec_relay_log_event(thd,rli))
{
DBUG_PRINT("info", ("exec_relay_log_event() failed"));
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 61a8d5af815..65649fc8921 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -5191,6 +5191,16 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables)
thd->variables.binlog_format));
DBUG_PRINT("info", ("multi_engine: %s",
multi_engine ? "TRUE" : "FALSE"));
+ /*
+ Reading from a self-logging engine and updating another engine
+ generates changes that are written to the binary log in the
+ statement format and may cause slaves to diverge. In the mixed
+ mode, such changes should be written to the binary log in the
+ row format.
+ */
+ if (multi_engine &&
+ (flags_some_set & HA_HAS_OWN_BINLOGGING))
+ thd->lex->set_stmt_unsafe();
int error= 0;
if (flags_all_set == 0)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 2633f03f2d6..93aa6a8268c 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1998,9 +1998,21 @@ bool select_export::send_data(List<Item> &items)
const char *from_end_pos;
const char *error_pos;
uint32 bytes;
- bytes= well_formed_copy_nchars(write_cs, cvt_buff, sizeof(cvt_buff),
+ uint64 estimated_bytes=
+ ((uint64) res->length() / res->charset()->mbminlen + 1) *
+ write_cs->mbmaxlen + 1;
+ set_if_smaller(estimated_bytes, UINT_MAX32);
+ if (cvt_str.realloc((uint32) estimated_bytes))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), (uint32) estimated_bytes);
+ goto err;
+ }
+
+ bytes= well_formed_copy_nchars(write_cs, (char *) cvt_str.ptr(),
+ cvt_str.alloced_length(),
res->charset(), res->ptr(), res->length(),
- sizeof(cvt_buff),
+ UINT_MAX32, // copy all input chars,
+ // i.e. ignore nchars parameter
&well_formed_error_pos,
&cannot_convert_error_pos,
&from_end_pos);
@@ -2018,6 +2030,15 @@ bool select_export::send_data(List<Item> &items)
"string", printable_buff,
item->name, row_count);
}
+ else if (from_end_pos < res->ptr() + res->length())
+ {
+ /*
+ result is longer than UINT_MAX32 and doesn't fit into String
+ */
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED),
+ item->full_name(), row_count);
+ }
cvt_str.length(bytes);
res= &cvt_str;
}
@@ -3804,7 +3825,6 @@ int THD::binlog_flush_pending_rows_event(bool stmt_end)
if (stmt_end)
{
pending->set_flags(Rows_log_event::STMT_END_F);
- pending->flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
binlog_table_maps= 0;
}
@@ -3932,7 +3952,6 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
{
Query_log_event qinfo(this, query_arg, query_len, is_trans, suppress_use,
errcode);
- qinfo.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
/*
Binlog table maps will be irrelevant after a Query_log_event
(they are just removed on the slave side) so after the query
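
The select_export change above replaces the fixed cvt_buff with a String grown to a worst-case estimate before conversion: each source character may be as narrow as mbminlen bytes while each target character may need mbmaxlen bytes, plus one byte of slack, and the result is capped at UINT_MAX32. A small illustration of the arithmetic (the charset widths in the example are assumptions):

    // Worst-case size of converting src_len bytes between character sets,
    // matching the estimate used above.
    static unsigned long long worst_case_bytes(unsigned long long src_len,
                                               unsigned src_mbminlen,
                                               unsigned dst_mbmaxlen)
    {
      return (src_len / src_mbminlen + 1) * dst_mbmaxlen + 1;
    }
    // Example: exporting 6 latin1 bytes (mbminlen = 1) as utf8 (mbmaxlen = 3)
    // reserves (6/1 + 1) * 3 + 1 = 22 bytes before the copy routine runs.
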
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index e121f69dfdf..6628cc4e8bb 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -511,7 +511,6 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
else
{
Delete_file_log_event d(thd, db, transactional_table);
- d.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
(void) mysql_bin_log.write(&d);
}
}
@@ -691,7 +690,6 @@ static bool write_execute_load_query_log_event(THD *thd, sql_exchange* ex,
(duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE :
(ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR),
transactional_table, FALSE, errcode);
- e.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F;
return mysql_bin_log.write(&e);
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 11481933c8a..93d80164ffb 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1310,6 +1310,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
thd->convert_string(&conv_name, system_charset_info,
packet, arg_length, thd->charset());
+ if (check_table_name(conv_name.str, conv_name.length, FALSE))
+ {
+ /* this is OK due to convert_string() null-terminating the string */
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), conv_name.str);
+ break;
+ }
+
table_list.alias= table_list.table_name= conv_name.str;
packet= arg_end + 1;
@@ -6233,7 +6240,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
DBUG_RETURN(0); // End of memory
alias_str= alias ? alias->str : table->table.str;
if (!test(table_options & TL_OPTION_ALIAS) &&
- check_table_name(table->table.str, table->table.length))
+ check_table_name(table->table.str, table->table.length, FALSE))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str);
DBUG_RETURN(0);
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 44215d90634..c220f609c09 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1134,6 +1134,10 @@ bool change_master(THD* thd, Master_info* mi)
int thread_mask;
const char* errmsg= 0;
bool need_relay_log_purge= 1;
+ char saved_host[HOSTNAME_LENGTH + 1];
+ uint saved_port;
+ char saved_log_name[FN_REFLEN];
+ my_off_t saved_log_pos;
DBUG_ENTER("change_master");
lock_slave_threads(mi);
@@ -1163,6 +1167,17 @@ bool change_master(THD* thd, Master_info* mi)
*/
/*
+ Before processing the command, save the previous state.
+ */
+ char *pos;
+ pos= strmake(saved_host, mi->host, HOSTNAME_LENGTH);
+ *pos= '\0';
+ saved_port= mi->port;
+ pos= strmake(saved_log_name, mi->master_log_name, FN_REFLEN - 1);
+ *pos= '\0';
+ saved_log_pos= mi->master_log_pos;
+
+ /*
If the user specified host or port without binlog or position,
reset binlog's name to FIRST and position to 4.
*/
@@ -1325,6 +1340,15 @@ bool change_master(THD* thd, Master_info* mi)
/* Clear the errors, for a clean start */
mi->rli.clear_error();
mi->rli.clear_until_condition();
+
+ sql_print_information("'CHANGE MASTER TO executed'. "
+ "Previous state master_host='%s', master_port='%u', master_log_file='%s', "
+ "master_log_pos='%ld'. "
+ "New state master_host='%s', master_port='%u', master_log_file='%s', "
+ "master_log_pos='%ld'.", saved_host, saved_port, saved_log_name,
+ (ulong) saved_log_pos, mi->host, mi->port, mi->master_log_name,
+ (ulong) mi->master_log_pos);
+
/*
If we don't write new coordinates to disk now, then old will remain in
relay-log.info until START SLAVE is issued; but if mysqld is shutdown
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 291432c2bb6..6f128cb8181 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -2967,8 +2967,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
s->quick=select->quick;
s->needed_reg=select->needed_reg;
select->quick=0;
- if (records == 0 && s->table->reginfo.impossible_range &&
- (s->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT))
+ if (records == 0 && s->table->reginfo.impossible_range)
{
/*
Impossible WHERE or ON expression
@@ -9091,6 +9090,46 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
/**
Nested joins perspective: Remove the last table from the join order.
+ The algorithm is the reciprocal of check_interleaving_with_nj(), hence
+ parent join nest nodes are updated only when the last table in its child
+ node is removed. The ASCII graphic below will clarify.
+
+ %A table nesting such as <tt> t1 x [ ( t2 x t3 ) x ( t4 x t5 ) ] </tt> is
+ represented by the below join nest tree.
+
+ @verbatim
+ NJ1
+ _/ / \
+ _/ / NJ2
+ _/ / / \
+ / / / \
+ t1 x [ (t2 x t3) x (t4 x t5) ]
+ @endverbatim
+
+ At the point in time when check_interleaving_with_nj() adds the table t5 to
+ the query execution plan, QEP, it also directs the node named NJ2 to mark
+ the table as covered. NJ2 does so by incrementing its @c counter
+ member. Since all of NJ2's tables are now covered by the QEP, the algorithm
+ proceeds up the tree to NJ1, incrementing its counter as well. All join
+ nests are now completely covered by the QEP.
+
+ restore_prev_nj_state() does the above in reverse. As seen above, the node
+ NJ1 contains the nodes t2, t3, and NJ2. Its counter being equal to 3 means
+ that the plan covers t2, t3, and NJ2, @e and that the sub-plan (t4 x t5)
+ completely covers NJ2. The removal of t5 from the partial plan will first
+ decrement NJ2's counter to 1. It will then detect that NJ2 went from being
+ completely to partially covered, and hence the algorithm must continue
+ upwards to NJ1 and decrement its counter to 2. %A subsequent removal of t4
+ will however not influence NJ1 since it did not un-cover the last table in
+ NJ2.
+
+ SYNOPSIS
+ restore_prev_nj_state()
+ last    join table to remove; it is assumed to be the last in the
+         current partial join order.
+
+ DESCRIPTION
+
Remove the last table from the partial join order and update the nested
joins counters and join->cur_embedding_map. It is ok to call this
function for the first table in join order (for which
@@ -9104,19 +9143,20 @@ static void restore_prev_nj_state(JOIN_TAB *last)
{
TABLE_LIST *last_emb= last->table->pos_in_table_list->embedding;
JOIN *join= last->join;
- while (last_emb)
+ for (;last_emb != NULL; last_emb= last_emb->embedding)
{
- if (!(--last_emb->nested_join->counter))
- join->cur_embedding_map&= ~last_emb->nested_join->nj_map;
- else if (last_emb->nested_join->join_list.elements-1 ==
- last_emb->nested_join->counter)
- {
- join->cur_embedding_map|= last_emb->nested_join->nj_map;
- break;
- }
- else
+ NESTED_JOIN *nest= last_emb->nested_join;
+ DBUG_ASSERT(nest->counter > 0);
+
+ bool was_fully_covered= nest->is_fully_covered();
+
+ if (--nest->counter == 0)
+ join->cur_embedding_map&= ~nest->nj_map;
+
+ if (!was_fully_covered)
break;
- last_emb= last_emb->embedding;
+
+ join->cur_embedding_map|= nest->nj_map;
}
}
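
The rewritten restore_prev_nj_state() walks up the join-nest tree and, as the doc comment above explains, only continues to the parent when the child nest has just gone from fully to partially covered. A self-contained toy model of that counter bookkeeping (the cur_embedding_map bitmap updates are omitted, and all names are illustrative):

    #include <cassert>
    #include <cstddef>

    struct Nest {
      Nest *parent;
      unsigned n_children;    // tables and child nests directly in this nest
      unsigned counter;       // how many of them the partial plan covers
      bool fully_covered() const { return counter == n_children; }
    };

    // Remove the last table of the partial plan from its innermost nest; the
    // walk continues upward only when that nest had just been fully covered.
    static void remove_last_table(Nest *nest)
    {
      for (; nest != NULL; nest= nest->parent)
      {
        assert(nest->counter > 0);
        bool was_fully_covered= nest->fully_covered();
        --nest->counter;
        if (!was_fully_covered)
          break;              // parent nests are unaffected by this removal
      }
    }

    int main()
    {
      Nest nj1= { NULL, 3, 3 };   // covers t2, t3 and NJ2, as in the figure above
      Nest nj2= { &nj1, 2, 2 };   // covers t4 and t5
      remove_last_table(&nj2);    // removing t5 un-covers NJ2, so NJ1 is updated too
      assert(nj2.counter == 1 && nj1.counter == 2);
      remove_last_table(&nj2);    // removing t4 stays local to NJ2
      assert(nj2.counter == 0 && nj1.counter == 2);
      return 0;
    }
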
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 1101be67d5e..2a2daacf724 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -435,7 +435,21 @@ uint tablename_to_filename(const char *from, char *to, uint to_length)
DBUG_PRINT("enter", ("from '%s'", from));
if ((length= check_n_cut_mysql50_prefix(from, to, to_length)))
+ {
+ /*
+ Check if the name supplied is a valid mysql 5.0 name and
+ make the name a zero length string if it's not.
+ Note that just returning a zero length is not enough:
+ a lot of places don't check the return value and expect
+ a zero terminated string.
+ */
+ if (check_table_name(to, length, TRUE))
+ {
+ to[0]= 0;
+ length= 0;
+ }
DBUG_RETURN(length);
+ }
length= strconvert(system_charset_info, from,
&my_charset_filename, to, to_length, &errors);
if (check_if_legal_tablename(to) &&
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 4f43ab8bebd..f815da006b1 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -6133,7 +6133,7 @@ alter_list_item:
{
MYSQL_YYABORT;
}
- if (check_table_name($3->table.str,$3->table.length) ||
+ if (check_table_name($3->table.str,$3->table.length, FALSE) ||
($3->db.str && check_db_name(&$3->db)))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str);
diff --git a/sql/table.cc b/sql/table.cc
index a4e2c59fb87..bd6251b5743 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -297,13 +297,6 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key,
share->version= refresh_version;
/*
- This constant is used to mark that no table map version has been
- assigned. No arithmetic is done on the value: it will be
- overwritten with a value taken from MYSQL_BIN_LOG.
- */
- share->table_map_version= ~(ulonglong)0;
-
- /*
Since alloc_table_share() can be called without any locking (for
example, ha_create_table... functions), we do not assign a table
map id here. Instead we assign a value that is not used
@@ -366,11 +359,6 @@ void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
share->path.length= share->normalized_path.length= strlen(path);
share->frm_version= FRM_VER_TRUE_VARCHAR;
- /*
- Temporary tables are not replicated, but we set up these fields
- anyway to be able to catch errors.
- */
- share->table_map_version= ~(ulonglong)0;
share->cached_row_logging_check= -1;
/*
@@ -494,6 +482,26 @@ inline bool is_system_table_name(const char *name, uint length)
}
+/**
+ Check if a string contains path elements
+*/
+
+static inline bool has_disabled_path_chars(const char *str)
+{
+ for (; *str; str++)
+ switch (*str)
+ {
+ case FN_EXTCHAR:
+ case '/':
+ case '\\':
+ case '~':
+ case '@':
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
/*
Read table definition from a binary / text based .frm file
@@ -549,7 +557,8 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags)
This kind of tables must have been opened only by the
my_open() above.
*/
- if (strchr(share->table_name.str, '@') ||
+ if (has_disabled_path_chars(share->table_name.str) ||
+ has_disabled_path_chars(share->db.str) ||
!strncmp(share->db.str, MYSQL50_TABLE_NAME_PREFIX,
MYSQL50_TABLE_NAME_PREFIX_LENGTH) ||
!strncmp(share->table_name.str, MYSQL50_TABLE_NAME_PREFIX,
@@ -2711,7 +2720,6 @@ bool check_db_name(LEX_STRING *org_name)
(name_length > NAME_CHAR_LEN)); /* purecov: inspected */
}
-
/*
Allow anything as a table name, as long as it doesn't contain an
' ' at the end
@@ -2719,7 +2727,7 @@ bool check_db_name(LEX_STRING *org_name)
*/
-bool check_table_name(const char *name, uint length)
+bool check_table_name(const char *name, uint length, bool check_for_path_chars)
{
uint name_length= 0; // name length in symbols
const char *end= name+length;
@@ -2746,6 +2754,9 @@ bool check_table_name(const char *name, uint length)
continue;
}
}
+ if (check_for_path_chars &&
+ (*name == '/' || *name == '\\' || *name == '~' || *name == FN_EXTCHAR))
+ return 1;
#endif
name++;
name_length++;
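
check_table_name() can now also screen for characters that would let a table or partition name navigate the filesystem once it is turned into a path, and open_table_def() applies a similar screen (plus '@') to the database and table names read from disk. A standalone sketch of the check, with '.' standing in for FN_EXTCHAR (an assumption here):

    // Returns true when the name contains characters that could escape the
    // database directory once the name becomes part of a filesystem path.
    static bool contains_path_chars(const char *name)
    {
      const char ext_char= '.';      // stand-in for FN_EXTCHAR
      for (; *name; name++)
        if (*name == '/' || *name == '\\' || *name == '~' || *name == ext_char)
          return true;
      return false;
    }
    // contains_path_chars("t1")            -> false: plain name, accepted
    // contains_path_chars("../mysql/user") -> true: rejected
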
diff --git a/sql/table.h b/sql/table.h
index e797ef2b2de..bddb0731625 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -430,7 +430,6 @@ typedef struct st_table_share
bool name_lock, replace_with_name_lock;
bool waiting_on_cond; /* Protection against free */
ulong table_map_id; /* for row-based replication */
- ulonglong table_map_version;
/*
Cache for row-based replication table share checks that does not
@@ -1691,7 +1690,11 @@ typedef struct st_nested_join
List<TABLE_LIST> join_list; /* list of elements in the nested join */
table_map used_tables; /* bitmap of tables in the nested join */
table_map not_null_tables; /* tables that rejects nulls */
- struct st_join_table *first_nested;/* the first nested table in the plan */
+ /**
+ Used for pointing out the first table in the plan being covered by this
+ join nest. It is used exclusively within make_outerjoin_info().
+ */
+ struct st_join_table *first_nested;
/*
Used to count tables in the nested join in 2 isolated places:
1. In make_outerjoin_info().
@@ -1701,6 +1704,15 @@ typedef struct st_nested_join
*/
uint counter;
nested_join_map nj_map; /* Bit used to identify this nested join*/
+ /**
+ True if this join nest node is completely covered by the query execution
+ plan. This means two things.
+
+ 1. All tables on its @c join_list are covered by the plan.
+
+ 2. All child join nest nodes are fully covered.
+ */
+ bool is_fully_covered() const { return join_list.elements == counter; }
} NESTED_JOIN;