author     monty@mishka.local <>  2004-09-15 22:10:31 +0300
committer  monty@mishka.local <>  2004-09-15 22:10:31 +0300
commit     91ff64e107866821efc8591fc6ecf28f3b7ee9a0 (patch)
tree       d4323a7f81c07c98095c922e124755d6ac3253b8 /sql
parent     3ce78a27cdcb9b0ba4d6e0f36f2953cf8db7b518 (diff)
download   mariadb-git-91ff64e107866821efc8591fc6ecf28f3b7ee9a0.tar.gz
Added options --auto-increment-increment and --auto-increment-offset.
This allows one to set up master <-> master replication with non-conflicting auto-increment series. Cleaned up binary log code to make it easier to add new state variables. Added simpler 'upper level' logic for artificial events (events that should not cause cleanups on slave). Simplified binary log handling. Changed how auto_increment works together with SET INSERT_ID=# to make it more predictable: now the inserted rows in a multi-row statement are set independently of the existing rows in the table. (Before, only InnoDB did this correctly.)
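
To illustrate the new options, here is a minimal standalone sketch (not part of the commit; the name demo_next_insert_id is invented for the example). It reimplements the arithmetic of the next_insert_id() helper this patch adds to sql/handler.cc and prints the series two co-masters would generate with auto_increment_increment=2 and offsets 1 and 2.

// Standalone demo; mirrors nr= ((nr + increment - offset) / increment) * increment + offset
#include <cstdio>

typedef unsigned long long ulonglong;

static ulonglong demo_next_insert_id(ulonglong nr, ulonglong increment,
                                     ulonglong offset)
{
  nr= (nr + increment - offset) / increment;    // integer division rounds down
  return nr * increment + offset;               // smallest value > nr that is ≡ offset (mod increment)
}

int main()
{
  ulonglong a= 0, b= 0;                         // last generated value on each master
  for (int i= 0; i < 5; i++)
  {
    a= demo_next_insert_id(a, 2, 1);            // master A: increment=2, offset=1
    b= demo_next_insert_id(b, 2, 2);            // master B: increment=2, offset=2
    printf("A=%llu  B=%llu\n", a, b);           // A: 1,3,5,7,9   B: 2,4,6,8,10
  }
  return 0;
}

Master A generates 1,3,5,... and master B generates 2,4,6,..., so the two series never collide.
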
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_berkeley.cc      6
-rw-r--r--  sql/ha_berkeley.h       2
-rw-r--r--  sql/ha_heap.cc          2
-rw-r--r--  sql/ha_heap.h           2
-rw-r--r--  sql/ha_innodb.cc      157
-rw-r--r--  sql/ha_innodb.h         2
-rw-r--r--  sql/ha_myisam.cc       15
-rw-r--r--  sql/ha_myisam.h         2
-rw-r--r--  sql/ha_ndbcluster.cc   19
-rw-r--r--  sql/ha_ndbcluster.h     2
-rw-r--r--  sql/handler.cc        164
-rw-r--r--  sql/handler.h           3
-rw-r--r--  sql/log.cc             59
-rw-r--r--  sql/log_event.cc      653
-rw-r--r--  sql/log_event.h       256
-rw-r--r--  sql/mysqld.cc          13
-rw-r--r--  sql/set_var.cc          9
-rw-r--r--  sql/slave.cc          122
-rw-r--r--  sql/sql_class.cc       26
-rw-r--r--  sql/sql_class.h        31
-rw-r--r--  sql/sql_help.cc         2
-rw-r--r--  sql/sql_insert.cc       8
-rw-r--r--  sql/sql_parse.cc        6
-rw-r--r--  sql/sql_prepare.cc     19
-rw-r--r--  sql/sql_table.cc        1
25 files changed, 819 insertions, 762 deletions
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index b4f07073afa..9f93437bc32 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -2089,9 +2089,9 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
}
-longlong ha_berkeley::get_auto_increment()
+ulonglong ha_berkeley::get_auto_increment()
{
- longlong nr=1; // Default if error or new key
+ ulonglong nr=1; // Default if error or new key
int error;
(void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
@@ -2140,7 +2140,7 @@ longlong ha_berkeley::get_auto_increment()
}
}
if (!error)
- nr=(longlong)
+ nr=(ulonglong)
table->next_number_field->val_int_offset(table->rec_buff_length)+1;
ha_berkeley::index_end();
(void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD);
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index 975d70abf7b..d4fb4ca5fbb 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -153,7 +153,7 @@ class ha_berkeley: public handler
int5store(to,share->auto_ident);
pthread_mutex_unlock(&share->mutex);
}
- longlong get_auto_increment();
+ ulonglong get_auto_increment();
void print_error(int error, myf errflag);
uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
bool primary_key_is_clustered() { return true; }
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index d7327362286..7340a6973b5 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -484,7 +484,7 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
create_info->auto_increment_value= auto_increment_value;
}
-longlong ha_heap::get_auto_increment()
+ulonglong ha_heap::get_auto_increment()
{
ha_heap::info(HA_STATUS_AUTO);
return auto_increment_value;
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index c326f570feb..e469a676b65 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -62,7 +62,7 @@ class ha_heap: public handler
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
- longlong get_auto_increment();
+ ulonglong get_auto_increment();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_idx(byte * buf, uint idx, const byte * key,
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 3003425a489..8da04ed0ab1 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -1627,8 +1627,6 @@ ha_innobase::open(
}
}
- auto_inc_counter_for_this_stat = 0;
-
block_size = 16 * 1024; /* Index block size in InnoDB: used by MySQL
in query optimization */
@@ -2198,7 +2196,7 @@ ha_innobase::write_row(
longlong dummy;
ibool incremented_auto_inc_for_stat = FALSE;
ibool incremented_auto_inc_counter = FALSE;
- ibool skip_auto_inc_decr;
+ ibool skip_auto_inc_decr, auto_inc_used= FALSE;
DBUG_ENTER("ha_innobase::write_row");
@@ -2260,39 +2258,34 @@ ha_innobase::write_row(
prebuilt->sql_stat_start = TRUE;
}
- /* Fetch the value the user possibly has set in the
- autoincrement field */
+ /*
+ We must use the handler code to update the auto-increment
+ value to be sure that we increment it correctly.
+ */
+ update_auto_increment();
+ auto_inc_used= 1;
- auto_inc = table->next_number_field->val_int();
+ }
- /* In replication and also otherwise the auto-inc column
- can be set with SET INSERT_ID. Then we must look at
- user_thd->next_insert_id. If it is nonzero and the user
- has not supplied a value, we must use it, and use values
- incremented by 1 in all subsequent inserts within the
- same SQL statement! */
+ if (prebuilt->mysql_template == NULL
+ || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) {
+ /* Build the template used in converting quickly between
+ the two database formats */
- if (auto_inc == 0 && user_thd->next_insert_id != 0) {
- auto_inc = user_thd->next_insert_id;
- auto_inc_counter_for_this_stat = auto_inc;
- }
+ build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
+ }
- if (auto_inc == 0 && auto_inc_counter_for_this_stat) {
- /* The user set the auto-inc counter for
- this SQL statement with SET INSERT_ID. We must
- assign sequential values from the counter. */
+ innodb_srv_conc_enter_innodb(prebuilt->trx);
- auto_inc_counter_for_this_stat++;
- incremented_auto_inc_for_stat = TRUE;
+ error = row_insert_for_mysql((byte*) record, prebuilt);
- auto_inc = auto_inc_counter_for_this_stat;
+ if (error == DB_SUCCESS && auto_inc_used) {
- /* We give MySQL a new value to place in the
- auto-inc column */
- user_thd->next_insert_id = auto_inc;
- }
+ /* Fetch the value that was set in the autoincrement field */
- if (auto_inc != 0) {
+ auto_inc = table->next_number_field->val_int();
+
+ if (auto_inc != 0) {
/* This call will calculate the max of the current
value and the value supplied by the user and
update the counter accordingly */
@@ -2304,104 +2297,19 @@ ha_innobase::write_row(
The lock is released at each SQL statement's
end. */
- innodb_srv_conc_enter_innodb(prebuilt->trx);
- error = row_lock_table_autoinc_for_mysql(prebuilt);
- innodb_srv_conc_exit_innodb(prebuilt->trx);
-
- if (error != DB_SUCCESS) {
-
- error = convert_error_code_to_mysql(error,
- user_thd);
- goto func_exit;
- }
-
- dict_table_autoinc_update(prebuilt->table, auto_inc);
- } else {
- innodb_srv_conc_enter_innodb(prebuilt->trx);
-
- if (!prebuilt->trx->auto_inc_lock) {
-
- error = row_lock_table_autoinc_for_mysql(
- prebuilt);
- if (error != DB_SUCCESS) {
- innodb_srv_conc_exit_innodb(
- prebuilt->trx);
-
- error = convert_error_code_to_mysql(
- error, user_thd);
- goto func_exit;
- }
- }
-
- /* The following call gets the value of the auto-inc
- counter of the table and increments it by 1 */
-
- auto_inc = dict_table_autoinc_get(prebuilt->table);
- incremented_auto_inc_counter = TRUE;
+ error = row_lock_table_autoinc_for_mysql(prebuilt);
- innodb_srv_conc_exit_innodb(prebuilt->trx);
+ if (error != DB_SUCCESS) {
- /* We can give the new value for MySQL to place in
- the field */
-
- user_thd->next_insert_id = auto_inc;
- }
-
- /* This call of a handler.cc function places
- user_thd->next_insert_id to the column value, if the column
- value was not set by the user */
-
- update_auto_increment();
- }
-
- if (prebuilt->mysql_template == NULL
- || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) {
- /* Build the template used in converting quickly between
- the two database formats */
-
- build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
- }
-
- innodb_srv_conc_enter_innodb(prebuilt->trx);
-
- error = row_insert_for_mysql((byte*) record, prebuilt);
+ error = convert_error_code_to_mysql(error, user_thd);
+ goto func_exit;
+ }
+ dict_table_autoinc_update(prebuilt->table, auto_inc);
+ }
+ }
innodb_srv_conc_exit_innodb(prebuilt->trx);
- if (error != DB_SUCCESS) {
- /* If the insert did not succeed we restore the value of
- the auto-inc counter we used; note that this behavior was
- introduced only in version 4.0.4.
- NOTE that a REPLACE command handles a duplicate key error
- itself, and we must not decrement the autoinc counter
- if we are performing a REPLACE statement.
- NOTE 2: if there was an error, for example a deadlock,
- which caused InnoDB to roll back the whole transaction
- already in the call of row_insert_for_mysql(), we may no
- longer have the AUTO-INC lock, and cannot decrement
- the counter here. */
-
- skip_auto_inc_decr = FALSE;
-
- if (error == DB_DUPLICATE_KEY
- && (user_thd->lex->sql_command == SQLCOM_REPLACE
- || user_thd->lex->sql_command
- == SQLCOM_REPLACE_SELECT)) {
-
- skip_auto_inc_decr= TRUE;
- }
-
- if (!skip_auto_inc_decr && incremented_auto_inc_counter
- && prebuilt->trx->auto_inc_lock) {
- dict_table_autoinc_decrement(prebuilt->table);
- }
-
- if (!skip_auto_inc_decr && incremented_auto_inc_for_stat
- && prebuilt->trx->auto_inc_lock) {
- auto_inc_counter_for_this_stat--;
- }
- }
-
error = convert_error_code_to_mysql(error, user_thd);
/* Tell InnoDB server that there might be work for
@@ -2412,6 +2320,7 @@ func_exit:
DBUG_RETURN(error);
}
+
/******************************************************************
Converts field data for storage in an InnoDB update vector. */
inline
@@ -5217,7 +5126,7 @@ initialized yet. This function does not change the value of the auto-inc
counter if it already has been initialized. Returns the value of the
auto-inc counter. */
-longlong
+ulonglong
ha_innobase::get_auto_increment()
/*=============================*/
/* out: auto-increment column value, -1 if error
@@ -5230,10 +5139,10 @@ ha_innobase::get_auto_increment()
if (error) {
- return(-1);
+ return(~(ulonglong) 0);
}
- return(nr);
+ return((ulonglong) nr);
}
/***********************************************************************
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 1bfb86de944..fa3e9f012e8 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -164,7 +164,7 @@ class ha_innobase: public handler
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
void init_table_handle_for_HANDLER();
- longlong get_auto_increment();
+ ulonglong get_auto_increment();
uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
static char *get_mysql_bin_log_name();
static ulonglong get_mysql_bin_log_pos();
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 4a6f2c556bf..938f3a40629 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -1520,8 +1520,12 @@ int ha_myisam::rename_table(const char * from, const char * to)
}
-longlong ha_myisam::get_auto_increment()
+ulonglong ha_myisam::get_auto_increment()
{
+ ulonglong nr;
+ int error;
+ byte key[MI_MAX_KEY_LENGTH];
+
if (!table->next_number_key_offset)
{ // Autoincrement at key-start
ha_myisam::info(HA_STATUS_AUTO);
@@ -1531,19 +1535,16 @@ longlong ha_myisam::get_auto_increment()
/* it's safe to call the following if bulk_insert isn't on */
mi_flush_bulk_insert(file, table->next_number_index);
- longlong nr;
- int error;
- byte key[MI_MAX_KEY_LENGTH];
(void) extra(HA_EXTRA_KEYREAD);
key_copy(key,table,table->next_number_index,
table->next_number_key_offset);
error=mi_rkey(file,table->record[1],(int) table->next_number_index,
key,table->next_number_key_offset,HA_READ_PREFIX_LAST);
if (error)
- nr=1;
+ nr= 1;
else
- nr=(longlong)
- table->next_number_field->val_int_offset(table->rec_buff_length)+1;
+ nr= ((ulonglong) table->next_number_field->
+ val_int_offset(table->rec_buff_length)+1);
extra(HA_EXTRA_NO_KEYREAD);
return nr;
}
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 972d6b18e19..527e6a49aba 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -111,7 +111,7 @@ class ha_myisam: public handler
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
- longlong get_auto_increment();
+ ulonglong get_auto_increment();
int rename_table(const char * from, const char * to);
int delete_table(const char *name);
int check(THD* thd, HA_CHECK_OPT* check_opt);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 09cb0c0f02d..8b7b5e94965 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -3099,19 +3099,18 @@ int ndbcluster_drop_database(const char *path)
}
-longlong ha_ndbcluster::get_auto_increment()
+ulonglong ha_ndbcluster::get_auto_increment()
{
+ int cache_size;
+ Uint64 auto_value;
DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
- int cache_size=
- (rows_to_insert > autoincrement_prefetch) ?
- rows_to_insert
- : autoincrement_prefetch;
- Uint64 auto_value=
- (skip_auto_increment) ?
- m_ndb->readAutoIncrementValue((NDBTAB *) m_table)
- : m_ndb->getAutoIncrementValue((NDBTAB *) m_table, cache_size);
- DBUG_RETURN((longlong)auto_value);
+ cache_size= ((rows_to_insert > autoincrement_prefetch) ?
+ rows_to_insert : autoincrement_prefetch);
+ auto_value= ((skip_auto_increment) ?
+ m_ndb->readAutoIncrementValue((NDBTAB *) m_table) :
+ m_ndb->getAutoIncrementValue((NDBTAB *) m_table, cache_size));
+ DBUG_RETURN((ulonglong) auto_value);
}
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index c49a6078e7a..777e234f935 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -204,7 +204,7 @@ class ha_ndbcluster: public handler
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
void print_results();
- longlong get_auto_increment();
+ ulonglong get_auto_increment();
int ndb_err(NdbConnection*);
bool uses_blob_value(bool all_fields);
diff --git a/sql/handler.cc b/sql/handler.cc
index e7e1c807306..8a480b0f131 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -111,7 +111,7 @@ TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"",
enum db_type ha_resolve_by_name(const char *name, uint namelen)
{
- THD *thd=current_thd;
+ THD *thd= current_thd;
if (thd && !my_strcasecmp(&my_charset_latin1, name, "DEFAULT")) {
return (enum db_type) thd->variables.table_type;
}
@@ -142,6 +142,7 @@ const char *ha_get_storage_engine(enum db_type db_type)
enum db_type ha_checktype(enum db_type database_type)
{
show_table_type_st *types;
+ THD *thd= current_thd;
for (types= sys_table_types; types->type; types++)
{
if ((database_type == types->db_type) &&
@@ -161,8 +162,8 @@ enum db_type ha_checktype(enum db_type database_type)
}
return
- DB_TYPE_UNKNOWN != (enum db_type) current_thd->variables.table_type ?
- (enum db_type) current_thd->variables.table_type :
+ DB_TYPE_UNKNOWN != (enum db_type) thd->variables.table_type ?
+ (enum db_type) thd->variables.table_type :
DB_TYPE_UNKNOWN != (enum db_type) global_system_variables.table_type ?
(enum db_type) global_system_variables.table_type :
DB_TYPE_MYISAM;
@@ -946,7 +947,7 @@ int handler::read_first_row(byte * buf, uint primary_key)
void handler::update_timestamp(byte *record)
{
- long skr= (long) current_thd->query_start();
+ long skr= (long) table->in_use->query_start();
#ifdef WORDS_BIGENDIAN
if (table->db_low_byte_first)
{
@@ -958,42 +959,165 @@ void handler::update_timestamp(byte *record)
return;
}
+
/*
- Updates field with field_type NEXT_NUMBER according to following:
- if field = 0 change field to the next free key in database.
+ Generate the next auto-increment number based on increment and offset
+
+ In most cases increment= offset= 1, in which case we get:
+ 1,2,3,4,5,...
+ If increment=10 and offset=5 and previous number is 1, we get:
+ 1,5,15,25,35,...
+*/
+
+inline ulonglong
+next_insert_id(ulonglong nr,struct system_variables *variables)
+{
+ nr= (((nr+ variables->auto_increment_increment -
+ variables->auto_increment_offset)) /
+ (ulonglong) variables->auto_increment_increment);
+ return (nr* (ulonglong) variables->auto_increment_increment +
+ variables->auto_increment_offset);
+}
+
+
+/*
+ Updates columns with type NEXT_NUMBER if:
+
+ - If column value is set to NULL (in which case
+ auto_increment_field_not_null is 0)
+ - If column is set to 0 and (sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) is not
+ set. In the future we will only set NEXT_NUMBER fields if one sets them
+ to NULL (or they are not included in the insert list).
+
+
+ There are two different cases when the above is true:
+
+ - thd->next_insert_id == 0 (This is the normal case)
+ In this case we set the set the column for the first row to the value
+ next_insert_id(get_auto_increment(column))) which is normally
+ max-used-column-value +1.
+
+ We call get_auto_increment() only for the first row in a multi-row
+ statement. For the following rows we generate new numbers based on the
+ last used number.
+
+ - thd->next_insert_id != 0. This happens when we have read a statement
+ from the binary log or when one has used SET LAST_INSERT_ID=#.
+
+ In this case we will set the column to the value of next_insert_id.
+ The next row will be given the id
+ next_insert_id(next_insert_id)
+
+ The idea is that the generated auto_increment values are predictable and
+ independent of the column values in the table. This is needed to be
+ able to replicate into a table that already has rows with a higher
+ auto-increment value than the one that is inserted.
+
+ After we have already generated an auto-increment number and the user
+ inserts a column with a higher value than the last used one, we will
+ start counting from the inserted value.
+
+ thd->next_insert_id is cleared after it's been used for a statement.
*/
void handler::update_auto_increment()
{
- longlong nr;
- THD *thd;
+ ulonglong nr;
+ THD *thd= table->in_use;
+ struct system_variables *variables= &thd->variables;
DBUG_ENTER("handler::update_auto_increment");
- if (table->next_number_field->val_int() != 0 ||
+
+ /*
+ We must save the previous value to be able to restore it if the
+ row was not inserted
+ */
+ thd->prev_insert_id= thd->next_insert_id;
+
+ if ((nr= table->next_number_field->val_int()) != 0 ||
table->auto_increment_field_not_null &&
- current_thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
+ thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
{
+ /* Clear flag for next row */
table->auto_increment_field_not_null= FALSE;
+ /* Mark that we didn't generate a new value */
auto_increment_column_changed=0;
+
+ /* Update next_insert_id if we have already generated a value */
+ if (thd->clear_next_insert_id && nr >= thd->next_insert_id)
+ {
+ if (variables->auto_increment_increment != 1)
+ nr= next_insert_id(nr, variables);
+ else
+ nr++;
+ thd->next_insert_id= nr;
+ DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
+ }
DBUG_VOID_RETURN;
}
table->auto_increment_field_not_null= FALSE;
- thd=current_thd;
- if ((nr=thd->next_insert_id))
- thd->next_insert_id=0; // Clear after use
- else
- nr=get_auto_increment();
- if (!table->next_number_field->store(nr))
+ if (!(nr= thd->next_insert_id))
+ {
+ nr= get_auto_increment();
+ if (variables->auto_increment_increment != 1)
+ nr= next_insert_id(nr-1, variables);
+ /*
+ Update next row based on the found value. This way we don't have to
+ call the handler for every generated auto-increment value on a
+ multi-row statement
+ */
+ thd->next_insert_id= nr;
+ }
+
+ DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr));
+
+ /* Mark that we should clear next_insert_id before next stmt */
+ thd->clear_next_insert_id= 1;
+
+ if (!table->next_number_field->store((longlong) nr))
thd->insert_id((ulonglong) nr);
else
thd->insert_id(table->next_number_field->val_int());
+
+ /*
+ We can't set next_insert_id if the auto-increment key is not the
+ first key part, as there is no guarantee that the first parts will be in
+ sequence
+ */
+ if (!table->next_number_key_offset)
+ {
+ /*
+ Set next insert id to point to next auto-increment value to be able to
+ handle multi-row statements
+ This works even if auto_increment_increment > 1
+ */
+ thd->next_insert_id= next_insert_id(nr, variables);
+ }
+ else
+ thd->next_insert_id= 0;
+
+ /* Mark that we generated a new value */
auto_increment_column_changed=1;
DBUG_VOID_RETURN;
}
+/*
+ restore_auto_increment
+
+ In case of error on write, we restore the last used next_insert_id value
+ because the previous value was not used.
+*/
+
+void handler::restore_auto_increment()
+{
+ THD *thd= table->in_use;
+ if (thd->next_insert_id)
+ thd->next_insert_id= thd->prev_insert_id;
+}
+
-longlong handler::get_auto_increment()
+ulonglong handler::get_auto_increment()
{
- longlong nr;
+ ulonglong nr;
int error;
(void) extra(HA_EXTRA_KEYREAD);
@@ -1014,8 +1138,8 @@ longlong handler::get_auto_increment()
if (error)
nr=1;
else
- nr=(longlong) table->next_number_field->
- val_int_offset(table->rec_buff_length)+1;
+ nr=((ulonglong) table->next_number_field->
+ val_int_offset(table->rec_buff_length)+1);
index_end();
(void) extra(HA_EXTRA_NO_KEYREAD);
return nr;
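
The large comment in the handler.cc hunk above describes two cases in update_auto_increment(): the normal one (thd->next_insert_id == 0, ask the engine once per statement) and the replayed one (SET INSERT_ID=# or binlog replay, thd->next_insert_id != 0). The toy simulation below is a standalone sketch, not MySQL source; demo_session and demo_generate_id are invented names standing in for the relevant THD fields and for handler::update_auto_increment(). It shows why a multi-row INSERT now yields the same ids on master and slave: the engine's suggestion is consulted at most once per statement and is ignored entirely when next_insert_id is preset.

#include <cstdio>

typedef unsigned long long ulonglong;

struct demo_session                  // stands in for the relevant THD fields
{
  ulonglong next_insert_id;          // 0 unless SET INSERT_ID=# / binlog replay
  ulonglong increment, offset;       // auto_increment_increment / _offset
};

static ulonglong demo_next_insert_id(ulonglong nr, const demo_session &s)
{
  nr= (nr + s.increment - s.offset) / s.increment;
  return nr * s.increment + s.offset;
}

// One auto-generated id per row; engine_hint plays the role of
// handler::get_auto_increment() (max used value in the table + 1).
static ulonglong demo_generate_id(demo_session *s, ulonglong engine_hint)
{
  ulonglong nr= s->next_insert_id;
  if (!nr)                                           // normal case: ask the engine once
    nr= (s->increment != 1) ? demo_next_insert_id(engine_hint - 1, *s)
                            : engine_hint;
  s->next_insert_id= demo_next_insert_id(nr, *s);    // following rows continue from here
  return nr;
}

int main()
{
  demo_session slave= { 100, 1, 1 };                 // replayed with SET INSERT_ID=100
  for (int row= 0; row < 3; row++)                   // engine hint differs on the slave,
    printf("%llu ", demo_generate_id(&slave, 7));    // but the ids are still 100 101 102
  printf("\n");
  return 0;
}

Running it prints 100 101 102 regardless of what the (hypothetical) engine hint is, which is the predictability the commit message refers to.
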
diff --git a/sql/handler.h b/sql/handler.h
index 7e5e626f713..9a08b8ed78c 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -404,7 +404,8 @@ public:
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
- virtual longlong get_auto_increment();
+ virtual ulonglong get_auto_increment();
+ virtual void restore_auto_increment();
virtual void update_create_info(HA_CREATE_INFO *create_info) {}
/* admin commands - called from mysql_admin_table */
diff --git a/sql/log.cc b/sql/log.cc
index 16381c8e26c..ab080366f95 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -366,12 +366,11 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
Format_description_log_event s(BINLOG_VERSION);
if (!s.is_valid())
goto err;
- s.set_log_pos(this);
if (null_created_arg)
s.created= 0;
if (s.write(&log_file))
goto err;
- bytes_written+= s.get_event_len();
+ bytes_written+= s.data_written;
}
if (description_event_for_queue &&
description_event_for_queue->binlog_version>=4)
@@ -386,24 +385,24 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg,
has been produced by
Format_description_log_event::Format_description_log_event(char*
buf,).
- Why don't we want to write the description_event_for_queue if this event
- is for format<4 (3.23 or 4.x): this is because in that case, the
- description_event_for_queue describes the data received from the master,
- but not the data written to the relay log (*conversion*), which is in
- format 4 (slave's).
+ Why don't we want to write the description_event_for_queue if this
+ event is for format<4 (3.23 or 4.x): this is because in that case, the
+ description_event_for_queue describes the data received from the
+ master, but not the data written to the relay log (*conversion*),
+ which is in format 4 (slave's).
*/
/*
- Set 'created' to 0, so that in next relay logs this event does not trigger
- cleaning actions on the slave in
+ Set 'created' to 0, so that in next relay logs this event does not
+ trigger cleaning actions on the slave in
Format_description_log_event::exec_event().
- Set 'log_pos' to 0 to show that it's an artificial event.
*/
description_event_for_queue->created= 0;
- description_event_for_queue->log_pos= 0;
+ /* Don't set log_pos in event header */
+ description_event_for_queue->artificial_event=1;
if (description_event_for_queue->write(&log_file))
goto err;
- bytes_written+= description_event_for_queue->get_event_len();
+ bytes_written+= description_event_for_queue->data_written;
}
if (flush_io_cache(&log_file) ||
my_sync(log_file.file, MYF(MY_WME)))
@@ -881,22 +880,18 @@ int MYSQL_LOG::purge_logs(const char *to_log,
while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) &&
!log_in_use(log_info.log_file_name))
{
- ulong file_size;
- LINT_INIT(file_size);
+ ulong file_size= 0;
if (decrease_log_space) //stat the file we want to delete
{
MY_STAT s;
+
+ /*
+ If we could not stat, we can't know the amount
+ of space that deletion will free. In most cases,
+ deletion won't work either, so it's not a problem.
+ */
if (my_stat(log_info.log_file_name,&s,MYF(0)))
file_size= s.st_size;
- else
- {
- /*
- If we could not stat, we can't know the amount
- of space that deletion will free. In most cases,
- deletion won't work either, so it's not a problem.
- */
- file_size= 0;
- }
}
/*
It's not fatal if we can't delete a log file ;
@@ -1069,9 +1064,8 @@ void MYSQL_LOG::new_file(bool need_lock)
*/
THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */
Rotate_log_event r(thd,new_name+dirname_length(new_name));
- r.set_log_pos(this);
r.write(&log_file);
- bytes_written += r.get_event_len();
+ bytes_written += r.data_written;
}
/*
Update needs to be signalled even if there is no rotate event
@@ -1130,7 +1124,7 @@ bool MYSQL_LOG::append(Log_event* ev)
error=1;
goto err;
}
- bytes_written += ev->get_event_len();
+ bytes_written+= ev->data_written;
DBUG_PRINT("info",("max_size: %lu",max_size));
if ((uint) my_b_append_tell(&log_file) > max_size)
{
@@ -1376,7 +1370,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
(uint) thd->variables.collation_database->number,
(uint) thd->variables.collation_server->number);
Query_log_event e(thd, buf, written, 0);
- e.set_log_pos(this);
if (e.write(file))
goto err;
}
@@ -1392,7 +1385,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
thd->variables.time_zone->get_name()->ptr(),
"'", NullS);
Query_log_event e(thd, buf, buf_end - buf, 0);
- e.set_log_pos(this);
if (e.write(file))
goto err;
}
@@ -1401,21 +1393,18 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
{
Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT,
thd->current_insert_id);
- e.set_log_pos(this);
if (e.write(file))
goto err;
}
if (thd->insert_id_used)
{
Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id);
- e.set_log_pos(this);
if (e.write(file))
goto err;
}
if (thd->rand_used)
{
Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2);
- e.set_log_pos(this);
if (e.write(file))
goto err;
}
@@ -1431,7 +1420,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
user_var_event->length,
user_var_event->type,
user_var_event->charset_number);
- e.set_log_pos(this);
if (e.write(file))
goto err;
}
@@ -1443,7 +1431,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
p= strmov(strmov(buf, "SET CHARACTER SET "),
thd->variables.convert_set->name);
Query_log_event e(thd, buf, (ulong) (p - buf), 0);
- e.set_log_pos(this);
if (e.write(file))
goto err;
}
@@ -1452,7 +1439,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u",
/* Write the SQL command */
- event_info->set_log_pos(this);
if (event_info->write(file))
goto err;
@@ -1632,7 +1618,6 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback)
master's binlog, which would result in wrong positions being shown to
the user, MASTER_POS_WAIT undue waiting etc.
*/
- qinfo.set_log_pos(this);
if (qinfo.write(&log_file))
goto err;
}
@@ -1658,7 +1643,6 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback)
commit_or_rollback ? "COMMIT" : "ROLLBACK",
commit_or_rollback ? 6 : 8,
TRUE);
- qinfo.set_log_pos(this);
if (qinfo.write(&log_file) || flush_io_cache(&log_file) ||
sync_binlog(&log_file))
goto err;
@@ -1894,9 +1878,8 @@ void MYSQL_LOG::close(uint exiting)
(exiting & LOG_CLOSE_STOP_EVENT))
{
Stop_log_event s;
- s.set_log_pos(this);
s.write(&log_file);
- bytes_written+= s.get_event_len();
+ bytes_written+= s.data_written;
signal_update();
}
#endif /* HAVE_REPLICATION */
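
The log_event.cc changes that follow store the two new session variables in the binlog as a Query_log_event status variable (code Q_AUTO_INCREMENT) and restore them in exec_event() before running the statement. Below is a hedged sketch of that wire layout only: it assumes the code value 3 from sql/log_event.h (that file is not shown in this diff) and uses plain shifts instead of the real int2store()/uint2korr() macros; demo_store_auto_inc is an invented name.

#include <cstdio>

typedef unsigned char uchar;
typedef unsigned int uint;

static const uchar DEMO_Q_AUTO_INCREMENT= 3;   // assumed value of Q_AUTO_INCREMENT

// Layout written by Query_log_event::write(): one code byte, then two
// little-endian 2-byte values (increment, then offset).
static uchar *demo_store_auto_inc(uchar *pos, uint increment, uint offset)
{
  *pos++= DEMO_Q_AUTO_INCREMENT;
  pos[0]= (uchar) (increment & 0xff);  pos[1]= (uchar) (increment >> 8);
  pos[2]= (uchar) (offset    & 0xff);  pos[3]= (uchar) (offset    >> 8);
  return pos + 4;
}

int main()
{
  uchar buf[5];
  demo_store_auto_inc(buf, 10, 5);               // auto_increment_increment=10, offset=5
  for (uint i= 0; i < sizeof(buf); i++)
    printf("%02x ", (uint) buf[i]);              // prints: 03 0a 00 05 00
  printf("\n");
  return 0;
}
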
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 0c3b36c37b5..0e130158919 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -180,10 +180,12 @@ static void cleanup_load_tmpdir()
write_str()
*/
-static bool write_str(IO_CACHE *file, char *str, byte length)
+static bool write_str(IO_CACHE *file, char *str, uint length)
{
- return (my_b_safe_write(file, &length, 1) ||
- my_b_safe_write(file, (byte*) str, (int) length));
+ byte tmp[1];
+ tmp[0]= (byte) length;
+ return (my_b_safe_write(file, tmp, sizeof(tmp)) ||
+ my_b_safe_write(file, (byte*) str, length));
}
@@ -191,17 +193,18 @@ static bool write_str(IO_CACHE *file, char *str, byte length)
read_str()
*/
-static inline int read_str(char * &buf, char *buf_end, char * &str,
- uint8 &len)
+static inline int read_str(char **buf, char *buf_end, char **str,
+ uint8 *len)
{
- if (buf + (uint) (uchar) *buf >= buf_end)
+ if (*buf + ((uint) (uchar) **buf) >= buf_end)
return 1;
- len = (uint8) *buf;
- str= buf+1;
- buf+= (uint) len+1;
+ *len= (uint8) **buf;
+ *str= (*buf)+1;
+ (*buf)+= (uint) *len+1;
return 0;
}
+
/*
Transforms a string into "" or its expression in 0x... form.
*/
@@ -279,8 +282,7 @@ const char* Log_event::get_type_str()
#ifndef MYSQL_CLIENT
Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans)
- :log_pos(0), temp_buf(0), exec_time(0), cached_event_len(0),
- flags(flags_arg), thd(thd_arg)
+ :log_pos(0), temp_buf(0), exec_time(0), flags(flags_arg), thd(thd_arg)
{
server_id= thd->server_id;
when= thd->start_time;
@@ -297,7 +299,7 @@ Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans)
*/
Log_event::Log_event()
- :temp_buf(0), exec_time(0), cached_event_len(0), flags(0), cache_stmt(0),
+ :temp_buf(0), exec_time(0), flags(0), cache_stmt(0),
thd(0)
{
server_id= ::server_id;
@@ -313,7 +315,7 @@ Log_event::Log_event()
Log_event::Log_event(const char* buf,
const Format_description_log_event* description_event)
- :temp_buf(0), cached_event_len(0), cache_stmt(0)
+ :temp_buf(0), cache_stmt(0)
{
#ifndef MYSQL_CLIENT
thd = 0;
@@ -333,25 +335,29 @@ Log_event::Log_event(const char* buf,
thread or a 4.0 master binlog read by the I/O thread), log_pos is the
beginning of the event: we transform it into the end of the event, which is
more useful.
- But how do you know that the log is 4.0: you know it if description_event is
- version 3 *and* you are not reading a Format_desc (remember that mysqlbinlog
- starts by assuming that 5.0 logs are in 4.0 format, until it finds a
- Format_desc).
+ But how do you know that the log is 4.0: you know it if description_event
+ is version 3 *and* you are not reading a Format_desc (remember that
+ mysqlbinlog starts by assuming that 5.0 logs are in 4.0 format, until it
+ finds a Format_desc).
*/
if (description_event->binlog_version==3 &&
- buf[EVENT_TYPE_OFFSET]<FORMAT_DESCRIPTION_EVENT &&
+ buf[EVENT_TYPE_OFFSET]<FORMAT_DESCRIPTION_EVENT && log_pos)
+ {
/*
If log_pos=0, don't change it. log_pos==0 is a marker to mean
"don't change rli->group_master_log_pos" (see
inc_group_relay_log_pos()). As it is unreal log_pos, adding the event
len's is nonsense. For example, a fake Rotate event should
not have its log_pos (which is 0) changed or it will modify
- Exec_master_log_pos in SHOW SLAVE STATUS, displaying a nonsense value of
- (a non-zero offset which does not exist in the master's binlog, so which
- will cause problems if the user uses this value in CHANGE MASTER).
+ Exec_master_log_pos in SHOW SLAVE STATUS, displaying a nonsense value
+ of (a non-zero offset which does not exist in the master's binlog, so
+ which will cause problems if the user uses this value in
+ CHANGE MASTER).
*/
- log_pos)
log_pos+= uint4korr(buf + EVENT_LEN_OFFSET);
+ }
+ DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos));
+
flags= uint2korr(buf + FLAGS_OFFSET);
if ((buf[EVENT_TYPE_OFFSET] == FORMAT_DESCRIPTION_EVENT) ||
(buf[EVENT_TYPE_OFFSET] == ROTATE_EVENT))
@@ -363,10 +369,10 @@ Log_event::Log_event(const char* buf,
/*
Initialization to zero of all other Log_event members as they're not
specified. Currently there are no such members; in the future there will
- be an event UID (but Format_description and Rotate don't need this UID, as
- they are not propagated through --log-slave-updates (remember the UID is
- used to not play a query twice when you have two masters which are slaves
- of a 3rd master). Then we are done.
+ be an event UID (but Format_description and Rotate don't need this UID,
+ as they are not propagated through --log-slave-updates (remember the UID
+ is used to not play a query twice when you have two masters which are
+ slaves of a 3rd master). Then we are done.
*/
return;
}
@@ -495,65 +501,79 @@ void Log_event::init_show_field_list(List<Item>* field_list)
#endif /* !MYSQL_CLIENT */
+
/*
Log_event::write()
*/
-int Log_event::write(IO_CACHE* file)
+bool Log_event::write_header(IO_CACHE* file, ulong event_data_length)
{
- return (write_header(file) || write_data(file)) ? -1 : 0;
-}
+ byte header[LOG_EVENT_HEADER_LEN];
+ DBUG_ENTER("Log_event::write_header");
+ /* Store number of bytes that will be written by this event */
+ data_written= event_data_length + sizeof(header);
-/*
- Log_event::write_header()
-*/
+ /*
+ log_pos != 0 if this is relay-log event. In this case we should not
+ change the position
+ */
+
+ if (is_artificial_event())
+ {
+ /*
+ We should not do any cleanup on slave when reading this. We
+ mark this by setting log_pos to 0. Start_log_event_v3() will
+ detect this on reading and set artificial_event=1 for the event.
+ */
+ log_pos= 0;
+ }
+ else if (!log_pos)
+ {
+ /*
+ Calculate position of end of event
+
+ Note that with a SEQ_READ_APPEND cache, my_b_tell() does not
+ work well. So this will give slightly wrong positions for the
+ Format_desc/Rotate/Stop events which the slave writes to its
+ relay log. For example, the initial Format_desc will have
+ end_log_pos=91 instead of 95. Because after writing the first 4
+ bytes of the relay log, my_b_tell() still reports 0. Because
+ my_b_append() does not update the counter which my_b_tell()
+ later uses (one should probably use my_b_append_tell() to work
+ around this). To get right positions even when writing to the
+ relay log, we use the (new) my_b_safe_tell().
+
+ Note that this raises a question on the correctness of all these
+ DBUG_ASSERT(my_b_tell()=rli->event_relay_log_pos).
+
+ If in a transaction, the log_pos which we calculate below is not
+ very good (because then my_b_safe_tell() returns start position
+ of the BEGIN, so it's like the statement was at the BEGIN's
+ place), but it's not a very serious problem (as the slave, when
+ it is in a transaction, does not take those end_log_pos into
+ account (as it calls inc_event_relay_log_pos()). To be fixed
+ later, so that it looks less strange. But not a bug.
+ */
+
+ log_pos= my_b_safe_tell(file)+data_written;
+ }
-int Log_event::write_header(IO_CACHE* file)
-{
/*
Header will be of size LOG_EVENT_HEADER_LEN for all events, except for
FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT, where it will be
- LOG_EVENT_MINIMAL_HEADER_LEN (remember these 2 have a frozen header, because
- we read them before knowing the format).
+ LOG_EVENT_MINIMAL_HEADER_LEN (remember these 2 have a frozen header,
+ because we read them before knowing the format).
*/
- char buf[LOG_EVENT_HEADER_LEN];
- char* pos = buf;
- int4store(pos, (ulong) when); // timestamp
- pos += 4;
- *pos++ = get_type_code(); // event type code
- int4store(pos, server_id);
- pos += 4;
- long tmp; // total length of the event
- switch (get_type_code())
- {
- case FORMAT_DESCRIPTION_EVENT:
- case ROTATE_EVENT:
- tmp= get_data_size() + LOG_EVENT_MINIMAL_HEADER_LEN;
- break;
- default:
- tmp= get_data_size() + LOG_EVENT_HEADER_LEN;
- break;
- }
- int4store(pos, tmp);
- pos += 4;
- int4store(pos, log_pos);
- pos += 4;
- int2store(pos, flags);
- pos += 2;
- switch (get_type_code())
- {
- case FORMAT_DESCRIPTION_EVENT:
- case ROTATE_EVENT:
- break;
- default:
- /*
- Other data to print in the header (nothing now); in that case increment
- pos.
- */
- break;
- }
- return (my_b_safe_write(file, (byte*) buf, (uint) (pos - buf)));
+
+ int4store(header, (ulong) when); // timestamp
+ header[EVENT_TYPE_OFFSET]= get_type_code();
+ int4store(header+ SERVER_ID_OFFSET, server_id);
+ int4store(header+ EVENT_LEN_OFFSET, data_written);
+ int4store(header+ LOG_POS_OFFSET, log_pos);
+ int4store(header+ FLAGS_OFFSET, flags);
+
+ DBUG_RETURN(my_b_safe_write(file, header, sizeof(header)) != 0);
}
@@ -742,9 +762,10 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
const char **error,
const Format_description_log_event *description_event)
{
+ Log_event* ev;
DBUG_ENTER("Log_event::read_log_event(char*,...)");
DBUG_ASSERT(description_event);
- DBUG_PRINT("info", ("binlog_version=%d", description_event->binlog_version));
+ DBUG_PRINT("info", ("binlog_version: %d", description_event->binlog_version));
if (event_len < EVENT_LEN_OFFSET ||
(uint) event_len != uint4korr(buf+EVENT_LEN_OFFSET))
{
@@ -752,8 +773,6 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
DBUG_RETURN(NULL); // general sanity check - will fail on a partial read
}
- Log_event* ev = NULL;
-
switch(buf[EVENT_TYPE_OFFSET]) {
case QUERY_EVENT:
ev = new Query_log_event(buf, event_len, description_event);
@@ -805,19 +824,23 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
ev = new Format_description_log_event(buf, event_len, description_event);
break;
default:
+ DBUG_PRINT("error",("Unknown evernt code: %d",(int) buf[EVENT_TYPE_OFFSET]));
+ ev= NULL;
break;
}
/*
is_valid() are small event-specific sanity tests which are important; for
example there are some my_malloc() in constructors
(e.g. Query_log_event::Query_log_event(char*...)); when these my_malloc()
- fail we can't return an error out of the constructor (because constructor is
- "void") ; so instead we leave the pointer we wanted to allocate
+ fail we can't return an error out of the constructor (because constructor
+ is "void") ; so instead we leave the pointer we wanted to allocate
(e.g. 'query') to 0 and we test it in is_valid(). Same for
Format_description_log_event, member 'post_header_len'.
*/
if (!ev || !ev->is_valid())
{
+ DBUG_PRINT("error",("Found invalid event in binary log"));
+
delete ev;
#ifdef MYSQL_CLIENT
if (!force_opt) /* then mysqlbinlog dies */
@@ -831,7 +854,6 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
DBUG_RETURN(0);
#endif
}
- ev->cached_event_len = event_len;
DBUG_RETURN(ev);
}
@@ -878,42 +900,6 @@ void Log_event::print_timestamp(FILE* file, time_t* ts)
#endif /* MYSQL_CLIENT */
-/*
- Log_event::set_log_pos()
- Only used when we are writing an event which we created, to the BINlog. That
- is, when we have parsed and executed a query; we then want to set the event's
- log_pos to what it is going to be in the binlog after we write it. Note that
- this is the position of the END of the event.
-*/
-
-#ifndef MYSQL_CLIENT
-void Log_event::set_log_pos(MYSQL_LOG* log)
-{
- /*
- Note that with a SEQ_READ_APPEND cache, my_b_tell() does not work well.
- So this will give slightly wrong positions for the Format_desc/Rotate/Stop
- events which the slave writes to its relay log. For example, the initial
- Format_desc will have end_log_pos=91 instead of 95. Because after writing
- the first 4 bytes of the relay log, my_b_tell() still reports 0. Because
- my_b_append() does not update the counter which my_b_tell() later uses (one
- should probably use my_b_append_tell() to work around this).
- To get right positions even when writing to the relay log, we use the (new)
- my_b_safe_tell().
- Note that this raises a question on the correctness of all these
- DBUG_ASSERT(my_b_tell()=rli->event_relay_log_pos).
- If in a transaction, the log_pos which we calculate below is not very good
- (because then my_b_safe_tell() returns start position of the BEGIN, so it's
- like the statement was at the BEGIN's place), but it's not a very serious
- problem (as the slave, when it is in a transaction, does not take those
- end_log_pos into account (as it calls inc_event_relay_log_pos()). To be
- fixed later, so that it looks less strange. But not bug.
- */
- if (!log_pos)
- log_pos = my_b_safe_tell(&log->log_file)+get_event_len();
-}
-#endif /* !MYSQL_CLIENT */
-
-
/**************************************************************************
Query_log_event methods
**************************************************************************/
@@ -954,31 +940,27 @@ void Query_log_event::pack_info(Protocol *protocol)
/*
Query_log_event::write()
-*/
-
-int Query_log_event::write(IO_CACHE* file)
-{
- return query ? Log_event::write(file) : -1;
-}
-
-/*
- Query_log_event::write_data()
+ NOTES:
+ In this event we have to modify the header to have the correct
+ EVENT_LEN_OFFSET as we don't yet know how many status variables we
+ will print!
*/
-int Query_log_event::write_data(IO_CACHE* file)
+bool Query_log_event::write(IO_CACHE* file)
{
- uchar buf[QUERY_HEADER_LEN+1+4+1+8+1+1+FN_REFLEN], *start;
+ uchar buf[QUERY_HEADER_LEN+1+4+1+8+1+1+FN_REFLEN+5], *start, *start_of_status;
+ ulong event_length;
if (!query)
- return -1;
-
+ return 1; // Something wrong with event
+
/*
We want to store the thread id:
(- as an information for the user when he reads the binlog)
- if the query uses temporary table: for the slave SQL thread to know to
which master connection the temp table belongs.
- Now imagine we (write_data()) are called by the slave SQL thread (we are
+ Now imagine we (write()) are called by the slave SQL thread (we are
logging a query executed by this thread; the slave runs with
--log-slave-updates). Then this query will be logged with
thread_id=the_thread_id_of_the_SQL_thread. Imagine that 2 temp tables of
@@ -1014,14 +996,13 @@ int Query_log_event::write_data(IO_CACHE* file)
int4store(buf + Q_EXEC_TIME_OFFSET, exec_time);
buf[Q_DB_LEN_OFFSET] = (char) db_len;
int2store(buf + Q_ERR_CODE_OFFSET, error_code);
- int2store(buf + Q_STATUS_VARS_LEN_OFFSET, status_vars_len);
/*
You MUST always write status vars in increasing order of code. This
guarantees that a slightly older slave will be able to parse those he
knows.
*/
- start= buf+QUERY_HEADER_LEN;
+ start_of_status= start= buf+QUERY_HEADER_LEN;
if (flags2_inited)
{
*(start++)= Q_FLAGS2_CODE;
@@ -1056,6 +1037,13 @@ int Query_log_event::write_data(IO_CACHE* file)
*/
*(start++)= '\0';
}
+ if (auto_increment_increment != 1)
+ {
+ *start++= Q_AUTO_INCREMENT;
+ int2store(start, auto_increment_increment);
+ int2store(start+2, auto_increment_offset);
+ start+= 4;
+ }
/*
Here there could be code like
if (command-line-option-which-says-"log_this_variable")
@@ -1066,9 +1054,20 @@ int Query_log_event::write_data(IO_CACHE* file)
}
*/
- return (my_b_safe_write(file, (byte*) buf, (start-buf)) ||
+ /* Store length of status variables */
+ status_vars_len= (uint) (start-start_of_status);
+ int2store(buf + Q_STATUS_VARS_LEN_OFFSET, status_vars_len);
+
+ /*
+ Calculate length of whole event
+ The "1" below is the \0 in the db's length
+ */
+ event_length= (uint) (start-buf) + db_len + 1 + q_len;
+
+ return (write_header(file, event_length) ||
+ my_b_safe_write(file, (byte*) buf, (uint) (start-buf)) ||
my_b_safe_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) ||
- my_b_safe_write(file, (byte*) query, q_len)) ? -1 : 0;
+ my_b_safe_write(file, (byte*) query, q_len)) ? 1 : 0;
}
@@ -1089,7 +1088,10 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
thread_id(thd_arg->thread_id),
/* save the original thread id; we already know the server id */
slave_proxy_id(thd_arg->variables.pseudo_thread_id),
- flags2_inited(1), sql_mode_inited(1), flags2(0), sql_mode(0)
+ flags2_inited(1), sql_mode_inited(1), flags2(0),
+ sql_mode(thd_arg->variables.sql_mode),
+ auto_increment_increment(thd_arg->variables.auto_increment_increment),
+ auto_increment_offset(thd_arg->variables.auto_increment_offset)
{
time_t end_time;
time(&end_time);
@@ -1105,7 +1107,7 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
we will probably want to reclaim the 29 bits. So we need the &.
*/
flags2= thd_arg->options & OPTIONS_WRITTEN_TO_BIN_LOG;
- sql_mode= thd_arg->variables.sql_mode;
+
DBUG_PRINT("info",("Query_log_event has flags2=%lu sql_mode=%lu",flags2,sql_mode));
}
#endif /* MYSQL_CLIENT */
@@ -1118,13 +1120,16 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
Query_log_event::Query_log_event(const char* buf, uint event_len,
const Format_description_log_event *description_event)
- :Log_event(buf, description_event), data_buf(0), query(NULL),
- db(NULL), catalog_len(-1), status_vars_len(0),
+ :Log_event(buf, description_event), data_buf(0), query(NullS), catalog(NullS),
+ db(NullS), catalog_len(0), status_vars_len(0),
flags2_inited(0), sql_mode_inited(0)
{
ulong data_len;
+ uint32 tmp;
uint8 common_header_len, post_header_len;
+ const char *start, *end;
DBUG_ENTER("Query_log_event::Query_log_event(char*,...)");
+
common_header_len= description_event->common_header_len;
post_header_len= description_event->post_header_len[QUERY_EVENT-1];
DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d",
@@ -1136,7 +1141,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
We use QUERY_HEADER_MINIMAL_LEN which is the same for 3.23, 4.0 & 5.0.
*/
if (event_len < (uint)(common_header_len + post_header_len))
- return;
+ DBUG_VOID_RETURN;
data_len = event_len - (common_header_len + post_header_len);
buf+= common_header_len;
@@ -1144,18 +1149,21 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
exec_time = uint4korr(buf + Q_EXEC_TIME_OFFSET);
db_len = (uint)buf[Q_DB_LEN_OFFSET];
error_code = uint2korr(buf + Q_ERR_CODE_OFFSET);
+ /* If auto_increment is not set by query_event, they should not be used */
+ auto_increment_increment= auto_increment_offset= 1;
/*
5.0 format starts here.
Depending on the format, we may or not have affected/warnings etc
The remnent post-header to be parsed has length:
*/
- uint32 tmp= post_header_len - QUERY_HEADER_MINIMAL_LEN;
+ tmp= post_header_len - QUERY_HEADER_MINIMAL_LEN;
if (tmp)
{
status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET);
- DBUG_PRINT("info", ("Query_log_event has status_vars_len=%d",
- status_vars_len));
+ data_len-= status_vars_len;
+ DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u",
+ (uint) status_vars_len));
tmp-= 2;
}
/* we have parsed everything we know in the post header */
@@ -1165,25 +1173,11 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
(%d more bytes)", tmp));
#endif
- /*
- Suppose you have a 4.0 master with --ansi and a 5.0 slave with --ansi.
- The slave sets flags2 to 0 (because that's a 4.0 event); if we simply use
- this value of 0, we will cancel --ansi on the slave, which is
- unwanted. In this example 0 means "unset", not really "set to 0".
- Fortunately we have flags2_inited==0 to distinguish between "unset" and
- "set to 0". See below.
- */
-
/* variable-part: the status vars; only in MySQL 5.0 */
- const uchar *start= (uchar*) (buf+post_header_len);
- const uchar *end= (uchar*) (start+status_vars_len);
- /*
- The place from which we will start string duplication.
- */
- const uchar *start_dup= end;
-
- for (const uchar* pos=start;pos<end;)
+ start= (char*) (buf+post_header_len);
+ end= (char*) (start+status_vars_len);
+ for (const uchar* pos= (const uchar*) start; pos < (const uchar*) end;)
{
switch (*pos++) {
case Q_FLAGS2_CODE:
@@ -1206,52 +1200,41 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
}
case Q_CATALOG_CODE:
catalog_len= *pos;
- /*
- Now 'pos' points to beginning of catalog - 1.
- The catalog must be included in the string which we will duplicate
- later. If string status vars having a smaller code had been seen before
- and so marked to-be-duplicated, start_dup would be != end and we would
- not need (and want) to change start_dup (because this would cut the
- previously marked status vars).
- */
- pos++;
- if (start_dup==end)
- start_dup= pos;
- pos+= catalog_len+1; // counting the end '\0'
+ if (catalog_len)
+ catalog= (char*) pos+1; // Will be copied later
+ pos+= catalog_len+2;
+ break;
+ case Q_AUTO_INCREMENT:
+ auto_increment_increment= uint2korr(pos);
+ auto_increment_offset= uint2korr(pos+2);
+ pos+= 4;
break;
default:
/* That's why you must write status vars in growing order of code */
DBUG_PRINT("info",("Query_log_event has unknown status vars (first has\
code: %u), skipping the rest of them", (uint) *(pos-1)));
- pos= end;
+ pos= (const uchar*) end; // Break loop
}
}
/* A 2nd variable part; this is common to all versions */
- data_len-= (uint) (start_dup-start); /* cut not-to-be-duplicated head */
- if (!(data_buf = (char*) my_strdup_with_length((byte*) start_dup,
- data_len,
- MYF(MY_WME))))
- return;
-
- const char* tmp_buf= data_buf;
- /* Now set event's pointers to point to bits of the new string */
- if (catalog_len >= 0) // we have seen a catalog (zero-length or not)
+ if (!(start= data_buf = (char*) my_malloc(catalog_len + data_len +2, MYF(MY_WME))))
+ DBUG_VOID_RETURN;
+ if (catalog) // If catalog is given
{
- catalog= tmp_buf;
- tmp_buf+= (uint) (end-start_dup); /* "seek" to db */
+ memcpy((char*) start, catalog, catalog_len+1); // Copy name and end \0
+ catalog= start;
+ start+= catalog_len+1;
}
-#ifndef DBUG_OFF
- else
- catalog= 0; // for DBUG_PRINT
-#endif
- db= tmp_buf;
- query= tmp_buf + db_len + 1;
- q_len = data_buf + data_len - query;
+ memcpy((char*) start, end, data_len); // Copy db and query
+ ((char*) start)[data_len]= '\0'; // End query with \0 (For safety)
+ db= start;
+ query= start + db_len + 1;
+ q_len= data_len - db_len -1;
/* This is used to detect wrong parsing. Could be removed in the future. */
- DBUG_PRINT("info", ("catalog_len:%d catalog: '%s' db: '%s' q_len: %d",
- catalog_len, catalog, db, q_len));
+ DBUG_PRINT("info", ("catalog: '%s' len: %u db: '%s' len: %u q_len: %lu",
+ catalog, (uint) catalog_len, db, (uint) db_len,q_len));
DBUG_VOID_RETURN;
}
@@ -1267,6 +1250,7 @@ void Query_log_event::print(FILE* file, bool short_form,
// TODO: print the catalog ??
char buff[40],*end; // Enough for SET TIMESTAMP
bool different_db= 1;
+ uint32 tmp;
if (!short_form)
{
@@ -1301,8 +1285,6 @@ void Query_log_event::print(FILE* file, bool short_form,
mysqlbinlog handles gracefully). So this code should always be good.
*/
- uint32 tmp;
-
if (likely(flags2_inited)) /* likely as this will mainly read 5.0 logs */
{
/* tmp is a bitmask of bits which have changed. */
@@ -1341,7 +1323,8 @@ void Query_log_event::print(FILE* file, bool short_form,
if (unlikely(!last_event_info->sql_mode_inited)) /* first Query event */
{
last_event_info->sql_mode_inited= 1;
- last_event_info->sql_mode= ~sql_mode; // force a difference to force write
+ /* force a difference to force write */
+ last_event_info->sql_mode= ~sql_mode;
}
if (unlikely(last_event_info->sql_mode != sql_mode))
{
@@ -1349,9 +1332,17 @@ void Query_log_event::print(FILE* file, bool short_form,
last_event_info->sql_mode= sql_mode;
}
}
+ if (last_event_info->auto_increment_increment != auto_increment_increment ||
+ last_event_info->auto_increment_offset != auto_increment_offset)
+ {
+ fprintf(file,"SET @@session.auto_increment_increment=%lu, @@session.auto_increment_offset=%lu;\n",
+ auto_increment_increment,auto_increment_offset);
+ last_event_info->auto_increment_increment= auto_increment_increment;
+ last_event_info->auto_increment_offset= auto_increment_offset;
+ }
my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME));
- fprintf(file, ";\n");
+ fputs(";\n", file);
}
#endif /* MYSQL_CLIENT */
@@ -1372,6 +1363,8 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli)
*/
thd->catalog= (char*) catalog;
thd->db= (char*) rewrite_db(db);
+ thd->variables.auto_increment_increment= auto_increment_increment;
+ thd->variables.auto_increment_offset= auto_increment_offset;
/*
InnoDB internally stores the master log position it has executed so far,
@@ -1383,13 +1376,9 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli)
END of the current log event (COMMIT). We save it in rli so that InnoDB can
access it.
*/
-#if MYSQL_VERSION_ID < 50000
- rli->future_group_master_log_pos= log_pos + get_event_len() -
- (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0);
-#else
- /* In 5.0 we store the end_log_pos in the relay log so no problem */
rli->future_group_master_log_pos= log_pos;
-#endif
+ DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos));
+
clear_all_errors(thd, rli);
if (db_ok(thd->db, replicate_do_db, replicate_ignore_db))
@@ -1552,7 +1541,7 @@ end:
**************************************************************************/
#ifndef MYSQL_CLIENT
-Start_log_event_v3::Start_log_event_v3() :Log_event(), binlog_version(BINLOG_VERSION)
+Start_log_event_v3::Start_log_event_v3() :Log_event(), binlog_version(BINLOG_VERSION), artificial_event(0)
{
created= when;
memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
@@ -1613,27 +1602,31 @@ Start_log_event_v3::Start_log_event_v3(const char* buf,
const Format_description_log_event* description_event)
:Log_event(buf, description_event)
{
- buf += description_event->common_header_len;
- binlog_version = uint2korr(buf+ST_BINLOG_VER_OFFSET);
+ buf+= description_event->common_header_len;
+ binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET);
memcpy(server_version, buf+ST_SERVER_VER_OFFSET,
ST_SERVER_VER_LEN);
- created = uint4korr(buf+ST_CREATED_OFFSET);
+ created= uint4korr(buf+ST_CREATED_OFFSET);
+ /* We use log_pos to mark if this was an artificial event or not */
+ artificial_event= (log_pos == 0);
}
/*
- Start_log_event_v3::write_data()
+ Start_log_event_v3::write()
*/
-int Start_log_event_v3::write_data(IO_CACHE* file)
+bool Start_log_event_v3::write(IO_CACHE* file)
{
char buff[START_V3_HEADER_LEN];
int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
int4store(buff + ST_CREATED_OFFSET,created);
- return (my_b_safe_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0);
+ return (write_header(file, sizeof(buff)) ||
+ my_b_safe_write(file, (byte*) buff, sizeof(buff)));
}
+
/*
Start_log_event_v3::exec_event()
@@ -1689,7 +1682,7 @@ int Start_log_event_v3::exec_event(struct st_relay_log_info* rli)
it comes to us; we'll know this by checking log_pos ("artificial" events
have log_pos == 0).
*/
- if (log_pos && (thd->options & OPTION_BEGIN))
+ if (!artificial_event && (thd->options & OPTION_BEGIN))
{
slave_print_error(rli, 0, "\
Rolling back unfinished transaction (no COMMIT or ROLLBACK) from relay log. \
@@ -1714,9 +1707,9 @@ binary log.");
close_temporary_tables(thd);
}
/*
- Otherwise, can't distinguish a Start_log_event generated at master startup
- and one generated by master FLUSH LOGS, so cannot be sure temp tables have
- to be dropped. So do nothing.
+ Otherwise, can't distinguish a Start_log_event generated at
+ master startup and one generated by master FLUSH LOGS, so cannot
+ be sure temp tables have to be dropped. So do nothing.
*/
break;
default:
@@ -1755,14 +1748,14 @@ binary log.");
*/
-Format_description_log_event::Format_description_log_event(uint8 binlog_ver,
- const char* server_ver)
- : Start_log_event_v3()
+Format_description_log_event::
+Format_description_log_event(uint8 binlog_ver,
+ const char* server_ver)
+ :Start_log_event_v3()
{
created= when;
binlog_version= binlog_ver;
- switch(binlog_ver)
- {
+ switch (binlog_ver) {
case 4: /* MySQL 5.0 */
memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
common_header_len= LOG_EVENT_HEADER_LEN;
@@ -1841,27 +1834,29 @@ Format_description_log_event::Format_description_log_event(uint8 binlog_ver,
}
}
-Format_description_log_event::Format_description_log_event(const char* buf,
- uint event_len,
- const
- Format_description_log_event*
- description_event)
- /*
- The problem with this constructor is that the fixed header may have a length
- different from this version, but we don't know this length as we have not
- read the Format_description_log_event which says it, yet. This length is in
- the post-header of the event, but we don't know where the post-header
- starts.
- So this type of event HAS to:
- - either have the header's length at the beginning (in the header, at a
- fixed position which will never be changed), not in the post-header. That
- would make the header be "shifted" compared to other events.
- - or have a header of size LOG_EVENT_MINIMAL_HEADER_LEN (19), in all future
- versions, so that we know for sure.
- I (Guilhem) chose the 2nd solution. Rotate has the same constraint (because
- it is sent before Format_description_log_event).
- */
-
+
+/*
+ The problem with this constructor is that the fixed header may have a
+ length different from this version, but we don't know this length as we
+ have not read the Format_description_log_event which says it, yet. This
+ length is in the post-header of the event, but we don't know where the
+ post-header starts.
+ So this type of event HAS to:
+ - either have the header's length at the beginning (in the header, at a
+ fixed position which will never be changed), not in the post-header. That
+ would make the header be "shifted" compared to other events.
+ - or have a header of size LOG_EVENT_MINIMAL_HEADER_LEN (19), in all future
+ versions, so that we know for sure.
+ I (Guilhem) chose the 2nd solution. Rotate has the same constraint (because
+ it is sent before Format_description_log_event).
+*/
+
+Format_description_log_event::
+Format_description_log_event(const char* buf,
+ uint event_len,
+ const
+ Format_description_log_event*
+ description_event)
:Start_log_event_v3(buf, description_event)
{
DBUG_ENTER("Format_description_log_event::Format_description_log_event(char*,...)");
@@ -1880,20 +1875,22 @@ Format_description_log_event::Format_description_log_event(const char* buf,
DBUG_VOID_RETURN;
}
-int Format_description_log_event::write_data(IO_CACHE* file)
+
+bool Format_description_log_event::write(IO_CACHE* file)
{
/*
- We don't call Start_log_event_v3::write_data() because this would make 2
+ We don't call Start_log_event_v3::write() because this would make 2
my_b_safe_write().
*/
- char buff[FORMAT_DESCRIPTION_HEADER_LEN];
+ byte buff[FORMAT_DESCRIPTION_HEADER_LEN];
int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
- memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
+ memcpy((char*) buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
int4store(buff + ST_CREATED_OFFSET,created);
buff[ST_COMMON_HEADER_LEN_OFFSET]= LOG_EVENT_HEADER_LEN;
- memcpy(buff+ST_COMMON_HEADER_LEN_OFFSET+1, (byte*) post_header_len,
+ memcpy((char*) buff+ST_COMMON_HEADER_LEN_OFFSET+1, (byte*) post_header_len,
LOG_EVENT_TYPES);
- return (my_b_safe_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0);
+ return (write_header(file, sizeof(buff)) ||
+ my_b_safe_write(file, buff, sizeof(buff)));
}
/*
@@ -1924,8 +1921,8 @@ int Format_description_log_event::exec_event(struct st_relay_log_info* rli)
{
/*
Do not modify rli->group_master_log_pos, as this event did not exist on
- the master. That is, just update the *relay log* coordinates; this is done
- by passing log_pos=0 to inc_group_relay_log_pos, like we do in
+ the master. That is, just update the *relay log* coordinates; this is
+ done by passing log_pos=0 to inc_group_relay_log_pos, like we do in
Stop_log_event::exec_event().
If in a transaction, don't touch group_* coordinates.
*/
@@ -2073,7 +2070,7 @@ void Load_log_event::pack_info(Protocol *protocol)
Load_log_event::write_data_header()
*/
-int Load_log_event::write_data_header(IO_CACHE* file)
+bool Load_log_event::write_data_header(IO_CACHE* file)
{
char buf[LOAD_HEADER_LEN];
int4store(buf + L_THREAD_ID_OFFSET, slave_proxy_id);
@@ -2082,7 +2079,7 @@ int Load_log_event::write_data_header(IO_CACHE* file)
buf[L_TBL_LEN_OFFSET] = (char)table_name_len;
buf[L_DB_LEN_OFFSET] = (char)db_len;
int4store(buf + L_NUM_FIELDS_OFFSET, num_fields);
- return my_b_safe_write(file, (byte*)buf, LOAD_HEADER_LEN);
+ return my_b_safe_write(file, (byte*)buf, LOAD_HEADER_LEN) != 0;
}
@@ -2090,7 +2087,7 @@ int Load_log_event::write_data_header(IO_CACHE* file)
Load_log_event::write_data_body()
*/
-int Load_log_event::write_data_body(IO_CACHE* file)
+bool Load_log_event::write_data_body(IO_CACHE* file)
{
if (sql_ex.write_data(file))
return 1;
@@ -2193,6 +2190,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex,
}
#endif /* !MYSQL_CLIENT */
+
/*
Load_log_event::Load_log_event()
@@ -2437,12 +2435,8 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
if (!use_rli_only_for_errors)
{
/* Saved for InnoDB, see comment in Query_log_event::exec_event() */
-#if MYSQL_VERSION_ID < 50000
- rli->future_group_master_log_pos= log_pos + get_event_len() -
- (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0);
-#else
rli->future_group_master_log_pos= log_pos;
-#endif
+ DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos));
}
/*
@@ -2701,15 +2695,16 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len,
/*
- Rotate_log_event::write_data()
+ Rotate_log_event::write()
*/
-int Rotate_log_event::write_data(IO_CACHE* file)
+bool Rotate_log_event::write(IO_CACHE* file)
{
char buf[ROTATE_HEADER_LEN];
int8store(buf + R_POS_OFFSET, pos);
- return (my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) ||
- my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len));
+ return (write_header(file, ROTATE_HEADER_LEN + ident_len) ||
+ my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) ||
+ my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len));
}
@@ -2828,15 +2823,16 @@ const char* Intvar_log_event::get_var_type_name()
/*
- Intvar_log_event::write_data()
+ Intvar_log_event::write()
*/
-int Intvar_log_event::write_data(IO_CACHE* file)
+bool Intvar_log_event::write(IO_CACHE* file)
{
- char buf[9];
- buf[I_TYPE_OFFSET] = type;
+ byte buf[9];
+ buf[I_TYPE_OFFSET]= (byte) type;
int8store(buf + I_VAL_OFFSET, val);
- return my_b_safe_write(file, (byte*) buf, sizeof(buf));
+ return (write_header(file, sizeof(buf)) ||
+ my_b_safe_write(file, buf, sizeof(buf)));
}
@@ -2922,12 +2918,13 @@ Rand_log_event::Rand_log_event(const char* buf,
}
-int Rand_log_event::write_data(IO_CACHE* file)
+bool Rand_log_event::write(IO_CACHE* file)
{
- char buf[16];
+ byte buf[16];
int8store(buf + RAND_SEED1_OFFSET, seed1);
int8store(buf + RAND_SEED2_OFFSET, seed2);
- return my_b_safe_write(file, (byte*) buf, sizeof(buf));
+ return (write_header(file, sizeof(buf)) ||
+ my_b_safe_write(file, buf, sizeof(buf)));
}
@@ -3023,8 +3020,9 @@ void User_var_log_event::pack_info(Protocol* protocol)
#endif /* !MYSQL_CLIENT */
-User_var_log_event::User_var_log_event(const char* buf,
- const Format_description_log_event* description_event)
+User_var_log_event::
+User_var_log_event(const char* buf,
+ const Format_description_log_event* description_event)
:Log_event(buf, description_event)
{
buf+= description_event->common_header_len;
@@ -3051,13 +3049,14 @@ User_var_log_event::User_var_log_event(const char* buf,
}
-int User_var_log_event::write_data(IO_CACHE* file)
+bool User_var_log_event::write(IO_CACHE* file)
{
char buf[UV_NAME_LEN_SIZE];
char buf1[UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE +
UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE];
char buf2[8], *pos= buf2;
uint buf1_length;
+ ulong event_length;
int4store(buf, name_len);
@@ -3089,7 +3088,12 @@ int User_var_log_event::write_data(IO_CACHE* file)
return 0;
}
}
- return (my_b_safe_write(file, (byte*) buf, sizeof(buf)) ||
+
+ /* Length of the whole event */
+ event_length= sizeof(buf)+ name_len + buf1_length + val_len;
+
+ return (write_header(file, event_length) ||
+ my_b_safe_write(file, (byte*) buf, sizeof(buf)) ||
my_b_safe_write(file, (byte*) name, name_len) ||
my_b_safe_write(file, (byte*) buf1, buf1_length) ||
my_b_safe_write(file, (byte*) pos, val_len));
@@ -3337,12 +3341,15 @@ int Slave_log_event::get_data_size()
}
-int Slave_log_event::write_data(IO_CACHE* file)
+bool Slave_log_event::write(IO_CACHE* file)
{
+ ulong event_length= get_data_size();
int8store(mem_pool + SL_MASTER_POS_OFFSET, master_pos);
int2store(mem_pool + SL_MASTER_PORT_OFFSET, master_port);
// log and host are already there
- return my_b_safe_write(file, (byte*)mem_pool, get_data_size());
+
+ return (write_header(file, event_length) ||
+ my_b_safe_write(file, (byte*) mem_pool, event_length));
}
@@ -3476,13 +3483,13 @@ Create_file_log_event(THD* thd_arg, sql_exchange* ex,
Create_file_log_event::write_data_body()
*/
-int Create_file_log_event::write_data_body(IO_CACHE* file)
+bool Create_file_log_event::write_data_body(IO_CACHE* file)
{
- int res;
- if ((res = Load_log_event::write_data_body(file)) || fake_base)
+ bool res;
+ if ((res= Load_log_event::write_data_body(file)) || fake_base)
return res;
return (my_b_safe_write(file, (byte*) "", 1) ||
- my_b_safe_write(file, (byte*) block, block_len));
+ my_b_safe_write(file, (byte*) block, block_len));
}
@@ -3490,14 +3497,14 @@ int Create_file_log_event::write_data_body(IO_CACHE* file)
Create_file_log_event::write_data_header()
*/
-int Create_file_log_event::write_data_header(IO_CACHE* file)
+bool Create_file_log_event::write_data_header(IO_CACHE* file)
{
- int res;
- if ((res = Load_log_event::write_data_header(file)) || fake_base)
- return res;
+ bool res;
byte buf[CREATE_FILE_HEADER_LEN];
+ if ((res= Load_log_event::write_data_header(file)) || fake_base)
+ return res;
int4store(buf + CF_FILE_ID_OFFSET, file_id);
- return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN);
+ return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN) != 0;
}
@@ -3505,12 +3512,12 @@ int Create_file_log_event::write_data_header(IO_CACHE* file)
Create_file_log_event::write_base()
*/
-int Create_file_log_event::write_base(IO_CACHE* file)
+bool Create_file_log_event::write_base(IO_CACHE* file)
{
- int res;
- fake_base = 1; // pretend we are Load event
- res = write(file);
- fake_base = 0;
+ bool res;
+ fake_base= 1; // pretend we are Load event
+ res= write(file);
+ fake_base= 0;
return res;
}
@@ -3542,19 +3549,20 @@ Create_file_log_event::Create_file_log_event(const char* buf, uint len,
file_id= uint4korr(buf +
header_len +
load_header_len + CF_FILE_ID_OFFSET);
- // + 1 for \0 terminating fname
/*
Note that it's ok to use get_data_size() below, because it is computed
with values we have already read from this event (because we called
- copy_log_event()); we are not using slave's format info to decode master's
- format, we are really using master's format info.
- Anyway, both formats should be identical (except the common_header_len) as
- these Load events are not changed between 4.0 and 5.0 (as logging of LOAD
- DATA INFILE does not use Load_log_event in 5.0).
+ copy_log_event()); we are not using slave's format info to decode
+ master's format, we are really using master's format info.
+ Anyway, both formats should be identical (except the common_header_len)
+ as these Load events are not changed between 4.0 and 5.0 (as logging of
+ LOAD DATA INFILE does not use Load_log_event in 5.0).
+
+ The + 1 is for \0 terminating fname
*/
- block_offset= description_event->common_header_len +
- Load_log_event::get_data_size() +
- create_file_header_len + 1;
+ block_offset= (description_event->common_header_len +
+ Load_log_event::get_data_size() +
+ create_file_header_len + 1);
if (len < block_offset)
return;
block = (char*)buf + block_offset;
@@ -3737,14 +3745,15 @@ Append_block_log_event::Append_block_log_event(const char* buf, uint len,
/*
- Append_block_log_event::write_data()
+ Append_block_log_event::write()
*/
-int Append_block_log_event::write_data(IO_CACHE* file)
+bool Append_block_log_event::write(IO_CACHE* file)
{
byte buf[APPEND_BLOCK_HEADER_LEN];
int4store(buf + AB_FILE_ID_OFFSET, file_id);
- return (my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) ||
+ return (write_header(file, APPEND_BLOCK_HEADER_LEN + block_len) ||
+ my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) ||
my_b_safe_write(file, (byte*) block, block_len));
}
@@ -3854,14 +3863,15 @@ Delete_file_log_event::Delete_file_log_event(const char* buf, uint len,
/*
- Delete_file_log_event::write_data()
+ Delete_file_log_event::write()
*/
-int Delete_file_log_event::write_data(IO_CACHE* file)
+bool Delete_file_log_event::write(IO_CACHE* file)
{
byte buf[DELETE_FILE_HEADER_LEN];
int4store(buf + DF_FILE_ID_OFFSET, file_id);
- return my_b_safe_write(file, buf, DELETE_FILE_HEADER_LEN);
+ return (write_header(file, sizeof(buf)) ||
+ my_b_safe_write(file, buf, sizeof(buf)));
}
@@ -3947,14 +3957,15 @@ Execute_load_log_event::Execute_load_log_event(const char* buf, uint len,
/*
- Execute_load_log_event::write_data()
+ Execute_load_log_event::write()
*/
-int Execute_load_log_event::write_data(IO_CACHE* file)
+bool Execute_load_log_event::write(IO_CACHE* file)
{
byte buf[EXEC_LOAD_HEADER_LEN];
int4store(buf + EL_FILE_ID_OFFSET, file_id);
- return my_b_safe_write(file, buf, EXEC_LOAD_HEADER_LEN);
+ return (write_header(file, sizeof(buf)) ||
+ my_b_safe_write(file, buf, sizeof(buf)));
}
@@ -4027,15 +4038,7 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli)
mysql_load()).
*/
-#if MYSQL_VERSION_ID < 40100
- rli->future_master_log_pos= log_pos + get_event_len() -
- (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0);
-#elif MYSQL_VERSION_ID < 50000
- rli->future_group_master_log_pos= log_pos + get_event_len() -
- (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0);
-#else
- rli->future_group_master_log_pos= log_pos;
-#endif
+ rli->future_group_master_log_pos= log_pos;
if (lev->exec_event(0,rli,1))
{
/*
@@ -4093,15 +4096,15 @@ err:
sql_ex_info::write_data()
*/
-int sql_ex_info::write_data(IO_CACHE* file)
+bool sql_ex_info::write_data(IO_CACHE* file)
{
if (new_format())
{
- return (write_str(file, field_term, field_term_len) ||
- write_str(file, enclosed, enclosed_len) ||
- write_str(file, line_term, line_term_len) ||
- write_str(file, line_start, line_start_len) ||
- write_str(file, escaped, escaped_len) ||
+ return (write_str(file, field_term, (uint) field_term_len) ||
+ write_str(file, enclosed, (uint) enclosed_len) ||
+ write_str(file, line_term, (uint) line_term_len) ||
+ write_str(file, line_start, (uint) line_start_len) ||
+ write_str(file, escaped, (uint) escaped_len) ||
my_b_safe_write(file,(byte*) &opt_flags,1));
}
else
@@ -4114,7 +4117,7 @@ int sql_ex_info::write_data(IO_CACHE* file)
old_ex.escaped= *escaped;
old_ex.opt_flags= opt_flags;
old_ex.empty_flags=empty_flags;
- return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex));
+ return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex)) != 0;
}
}
@@ -4136,11 +4139,11 @@ char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format)
the case when we have old format because we will be reusing net buffer
to read the actual file before we write out the Create_file event.
*/
- if (read_str(buf, buf_end, field_term, field_term_len) ||
- read_str(buf, buf_end, enclosed, enclosed_len) ||
- read_str(buf, buf_end, line_term, line_term_len) ||
- read_str(buf, buf_end, line_start, line_start_len) ||
- read_str(buf, buf_end, escaped, escaped_len))
+ if (read_str(&buf, buf_end, &field_term, &field_term_len) ||
+ read_str(&buf, buf_end, &enclosed, &enclosed_len) ||
+ read_str(&buf, buf_end, &line_term, &line_term_len) ||
+ read_str(&buf, buf_end, &line_start, &line_start_len) ||
+ read_str(&buf, buf_end, &escaped, &escaped_len))
return 0;
opt_flags = *buf++;
}
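
[Editor's note] The write() methods above all follow the same shape introduced by this patch: write_header() emits the common event header for the given data length, then the fixed-size body goes out through my_b_safe_write(), and a true return value means failure. A minimal sketch of that pattern, for illustration only -- the helper name and parameters are invented; only write_header() and my_b_safe_write() come from the code above:

/*
  Sketch, not part of this patch: write one event with a fixed-size body.
  'payload'/'payload_len' stand in for whatever a concrete event serializes.
*/
static bool write_simple_event(Log_event *ev, IO_CACHE *file,
                               byte *payload, uint payload_len)
{
  /* Common header first, then the body; any non-zero result is an error */
  return (ev->write_header(file, payload_len) ||
          my_b_safe_write(file, payload, payload_len));
}

This mirrors what Intvar_log_event::write(), Rand_log_event::write() and the other events above now do inline.
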
diff --git a/sql/log_event.h b/sql/log_event.h
index c9cce1d40ea..a9d18c65f0c 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -139,7 +139,7 @@ struct sql_ex_info
field_term_len + enclosed_len + line_term_len +
line_start_len + escaped_len + 6 : 7);
}
- int write_data(IO_CACHE* file);
+ bool write_data(IO_CACHE* file);
char* init(char* buf,char* buf_end,bool use_new_format);
bool new_format()
{
@@ -231,7 +231,7 @@ struct sql_ex_info
#define Q_FLAGS2_CODE 0
#define Q_SQL_MODE_CODE 1
#define Q_CATALOG_CODE 2
-
+#define Q_AUTO_INCREMENT 3
/* Intvar event post-header */
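
[Editor's note] Q_AUTO_INCREMENT is the status-variable code that lets a Query event carry the two new auto-increment variables to the slave. Its serialization is in log_event.cc hunks outside this excerpt; a plausible layout, offered only as an illustration (the helper name and the 2-byte encoding are assumptions), is the code byte followed by the two values:

/*
  Illustration only, not the patch's serialization code.  'pos' is a write
  cursor into the status-vars area of the Query event.
*/
static char *store_auto_increment_vars(char *pos, ulong increment, ulong offset)
{
  *pos++= (char) Q_AUTO_INCREMENT;
  int2store(pos, (uint16) increment);           /* auto_increment_increment */
  int2store(pos + 2, (uint16) offset);          /* auto_increment_offset    */
  return pos + 4;
}
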
@@ -387,8 +387,10 @@ typedef struct st_last_event_info
uint32 flags2;
bool sql_mode_inited;
ulong sql_mode; /* must be same as THD.variables.sql_mode */
+ ulong auto_increment_increment, auto_increment_offset;
st_last_event_info()
- : flags2_inited(0), flags2(0), sql_mode_inited(0), sql_mode(0)
+ :flags2_inited(0), flags2(0), sql_mode_inited(0), sql_mode(0),
+ auto_increment_increment(1),auto_increment_offset(1)
{
db[0]= 0; /* initially, the db is unknown */
}
@@ -407,13 +409,14 @@ class Log_event
{
public:
/*
- The offset in the log where this event originally appeared (it is preserved
- in relay logs, making SHOW SLAVE STATUS able to print coordinates of the
- event in the master's binlog). Note: when a transaction is written by the
- master to its binlog (wrapped in BEGIN/COMMIT) the log_pos of all the
- queries it contains is the one of the BEGIN (this way, when one does SHOW
- SLAVE STATUS it sees the offset of the BEGIN, which is logical as rollback
- may occur), except the COMMIT query which has its real offset.
+ The offset in the log where this event originally appeared (it is
+ preserved in relay logs, making SHOW SLAVE STATUS able to print
+ coordinates of the event in the master's binlog). Note: when a
+ transaction is written by the master to its binlog (wrapped in
+ BEGIN/COMMIT) the log_pos of all the queries it contains is the
+ one of the BEGIN (this way, when one does SHOW SLAVE STATUS it
+ sees the offset of the BEGIN, which is logical as rollback may
+ occur), except the COMMIT query which has its real offset.
*/
my_off_t log_pos;
/*
@@ -422,21 +425,24 @@ public:
*/
char *temp_buf;
/*
- Timestamp on the master(for debugging and replication of NOW()/TIMESTAMP).
- It is important for queries and LOAD DATA INFILE. This is set at the event's
- creation time, except for Query and Load (et al.) events where this is set
- at the query's execution time, which guarantees good replication (otherwise,
- we could have a query and its event with different timestamps).
+    Timestamp on the master (for debugging and replication of
+ NOW()/TIMESTAMP). It is important for queries and LOAD DATA
+ INFILE. This is set at the event's creation time, except for Query
+ and Load (et al.) events where this is set at the query's
+ execution time, which guarantees good replication (otherwise, we
+ could have a query and its event with different timestamps).
*/
time_t when;
/* The number of seconds the query took to run on the master. */
ulong exec_time;
+ /* Number of bytes written by write() function */
+ ulong data_written;
+
/*
- The master's server id (is preserved in the relay log; used to prevent from
- infinite loops in circular replication).
+    The master's server id (is preserved in the relay log; used to prevent
+    infinite loops in circular replication).
*/
uint32 server_id;
- uint cached_event_len;
/*
Some 16 flags. Only one is really used now; look above for
@@ -453,26 +459,25 @@ public:
Log_event();
Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt);
/*
- read_log_event() functions read an event from a binlog or relay log; used by
- SHOW BINLOG EVENTS, the binlog_dump thread on the master (reads master's
- binlog), the slave IO thread (reads the event sent by binlog_dump), the
- slave SQL thread (reads the event from the relay log).
- If mutex is 0, the read will proceed without mutex.
- We need the description_event to be able to parse the event (to know the
- post-header's size); in fact in read_log_event we detect the event's type,
- then call the specific event's constructor and pass description_event as an
- argument.
+ read_log_event() functions read an event from a binlog or relay
+ log; used by SHOW BINLOG EVENTS, the binlog_dump thread on the
+ master (reads master's binlog), the slave IO thread (reads the
+ event sent by binlog_dump), the slave SQL thread (reads the event
+ from the relay log). If mutex is 0, the read will proceed without
+ mutex. We need the description_event to be able to parse the
+ event (to know the post-header's size); in fact in read_log_event
+ we detect the event's type, then call the specific event's
+ constructor and pass description_event as an argument.
*/
static Log_event* read_log_event(IO_CACHE* file,
pthread_mutex_t* log_lock,
const Format_description_log_event *description_event);
static int read_log_event(IO_CACHE* file, String* packet,
pthread_mutex_t* log_lock);
- /* set_log_pos() is used to fill log_pos with tell(log). */
- void set_log_pos(MYSQL_LOG* log);
/*
- init_show_field_list() prepares the column names and types for the output of
- SHOW BINLOG EVENTS; it is used only by SHOW BINLOG EVENTS.
+ init_show_field_list() prepares the column names and types for the
+ output of SHOW BINLOG EVENTS; it is used only by SHOW BINLOG
+ EVENTS.
*/
static void init_show_field_list(List<Item>* field_list);
#ifdef HAVE_REPLICATION
@@ -494,7 +499,7 @@ public:
}
#else
Log_event() : temp_buf(0) {}
- // avoid having to link mysqlbinlog against libpthread
+ /* avoid having to link mysqlbinlog against libpthread */
static Log_event* read_log_event(IO_CACHE* file,
const Format_description_log_event *description_event);
/* print*() functions are used by mysqlbinlog */
@@ -512,13 +517,17 @@ public:
my_free((gptr) ptr, MYF(MY_WME|MY_ALLOW_ZERO_PTR));
}
- int write(IO_CACHE* file);
- int write_header(IO_CACHE* file);
- virtual int write_data(IO_CACHE* file)
- { return write_data_header(file) || write_data_body(file); }
- virtual int write_data_header(IO_CACHE* file __attribute__((unused)))
+ bool write_header(IO_CACHE* file, ulong data_length);
+ virtual bool write(IO_CACHE* file)
+ {
+ return (write_header(file, get_data_size()) ||
+ write_data_header(file) ||
+ write_data_body(file));
+ }
+ virtual bool is_artificial_event() { return 0; }
+ virtual bool write_data_header(IO_CACHE* file)
{ return 0; }
- virtual int write_data_body(IO_CACHE* file __attribute__((unused)))
+ virtual bool write_data_body(IO_CACHE* file __attribute__((unused)))
{ return 0; }
virtual Log_event_type get_type_code() = 0;
virtual bool is_valid() const = 0;
@@ -535,17 +544,10 @@ public:
}
}
virtual int get_data_size() { return 0;}
- int get_event_len()
- {
- /*
- We don't re-use the cached event's length anymore (we did in 4.x) because
- this leads to nasty problems: when the 5.0 slave reads an event from a 4.0
- master, it caches the event's length, then this event is converted before
- it goes into the relay log, so it would be written to the relay log with
- its old length, which is garbage.
- */
- return (cached_event_len=(LOG_EVENT_HEADER_LEN + get_data_size()));
- }
+ /*
+ Get event length for simple events. For complicated events the length
+ is calculated during write()
+ */
static Log_event* read_log_event(const char* buf, uint event_len,
const char **error,
const Format_description_log_event
@@ -592,32 +594,32 @@ public:
uint16 error_code;
ulong thread_id;
/*
- For events created by Query_log_event::exec_event (and
- Load_log_event::exec_event()) we need the *original* thread id, to be able
- to log the event with the original (=master's) thread id (fix for
- BUG#1686).
+ For events created by Query_log_event::exec_event (and
+ Load_log_event::exec_event()) we need the *original* thread id, to be able
+ to log the event with the original (=master's) thread id (fix for
+ BUG#1686).
*/
ulong slave_proxy_id;
/*
- Binlog format 3 and 4 start to differ (as far as class members are
- concerned) from here.
+ Binlog format 3 and 4 start to differ (as far as class members are
+ concerned) from here.
*/
- int catalog_len; // <= 255 char; -1 means uninited
+ int catalog_len; // <= 255 char; -1 means uninited
/*
We want to be able to store a variable number of N-bit status vars:
- (generally N=32; but N=64 for SQL_MODE) a user may want to log the number of
- affected rows (for debugging) while another does not want to lose 4 bytes in
- this.
+ (generally N=32; but N=64 for SQL_MODE) a user may want to log the number
+ of affected rows (for debugging) while another does not want to lose 4
+ bytes in this.
The storage on disk is the following:
status_vars_len is part of the post-header,
status_vars are in the variable-length part, after the post-header, before
the db & query.
status_vars on disk is a sequence of pairs (code, value) where 'code' means
- 'sql_mode', 'affected' etc. Sometimes 'value' must be a short string, so its
- first byte is its length. For now the order of status vars is:
+ 'sql_mode', 'affected' etc. Sometimes 'value' must be a short string, so
+ its first byte is its length. For now the order of status vars is:
flags2 - sql_mode - catalog.
We should add the same thing to Load_log_event, but in fact
LOAD DATA INFILE is going to be logged with a new type of event (logging of
@@ -643,6 +645,7 @@ public:
uint32 flags2;
/* In connections sql_mode is 32 bits now but will be 64 bits soon */
ulong sql_mode;
+ ulong auto_increment_increment, auto_increment_offset;
#ifndef MYSQL_CLIENT
@@ -667,14 +670,8 @@ public:
}
}
Log_event_type get_type_code() { return QUERY_EVENT; }
- int write(IO_CACHE* file);
- int write_data(IO_CACHE* file); // returns 0 on success, -1 on error
+ bool write(IO_CACHE* file);
bool is_valid() const { return query != 0; }
- int get_data_size()
- {
- /* Note that the "1" below is the db's length. */
- return (q_len + db_len + 1 + status_vars_len + QUERY_HEADER_LEN);
- }
};
#ifdef HAVE_REPLICATION
@@ -713,7 +710,7 @@ public:
int get_data_size();
bool is_valid() const { return master_host != 0; }
Log_event_type get_type_code() { return SLAVE_EVENT; }
- int write_data(IO_CACHE* file );
+ bool write(IO_CACHE* file);
};
#endif /* HAVE_REPLICATION */
@@ -804,8 +801,8 @@ public:
{
return sql_ex.new_format() ? NEW_LOAD_EVENT: LOAD_EVENT;
}
- int write_data_header(IO_CACHE* file);
- int write_data_body(IO_CACHE* file);
+ bool write_data_header(IO_CACHE* file);
+ bool write_data_body(IO_CACHE* file);
bool is_valid() const { return table_name != 0; }
int get_data_size()
{
@@ -830,23 +827,26 @@ extern char server_version[SERVER_VERSION_LENGTH];
is >4 (otherwise if ==4 the event will be sent naturally).
****************************************************************************/
+
class Start_log_event_v3: public Log_event
{
public:
/*
- If this event is at the start of the first binary log since server startup
- 'created' should be the timestamp when the event (and the binary log) was
- created.
- In the other case (i.e. this event is at the start of a binary log created
- by FLUSH LOGS or automatic rotation), 'created' should be 0.
- This "trick" is used by MySQL >=4.0.14 slaves to know if they must drop the
- stale temporary tables or not.
- Note that when 'created'!=0, it is always equal to the event's timestamp;
- indeed Start_log_event is written only in log.cc where the first
- constructor below is called, in which 'created' is set to 'when'.
- So in fact 'created' is a useless variable. When it is 0
- we can read the actual value from timestamp ('when') and when it is
- non-zero we can read the same value from timestamp ('when'). Conclusion:
+ If this event is at the start of the first binary log since server
+ startup 'created' should be the timestamp when the event (and the
+ binary log) was created. In the other case (i.e. this event is at
+ the start of a binary log created by FLUSH LOGS or automatic
+ rotation), 'created' should be 0. This "trick" is used by MySQL
+ >=4.0.14 slaves to know if they must drop the stale temporary
+ tables or not.
+
+ Note that when 'created'!=0, it is always equal to the event's
+ timestamp; indeed Start_log_event is written only in log.cc where
+ the first constructor below is called, in which 'created' is set
+ to 'when'. So in fact 'created' is a useless variable. When it is
+ 0 we can read the actual value from timestamp ('when') and when it
+ is non-zero we can read the same value from timestamp
+ ('when'). Conclusion:
- we use timestamp to print when the binlog was created.
- we use 'created' only to know if this is a first binlog or not.
In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in
@@ -856,6 +856,12 @@ public:
time_t created;
uint16 binlog_version;
char server_version[ST_SERVER_VER_LEN];
+ /*
+    artificial_event is 1 in the case where this is a generated event that
+    should not cause any cleanup actions. We handle this in the log by
+    setting log_pos == 0 (for now).
+ */
+ bool artificial_event;
#ifndef MYSQL_CLIENT
Start_log_event_v3();
@@ -872,14 +878,16 @@ public:
const Format_description_log_event* description_event);
~Start_log_event_v3() {}
Log_event_type get_type_code() { return START_EVENT_V3;}
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
bool is_valid() const { return 1; }
int get_data_size()
{
return START_V3_HEADER_LEN; //no variable-sized part
}
+ virtual bool is_artificial_event() { return artificial_event; }
};
+
/*
For binlog version 4.
This event is saved by threads which read it, as they need it for future
@@ -912,18 +920,12 @@ public:
const Format_description_log_event* description_event);
~Format_description_log_event() { my_free((gptr)post_header_len, MYF(0)); }
Log_event_type get_type_code() { return FORMAT_DESCRIPTION_EVENT;}
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
bool is_valid() const
- {
- return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN :
- LOG_EVENT_MINIMAL_HEADER_LEN)) &&
- (post_header_len != NULL));
- }
- int get_event_len()
{
- int i= LOG_EVENT_MINIMAL_HEADER_LEN + get_data_size();
- DBUG_PRINT("info",("event_len=%d",i));
- return i;
+ return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN :
+ LOG_EVENT_MINIMAL_HEADER_LEN)) &&
+ (post_header_len != NULL));
}
int get_data_size()
{
@@ -944,6 +946,7 @@ public:
Logs special variables such as auto_increment values
****************************************************************************/
+
class Intvar_log_event: public Log_event
{
public:
@@ -967,10 +970,11 @@ public:
Log_event_type get_type_code() { return INTVAR_EVENT;}
const char* get_var_type_name();
int get_data_size() { return 9; /* sizeof(type) + sizeof(val) */;}
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
bool is_valid() const { return 1; }
};
+
/*****************************************************************************
Rand Log Event class
@@ -981,6 +985,7 @@ public:
waste, it does not cause bugs).
****************************************************************************/
+
class Rand_log_event: public Log_event
{
public:
@@ -1003,10 +1008,11 @@ class Rand_log_event: public Log_event
~Rand_log_event() {}
Log_event_type get_type_code() { return RAND_EVENT;}
int get_data_size() { return 16; /* sizeof(ulonglong) * 2*/ }
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
bool is_valid() const { return 1; }
};
+
/*****************************************************************************
User var Log Event class
@@ -1018,6 +1024,7 @@ class Rand_log_event: public Log_event
written before the Query_log_event, to set the user variable.
****************************************************************************/
+
class User_var_log_event: public Log_event
{
public:
@@ -1044,16 +1051,11 @@ public:
User_var_log_event(const char* buf, const Format_description_log_event* description_event);
~User_var_log_event() {}
Log_event_type get_type_code() { return USER_VAR_EVENT;}
- int get_data_size()
- {
- return (is_null ? UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL :
- UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE +
- UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE + val_len);
- }
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
bool is_valid() const { return 1; }
};
+
/*****************************************************************************
Stop Log Event class
@@ -1090,6 +1092,7 @@ public:
This will be depricated when we move to using sequence ids.
****************************************************************************/
+
class Rotate_log_event: public Log_event
{
public:
@@ -1121,22 +1124,18 @@ public:
my_free((gptr) new_log_ident, MYF(0));
}
Log_event_type get_type_code() { return ROTATE_EVENT;}
- int get_event_len()
- {
- return (LOG_EVENT_MINIMAL_HEADER_LEN + get_data_size());
- }
int get_data_size() { return ident_len + ROTATE_HEADER_LEN;}
bool is_valid() const { return new_log_ident != 0; }
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
};
+
/* the classes below are for the new LOAD DATA INFILE logging */
/*****************************************************************************
-
Create File Log Event class
-
****************************************************************************/
+
class Create_file_log_event: public Load_log_event
{
protected:
@@ -1187,13 +1186,13 @@ public:
4 + 1 + block_len);
}
bool is_valid() const { return inited_from_old || block != 0; }
- int write_data_header(IO_CACHE* file);
- int write_data_body(IO_CACHE* file);
+ bool write_data_header(IO_CACHE* file);
+ bool write_data_body(IO_CACHE* file);
/*
Cut out Create_file extentions and
write it as Load event - used on the slave
*/
- int write_base(IO_CACHE* file);
+ bool write_base(IO_CACHE* file);
};
@@ -1202,6 +1201,7 @@ public:
Append Block Log Event class
****************************************************************************/
+
class Append_block_log_event: public Log_event
{
public:
@@ -1209,14 +1209,15 @@ public:
uint block_len;
uint file_id;
/*
- 'db' is filled when the event is created in mysql_load() (the event needs to
- have a 'db' member to be well filtered by binlog-*-db rules). 'db' is not
- written to the binlog (it's not used by Append_block_log_event::write()), so
- it can't be read in the Append_block_log_event(const char* buf, int
- event_len) constructor.
- In other words, 'db' is used only for filtering by binlog-*-db rules.
- Create_file_log_event is different: its 'db' (which is inherited from
- Load_log_event) is written to the binlog and can be re-read.
+ 'db' is filled when the event is created in mysql_load() (the
+ event needs to have a 'db' member to be well filtered by
+ binlog-*-db rules). 'db' is not written to the binlog (it's not
+ used by Append_block_log_event::write()), so it can't be read in
+ the Append_block_log_event(const char* buf, int event_len)
+ constructor. In other words, 'db' is used only for filtering by
+    binlog-*-db rules. Create_file_log_event is different: its 'db'
+ (which is inherited from Load_log_event) is written to the binlog
+ and can be re-read.
*/
const char* db;
@@ -1237,15 +1238,17 @@ public:
Log_event_type get_type_code() { return APPEND_BLOCK_EVENT;}
int get_data_size() { return block_len + APPEND_BLOCK_HEADER_LEN ;}
bool is_valid() const { return block != 0; }
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
const char* get_db() { return db; }
};
+
/*****************************************************************************
Delete File Log Event class
****************************************************************************/
+
class Delete_file_log_event: public Log_event
{
public:
@@ -1269,15 +1272,17 @@ public:
Log_event_type get_type_code() { return DELETE_FILE_EVENT;}
int get_data_size() { return DELETE_FILE_HEADER_LEN ;}
bool is_valid() const { return file_id != 0; }
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
const char* get_db() { return db; }
};
+
/*****************************************************************************
Execute Load Log Event class
****************************************************************************/
+
class Execute_load_log_event: public Log_event
{
public:
@@ -1300,10 +1305,11 @@ public:
Log_event_type get_type_code() { return EXEC_LOAD_EVENT;}
int get_data_size() { return EXEC_LOAD_HEADER_LEN ;}
bool is_valid() const { return file_id != 0; }
- int write_data(IO_CACHE* file);
+ bool write(IO_CACHE* file);
const char* get_db() { return db; }
};
+
#ifdef MYSQL_CLIENT
class Unknown_log_event: public Log_event
{
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 24f6c4a3fa9..50eabfaa903 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -4068,7 +4068,8 @@ enum options_mysqld
OPT_DEFAULT_TIME_ZONE,
OPT_OPTIMIZER_SEARCH_DEPTH,
OPT_OPTIMIZER_PRUNE_LEVEL,
- OPT_SQL_UPDATABLE_VIEW_KEY
+ OPT_SQL_UPDATABLE_VIEW_KEY,
+ OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET
};
@@ -4087,6 +4088,16 @@ struct my_option my_long_options[] =
#endif /* HAVE_REPLICATION */
{"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax.", 0, 0, 0,
GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"auto-increment-increment", OPT_AUTO_INCREMENT,
+ "Auto-increment columns are incremented by this",
+ (gptr*) &global_system_variables.auto_increment_increment,
+ (gptr*) &max_system_variables.auto_increment_increment, 0, GET_ULONG,
+ OPT_ARG, 1, 1, 65535, 0, 1, 0 },
+ {"auto-increment-offset", OPT_AUTO_INCREMENT_OFFSET,
+ "Offset added to Auto-increment columns. Used when auto-increment-increment != 1",
+ (gptr*) &global_system_variables.auto_increment_offset,
+ (gptr*) &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG,
+ 1, 1, 65535, 0, 1, 0 },
{"basedir", 'b',
"Path to installation directory. All paths are usually resolved relative to this.",
(gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 4b347f91869..04cd4d13b26 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -127,6 +127,11 @@ static byte *get_warning_count(THD *thd);
alphabetic order
*/
+sys_var_thd_ulong sys_auto_increment_increment("auto_increment_increment",
+ &SV::auto_increment_increment);
+sys_var_thd_ulong sys_auto_increment_offset("auto_increment_offset",
+ &SV::auto_increment_offset);
+
sys_var_long_ptr sys_binlog_cache_size("binlog_cache_size",
&binlog_cache_size);
sys_var_thd_ulong sys_bulk_insert_buff_size("bulk_insert_buffer_size",
@@ -476,6 +481,8 @@ sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE));
sys_var *sys_variables[]=
{
&sys_auto_is_null,
+ &sys_auto_increment_increment,
+ &sys_auto_increment_offset,
&sys_autocommit,
&sys_big_tables,
&sys_big_selects,
@@ -624,6 +631,8 @@ sys_var *sys_variables[]=
*/
struct show_var_st init_vars[]= {
+ {"auto_incrememt_increment", (char*) &sys_auto_increment_increment, SHOW_SYS},
+ {"auto_increment_offset", (char*) &sys_auto_increment_offset, SHOW_SYS},
{"back_log", (char*) &back_log, SHOW_LONG},
{"basedir", mysql_home, SHOW_CHAR},
#ifdef HAVE_BERKELEY_DB
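
[Editor's note] The pair of variables registered above implements the non-conflicting series described in the commit message: with auto_increment_increment I and auto_increment_offset O (1 <= O <= I), a server only generates values congruent to O modulo I, so two co-masters configured with the same I but different O never hand out the same number. The real computation is part of the handler.cc changes listed in the diffstat; the arithmetic it has to perform looks roughly like this (hypothetical helper, shown only to illustrate the rule):

/*
  Illustration only: smallest value greater than 'current' that lies in the
  series offset, offset+increment, offset+2*increment, ...
*/
static ulonglong next_in_series(ulonglong current, ulong increment, ulong offset)
{
  if (current < offset)
    return offset;
  /* step past 'current' to the next member of the series */
  return ((current - offset) / increment + 1) * increment + offset;
}

/* Example: increment=10, offset=3 gives the series 3,13,23,33,...;
   next_in_series(25, 10, 3) == 33 */
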
diff --git a/sql/slave.cc b/sql/slave.cc
index d7b60107096..2c6fb06b7cb 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -220,9 +220,10 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len,
look_for_description_event
1 if we should look for such an event. We only need
this when the SQL thread starts and opens an existing
- relay log and has to execute it (possibly from an offset
- >4); then we need to read the first event of the relay
- log to be able to parse the events we have to execute.
+ relay log and has to execute it (possibly from an
+ offset >4); then we need to read the first event of
+ the relay log to be able to parse the events we have
+ to execute.
DESCRIPTION
- Close old open relay log files.
@@ -333,8 +334,8 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
while (look_for_description_event)
{
/*
- Read the possible Format_description_log_event; if position was 4, no need, it will
- be read naturally.
+ Read the possible Format_description_log_event; if position
+ was 4, no need, it will be read naturally.
*/
DBUG_PRINT("info",("looking for a Format_description_log_event"));
@@ -373,9 +374,9 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
Format_desc (of slave)
Rotate (of master)
Format_desc (of master)
- So the Format_desc which really describes the rest of the relay log is
- the 3rd event (it can't be further than that, because we rotate the
- relay log when we queue a Rotate event from the master).
+ So the Format_desc which really describes the rest of the relay log
+ is the 3rd event (it can't be further than that, because we rotate
+ the relay log when we queue a Rotate event from the master).
But what describes the Rotate is the first Format_desc.
So what we do is:
go on searching for Format_description events, until you exceed the
@@ -424,7 +425,7 @@ err:
/*
- Init functio to set up array for errors that should be skipped for slave
+ Init function to set up array for errors that should be skipped for slave
SYNOPSIS
init_slave_skip_errors()
@@ -505,26 +506,11 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
the relay log is not "val".
With the end_log_pos solution, we avoid computations involving lengthes.
*/
- DBUG_PRINT("info", ("log_pos=%lld group_master_log_pos=%lld",
- log_pos,group_master_log_pos));
+ DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu",
+ (long) log_pos, (long) group_master_log_pos));
if (log_pos) // 3.23 binlogs don't have log_posx
{
-#if MYSQL_VERSION_ID < 50000
- /*
- If the event was converted from a 3.23 format, get_event_len() has
- grown by 6 bytes (at least for most events, except LOAD DATA INFILE
- which is already a big problem for 3.23->4.0 replication); 6 bytes is
- the difference between the header's size in 4.0 (LOG_EVENT_HEADER_LEN)
- and the header's size in 3.23 (OLD_HEADER_LEN). Note that using
- mi->old_format will not help if the I/O thread has not started yet.
- Yes this is a hack but it's just to make 3.23->4.x replication work;
- 3.23->5.0 replication is working much better.
- */
- group_master_log_pos= log_pos -
- (mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0);
-#else
group_master_log_pos= log_pos;
-#endif /* MYSQL_VERSION_ID < 5000 */
}
pthread_cond_broadcast(&data_cond);
if (!skip_lock)
@@ -612,7 +598,8 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset,
goto err;
}
if (!just_reset)
- error= init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos,
+ error= init_relay_log_pos(rli, rli->group_relay_log_name,
+ rli->group_relay_log_pos,
0 /* do not need data lock */, errmsg, 0);
err:
@@ -880,8 +867,8 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len)
second call will make the decision (because
all_tables_not_ok() = !tables_ok(1st_list) && !tables_ok(2nd_list)).
- Thought which arose from a question of a big customer "I want to include all
- tables like "abc.%" except the "%.EFG"". This can't be done now. If we
+ Thought which arose from a question of a big customer "I want to include
+ all tables like "abc.%" except the "%.EFG"". This can't be done now. If we
supported Perl regexps we could do it with this pattern: /^abc\.(?!EFG)/
(I could not find an equivalent in the regex library MySQL uses).
@@ -1390,7 +1377,7 @@ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi)
else
{
mi->clock_diff_with_master= 0; /* The "most sensible" value */
- sql_print_error("Warning: \"SELECT UNIX_TIMESTAMP()\" failed on master, \
+ sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, \
do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS");
}
if (master_res)
@@ -2151,7 +2138,7 @@ file '%s')", fname);
goto errwithmsg;
#ifndef HAVE_OPENSSL
if (ssl)
- sql_print_error("SSL information in the master info file "
+ sql_print_warning("SSL information in the master info file "
"('%s') are ignored because this MySQL slave was compiled "
"without SSL support.", fname);
#endif /* HAVE_OPENSSL */
@@ -2569,17 +2556,16 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
ulong init_abort_pos_wait;
int error=0;
struct timespec abstime; // for timeout checking
- set_timespec(abstime,timeout);
-
+ const char *msg;
DBUG_ENTER("wait_for_pos");
- DBUG_PRINT("enter",("group_master_log_name: '%s' pos: %lu timeout: %ld",
- group_master_log_name, (ulong) group_master_log_pos,
- (long) timeout));
+ DBUG_PRINT("enter",("log_name: '%s' log_pos: %lu timeout: %lu",
+ log_name->c_ptr(), (ulong) log_pos, (ulong) timeout));
+ set_timespec(abstime,timeout);
pthread_mutex_lock(&data_lock);
- const char *msg= thd->enter_cond(&data_cond, &data_lock,
- "Waiting for the slave SQL thread to "
- "advance position");
+ msg= thd->enter_cond(&data_cond, &data_lock,
+ "Waiting for the slave SQL thread to "
+ "advance position");
/*
This function will abort when it notices that some CHANGE MASTER or
RESET MASTER has changed the master info.
@@ -2635,6 +2621,12 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name,
bool pos_reached;
int cmp_result= 0;
+ DBUG_PRINT("info",
+ ("init_abort_pos_wait: %ld abort_pos_wait: %ld",
+ init_abort_pos_wait, abort_pos_wait));
+ DBUG_PRINT("info",("group_master_log_name: '%s' pos: %lu",
+ group_master_log_name, (ulong) group_master_log_pos));
+
/*
group_master_log_name can be "", if we are just after a fresh
replication start or after a CHANGE MASTER TO MASTER_HOST/PORT
@@ -2941,8 +2933,8 @@ server_errno=%d)",
/* Check if eof packet */
if (len < 8 && mysql->net.read_pos[0] == 254)
{
- sql_print_error("Slave: received end packet from server, apparent\
- master shutdown: %s",
+ sql_print_information("Slave: received end packet from server, apparent "
+ "master shutdown: %s",
mysql_error(mysql));
return packet_error;
}
@@ -3261,14 +3253,14 @@ slave_begin:
thd->proc_info = "Connecting to master";
// we can get killed during safe_connect
if (!safe_connect(thd, mysql, mi))
- sql_print_error("Slave I/O thread: connected to master '%s@%s:%d',\
+ sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\
replication started in log '%s' at position %s", mi->user,
mi->host, mi->port,
IO_RPL_LOG_NAME,
llstr(mi->master_log_pos,llbuff));
else
{
- sql_print_error("Slave I/O thread killed while connecting to master");
+ sql_print_information("Slave I/O thread killed while connecting to master");
goto err;
}
@@ -3301,7 +3293,7 @@ connected:
sql_print_error("Failed on request_dump()");
if (io_slave_killed(thd,mi))
{
- sql_print_error("Slave I/O thread killed while requesting master \
+ sql_print_information("Slave I/O thread killed while requesting master \
dump");
goto err;
}
@@ -3325,7 +3317,7 @@ dump");
}
if (io_slave_killed(thd,mi))
{
- sql_print_error("Slave I/O thread killed while retrying master \
+ sql_print_information("Slave I/O thread killed while retrying master \
dump");
goto err;
}
@@ -3338,7 +3330,7 @@ reconnecting to try again, log '%s' at postion %s", IO_RPL_LOG_NAME,
if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
io_slave_killed(thd,mi))
{
- sql_print_error("Slave I/O thread killed during or \
+ sql_print_information("Slave I/O thread killed during or \
after reconnect");
goto err;
}
@@ -3360,7 +3352,7 @@ after reconnect");
if (io_slave_killed(thd,mi))
{
if (global_system_variables.log_warnings)
- sql_print_error("Slave I/O thread killed while reading event");
+ sql_print_information("Slave I/O thread killed while reading event");
goto err;
}
@@ -3397,20 +3389,20 @@ max_allowed_packet",
if (io_slave_killed(thd,mi))
{
if (global_system_variables.log_warnings)
- sql_print_error("Slave I/O thread killed while waiting to \
+ sql_print_information("Slave I/O thread killed while waiting to \
reconnect after a failed read");
goto err;
}
thd->proc_info = "Reconnecting after a failed master event read";
if (!suppress_warnings)
- sql_print_error("Slave I/O thread: Failed reading log event, \
+ sql_print_information("Slave I/O thread: Failed reading log event, \
reconnecting to retry, log '%s' position %s", IO_RPL_LOG_NAME,
llstr(mi->master_log_pos, llbuff));
if (safe_reconnect(thd, mysql, mi, suppress_warnings) ||
io_slave_killed(thd,mi))
{
if (global_system_variables.log_warnings)
- sql_print_error("Slave I/O thread killed during or after a \
+ sql_print_information("Slave I/O thread killed during or after a \
reconnect done to recover from failed read");
goto err;
}
@@ -3472,7 +3464,7 @@ log space");
// error = 0;
err:
// print the current replication position
- sql_print_error("Slave I/O thread exiting, read up to log '%s', position %s",
+ sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s",
IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
VOID(pthread_mutex_lock(&LOCK_thread_count));
thd->query = thd->db = 0; // extra safety
@@ -3623,7 +3615,7 @@ slave_begin:
rli->group_master_log_name,
llstr(rli->group_master_log_pos,llbuff)));
if (global_system_variables.log_warnings)
- sql_print_error("Slave SQL thread initialized, starting replication in \
+ sql_print_information("Slave SQL thread initialized, starting replication in \
log '%s' at position %s, relay log '%s' position: %s", RPL_LOG_NAME,
llstr(rli->group_master_log_pos,llbuff),rli->group_relay_log_name,
llstr(rli->group_relay_log_pos,llbuff1));
@@ -3661,7 +3653,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
}
/* Thread stopped. Print the current replication position to the log */
- sql_print_error("Slave SQL thread exiting, replication stopped in log \
+ sql_print_information("Slave SQL thread exiting, replication stopped in log \
'%s' at position %s",
RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff));
@@ -4373,7 +4365,7 @@ Error: '%s' errno: %d retry-time: %d retries: %d",
if (reconnect)
{
if (!suppress_warnings && global_system_variables.log_warnings)
- sql_print_error("Slave: connected to master '%s@%s:%d',\
+ sql_print_information("Slave: connected to master '%s@%s:%d',\
replication resumed in log '%s' at position %s", mi->user,
mi->host, mi->port,
IO_RPL_LOG_NAME,
@@ -4556,12 +4548,12 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
/*
Relay log is always in new format - if the master is 3.23, the
I/O thread will convert the format for us.
- A problem: the description event may be in a previous relay log. So if the
- slave has been shutdown meanwhile, we would have to look in old relay
+ A problem: the description event may be in a previous relay log. So if
+ the slave has been shutdown meanwhile, we would have to look in old relay
logs, which may even have been deleted. So we need to write this
description event at the beginning of the relay log.
- When the relay log is created when the I/O thread starts, easy: the master
- will send the description event and we will queue it.
+ When the relay log is created when the I/O thread starts, easy: the
+ master will send the description event and we will queue it.
But if the relay log is created by new_file(): then the solution is:
MYSQL_LOG::open() will write the buffered description event.
*/
@@ -4715,8 +4707,8 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
{
#ifdef EXTRA_DEBUG
if (global_system_variables.log_warnings)
- sql_print_error("next log '%s' is currently active",
- rli->linfo.log_file_name);
+ sql_print_information("next log '%s' is currently active",
+ rli->linfo.log_file_name);
#endif
rli->cur_log= cur_log= rli->relay_log.get_log_file();
rli->cur_log_old_open_count= rli->relay_log.get_open_count();
@@ -4745,8 +4737,8 @@ Log_event* next_event(RELAY_LOG_INFO* rli)
*/
#ifdef EXTRA_DEBUG
if (global_system_variables.log_warnings)
- sql_print_error("next log '%s' is not active",
- rli->linfo.log_file_name);
+ sql_print_information("next log '%s' is not active",
+ rli->linfo.log_file_name);
#endif
// open_binlog() will check the magic header
if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name,
@@ -4772,7 +4764,11 @@ event(errno: %d cur_log->error: %d)",
}
}
if (!errmsg && global_system_variables.log_warnings)
- errmsg = "slave SQL thread was killed";
+ {
+ sql_print_information("Error reading relay log event: %s",
+ "slave SQL thread was killed");
+ DBUG_RETURN(0);
+ }
err:
if (errmsg)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index a807a4b75e5..0c42c45bf59 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -160,8 +160,8 @@ bool foreign_key_prefix(Key *a, Key *b)
THD::THD()
:user_time(0), global_read_lock(0), is_fatal_error(0),
- last_insert_id_used(0),
- insert_id_used(0), rand_used(0), time_zone_used(0),
+ rand_used(0), time_zone_used(0),
+ last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0),
in_lock_tables(0), bootstrap(0), spcont(NULL)
{
current_arena= this;
@@ -496,6 +496,24 @@ bool THD::store_globals()
}
+/* Cleanup after a query */
+
+void THD::cleanup_after_query()
+{
+ if (clear_next_insert_id)
+ {
+ clear_next_insert_id= 0;
+ next_insert_id= 0;
+ }
+ /* Free Items that were created during this execution */
+ free_items(free_list);
+ /*
+ In the rest of code we assume that free_list never points to garbage:
+ Keep this predicate true.
+ */
+ free_list= 0;
+}
+
/*
Convert a string to another character set
@@ -1461,8 +1479,8 @@ void Statement::end_statement()
lex_end(lex);
delete lex->result;
lex->result= 0;
- free_items(free_list);
- free_list= 0;
+ /* Note that free_list is freed in cleanup_after_query() */
+
/*
Don't free mem_root, as mem_root is freed in the end of dispatch_command
(once for any command).
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 1612ab6fb17..761ffc133a3 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -373,6 +373,7 @@ struct system_variables
ulonglong myisam_max_sort_file_size;
ha_rows select_limit;
ha_rows max_join_size;
+ ulong auto_increment_increment, auto_increment_offset;
ulong bulk_insert_buff_size;
ulong join_buff_size;
ulong long_query_time;
@@ -835,6 +836,8 @@ public:
generated auto_increment value in handler.cc
*/
ulonglong next_insert_id;
+ /* Remember last next_insert_id to reset it if something went wrong */
+ ulonglong prev_insert_id;
/*
The insert_id used for the last statement or set by SET LAST_INSERT_ID=#
or SELECT LAST_INSERT_ID(#). Used for binary log and returned by
@@ -889,6 +892,9 @@ public:
/* for user variables replication*/
DYNAMIC_ARRAY user_var_events;
+ enum killed_state { NOT_KILLED=0, KILL_CONNECTION=ER_SERVER_SHUTDOWN, KILL_QUERY=ER_QUERY_INTERRUPTED };
+ killed_state volatile killed;
+
/* scramble - random string sent to client on handshake */
char scramble[SCRAMBLE_LENGTH+1];
@@ -896,22 +902,10 @@ public:
bool locked, some_tables_deleted;
bool last_cuted_field;
bool no_errors, password, is_fatal_error;
- bool query_start_used,last_insert_id_used,insert_id_used,rand_used;
- bool time_zone_used;
+ bool query_start_used, rand_used, time_zone_used;
+ bool last_insert_id_used,insert_id_used, clear_next_insert_id;
bool in_lock_tables;
bool query_error, bootstrap, cleanup_done;
-
- enum killed_state { NOT_KILLED=0, KILL_CONNECTION=ER_SERVER_SHUTDOWN, KILL_QUERY=ER_QUERY_INTERRUPTED };
- killed_state volatile killed;
- inline int killed_errno() const
- {
- return killed;
- }
- inline void send_kill_message() const
- {
- my_error(killed_errno(), MYF(0));
- }
-
bool tmp_table_used;
bool charset_is_system_charset, charset_is_collation_connection;
bool slow_command;
@@ -951,6 +945,7 @@ public:
void init_for_queries();
void change_user(void);
void cleanup(void);
+ void cleanup_after_query();
bool store_globals();
#ifdef SIGNAL_WITH_VIO_CLOSE
inline void set_active_vio(Vio* vio)
@@ -1070,6 +1065,14 @@ public:
}
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
+ inline int killed_errno() const
+ {
+ return killed;
+ }
+ inline void send_kill_message() const
+ {
+ my_error(killed_errno(), MYF(0));
+ }
};
/* Flags for the THD::system_thread (bitmap) variable */
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 8fc0671c808..cba74c93a6a 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -640,7 +640,7 @@ int mysqld_help(THD *thd, const char *mask)
uint mlen= strlen(mask);
MEM_ROOT *mem_root= &thd->mem_root;
- if (res= open_and_lock_tables(thd, tables))
+ if ((res= open_and_lock_tables(thd, tables)))
goto end;
/*
Init tables and fields to be usable from items
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 14c4838900f..3508e962c60 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -311,8 +311,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
else
#endif
error=write_record(table,&info);
- if (error)
- break;
/*
If auto_increment values are used, save the first one
for LAST_INSERT_ID() and for the update log.
@@ -323,6 +321,8 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
{ // Get auto increment value
id= thd->last_insert_id;
}
+ if (error)
+ break;
thd->row_count++;
}
@@ -638,9 +638,10 @@ int write_record(TABLE *table,COPY_INFO *info)
{
while ((error=table->file->write_row(table->record[0])))
{
+ uint key_nr;
if (error != HA_WRITE_SKIP)
goto err;
- uint key_nr;
+ table->file->restore_auto_increment();
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
{
error=HA_WRITE_SKIP; /* Database can't find key */
@@ -733,6 +734,7 @@ int write_record(TABLE *table,COPY_INFO *info)
if (info->handle_duplicates != DUP_IGNORE ||
(error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE))
goto err;
+ table->file->restore_auto_increment();
}
else
info->copied++;
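
[Editor's note] The restore_auto_increment() calls added in write_record() above (and in copy_data_between_tables() further down) give back the auto-increment value that was reserved for a row whose write failed with a duplicate key, so the value is re-used instead of leaving a gap. The helper itself lives in the handler.h/handler.cc changes not shown in this excerpt; given the new THD::prev_insert_id member added in sql_class.h, it presumably amounts to something like this sketch:

/* Sketch only -- not the patch's actual handler code. */
void handler::restore_auto_increment()
{
  THD *thd= current_thd;
  if (thd->next_insert_id)                      /* a value was auto-generated */
    thd->next_insert_id= thd->prev_insert_id;   /* hand it back for re-use    */
}
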
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 9a76fa2da84..1b4769d747e 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1571,8 +1571,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
check_grant(thd, SELECT_ACL, &table_list, 2, UINT_MAX, 0))
break;
mysqld_list_fields(thd,&table_list,fields);
- free_items(thd->free_list);
- thd->free_list= 0; /* free_list should never point to garbage */
+ thd->cleanup_after_query();
break;
}
#endif
@@ -4520,6 +4519,7 @@ void mysql_parse(THD *thd, char *inBuf, uint length)
}
thd->proc_info="freeing items";
thd->end_statement();
+ thd->cleanup_after_query();
}
DBUG_VOID_RETURN;
}
@@ -4546,10 +4546,12 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length)
all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first))
error= 1; /* Ignore question */
thd->end_statement();
+ thd->cleanup_after_query();
DBUG_RETURN(error);
}
#endif
+
/*****************************************************************************
** Store field definition for create
** Return 0 if ok
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 124db39ef3f..982e00391ef 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1628,8 +1628,7 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
thd->restore_backup_statement(stmt, &thd->stmt_backup);
cleanup_items(stmt->free_list);
close_thread_tables(thd);
- free_items(thd->free_list);
- thd->free_list= 0;
+ thd->cleanup_after_query();
thd->current_arena= thd;
if (error)
@@ -1856,12 +1855,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
cleanup_items(stmt->free_list);
reset_stmt_params(stmt);
close_thread_tables(thd); /* to close derived tables */
- /*
- Free items that were created during this execution of the PS by
- query optimizer.
- */
- free_items(thd->free_list);
- thd->free_list= 0;
+ thd->cleanup_after_query();
}
thd->set_statement(&thd->stmt_backup);
@@ -1969,13 +1963,8 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt,
reset_stmt_params(stmt);
close_thread_tables(thd); // to close derived tables
thd->set_statement(&thd->stmt_backup);
- /* Free Items that were created during this execution of the PS. */
- free_items(thd->free_list);
- /*
- In the rest of prepared statements code we assume that free_list
- never points to garbage: keep this predicate true.
- */
- thd->free_list= 0;
+ thd->cleanup_after_query();
+
if (stmt->state == Item_arena::PREPARED)
{
thd->current_arena= thd;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 1d4414426d0..08cb90d2824 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3389,6 +3389,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
to->file->print_error(error,MYF(0));
break;
}
+ to->file->restore_auto_increment();
delete_count++;
}
else