Diffstat (limited to 'sql')
-rw-r--r-- | sql/CMakeLists.txt |   2
-rw-r--r-- | sql/Makefile.am    |   4
-rw-r--r-- | sql/field.cc       |  39
-rw-r--r-- | sql/field.h        |  21
-rw-r--r-- | sql/log.cc         |   5
-rw-r--r-- | sql/log_event.cc   | 403
-rw-r--r-- | sql/log_event.h    |  35
-rw-r--r-- | sql/mysql_priv.h   |   5
-rw-r--r-- | sql/mysqld.cc      |  12
-rw-r--r-- | sql/rpl_utility.cc | 158
-rw-r--r-- | sql/rpl_utility.h  |  60
-rw-r--r-- | sql/sp_head.cc     |  13
-rw-r--r-- | sql/sql_class.cc   |  46
-rw-r--r-- | sql/sql_insert.cc  |  23
-rw-r--r-- | sql/sql_parse.cc   |   9
-rw-r--r-- | sql/sql_show.cc    | 233
-rw-r--r-- | sql/sql_yacc.yy    |   3
-rw-r--r-- | sql/table.h        |   4
18 files changed, 881 insertions, 194 deletions
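
The centerpiece of this change is a new table_def class (sql/rpl_utility.h, sql/rpl_utility.cc) that the slave uses to check whether the table definition carried by a Table_map_log_event is compatible with the table it has opened locally. A condensed sketch of the interface and of how exec_event() uses it, assembled from the hunks below (all names and signatures are as they appear in this patch):

class table_def
{
public:
  typedef unsigned char field_type;

  table_def(field_type *t, my_size_t s) : m_type(t), m_size(s) {}

  my_size_t size() const { return m_size; }                   /* column count sent by the master */
  field_type type(my_ptrdiff_t i) const { return m_type[i]; } /* master-side type of column i */

  /*
    Returns non-zero, reporting each width or column-type mismatch
    through slave_print_msg(), if the local table does not match the
    definition received from the master.
  */
  int compatible_with(RELAY_LOG_INFO *rli, TABLE *table) const;

private:
  my_size_t m_size;
  field_type *m_type;
};

/*
  Table_map_log_event::exec_event() replaces its old inline width and
  type checks with:

    table_def const def(m_coltype, m_colcnt);
    if (def.compatible_with(rli, m_table))
      ...   // sets ERR_BAD_TABLE_DEF and stops applying the event
*/
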
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index a089278448c..d59de23f8c2 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -52,7 +52,7 @@ ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc time.cc tztime.cc uniques.cc unireg.cc item_xmlfunc.cc rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_timed.cc sql_tablespace.cc events.cc ../sql-common/my_user.c - partition_info.cc rpl_injector.cc sql_locale.cc + partition_info.cc rpl_utility.cc rpl_injector.cc sql_locale.cc ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h ${PROJECT_SOURCE_DIR}/include/mysqld_error.h diff --git a/sql/Makefile.am b/sql/Makefile.am index d1ebea45d0c..a32ae399750 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -52,7 +52,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ ha_partition.h \ ha_ndbcluster.h ha_ndbcluster_binlog.h \ ha_ndbcluster_tables.h \ - opt_range.h protocol.h rpl_tblmap.h \ + opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \ log.h sql_show.h rpl_rli.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h \ lex.h lex_symbol.h sql_acl.h sql_crypt.h \ @@ -92,7 +92,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ slave.cc sql_repl.cc rpl_filter.cc rpl_tblmap.cc \ - rpl_injector.cc \ + rpl_utility.cc rpl_injector.cc \ sql_union.cc sql_derived.cc \ client.c sql_client.cc mini_client_errors.c pack.c\ stacktrace.c repl_failsafe.h repl_failsafe.cc \ diff --git a/sql/field.cc b/sql/field.cc index 8d852b644bc..1a135328896 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1257,6 +1257,13 @@ void Field::hash(ulong *nr, ulong *nr2) } } +my_size_t +Field::do_last_null_byte() const +{ + DBUG_ASSERT(null_ptr == NULL || (byte*) null_ptr >= table->record[0]); + return null_ptr ? (byte*) null_ptr - table->record[0] + 1 : 0; +} + void Field::copy_from_tmp(int row_offset) { @@ -8094,6 +8101,30 @@ Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, } +my_size_t +Field_bit::do_last_null_byte() const +{ + /* + Code elsewhere is assuming that bytes are 8 bits, so I'm using + that value instead of the correct one: CHAR_BIT. + + REFACTOR SUGGESTION (Matz): Change to use the correct number of + bits. On systems with CHAR_BIT > 8 (not very common), the storage + will lose the extra bits. + */ + DBUG_PRINT("debug", ("bit_ofs=%d, bit_len=%d, bit_ptr=%p", + bit_ofs, bit_len, bit_ptr)); + uchar *result; + if (bit_len == 0) + result= null_ptr; + else if (bit_ofs + bit_len > 8) + result= bit_ptr + 1; + else + result= bit_ptr; + + return result ? (byte*) result - table->record[0] + 1 : 0; +} + Field *Field_bit::new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, @@ -8345,6 +8376,14 @@ const char *Field_bit::unpack(char *to, const char *from) } +void Field_bit::set_default() +{ + my_ptrdiff_t const offset= table->s->default_values - table->record[0]; + uchar bits= get_rec_bits(bit_ptr + offset, bit_ofs, bit_len); + set_rec_bits(bits, bit_ptr, bit_ofs, bit_len); + Field::set_default(); +} + /* Bit field support for non-MyISAM tables. 
*/ diff --git a/sql/field.h b/sql/field.h index fce3b51c04b..5172c4f3b2a 100644 --- a/sql/field.h +++ b/sql/field.h @@ -217,6 +217,19 @@ public: { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } + + /* + Return a pointer to the last byte of the null bytes where the + field conceptually is placed. In the case that the field does not + use any bits of the null bytes, a null pointer is returned. + */ + my_size_t last_null_byte() const { + my_size_t bytes= do_last_null_byte(); + DBUG_PRINT("debug", ("last_null_byte() ==> %d", bytes)); + DBUG_ASSERT(bytes <= table->s->null_bytes); + return bytes; + } + virtual void make_field(Send_field *); virtual void sort_string(char *buff,uint length)=0; virtual bool optimize_range(uint idx, uint part); @@ -377,6 +390,9 @@ public: friend class Item_sum_min; friend class Item_sum_max; friend class Item_func_group_concat; + +private: + virtual my_size_t do_last_null_byte() const; }; @@ -1412,6 +1428,8 @@ public: void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); const char *unpack(char* to, const char *from); + virtual void set_default(); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, uint new_null_bit); @@ -1432,6 +1450,9 @@ public: Field::move_field_offset(ptr_diff); bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*); } + +private: + virtual my_size_t do_last_null_byte() const; }; diff --git a/sql/log.cc b/sql/log.cc index 8a8262c174d..5f33a9585e9 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3171,7 +3171,7 @@ int MYSQL_BIN_LOG:: flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event) { DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)"); - DBUG_ASSERT(thd->current_stmt_binlog_row_based && mysql_bin_log.is_open()); + DBUG_ASSERT(mysql_bin_log.is_open()); DBUG_PRINT("enter", ("event=%p", event)); int error= 0; @@ -3416,9 +3416,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info) } } } - /* Forget those values, for next binlogger: */ - thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; - thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); } /* diff --git a/sql/log_event.cc b/sql/log_event.cc index ebd90446a7e..ceacc1eade7 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -24,6 +24,7 @@ #include "mysql_priv.h" #include "slave.h" #include "rpl_filter.h" +#include "rpl_utility.h" #include <my_dir.h> #endif /* MYSQL_CLIENT */ #include <base64.h> @@ -5290,38 +5291,114 @@ int Rows_log_event::do_add_row_data(byte *const row_data, #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) /* - Unpack a row into a record. The row is assumed to only consist of the fields - for which the bitset represented by 'arr' and 'bits'; the other parts of the - record are left alone. + Unpack a row into a record. 
+ + SYNOPSIS + unpack_row() + rli Relay log info + table Table to unpack into + colcnt Number of columns to read from record + record Record where the data should be unpacked + row Packed row data + cols Pointer to columns data to fill in + row_end Pointer to variable that will hold the value of the + one-after-end position for the row + master_reclength + Pointer to variable that will hold the length of the + record on the master side + rw_set Pointer to bitmap that holds either the read_set or the + write_set of the table + + DESCRIPTION + + The row is assumed to only consist of the fields for which the + bitset represented by 'arr' and 'bits'; the other parts of the + record are left alone. + + At most 'colcnt' columns are read: if the table is larger than + that, the remaining fields are not filled in. */ -static char const *unpack_row(TABLE *table, - byte *record, char const *row, - MY_BITMAP const *cols) +static int +unpack_row(RELAY_LOG_INFO *rli, + TABLE *table, uint const colcnt, byte *record, + char const *row, MY_BITMAP const *cols, + char const **row_end, ulong *master_reclength, + MY_BITMAP* const rw_set) { DBUG_ASSERT(record && row); - - MY_BITMAP *write_set= table->write_set; - my_size_t const n_null_bytes= table->s->null_bytes; my_ptrdiff_t const offset= record - (byte*) table->record[0]; + my_size_t master_null_bytes= table->s->null_bytes; + + if (colcnt != table->s->fields) + { + Field **fptr= &table->field[colcnt-1]; + do + master_null_bytes= (*fptr)->last_null_byte(); + while (master_null_bytes == 0 && fptr-- > table->field); + + if (master_null_bytes == 0) + master_null_bytes= table->s->null_bytes; + } - memcpy(record, row, n_null_bytes); - char const *ptr= row + n_null_bytes; + DBUG_ASSERT(master_null_bytes <= table->s->null_bytes); + memcpy(record, row, master_null_bytes); // [1] + int error= 0; + + bitmap_set_all(rw_set); - bitmap_set_all(write_set); Field **const begin_ptr = table->field; - for (Field **field_ptr= begin_ptr ; *field_ptr ; ++field_ptr) + Field **field_ptr; { - Field *const f= *field_ptr; + char const *ptr= row + master_null_bytes; + Field **const end_ptr= begin_ptr + colcnt; + for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr) + { + Field *const f= *field_ptr; + + if (bitmap_is_set(cols, field_ptr - begin_ptr)) + { + ptr= f->unpack(f->ptr + offset, ptr); + /* Field...::unpack() cannot return 0 */ + DBUG_ASSERT(ptr != NULL); + } + else + bitmap_clear_bit(rw_set, field_ptr - begin_ptr); + } + + *row_end = ptr; + if (master_reclength) + { + if (*field_ptr) + *master_reclength = (*field_ptr)->ptr - (char*) table->record[0]; + else + *master_reclength = table->s->reclength; + } + } - if (bitmap_is_set(cols, (uint) (field_ptr - begin_ptr))) + /* + Set properties for remaining columns, if there are any. We let the + corresponding bit in the write_set be set, to write the value if + it was not there already. We iterate over all remaining columns, + even if there were an error, to get as many error messages as + possible. We are still able to return a pointer to the next row, + so wedo that. 
+ */ + for ( ; *field_ptr ; ++field_ptr) + { + if ((*field_ptr)->flags & (NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG)) { - /* Field...::unpack() cannot return 0 */ - ptr= f->unpack(f->ptr + offset, ptr); + slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD, + "Field `%s` of table `%s`.`%s` " + "has no default value and cannot be NULL", + (*field_ptr)->field_name, table->s->db.str, + table->s->table_name.str); + error = ER_NO_DEFAULT_FOR_FIELD; } else - bitmap_clear_bit(write_set, (uint) (field_ptr - begin_ptr)); + (*field_ptr)->set_default(); } - return ptr; + + return error; } int Rows_log_event::exec_event(st_relay_log_info *rli) @@ -5425,6 +5502,9 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) /* When the open and locking succeeded, we add all the tables to the table map and remove them from tables to lock. + + We also invalidate the query cache for all the tables, since + they will now be changed. */ TABLE_LIST *ptr; @@ -5433,6 +5513,9 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) rli->m_table_map.set_table(ptr->table_id, ptr->table); rli->touching_table(ptr->db, ptr->table_name, ptr->table_id); } +#ifdef HAVE_QUERY_CACHE + query_cache.invalidate_locked_for_write(rli->tables_to_lock); +#endif rli->clear_tables_to_lock(); } @@ -5477,7 +5560,11 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) error= do_before_row_operations(table); while (error == 0 && row_start < (const char*) m_rows_end) { - char const *row_end= do_prepare_row(thd, table, row_start); + char const *row_end= NULL; + if ((error= do_prepare_row(thd, rli, table, row_start, &row_end))) + break; // We should to the after-row operation even in the + // case of error + DBUG_ASSERT(row_end != NULL); // cannot happen DBUG_ASSERT(row_end <= (const char*)m_rows_end); @@ -5682,7 +5769,7 @@ void Rows_log_event::pack_info(Protocol *protocol) #endif /************************************************************************** - Table_map_log_event member functions + Table_map_log_event member functions and support functions **************************************************************************/ /* @@ -5924,72 +6011,9 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) */ DBUG_ASSERT(m_table->in_use); - /* - Check that the number of columns and the field types in the - event match the number of columns and field types in the opened - table. - */ - uint col= m_table->s->fields; - - if (col == m_colcnt) - { - while (col-- > 0) - if (m_table->field[col]->type() != m_coltype[col]) - break; - } - - TABLE_SHARE const *const tsh= m_table->s; - - /* - Check the following termination conditions: - - (col == m_table->s->fields) - ==> (m_table->s->fields != m_colcnt) - (0 <= col < m_table->s->fields) - ==> (m_table->field[col]->type() != m_coltype[col]) - - Logically, A ==> B is equivalent to !A || B - - Since col is unsigned, is suffices to check that col <= - tsh->fields. If col wrapped (by decreasing col when it is 0), - the number will be UINT_MAX, which is greater than tsh->fields. 
- */ - DBUG_ASSERT(!(col == tsh->fields) || tsh->fields != m_colcnt); - DBUG_ASSERT(!(col < tsh->fields) || - (m_table->field[col]->type() != m_coltype[col])); - - if (col <= tsh->fields) + table_def const def(m_coltype, m_colcnt); + if (def.compatible_with(rli, m_table)) { - /* purecov: begin inspected */ - /* - If we get here, the number of columns in the event didn't - match the number of columns in the table on the slave, *or* - there were a column in the table on the slave that did not - have the same type as given in the event. - - If 'col' has the value that was assigned to it, it was a - mismatch between the number of columns on the master and the - slave. - */ - if (col == tsh->fields) - { - DBUG_ASSERT(tsh->db.str && tsh->table_name.str); - slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, - "Table width mismatch - " - "received %u columns, %s.%s has %u columns", - m_colcnt, tsh->db.str, tsh->table_name.str, tsh->fields); - } - else - { - DBUG_ASSERT(col < m_colcnt && col < tsh->fields); - DBUG_ASSERT(tsh->db.str && tsh->table_name.str); - slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, - "Column %d type mismatch - " - "received type %d, %s.%s has type %d", - col, m_coltype[col], tsh->db.str, tsh->table_name.str, - m_table->field[col]->type()); - } - thd->query_error= 1; error= ERR_BAD_TABLE_DEF; goto err; @@ -6188,19 +6212,21 @@ int Write_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -char const *Write_rows_log_event::do_prepare_row(THD *thd, TABLE *table, - char const *row_start) +int Write_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, + TABLE *table, + char const *row_start, + char const **row_end) { - char const *ptr= row_start; DBUG_ASSERT(table != NULL); - /* - This assertion actually checks that there is at least as many - columns on the slave as on the master. - */ - DBUG_ASSERT(table->s->fields >= m_width); - DBUG_ASSERT(ptr); - ptr= unpack_row(table, (byte*)table->record[0], ptr, &m_cols); - return ptr; + DBUG_ASSERT(row_start && row_end); + + int error; + error= unpack_row(rli, + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->write_set); + bitmap_copy(table->read_set, table->write_set); + return error; } /* @@ -6247,21 +6273,104 @@ namespace { /* + Copy "extra" columns from record[1] to record[0]. + + Copy the extra fields that are not present on the master but are + present on the slave from record[1] to record[0]. This is used + after fetching a record that are to be updated, either inside + replace_record() or as part of executing an update_row(). + */ +static int +copy_extra_record_fields(TABLE *table, + my_size_t master_reclength, + my_ptrdiff_t master_fields) +{ + DBUG_PRINT("info", ("Copying to %p from field %d at offset %u to field %d at offset %u", + table->record[0], + master_fields, master_reclength, + table->s->fields, table->s->reclength)); + if (master_reclength < table->s->reclength) + bmove_align(table->record[0] + master_reclength, + table->record[1] + master_reclength, + table->s->reclength - master_reclength); + + /* + Bit columns are special. We iterate over all the remaining + columns and copy the "extra" bits to the new record. This is + not a very good solution: it should be refactored on + opportunity. + + REFACTORING SUGGESTION (Matz). Introduce a member function + similar to move_field_offset() called copy_field_offset() to + copy field values and implement it for all Field subclasses. 
Use + this function to copy data from the found record to the record + that are going to be inserted. + + The copy_field_offset() function need to be a virtual function, + which in this case will prevent copying an entire range of + fields efficiently. + */ + { + Field **field_ptr= table->field + master_fields; + for ( ; *field_ptr ; ++field_ptr) + { + /* + Set the null bit according to the values in record[1] + */ + if ((*field_ptr)->maybe_null() && + (*field_ptr)->is_null_in_record(reinterpret_cast<uchar*>(table->record[1]))) + (*field_ptr)->set_null(); + else + (*field_ptr)->set_notnull(); + + /* + Do the extra work for special columns. + */ + switch ((*field_ptr)->real_type()) + { + default: + /* Nothing to do */ + break; + + case FIELD_TYPE_BIT: + Field_bit *f= static_cast<Field_bit*>(*field_ptr); + my_ptrdiff_t const offset= table->record[1] - table->record[0]; + uchar const bits= + get_rec_bits(f->bit_ptr + offset, f->bit_ofs, f->bit_len); + set_rec_bits(bits, f->bit_ptr, f->bit_ofs, f->bit_len); + break; + } + } + } + return 0; // All OK +} + +/* Replace the provided record in the database. - Similar to how it is done in <code>mysql_insert()</code>, we first - try to do a <code>ha_write_row()</code> and of that fails due to - duplicated keys (or indices), we do an <code>ha_update_row()</code> - or a <code>ha_delete_row()</code> instead. + SYNOPSIS + replace_record() + thd Thread context for writing the record. + table Table to which record should be written. + master_reclength + Offset to first column that is not present on the master, + alternatively the length of the record on the master + side. - @param thd Thread context for writing the record. - @param table Table to which record should be written. + RETURN VALUE + Error code on failure, 0 on success. - @return Error code on failure, 0 on success. + DESCRIPTION + Similar to how it is done in mysql_insert(), we first try to do + a ha_write_row() and of that fails due to duplicated keys (or + indices), we do an ha_update_row() or a ha_delete_row() instead. */ static int -replace_record(THD *thd, TABLE *table) +replace_record(THD *thd, TABLE *table, + ulong const master_reclength, + uint const master_fields) { + DBUG_ENTER("replace_record"); DBUG_ASSERT(table != NULL && thd != NULL); int error; @@ -6273,7 +6382,7 @@ replace_record(THD *thd, TABLE *table) if ((keynum= table->file->get_dup_key(error)) < 0) { /* We failed to retrieve the duplicate key */ - return HA_ERR_FOUND_DUPP_KEY; + DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); } /* @@ -6290,20 +6399,20 @@ replace_record(THD *thd, TABLE *table) { error= table->file->rnd_pos(table->record[1], table->file->dup_ref); if (error) - return error; + DBUG_RETURN(error); } else { if (table->file->extra(HA_EXTRA_FLUSH_CACHE)) { - return my_errno; + DBUG_RETURN(my_errno); } if (key.get() == NULL) { key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length))); if (key.get() == NULL) - return ENOMEM; + DBUG_RETURN(ENOMEM); } key_copy((byte*)key.get(), table->record[0], table->key_info + keynum, 0); @@ -6312,7 +6421,7 @@ replace_record(THD *thd, TABLE *table) table->key_info[keynum].key_length, HA_READ_KEY_EXACT); if (error) - return error; + DBUG_RETURN(error); } /* @@ -6320,6 +6429,12 @@ replace_record(THD *thd, TABLE *table) will enable us to update it or, alternatively, delete it (so that we can insert the new row afterwards). + First we copy the columns into table->record[0] that are not + present on the master from table->record[1], if there are any. 
+ */ + copy_extra_record_fields(table, master_reclength, master_fields); + + /* REPLACE is defined as either INSERT or DELETE + INSERT. If possible, we can replace it with an UPDATE, but that will not work on InnoDB if FOREIGN KEY checks are necessary. @@ -6339,22 +6454,22 @@ replace_record(THD *thd, TABLE *table) { error=table->file->ha_update_row(table->record[1], table->record[0]); - return error; + DBUG_RETURN(error); } else { if ((error= table->file->ha_delete_row(table->record[1]))) - return error; + DBUG_RETURN(error); /* Will retry ha_write_row() with the offending row removed. */ } } - return error; + DBUG_RETURN(error); } int Write_rows_log_event::do_exec_row(TABLE *table) { DBUG_ASSERT(table != NULL); - int error= replace_record(thd, table); + int error= replace_record(thd, table, m_master_reclength, m_width); return error; } #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ @@ -6640,20 +6755,23 @@ int Delete_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -char const *Delete_rows_log_event::do_prepare_row(THD *thd, TABLE *table, - char const *row_start) +int Delete_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, + TABLE *table, + char const *row_start, + char const **row_end) { - char const *ptr= row_start; - DBUG_ASSERT(ptr); + int error; + DBUG_ASSERT(row_start && row_end); /* This assertion actually checks that there is at least as many columns on the slave as on the master. */ DBUG_ASSERT(table->s->fields >= m_width); - DBUG_ASSERT(ptr != NULL); - ptr= unpack_row(table, table->record[0], ptr, &m_cols); - + error= unpack_row(rli, + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->read_set); /* If we will access rows using the random access method, m_key will be set to NULL, so we do not need to make a key copy in that case. @@ -6665,7 +6783,7 @@ char const *Delete_rows_log_event::do_prepare_row(THD *thd, TABLE *table, key_copy(m_key, table->record[0], key_info, 0); } - return ptr; + return error; } int Delete_rows_log_event::do_exec_row(TABLE *table) @@ -6779,11 +6897,13 @@ int Update_rows_log_event::do_after_row_operations(TABLE *table, int error) return error; } -char const *Update_rows_log_event::do_prepare_row(THD *thd, TABLE *table, - char const *row_start) +int Update_rows_log_event::do_prepare_row(THD *thd, RELAY_LOG_INFO *rli, + TABLE *table, + char const *row_start, + char const **row_end) { - char const *ptr= row_start; - DBUG_ASSERT(ptr); + int error; + DBUG_ASSERT(row_start && row_end); /* This assertion actually checks that there is at least as many columns on the slave as on the master. 
@@ -6791,10 +6911,16 @@ char const *Update_rows_log_event::do_prepare_row(THD *thd, TABLE *table, DBUG_ASSERT(table->s->fields >= m_width); /* record[0] is the before image for the update */ - ptr= unpack_row(table, table->record[0], ptr, &m_cols); - DBUG_ASSERT(ptr != NULL); + error= unpack_row(rli, + table, m_width, table->record[0], + row_start, &m_cols, row_end, &m_master_reclength, + table->read_set); + row_start = *row_end; /* m_after_image is the after image for the update */ - ptr= unpack_row(table, m_after_image, ptr, &m_cols); + error= unpack_row(rli, + table, m_width, m_after_image, + row_start, &m_cols, row_end, &m_master_reclength, + table->write_set); /* If we will access rows using the random access method, m_key will @@ -6807,7 +6933,7 @@ char const *Update_rows_log_event::do_prepare_row(THD *thd, TABLE *table, key_copy(m_key, table->record[0], key_info, 0); } - return ptr; + return error; } int Update_rows_log_event::do_exec_row(TABLE *table) @@ -6825,17 +6951,20 @@ int Update_rows_log_event::do_exec_row(TABLE *table) example, the partition engine). Since find_and_fetch_row() puts the fetched record (i.e., the old - record) in record[0], we have to move it out of the way and into - record[1]. After that, we can put the new record (i.e., the after - image) into record[0]. + record) in record[1], we can keep it there. We put the new record + (i.e., the after image) into record[0], and copy the fields that + are on the slave (i.e., in record[1]) into record[0], effectively + overwriting the default values that where put there by the + unpack_row() function. */ - bmove_align(table->record[1], table->record[0], table->s->reclength); bmove_align(table->record[0], m_after_image, table->s->reclength); + copy_extra_record_fields(table, m_master_reclength, m_width); /* - Now we should have the right row to update. The old row (the one - we're looking for) has to be in record[1] and the new row has to - be in record[0] for all storage engines to work correctly. + Now we have the right row to update. The old row (the one we're + looking for) is in record[1] and the new row has is in record[0]. + We also have copied the original values already in the slave's + database into the after image delivered from the master. */ error= table->file->ha_update_row(table->record[1], table->record[0]); diff --git a/sql/log_event.h b/sql/log_event.h index 313b5174da9..801a83dcc97 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1874,6 +1874,7 @@ protected: ulong m_table_id; /* Table ID */ MY_BITMAP m_cols; /* Bitmap denoting columns available */ ulong m_width; /* The width of the columns bitmap */ + ulong m_master_reclength; /* Length of record on master side */ /* Bit buffer in the same memory as the class */ uint32 m_bitbuf[128/(sizeof(uint32)*8)]; @@ -1927,12 +1928,15 @@ private: since SQL thread specific data is not available: that data is made available for the do_exec function. - RETURN VALUE A pointer to the start of the next row, or NULL if the preparation failed. Currently, preparation cannot fail, but don't rely on this behavior. + + RETURN VALUE + Error code, if something went wrong, 0 otherwise. */ - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start) = 0; + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end) = 0; /* Primitive to do the actual execution necessary for a row. 
@@ -2000,10 +2004,11 @@ private: gptr m_memory; byte *m_after_image; - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end); + virtual int do_exec_row(TABLE *table); #endif }; @@ -2064,10 +2069,11 @@ private: byte *m_key; byte *m_after_image; - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end); + virtual int do_exec_row(TABLE *table); #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ }; @@ -2134,10 +2140,11 @@ private: byte *m_key; byte *m_after_image; - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual char const *do_prepare_row(THD*, TABLE*, char const *row_start); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO*, TABLE*, + char const *row_start, char const **row_end); + virtual int do_exec_row(TABLE *table); #endif }; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 473de1951e5..86f5c9a045a 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -21,6 +21,9 @@ except the part which must be in the server and in the client. */ +#ifndef MYSQL_PRIV_H +#define MYSQL_PRIV_H + #ifndef MYSQL_CLIENT #include <my_global.h> @@ -2036,3 +2039,5 @@ bool schema_table_store_record(THD *thd, TABLE *table); #endif /* MYSQL_SERVER */ #endif /* MYSQL_CLIENT */ + +#endif /* MYSQL_PRIV_H */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 445027fa368..0658c332d8e 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -3120,7 +3120,11 @@ with --log-bin instead."); global_system_variables.binlog_format= BINLOG_FORMAT_ROW; else #endif +#if defined(HAVE_ROW_BASED_REPLICATION) + global_system_variables.binlog_format= BINLOG_FORMAT_MIXED; +#else global_system_variables.binlog_format= BINLOG_FORMAT_STMT; +#endif } /* Check that we have not let the format to unspecified at this point */ @@ -4885,7 +4889,13 @@ struct my_option my_long_options[] = "supports only statement-based binary logging, so only 'statement' is " "a legal value." #endif - , 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + , 0, 0, 0, GET_STR, REQUIRED_ARG, +#ifdef HAVE_ROW_BASED_REPLICATION + BINLOG_FORMAT_MIXED +#else + BINLOG_FORMAT_STMT +#endif + , 0, 0, 0, 0, 0 }, {"binlog-do-db", OPT_BINLOG_DO_DB, "Tells the master it should log updates for the specified database, and exclude all others not explicitly mentioned.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc new file mode 100644 index 00000000000..5405d022223 --- /dev/null +++ b/sql/rpl_utility.cc @@ -0,0 +1,158 @@ +/* Copyright 2006 MySQL AB. All rights reserved. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "rpl_utility.h" + +uint32 +field_length_from_packed(enum_field_types const field_type, + byte const *const data) +{ + uint32 length; + + switch (field_type) { + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: + length= ~0UL; + break; + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_TINY: + length= 1; + break; + case MYSQL_TYPE_SHORT: + length= 2; + break; + case MYSQL_TYPE_INT24: + length= 3; + break; + case MYSQL_TYPE_LONG: + length= 4; + break; +#ifdef HAVE_LONG_LONG + case MYSQL_TYPE_LONGLONG: + length= 8; + break; +#endif + case MYSQL_TYPE_FLOAT: + length= sizeof(float); + break; + case MYSQL_TYPE_DOUBLE: + length= sizeof(double); + break; + case MYSQL_TYPE_NULL: + length= 0; + break; + case MYSQL_TYPE_NEWDATE: + length= 3; + break; + case MYSQL_TYPE_DATE: + length= 4; + break; + case MYSQL_TYPE_TIME: + length= 3; + break; + case MYSQL_TYPE_TIMESTAMP: + length= 4; + break; + case MYSQL_TYPE_DATETIME: + length= 8; + break; + break; + case MYSQL_TYPE_BIT: + length= ~0UL; + break; + default: + /* This case should never be chosen */ + DBUG_ASSERT(0); + /* If something goes awfully wrong, it's better to get a string than die */ + case MYSQL_TYPE_STRING: + length= uint2korr(data); + break; + + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: + length= ~0UL; // NYI + break; + + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_GEOMETRY: + length= ~0UL; // NYI + break; + } + + return length; +} + +/********************************************************************* + * table_def member definitions * + *********************************************************************/ + +/* + Is the definition compatible with a table? + + Compare the definition with a table to see if it is compatible with + it. A table definition is compatible with a table if + - the columns types of the table definition is a (not necessarily + proper) prefix of the column type of the table, or + - the other way around +*/ +int +table_def::compatible_with(RELAY_LOG_INFO *rli, TABLE *table) + const +{ + /* + We only check the initial columns for the tables. + */ + uint const cols_to_check= min(table->s->fields, size()); + int error= 0; + + TABLE_SHARE const *const tsh= table->s; + + /* + To get proper error reporting for all columns of the table, we + both check the width and iterate over all columns. 
+ */ + if (tsh->fields < size()) + { + DBUG_ASSERT(tsh->db.str && tsh->table_name.str); + error= 1; + slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, + "Table width mismatch - " + "received %u columns, %s.%s has %u columns", + size(), tsh->db.str, tsh->table_name.str, tsh->fields); + } + + for (uint col= 0 ; col < cols_to_check ; ++col) + { + if (table->field[col]->type() != type(col)) + { + DBUG_ASSERT(col < size() && col < tsh->fields); + DBUG_ASSERT(tsh->db.str && tsh->table_name.str); + error= 1; + slave_print_msg(ERROR_LEVEL, rli, ER_BINLOG_ROW_WRONG_TABLE_DEF, + "Column %d type mismatch - " + "received type %d, %s.%s has type %d", + col, type(col), tsh->db.str, tsh->table_name.str, + table->field[col]->type()); + } + } + + return error; +} diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h new file mode 100644 index 00000000000..0ac3c10eec6 --- /dev/null +++ b/sql/rpl_utility.h @@ -0,0 +1,60 @@ +/* Copyright 2006 MySQL AB. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef RPL_UTILITY_H +#define RPL_UTILITY_H + +#ifndef __cplusplus +#error "Don't include this C++ header file from a non-C++ file!" +#endif + +#include "mysql_priv.h" + +uint32 +field_length_from_packed(enum_field_types const field_type, + byte const *const data); + +/* + A table definition from the master. + + RESPONSIBILITIES + + - Extract table definition data from the table map event + - Check if table definition in table map is compatible with table + definition on slave + */ + +class table_def +{ +public: + typedef unsigned char field_type; + + table_def(field_type *t, my_size_t s) + : m_type(t), m_size(s) + { + } + + my_size_t size() const { return m_size; } + field_type type(my_ptrdiff_t i) const { return m_type[i]; } + + int compatible_with(RELAY_LOG_INFO *rli, TABLE *table) const; + +private: + my_size_t m_size; + field_type *m_type; +}; + +#endif /* RPL_UTILITY_H */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc index fc4aa5e26d6..f9c4cc8c68f 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -794,7 +794,7 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b) This set is produced by tracking user variable reads during statement execution. - Fo SPs, this has the following implications: + For SPs, this has the following implications: 1) thd->user_var_events may contain events from several SP statements and needs to be valid after exection of these statements was finished. In order to achieve that, we @@ -807,6 +807,14 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b) reset_dynamic(&thd->user_var_events); calls in several different places. 
(TODO cosider moving this into mysql_bin_log.write() function) + + 4.2 Auto_increment storage in binlog + + As we may write two statements to binlog from one single logical statement + (case of "SELECT func1(),func2()": it is binlogged as "SELECT func1()" and + then "SELECT func2()"), we need to reset auto_increment binlog variables + after each binlogged SELECT. Otherwise, the auto_increment value of the + first SELECT would be used for the second too. */ @@ -1526,6 +1534,9 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, "failed to reflect this change in the binary log"); } reset_dynamic(&thd->user_var_events); + /* Forget those values, in case more function calls are binlogged: */ + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; + thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); } } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 3fc182e20f3..35b527584dc 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -634,6 +634,12 @@ bool THD::store_globals() void THD::cleanup_after_query() { + if (!in_sub_stmt) /* stored functions and triggers are a special case */ + { + /* Forget those values, for next binlogger: */ + stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; + auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + } if (first_successful_insert_id_in_cur_stmt > 0) { /* set what LAST_INSERT_ID() will return */ @@ -2660,7 +2666,12 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans, int THD::binlog_flush_pending_rows_event(bool stmt_end) { DBUG_ENTER("THD::binlog_flush_pending_rows_event"); - if (!current_stmt_binlog_row_based || !mysql_bin_log.is_open()) + /* + We shall flush the pending event even if we are not in row-based + mode: it might be the case that we left row-based mode before + flushing anything (e.g., if we have explicitly locked tables). + */ + if (!mysql_bin_log.is_open()) DBUG_RETURN(0); /* @@ -2726,6 +2737,21 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, DBUG_PRINT("enter", ("qtype=%d, query='%s'", qtype, query)); DBUG_ASSERT(query && mysql_bin_log.is_open()); + /* + If we are not in prelocked mode, mysql_unlock_tables() will be + called after this binlog_query(), so we have to flush the pending + rows event with the STMT_END_F set to unlock all tables at the + slave side as well. + + If we are in prelocked mode, the flushing will be done inside the + top-most close_thread_tables(). + */ +#ifdef HAVE_ROW_BASED_REPLICATION + if (this->prelocked_mode == NON_PRELOCKED) + if (int error= binlog_flush_pending_rows_event(TRUE)) + DBUG_RETURN(error); +#endif /*HAVE_ROW_BASED_REPLICATION*/ + switch (qtype) { case THD::MYSQL_QUERY_TYPE: /* @@ -2739,25 +2765,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, case THD::ROW_QUERY_TYPE: #ifdef HAVE_ROW_BASED_REPLICATION if (current_stmt_binlog_row_based) - { - /* - If thd->lock is set, then we are not inside a stored function. - In that case, mysql_unlock_tables() will be called after this - binlog_query(), so we have to flush the pending rows event - with the STMT_END_F set to unlock all tables at the slave side - as well. - - We will not flush the pending event, if thd->lock is NULL. - This means that we are inside a stored function or trigger, so - the flushing will be done inside the top-most - close_thread_tables(). 
- */ -#ifdef HAVE_ROW_BASED_REPLICATION - if (this->lock) - DBUG_RETURN(binlog_flush_pending_rows_event(TRUE)); -#endif /*HAVE_ROW_BASED_REPLICATION*/ DBUG_RETURN(0); - } #endif /* Otherwise, we fall through */ case THD::STMT_QUERY_TYPE: diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index d561fb19953..bb34e779c3b 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -572,7 +572,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, free_underlaid_joins(thd, &thd->lex->select_lex); joins_freed= TRUE; - table->file->ha_release_auto_increment(); /* Now all rows are inserted. Time to update logs and sends response to @@ -591,6 +590,11 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, else #endif { + /* + Do not do this release if this is a delayed insert, it would steal + auto_inc values from the delayed_insert thread as they share TABLE. + */ + table->file->ha_release_auto_increment(); if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error) { table->file->print_error(my_errno,MYF(0)); @@ -2114,8 +2118,16 @@ bool delayed_insert::handle_inserts(void) thd.start_time=row->start_time; thd.query_start_used=row->query_start_used; - /* for the binlog, forget auto_increment ids generated by previous rows */ -// thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + /* + To get the exact auto_inc interval to store in the binlog we must not + use values from the previous interval (of the previous rows). + */ + bool log_query= (row->log_query && row->query.str != NULL); + if (log_query) + { + table->file->ha_release_auto_increment(); + thd.auto_inc_intervals_in_cur_stmt_for_binlog.empty(); + } thd.first_successful_insert_id_in_prev_stmt= row->first_successful_insert_id_in_prev_stmt; thd.stmt_depends_on_first_successful_insert_id_in_prev_stmt= @@ -2156,7 +2168,7 @@ bool delayed_insert::handle_inserts(void) table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); } - if (row->log_query && row->query.str != NULL && mysql_bin_log.is_open()) + if (log_query && mysql_bin_log.is_open()) { /* If the query has several rows to insert, only the first row will come @@ -2552,7 +2564,6 @@ bool select_insert::send_data(List<Item> &values) table->next_number_field->reset(); } } - table->file->ha_release_auto_increment(); DBUG_RETURN(error); } @@ -2626,6 +2637,7 @@ void select_insert::send_error(uint errcode,const char *err) } } ha_rollback_stmt(thd); + table->file->ha_release_auto_increment(); DBUG_VOID_RETURN; } @@ -2676,6 +2688,7 @@ bool select_insert::send_eof() } if ((error2=ha_autocommit_or_rollback(thd,error)) && ! error) error=error2; + table->file->ha_release_auto_increment(); if (error) { table->file->print_error(error,MYF(0)); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7c600f7463e..4d3bd99d93e 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5853,9 +5853,14 @@ void mysql_reset_thd_for_next_command(THD *thd) DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */ thd->free_list= 0; thd->select_number= 1; + /* + Those two lines below are theoretically unneeded as + THD::cleanup_after_query() should take care of this already. 
+ */ thd->auto_inc_intervals_in_cur_stmt_for_binlog.empty(); - thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= - thd->query_start_used= 0; + thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 0; + + thd->query_start_used= 0; thd->is_fatal_error= thd->time_zone_used= 0; thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS | SERVER_QUERY_NO_INDEX_USED | diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 9a41c139f4e..5d1e2fc6958 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1995,15 +1995,22 @@ void remove_status_vars(SHOW_VAR *list) } } +inline void make_upper(char *buf) +{ + for (; *buf; buf++) + *buf= my_toupper(system_charset_info, *buf); +} + static bool show_status_array(THD *thd, const char *wild, SHOW_VAR *variables, enum enum_var_type value_type, struct system_status_var *status_var, - const char *prefix, TABLE *table) + const char *prefix, TABLE *table, + bool ucase_names) { char buff[SHOW_VAR_FUNC_BUFF_SIZE], *prefix_end; - /* the variable name should not be longer then 80 characters */ - char name_buffer[80]; + /* the variable name should not be longer than 64 characters */ + char name_buffer[64]; int len; LEX_STRING null_lex_str; SHOW_VAR tmp, *var; @@ -2021,6 +2028,8 @@ static bool show_status_array(THD *thd, const char *wild, { strnmov(prefix_end, variables->name, len); name_buffer[sizeof(name_buffer)-1]=0; /* Safety */ + if (ucase_names) + make_upper(name_buffer); /* if var->type is SHOW_FUNC, call the function. @@ -2032,8 +2041,8 @@ static bool show_status_array(THD *thd, const char *wild, SHOW_TYPE show_type=var->type; if (show_type == SHOW_ARRAY) { - show_status_array(thd, wild, (SHOW_VAR *) var->value, - value_type, status_var, name_buffer, table); + show_status_array(thd, wild, (SHOW_VAR *) var->value, value_type, + status_var, name_buffer, table, ucase_names); } else { @@ -2042,7 +2051,7 @@ static bool show_status_array(THD *thd, const char *wild, { char *value=var->value; const char *pos, *end; // We assign a lot of const's - long nr; + if (show_type == SHOW_SYS) { show_type= ((sys_var*) value)->type(); @@ -2124,6 +2133,7 @@ static bool show_status_array(THD *thd, const char *wild, table->field[0]->store(name_buffer, strlen(name_buffer), system_charset_info); table->field[1]->store(pos, (uint32) (end - pos), system_charset_info); + table->field[1]->set_notnull(); if (schema_table_store_record(thd, table)) DBUG_RETURN(TRUE); } @@ -4564,7 +4574,7 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond) const char *wild= lex->wild ? lex->wild->ptr() : NullS; pthread_mutex_lock(&LOCK_global_system_variables); res= show_status_array(thd, wild, init_vars, - lex->option_type, 0, "", tables->table); + lex->option_type, 0, "", tables->table, 0); pthread_mutex_unlock(&LOCK_global_system_variables); DBUG_RETURN(res); } @@ -4584,7 +4594,8 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond) (SHOW_VAR *)all_status_vars.buffer, OPT_GLOBAL, (lex->option_type == OPT_GLOBAL ? 
- &tmp: thd->initial_status_var), "",tables->table); + &tmp: thd->initial_status_var), + "", tables->table, 0); pthread_mutex_unlock(&LOCK_status); DBUG_RETURN(res); } @@ -4733,6 +4744,21 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list) DBUG_RETURN(0); } break; + case MYSQL_TYPE_DECIMAL: + if (!(item= new Item_decimal((longlong) fields_info->value, false))) + { + DBUG_RETURN(0); + } + item->unsigned_flag= (fields_info->field_length/10000)%10; + item->decimals= fields_info->field_length%10; + item->max_length= (fields_info->field_length/100)%100; + if (item->unsigned_flag == 0) + item->max_length+= 1; + if (item->decimals > 0) + item->max_length+= 1; + item->set_name(fields_info->field_name, + strlen(fields_info->field_name), cs); + break; default: /* this should be changed when Item_empty_string is fixed(in 4.1) */ if (!(item= new Item_empty_string("", 0, cs))) @@ -5172,6 +5198,173 @@ int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond) DBUG_RETURN(0); } +int fill_schema_status(THD *thd, SHOW_VAR *variables, + struct system_status_var *status_var, + const char *prefix, TABLE *table) +{ + SHOW_VAR tmp, *var; + SHOW_TYPE show_type; + LEX_STRING null_lex_str; + char buff[SHOW_VAR_FUNC_BUFF_SIZE]; + char name_buf[64], *name_pos; + int name_len; + DBUG_ENTER("fill_schema_status"); + + null_lex_str.str= 0; + null_lex_str.length= 0; + + name_pos= strnmov(name_buf, prefix, sizeof(name_buf) - 1); + if (*prefix) + *name_pos++= '_'; + name_len= name_buf + sizeof(name_buf) - name_pos; + + for (; variables->name; variables++) + { + strnmov(name_pos, variables->name, name_len); + name_buf[sizeof(name_buf) - 1]= 0; + make_upper(name_buf); + + for (var= variables; var->type == SHOW_FUNC; var= &tmp) + ((mysql_show_var_func)(var->value))(thd, &tmp, buff); + + show_type= var->type; + + if (show_type == SHOW_ARRAY) + { + fill_schema_status(thd, (SHOW_VAR*) var->value, + status_var, name_buf, table); + } + else + { + char *value= var->value; + + restore_record(table, s->default_values); + table->field[0]->store(name_buf, strlen(name_buf), system_charset_info); + + if (show_type == SHOW_SYS) + { + show_type= ((sys_var*) value)->type(); + value= (char*) ((sys_var*) value)->value_ptr(thd, OPT_GLOBAL, + &null_lex_str); + } + + switch (show_type) + { + case SHOW_DOUBLE_STATUS: + value= (char*) status_var + (ulong) value; + table->field[1]->store(*(double*) value); + break; + case SHOW_LONG_STATUS: + value= (char*) status_var + (ulong) value; + /* fall through */ + case SHOW_LONG: + case SHOW_LONG_NOFLUSH: /* the difference lies in refresh_status() */ + table->field[1]->store((longlong) *(long*) value, false); + break; + case SHOW_LONGLONG: + table->field[1]->store(*(longlong*) value, false); + break; + case SHOW_HA_ROWS: + table->field[1]->store((longlong) *(ha_rows*) value, false); + break; + case SHOW_BOOL: + table->field[1]->store((longlong) *(bool*) value, false); + break; + case SHOW_MY_BOOL: + table->field[1]->store((longlong) *(my_bool*) value, false); + break; + case SHOW_INT: + table->field[1]->store((longlong) *(uint32*) value, false); + break; + case SHOW_HAVE: /* always displayed as 0 */ + table->field[1]->store((longlong) 0, false); + break; + case SHOW_CHAR_PTR: + value= *(char**) value; + /* fall through */ + case SHOW_CHAR: /* always displayed as 0 */ + table->field[1]->store((longlong) 0, false); + break; + case SHOW_KEY_CACHE_LONG: + value= (char*) dflt_key_cache + (ulong) value; + table->field[1]->store((longlong) *(long*) value, false); + break; + case 
SHOW_KEY_CACHE_LONGLONG: + value= (char*) dflt_key_cache + (ulong) value; + table->field[1]->store(*(longlong*) value, false); + break; + case SHOW_UNDEF: /* always displayed as 0 */ + table->field[1]->store((longlong) 0, false); + break; + case SHOW_SYS: /* cannot happen */ + default: + DBUG_ASSERT(0); + break; + } + + table->field[1]->set_notnull(); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); + } + } + + DBUG_RETURN(0); +} + +int fill_schema_global_status(THD *thd, TABLE_LIST *tables, COND *cond) +{ + STATUS_VAR tmp; + int res= 0; + DBUG_ENTER("fill_schema_global_status"); + + pthread_mutex_lock(&LOCK_status); + calc_sum_of_all_status(&tmp); + res= fill_schema_status(thd, (SHOW_VAR*) all_status_vars.buffer, + &tmp, "", tables->table); + pthread_mutex_unlock(&LOCK_status); + + DBUG_RETURN(res); +} + +int fill_schema_session_status(THD *thd, TABLE_LIST *tables, COND *cond) +{ + int res= 0; + DBUG_ENTER("fill_schema_session_status"); + + pthread_mutex_lock(&LOCK_status); + res= fill_schema_status(thd, (SHOW_VAR*) all_status_vars.buffer, + &thd->status_var, "", tables->table); + pthread_mutex_unlock(&LOCK_status); + + DBUG_RETURN(res); +} + +int fill_schema_global_variables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + int res= 0; + DBUG_ENTER("fill_schema_global_variables"); + + pthread_mutex_lock(&LOCK_global_system_variables); + res= show_status_array(thd, "", init_vars, OPT_GLOBAL, + NULL, "", tables->table, 1); + pthread_mutex_unlock(&LOCK_global_system_variables); + + DBUG_RETURN(res); +} + +int fill_schema_session_variables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + int res= 0; + DBUG_ENTER("fill_schema_session_variables"); + + pthread_mutex_lock(&LOCK_global_system_variables); + res= show_status_array(thd, "", init_vars, OPT_SESSION, + NULL, "", tables->table, 1); + pthread_mutex_unlock(&LOCK_global_system_variables); + + DBUG_RETURN(res); +} + ST_FIELD_INFO schema_fields_info[]= { {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, @@ -5524,6 +5717,22 @@ ST_FIELD_INFO variables_fields_info[]= }; +ST_FIELD_INFO status_fields_info[]= +{ + {"VARIABLE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Variable_name"}, + {"VARIABLE_VALUE", 2207, MYSQL_TYPE_DECIMAL, 0, 0, "Value"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO system_variables_fields_info[]= +{ + {"VARIABLE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Variable_name"}, + {"VARIABLE_VALUE", 65535, MYSQL_TYPE_STRING, 0, 1, "Value"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + ST_FIELD_INFO processlist_fields_info[]= { {"ID", 4, MYSQL_TYPE_LONG, 0, 0, "Id"}, @@ -5636,6 +5845,10 @@ ST_SCHEMA_TABLE schema_tables[]= fill_schema_events, make_old_format, 0, -1, -1, 0}, {"FILES", files_fields_info, create_schema_table, fill_schema_files, 0, 0, -1, -1, 0}, + {"GLOBAL_STATUS", status_fields_info, create_schema_table, + fill_schema_global_status, make_old_format, 0, -1, -1, 0}, + {"GLOBAL_VARIABLES", system_variables_fields_info, create_schema_table, + fill_schema_global_variables, make_old_format, 0, -1, -1, 0}, {"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table, get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0}, {"OPEN_TABLES", open_tables_fields_info, create_schema_table, @@ -5655,6 +5868,10 @@ ST_SCHEMA_TABLE schema_tables[]= fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0}, {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table, fill_schema_schema_privileges, 0, 0, -1, -1, 0}, + {"SESSION_STATUS", status_fields_info, create_schema_table, + 
fill_schema_session_status, make_old_format, 0, -1, -1, 0}, + {"SESSION_VARIABLES", system_variables_fields_info, create_schema_table, + fill_schema_session_variables, make_old_format, 0, -1, -1, 0}, {"STATISTICS", stat_fields_info, create_schema_table, get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0}, {"STATUS", variables_fields_info, create_schema_table, fill_status, diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index b1a1e9cb9af..bdb05621a60 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -8782,6 +8782,9 @@ load_data: FROM MASTER_SYM { Lex->sql_command = SQLCOM_LOAD_MASTER_DATA; + WARN_DEPRECATED(yythd, "5.2", "LOAD DATA FROM MASTER", + "mysqldump or future " + "BACKUP/RESTORE DATABASE facility"); }; opt_local: diff --git a/sql/table.h b/sql/table.h index 3fb7222cb0d..c490c283b72 100644 --- a/sql/table.h +++ b/sql/table.h @@ -474,6 +474,8 @@ enum enum_schema_tables SCH_ENGINES, SCH_EVENTS, SCH_FILES, + SCH_GLOBAL_STATUS, + SCH_GLOBAL_VARIABLES, SCH_KEY_COLUMN_USAGE, SCH_OPEN_TABLES, SCH_PARTITIONS, @@ -483,6 +485,8 @@ enum enum_schema_tables SCH_PROCEDURES, SCH_SCHEMATA, SCH_SCHEMA_PRIVILEGES, + SCH_SESSION_STATUS, + SCH_SESSION_VARIABLES, SCH_STATISTICS, SCH_STATUS, SCH_TABLES, |