author     rafal@quant.(none) <>                    2007-08-27 14:01:19 +0200
committer  rafal@quant.(none) <>                    2007-08-27 14:01:19 +0200
commit     fedc0a3ca8d13f49ed70f6537bd3c9b70f6a4998 (patch)
tree       4a3c54f9e841d93b2ba708aa041f2081682b5f7b /sql
parent     3500810898ea7680923122128fc55c76cac3a2b9 (diff)
parent     f8b64e17f94923ef421469d648c31c0f06d2cf96 (diff)
download   mariadb-git-fedc0a3ca8d13f49ed70f6537bd3c9b70f6a4998.tar.gz
Merge quant.(none):/ext/mysql/bk/mysql-5.1-bug21842-5.1.22
into quant.(none):/ext/mysql/bk/mysql-5.1-bug21842-rpl
Diffstat (limited to 'sql')
59 files changed, 2529 insertions, 1081 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 1400c1487d0..a0c00e868e5 100755
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -28,9 +28,9 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include )
 SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc
-                            ${CMAKE_SOURCE_DIR}/sql/message.h
-                            ${CMAKE_SOURCE_DIR}/sql/sql_yacc.h
-                            ${CMAKE_SOURCE_DIR}/sql/sql_yacc.cc
+                            ${CMAKE_SOURCE_DIR}/sql/message.h
+                            ${CMAKE_SOURCE_DIR}/sql/sql_yacc.h
+                            ${CMAKE_SOURCE_DIR}/sql/sql_yacc.cc
                             ${CMAKE_SOURCE_DIR}/include/mysql_version.h
                             ${CMAKE_SOURCE_DIR}/sql/sql_builtin.cc
                             ${CMAKE_SOURCE_DIR}/sql/lex_hash.h
@@ -41,8 +41,8 @@ SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc
 ADD_DEFINITIONS(-DMYSQL_SERVER -D_CONSOLE -DHAVE_DLOPEN)
 
-ADD_EXECUTABLE(mysqld${MYSQLD_EXE_SUFFIX}
-               ../sql-common/client.c derror.cc des_key_file.cc
+ADD_EXECUTABLE(mysqld
+               ../sql-common/client.c derror.cc des_key_file.cc
                discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
                filesort.cc gstream.cc ha_partition.cc
@@ -74,74 +74,91 @@ ADD_EXECUTABLE(mysqld${MYSQLD_EXE_SUFFIX}
                sql_tablespace.cc events.cc ../sql-common/my_user.c
                partition_info.cc rpl_utility.cc rpl_injector.cc sql_locale.cc
                rpl_rli.cc rpl_mi.cc sql_servers.cc
-               sql_connect.cc scheduler.cc
+               sql_connect.cc scheduler.cc
                ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
-               ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
-               ${PROJECT_SOURCE_DIR}/include/mysqld_error.h
-               ${PROJECT_SOURCE_DIR}/include/mysqld_ername.h
-               ${PROJECT_SOURCE_DIR}/include/sql_state.h
-               ${PROJECT_SOURCE_DIR}/include/mysql_version.h
-               ${PROJECT_SOURCE_DIR}/sql/sql_builtin.cc
-               ${PROJECT_SOURCE_DIR}/sql/lex_hash.h)
-TARGET_LINK_LIBRARIES(mysqld${MYSQLD_EXE_SUFFIX}
-                      heap myisam myisammrg mysys yassl zlib debug dbug yassl
+               ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
+               ${PROJECT_SOURCE_DIR}/include/mysqld_error.h
+               ${PROJECT_SOURCE_DIR}/include/mysqld_ername.h
+               ${PROJECT_SOURCE_DIR}/include/sql_state.h
+               ${PROJECT_SOURCE_DIR}/include/mysql_version.h
+               ${PROJECT_SOURCE_DIR}/sql/sql_builtin.cc
+               ${PROJECT_SOURCE_DIR}/sql/lex_hash.h)
+
+TARGET_LINK_LIBRARIES(mysqld
+                      heap myisam myisammrg mysys yassl zlib debug dbug yassl
                       taocrypt strings vio regex wsock32 ws2_32)
+SET_TARGET_PROPERTIES(mysqld PROPERTIES OUTPUT_NAME mysqld${MYSQLD_EXE_SUFFIX})
+
+# Work around for 2.4.6 bug, OUTPUT_NAME will not set the right .PDB
+# file name. Note that COMPILE_FLAGS set some temporary pdb during build,
+# LINK_FLAGS sets the real one.
+SET_TARGET_PROPERTIES(mysqld PROPERTIES
+  COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/mysqld${MYSQLD_EXE_SUFFIX}.pdb"
+  LINK_FLAGS "/PDB:${CMAKE_CFG_INTDIR}/mysqld${MYSQLD_EXE_SUFFIX}.pdb")
+
 IF(EMBED_MANIFESTS)
   MYSQL_EMBED_MANIFEST("mysqld" "asInvoker")
 ENDIF(EMBED_MANIFESTS)
 IF(WITH_ARCHIVE_STORAGE_ENGINE)
-  TARGET_LINK_LIBRARIES(mysqld${MYSQLD_EXE_SUFFIX} archive)
+  TARGET_LINK_LIBRARIES(mysqld archive)
 ENDIF(WITH_ARCHIVE_STORAGE_ENGINE)
 IF(WITH_BLACKHOLE_STORAGE_ENGINE)
-  TARGET_LINK_LIBRARIES(mysqld${MYSQLD_EXE_SUFFIX} blackhole)
+  TARGET_LINK_LIBRARIES(mysqld blackhole)
 ENDIF(WITH_BLACKHOLE_STORAGE_ENGINE)
 IF(WITH_CSV_STORAGE_ENGINE)
-  TARGET_LINK_LIBRARIES(mysqld${MYSQLD_EXE_SUFFIX} csv)
+  TARGET_LINK_LIBRARIES(mysqld csv)
 ENDIF(WITH_CSV_STORAGE_ENGINE)
 IF(WITH_EXAMPLE_STORAGE_ENGINE)
-  TARGET_LINK_LIBRARIES(mysqld${MYSQLD_EXE_SUFFIX} example)
+  TARGET_LINK_LIBRARIES(mysqld example)
 ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
 IF(WITH_FEDERATED_STORAGE_ENGINE)
-  TARGET_LINK_LIBRARIES(mysqld${MYSQLD_EXE_SUFFIX} federated)
+  TARGET_LINK_LIBRARIES(mysqld federated)
 ENDIF(WITH_FEDERATED_STORAGE_ENGINE)
 IF(WITH_INNOBASE_STORAGE_ENGINE)
-  TARGET_LINK_LIBRARIES(mysqld${MYSQLD_EXE_SUFFIX} innobase)
+  TARGET_LINK_LIBRARIES(mysqld innobase)
 ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
 
-ADD_DEPENDENCIES(mysqld${MYSQLD_EXE_SUFFIX} GenError)
+ADD_DEPENDENCIES(mysqld GenError)
+
+# NOTE CMake 2.4.6 creates strange dependencies between files in OUTPUT,
+# so for now we only list one if more than one
 
 # Sql Parser custom command
 ADD_CUSTOM_COMMAND(
-  SOURCE ${PROJECT_SOURCE_DIR}/sql/sql_yacc.yy
-  OUTPUT ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
-  COMMAND bison.exe ARGS -y -p MYSQL --defines=sql_yacc.h
-          --output=sql_yacc.cc sql_yacc.yy
-  DEPENDS ${PROJECT_SOURCE_DIR}/sql/sql_yacc.yy)
+  OUTPUT ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
+#        ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
+  COMMAND bison.exe ARGS -y -p MYSQL --defines=sql_yacc.h
+          --output=sql_yacc.cc sql_yacc.yy
+  DEPENDS ${PROJECT_SOURCE_DIR}/sql/sql_yacc.yy)
 
-ADD_CUSTOM_COMMAND(
-  OUTPUT ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
-  COMMAND echo
-  DEPENDS ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
-)
-
-# Windows message file
-ADD_CUSTOM_COMMAND(
-  SOURCE message.mc
-  OUTPUT message.rc message.h
-  COMMAND mc ARGS message.mc
-  DEPENDS message.mc)
+IF(WIN32)
+  # Windows message file
+  ADD_CUSTOM_COMMAND(
+    OUTPUT ${PROJECT_SOURCE_DIR}/sql/message.h
+#          ${PROJECT_SOURCE_DIR}/sql/message.rc
+    COMMAND mc ARGS message.mc
+    DEPENDS ${PROJECT_SOURCE_DIR}/sql/message.mc)
+ENDIF(WIN32)
 
 # Gen_lex_hash
 ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc)
-TARGET_LINK_LIBRARIES(gen_lex_hash dbug mysqlclient wsock32)
+TARGET_LINK_LIBRARIES(gen_lex_hash debug dbug mysqlclient wsock32)
 GET_TARGET_PROPERTY(GEN_LEX_HASH_EXE gen_lex_hash LOCATION)
 ADD_CUSTOM_COMMAND(
-  OUTPUT ${PROJECT_SOURCE_DIR}/sql/lex_hash.h
-  COMMAND ${GEN_LEX_HASH_EXE} ARGS > lex_hash.h
-  DEPENDS ${GEN_LEX_HASH_EXE}
-)
-ADD_DEPENDENCIES(mysqld${MYSQLD_EXE_SUFFIX} gen_lex_hash)
+  OUTPUT ${PROJECT_SOURCE_DIR}/sql/lex_hash.h
+  COMMAND ${GEN_LEX_HASH_EXE} ARGS > lex_hash.h
+  DEPENDS ${GEN_LEX_HASH_EXE})
+
+ADD_CUSTOM_TARGET(
+  GenServerSource ALL
+  DEPENDS ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
+#         ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
+          ${PROJECT_SOURCE_DIR}/sql/message.h
+#         ${PROJECT_SOURCE_DIR}/sql/message.rc
+          ${PROJECT_SOURCE_DIR}/sql/lex_hash.h)
+
+ADD_DEPENDENCIES(mysqld GenServerSource)
 
 # Remove the auto-generated files as part of 'Clean Solution'
 SET_DIRECTORY_PROPERTIES(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index b7bcb6344fd..48477b1ba46 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -390,8 +390,9 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table,
   }
 
   key_copy(key_buf, event_table->record[0], key_info, key_len);
-  if (!(ret= event_table->file->index_read(event_table->record[0], key_buf,
-                                           (key_part_map)1, HA_READ_PREFIX)))
+  if (!(ret= event_table->file->index_read_map(event_table->record[0], key_buf,
+                                               (key_part_map)1,
+                                               HA_READ_PREFIX)))
   {
     DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret));
     do
@@ -839,8 +840,8 @@ Event_db_repository::find_named_event(LEX_STRING db, LEX_STRING name,
   key_copy(key, table->record[0], table->key_info, table->key_info->key_length);
 
-  if (table->file->index_read_idx(table->record[0], 0, key, HA_WHOLE_KEY,
-                                  HA_READ_KEY_EXACT))
+  if (table->file->index_read_idx_map(table->record[0], 0, key, HA_WHOLE_KEY,
+                                      HA_READ_KEY_EXACT))
   {
     DBUG_PRINT("info", ("Row not found"));
     DBUG_RETURN(TRUE);
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
index 95f207844fc..e611432c6a2 100644
--- a/sql/event_queue.cc
+++ b/sql/event_queue.cc
@@ -52,8 +52,9 @@
     execute_at.second_part is not considered during comparison
 */
 
-static int
-event_queue_element_compare_q(void *vptr, uchar* a, uchar *b)
+extern "C" int event_queue_element_compare_q(void *, uchar *, uchar *);
+
+int event_queue_element_compare_q(void *vptr, uchar* a, uchar *b)
 {
   my_time_t lhs = ((Event_queue_element *)a)->execute_at;
   my_time_t rhs = ((Event_queue_element *)b)->execute_at;
diff --git a/sql/field.cc b/sql/field.cc
index e3333c14573..1b01d626512 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -6298,12 +6298,12 @@ uint Field::is_equal(Create_field *new_field)
 /* If one of the fields is binary and the other one isn't return 1 else 0 */
 
-bool Field_str::compare_str_field_flags(Create_field *new_field, uint32 flags)
+bool Field_str::compare_str_field_flags(Create_field *new_field, uint32 flag_arg)
 {
   return (((new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) &&
-          !(flags & (BINCMP_FLAG | BINARY_FLAG))) ||
+          !(flag_arg & (BINCMP_FLAG | BINARY_FLAG))) ||
          (!(new_field->flags & (BINCMP_FLAG | BINARY_FLAG)) &&
-          (flags & (BINCMP_FLAG | BINARY_FLAG))));
+          (flag_arg & (BINCMP_FLAG | BINARY_FLAG))));
 }
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index d46b3a3bb08..6521be1cb9a 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1125,7 +1125,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
     0                         Success
 */
 
-int ha_partition::prepare_new_partition(TABLE *table,
+int ha_partition::prepare_new_partition(TABLE *tbl,
                                         HA_CREATE_INFO *create_info,
                                         handler *file, const char *part_name,
                                         partition_element *p_elem)
@@ -1134,13 +1134,13 @@ int ha_partition::prepare_new_partition(TABLE *table,
   bool create_flag= FALSE;
   DBUG_ENTER("prepare_new_partition");
 
-  if ((error= set_up_table_before_create(table, part_name, create_info,
+  if ((error= set_up_table_before_create(tbl, part_name, create_info,
                                          0, p_elem)))
     goto error;
-  if ((error= file->create(part_name, table, create_info)))
+  if ((error= file->create(part_name, tbl, create_info)))
     goto error;
   create_flag= TRUE;
-  if ((error= file->ha_open(table, part_name, m_mode, m_open_test_lock)))
+  if ((error= file->ha_open(tbl, part_name, m_mode, m_open_test_lock)))
     goto error;
   /*
     Note: if you plan to add another call that may return failure,
@@ -1776,7 +1776,7 @@ partition_element *ha_partition::find_partition_element(uint part_id)
   4) Data file name on partition
 */
 
-int ha_partition::set_up_table_before_create(TABLE *table,
+int ha_partition::set_up_table_before_create(TABLE *tbl,
                     const char *partition_name_with_path,
                     HA_CREATE_INFO *info,
                     uint part_id,
@@ -1793,8 +1793,8 @@ int ha_partition::set_up_table_before_create(TABLE *table,
     if (!part_elem)
       DBUG_RETURN(1);                             // Fatal error
   }
-  table->s->max_rows= part_elem->part_max_rows;
-  table->s->min_rows= part_elem->part_min_rows;
+  tbl->s->max_rows= part_elem->part_max_rows;
+  tbl->s->min_rows= part_elem->part_min_rows;
   partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
   if ((part_elem->index_file_name &&
       (error= append_file_to_dir(thd,
@@ -3400,11 +3400,11 @@ int ha_partition::index_end()
   used in conjuntion with multi read ranges.
 */
 
-int ha_partition::index_read(uchar * buf, const uchar * key,
-                             key_part_map keypart_map,
-                             enum ha_rkey_function find_flag)
+int ha_partition::index_read_map(uchar *buf, const uchar *key,
+                                 key_part_map keypart_map,
+                                 enum ha_rkey_function find_flag)
 {
-  DBUG_ENTER("ha_partition::index_read");
+  DBUG_ENTER("ha_partition::index_read_map");
 
   end_range= 0;
   m_index_scan_type= partition_index_read;
@@ -3566,7 +3566,7 @@ int ha_partition::common_first_last(uchar *buf)
     index_read_last()
     buf                   Read row in MySQL Row Format
     key                   Key
-    keylen                Length of key
+    keypart_map           Which part of key is used
 
   RETURN VALUE
     >0                    Error code
@@ -3577,8 +3577,8 @@ int ha_partition::common_first_last(uchar *buf)
     Can only be used on indexes supporting HA_READ_ORDER
 */
 
-int ha_partition::index_read_last(uchar *buf, const uchar *key,
-                                  key_part_map keypart_map)
+int ha_partition::index_read_last_map(uchar *buf, const uchar *key,
+                                      key_part_map keypart_map)
 {
   DBUG_ENTER("ha_partition::index_read_last");
 
@@ -3929,9 +3929,9 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
     switch (m_index_scan_type) {
     case partition_index_read:
       DBUG_PRINT("info", ("index_read on partition %d", i));
-      error= file->index_read(buf, m_start_key.key,
-                              m_start_key.keypart_map,
-                              m_start_key.flag);
+      error= file->index_read_map(buf, m_start_key.key,
+                                  m_start_key.keypart_map,
+                                  m_start_key.flag);
       break;
     case partition_index_first:
       DBUG_PRINT("info", ("index_first on partition %d", i));
@@ -4020,10 +4020,10 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
 
     switch (m_index_scan_type) {
     case partition_index_read:
-      error= file->index_read(rec_buf_ptr,
-                              m_start_key.key,
-                              m_start_key.keypart_map,
-                              m_start_key.flag);
+      error= file->index_read_map(rec_buf_ptr,
+                                  m_start_key.key,
+                                  m_start_key.keypart_map,
+                                  m_start_key.flag);
       break;
     case partition_index_first:
       error= file->index_first(rec_buf_ptr);
@@ -4034,9 +4034,9 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
       reverse_order= TRUE;
      break;
    case partition_index_read_last:
-      error= file->index_read_last(rec_buf_ptr,
-                                   m_start_key.key,
-                                   m_start_key.keypart_map);
+      error= file->index_read_last_map(rec_buf_ptr,
+                                       m_start_key.key,
+                                       m_start_key.keypart_map);
      reverse_order= TRUE;
      break;
    default:
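The dominant change running through ha_partition.cc (and most of the files below) is the rename of index_read()/index_read_idx()/index_read_last() to *_map() variants that take a key_part_map bitmask ("which key parts are set") instead of a raw key length. As a rough stand-alone illustration of the bitmask convention — bit N set means key part N is present, and make_prev_keypart_map(N) builds the map for the first N parts — here is a minimal sketch (toy code with the same semantics, not the server's headers):

#include <cassert>
#include <cstdint>

typedef uint64_t key_part_map;   // one bit per key part, bit 0 = first part

// Same semantics as the server's make_prev_keypart_map(N): a map with
// bits set for the first N key parts of the index.
static key_part_map make_prev_keypart_map(unsigned keyparts)
{
  return (key_part_map(1) << keyparts) - 1;
}

int main()
{
  // Searching on the first two parts of a composite index.
  assert(make_prev_keypart_map(2) == 0x3);
  // HA_WHOLE_KEY in the server is simply the map with all bits set.
  key_part_map whole_key= ~key_part_map(0);
  assert((whole_key & make_prev_keypart_map(2)) == 0x3);
  return 0;
}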
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 895f001fa6a..555f7a72740 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -387,9 +387,9 @@ public:
     index_init initializes an index before using it and index_end does
     any end processing needed.
   */
-  virtual int index_read(uchar * buf, const uchar * key,
-                         key_part_map keypart_map,
-                         enum ha_rkey_function find_flag);
+  virtual int index_read_map(uchar * buf, const uchar * key,
+                             key_part_map keypart_map,
+                             enum ha_rkey_function find_flag);
   virtual int index_init(uint idx, bool sorted);
   virtual int index_end();
 
@@ -402,8 +402,8 @@ public:
   virtual int index_first(uchar * buf);
   virtual int index_last(uchar * buf);
   virtual int index_next_same(uchar * buf, const uchar * key, uint keylen);
-  virtual int index_read_last(uchar * buf, const uchar * key,
-                              key_part_map keypart_map);
+  virtual int index_read_last_map(uchar * buf, const uchar * key,
+                                  key_part_map keypart_map);
 
   /*
     read_first_row is virtual method but is only implemented by
diff --git a/sql/handler.cc b/sql/handler.cc
index 3e9524e9821..dbe7f6727f7 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -25,8 +25,6 @@
 #include "mysql_priv.h"
 #include "rpl_filter.h"
 
-
-
 #include <myisampack.h>
 #include <errno.h>
 
@@ -64,7 +62,7 @@ static const LEX_STRING sys_table_aliases[]=
 };
 
 const char *ha_row_type[] = {
-  "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "?","?","?"
+  "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "PAGE", "?","?","?"
 };
 
 const char *tx_isolation_names[] =
@@ -1993,9 +1991,9 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
     key_copy(key, table->record[0],
              table->key_info + table->s->next_number_index,
              table->s->next_number_key_offset);
-    error= index_read(table->record[1], key,
-                      make_prev_keypart_map(table->s->next_number_keypart),
-                      HA_READ_PREFIX_LAST);
+    error= index_read_map(table->record[1], key,
+                          make_prev_keypart_map(table->s->next_number_keypart),
+                          HA_READ_PREFIX_LAST);
     /*
       MySQL needs to call us for next row: assume we are inserting ("a",null)
       here, we return 3, and next this statement will want to insert
@@ -3218,10 +3216,10 @@ int handler::read_range_first(const key_range *start_key,
   if (!start_key)                       // Read first record
     result= index_first(table->record[0]);
   else
-    result= index_read(table->record[0],
-                       start_key->key,
-                       start_key->keypart_map,
-                       start_key->flag);
+    result= index_read_map(table->record[0],
+                           start_key->key,
+                           start_key->keypart_map,
+                           start_key->flag);
   if (result)
     DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND)
                 ? HA_ERR_END_OF_FILE
@@ -3293,15 +3291,15 @@ int handler::compare_key(key_range *range)
 }
 
 
-int handler::index_read_idx(uchar * buf, uint index, const uchar * key,
-                            key_part_map keypart_map,
-                            enum ha_rkey_function find_flag)
+int handler::index_read_idx_map(uchar * buf, uint index, const uchar * key,
+                                key_part_map keypart_map,
+                                enum ha_rkey_function find_flag)
 {
   int error, error1;
   error= index_init(index, 0);
   if (!error)
   {
-    error= index_read(buf, key, keypart_map, find_flag);
+    error= index_read_map(buf, key, keypart_map, find_flag);
     error1= index_end();
   }
   return error ? error : error1;
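handler.cc implements index_read_idx_map() by bracketing a map-based read with index_init()/index_end(); handler.h (next file) supplies the inverse default: index_read_map() computes a key length from the bitmask via calculate_key_len() and forwards to the legacy length-based index_read(), so unconverted engines keep working. A toy sketch of that adapter pattern, with invented types and a fixed 4-byte key-part width standing in for the real key metadata:

#include <cstdint>
#include <cstdio>

typedef uint64_t key_part_map;

// Toy base class mirroring the adapter in handler.h: the new map-based
// entry point derives a byte length from the bitmask and forwards to the
// legacy length-based virtual.
struct toy_handler
{
  // Pretend every key part is 4 bytes wide; the real server computes this
  // from the key's metadata via calculate_key_len().
  unsigned key_len_from_map(key_part_map map) const
  {
    unsigned len= 0;
    for (; map; map>>= 1)       // keypart maps are prefixes: 0b1, 0b11, ...
      len+= 4;
    return len;
  }
  virtual int index_read(const unsigned char *key, unsigned key_len)
  {
    (void) key;
    printf("legacy index_read, key_len=%u\n", key_len);
    return 0;
  }
  virtual int index_read_map(const unsigned char *key, key_part_map map)
  {
    return index_read(key, key_len_from_map(map));
  }
  virtual ~toy_handler() {}
};

int main()
{
  toy_handler h;
  unsigned char key[8]= {0};
  return h.index_read_map(key, 0x3);   // two key parts -> key_len == 8
}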
diff --git a/sql/handler.h b/sql/handler.h
index 60e936bef5b..557b9fd7887 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -278,7 +278,7 @@ enum legacy_db_type
 enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
                 ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
-                ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_PAGES };
+                ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_PAGE };
 
 enum enum_binlog_func {
   BFN_RESET_LOGS= 1,
@@ -321,6 +321,7 @@ enum enum_binlog_command {
 #define HA_CREATE_USED_PASSWORD         (1L << 17)
 #define HA_CREATE_USED_CONNECTION       (1L << 18)
 #define HA_CREATE_USED_KEY_BLOCK_SIZE   (1L << 19)
+#define HA_CREATE_USED_TRANSACTIONAL    (1L << 20)
 
 typedef ulonglong my_xid; // this line is the same as in log_event.h
 #define MYSQL_XID_PREFIX "MySQLXid"
@@ -788,6 +789,7 @@ class partition_info;
 struct st_partition_iter;
 #define NOT_A_PARTITION_ID ((uint32)-1)
 
+enum ha_choice { HA_CHOICE_UNDEF, HA_CHOICE_NO, HA_CHOICE_YES };
 
 typedef struct st_ha_create_information
 {
@@ -810,6 +812,8 @@ typedef struct st_ha_create_information
   uint options;                         /* OR of HA_CREATE_ options */
   uint merge_insert_method;
   uint extra_size;                      /* length of extra data segment */
+  /* 0 not used, 1 if not transactional, 2 if transactional */
+  enum ha_choice transactional;
   bool table_existed;                   /* 1 in create if table existed */
   bool frm_only;                        /* 1 if no ha_create_table() */
   bool varchar;                         /* 1 if table has a VARCHAR */
@@ -1277,27 +1281,28 @@ public:
                                  enum ha_rkey_function find_flag)
   { return HA_ERR_WRONG_COMMAND; }
 public:
-/**
-  @brief
-  Positions an index cursor to the index specified in the handle. Fetches the
-  row if available. If the key value is null, begin at the first key of the
-  index.
-*/
-  virtual int index_read(uchar * buf, const uchar * key, key_part_map keypart_map,
-                         enum ha_rkey_function find_flag)
-  {
-    uint key_len= calculate_key_len(table, active_index, key, keypart_map);
-    return index_read(buf, key, key_len, find_flag);
-  }
-/**
-  @brief
-  Positions an index cursor to the index specified in the handle. Fetches the
-  row if available. If the key value is null, begin at the first key of the
-  index.
-*/
-  virtual int index_read_idx(uchar * buf, uint index, const uchar * key,
+  /**
+    @brief
+    Positions an index cursor to the index specified in the handle. Fetches the
+    row if available. If the key value is null, begin at the first key of the
+    index.
+  */
+  virtual int index_read_map(uchar * buf, const uchar * key,
                              key_part_map keypart_map,
-                             enum ha_rkey_function find_flag);
+                             enum ha_rkey_function find_flag)
+  {
+    uint key_len= calculate_key_len(table, active_index, key, keypart_map);
+    return index_read(buf, key, key_len, find_flag);
+  }
+  /**
+    @brief
+    Positions an index cursor to the index specified in the handle. Fetches the
+    row if available. If the key value is null, begin at the first key of the
+    index.
+  */
+  virtual int index_read_idx_map(uchar * buf, uint index, const uchar * key,
+                                 key_part_map keypart_map,
+                                 enum ha_rkey_function find_flag);
   virtual int index_next(uchar * buf)
    { return HA_ERR_WRONG_COMMAND; }
   virtual int index_prev(uchar * buf)
@@ -1311,17 +1316,17 @@ public:
   virtual int index_read_last(uchar * buf, const uchar * key, uint key_len)
    { return (my_errno=HA_ERR_WRONG_COMMAND); }
 public:
-/**
-  @brief
-  The following functions works like index_read, but it find the last
-  row with the current key value or prefix.
-*/
-  virtual int index_read_last(uchar * buf, const uchar * key,
-                              key_part_map keypart_map)
-  {
-    uint key_len= calculate_key_len(table, active_index, key, keypart_map);
-    return index_read_last(buf, key, key_len);
-  }
+  /**
+    @brief
+    The following functions works like index_read, but it find the last
+    row with the current key value or prefix.
+  */
+  virtual int index_read_last_map(uchar * buf, const uchar * key,
+                                  key_part_map keypart_map)
+  {
+    uint key_len= calculate_key_len(table, active_index, key, keypart_map);
+    return index_read_last(buf, key, key_len);
+  }
   virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
                                      KEY_MULTI_RANGE *ranges, uint range_count,
                                      bool sorted, HANDLER_BUFFER *buffer);
@@ -1864,7 +1869,7 @@ int ha_find_files(THD *thd,const char *db,const char *path,
 int ha_table_exists_in_engine(THD* thd, const char* db, const char* name);
 
 /* key cache */
-int ha_init_key_cache(const char *name, KEY_CACHE *key_cache);
+extern "C" int ha_init_key_cache(const char *name, KEY_CACHE *key_cache);
 int ha_resize_key_cache(KEY_CACHE *key_cache);
 int ha_change_key_cache_param(KEY_CACHE *key_cache);
 int ha_change_key_cache(KEY_CACHE *old_key_cache, KEY_CACHE *new_key_cache);
diff --git a/sql/item.cc b/sql/item.cc
index 76aad708d22..2e68e96d518 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -3869,15 +3869,15 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
         if ((*res)->type() == Item::FIELD_ITEM)
         {
           /*
-            It's an Item_field referencing another Item_field in the select
-            list.
-            use the field from the Item_field in the select list and leave
-            the Item_field instance in place.
+            It's an Item_field referencing another Item_field in the select
+            list.
+            Use the field from the Item_field in the select list and leave
+            the Item_field instance in place.
           */
-          Field *field= (*((Item_field**)res))->field;
+          Field *new_field= (*((Item_field**)res))->field;
 
-          if (field == NULL)
+          if (new_field == NULL)
           {
             /* The column to which we link isn't valid. */
             my_error(ER_BAD_FIELD_ERROR, MYF(0), (*res)->name,
@@ -3885,7 +3885,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
             return(1);
           }
 
-          set_field(field);
+          set_field(new_field);
           return 0;
         }
         else
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 755e711e383..8f5ff050dd6 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -3017,11 +3017,11 @@ uchar *in_longlong::get_value(Item *item)
 
 void in_datetime::set(uint pos,Item *item)
 {
-  Item **tmp= &item;
+  Item **tmp_item= &item;
   bool is_null;
   struct packed_longlong *buff= &((packed_longlong*) base)[pos];
 
-  buff->val= get_datetime_value(thd, &tmp, 0, warn_item, &is_null);
+  buff->val= get_datetime_value(thd, &tmp_item, 0, warn_item, &is_null);
   buff->unsigned_flag= 1L;
 }
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 47914c59b4c..35ecfdb0f2c 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1158,7 +1158,6 @@ public:
 class Item_func_in :public Item_func_opt_neg
 {
 public:
-  Item_result cmp_type;
   /*
     an array of values when the right hand arguments of IN
     are all SQL constant and there are no nulls
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 83b27de29b7..8768a71b7ae 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -4375,11 +4375,11 @@ int Item_func_set_user_var::save_in_field(Field *field, bool no_conversions,
   else if (result_type() == DECIMAL_RESULT)
   {
     my_decimal decimal_value;
-    my_decimal *value= entry->val_decimal(&null_value, &decimal_value);
+    my_decimal *val= entry->val_decimal(&null_value, &decimal_value);
     if (null_value)
       return set_field_to_null(field);
     field->set_notnull();
-    error=field->store_decimal(value);
+    error=field->store_decimal(val);
   }
   else
   {
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 8ac812ec34a..d1c8e7a37e8 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1049,6 +1049,23 @@ String *Item_str_conv::val_str(String *str)
 }
 
 
+void Item_func_lcase::fix_length_and_dec()
+{
+  collation.set(args[0]->collation);
+  multiply= collation.collation->casedn_multiply;
+  converter= collation.collation->cset->casedn;
+  max_length= args[0]->max_length * multiply;
+}
+
+void Item_func_ucase::fix_length_and_dec()
+{
+  collation.set(args[0]->collation);
+  multiply= collation.collation->caseup_multiply;
+  converter= collation.collation->cset->caseup;
+  max_length= args[0]->max_length * multiply;
+}
+
+
 String *Item_func_left::val_str(String *str)
 {
   DBUG_ASSERT(fixed == 1);
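The LCASE/UCASE change above moves fix_length_and_dec() out of line and sizes the result with the collation's case-conversion multiplier. The multiplier exists because some conversions grow the string (the classic case is U+00DF 'ß' uppercasing to "SS"), so the declared max_length must be the argument's length times the worst-case factor. A toy illustration of just that arithmetic, with invented stand-in types:

#include <cassert>

// Stand-in for the two CHARSET_INFO fields the real code reads.
struct toy_charset_info
{
  unsigned caseup_multiply;   // worst-case growth factor for UPPER()
  unsigned casedn_multiply;   // worst-case growth factor for LOWER()
};

// What Item_func_ucase::fix_length_and_dec() computes for max_length.
static unsigned ucase_max_length(unsigned arg_max_length,
                                 const toy_charset_info &cs)
{
  return arg_max_length * cs.caseup_multiply;
}

int main()
{
  toy_charset_info latin1_like= {1, 1};    // no expansion on case change
  toy_charset_info expanding=   {2, 1};    // e.g. 'ß' -> "SS" style growth
  assert(ucase_max_length(10, latin1_like) == 10);
  assert(ucase_max_length(10, expanding) == 20);
  return 0;
}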
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index ea9517976a8..155f8457156 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -145,8 +145,7 @@ class Item_str_conv :public Item_str_func
 {
 protected:
   uint multiply;
-  size_t (*converter)(CHARSET_INFO *cs, char *src, size_t srclen,
-                      char *dst, size_t dstlen);
+  my_charset_conv_case converter;
   String tmp_value;
 public:
   Item_str_conv(Item *item) :Item_str_func(item) {}
@@ -159,13 +158,7 @@ class Item_func_lcase :public Item_str_conv
 public:
   Item_func_lcase(Item *item) :Item_str_conv(item) {}
   const char *func_name() const { return "lcase"; }
-  void fix_length_and_dec()
-  {
-    collation.set(args[0]->collation);
-    multiply= collation.collation->casedn_multiply;
-    converter= collation.collation->cset->casedn;
-    max_length= args[0]->max_length * multiply;
-  }
+  void fix_length_and_dec();
 };
 
 class Item_func_ucase :public Item_str_conv
@@ -173,13 +166,7 @@ class Item_func_ucase :public Item_str_conv
 public:
   Item_func_ucase(Item *item) :Item_str_conv(item) {}
   const char *func_name() const { return "ucase"; }
-  void fix_length_and_dec()
-  {
-    collation.set(args[0]->collation);
-    multiply= collation.collation->caseup_multiply;
-    converter= collation.collation->cset->caseup;
-    max_length= args[0]->max_length * multiply;
-  }
+  void fix_length_and_dec();
 };
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 1d042860d73..a253397e52c 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -2129,10 +2129,10 @@ int subselect_uniquesubquery_engine::exec()
   if (!table->file->inited)
     table->file->ha_index_init(tab->ref.key, 0);
-  error= table->file->index_read(table->record[0],
-                                 tab->ref.key_buff,
-                                 make_prev_keypart_map(tab->ref.key_parts),
-                                 HA_READ_KEY_EXACT);
+  error= table->file->index_read_map(table->record[0],
+                                     tab->ref.key_buff,
+                                     make_prev_keypart_map(tab->ref.key_parts),
+                                     HA_READ_KEY_EXACT);
   if (error &&
       error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
     error= report_error(table, error);
@@ -2239,10 +2239,10 @@ int subselect_indexsubquery_engine::exec()
   if (!table->file->inited)
     table->file->ha_index_init(tab->ref.key, 1);
-  error= table->file->index_read(table->record[0],
-                                 tab->ref.key_buff,
-                                 make_prev_keypart_map(tab->ref.key_parts),
-                                 HA_READ_KEY_EXACT);
+  error= table->file->index_read_map(table->record[0],
+                                     tab->ref.key_buff,
+                                     make_prev_keypart_map(tab->ref.key_parts),
+                                     HA_READ_KEY_EXACT);
   if (error &&
       error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
     error= report_error(table, error);
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 067af930d6f..15be9c97b6e 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -570,13 +570,13 @@ String *Item_nodeset_func_rootelement::val_nodeset(String *nodeset)
 String * Item_nodeset_func_union::val_nodeset(String *nodeset)
 {
-  uint numnodes= pxml->length() / sizeof(MY_XML_NODE);
+  uint num_nodes= pxml->length() / sizeof(MY_XML_NODE);
   String set0, *s0= args[0]->val_nodeset(&set0);
   String set1, *s1= args[1]->val_nodeset(&set1);
   String both_str;
-  both_str.alloc(numnodes);
+  both_str.alloc(num_nodes);
   char *both= (char*) both_str.ptr();
-  bzero((void*)both, numnodes);
+  bzero((void*)both, num_nodes);
   MY_XPATH_FLT *flt;
 
   fltbeg= (MY_XPATH_FLT*) s0->ptr();
@@ -590,7 +590,7 @@ String * Item_nodeset_func_union::val_nodeset(String *nodeset)
     both[flt->num]= 1;
 
   nodeset->length(0);
-  for (uint i= 0, pos= 0; i < numnodes; i++)
+  for (uint i= 0, pos= 0; i < num_nodes; i++)
   {
     if (both[i])
       ((XPathFilter*)nodeset)->append_element(i, pos++);
@@ -2656,7 +2656,9 @@ static uint xml_parent_tag(MY_XML_NODE *items, uint nitems, uint level)
   RETURN
     Currently only MY_XML_OK
 */
-static int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
+extern "C" int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len);
+
+int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
 {
   MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
   MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
@@ -2687,7 +2689,9 @@ static int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
   RETURN
     Currently only MY_XML_OK
 */
-static int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
+extern "C" int xml_value(MY_XML_PARSER *st,const char *attr, size_t len);
+
+int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
 {
   MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
   MY_XML_NODE *nodes= (MY_XML_NODE*) data->pxml->ptr();
@@ -2717,7 +2721,9 @@ static int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
   RETURN
     Currently only MY_XML_OK
 */
-static int xml_leave(MY_XML_PARSER *st,const char *attr, size_t len)
+extern "C" int xml_leave(MY_XML_PARSER *st,const char *attr, size_t len);
+
+int xml_leave(MY_XML_PARSER *st,const char *attr, size_t len)
 {
   MY_XML_USER_DATA *data= (MY_XML_USER_DATA*)st->user_data;
   DBUG_ASSERT(data->level > 0);
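xml_enter()/xml_value()/xml_leave() above (like event_queue_element_compare_q() earlier) gain extern "C" declarations because they are handed as callbacks to C code: a function passed through a C function-pointer type should have C language linkage. A minimal stand-alone sketch of the pattern, with a toy callback type in place of MY_XML_PARSER's:

#include <cstddef>
#include <cstring>

// A C parser's callback type has C language linkage; a C++ function
// passed through it should be declared extern "C", which is exactly
// what the diff adds for the three xml_* callbacks.
extern "C" {
  typedef int (*toy_xml_callback)(void *st, const char *attr, size_t len);
}

extern "C" int toy_enter(void *st, const char *attr, size_t len);

int toy_enter(void *st, const char *attr, size_t len)
{
  (void) st;
  return (attr && len) ? 0 : 1;   // 0 plays the role of MY_XML_OK
}

int main()
{
  toy_xml_callback cb= toy_enter;  // linkage of cb and toy_enter now match
  return cb(0, "a", strlen("a"));
}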
diff --git a/sql/key.cc b/sql/key.cc
index fee06ec058f..e8354ed2635 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -480,6 +480,7 @@ int key_cmp(KEY_PART_INFO *key_part, const uchar *key, uint key_length)
     rec0                    Pointer to table->record[0]
     first_rec               Pointer to record compare with
     second_rec              Pointer to record compare against first_rec
+
   DESCRIPTION
     This method is set-up such that it can be called directly from the
     priority queue and it is attempted to be optimised as much as possible
diff --git a/sql/log.cc b/sql/log.cc
index 3686459fedf..74a210a35b3 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1934,7 +1934,7 @@ bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host,
   {
     char buff[32];
     uint length= 0;
-    char time_buff[MAX_TIME_SIZE];
+    char local_time_buff[MAX_TIME_SIZE];
     struct tm start;
     uint time_buff_len= 0;
 
@@ -1950,13 +1950,13 @@ bool MYSQL_QUERY_LOG::write(time_t event_time, const char *user_host,
 
         localtime_r(&event_time, &start);
 
-        time_buff_len= my_snprintf(time_buff, MAX_TIME_SIZE,
+        time_buff_len= my_snprintf(local_time_buff, MAX_TIME_SIZE,
                                    "%02d%02d%02d %2d:%02d:%02d",
                                    start.tm_year % 100, start.tm_mon + 1,
                                    start.tm_mday, start.tm_hour,
                                    start.tm_min, start.tm_sec);
 
-        if (my_b_write(&log_file, (uchar*) &time_buff, time_buff_len))
+        if (my_b_write(&log_file, (uchar*) local_time_buff, time_buff_len))
           goto err;
       }
       else
@@ -2346,8 +2346,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
         s.flags|= LOG_EVENT_BINLOG_IN_USE_F;
       if (!s.is_valid())
         goto err;
-      if (null_created_arg)
-        s.created= 0;
+      s.dont_set_created= null_created_arg;
       if (s.write(&log_file))
         goto err;
       bytes_written+= s.data_written;
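The log.cc hunk renames the local time_buff to local_time_buff (it shadowed another name) but keeps the slow-log timestamp format unchanged. For reference, the same "YYMMDD  H:MM:SS" stamp can be produced with POSIX localtime_r() plus plain snprintf(), which is essentially what MYSQL_QUERY_LOG::write() does (sketch only; my_snprintf is MySQL's own wrapper):

#include <cstdio>
#include <ctime>

int main()
{
  time_t event_time= time(0);
  struct tm start;
  localtime_r(&event_time, &start);        // thread-safe localtime

  char local_time_buff[32];
  snprintf(local_time_buff, sizeof(local_time_buff),
           "%02d%02d%02d %2d:%02d:%02d",   // note %2d (space-padded hour)
           start.tm_year % 100, start.tm_mon + 1, start.tm_mday,
           start.tm_hour, start.tm_min, start.tm_sec);
  puts(local_time_buff);
  return 0;
}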
diff --git a/sql/log_event.cc b/sql/log_event.cc
index fecc35c95b9..2142aa0b54e 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -456,7 +456,11 @@ Log_event::Log_event()
   thd(0)
 {
   server_id=	::server_id;
-  when= my_time(0);
+  /*
+    We can't call my_time() here as this would cause a call before
+    my_init() is called
+  */
+  when= 0;
   log_pos=	0;
 }
 #endif /* !MYSQL_CLIENT */
@@ -637,6 +641,7 @@ void Log_event::init_show_field_list(List<Item>* field_list)
 bool Log_event::write_header(IO_CACHE* file, ulong event_data_length)
 {
   uchar header[LOG_EVENT_HEADER_LEN];
+  ulong now;
   DBUG_ENTER("Log_event::write_header");
 
   /* Store number of bytes that will be written by this event */
@@ -687,6 +692,8 @@ bool Log_event::write_header(IO_CACHE* file, ulong event_data_length)
     log_pos= my_b_safe_tell(file)+data_written;
   }
 
+  now= (ulong) get_time();                              // Query start time
+
   /*
     Header will be of size LOG_EVENT_HEADER_LEN for all events, except for
     FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT, where it will be
    LOG_EVENT_MINIMAL_HEADER_LEN (we write by hand all the rest of the header
    because we read them before knowing the format).
  */
-  int4store(header, (ulong) when);              // timestamp
+  int4store(header, now);                       // timestamp
   header[EVENT_TYPE_OFFSET]= get_type_code();
   int4store(header+ SERVER_ID_OFFSET, server_id);
   int4store(header+ EVENT_LEN_OFFSET, data_written);
@@ -803,10 +810,12 @@ end:
 #ifndef MYSQL_CLIENT
 Log_event* Log_event::read_log_event(IO_CACHE* file,
                                      pthread_mutex_t* log_lock,
-                                     const Format_description_log_event *description_event)
+                                     const Format_description_log_event
+                                     *description_event)
 #else
 Log_event* Log_event::read_log_event(IO_CACHE* file,
-                                     const Format_description_log_event *description_event)
+                                     const Format_description_log_event
+                                     *description_event)
 #endif
 {
   DBUG_ENTER("Log_event::read_log_event");
@@ -1461,7 +1470,7 @@ Query_log_event::Query_log_event()
 /*
   SYNOPSIS
     Query_log_event::Query_log_event()
-      thd             - thread handle
+      thd_arg         - thread handle
      query_arg       - array of char representing the query
      query_length    - size of the `query_arg' array
      using_trans     - there is a modified transactional table
@@ -1477,10 +1486,12 @@ Query_log_event::Query_log_event()
 */
 Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
                                  ulong query_length, bool using_trans,
-                                 bool suppress_use, THD::killed_state killed_status_arg)
+                                 bool suppress_use,
+                                 THD::killed_state killed_status_arg)
   :Log_event(thd_arg,
-             (thd_arg->thread_specific_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0) |
-             (suppress_use ? LOG_EVENT_SUPPRESS_USE_F : 0),
+             (thd_arg->thread_specific_used ? LOG_EVENT_THREAD_SPECIFIC_F :
+              0) |
+             (suppress_use ? LOG_EVENT_SUPPRESS_USE_F : 0),
             using_trans),
   data_buf(0), query(query_arg), catalog(thd_arg->catalog),
   db(thd_arg->db), q_len((uint32) query_length),
@@ -1501,10 +1512,10 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
   error_code=
     (killed_status_arg == THD::NOT_KILLED) ? thd_arg->net.last_errno :
     ((thd_arg->system_thread & SYSTEM_THREAD_DELAYED_INSERT) ? 0 :
-     thd->killed_errno());
+     thd_arg->killed_errno());
 
   time(&end_time);
-  exec_time = (ulong) (end_time  - thd->start_time);
+  exec_time = (ulong) (end_time  - thd_arg->start_time);
   catalog_len = (catalog) ? (uint32) strlen(catalog) : 0;
   /* status_vars_len is set just before writing the event */
   db_len = (db) ? (uint32) strlen(db) : 0;
@@ -1513,15 +1524,15 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
   /*
     If we don't use flags2 for anything else than options contained in
-    thd->options, it would be more efficient to flags2=thd_arg->options
+    thd_arg->options, it would be more efficient to flags2=thd_arg->options
     (OPTIONS_WRITTEN_TO_BINLOG would be used only at reading time).
     But it's likely that we don't want to use 32 bits for 3 bits; in the future
     we will probably want to reclaim the 29 bits. So we need the &.
   */
   flags2= (uint32) (thd_arg->options & OPTIONS_WRITTEN_TO_BIN_LOG);
-  DBUG_ASSERT(thd->variables.character_set_client->number < 256*256);
-  DBUG_ASSERT(thd->variables.collation_connection->number < 256*256);
-  DBUG_ASSERT(thd->variables.collation_server->number < 256*256);
+  DBUG_ASSERT(thd_arg->variables.character_set_client->number < 256*256);
+  DBUG_ASSERT(thd_arg->variables.collation_connection->number < 256*256);
+  DBUG_ASSERT(thd_arg->variables.collation_server->number < 256*256);
   int2store(charset, thd_arg->variables.character_set_client->number);
   int2store(charset+2, thd_arg->variables.collation_connection->number);
   int2store(charset+4, thd_arg->variables.collation_server->number);
@@ -2247,9 +2258,10 @@ Muted_query_log_event::Muted_query_log_event()
 **************************************************************************/
 
 #ifndef MYSQL_CLIENT
-Start_log_event_v3::Start_log_event_v3() :Log_event(), binlog_version(BINLOG_VERSION), artificial_event(0)
+Start_log_event_v3::Start_log_event_v3()
+  :Log_event(), created(0), binlog_version(BINLOG_VERSION),
+   artificial_event(0), dont_set_created(0)
 {
-  created= when;
   memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
 }
 #endif
@@ -2319,7 +2331,8 @@ void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
 */
 
 Start_log_event_v3::Start_log_event_v3(const char* buf,
-                                       const Format_description_log_event* description_event)
+                                       const Format_description_log_event
+                                       *description_event)
   :Log_event(buf, description_event)
 {
   buf+= description_event->common_header_len;
@@ -2331,6 +2344,7 @@ Start_log_event_v3::Start_log_event_v3(const char* buf,
   created= uint4korr(buf+ST_CREATED_OFFSET);
   /* We use log_pos to mark if this was an artificial event or not */
   artificial_event= (log_pos == 0);
+  dont_set_created= 1;
 }
 
 
@@ -2344,6 +2358,8 @@ bool Start_log_event_v3::write(IO_CACHE* file)
   char buff[START_V3_HEADER_LEN];
   int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
   memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
+  if (!dont_set_created)
+    created= when= get_time();
   int4store(buff + ST_CREATED_OFFSET,created);
   return (write_header(file, sizeof(buff)) ||
           my_b_safe_write(file, (uchar*) buff, sizeof(buff)));
@@ -2374,8 +2390,7 @@ bool Start_log_event_v3::write(IO_CACHE* file)
 int Start_log_event_v3::do_apply_event(Relay_log_info const *rli)
 {
   DBUG_ENTER("Start_log_event_v3::do_apply_event");
-  switch (binlog_version)
-  {
+  switch (binlog_version) {
   case 3:
   case 4:
     /*
@@ -2446,7 +2461,6 @@ Format_description_log_event::
 Format_description_log_event(uint8 binlog_ver, const char* server_ver)
   :Start_log_event_v3()
 {
-  created= when;
   binlog_version= binlog_ver;
   switch (binlog_ver) {
   case 4: /* MySQL 5.0 */
@@ -2598,6 +2612,8 @@ bool Format_description_log_event::write(IO_CACHE* file)
   uchar buff[FORMAT_DESCRIPTION_HEADER_LEN];
   int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
   memcpy((char*) buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
+  if (!dont_set_created)
+    created= when= get_time();
   int4store(buff + ST_CREATED_OFFSET,created);
   buff[ST_COMMON_HEADER_LEN_OFFSET]= LOG_EVENT_HEADER_LEN;
   memcpy((char*) buff+ST_COMMON_HEADER_LEN_OFFSET+1, (uchar*) post_header_len,
@@ -4904,8 +4920,9 @@ err:
 */
 
 #ifndef MYSQL_CLIENT
-Append_block_log_event::Append_block_log_event(THD* thd_arg, const char* db_arg,
-                                               char* block_arg,
+Append_block_log_event::Append_block_log_event(THD *thd_arg,
+                                               const char *db_arg,
+                                               char *block_arg,
                                                uint block_len_arg,
                                                bool using_trans)
   :Log_event(thd_arg,0, using_trans), block(block_arg),
@@ -5154,7 +5171,8 @@ int Delete_file_log_event::do_apply_event(Relay_log_info const *rli)
 */
 
 #ifndef MYSQL_CLIENT
-Execute_load_log_event::Execute_load_log_event(THD *thd_arg, const char* db_arg,
+Execute_load_log_event::Execute_load_log_event(THD *thd_arg,
+                                               const char* db_arg,
                                                bool using_trans)
   :Log_event(thd_arg, 0, using_trans), file_id(thd_arg->file_id), db(db_arg)
 {
@@ -5355,7 +5373,7 @@ int Begin_load_query_log_event::get_create_or_append() const
 
 #ifndef MYSQL_CLIENT
 Execute_load_query_log_event::
-Execute_load_query_log_event(THD* thd_arg, const char* query_arg,
+Execute_load_query_log_event(THD *thd_arg, const char* query_arg,
                              ulong query_length_arg, uint fn_pos_start_arg,
                              uint fn_pos_end_arg,
                              enum_load_dup_handling dup_handling_arg,
@@ -5636,13 +5654,14 @@ Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
     m_table_id(tid),
     m_width(tbl_arg ? tbl_arg->s->fields : 1),
     m_rows_buf(0), m_rows_cur(0), m_rows_end(0),
-    m_flags(0)
+    m_curr_row(NULL), m_curr_row_end(NULL),
+    m_flags(0), m_key(NULL)
 {
   /*
     We allow a special form of dummy event when the table, and cols
     are null and the table id is ~0UL.  This is a temporary
     solution, to be able to terminate a started statement in the
-    binary log: the extreneous events will be removed in the future.
+    binary log: the extraneous events will be removed in the future.
   */
   DBUG_ASSERT(tbl_arg && tbl_arg->s && tid != ~0UL ||
               !tbl_arg && !cols && tid == ~0UL);
@@ -5651,7 +5670,7 @@ Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
       set_flags(NO_FOREIGN_KEY_CHECKS_F);
   if (thd_arg->options & OPTION_RELAXED_UNIQUE_CHECKS)
       set_flags(RELAXED_UNIQUE_CHECKS_F);
-  /* if bitmap_init fails, catched in is_valid() */
+  /* if bitmap_init fails, caught in is_valid() */
   if (likely(!bitmap_init(&m_cols,
                           m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
                           m_width,
@@ -5678,7 +5697,10 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
                                *description_event)
   : Log_event(buf, description_event),
     m_row_count(0),
+    m_table(NULL),
    m_rows_buf(0), m_rows_cur(0), m_rows_end(0),
+    m_curr_row(NULL), m_curr_row_end(NULL),
+    m_key(NULL)
 {
   DBUG_ENTER("Rows_log_event::Rows_log_event(const char*,...)");
   uint8 const common_header_len= description_event->common_header_len;
@@ -5737,7 +5759,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
   {
     DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
 
-    /* if bitmap_init fails, catched in is_valid() */
+    /* if bitmap_init fails, caught in is_valid() */
     if (likely(!bitmap_init(&m_cols_ai,
                             m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL,
                             m_width,
@@ -5767,6 +5789,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
     m_rows_buf= (uchar*) my_malloc(data_size, MYF(MY_WME));
     if (likely((bool)m_rows_buf))
     {
+      m_curr_row= m_rows_buf;
       m_rows_end= m_rows_buf + data_size;
       m_rows_cur= m_rows_end;
       memcpy(m_rows_buf, ptr_rows_data, data_size);
@@ -5871,7 +5894,6 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
 {
   DBUG_ENTER("Rows_log_event::do_apply_event(Relay_log_info*)");
   int error= 0;
-  uchar const *row_start= m_rows_buf;
 
   /*
     If m_table_id == ~0UL, then we have a dummy event that does not
@@ -6031,7 +6053,9 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
 #endif
   }
 
-  TABLE* table= const_cast<Relay_log_info*>(rli)->m_table_map.get_table(m_table_id);
+  TABLE*
+    table=
+    m_table= const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.get_table(m_table_id);
 
   if (table)
   {
@@ -6076,24 +6100,43 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
       inside a statement and halting abruptly might cause problems
       when restarting.
      */
-    const_cast<Relay_log_info*>(rli)->set_flag(Relay_log_info::IN_STMT);
+    const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT);
 
-    error= do_before_row_operations(table);
-    while (error == 0 && row_start < m_rows_end)
-    {
-      uchar const *row_end= NULL;
-      if ((error= do_prepare_row(thd, rli, table, row_start, &row_end)))
-        break; // We should perform the after-row operation even in
-               // the case of error
+    if ( m_width == table->s->fields && bitmap_is_set_all(&m_cols))
+      set_flags(COMPLETE_ROWS_F);
+
+    /*
+      Set tables write and read sets.
+
+      Read_set contains all slave columns (in case we are going to fetch
+      a complete record from slave)
+
+      Write_set equals the m_cols bitmap sent from master but it can be
+      longer if slave has extra columns.
+    */
+
+    DBUG_PRINT_BITSET("debug", "Setting table's write_set from: %s", &m_cols);
+
+    bitmap_set_all(table->read_set);
+    bitmap_set_all(table->write_set);
+    if (!get_flags(COMPLETE_ROWS_F))
+      bitmap_intersect(table->write_set,&m_cols);
+
+    // Do event specific preparations
 
-      DBUG_ASSERT(row_end != NULL); // cannot happen
-      DBUG_ASSERT(row_end <= m_rows_end);
+    error= do_before_row_operations(rli);
 
+    // row processing loop
+
+    while (error == 0 && m_curr_row < m_rows_end)
+    {
       /* in_use can have been set to NULL in close_tables_for_reopen */
       THD* old_thd= table->in_use;
       if (!table->in_use)
         table->in_use= thd;
-      error= do_exec_row(table);
+
+      error= do_exec_row(rli);
+
       table->in_use = old_thd;
       switch (error)
       {
@@ -6114,21 +6157,38 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
         break;
       }
 
-      row_start= row_end;
-    }
+      /*
+        If m_curr_row_end was not set during event execution (e.g., because
+        of errors) we can't proceed to the next row. If the error is transient
+        (i.e., error==0 at this point) we must call unpack_current_row() to set
+        m_curr_row_end.
+      */
+
+      if (!m_curr_row_end && !error)
+        unpack_current_row(rli);
+
+      // at this moment m_curr_row_end should be set
+      DBUG_ASSERT(error || m_curr_row_end != NULL);
+      DBUG_ASSERT(error || m_curr_row < m_curr_row_end);
+      DBUG_ASSERT(error || m_curr_row_end <= m_rows_end);
+
+      m_curr_row= m_curr_row_end;
+
+    } // row processing loop
+
     DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event",
                     const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
-    error= do_after_row_operations(table, error);
+    error= do_after_row_operations(rli, error);
     if (!cache_stmt)
     {
       DBUG_PRINT("info", ("Marked that we need to keep log"));
      thd->options|= OPTION_KEEP_LOG;
    }
-  }
+  } // if (table)
 
  /*
-    We need to delay this clear until the table def is no longer needed.
-    The table def is needed in unpack_row().
+    We need to delay this clear until the table def stored in m_table_def is no
+    longer needed. It is used in unpack_current_row().
  */
  if (rli->tables_to_lock && get_flags(STMT_END_F))
    const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
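The reworked do_apply_event() above replaces the row_start/row_end locals with the m_curr_row/m_curr_row_end cursor members: each do_exec_row() call is expected to set m_curr_row_end while unpacking, and the loop then advances m_curr_row to it. A toy model of that loop shape (invented types; the real loop also handles idempotency errors and table->in_use juggling):

#include <cstddef>

// Sketch of the cursor-style row loop: m_curr_row walks the packed rows
// buffer, and the per-row handler sets m_curr_row_end while unpacking.
struct toy_rows_event
{
  const unsigned char *m_curr_row;
  const unsigned char *m_curr_row_end;
  const unsigned char *m_rows_end;

  // Stand-in for unpack_current_row(): consume a fixed-size row.
  int unpack_current_row() { m_curr_row_end= m_curr_row + 8; return 0; }
  int do_exec_row()        { return unpack_current_row(); }

  int apply_all()
  {
    int error= 0;
    while (error == 0 && m_curr_row < m_rows_end)
    {
      error= do_exec_row();
      if (!m_curr_row_end && !error)   // ensure the end pointer is known
        error= unpack_current_row();
      m_curr_row= m_curr_row_end;      // advance to the next packed row
      m_curr_row_end= 0;
    }
    return error;
  }
};

int main()
{
  unsigned char buf[32]= {0};
  toy_rows_event ev= {buf, 0, buf + sizeof(buf)};
  return ev.apply_all();                // consumes four 8-byte toy rows
}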
+ */ + + if (!m_curr_row_end && !error) + unpack_current_row(rli); + + // at this moment m_curr_row_end should be set + DBUG_ASSERT(error || m_curr_row_end != NULL); + DBUG_ASSERT(error || m_curr_row < m_curr_row_end); + DBUG_ASSERT(error || m_curr_row_end <= m_rows_end); + + m_curr_row= m_curr_row_end; + + } // row processing loop + DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event", const_cast<Relay_log_info*>(rli)->abort_slave= 1;); - error= do_after_row_operations(table, error); + error= do_after_row_operations(rli, error); if (!cache_stmt) { DBUG_PRINT("info", ("Marked that we need to keep log")); thd->options|= OPTION_KEEP_LOG; } - } + } // if (table) /* - We need to delay this clear until the table def is no longer needed. - The table def is needed in unpack_row(). + We need to delay this clear until the table def stored in m_table_def is no + longer needed. It is used in unpack_current_row(). */ if (rli->tables_to_lock && get_flags(STMT_END_F)) const_cast<Relay_log_info*>(rli)->clear_tables_to_lock(); @@ -6474,7 +6534,7 @@ Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid, m_data_size+= m_tbllen + 2; // Include length and terminating \0 m_data_size+= 1 + m_colcnt; // COLCNT and column types - /* If malloc fails, catched in is_valid() */ + /* If malloc fails, caught in is_valid() */ if ((m_memory= (uchar*) my_malloc(m_colcnt, MYF(MY_WME)))) { m_coltype= reinterpret_cast<uchar*>(m_memory); @@ -6593,7 +6653,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, (ulong) m_tbllen, (long) (ptr_tbllen-(const uchar*)vpart), m_colcnt, (long) (ptr_colcnt-(const uchar*)vpart))); - /* Allocate mem for all fields in one go. If fails, catched in is_valid() */ + /* Allocate mem for all fields in one go. If fails, caught in is_valid() */ m_memory= (uchar*) my_multi_malloc(MYF(MY_WME), &m_dbnam, (uint) m_dblen + 1, &m_tblnam, (uint) m_tbllen + 1, @@ -6919,7 +6979,8 @@ Write_rows_log_event::Write_rows_log_event(const char *buf, uint event_len, #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -int Write_rows_log_event::do_before_row_operations(TABLE *table) +int +Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability *const) { int error= 0; @@ -6941,26 +7002,26 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table) /* Do not raise the error flag in case of hitting to an unique attribute */ - table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + m_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); /* NDB specific: update from ndb master wrapped as Write_rows */ /* so that the event should be applied to replace slave's row */ - table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + m_table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); /* NDB specific: if update from ndb master wrapped as Write_rows does not find the row it's assumed idempotent binlog applying is taking place; don't raise the error. */ - table->file->extra(HA_EXTRA_IGNORE_NO_KEY); + m_table->file->extra(HA_EXTRA_IGNORE_NO_KEY); /* TODO: the cluster team (Tomas?) says that it's better if the engine knows how many rows are going to be inserted, then it can allocate needed memory from the start. 
*/ - table->file->ha_start_bulk_insert(0); + m_table->file->ha_start_bulk_insert(0); /* We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not use fill any TIMESTAMP column with data from the row but instead will use @@ -6976,45 +7037,31 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table) some cases we won't want TIMESTAMP_NO_AUTO_SET (will require some code to analyze if explicit data is provided for slave's TIMESTAMP columns). */ - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; return error; } -int Write_rows_log_event::do_after_row_operations(TABLE *table, int error) +int +Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability *const, + int error) { int local_error= 0; - table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); - table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); + m_table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + m_table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); /* reseting the extra with table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY); fires bug#27077 todo: explain or fix */ - if ((local_error= table->file->ha_end_bulk_insert())) + if ((local_error= m_table->file->ha_end_bulk_insert())) { - table->file->print_error(local_error, MYF(0)); + m_table->file->print_error(local_error, MYF(0)); } return error? error : local_error; } -int Write_rows_log_event::do_prepare_row(THD *thd, Relay_log_info const *rli, - TABLE *table, - uchar const *const row_start, - uchar const **const row_end) -{ - DBUG_ASSERT(table != NULL); - DBUG_ASSERT(row_start && row_end); - - if (int error= unpack_row(rli, table, m_width, row_start, &m_cols, row_end, - &m_master_reclength, table->write_set, WRITE_ROWS_EVENT)) - { - thd->net.last_errno= error; - return error; - } - bitmap_copy(table->read_set, table->write_set); - return 0; -} +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) /* Check if there are more UNIQUE keys after the given key. @@ -7028,135 +7075,6 @@ last_uniq_key(TABLE *table, uint keyno) return 1; } -/* Anonymous namespace for template functions/classes */ -namespace { - - /* - Smart pointer that will automatically call my_afree (a macro) when - the pointer goes out of scope. This is used so that I do not have - to remember to call my_afree() before each return. There is no - overhead associated with this, since all functions are inline. - - I (Matz) would prefer to use the free function as a template - parameter, but that is not possible when the "function" is a - macro. - */ - template <class Obj> - class auto_afree_ptr - { - Obj* m_ptr; - public: - auto_afree_ptr(Obj* ptr) : m_ptr(ptr) { } - ~auto_afree_ptr() { if (m_ptr) my_afree(m_ptr); } - void assign(Obj* ptr) { - /* Only to be called if it hasn't been given a value before. */ - DBUG_ASSERT(m_ptr == NULL); - m_ptr= ptr; - } - Obj* get() { return m_ptr; } - }; - -} - - -/* - Copy "extra" columns from record[1] to record[0]. - - Copy the extra fields that are not present on the master but are - present on the slave from record[1] to record[0]. This is used - after fetching a record that are to be updated, either inside - replace_record() or as part of executing an update_row(). 
- */ -static int -copy_extra_record_fields(TABLE *table, - size_t master_reclength, - my_ptrdiff_t master_fields) -{ - DBUG_ENTER("copy_extra_record_fields(table, master_reclen, master_fields)"); - DBUG_PRINT("info", ("Copying to 0x%lx " - "from field %lu at offset %lu " - "to field %d at offset %lu", - (long) table->record[0], - (ulong) master_fields, (ulong) master_reclength, - table->s->fields, table->s->reclength)); - /* - Copying the extra fields of the slave that does not exist on - master into record[0] (which are basically the default values). - */ - - if (table->s->fields < (uint) master_fields) - DBUG_RETURN(0); - - DBUG_ASSERT(master_reclength <= table->s->reclength); - if (master_reclength < table->s->reclength) - bmove_align(table->record[0] + master_reclength, - table->record[1] + master_reclength, - table->s->reclength - master_reclength); - - /* - Bit columns are special. We iterate over all the remaining - columns and copy the "extra" bits to the new record. This is - not a very good solution: it should be refactored on - opportunity. - - REFACTORING SUGGESTION (Matz). Introduce a member function - similar to move_field_offset() called copy_field_offset() to - copy field values and implement it for all Field subclasses. Use - this function to copy data from the found record to the record - that are going to be inserted. - - The copy_field_offset() function need to be a virtual function, - which in this case will prevent copying an entire range of - fields efficiently. - */ - { - Field **field_ptr= table->field + master_fields; - for ( ; *field_ptr ; ++field_ptr) - { - /* - Set the null bit according to the values in record[1] - */ - if ((*field_ptr)->maybe_null() && - (*field_ptr)->is_null_in_record(reinterpret_cast<uchar*>(table->record[1]))) - (*field_ptr)->set_null(); - else - (*field_ptr)->set_notnull(); - - /* - Do the extra work for special columns. - */ - switch ((*field_ptr)->real_type()) - { - default: - /* Nothing to do */ - break; - - case MYSQL_TYPE_BIT: - Field_bit *f= static_cast<Field_bit*>(*field_ptr); - if (f->bit_len > 0) - { - my_ptrdiff_t const offset= table->record[1] - table->record[0]; - uchar const bits= - get_rec_bits(f->bit_ptr + offset, f->bit_ofs, f->bit_len); - set_rec_bits(bits, f->bit_ptr, f->bit_ofs, f->bit_len); - } - break; - } - } - } - DBUG_RETURN(0); // All OK -} - -#define DBUG_PRINT_BITSET(N,FRM,BS) \ - do { \ - char buf[256]; \ - for (uint i = 0 ; i < (BS)->n_bits ; ++i) \ - buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \ - buf[(BS)->n_bits] = '\0'; \ - DBUG_PRINT((N), ((FRM), buf)); \ - } while (0) - - /** Check if an error is a duplicate key error. @@ -7182,45 +7100,76 @@ is_duplicate_key_error(int errcode) return false; } +/** + Write the current row into event's table. -/* - Replace the provided record in the database. + The row is located in the row buffer, pointed by @c m_curr_row member. + Number of columns of the row is stored in @c m_width member (it can be + different from the number of columns in the table to which we insert). + Bitmap @c m_cols indicates which columns are present in the row. It is assumed + that event's table is already open and pointed by @c m_table. - SYNOPSIS - replace_record() - thd Thread context for writing the record. - table Table to which record should be written. - master_reclength - Offset to first column that is not present on the master, - alternatively the length of the record on the master - side. 
+ If the same record already exists in the table it can be either overwritten + or an error is reported depending on the value of @c overwrite flag + (error reporting not yet implemented). Note that the matching record can be + different from the row we insert if we use primary keys to identify records in + the table. - RETURN VALUE - Error code on failure, 0 on success. + The row to be inserted can contain values only for selected columns. The + missing columns are filled with default values using @c prepare_record() + function. If a matching record is found in the table and @c overwritte is + true, the missing columns are taken from it. - DESCRIPTION - Similar to how it is done in mysql_insert(), we first try to do - a ha_write_row() and of that fails due to duplicated keys (or - indices), we do an ha_update_row() or a ha_delete_row() instead. - */ -static int -replace_record(THD *thd, TABLE *table, - ulong const master_reclength, - uint const master_fields) + @param rli Relay log info (needed for row unpacking). + @param overwrite + Shall we overwrite if the row already exists or signal + error (currently ignored). + + @returns Error code on failure, 0 on success. + + This method, if successful, sets @c m_curr_row_end pointer to point at the + next row in the rows buffer. This is done when unpacking the row to be + inserted. + + @note If a matching record is found, it is either updated using + @c ha_update_row() or first deleted and then new record written. +*/ + +int +Rows_log_event::write_row(const RELAY_LOG_INFO *const rli, + const bool overwrite) { - DBUG_ENTER("replace_record"); - DBUG_ASSERT(table != NULL && thd != NULL); + DBUG_ENTER("write_row"); + DBUG_ASSERT(m_table != NULL && thd != NULL); + TABLE *table= m_table; // pointer to event's table int error; int keynum; auto_afree_ptr<char> key(NULL); + /* fill table->record[0] with default values */ + + if ((error= prepare_record(rli, table, m_width, + TRUE /* check if columns have def. values */))) + DBUG_RETURN(error); + + /* unpack row into table->record[0] */ + error= unpack_current_row(rli); // TODO: how to handle errors? + #ifndef DBUG_OFF DBUG_DUMP("record[0]", table->record[0], table->s->reclength); DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set); DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set); #endif + /* + Try to write record. If a corresponding record already exists in the table, + we try to change it using ha_update_row() if possible. Otherwise we delete + it and repeat the whole process again. + + TODO: Add safety measures against infinite looping. 
+ */ + while ((error= table->file->ha_write_row(table->record[0]))) { if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT) @@ -7230,6 +7179,7 @@ replace_record(THD *thd, TABLE *table, } if ((keynum= table->file->get_dup_key(error)) < 0) { + DBUG_PRINT("info",("Can't locate duplicate key (get_dup_key returns %d)",keynum)); table->file->print_error(error, MYF(0)); /* We failed to retrieve the duplicate key @@ -7251,17 +7201,22 @@ replace_record(THD *thd, TABLE *table, */ if (table->file->ha_table_flags() & HA_DUPLICATE_POS) { + DBUG_PRINT("info",("Locating offending record using rnd_pos()")); error= table->file->rnd_pos(table->record[1], table->file->dup_ref); if (error) { + DBUG_PRINT("info",("rnd_pos() returns error %d",error)); table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } } else { + DBUG_PRINT("info",("Locating offending record using index_read_idx()")); + if (table->file->extra(HA_EXTRA_FLUSH_CACHE)) { + DBUG_PRINT("info",("Error when setting HA_EXTRA_FLUSH_CACHE")); DBUG_RETURN(my_errno); } @@ -7269,30 +7224,47 @@ replace_record(THD *thd, TABLE *table, { key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length))); if (key.get() == NULL) + { + DBUG_PRINT("info",("Can't allocate key buffer")); DBUG_RETURN(ENOMEM); + } } - key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum, 0); - error= table->file->index_read_idx(table->record[1], keynum, - (const uchar*)key.get(), - HA_WHOLE_KEY, - HA_READ_KEY_EXACT); + key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum, + 0); + error= table->file->index_read_idx_map(table->record[1], keynum, + (const uchar*)key.get(), + HA_WHOLE_KEY, + HA_READ_KEY_EXACT); if (error) { + DBUG_PRINT("info",("index_read_idx() returns error %d",error)); table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } } /* - Now, table->record[1] should contain the offending row. That + Now, record[1] should contain the offending row. That will enable us to update it or, alternatively, delete it (so that we can insert the new row afterwards). + */ - First we copy the columns into table->record[0] that are not - present on the master from table->record[1], if there are any. + /* + If row is incomplete we will use the record found to fill + missing columns. */ - copy_extra_record_fields(table, master_reclength, master_fields); + if (!get_flags(COMPLETE_ROWS_F)) + { + restore_record(table,record[1]); + error= unpack_current_row(rli); + } + +#ifndef DBUG_OFF + DBUG_PRINT("debug",("preparing for update: before and after image")); + DBUG_DUMP("record[1] (before)", table->record[1], table->s->reclength); + DBUG_DUMP("record[0] (after)", table->record[0], table->s->reclength); +#endif /* REPLACE is defined as either INSERT or DELETE + INSERT. 
If @@ -7312,18 +7284,32 @@ replace_record(THD *thd, TABLE *table, if (last_uniq_key(table, keynum) && !table->file->referenced_by_foreign_key()) { + DBUG_PRINT("info",("Updating row using ha_update_row()")); error=table->file->ha_update_row(table->record[1], table->record[0]); - if (error && error != HA_ERR_RECORD_IS_THE_SAME) - table->file->print_error(error, MYF(0)); - else + switch (error) { + + case HA_ERR_RECORD_IS_THE_SAME: + DBUG_PRINT("info",("ignoring HA_ERR_RECORD_IS_THE_SAME error from" + " ha_update_row()")); error= 0; + /* fall through */ + case 0: + break; + + default: + DBUG_PRINT("info",("ha_update_row() returns error %d",error)); + table->file->print_error(error, MYF(0)); + } + DBUG_RETURN(error); } else { + DBUG_PRINT("info",("Deleting offending row and trying to write new one again")); if ((error= table->file->ha_delete_row(table->record[1]))) { + DBUG_PRINT("info",("ha_delete_row() returns error %d",error)); table->file->print_error(error, MYF(0)); DBUG_RETURN(error); } @@ -7334,12 +7320,20 @@ replace_record(THD *thd, TABLE *table, DBUG_RETURN(error); } -int Write_rows_log_event::do_exec_row(TABLE *table) +#endif + +int +Write_rows_log_event::do_exec_row(const RELAY_LOG_INFO *const rli) { - DBUG_ASSERT(table != NULL); - int error= replace_record(thd, table, m_master_reclength, m_width); - return error; + DBUG_ASSERT(m_table != NULL); + int error= write_row(rli, TRUE /* overwrite */); + + if (error && !thd->net.last_errno) + thd->net.last_errno= error; + + return error; } + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifdef MYSQL_CLIENT @@ -7432,40 +7426,52 @@ record_compare_exit: return result; } +/** + Locate the current row in the event's table. -/* - Find the row given by 'key', if the table has keys, or else use a table scan - to find (and fetch) the row. - - If the engine allows random access of the records, a combination of - position() and rnd_pos() will be used. + The current row is pointed to by @c m_curr_row. Member @c m_width tells how many + columns there are in the row (this can be different from the number of columns + in the table). It is assumed that the event's table is already open and pointed + to by @c m_table. - @param table Pointer to table to search - @param key Pointer to key to use for search, if table has key + If a corresponding record is found in the table it is stored in + @c m_table->record[0]. Note that when the record is located based on a primary + key, it is possible that the record found differs from the row being located. - @pre <code>table->record[0]</code> shall contain the row to locate - and <code>key</code> shall contain a key to use for searching, if - the engine has a key. + If no key is specified or the table does not have keys, a table scan is used to + find the row. In that case the row should be complete and contain values for + all columns. However, it can still be shorter than the table, i.e. the table + can contain extra columns not present in the row. It is also possible that + the table has fewer columns than the row being located. - @post If the return value is zero, <code>table->record[1]</code> - will contain the fetched row and the internal "cursor" will refer to - the row. If the return value is non-zero, - <code>table->record[1]</code> is undefined. In either case, - <code>table->record[0]</code> is undefined. + @returns Error code on failure, 0 on success. + + @post In case of success @c m_table->record[0] contains the record found. + Also, the internal "cursor" of the table is positioned at the record found.
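(Again for orientation, with hypothetical Toy types rather than the server's handler interface: the strategy choice this function makes, an index lookup when a key exists and a full scan otherwise, plus an extra record comparison because the key may be non-unique, roughly amounts to the sketch below. The position()/rnd_pos() fast path has no meaningful stand-in here; it simply jumps to a previously computed row reference.)

#include <cstddef>
#include <map>
#include <optional>
#include <vector>

struct Row { int key; int value; };

struct ToyTable {
  std::vector<Row> heap;                    // physical row order (rnd_next order)
  std::multimap<int, std::size_t> index;    // possibly non-unique key -> slot
};

// Index strategy: position on the first key match, then keep stepping
// (the index_next() analogue) until the whole record compares equal --
// needed precisely because the key may not be unique.
std::optional<std::size_t> find_by_index(const ToyTable &t, const Row &sought) {
  for (auto it = t.index.lower_bound(sought.key);
       it != t.index.end() && it->first == sought.key; ++it)
    if (t.heap[it->second].value == sought.value)
      return it->second;
  return std::nullopt;
}

// Scan strategy: rnd_next() until the record compares equal, or EOF.
std::optional<std::size_t> find_by_scan(const ToyTable &t, const Row &sought) {
  for (std::size_t i = 0; i < t.heap.size(); ++i)
    if (t.heap[i].key == sought.key && t.heap[i].value == sought.value)
      return i;
  return std::nullopt;                      // HA_ERR_END_OF_FILE analogue
}

std::optional<std::size_t> find_row(const ToyTable &t, const Row &sought) {
  if (!t.index.empty())                     // like table->s->keys > 0
    return find_by_index(t, sought);
  return find_by_scan(t, sought);           // no key: full table scan
}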
- @return Zero if the row was successfully fetched into - <code>table->record[1]</code>, error code otherwise. + @note If the engine allows random access of the records, a combination of + @c position() and @c rnd_pos() will be used. */ -static int find_and_fetch_row(TABLE *table, uchar *key) +int Rows_log_event::find_row(const RELAY_LOG_INFO *rli) { - DBUG_ENTER("find_and_fetch_row(TABLE *table, uchar *key, uchar *record)"); - DBUG_PRINT("enter", ("table: 0x%lx, key: 0x%lx record: 0x%lx", - (long) table, (long) key, (long) table->record[1])); + DBUG_ENTER("find_row"); + + DBUG_ASSERT(m_table && m_table->in_use != NULL); + + TABLE *table= m_table; + int error; + + /* unpack row - missing fields get default values */ - DBUG_ASSERT(table->in_use != NULL); + // TODO: shall we check and report errors here? + prepare_record(NULL,table,m_width,FALSE /* don't check errors */); + error= unpack_current_row(rli); +#ifndef DBUG_OFF + DBUG_PRINT("info",("looking for the following record")); DBUG_DUMP("record[0]", table->record[0], table->s->reclength); +#endif if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && table->s->primary_key < MAX_KEY) @@ -7488,34 +7494,48 @@ static int find_and_fetch_row(TABLE *table, uchar *key) table->s->reclength) == 0); */ + DBUG_PRINT("info",("locating record using primary key (position)")); table->file->position(table->record[0]); - int error= table->file->rnd_pos(table->record[0], table->file->ref); - /* - rnd_pos() returns the record in table->record[0], so we have to - move it to table->record[1]. - */ - bmove_align(table->record[1], table->record[0], table->s->reclength); + error= table->file->rnd_pos(table->record[0], table->file->ref); + if (error) + { + DBUG_PRINT("info",("rnd_pos returns error %d",error)); + table->file->print_error(error, MYF(0)); + } DBUG_RETURN(error); } - /* We need to retrieve all fields */ - /* TODO: Move this out from this function to main loop */ + // We can't use position() - try other methods. + + /* + We need to retrieve all fields + TODO: Move this out from this function to main loop + */ table->use_all_columns(); if (table->s->keys > 0) { - int error; + DBUG_PRINT("info",("locating record using primary key (index_read)")); + + /* We have a key: search the table using the index */ if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE))) + { + DBUG_PRINT("info",("ha_index_init returns error %d",error)); + table->file->print_error(error, MYF(0)); DBUG_RETURN(error); + } - /* - Don't print debug messages when running valgrind since they can - trigger false warnings. - */ + /* Fill key data for the row */ + + DBUG_ASSERT(m_key); + key_copy(m_key, table->record[0], table->key_info, 0); + + /* + Don't print debug messages when running valgrind since they can + trigger false warnings. + */ #ifndef HAVE_purify - DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength); - DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength); + DBUG_DUMP("key data", m_key, table->key_info->key_length); #endif /* @@ -7527,9 +7547,11 @@ static int find_and_fetch_row(TABLE *table, uchar *key) my_ptrdiff_t const pos= table->s->null_bytes > 0 ?
table->s->null_bytes - 1 : 0; table->record[1][pos]= 0xFF; - if ((error= table->file->index_read(table->record[1], key, HA_WHOLE_KEY, - HA_READ_KEY_EXACT))) + if ((error= table->file->index_read_map(table->record[1], m_key, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT))) { + DBUG_PRINT("info",("no record matching the key found in the table")); table->file->print_error(error, MYF(0)); table->file->ha_index_end(); DBUG_RETURN(error); } @@ -7540,8 +7562,8 @@ static int find_and_fetch_row(TABLE *table, uchar *key) trigger false warnings. */ #ifndef HAVE_purify - DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength); - DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength); + DBUG_PRINT("info",("found first matching record")); + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); #endif /* Below is a minor "optimization". If the key (i.e., key number @@ -7563,10 +7585,15 @@ static int find_and_fetch_row(TABLE *table, uchar *key) DBUG_RETURN(0); } + /* + In case the key is not unique, we still have to iterate over records found + and find the one which is identical to the row given. The row is unpacked + in record[1] where missing columns are filled with default values. + */ + DBUG_PRINT("info",("non-unique index, scanning it to find matching record")); + while (record_compare(table)) { - int error; - /* We need to set the null bytes to ensure that the filler bit are all set when returning. There are storage engines that @@ -7584,9 +7611,10 @@ static int find_and_fetch_row(TABLE *table, uchar *key) if ((error= table->file->index_next(table->record[1]))) { - table->file->print_error(error, MYF(0)); + DBUG_PRINT("info",("no record matching the given row found")); + table->file->print_error(error, MYF(0)); table->file->ha_index_end(); - DBUG_RETURN(error); + DBUG_RETURN(error); } } @@ -7597,44 +7625,57 @@ static int find_and_fetch_row(TABLE *table, uchar *key) } else { + DBUG_PRINT("info",("locating record using table scan (rnd_next)")); + int restart_count= 0; // Number of times scanning has restarted from top - int error; /* We don't have a key: search the table using rnd_next() */ if ((error= table->file->ha_rnd_init(1))) - return error; + { + DBUG_PRINT("info",("error initializing table scan" + " (ha_rnd_init returns %d)",error)); + table->file->print_error(error, MYF(0)); + DBUG_RETURN(error); + } /* Continue until we find the right record or have made a full loop */ do { error= table->file->rnd_next(table->record[1]); - DBUG_DUMP("record[0]", table->record[0], table->s->reclength); - DBUG_DUMP("record[1]", table->record[1], table->s->reclength); - switch (error) { + case 0: + DBUG_DUMP("record found", table->record[0], table->s->reclength); + /* fall through */ + case HA_ERR_RECORD_DELETED: - break; + break; case HA_ERR_END_OF_FILE: - if (++restart_count < 2) - table->file->ha_rnd_init(1); - break; + if (++restart_count < 2) + table->file->ha_rnd_init(1); + break; default: - table->file->print_error(error, MYF(0)); - DBUG_PRINT("info", ("Record not found")); + DBUG_PRINT("info", ("Failed to get next record" + " (rnd_next returns %d)",error)); + table->file->print_error(error, MYF(0)); table->file->ha_rnd_end(); - DBUG_RETURN(error); + DBUG_RETURN(error); } } while (restart_count < 2 && record_compare(table)); + + /* + Note: the above record_compare() takes into account all record fields, + which might be incorrect in case a partial row was given in the event + */ /* Have to restart the scan to be able to fetch the next row. */ - DBUG_PRINT("info", ("Record %sfound", restart_count == 2 ?
"not " : "")); + if (restart_count == 2) + DBUG_PRINT("info", ("Record not found")); table->file->ha_rnd_end(); DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0); @@ -7643,6 +7684,7 @@ static int find_and_fetch_row(TABLE *table, uchar *key) DBUG_RETURN(0); } + #endif /* @@ -7654,9 +7696,6 @@ Delete_rows_log_event::Delete_rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid, MY_BITMAP const *cols, bool is_transactional) : Rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional) -#ifdef HAVE_REPLICATION - ,m_memory(NULL), m_key(NULL), m_after_image(NULL) -#endif { } #endif /* #if !defined(MYSQL_CLIENT) */ @@ -7668,23 +7707,18 @@ Delete_rows_log_event::Delete_rows_log_event(THD *thd_arg, TABLE *tbl_arg, Delete_rows_log_event::Delete_rows_log_event(const char *buf, uint event_len, const Format_description_log_event *description_event) -#if defined(MYSQL_CLIENT) : Rows_log_event(buf, event_len, DELETE_ROWS_EVENT, description_event) -#else - : Rows_log_event(buf, event_len, DELETE_ROWS_EVENT, description_event), - m_memory(NULL), m_key(NULL), m_after_image(NULL) -#endif { } #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -int Delete_rows_log_event::do_before_row_operations(TABLE *table) -{ - DBUG_ASSERT(m_memory == NULL); - if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && - table->s->primary_key < MAX_KEY) +int +Delete_rows_log_event::do_before_row_operations(const Slave_reporting_capability *const) +{ + if ((m_table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && + m_table->s->primary_key < MAX_KEY) { /* We don't need to allocate any memory for m_after_image and @@ -7693,81 +7727,39 @@ int Delete_rows_log_event::do_before_row_operations(TABLE *table) return 0; } - int error= 0; - - if (table->s->keys > 0) + if (m_table->s->keys > 0) { - m_memory= (uchar*) my_multi_malloc(MYF(MY_WME), - &m_after_image, - (uint) table->s->reclength, - &m_key, - (uint) table->key_info->key_length, - NullS); + // Allocate buffer for key searches + m_key= (uchar*)my_malloc(m_table->key_info->key_length, MYF(MY_WME)); + if (!m_key) + return HA_ERR_OUT_OF_MEM; } - else - { - m_after_image= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)); - m_memory= (uchar*)m_after_image; - m_key= NULL; - } - if (!m_memory) - return HA_ERR_OUT_OF_MEM; - - return error; + return 0; } -int Delete_rows_log_event::do_after_row_operations(TABLE *table, int error) +int +Delete_rows_log_event::do_after_row_operations(const Slave_reporting_capability *const, + int error) { /*error= ToDo:find out what this should really be, this triggers close_scan in nbd, returning error?*/ - table->file->ha_index_or_rnd_end(); - my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR)); // Free for multi_malloc - m_memory= NULL; - m_after_image= NULL; + m_table->file->ha_index_or_rnd_end(); + my_free(m_key, MYF(MY_ALLOW_ZERO_PTR)); m_key= NULL; return error; } -int Delete_rows_log_event::do_prepare_row(THD *thd, Relay_log_info const *rli, - TABLE *table, - uchar const *const row_start, - uchar const **const row_end) -{ - DBUG_ASSERT(row_start && row_end); - if (int error= unpack_row(rli, table, m_width, row_start, &m_cols, row_end, - &m_master_reclength, table->read_set, DELETE_ROWS_EVENT)) - { - thd->net.last_errno= error; - return error; - } - - /* - If we will access rows using the random access method, m_key will - be set to NULL, so we do not need to make a key copy in that case. 
- */ - if (m_key) - { - KEY *const key_info= table->key_info; - - key_copy(m_key, table->record[0], key_info, 0); - } - - return 0; -} - -int Delete_rows_log_event::do_exec_row(TABLE *table) +int Delete_rows_log_event::do_exec_row(const RELAY_LOG_INFO *const rli) { int error; - DBUG_ASSERT(table != NULL); + DBUG_ASSERT(m_table != NULL); - if (!(error= find_and_fetch_row(table, m_key))) + if (!(error= find_row(rli))) { /* - Now we should have the right row to delete. We are using - record[0] since it is guaranteed to point to a record with the - correct value. + Delete the record found, located in record[0] */ - error= table->file->ha_delete_row(table->record[0]); + error= m_table->file->ha_delete_row(m_table->record[0]); } return error; } @@ -7797,10 +7789,6 @@ Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, MY_BITMAP const *cols_ai, bool is_transactional) : Rows_log_event(thd_arg, tbl_arg, tid, cols_bi, is_transactional) -#ifdef HAVE_REPLICATION - , m_memory(NULL), m_key(NULL) - -#endif { init(cols_ai); } @@ -7810,16 +7798,13 @@ Update_rows_log_event::Update_rows_log_event(THD *thd_arg, TABLE *tbl_arg, MY_BITMAP const *cols, bool is_transactional) : Rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional) -#ifdef HAVE_REPLICATION - , m_memory(NULL), m_key(NULL) -#endif { init(cols); } void Update_rows_log_event::init(MY_BITMAP const *cols) { - /* if bitmap_init fails, catched in is_valid() */ + /* if bitmap_init fails, caught in is_valid() */ if (likely(!bitmap_init(&m_cols_ai, m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, m_width, @@ -7852,151 +7837,87 @@ Update_rows_log_event::Update_rows_log_event(const char *buf, uint event_len, const Format_description_log_event *description_event) -#if defined(MYSQL_CLIENT) : Rows_log_event(buf, event_len, UPDATE_ROWS_EVENT, description_event) -#else - : Rows_log_event(buf, event_len, UPDATE_ROWS_EVENT, description_event), - m_memory(NULL), m_key(NULL) -#endif { } #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) -int Update_rows_log_event::do_before_row_operations(TABLE *table) -{ - DBUG_ASSERT(m_memory == NULL); - - int error= 0; - if (table->s->keys > 0) - { - m_memory= (uchar*) my_multi_malloc(MYF(MY_WME), - &m_after_image, - (uint) table->s->reclength, - &m_key, - (uint) table->key_info->key_length, - NullS); - } - else +int +Update_rows_log_event::do_before_row_operations(const Slave_reporting_capability *const) +{ + if (m_table->s->keys > 0) { - m_after_image= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)); - m_memory= m_after_image; - m_key= NULL; + // Allocate buffer for key searches + m_key= (uchar*)my_malloc(m_table->key_info->key_length, MYF(MY_WME)); + if (!m_key) + return HA_ERR_OUT_OF_MEM; } - if (!m_memory) - return HA_ERR_OUT_OF_MEM; - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - return error; + return 0; } -int Update_rows_log_event::do_after_row_operations(TABLE *table, int error) +int +Update_rows_log_event::do_after_row_operations(const Slave_reporting_capability *const, + int error) { /*error= ToDo:find out what this should really be, this triggers close_scan in nbd, returning error?*/ - table->file->ha_index_or_rnd_end(); - my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR)); - m_memory= NULL; - m_after_image= NULL; + m_table->file->ha_index_or_rnd_end(); + my_free(m_key, MYF(MY_ALLOW_ZERO_PTR)); m_key= NULL; return error; } -int Update_rows_log_event::do_prepare_row(THD *thd,
Relay_log_info const *rli, - TABLE *table, - uchar const *const row_start, - uchar const **const row_end) +int +Update_rows_log_event::do_exec_row(const RELAY_LOG_INFO *const rli) { - int error; - DBUG_ASSERT(row_start && row_end); - /* - We need to perform some juggling below since unpack_row() always - unpacks into table->record[0]. For more information, see the - comments for unpack_row(). - */ - - /* record[0] is the before image for the update */ - if ((error= unpack_row(rli, table, m_width, row_start, &m_cols, row_end, - &m_master_reclength, table->read_set, UPDATE_ROWS_EVENT))) - { - thd->net.last_errno= error; - return error; - } + DBUG_ASSERT(m_table != NULL); - store_record(table, record[1]); - uchar const *next_start = *row_end; - /* m_after_image is the after image for the update */ - if ((error= unpack_row(rli, table, m_width, next_start, &m_cols_ai, row_end, - &m_master_reclength, table->write_set, UPDATE_ROWS_EVENT))) - { - thd->net.last_errno= error; + int error= find_row(rli); + if (error) return error; - } - - bmove_align(m_after_image, table->record[0], table->s->reclength); - restore_record(table, record[1]); /* - Don't print debug messages when running valgrind since they can - trigger false warnings. - */ -#ifndef HAVE_purify - DBUG_DUMP("record[0]", table->record[0], table->s->reclength); - DBUG_DUMP("m_after_image", m_after_image, table->s->reclength); -#endif - - /* - If we will access rows using the random access method, m_key will - be set to NULL, so we do not need to make a key copy in that case. - */ - if (m_key) - { - KEY *const key_info= table->key_info; - - key_copy(m_key, table->record[0], key_info, 0); - } + This is the situation after locating BI: - return error; -} + ===|=== before image ====|=== after image ===|=== + ^ ^ + m_curr_row m_curr_row_end -int Update_rows_log_event::do_exec_row(TABLE *table) -{ - DBUG_ASSERT(table != NULL); + BI found in the table is stored in record[0]. We copy it to record[1] + and unpack AI to record[0]. + */ - int error= find_and_fetch_row(table, m_key); - if (error) - return error; + store_record(m_table,record[1]); - /* - We have to ensure that the new record (i.e., the after image) is - in record[0] and the old record (i.e., the before image) is in - record[1]. This since some storage engines require this (for - example, the partition engine). - - Since find_and_fetch_row() puts the fetched record (i.e., the old - record) in record[1], we can keep it there. We put the new record - (i.e., the after image) into record[0], and copy the fields that - are on the slave (i.e., in record[1]) into record[0], effectively - overwriting the default values that where put there by the - unpack_row() function. - */ - bmove_align(table->record[0], m_after_image, table->s->reclength); - copy_extra_record_fields(table, m_master_reclength, m_width); + m_curr_row= m_curr_row_end; + error= unpack_current_row(rli); // this also updates m_curr_row_end /* Now we have the right row to update. The old row (the one we're - looking for) is in record[1] and the new row has is in record[0]. - We also have copied the original values already in the slave's - database into the after image delivered from the master. + looking for) is in record[1] and the new row is in record[0]. */ - error= table->file->ha_update_row(table->record[1], table->record[0]); +#ifndef HAVE_purify + /* + Don't print debug messages when running valgrind since they can + trigger false warnings. 
+ */ + DBUG_PRINT("info",("Updating row in table")); + DBUG_DUMP("old record", m_table->record[1], m_table->s->reclength); + DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength); +#endif + + error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]); if (error == HA_ERR_RECORD_IS_THE_SAME) error= 0; return error; } + #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ #ifdef MYSQL_CLIENT @@ -8115,5 +8036,3 @@ Incident_log_event::write_data_body(IO_CACHE *file) DBUG_ENTER("Incident_log_event::write_data_body"); DBUG_RETURN(write_str(file, m_message.str, m_message.length)); } - - diff --git a/sql/log_event.h b/sql/log_event.h index 1c4c28e5c1a..0ec98561ae0 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -23,6 +23,10 @@ #include <my_bitmap.h> #include "rpl_constants.h" +#ifndef MYSQL_CLIENT +#include "rpl_record.h" +#include "rpl_reporting.h" +#endif #define LOG_READ_EOF -1 #define LOG_READ_BOGUS -2 @@ -57,8 +61,8 @@ which increments every time we write an event to the binlog) (3 bytes). Q: how do we handle when the counter is overflowed and restarts from 0 ? - - Query and Load (Create or Execute) events may have a more precise timestamp - (with microseconds), number of matched/affected/warnings rows + - Query and Load (Create or Execute) events may have a more precise + timestamp (with microseconds), number of matched/affected/warnings rows and fields of session variables: SQL_MODE, FOREIGN_KEY_CHECKS, UNIQUE_CHECKS, SQL_AUTO_IS_NULL, the collations and charsets, the PASSWORD() version (old/new/...). @@ -706,7 +710,8 @@ public: */ static Log_event* read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, - const Format_description_log_event *description_event); + const Format_description_log_event + *description_event); static int read_log_event(IO_CACHE* file, String* packet, pthread_mutex_t* log_lock); /* @@ -734,7 +739,8 @@ public: Log_event() : temp_buf(0) {} /* avoid having to link mysqlbinlog against libpthread */ static Log_event* read_log_event(IO_CACHE* file, - const Format_description_log_event *description_event); + const Format_description_log_event + *description_event); /* print*() functions are used by mysqlbinlog */ virtual void print(FILE* file, PRINT_EVENT_INFO* print_event_info) = 0; void print_timestamp(IO_CACHE* file, time_t *ts = 0); @@ -770,12 +776,24 @@ public: { return 0; } virtual bool write_data_body(IO_CACHE* file __attribute__((unused))) { return 0; } + inline time_t get_time() + { + THD *tmp_thd; + if (when) + return when; + if (thd) + return thd->start_time; + if ((tmp_thd= current_thd)) + return tmp_thd->start_time; + return my_time(0); + } #endif virtual Log_event_type get_type_code() = 0; virtual bool is_valid() const = 0; virtual bool is_artificial_event() { return 0; } inline bool get_cache_stmt() const { return cache_stmt; } - Log_event(const char* buf, const Format_description_log_event* description_event); + Log_event(const char* buf, const Format_description_log_event + *description_event); virtual ~Log_event() { free_temp_buf();} void register_temp_buf(char* buf) { temp_buf = buf; } void free_temp_buf() @@ -798,6 +816,8 @@ public: /* returns the human readable name of the event's type */ const char* get_type_str(); + /* Return start of query time or current time */ + #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) public: @@ -809,7 +829,8 @@ public: @see do_apply_event */ - int apply_event(Relay_log_info const *rli) { + int apply_event(RELAY_LOG_INFO const *rli) + { return do_apply_event(rli); 
} @@ -917,7 +938,6 @@ protected: non-zero. The caller shall decrease the counter by one. */ virtual enum_skip_reason do_shall_skip(Relay_log_info *rli); - #endif }; @@ -1149,7 +1169,8 @@ private: char **fn_start, char **fn_end); protected: int copy_log_event(const char *buf, ulong event_len, - int body_offset, const Format_description_log_event* description_event); + int body_offset, + const Format_description_log_event* description_event); public: ulong thread_id; @@ -1294,6 +1315,11 @@ public: setting log_event == 0 (for now). */ bool artificial_event; + /* + We set this to 1 if we don't want to have the created time in the log, + which is the case when we roll over to a new log. + */ + bool dont_set_created; #ifndef MYSQL_CLIENT Start_log_event_v3(); @@ -1360,7 +1386,8 @@ public: Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); Format_description_log_event(const char* buf, uint event_len, - const Format_description_log_event* description_event); + const Format_description_log_event + *description_event); ~Format_description_log_event() { my_free((uchar*)post_header_len, MYF(0)); } Log_event_type get_type_code() { return FORMAT_DESCRIPTION_EVENT;} #ifndef MYSQL_CLIENT @@ -1418,7 +1445,8 @@ public: void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Intvar_log_event(const char* buf, const Format_description_log_event* description_event); + Intvar_log_event(const char* buf, + const Format_description_log_event *description_event); ~Intvar_log_event() {} Log_event_type get_type_code() { return INTVAR_EVENT;} const char* get_var_type_name(); @@ -1465,7 +1493,8 @@ class Rand_log_event: public Log_event void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Rand_log_event(const char* buf, const Format_description_log_event* description_event); + Rand_log_event(const char* buf, + const Format_description_log_event *description_event); ~Rand_log_event() {} Log_event_type get_type_code() { return RAND_EVENT;} int get_data_size() { return 16; /* sizeof(ulonglong) * 2*/ } @@ -1508,7 +1537,8 @@ class Xid_log_event: public Log_event void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Xid_log_event(const char* buf, const Format_description_log_event* description_event); + Xid_log_event(const char* buf, + const Format_description_log_event *description_event); ~Xid_log_event() {} Log_event_type get_type_code() { return XID_EVENT;} int get_data_size() { return sizeof(xid); } @@ -1554,7 +1584,8 @@ public: void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - User_var_log_event(const char* buf, const Format_description_log_event* description_event); + User_var_log_event(const char* buf, + const Format_description_log_event *description_event); ~User_var_log_event() {} Log_event_type get_type_code() { return USER_VAR_EVENT;} #ifndef MYSQL_CLIENT @@ -1586,7 +1617,8 @@ public: void print(FILE* file, PRINT_EVENT_INFO* print_event_info); #endif - Stop_log_event(const char* buf, const Format_description_log_event* description_event): + Stop_log_event(const char* buf, + const Format_description_log_event *description_event): Log_event(buf, description_event) {} ~Stop_log_event() {} @@ -1695,7 +1727,8 @@ public: #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); - void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool enable_local); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info, + bool enable_local); #endif Create_file_log_event(const char* buf, uint event_len,
@@ -1770,7 +1803,8 @@ public: #endif Append_block_log_event(const char* buf, uint event_len, - const Format_description_log_event* description_event); + const Format_description_log_event + *description_event); ~Append_block_log_event() {} Log_event_type get_type_code() { return APPEND_BLOCK_EVENT;} int get_data_size() { return block_len + APPEND_BLOCK_HEADER_LEN ;} @@ -1806,7 +1840,8 @@ public: #endif /* HAVE_REPLICATION */ #else void print(FILE* file, PRINT_EVENT_INFO* print_event_info); - void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool enable_local); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info, + bool enable_local); #endif Delete_file_log_event(const char* buf, uint event_len, @@ -1849,7 +1884,8 @@ public: #endif Execute_load_log_event(const char* buf, uint event_len, - const Format_description_log_event* description_event); + const Format_description_log_event + *description_event); ~Execute_load_log_event() {} Log_event_type get_type_code() { return EXEC_LOAD_EVENT;} int get_data_size() { return EXEC_LOAD_HEADER_LEN ;} @@ -1888,7 +1924,8 @@ public: #endif /* HAVE_REPLICATION */ #endif Begin_load_query_log_event(const char* buf, uint event_len, - const Format_description_log_event* description_event); + const Format_description_log_event + *description_event); ~Begin_load_query_log_event() {} Log_event_type get_type_code() { return BEGIN_LOAD_QUERY_EVENT; } }; @@ -1940,7 +1977,8 @@ public: const char *local_fname); #endif Execute_load_query_log_event(const char* buf, uint event_len, - const Format_description_log_event *description_event); + const Format_description_log_event + *description_event); ~Execute_load_query_log_event() {} Log_event_type get_type_code() { return EXECUTE_LOAD_QUERY_EVENT; } @@ -1967,7 +2005,8 @@ public: Log_event's ctor, this way we can extract maximum information from the event's header (the unique ID for example). */ - Unknown_log_event(const char* buf, const Format_description_log_event* description_event): + Unknown_log_event(const char* buf, + const Format_description_log_event *description_event): Log_event(buf, description_event) {} ~Unknown_log_event() {} @@ -2142,7 +2181,13 @@ public: NO_FOREIGN_KEY_CHECKS_F = (1U << 1), /* Value of the OPTION_RELAXED_UNIQUE_CHECKS flag in thd->options */ - RELAXED_UNIQUE_CHECKS_F = (1U << 2) + RELAXED_UNIQUE_CHECKS_F = (1U << 2), + + /** + Indicates that rows in this event are complete, that is, they contain + values for all columns of the table. + */ + COMPLETE_ROWS_F = (1U << 3) }; typedef uint16 flag_set; @@ -2246,7 +2291,26 @@ protected: uchar *m_rows_cur; /* One-after the end of the data */ uchar *m_rows_end; /* One-after the end of the allocated space */ + const uchar *m_curr_row; /* Start of the row being processed */ + const uchar *m_curr_row_end; /* One-after the end of the current row */ + flag_set m_flags; /* Flags for row-level events */ + uchar *m_key; /* Buffer to keep key value during searches */ + + /* helper functions */ + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + int find_row(const RELAY_LOG_INFO *const); + int write_row(const RELAY_LOG_INFO *const, const bool); + + // Unpack the current row into m_table->record[0] + int unpack_current_row(const RELAY_LOG_INFO *const rli) + { + DBUG_ASSERT(m_table); + return ::unpack_row(rli, m_table, m_width, m_curr_row, &m_cols, + &m_curr_row_end, &m_master_reclength); + } +#endif private: @@ -2271,7 +2335,8 @@ private: The member function will return 0 if all went OK, or a non-zero error code otherwise.
*/ - virtual int do_before_row_operations(TABLE *table) = 0; + virtual + int do_before_row_operations(const Slave_reporting_capability *const log) = 0; /* Primitive to clean up after a sequence of row executions. DESCRIPTION After doing a sequence of do_prepare_row() and do_exec_row(), this member function should be called to clean up and release any allocated buffers. + + The error argument, if non-zero, indicates an error which happened during + row processing before this function was called. In this case, even if the + function is successful, it should return the error code given in the argument. */ - virtual int do_after_row_operations(TABLE *table, int error) = 0; - - /* - Primitive to prepare for handling one row in a row-level event. - - DESCRIPTION - - The member function prepares for execution of operations needed for one - row in a row-level event by reading up data from the buffer containing - the row. No specific interpretation of the data is normally done here, - since SQL thread specific data is not available: that data is made - available for the do_exec function. - - A pointer to the start of the next row, or NULL if the preparation - failed. Currently, preparation cannot fail, but don't rely on this - behavior. - - RETURN VALUE - Error code, if something went wrong, 0 otherwise. - */ - virtual int do_prepare_row(THD*, Relay_log_info const*, TABLE*, - uchar const *row_start, - uchar const **row_end) = 0; + virtual + int do_after_row_operations(const Slave_reporting_capability *const log, + int error) = 0; /* Primitive to do the actual execution necessary for a row. DESCRIPTION The member function will do the actual execution needed to handle a row. + The row is located at m_curr_row. When the function returns, + m_curr_row_end should point at the next row (one byte after the end + of the current row). RETURN VALUE 0 if execution succeeded, 1 if execution failed.
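(A compact sketch of how these three primitives are meant to cooperate with the m_curr_row / m_curr_row_end cursor described above. ToyEvent is a hypothetical stand-in; the real driver is the row loop in Rows_log_event's apply code.)

struct ToyEvent {
  const unsigned char *rows_begin;    // first packed row in the buffer
  const unsigned char *rows_end;      // one past the last packed row
  const unsigned char *curr_row;      // m_curr_row analogue
  const unsigned char *curr_row_end;  // m_curr_row_end analogue

  virtual int do_before_row_operations() = 0;
  // Contract: process the row at curr_row and set curr_row_end to one
  // byte past its end, so the loop below can step to the next row.
  virtual int do_exec_row() = 0;
  virtual int do_after_row_operations(int error) = 0;
  virtual ~ToyEvent() {}

  int apply() {
    int error = do_before_row_operations();
    for (curr_row = rows_begin; !error && curr_row < rows_end;
         curr_row = curr_row_end)
      error = do_exec_row();
    // Cleanup always runs; a prior error must be passed through unchanged.
    return do_after_row_operations(error);
  }
};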
*/ - virtual int do_exec_row(TABLE *table) = 0; + virtual int do_exec_row(const RELAY_LOG_INFO *const rli) = 0; #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ -}; + friend class Old_rows_log_event; +}; /***************************************************************************** @@ -2369,14 +2422,9 @@ private: #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - uchar *m_memory; - uchar *m_after_image; - - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, Relay_log_info const*, TABLE*, - uchar const *row_start, uchar const **row_end); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(const Slave_reporting_capability *const); + virtual int do_after_row_operations(const Slave_reporting_capability *const,int); + virtual int do_exec_row(const RELAY_LOG_INFO *const); #endif }; @@ -2448,15 +2496,9 @@ protected: #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - uchar *m_memory; - uchar *m_key; - uchar *m_after_image; - - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, Relay_log_info const*, TABLE*, - uchar const *row_start, uchar const **row_end); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(const Slave_reporting_capability *const); + virtual int do_after_row_operations(const Slave_reporting_capability *const,int); + virtual int do_exec_row(const RELAY_LOG_INFO *const); #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ }; @@ -2519,15 +2561,9 @@ protected: #endif #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - uchar *m_memory; - uchar *m_key; - uchar *m_after_image; - - virtual int do_before_row_operations(TABLE *table); - virtual int do_after_row_operations(TABLE *table, int error); - virtual int do_prepare_row(THD*, Relay_log_info const*, TABLE*, - uchar const *row_start, uchar const **row_end); - virtual int do_exec_row(TABLE *table); + virtual int do_before_row_operations(const Slave_reporting_capability *const); + virtual int do_after_row_operations(const Slave_reporting_capability *const,int); + virtual int do_exec_row(const RELAY_LOG_INFO *const); #endif }; diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 8663963b35e..420df67dc54 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -1,11 +1,969 @@ #include "mysql_priv.h" +#ifndef MYSQL_CLIENT +#include "rpl_rli.h" +#include "rpl_utility.h" +#endif #include "log_event_old.h" #include "rpl_record_old.h" #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + +// Old implementation of do_apply_event() +int +Old_rows_log_event::do_apply_event(Rows_log_event *ev, const RELAY_LOG_INFO *rli) +{ + DBUG_ENTER("Rows_log_event::do_apply_event(st_relay_log_info*)"); + int error= 0; + THD *thd= ev->thd; + uchar const *row_start= ev->m_rows_buf; + + /* + If m_table_id == ~0UL, then we have a dummy event that does not + contain any data. In that case, we just remove all tables in the + tables_to_lock list, close the thread tables, and return with + success. + */ + if (ev->m_table_id == ~0UL) + { + /* + This one is supposed to be set: just an extra check so that + nothing strange has happened. 
+ */ + DBUG_ASSERT(ev->get_flags(Rows_log_event::STMT_END_F)); + + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); + close_thread_tables(thd); + thd->clear_error(); + DBUG_RETURN(0); + } + + /* + 'thd' has been set by exec_relay_log_event(), just before calling + do_apply_event(). We still check here to prevent future coding + errors. + */ + DBUG_ASSERT(rli->sql_thd == thd); + + /* + If there are no locks taken, this is the first binrow event seen + after the table map events. We should then lock all the tables + used in the transaction and proceed with execution of the actual + event. + */ + if (!thd->lock) + { + bool need_reopen= 1; /* To execute the first lap of the loop below */ + + /* + lock_tables() reads the contents of thd->lex, so they must be + initialized. Contrary to + Table_map_log_event::do_apply_event(), we don't call + mysql_init_query() as that may reset the binlog format. + */ + lex_start(thd); + + while ((error= lock_tables(thd, rli->tables_to_lock, + rli->tables_to_lock_count, &need_reopen))) + { + if (!need_reopen) + { + if (thd->query_error || thd->is_fatal_error) + { + /* + Error reporting borrowed from Query_log_event with many excessive + simplifications (we don't honour --slave-skip-errors) + */ + uint actual_error= thd->net.last_errno; + rli->report(ERROR_LEVEL, actual_error, + "Error '%s' in %s event: when locking tables", + (actual_error ? thd->net.last_error : + "unexpected success or fatal error"), + ev->get_type_str()); + thd->is_fatal_error= 1; + } + else + { + rli->report(ERROR_LEVEL, error, + "Error in %s event: when locking tables", + ev->get_type_str()); + } + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); + DBUG_RETURN(error); + } + + /* + So we need to reopen the tables. + + We need to flush the pending RBR event, since it keeps a + pointer to an open table. + + ALTERNATIVE SOLUTION (not implemented): Extract a pointer to + the pending RBR event and reset the table pointer after the + tables have been reopened. + + NOTE: For this new scheme there should be no pending event: + need to add code to assert that is the case. + */ + thd->binlog_flush_pending_rows_event(false); + TABLE_LIST *tables= rli->tables_to_lock; + close_tables_for_reopen(thd, &tables); + + uint tables_count= rli->tables_to_lock_count; + if ((error= open_tables(thd, &tables, &tables_count, 0))) + { + if (thd->query_error || thd->is_fatal_error) + { + /* + Error reporting borrowed from Query_log_event with many excessive + simplifications (we don't honour --slave-skip-errors) + */ + uint actual_error= thd->net.last_errno; + rli->report(ERROR_LEVEL, actual_error, + "Error '%s' on reopening tables", + (actual_error ? thd->net.last_error : + "unexpected success or fatal error")); + thd->query_error= 1; + } + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); + DBUG_RETURN(error); + } + } + + /* + When the open and locking succeeded, we check all tables to + ensure that they still have the correct type. + + We can use a down cast here since we know that every table added + to the tables_to_lock is a RPL_TABLE_LIST. + */ + + { + RPL_TABLE_LIST *ptr= rli->tables_to_lock; + for ( ; ptr ; ptr= static_cast<RPL_TABLE_LIST*>(ptr->next_global)) + { + if (ptr->m_tabledef.compatible_with(rli, ptr->table)) + { + mysql_unlock_tables(thd, thd->lock); + thd->lock= 0; + thd->query_error= 1; + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); + DBUG_RETURN(Rows_log_event::ERR_BAD_TABLE_DEF); + } + } + } + + /* + ...
and then we add all the tables to the table map and remove + them from tables to lock. + + We also invalidate the query cache for all the tables, since + they will now be changed. + + TODO [/Matz]: Maybe the query cache should not be invalidated + here? It might be that a table is not changed, even though it + was locked for the statement. We do know that each + Rows_log_event contain at least one row, so after processing one + Rows_log_event, we can invalidate the query cache for the + associated table. + */ + for (TABLE_LIST *ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global) + { + const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.set_table(ptr->table_id, ptr->table); + } +#ifdef HAVE_QUERY_CACHE + query_cache.invalidate_locked_for_write(rli->tables_to_lock); +#endif + } + + TABLE* table= const_cast<RELAY_LOG_INFO*>(rli)->m_table_map.get_table(ev->m_table_id); + + if (table) + { + /* + table == NULL means that this table should not be replicated + (this was set up by Table_map_log_event::do_apply_event() + which tested replicate-* rules). + */ + + /* + It's not needed to set_time() but + 1) it continues the property that "Time" in SHOW PROCESSLIST shows how + much slave is behind + 2) it will be needed when we allow replication from a table with no + TIMESTAMP column to a table with one. + So we call set_time(), like in SBR. Presently it changes nothing. + */ + thd->set_time((time_t)ev->when); + /* + There are a few flags that are replicated with each row event. + Make sure to set/clear them before executing the main body of + the event. + */ + if (ev->get_flags(Rows_log_event::NO_FOREIGN_KEY_CHECKS_F)) + thd->options|= OPTION_NO_FOREIGN_KEY_CHECKS; + else + thd->options&= ~OPTION_NO_FOREIGN_KEY_CHECKS; + + if (ev->get_flags(Rows_log_event::RELAXED_UNIQUE_CHECKS_F)) + thd->options|= OPTION_RELAXED_UNIQUE_CHECKS; + else + thd->options&= ~OPTION_RELAXED_UNIQUE_CHECKS; + /* A small test to verify that objects have consistent types */ + DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS)); + + /* + Now we are in a statement and will stay in a statement until we + see a STMT_END_F. + + We set this flag here, before actually applying any rows, in + case the SQL thread is stopped and we need to detect that we're + inside a statement and halting abruptly might cause problems + when restarting. + */ + const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT); + + error= do_before_row_operations(table); + while (error == 0 && row_start < ev->m_rows_end) + { + uchar const *row_end= NULL; + if ((error= do_prepare_row(thd, rli, table, row_start, &row_end))) + break; // We should perform the after-row operation even in + // the case of error + + DBUG_ASSERT(row_end != NULL); // cannot happen + DBUG_ASSERT(row_end <= ev->m_rows_end); + + /* in_use can have been set to NULL in close_tables_for_reopen */ + THD* old_thd= table->in_use; + if (!table->in_use) + table->in_use= thd; + error= do_exec_row(table); + table->in_use = old_thd; + switch (error) + { + /* Some recoverable errors */ + case HA_ERR_RECORD_CHANGED: + case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if + tuple does not exist */ + error= 0; + case 0: + break; + + default: + rli->report(ERROR_LEVEL, thd->net.last_errno, + "Error in %s event: row application failed. %s", + ev->get_type_str(), + thd->net.last_error ? 
thd->net.last_error : ""); + thd->query_error= 1; + break; + } + + row_start= row_end; + } + DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event", + const_cast<RELAY_LOG_INFO*>(rli)->abort_slave= 1;); + error= do_after_row_operations(table, error); + if (!ev->cache_stmt) + { + DBUG_PRINT("info", ("Marked that we need to keep log")); + thd->options|= OPTION_KEEP_LOG; + } + } + + /* + We need to delay this clear until the table def is no longer needed. + The table def is needed in unpack_row(). + */ + if (rli->tables_to_lock && ev->get_flags(Rows_log_event::STMT_END_F)) + const_cast<RELAY_LOG_INFO*>(rli)->clear_tables_to_lock(); + + if (error) + { /* an error has occurred during the transaction */ + rli->report(ERROR_LEVEL, thd->net.last_errno, + "Error in %s event: error during transaction execution " + "on table %s.%s. %s", + ev->get_type_str(), table->s->db.str, + table->s->table_name.str, + thd->net.last_error ? thd->net.last_error : ""); + + /* + If one day we honour --skip-slave-errors in row-based replication, and + the error should be skipped, then we would clear mappings, rollback, + close tables, but the slave SQL thread would not stop and then may + assume the mapping is still available, the tables are still open... + So then we should clear mappings/rollback/close here only if this is a + STMT_END_F. + For now we code, knowing that error is not skippable and so slave SQL + thread is certainly going to stop. + rollback at the caller along with sbr. + */ + thd->reset_current_stmt_binlog_row_based(); + const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, error); + thd->query_error= 1; + DBUG_RETURN(error); + } + + /* + This code would ideally be placed in do_update_pos() instead, but + since we have no access to table there, we do the setting of + last_event_start_time here instead. + */ + if (table && (table->s->primary_key == MAX_KEY) && + !ev->cache_stmt && + ev->get_flags(Rows_log_event::STMT_END_F) == Rows_log_event::RLE_NO_FLAGS) + { + /* + ------------ Temporary fix until WL#2975 is implemented --------- + + This event is not the last one (no STMT_END_F). If we stop now + (in case of terminate_slave_thread()), how will we restart? We + have to restart from Table_map_log_event, but as this table is + not transactional, the rows already inserted will still be + present, and idempotency is not guaranteed (no PK) so we risk + that repeating leads to double insert. So we desperately try to + continue, hope we'll eventually leave this buggy situation (by + executing the final Rows_log_event). If we are in a hopeless + wait (reached end of last relay log and nothing gets appended + there), we time out after one minute, and notify DBA about the + problem. When WL#2975 is implemented, just remove the member + st_relay_log_info::last_event_start_time and all its occurrences. + */ + const_cast<RELAY_LOG_INFO*>(rli)->last_event_start_time= my_time(0); + } + + DBUG_RETURN(0); +} +#endif + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + +/* + Check if there are more UNIQUE keys after the given key. +*/ +static int +last_uniq_key(TABLE *table, uint keyno) +{ + while (++keyno < table->s->keys) + if (table->key_info[keyno].flags & HA_NOSAME) + return 0; + return 1; +} + +/* + Compares table->record[0] and table->record[1] + + Returns TRUE if different. +*/ +static bool record_compare(TABLE *table) +{ + /* + Need to set the X bit and the filler bits in both records since + there are engines that do not set it correctly.
+ + In addition, since MyISAM checks that one hasn't tampered with the + record, it is necessary to restore the old bytes into the record + after doing the comparison. + + TODO[record format ndb]: Remove it once NDB returns correct + records. Check that the other engines also return correct records. + */ + + bool result= FALSE; + uchar saved_x[2], saved_filler[2]; + + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + saved_x[i]= table->record[i][0]; + saved_filler[i]= table->record[i][table->s->null_bytes - 1]; + table->record[i][0]|= 1U; + table->record[i][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + } + + if (table->s->blob_fields + table->s->varchar_fields == 0) + { + result= cmp_record(table,record[1]); + goto record_compare_exit; + } + + /* Compare null bits */ + if (memcmp(table->null_flags, + table->null_flags+table->s->rec_buff_length, + table->s->null_bytes)) + { + result= TRUE; // Diff in NULL value + goto record_compare_exit; + } + + /* Compare updated fields */ + for (Field **ptr=table->field ; *ptr ; ptr++) + { + if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + { + result= TRUE; + goto record_compare_exit; + } + } + +record_compare_exit: + /* + Restore the saved bytes. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. + */ + if (table->s->null_bytes > 0) + { + for (int i = 0 ; i < 2 ; ++i) + { + table->record[i][0]= saved_x[i]; + table->record[i][table->s->null_bytes - 1]= saved_filler[i]; + } + } + + return result; +} + +/* + Copy "extra" columns from record[1] to record[0]. + + Copy the extra fields that are not present on the master but are + present on the slave from record[1] to record[0]. This is used + after fetching a record that is to be updated, either inside + replace_record() or as part of executing an update_row(). + */ +static int +copy_extra_record_fields(TABLE *table, + size_t master_reclength, + my_ptrdiff_t master_fields) +{ + DBUG_ENTER("copy_extra_record_fields(table, master_reclen, master_fields)"); + DBUG_PRINT("info", ("Copying to 0x%lx " + "from field %lu at offset %lu " + "to field %d at offset %lu", + (long) table->record[0], + (ulong) master_fields, (ulong) master_reclength, + table->s->fields, table->s->reclength)); + /* + Copying the extra fields of the slave that do not exist on the + master into record[0] (which are basically the default values). + */ + + if (table->s->fields < (uint) master_fields) + DBUG_RETURN(0); + + DBUG_ASSERT(master_reclength <= table->s->reclength); + if (master_reclength < table->s->reclength) + bmove_align(table->record[0] + master_reclength, + table->record[1] + master_reclength, + table->s->reclength - master_reclength); + + /* + Bit columns are special. We iterate over all the remaining + columns and copy the "extra" bits to the new record. This is + not a very good solution: it should be refactored when an + opportunity arises. + + REFACTORING SUGGESTION (Matz). Introduce a member function + similar to move_field_offset() called copy_field_offset() to + copy field values and implement it for all Field subclasses. Use + this function to copy data from the found record to the record + that is going to be inserted. + + The copy_field_offset() function needs to be a virtual function, + which in this case will prevent copying an entire range of + fields efficiently.
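(A hedged sketch of what the suggested copy_field_offset() member could look like: one virtual per column type, each copying its own value between two record buffers given their distance. ToyField and ToyBitField are illustrative only, not the server's Field hierarchy.)

#include <cstddef>
#include <cstring>

struct ToyField {
  unsigned char *ptr;                 // this column's bytes inside record[0]
  std::size_t length;

  // Copy this column's value into record[0] from the buffer that lies
  // `offset` bytes away (e.g. record[1] - record[0]).
  virtual void copy_field_offset(std::ptrdiff_t offset) {
    std::memcpy(ptr, ptr + offset, length);
  }
  virtual ~ToyField() {}
};

// A column with out-of-band bits (the Field_bit case handled above) would
// override the virtual to also move the bits stored outside the
// byte-aligned part of the record.
struct ToyBitField : ToyField {
  void copy_field_offset(std::ptrdiff_t offset) override {
    ToyField::copy_field_offset(offset);
    // ...also copy the bits kept in the null-byte area here...
  }
};

(With such a virtual in place, the BIT special-casing below would collapse into the override, at the cost the comment notes: no bulk copy of a whole byte range.)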
+ */ + { + Field **field_ptr= table->field + master_fields; + for ( ; *field_ptr ; ++field_ptr) + { + /* + Set the null bit according to the values in record[1] + */ + if ((*field_ptr)->maybe_null() && + (*field_ptr)->is_null_in_record(reinterpret_cast<uchar*>(table->record[1]))) + (*field_ptr)->set_null(); + else + (*field_ptr)->set_notnull(); + + /* + Do the extra work for special columns. + */ + switch ((*field_ptr)->real_type()) + { + default: + /* Nothing to do */ + break; + + case MYSQL_TYPE_BIT: + Field_bit *f= static_cast<Field_bit*>(*field_ptr); + if (f->bit_len > 0) + { + my_ptrdiff_t const offset= table->record[1] - table->record[0]; + uchar const bits= + get_rec_bits(f->bit_ptr + offset, f->bit_ofs, f->bit_len); + set_rec_bits(bits, f->bit_ptr, f->bit_ofs, f->bit_len); + } + break; + } + } + } + DBUG_RETURN(0); // All OK +} + +/* + Replace the provided record in the database. + + SYNOPSIS + replace_record() + thd Thread context for writing the record. + table Table to which record should be written. + master_reclength + Offset to first column that is not present on the master, + alternatively the length of the record on the master + side. + + RETURN VALUE + Error code on failure, 0 on success. + + DESCRIPTION + Similar to how it is done in mysql_insert(), we first try to do + a ha_write_row() and if that fails due to duplicated keys (or + indices), we do an ha_update_row() or a ha_delete_row() instead. + */ +static int +replace_record(THD *thd, TABLE *table, + ulong const master_reclength, + uint const master_fields) +{ + DBUG_ENTER("replace_record"); + DBUG_ASSERT(table != NULL && thd != NULL); + + int error; + int keynum; + auto_afree_ptr<char> key(NULL); + +#ifndef DBUG_OFF + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set); + DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set); +#endif + + while ((error= table->file->ha_write_row(table->record[0]))) + { + if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT) + { + table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */ + DBUG_RETURN(error); + } + if ((keynum= table->file->get_dup_key(error)) < 0) + { + table->file->print_error(error, MYF(0)); + /* + We failed to retrieve the duplicate key + - either because the error was not "duplicate key" error + - or because the information about which key was duplicated is not available + */ + DBUG_RETURN(error); + } + + /* + We need to retrieve the old row into record[1] to be able to + either update or delete the offending record. We either: + + - use rnd_pos() with a row-id (available as dupp_row) to the + offending row, if that is possible (MyISAM and Blackhole), or else + + - use index_read_idx() with the key that is duplicated, to + retrieve the offending row.
+ */ + if (table->file->ha_table_flags() & HA_DUPLICATE_POS) + { + error= table->file->rnd_pos(table->record[1], table->file->dup_ref); + if (error) + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(error); + } + } + else + { + if (table->file->extra(HA_EXTRA_FLUSH_CACHE)) + { + DBUG_RETURN(my_errno); + } + + if (key.get() == NULL) + { + key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length))); + if (key.get() == NULL) + DBUG_RETURN(ENOMEM); + } + + key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum, + 0); + error= table->file->index_read_idx_map(table->record[1], keynum, + (const uchar*)key.get(), + HA_WHOLE_KEY, + HA_READ_KEY_EXACT); + if (error) + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(error); + } + } + + /* + Now, table->record[1] should contain the offending row. That + will enable us to update it or, alternatively, delete it (so + that we can insert the new row afterwards). + + First we copy the columns into table->record[0] that are not + present on the master from table->record[1], if there are any. + */ + copy_extra_record_fields(table, master_reclength, master_fields); + + /* + REPLACE is defined as either INSERT or DELETE + INSERT. If + possible, we can replace it with an UPDATE, but that will not + work on InnoDB if FOREIGN KEY checks are necessary. + + I (Matz) am not sure of the reason for the last_uniq_key() + check, but I'm guessing that it's something along the + following lines. + + Suppose that we got the duplicate key to be a key that is not + the last unique key for the table and we perform an update: + then there might be another key for which the unique check will + fail, so we're better off just deleting the row and inserting + the correct row. + */ + if (last_uniq_key(table, keynum) && + !table->file->referenced_by_foreign_key()) + { + error=table->file->ha_update_row(table->record[1], + table->record[0]); + if (error && error != HA_ERR_RECORD_IS_THE_SAME) + table->file->print_error(error, MYF(0)); + else + error= 0; + DBUG_RETURN(error); + } + else + { + if ((error= table->file->ha_delete_row(table->record[1]))) + { + table->file->print_error(error, MYF(0)); + DBUG_RETURN(error); + } + /* Will retry ha_write_row() with the offending row removed. */ + } + } + + DBUG_RETURN(error); +} + +/** + Find the row given by 'key', if the table has keys, or else use a table scan + to find (and fetch) the row. + + If the engine allows random access of the records, a combination of + position() and rnd_pos() will be used. + + @param table Pointer to table to search + @param key Pointer to key to use for search, if table has key + + @pre <code>table->record[0]</code> shall contain the row to locate + and <code>key</code> shall contain a key to use for searching, if + the engine has a key. + + @post If the return value is zero, <code>table->record[1]</code> + will contain the fetched row and the internal "cursor" will refer to + the row. If the return value is non-zero, + <code>table->record[1]</code> is undefined. In either case, + <code>table->record[0]</code> is undefined. + + @return Zero if the row was successfully fetched into + <code>table->record[1]</code>, error code otherwise.
+ */ + +static int find_and_fetch_row(TABLE *table, uchar *key) +{ + DBUG_ENTER("find_and_fetch_row(TABLE *table, uchar *key, uchar *record)"); + DBUG_PRINT("enter", ("table: 0x%lx, key: 0x%lx record: 0x%lx", + (long) table, (long) key, (long) table->record[1])); + + DBUG_ASSERT(table->in_use != NULL); + + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + + if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && + table->s->primary_key < MAX_KEY) + { + /* + Use a more efficient method to fetch the record given by + table->record[0] if the engine allows it. We first compute a + row reference using the position() member function (it will be + stored in table->file->ref) and then use rnd_pos() to position + the "cursor" (i.e., record[0] in this case) at the correct row. + + TODO: Add a check that the correct record has been fetched by + comparing with the original record. Take into account that the + record on the master and slave can be of different + length. Something along these lines should work: + + ADD>>> store_record(table,record[1]); + int error= table->file->rnd_pos(table->record[0], table->file->ref); + ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0], + table->s->reclength) == 0); + + */ + table->file->position(table->record[0]); + int error= table->file->rnd_pos(table->record[0], table->file->ref); + /* + rnd_pos() returns the record in table->record[0], so we have to + move it to table->record[1]. + */ + bmove_align(table->record[1], table->record[0], table->s->reclength); + DBUG_RETURN(error); + } + + /* We need to retrieve all fields */ + /* TODO: Move this out from this function to main loop */ + table->use_all_columns(); + + if (table->s->keys > 0) + { + int error; + /* We have a key: search the table using the index */ + if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE))) + DBUG_RETURN(error); + + /* + Don't print debug messages when running valgrind since they can + trigger false warnings. + */ +#ifndef HAVE_purify + DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength); +#endif + + /* + We need to set the null bytes to ensure that the filler bits are + all set when returning. There are storage engines that just set + the necessary bits on the bytes and don't set the filler bits + correctly. + */ + my_ptrdiff_t const pos= + table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0; + table->record[1][pos]= 0xFF; + if ((error= table->file->index_read_map(table->record[1], key, HA_WHOLE_KEY, + HA_READ_KEY_EXACT))) + { + table->file->print_error(error, MYF(0)); + table->file->ha_index_end(); + DBUG_RETURN(error); + } + + /* + Don't print debug messages when running valgrind since they can + trigger false warnings. + */ +#ifndef HAVE_purify + DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength); +#endif + /* + Below is a minor "optimization". If the key (i.e., key number + 0) has the HA_NOSAME flag set, we know that we have found the + correct record (since there can be no duplicates); otherwise, we + have to compare the record with the one found to see if it is + the correct one. + + CAVEAT! This behaviour is essential for the replication of, + e.g., the mysql.proc table since the correct record *shall* be + found using the primary key *only*. There shall be no + comparison of non-PK columns to decide if the correct record is
I can see no scenario where it would be incorrect to + choose the row to change using only a PK or a UNNI (unique + not-null index). + */ + if (table->key_info->flags & HA_NOSAME) + { + table->file->ha_index_end(); + DBUG_RETURN(0); + } + + while (record_compare(table)) + { + int error; + + /* + We need to set the null bytes to ensure that the filler bits + are all set when returning. There are storage engines that + just set the necessary bits on the bytes and don't set the + filler bits correctly. + + TODO[record format ndb]: Remove this code once NDB returns the + correct record format. + */ + if (table->s->null_bytes > 0) + { + table->record[1][table->s->null_bytes - 1]|= + 256U - (1U << table->s->last_null_bit_pos); + } + + if ((error= table->file->index_next(table->record[1]))) + { + table->file->print_error(error, MYF(0)); + table->file->ha_index_end(); + DBUG_RETURN(error); + } + } + + /* + Have to restart the scan to be able to fetch the next row. + */ + table->file->ha_index_end(); + } + else + { + int restart_count= 0; // Number of times scanning has restarted from top + int error; + + /* We don't have a key: search the table using rnd_next() */ + if ((error= table->file->ha_rnd_init(1))) + return error; + + /* Continue until we find the right record or have made a full loop */ + do + { + error= table->file->rnd_next(table->record[1]); + + DBUG_DUMP("record[0]", table->record[0], table->s->reclength); + DBUG_DUMP("record[1]", table->record[1], table->s->reclength); + + switch (error) { + case 0: + case HA_ERR_RECORD_DELETED: + break; + + case HA_ERR_END_OF_FILE: + if (++restart_count < 2) + table->file->ha_rnd_init(1); + break; + + default: + table->file->print_error(error, MYF(0)); + DBUG_PRINT("info", ("Record not found")); + table->file->ha_rnd_end(); + DBUG_RETURN(error); + } + } + while (restart_count < 2 && record_compare(table)); + + /* + Have to restart the scan to be able to fetch the next row. + */ + DBUG_PRINT("info", ("Record %sfound", restart_count == 2 ? "not " : "")); + table->file->ha_rnd_end(); + + DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0); + DBUG_RETURN(error); + } + + DBUG_RETURN(0); +}
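The rnd_next() branch above restarts a full scan at most once, so the right row is still found even when the "cursor" does not start at the beginning of the table. A toy model of that control flow, assuming a plain integer vector in place of the storage engine:

#include <cstdio>
#include <vector>

// Toy model of the rnd_next() retry loop: scan from the current
// position; on EOF restart once from the top, so every row is seen
// even if the scan did not begin at the first record.
static int find_row(const std::vector<int> &tbl, size_t cursor, int wanted)
{
  int restart_count = 0;
  while (restart_count < 2) {
    if (cursor == tbl.size()) {        // HA_ERR_END_OF_FILE
      ++restart_count;
      cursor = 0;                      // ha_rnd_init(1) again
      continue;
    }
    if (tbl[cursor] == wanted)
      return (int)cursor;              // found
    ++cursor;
  }
  return -1;                           // scanned twice: not found
}

int main()
{
  std::vector<int> t = {7, 3, 9, 1};
  printf("9 at %d\n", find_row(t, 2, 9));   // found without restart
  printf("3 at %d\n", find_row(t, 3, 3));   // found after one restart
  printf("5 at %d\n", find_row(t, 0, 5));   // not found at all
  return 0;
}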
+ +/********************************************************** + Row handling primitives for Write_rows_log_event_old + **********************************************************/ + +int Write_rows_log_event_old::do_before_row_operations(TABLE *table) +{ + int error= 0; + + /* + We are using REPLACE semantics and not INSERT IGNORE semantics + when writing rows, that is: new rows replace old rows. We need to + inform the storage engine that it should use this behaviour. + */ + + /* Tell the storage engine that we are using REPLACE semantics. */ + thd->lex->duplicates= DUP_REPLACE; + + /* + Pretend we're executing a REPLACE command: this is needed for + InnoDB and NDB Cluster since they are not (properly) checking the + lex->duplicates flag. + */ + thd->lex->sql_command= SQLCOM_REPLACE; + /* + Do not raise the error flag in case of hitting a duplicate on a + unique attribute. + */ + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + /* + NDB specific: an update from an ndb master is wrapped as Write_rows, + so the event should be applied to replace the slave's row. + */ + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + /* + NDB specific: if an update from an ndb master wrapped as Write_rows + does not find the row, it's assumed that idempotent binlog applying + is taking place; don't raise the error. + */ + table->file->extra(HA_EXTRA_IGNORE_NO_KEY); + /* + TODO: the cluster team (Tomas?) says that it's better if the engine knows + how many rows are going to be inserted, then it can allocate needed memory + from the start. + */ + table->file->ha_start_bulk_insert(0); + /* + We need TIMESTAMP_NO_AUTO_SET, otherwise ha_write_row() will not fill + any TIMESTAMP column with data from the row but will instead use + the event's current time. + As we replicate from TIMESTAMP to TIMESTAMP and the slave has no extra + columns, we know that all TIMESTAMP columns on the slave will receive + explicit data from the row, so TIMESTAMP_NO_AUTO_SET is ok. + When we allow a table without TIMESTAMP to be replicated to a table having + more columns including a TIMESTAMP column, or when we allow a TIMESTAMP + column to be replicated into a BIGINT column and the slave's table has a + TIMESTAMP column, then the slave's TIMESTAMP column will take its value + from set_time() which we called earlier (consistent with SBR). And then in + some cases we won't want TIMESTAMP_NO_AUTO_SET (it will require some code + to analyze if explicit data is provided for the slave's TIMESTAMP columns). + */ + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + return error; +} + +int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error) +{ + int local_error= 0; + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); + /* + Resetting the extra with + table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY); + triggers Bug#27077. + TODO: explain or fix. + */ + if ((local_error= table->file->ha_end_bulk_insert())) + { + table->file->print_error(local_error, MYF(0)); + } + return error? error : local_error; +}
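Taken together, do_before_row_operations(), the per-row do_prepare_row()/do_exec_row() pair, and do_after_row_operations() form a template-method protocol driven by do_apply_event(). A compact model of that calling convention follows; the event class and the integer row type are hypothetical, and the real driver additionally handles table locking, row unpacking, and slave skip logic:

#include <cstdio>
#include <vector>

// Skeleton of the calling convention: one prepare call, a
// prepare/exec pair per row, one cleanup call that receives the
// accumulated error and may keep or replace it.
class OldRowsEvent {
public:
  virtual ~OldRowsEvent() {}
  int apply(const std::vector<int> &rows)
  {
    int error = do_before_row_operations();
    for (size_t i = 0; !error && i < rows.size(); ++i) {
      error = do_prepare_row(rows[i]);
      if (!error)
        error = do_exec_row();
    }
    // Cleanup always runs, even after an error.
    return do_after_row_operations(error);
  }
protected:
  virtual int do_before_row_operations()          { return 0; }
  virtual int do_after_row_operations(int error)  { return error; }
  virtual int do_prepare_row(int)                 { return 0; }
  virtual int do_exec_row()                       { return 0; }
};

// Trivial concrete event that just prints each row.
class PrintRowsEvent : public OldRowsEvent {
  int current;
protected:
  int do_prepare_row(int row) { current = row; return 0; }
  int do_exec_row()           { printf("row %d\n", current); return 0; }
};

int main()
{
  PrintRowsEvent ev;
  return ev.apply({1, 2, 3});
}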
int -Write_rows_log_event_old::do_prepare_row(THD *thd, +Write_rows_log_event_old::do_prepare_row(THD *thd_arg, Relay_log_info const *rli, TABLE *table, uchar const *row_start, @@ -23,9 +981,68 @@ Write_rows_log_event_old::do_prepare_row(THD *thd, return error; } +int Write_rows_log_event_old::do_exec_row(TABLE *table) +{ + DBUG_ASSERT(table != NULL); + int error= replace_record(thd, table, m_master_reclength, m_width); + return error; +} + +/********************************************************** + Row handling primitives for Delete_rows_log_event_old + **********************************************************/ + +int Delete_rows_log_event_old::do_before_row_operations(TABLE *table) +{ + DBUG_ASSERT(m_memory == NULL); + + if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) && + table->s->primary_key < MAX_KEY) + { + /* + We don't need to allocate any memory for m_after_image and + m_key since they are not used. + */ + return 0; + } + + int error= 0; + + if (table->s->keys > 0) + { + m_memory= (uchar*) my_multi_malloc(MYF(MY_WME), + &m_after_image, + (uint) table->s->reclength, + &m_key, + (uint) table->key_info->key_length, + NullS); + } + else + { + m_after_image= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)); + m_memory= (uchar*)m_after_image; + m_key= NULL; + } + if (!m_memory) + return HA_ERR_OUT_OF_MEM; + + return error; +} + +int Delete_rows_log_event_old::do_after_row_operations(TABLE *table, int error) +{ + /* + error= TODO: find out what this should really be; this triggers + close_scan in ndb, returning error? + */ + table->file->ha_index_or_rnd_end(); + my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR)); // Free for multi_malloc + m_memory= NULL; + m_after_image= NULL; + m_key= NULL; + + return error; +} int -Delete_rows_log_event_old::do_prepare_row(THD *thd, +Delete_rows_log_event_old::do_prepare_row(THD *thd_arg, Relay_log_info const *rli, TABLE *table, uchar const *row_start, @@ -57,8 +1074,69 @@ Delete_rows_log_event_old::do_prepare_row(THD *thd, return error; } +int Delete_rows_log_event_old::do_exec_row(TABLE *table) +{ + int error; + DBUG_ASSERT(table != NULL); + + if (!(error= ::find_and_fetch_row(table, m_key))) + { + /* + Now we should have the right row to delete. We are using + record[0] since it is guaranteed to point to a record with the + correct value. + */ + error= table->file->ha_delete_row(table->record[0]); + } + return error; +} + +/********************************************************** + Row handling primitives for Update_rows_log_event_old + **********************************************************/ + +int Update_rows_log_event_old::do_before_row_operations(TABLE *table) +{ + DBUG_ASSERT(m_memory == NULL); + + int error= 0; + + if (table->s->keys > 0) + { + m_memory= (uchar*) my_multi_malloc(MYF(MY_WME), + &m_after_image, + (uint) table->s->reclength, + &m_key, + (uint) table->key_info->key_length, + NullS); + } + else + { + m_after_image= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)); + m_memory= m_after_image; + m_key= NULL; + } + if (!m_memory) + return HA_ERR_OUT_OF_MEM; + + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + + return error; +} + +int Update_rows_log_event_old::do_after_row_operations(TABLE *table, int error) +{ + /* + error= TODO: find out what this should really be; this triggers + close_scan in ndb, returning error? + */ + table->file->ha_index_or_rnd_end(); + my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR)); + m_memory= NULL; + m_after_image= NULL; + m_key= NULL; + + return error; +}
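Both allocation paths above put m_after_image and m_key behind a single owning pointer (m_memory), which is why one my_free() in do_after_row_operations() releases everything. A rough standalone approximation of the my_multi_malloc() layout, using plain malloc and ignoring the per-buffer alignment the real function performs:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Approximation of my_multi_malloc(): one allocation carved into
// several sub-buffers, all freed through the single returned pointer.
static void *multi_malloc(unsigned char **a, size_t a_len,
                          unsigned char **b, size_t b_len)
{
  unsigned char *block = (unsigned char *)malloc(a_len + b_len);
  if (!block)
    return NULL;
  *a = block;            // first sub-buffer at the start of the block
  *b = block + a_len;    // second sub-buffer immediately after it
  return block;
}

int main()
{
  unsigned char *after_image, *key;
  void *memory = multi_malloc(&after_image, 128,   // ~ reclength
                              &key, 32);           // ~ key_length
  if (!memory)
    return 1;
  memset(after_image, 0, 128);
  memset(key, 0, 32);
  printf("one block at %p, key at offset %td\n",
         memory, key - after_image);
  free(memory);   // releases both sub-buffers at once
  return 0;
}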
-int Update_rows_log_event_old::do_prepare_row(THD *thd, +int Update_rows_log_event_old::do_prepare_row(THD *thd_arg, Relay_log_info const *rli, TABLE *table, uchar const *row_start, @@ -101,4 +1179,41 @@ int Update_rows_log_event_old::do_prepare_row(THD *thd, return error; } +int Update_rows_log_event_old::do_exec_row(TABLE *table) +{ + DBUG_ASSERT(table != NULL); + + int error= ::find_and_fetch_row(table, m_key); + if (error) + return error; + + /* + We have to ensure that the new record (i.e., the after image) is + in record[0] and the old record (i.e., the before image) is in + record[1]. This is because some storage engines require it (for + example, the partition engine). + + Since find_and_fetch_row() puts the fetched record (i.e., the old + record) in record[1], we can keep it there. We put the new record + (i.e., the after image) into record[0], and copy the fields that + are on the slave (i.e., in record[1]) into record[0], effectively + overwriting the default values that were put there by the + unpack_row() function. + */ + bmove_align(table->record[0], m_after_image, table->s->reclength); + copy_extra_record_fields(table, m_master_reclength, m_width); + + /* + Now we have the right row to update. The old row (the one we're + looking for) is in record[1] and the new row is in record[0]. + We have also already copied the original values from the slave's + database into the after image delivered from the master. + */ + error= table->file->ha_update_row(table->record[1], table->record[0]); + if (error == HA_ERR_RECORD_IS_THE_SAME) + error= 0; + + return error; +} + #endif diff --git a/sql/log_event_old.h b/sql/log_event_old.h index b6e25b6bc73..ffe87a045cc 100644 --- a/sql/log_event_old.h +++ b/sql/log_event_old.h @@ -20,9 +20,90 @@ Need to include this file at the proper position of log_event.h */ + +class Old_rows_log_event +{ + public: + + virtual ~Old_rows_log_event() {} + + protected: + +#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) + + int do_apply_event(Rows_log_event*,const RELAY_LOG_INFO*); + + /* + Primitive to prepare for a sequence of row executions. + + DESCRIPTION + + Before doing a sequence of do_prepare_row() and do_exec_row() + calls, this member function should be called to prepare for the + entire sequence. Typically, this member function will allocate + space for any buffers that are needed for the two member + functions mentioned above. + + RETURN VALUE + + The member function will return 0 if all went OK, or a non-zero + error code otherwise. + */ + virtual int do_before_row_operations(TABLE *table) = 0; + + /* + Primitive to clean up after a sequence of row executions. + + DESCRIPTION + + After doing a sequence of do_prepare_row() and do_exec_row(), + this member function should be called to clean up and release + any allocated buffers. + */ + virtual int do_after_row_operations(TABLE *table, int error) = 0; + + /* + Primitive to prepare for handling one row in a row-level event. + + DESCRIPTION -class Write_rows_log_event_old : public Write_rows_log_event + The member function prepares for execution of operations needed for one + row in a row-level event by reading data from the buffer containing + the row. No specific interpretation of the data is normally done here, + since SQL thread specific data is not available: that data is made + available to the do_exec function. + + Through the row_end parameter the function returns a pointer to the + start of the next row, or NULL if the preparation failed. Currently, + preparation cannot fail, but don't rely on this behavior. + + RETURN VALUE + Error code, if something went wrong, 0 otherwise. + */ + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, + uchar const *row_start, + uchar const **row_end) = 0; + + /* + Primitive to do the actual execution necessary for a row. + + DESCRIPTION + The member function will do the actual execution needed to handle a row. + + RETURN VALUE + 0 if execution succeeded, 1 if execution failed.
+ + */ + virtual int do_exec_row(TABLE *table) = 0; + +#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ +}; + + +class Write_rows_log_event_old + : public Write_rows_log_event, public Old_rows_log_event { + public: enum { @@ -49,14 +130,26 @@ private: virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int do_prepare_row(THD*, Relay_log_info const*, TABLE*, + // use old definition of do_apply_event() + virtual int do_apply_event(const RELAY_LOG_INFO *rli) + { return Old_rows_log_event::do_apply_event(this,rli); } + + // primitives for old version of do_apply_event() + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, uchar const *row_start, uchar const **row_end); + virtual int do_exec_row(TABLE *table); + #endif }; -class Update_rows_log_event_old : public Update_rows_log_event +class Update_rows_log_event_old + : public Update_rows_log_event, public Old_rows_log_event { + uchar *m_after_image, *m_memory; + public: enum { @@ -67,14 +160,16 @@ public: #if !defined(MYSQL_CLIENT) Update_rows_log_event_old(THD *thd, TABLE *table, ulong table_id, MY_BITMAP const *cols, bool is_transactional) - : Update_rows_log_event(thd, table, table_id, cols, is_transactional) + : Update_rows_log_event(thd, table, table_id, cols, is_transactional), + m_after_image(NULL), m_memory(NULL) { } #endif #if defined(HAVE_REPLICATION) Update_rows_log_event_old(const char *buf, uint event_len, const Format_description_log_event *descr) - : Update_rows_log_event(buf, event_len, descr) + : Update_rows_log_event(buf, event_len, descr), + m_after_image(NULL), m_memory(NULL) { } #endif @@ -83,14 +178,25 @@ private: virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; } #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int do_prepare_row(THD*, Relay_log_info const*, TABLE*, + // use old definition of do_apply_event() + virtual int do_apply_event(const RELAY_LOG_INFO *rli) + { return Old_rows_log_event::do_apply_event(this,rli); } + + // primitives for old version of do_apply_event() + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, uchar const *row_start, uchar const **row_end); + virtual int do_exec_row(TABLE *table); #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */ }; -class Delete_rows_log_event_old : public Delete_rows_log_event +class Delete_rows_log_event_old + : public Delete_rows_log_event, public Old_rows_log_event { + uchar *m_after_image, *m_memory; + public: enum { @@ -101,14 +207,16 @@ public: #if !defined(MYSQL_CLIENT) Delete_rows_log_event_old(THD *thd, TABLE *table, ulong table_id, MY_BITMAP const *cols, bool is_transactional) - : Delete_rows_log_event(thd, table, table_id, cols, is_transactional) + : Delete_rows_log_event(thd, table, table_id, cols, is_transactional), + m_after_image(NULL), m_memory(NULL) { } #endif #if defined(HAVE_REPLICATION) Delete_rows_log_event_old(const char *buf, uint event_len, const Format_description_log_event *descr) - : Delete_rows_log_event(buf, event_len, descr) + : Delete_rows_log_event(buf, event_len, descr), + m_after_image(NULL), m_memory(NULL) { } #endif @@ -117,8 +225,16 @@ private: virtual Log_event_type get_type_code() { return 
(Log_event_type)TYPE_CODE; } #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) - virtual int do_prepare_row(THD*, Relay_log_info const*, TABLE*, + // use old definition of do_apply_event() + virtual int do_apply_event(const RELAY_LOG_INFO *rli) + { return Old_rows_log_event::do_apply_event(this,rli); } + + // primitives for old version of do_apply_event() + virtual int do_before_row_operations(TABLE *table); + virtual int do_after_row_operations(TABLE *table, int error); + virtual int do_prepare_row(THD*, RELAY_LOG_INFO const*, TABLE*, uchar const *row_start, uchar const **row_end); + virtual int do_exec_row(TABLE *table); #endif }; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 34d92a13945..805cf23118f 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -168,7 +168,7 @@ public: protected: Object_creation_ctx() {} - virtual Object_creation_ctx *create_backup_ctx(THD *thd) = 0; + virtual Object_creation_ctx *create_backup_ctx(THD *thd) const = 0; virtual void change_env(THD *thd) const = 0; @@ -204,7 +204,7 @@ protected: CHARSET_INFO *connection_cl); protected: - virtual Object_creation_ctx *create_backup_ctx(THD *thd); + virtual Object_creation_ctx *create_backup_ctx(THD *thd) const; virtual void change_env(THD *thd) const; @@ -1643,7 +1643,7 @@ bool key_cmp_if_same(TABLE *form,const uchar *key,uint index,uint key_length); void key_unpack(String *to,TABLE *form,uint index); bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields); int key_cmp(KEY_PART_INFO *key_part, const uchar *key, uint key_length); -int key_rec_cmp(void *key_info, uchar *a, uchar *b); +extern "C" int key_rec_cmp(void *key_info, uchar *a, uchar *b); bool init_errmessage(void); #endif /* MYSQL_SERVER */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6c8fa7e7ac3..ce3532b3a9f 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -693,7 +693,7 @@ struct st_VioSSLFd *ssl_acceptor_fd; pthread_handler_t signal_hand(void *arg); static void mysql_init_variables(void); static void get_options(int *argc,char **argv); -static my_bool get_one_option(int, const struct my_option *, char *); +extern "C" my_bool mysqld_get_one_option(int, const struct my_option *, char *); static void set_server_version(void); static int init_thread_environment(); static char *get_relative_path(const char *path); @@ -1141,6 +1141,7 @@ void unireg_end(void) extern "C" void unireg_abort(int exit_code) { DBUG_ENTER("unireg_abort"); + if (exit_code) sql_print_error("Aborting\n"); else if (opt_help) @@ -2353,7 +2354,7 @@ static void init_signals(void) sigaddset(&set,SIGTSTP); #endif if (thd_lib_detected != THD_LIB_LT) - sigaddset(&set,THR_SERVER_ALARM); + sigaddset(&set,THR_SERVER_ALARM); if (test_flags & TEST_SIGINT) { // May be SIGINT @@ -2549,7 +2550,9 @@ static void check_data_home(const char *path) /* ARGSUSED */ -static int my_message_sql(uint error, const char *str, myf MyFlags) +extern "C" int my_message_sql(uint error, const char *str, myf MyFlags); + +int my_message_sql(uint error, const char *str, myf MyFlags) { THD *thd; DBUG_ENTER("my_message_sql"); @@ -2612,13 +2615,16 @@ static int my_message_sql(uint error, const char *str, myf MyFlags) #ifndef EMBEDDED_LIBRARY -static void *my_str_malloc_mysqld(size_t size) +extern "C" void *my_str_malloc_mysqld(size_t size); +extern "C" void my_str_free_mysqld(void *ptr); + +void *my_str_malloc_mysqld(size_t size) { return my_malloc(size, MYF(MY_FAE)); } -static void my_str_free_mysqld(void *ptr) +void my_str_free_mysqld(void *ptr) { my_free((uchar*)ptr, MYF(MY_FAE)); } @@ -3418,7 
+3424,7 @@ server."); my_getopt_skip_unknown= 0; if ((ho_error= handle_options(&defaults_argc, &tmp_argv, no_opts, - get_one_option))) + mysqld_get_one_option))) unireg_abort(ho_error); if (defaults_argc) @@ -3902,7 +3908,6 @@ we force server id to 2, but this MySQL server will not act as a slave."); */ if (init_slave() && !active_mi) { - end_thr_alarm(1); // Don't allow alarms unireg_abort(1); } @@ -3910,16 +3915,12 @@ we force server id to 2, but this MySQL server will not act as a slave."); { select_thread_in_use= 0; // Allow 'kill' to work bootstrap(stdin); - end_thr_alarm(1); // Don't allow alarms unireg_abort(bootstrap_error ? 1 : 0); } if (opt_init_file) { if (read_init_file(opt_init_file)) - { - end_thr_alarm(1); // Don't allow alarms unireg_abort(1); - } } execute_ddl_log_recovery(); @@ -6879,6 +6880,7 @@ SHOW_VAR status_vars[]= { {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_NOFLUSH}, {"Open_table_definitions", (char*) &show_table_definitions, SHOW_FUNC}, {"Open_tables", (char*) &show_open_tables, SHOW_FUNC}, + {"Opened_files", (char*) &my_file_total_opened, SHOW_LONG_NOFLUSH}, {"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS}, {"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC}, #ifdef HAVE_QUERY_CACHE @@ -7246,9 +7248,10 @@ static void mysql_init_variables(void) } -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) +my_bool +mysqld_get_one_option(int optid, + const struct my_option *opt __attribute__((unused)), + char *argument) { switch(optid) { case '#': @@ -7744,7 +7747,10 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), /* Handle arguments for multiple key caches */ -static uchar* * +extern "C" uchar **mysql_getopt_value(const char *keyname, uint key_length, + const struct my_option *option); + +uchar* * mysql_getopt_value(const char *keyname, uint key_length, const struct my_option *option) { @@ -7773,11 +7779,19 @@ mysql_getopt_value(const char *keyname, uint key_length, } -static void option_error_reporter(enum loglevel level, const char *format, ...) +extern "C" void option_error_reporter(enum loglevel level, const char *format, ...); + +void option_error_reporter(enum loglevel level, const char *format, ...) { va_list args; va_start(args, format); - vprint_msg_to_log(level, format, args); + + /* Don't print warnings for --loose options during bootstrap */ + if (level == ERROR_LEVEL || !opt_bootstrap || + global_system_variables.log_warnings) + { + vprint_msg_to_log(level, format, args); + } va_end(args); } @@ -7795,7 +7809,7 @@ static void get_options(int *argc,char **argv) my_getopt_skip_unknown= TRUE; if ((ho_error= handle_options(argc, &argv, my_long_options, - get_one_option))) + mysqld_get_one_option))) exit(ho_error); (*argc)++; /* add back one for the progname handle_options removes */ /* no need to do this for argv as we are discarding it. 
*/ @@ -8156,7 +8170,7 @@ void refresh_status(THD *thd) add_to_status(&global_status_var, &thd->status_var); /* Reset thread's status variables */ - bzero((char*) &thd->status_var, sizeof(thd->status_var)); + bzero((uchar*) &thd->status_var, sizeof(thd->status_var)); /* Reset some global variables */ reset_status_vars(); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 948b268d9a3..516da13d857 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -8375,7 +8375,8 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, { /* Read the next record in the same range with prefix after cur_prefix. */ DBUG_ASSERT(cur_prefix != 0); - result= file->index_read(record, cur_prefix, keypart_map, HA_READ_AFTER_KEY); + result= file->index_read_map(record, cur_prefix, keypart_map, + HA_READ_AFTER_KEY); if (result || (file->compare_key(file->end_range) <= 0)) DBUG_RETURN(result); } @@ -8446,9 +8447,10 @@ int QUICK_RANGE_SELECT_GEOM::get_next() } last_range= *(cur_range++); - result= file->index_read(record, last_range->min_key, - last_range->min_keypart_map, - (ha_rkey_function)(last_range->flag ^ GEOM_FLAG)); + result= file->index_read_map(record, last_range->min_key, + last_range->min_keypart_map, + (ha_rkey_function)(last_range->flag ^ + GEOM_FLAG)); if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE) DBUG_RETURN(result); last_range= 0; // Not found, to next range @@ -8579,18 +8581,19 @@ int QUICK_SELECT_DESC::get_next() if (last_range->flag & EQ_RANGE) { - result = file->index_read(record, last_range->max_key, - last_range->max_keypart_map, HA_READ_KEY_EXACT); + result = file->index_read_map(record, last_range->max_key, + last_range->max_keypart_map, + HA_READ_KEY_EXACT); } else { DBUG_ASSERT(last_range->flag & NEAR_MAX || range_reads_after_key(last_range)); - result=file->index_read(record, last_range->max_key, - last_range->max_keypart_map, - ((last_range->flag & NEAR_MAX) ? - HA_READ_BEFORE_KEY : - HA_READ_PREFIX_LAST_OR_PREV)); + result=file->index_read_map(record, last_range->max_key, + last_range->max_keypart_map, + ((last_range->flag & NEAR_MAX) ? + HA_READ_BEFORE_KEY : + HA_READ_PREFIX_LAST_OR_PREV)); } if (result) { @@ -10446,9 +10449,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next() first sub-group with the extended prefix. */ if (!have_min && !have_max && key_infix_len > 0) - result= file->index_read(record, group_prefix, - make_prev_keypart_map(real_key_parts), - HA_READ_KEY_EXACT); + result= file->index_read_map(record, group_prefix, + make_prev_keypart_map(real_key_parts), + HA_READ_KEY_EXACT); result= have_min ? min_res : have_max ? max_res : result; } while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) && @@ -10510,9 +10513,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min() /* Apply the constant equality conditions to the non-group select fields */ if (key_infix_len > 0) { - if ((result= file->index_read(record, group_prefix, - make_prev_keypart_map(real_key_parts), - HA_READ_KEY_EXACT))) + if ((result= file->index_read_map(record, group_prefix, + make_prev_keypart_map(real_key_parts), + HA_READ_KEY_EXACT))) DBUG_RETURN(result); } @@ -10527,9 +10530,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min() { /* Find the first subsequent record without NULL in the MIN/MAX field. 
*/ key_copy(tmp_record, record, index_info, 0); - result= file->index_read(record, tmp_record, - make_keypart_map(real_key_parts), - HA_READ_AFTER_KEY); + result= file->index_read_map(record, tmp_record, + make_keypart_map(real_key_parts), + HA_READ_AFTER_KEY); /* Check if the new record belongs to the current group by comparing its prefix with the group's prefix. If it is from the next group, then the @@ -10584,9 +10587,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max() if (min_max_ranges.elements > 0) result= next_max_in_range(); else - result= file->index_read(record, group_prefix, - make_prev_keypart_map(real_key_parts), - HA_READ_PREFIX_LAST); + result= file->index_read_map(record, group_prefix, + make_prev_keypart_map(real_key_parts), + HA_READ_PREFIX_LAST); DBUG_RETURN(result); } @@ -10637,9 +10640,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() else { /* Load the first key in this group into record. */ - result= file->index_read(record, group_prefix, - make_prev_keypart_map(group_key_parts), - HA_READ_AFTER_KEY); + result= file->index_read_map(record, group_prefix, + make_prev_keypart_map(group_key_parts), + HA_READ_AFTER_KEY); if (result) DBUG_RETURN(result); } @@ -10716,7 +10719,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT; } - result= file->index_read(record, group_prefix, keypart_map, find_flag); + result= file->index_read_map(record, group_prefix, keypart_map, find_flag); if (result) { if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) && @@ -10849,7 +10852,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV; } - result= file->index_read(record, group_prefix, keypart_map, find_flag); + result= file->index_read_map(record, group_prefix, keypart_map, find_flag); if (result) { diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 8851401d21f..6836c53db4e 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -270,9 +270,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) Closed interval: Either The MIN argument is non-nullable, or we have a >= predicate for the MIN argument. */ - error= table->file->index_read(table->record[0], ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_KEY_OR_NEXT); + error= table->file->index_read_map(table->record[0], + ref.key_buff, + make_prev_keypart_map(ref.key_parts), + HA_READ_KEY_OR_NEXT); else { /* @@ -281,9 +282,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) 2) there is a > predicate on it, nullability is irrelevant. We need to scan the next bigger record first. 
*/ - error= table->file->index_read(table->record[0], ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_AFTER_KEY); + error= table->file->index_read_map(table->record[0], + ref.key_buff, + make_prev_keypart_map(ref.key_parts), + HA_READ_AFTER_KEY); /* If the found record is outside the group formed by the search prefix, or there is no such record at all, check if all @@ -305,9 +307,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) key_cmp_if_same(table, ref.key_buff, ref.key, prefix_len))) { DBUG_ASSERT(item_field->field->real_maybe_null()); - error= table->file->index_read(table->record[0], ref.key_buff, - make_prev_keypart_map(ref.key_parts), - HA_READ_KEY_EXACT); + error= table->file->index_read_map(table->record[0], + ref.key_buff, + make_prev_keypart_map(ref.key_parts), + HA_READ_KEY_EXACT); } } } @@ -394,11 +397,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) if (!ref.key_length) error= table->file->index_last(table->record[0]); else - error= table->file->index_read(table->record[0], key_buff, - make_prev_keypart_map(ref.key_parts), - range_fl & NEAR_MAX ? - HA_READ_BEFORE_KEY : - HA_READ_PREFIX_LAST_OR_PREV); + error= table->file->index_read_map(table->record[0], key_buff, + make_prev_keypart_map(ref.key_parts), + range_fl & NEAR_MAX ? + HA_READ_BEFORE_KEY : + HA_READ_PREFIX_LAST_OR_PREV); if (!error && reckey_in_range(1, &ref, item_field->field, conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 308b96d84f8..ab887d5dda0 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -91,10 +91,10 @@ partition_info *partition_info::get_clone() #define MAX_PART_NAME_SIZE 8 char *partition_info::create_default_partition_names(uint part_no, - uint no_parts, + uint no_parts_arg, uint start_no) { - char *ptr= (char*) sql_calloc(no_parts*MAX_PART_NAME_SIZE); + char *ptr= (char*) sql_calloc(no_parts_arg*MAX_PART_NAME_SIZE); char *move_ptr= ptr; uint i= 0; DBUG_ENTER("create_default_partition_names"); @@ -105,11 +105,11 @@ char *partition_info::create_default_partition_names(uint part_no, { my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i))); move_ptr+=MAX_PART_NAME_SIZE; - } while (++i < no_parts); + } while (++i < no_parts_arg); } else { - mem_alloc_error(no_parts*MAX_PART_NAME_SIZE); + mem_alloc_error(no_parts_arg*MAX_PART_NAME_SIZE); } DBUG_RETURN(ptr); } diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index bb5db0106eb..fb609e12dcb 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -362,9 +362,11 @@ Rpl_filter::add_ignore_db(const char* table_spec) ignore_db.push_back(db); } +extern "C" uchar *get_table_key(const uchar *, size_t *, my_bool); +extern "C" void free_table_ent(void* a); -static uchar* get_table_key(const uchar* a, size_t *len, - my_bool __attribute__((unused))) +uchar *get_table_key(const uchar* a, size_t *len, + my_bool __attribute__((unused))) { TABLE_RULE_ENT *e= (TABLE_RULE_ENT *) a; @@ -373,7 +375,7 @@ static uchar* get_table_key(const uchar* a, size_t *len, } -static void free_table_ent(void* a) +void free_table_ent(void* a) { TABLE_RULE_ENT *e= (TABLE_RULE_ENT *) a; diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc index 8b8eb625bc2..65c8e106112 100644 --- a/sql/rpl_record.cc +++ b/sql/rpl_record.cc @@ -157,9 +157,8 @@ pack_row(TABLE *table, MY_BITMAP const* cols, the various member functions of Field and subclasses expect to write. 
- The row is assumed to only consist of the fields for which the - bitset represented by @c arr and @c bits; the other parts of the - record are left alone. + The row is assumed to only consist of the fields for which the corresponding + bit in bitset @c cols is set; the other parts of the record are left alone. At most @c colcnt columns are read: if the table is larger than that, the remaining fields are not filled in. @@ -169,15 +168,12 @@ @param table Table to fill @param colcnt Number of columns to read from record @param row_data Packed row data - @param cols Pointer to columns data to fill in + @param cols Pointer to bitset describing columns to fill in @param row_end Pointer to variable that will hold the value of the one-after-end position for the row @param master_reclength Pointer to variable that will be set to the length of the record on the master side - @param rw_set Pointer to bitmap that holds either the read_set or the - write_set of the table - @retval 0 No error @@ -191,8 +187,7 @@ int unpack_row(Relay_log_info const *rli, TABLE *table, uint const colcnt, uchar const *const row_data, MY_BITMAP const *cols, - uchar const **const row_end, ulong *const master_reclength, - MY_BITMAP* const rw_set, Log_event_type const event_type) + uchar const **const row_end, ulong *const master_reclength) { DBUG_ENTER("unpack_row"); DBUG_ASSERT(row_data); @@ -202,10 +197,6 @@ unpack_row(Relay_log_info const *rli, uchar const *null_ptr= row_data; uchar const *pack_ptr= row_data + master_null_byte_count; - bitmap_clear_all(rw_set); - - empty_record(table); - Field **const begin_ptr = table->field; Field **field_ptr; Field **const end_ptr= begin_ptr + colcnt; @@ -265,7 +256,6 @@ unpack_row(Relay_log_info const *rli, #endif } - bitmap_set_bit(rw_set, f->field_index); null_mask <<= 1; } i++; @@ -307,30 +297,58 @@ unpack_row(Relay_log_info const *rli, else *master_reclength = table->s->reclength; } + + DBUG_RETURN(error); +} - /* - Set properties for remaining columns, if there are any. We let the - corresponding bit in the write_set be set, to write the value if - it was not there already. We iterate over all remaining columns, - even if there were an error, to get as many error messages as - possible. We are still able to return a pointer to the next row, - so redo that. - - This generation of error messages is only relevant when inserting - new rows. - */ - for ( ; *field_ptr ; ++field_ptr) +/** + Fills @c table->record[0] with default values. + + First @c empty_record() is called and then, additionally, fields are + initialized explicitly with a call to @c set_default(). + + For optimization reasons, the explicit initialization can be skipped for + the first @c skip fields. This is useful if later we are going to fill these + fields from another source (e.g. from a Rows replication event). + + If @c check is true, fields are explicitly initialized only if they have + a default value or can be NULL. Otherwise an error is reported. + + @param log Used to report errors. + @param table Table whose record[0] buffer is prepared. + @param skip Number of columns for which default value initialization + should be skipped. + @param check Indicates if errors should be checked when setting default + values. + + @returns 0 on success. + */ +int prepare_record(const Slave_reporting_capability *const log, + TABLE *const table, + const uint skip, const bool check) +{ + DBUG_ENTER("prepare_record"); + + int error= 0; + empty_record(table); + + if (skip >= table->s->fields) // nothing to do + DBUG_RETURN(0); + + /* Explicit initialization of fields */ + + for (Field **field_ptr= table->field+skip ; *field_ptr ; ++field_ptr) { uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG; Field *const f= *field_ptr; - if (event_type == WRITE_ROWS_EVENT && - ((*field_ptr)->flags & mask) == mask) + if (check && ((f->flags & mask) == mask)) { - rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, + DBUG_ASSERT(log); + log->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, "Field `%s` of table `%s`.`%s` " "has no default value and cannot be NULL", - (*field_ptr)->field_name, table->s->db.str, + f->field_name, table->s->db.str, table->s->table_name.str); error = ER_NO_DEFAULT_FOR_FIELD; } }
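prepare_record() splits "fill record[0] with defaults" out of unpack_row(), with a skip count for the leading columns that the row event will overwrite anyway. A simplified model of that contract follows; the field descriptors are hypothetical, and the real function also invokes Field::set_default() per field:

#include <cstdio>
#include <vector>

// Hypothetical field descriptor standing in for Field and its flags.
struct FieldDesc {
  const char *name;
  bool has_default;
  bool nullable;
};

// Model of prepare_record(): default-initialize all fields after the
// first 'skip' ones; with check == true, complain about fields that
// have no default and cannot be NULL (ER_NO_DEFAULT_FOR_FIELD).
static int prepare_record_model(const std::vector<FieldDesc> &fields,
                                size_t skip, bool check)
{
  int error = 0;
  if (skip >= fields.size())        // nothing to do
    return 0;
  for (size_t i = skip; i < fields.size(); ++i) {
    const FieldDesc &f = fields[i];
    if (check && !f.has_default && !f.nullable) {
      fprintf(stderr,
              "Field `%s` has no default value and cannot be NULL\n",
              f.name);
      error = 1;   // keep going so every offending field is reported
    }
    // (real code calls f->set_default() here)
  }
  return error;
}

int main()
{
  std::vector<FieldDesc> fields = {
    {"id",   false, false},   // covered by the replicated row
    {"name", true,  false},   // has a default
    {"note", false, true},    // NULL-able, so no default needed
    {"ts",   false, false},   // would trigger the error when checked
  };
  // Skip the first column: its value comes from the row event.
  return prepare_record_model(fields, 1, true);
}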
diff --git a/sql/rpl_record.h b/sql/rpl_record.h index b817dba0292..0d6ceda7433 100644 --- a/sql/rpl_record.h +++ b/sql/rpl_record.h @@ -16,6 +16,8 @@ #ifndef RPL_RECORD_H #define RPL_RECORD_H +#include <rpl_reporting.h> + #if !defined(MYSQL_CLIENT) size_t pack_row(TABLE* table, MY_BITMAP const* cols, uchar *row_data, const uchar *data); @@ -25,9 +27,11 @@ size_t pack_row(TABLE* table, MY_BITMAP const* cols, int unpack_row(Relay_log_info const *rli, TABLE *table, uint const colcnt, uchar const *const row_data, MY_BITMAP const *cols, - uchar const **const row_end, ulong *const master_reclength, - MY_BITMAP* const rw_set, - Log_event_type const event_type); + uchar const **const row_end, ulong *const master_reclength); + +// Fill table's record[0] with default values. +int prepare_record(const Slave_reporting_capability *const, TABLE *const, + const uint =0, const bool =FALSE); #endif #endif diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index bc552650ac5..d1ce5bf3b7b 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -24,7 +24,7 @@ This function returns the field size in raw bytes based on the type and the encoded field data from the master's raw data. */ -uint32 table_def::calc_field_size(uint col, uchar *master_data) +uint32 table_def::calc_field_size(uint col, uchar *master_data) const { uint32 length; diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index eac3d14dfc6..4fd38022da0 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -217,7 +217,7 @@ public: WL#3915) or needs to advance the pointer for the fields in the raw data from the master to a specific column. */ - uint32 calc_field_size(uint col, uchar *master_data); + uint32 calc_field_size(uint col, uchar *master_data) const; /** Decide if the table definition is compatible with a table. @@ -258,4 +258,44 @@ struct RPL_TABLE_LIST table_def m_tabledef; }; + +/* Anonymous namespace for template functions/classes */ +namespace { + + /* + Smart pointer that will automatically call my_afree (a macro) when + the pointer goes out of scope. This is used so that I do not have + to remember to call my_afree() before each return. There is no + overhead associated with this, since all functions are inline. + + I (Matz) would prefer to use the free function as a template + parameter, but that is not possible when the "function" is a + macro.
+ */ + template <class Obj> + class auto_afree_ptr + { + Obj* m_ptr; + public: + auto_afree_ptr(Obj* ptr) : m_ptr(ptr) { } + ~auto_afree_ptr() { if (m_ptr) my_afree(m_ptr); } + void assign(Obj* ptr) { + /* Only to be called if it hasn't been given a value before. */ + DBUG_ASSERT(m_ptr == NULL); + m_ptr= ptr; + } + Obj* get() { return m_ptr; } + }; + +} + +#define DBUG_PRINT_BITSET(N,FRM,BS) \ + do { \ + char buf[256]; \ + for (uint i = 0 ; i < (BS)->n_bits ; ++i) \ + buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \ + buf[(BS)->n_bits] = '\0'; \ + DBUG_PRINT((N), ((FRM), buf)); \ + } while (0) + #endif /* RPL_UTILITY_H */ diff --git a/sql/set_var.cc b/sql/set_var.cc index ace3bee8342..697de9cda97 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -3266,11 +3266,12 @@ bool sys_var_thd_storage_engine::check(THD *thd, set_var *var) var->save_result.plugin= NULL; if (var->value->result_type() == STRING_RESULT) { - LEX_STRING name; + LEX_STRING engine_name; handlerton *hton; if (!(res=var->value->val_str(&str)) || - !(name.str= (char *)res->ptr()) || !(name.length= res->length()) || - !(var->save_result.plugin= ha_resolve_by_name(thd, &name)) || + !(engine_name.str= (char *)res->ptr()) || + !(engine_name.length= res->length()) || + !(var->save_result.plugin= ha_resolve_by_name(thd, &engine_name)) || !(hton= plugin_data(var->save_result.plugin, handlerton *)) || ha_checktype(thd, ha_legacy_type(hton), 1, 0) != hton) { @@ -3292,13 +3293,13 @@ uchar *sys_var_thd_storage_engine::value_ptr(THD *thd, enum_var_type type, { uchar* result; handlerton *hton; - LEX_STRING *name; + LEX_STRING *engine_name; plugin_ref plugin= thd->variables.*offset; if (type == OPT_GLOBAL) plugin= my_plugin_lock(thd, &(global_system_variables.*offset)); hton= plugin_data(plugin, handlerton*); - name= &hton2plugin[hton->slot]->name; - result= (uchar *) thd->strmake(name->str, name->length); + engine_name= &hton2plugin[hton->slot]->name; + result= (uchar *) thd->strmake(engine_name->str, engine_name->length); if (type == OPT_GLOBAL) plugin_unlock(thd, plugin); return result; @@ -3585,7 +3586,7 @@ void free_key_cache(const char *name, KEY_CACHE *key_cache) } -bool process_key_caches(int (* func) (const char *name, KEY_CACHE *)) +bool process_key_caches(process_key_cache_t func) { I_List_iterator<NAMED_LIST> it(key_caches); NAMED_LIST *element; diff --git a/sql/set_var.h b/sql/set_var.h index 67ec449a02f..eb2c893c89e 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -1128,6 +1128,11 @@ public: }; +extern "C" +{ + typedef int (*process_key_cache_t) (const char *, KEY_CACHE *); +} + /* Named lists (used for keycaches) */ class NAMED_LIST :public ilink @@ -1152,8 +1157,7 @@ public: { my_free((uchar*) name, MYF(0)); } - friend bool process_key_caches(int (* func) (const char *name, - KEY_CACHE *)); + friend bool process_key_caches(process_key_cache_t func); friend void delete_elements(I_List<NAMED_LIST> *list, void (*free_element)(const char*, uchar*)); }; @@ -1201,6 +1205,6 @@ extern sys_var_str sys_var_general_log_path, sys_var_slow_log_path; KEY_CACHE *get_key_cache(LEX_STRING *cache_name); KEY_CACHE *get_or_create_key_cache(const char *name, uint length); void free_key_cache(const char *name, KEY_CACHE *key_cache); -bool process_key_caches(int (* func) (const char *name, KEY_CACHE *)); +bool process_key_caches(process_key_cache_t func); void delete_elements(I_List<NAMED_LIST> *list, void (*free_element)(const char*, uchar*)); diff --git a/sql/sp.cc b/sql/sp.cc index 372aa9c6780..0956561719b 100644 --- a/sql/sp.cc +++ b/sql/sp.cc 
@@ -97,7 +97,8 @@ public: protected: virtual Object_creation_ctx *create_backup_ctx(THD *thd) const { - return new Stored_routine_creation_ctx(thd); + DBUG_ENTER("Stored_routine_creation_ctx::create_backup_ctx"); + DBUG_RETURN(new Stored_routine_creation_ctx(thd)); } private: @@ -348,8 +349,8 @@ db_find_routine_aux(THD *thd, int type, sp_name *name, TABLE *table) key_copy(key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, key, HA_WHOLE_KEY, - HA_READ_KEY_EXACT)) + if (table->file->index_read_idx_map(table->record[0], 0, key, HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) DBUG_RETURN(SP_KEY_NOT_FOUND); DBUG_RETURN(SP_OK); @@ -1182,9 +1183,9 @@ sp_drop_db_routines(THD *thd, char *db) ret= SP_OK; table->file->ha_index_init(0, 1); - if (! table->file->index_read(table->record[0], - (uchar *)table->field[MYSQL_PROC_FIELD_DB]->ptr, - (key_part_map)1, HA_READ_KEY_EXACT)) + if (! table->file->index_read_map(table->record[0], + (uchar *)table->field[MYSQL_PROC_FIELD_DB]->ptr, + (key_part_map)1, HA_READ_KEY_EXACT)) { int nxtres; bool deleted= FALSE; diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc index 84b15ee15c4..cc6ba9ef1d0 100644 --- a/sql/sp_cache.cc +++ b/sql/sp_cache.cc @@ -214,8 +214,12 @@ void sp_cache_flush_obsolete(sp_cache **cp) Internal functions *************************************************************************/ -static uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen, - my_bool first) +extern "C" uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen, + my_bool first); +extern "C" void hash_free_sp_head(void *p); + +uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen, + my_bool first) { sp_head *sp= (sp_head *)ptr; *plen= sp->m_qname.length; @@ -223,8 +227,7 @@ static uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen, } -static void -hash_free_sp_head(void *p) +void hash_free_sp_head(void *p) { sp_head *sp= (sp_head *)p; delete sp; diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 7b2073b8de3..b8535ee9958 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -32,6 +32,8 @@ #include <my_user.h> +extern "C" uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first); + Item_result sp_map_result_type(enum enum_field_types type) { @@ -483,7 +485,6 @@ sp_head::sp_head() */ m_db= m_name= m_qname= str_reset; - extern uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first); DBUG_ENTER("sp_head::sp_head"); m_backpatch.empty(); @@ -1078,6 +1079,12 @@ sp_head::execute(THD *thd) old_arena= thd->stmt_arena; /* + Switch query context. This has to be done early as this is sometimes + allocated through sql_alloc + */ + saved_creation_ctx= m_creation_ctx->set_n_backup(thd); + + /* We have to save/restore this info when we are changing call level to be able properly do close_thread_tables() in instructions. */ @@ -1122,10 +1129,6 @@ sp_head::execute(THD *thd) */ thd->spcont->callers_arena= &backup_arena; - /* Switch query context. 
*/ - - saved_creation_ctx= m_creation_ctx->set_n_backup(thd); - do { sp_instr *i; @@ -3602,8 +3605,8 @@ typedef struct st_sp_table uint8 trg_event_map; } SP_TABLE; -uchar * -sp_table_key(const uchar *ptr, size_t *plen, my_bool first) + +uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first) { SP_TABLE *tab= (SP_TABLE *)ptr; *plen= tab->qname.length; diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 91f1570f653..7e017d7d028 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1815,9 +1815,9 @@ static bool update_user_table(THD *thd, TABLE *table, key_copy((uchar *) user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, - (uchar *) user_key, HA_WHOLE_KEY, - HA_READ_KEY_EXACT)) + if (table->file->index_read_idx_map(table->record[0], 0, + (uchar *) user_key, HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) { my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), MYF(0)); /* purecov: deadcode */ @@ -1908,8 +1908,9 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, user_key, HA_WHOLE_KEY, - HA_READ_KEY_EXACT)) + if (table->file->index_read_idx_map(table->record[0], 0, user_key, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) { /* what == 'N' means revoke */ if (what == 'N') @@ -2131,8 +2132,9 @@ static int replace_db_table(TABLE *table, const char *db, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0],0, user_key, HA_WHOLE_KEY, - HA_READ_KEY_EXACT)) + if (table->file->index_read_idx_map(table->record[0],0, user_key, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) { if (what == 'N') { // no row, no revoke @@ -2348,8 +2350,8 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) col_privs->field[4]->store("",0, &my_charset_latin1); col_privs->file->ha_index_init(0, 1); - if (col_privs->file->index_read(col_privs->record[0], (uchar*) key, - (key_part_map)15, HA_READ_KEY_EXACT)) + if (col_privs->file->index_read_map(col_privs->record[0], (uchar*) key, + (key_part_map)15, HA_READ_KEY_EXACT)) { cols = 0; /* purecov: deadcode */ col_privs->file->ha_index_end(); @@ -2511,8 +2513,8 @@ static int replace_column_table(GRANT_TABLE *g_t, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read(table->record[0], user_key, HA_WHOLE_KEY, - HA_READ_KEY_EXACT)) + if (table->file->index_read_map(table->record[0], user_key, HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) { if (revoke_grant) { @@ -2589,8 +2591,9 @@ static int replace_column_table(GRANT_TABLE *g_t, key_copy(user_key, table->record[0], table->key_info, key_prefix_length); - if (table->file->index_read(table->record[0], user_key, (key_part_map)15, - HA_READ_KEY_EXACT)) + if (table->file->index_read_map(table->record[0], user_key, + (key_part_map)15, + HA_READ_KEY_EXACT)) goto end; /* Scan through all rows with the same host,db,user and table */ @@ -2691,8 +2694,9 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, key_copy(user_key, table->record[0], table->key_info, table->key_info->key_length); - if (table->file->index_read_idx(table->record[0], 0, user_key, HA_WHOLE_KEY, - HA_READ_KEY_EXACT)) + if (table->file->index_read_idx_map(table->record[0], 0, user_key, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) { /* The following should never happen as we first check the in memory @@ -2816,9 
+2820,10 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name, TRUE); store_record(table,record[1]); // store at pos 1 - if (table->file->index_read_idx(table->record[0], 0, - (uchar*) table->field[0]->ptr, HA_WHOLE_KEY, - HA_READ_KEY_EXACT)) + if (table->file->index_read_idx_map(table->record[0], 0, + (uchar*) table->field[0]->ptr, + HA_WHOLE_KEY, + HA_READ_KEY_EXACT)) { /* The following should never happen as we first check the in memory @@ -5015,9 +5020,9 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, table->key_info->key_part[1].store_length); key_copy(user_key, table->record[0], table->key_info, key_prefix_length); - if ((error= table->file->index_read_idx(table->record[0], 0, - user_key, (key_part_map)3, - HA_READ_KEY_EXACT))) + if ((error= table->file->index_read_idx_map(table->record[0], 0, + user_key, (key_part_map)3, + HA_READ_KEY_EXACT))) { if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index a08d1abe438..59d0d245597 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2730,14 +2730,17 @@ void mark_transaction_to_rollback(THD *thd, bool all) pthread_mutex_t LOCK_xid_cache; HASH xid_cache; -static uchar *xid_get_hash_key(const uchar *ptr, size_t *length, +extern "C" uchar *xid_get_hash_key(const uchar *, size_t *, my_bool); +extern "C" void xid_free_hash(void *); + +uchar *xid_get_hash_key(const uchar *ptr, size_t *length, my_bool not_used __attribute__((unused))) { *length=((XID_STATE*)ptr)->xid.key_length(); return ((XID_STATE*)ptr)->xid.key(); } -static void xid_free_hash (void *ptr) +void xid_free_hash(void *ptr) { if (!((XID_STATE*)ptr)->in_thd) my_free((uchar*)ptr, MYF(0)); @@ -3271,13 +3274,13 @@ int THD::binlog_flush_pending_rows_event(bool stmt_end) RETURN VALUE Error code, or 0 if no error. */ -int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query, +int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg, ulong query_len, bool is_trans, bool suppress_use, THD::killed_state killed_status_arg) { DBUG_ENTER("THD::binlog_query"); - DBUG_PRINT("enter", ("qtype=%d, query='%s'", qtype, query)); - DBUG_ASSERT(query && mysql_bin_log.is_open()); + DBUG_PRINT("enter", ("qtype: %d query: '%s'", qtype, query_arg)); + DBUG_ASSERT(query_arg && mysql_bin_log.is_open()); /* If we are not in prelocked mode, mysql_unlock_tables() will be @@ -3333,7 +3336,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query, flush the pending rows event if necessary. */ { - Query_log_event qinfo(this, query, query_len, is_trans, suppress_use, + Query_log_event qinfo(this, query_arg, query_len, is_trans, suppress_use, killed_status_arg); qinfo.flags|= LOG_EVENT_UPDATE_TABLE_MAP_VERSION_F; /* diff --git a/sql/sql_class.h b/sql/sql_class.h index 55987ae8ad9..7117c08a7e1 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1317,10 +1317,10 @@ public: mode, row-based binlogging is used for such cases where two auto_increment columns are inserted. 
*/ - inline void record_first_successful_insert_id_in_cur_stmt(ulonglong id) + inline void record_first_successful_insert_id_in_cur_stmt(ulonglong id_arg) { if (first_successful_insert_id_in_cur_stmt == 0) - first_successful_insert_id_in_cur_stmt= id; + first_successful_insert_id_in_cur_stmt= id_arg; } inline ulonglong read_first_successful_insert_id_in_prev_stmt(void) { diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 575db5b80f7..910f4216452 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -59,8 +59,10 @@ typedef struct my_dblock_st lock_db key. */ -static uchar* lock_db_get_key(my_dblock_t *ptr, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" uchar* lock_db_get_key(my_dblock_t *, size_t *, my_bool not_used); + +uchar* lock_db_get_key(my_dblock_t *ptr, size_t *length, + my_bool not_used __attribute__((unused))) { *length= ptr->name_length; return (uchar*) ptr->name; @@ -71,7 +73,9 @@ static uchar* lock_db_get_key(my_dblock_t *ptr, size_t *length, Free lock_db hash element. */ -static void lock_db_free_element(void *ptr) +extern "C" void lock_db_free_element(void *ptr); + +void lock_db_free_element(void *ptr) { my_free(ptr, MYF(0)); } @@ -155,8 +159,11 @@ typedef struct my_dbopt_st Function we use in the creation of our hash to get key. */ -static uchar* dboptions_get_key(my_dbopt_t *opt, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" uchar* dboptions_get_key(my_dbopt_t *opt, size_t *length, + my_bool not_used); + +uchar* dboptions_get_key(my_dbopt_t *opt, size_t *length, + my_bool not_used __attribute__((unused))) { *length= opt->name_length; return (uchar*) opt->name; @@ -182,7 +189,9 @@ static inline void write_to_binlog(THD *thd, char *query, uint q_len, Function to free dboptions hash element */ -static void free_dbopt(void *dbopt) +extern "C" void free_dbopt(void *dbopt); + +void free_dbopt(void *dbopt) { my_free((uchar*) dbopt, MYF(0)); } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 37f1325228d..303918f42a2 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -760,7 +760,7 @@ void multi_delete::send_error(uint errcode,const char *err) int multi_delete::do_deletes() { - int local_error= 0, counter= 0, error; + int local_error= 0, counter= 0, tmp_error; bool will_batch; DBUG_ENTER("do_deletes"); DBUG_ASSERT(do_delete); @@ -814,11 +814,11 @@ int multi_delete::do_deletes() break; } } - if (will_batch && (error= table->file->end_bulk_delete())) + if (will_batch && (tmp_error= table->file->end_bulk_delete())) { if (!local_error) { - local_error= error; + local_error= tmp_error; table->file->print_error(local_error,MYF(0)); } } diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 300ec7f3c62..0e556f4c551 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -541,8 +541,8 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno, 1); key_copy(key, table->record[0], table->key_info + keyno, key_len); - error= table->file->index_read(table->record[0], - key, keypart_map, ha_rkey_mode); + error= table->file->index_read_map(table->record[0], + key, keypart_map, ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; } diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 023bd1fec94..c1962c8c650 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -294,9 +294,9 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, rkey_id->store((longlong) key_id, TRUE); rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW); - int 
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 023bd1fec94..c1962c8c650 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -294,9 +294,9 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
   rkey_id->store((longlong) key_id, TRUE);
   rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW);
-  int key_res= relations->file->index_read(relations->record[0],
-                                           buff, (key_part_map) 1,
-                                           HA_READ_KEY_EXACT);
+  int key_res= relations->file->index_read_map(relations->record[0],
+                                               buff, (key_part_map) 1,
+                                               HA_READ_KEY_EXACT);
 
   for ( ;
        !key_res && key_id == (int16) rkey_id->val_int() ;
@@ -308,8 +308,8 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
     field->store((longlong) topic_id, TRUE);
     field->get_key_image(topic_id_buff, field->pack_length(), Field::itRAW);
-    if (!topics->file->index_read(topics->record[0], topic_id_buff,
-                                  (key_part_map)1, HA_READ_KEY_EXACT))
+    if (!topics->file->index_read_map(topics->record[0], topic_id_buff,
+                                      (key_part_map)1, HA_READ_KEY_EXACT))
     {
       memorize_variant_topic(thd,topics,count,find_fields,
                              names,name,description,example);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 36623c1a10f..157309a3366 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1398,9 +1398,9 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
 	}
       }
       key_copy((uchar*) key,table->record[0],table->key_info+key_nr,0);
-      if ((error=(table->file->index_read_idx(table->record[1],key_nr,
-                                              (uchar*) key, HA_WHOLE_KEY,
-                                              HA_READ_KEY_EXACT))))
+      if ((error=(table->file->index_read_idx_map(table->record[1],key_nr,
+                                                  (uchar*) key, HA_WHOLE_KEY,
+                                                  HA_READ_KEY_EXACT))))
 	goto err;
     }
     if (info->handle_duplicates == DUP_UPDATE)
@@ -3556,13 +3556,13 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
   char buf[2048];
   String query(buf, sizeof(buf), system_charset_info);
   int result;
-  TABLE_LIST table_list;
+  TABLE_LIST tmp_table_list;
 
-  memset(&table_list, 0, sizeof(table_list));
-  table_list.table = *tables;
+  memset(&tmp_table_list, 0, sizeof(tmp_table_list));
+  tmp_table_list.table = *tables;
   query.length(0);      // Have to zero it since constructor doesn't
 
-  result= store_create_info(thd, &table_list, &query, create_info);
+  result= store_create_info(thd, &tmp_table_list, &query, create_info);
   DBUG_ASSERT(result == 0); /* store_create_info() always return 0 */
 
   thd->binlog_query(THD::STMT_QUERY_TYPE,
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index c83c7a1c894..19264a17d2f 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2826,8 +2826,8 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds,
 
   SYNOPSIS
     set_index_hint_type()
-      type       the kind of hints to be added from now on.
-      clause     the clause to use for hints to be added from now on.
+      type_arg   The kind of hints to be added from now on.
+      clause     The clause to use for hints to be added from now on.
 
   DESCRIPTION
     Used in filling up the tagged hints list.
@@ -2836,10 +2836,10 @@ void st_select_lex::fix_prepare_information(THD *thd, Item **conds,
     Then the context variable index_hint_type can be reset to the
     next hint type.
 */
-void st_select_lex::set_index_hint_type(enum index_hint_type type,
+void st_select_lex::set_index_hint_type(enum index_hint_type type_arg,
                                         index_clause_map clause)
 {
-  current_index_hint_type= type;
+  current_index_hint_type= type_arg;
   current_index_hint_clause= clause;
 }
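A second recurring cleanup in this merge renames parameters and locals (id to id_arg, type to type_arg, error to tmp_error, table_list to tmp_table_list) so they no longer shadow a class member or an outer variable, presumably to keep builds quiet under gcc's -Wshadow and to leave no ambiguity about which name a statement touches. A small sketch of the idea; the member layout here is hypothetical, not st_select_lex's:

#include <cstdio>

struct Select_lex_like
{
  int current_index_hint_type;
  int type;                 // imagine a member named like the old parameter

  // Before the rename the parameter was also called "type", so any use of
  // "type" in the body silently bound to the parameter and -Wshadow
  // complained.  The *_arg suffix removes the ambiguity.
  void set_index_hint_type(int type_arg)
  {
    current_index_hint_type= type_arg;  // clearly the parameter
    type= type_arg;                     // clearly the member
  }
};

int main()
{
  Select_lex_like s;
  s.set_index_hint_type(2);
  std::printf("%d\n", s.current_index_hint_type);
  return 0;
}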
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 0d088063462..6da6517b0f5 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1058,8 +1058,12 @@ err:
 }
 
-static uchar *get_hash_key(const uchar *buff, size_t *length,
-                           my_bool not_used __attribute__((unused)))
+extern "C" uchar *get_plugin_hash_key(const uchar *, size_t *, my_bool);
+extern "C" uchar *get_bookmark_hash_key(const uchar *, size_t *, my_bool);
+
+
+uchar *get_plugin_hash_key(const uchar *buff, size_t *length,
+                           my_bool not_used __attribute__((unused)))
 {
   struct st_plugin_int *plugin= (st_plugin_int *)buff;
   *length= (uint)plugin->name.length;
@@ -1067,8 +1071,8 @@ static uchar *get_hash_key(const uchar *buff, size_t *length,
 }
 
-static uchar *get_bookmark_hash_key(const uchar *buff, size_t *length,
-                                    my_bool not_used __attribute__((unused)))
+uchar *get_bookmark_hash_key(const uchar *buff, size_t *length,
+                             my_bool not_used __attribute__((unused)))
 {
   struct st_bookmark *var= (st_bookmark *)buff;
   *length= var->name_len + 1;
@@ -1115,7 +1119,7 @@ int plugin_init(int *argc, char **argv, int flags)
   for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
   {
     if (hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0,
-                  get_hash_key, NULL, HASH_UNIQUE))
+                  get_plugin_hash_key, NULL, HASH_UNIQUE))
       goto err;
   }
 
@@ -1702,9 +1706,10 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
   table->use_all_columns();
   table->field[0]->store(name->str, name->length, system_charset_info);
-  if (! table->file->index_read_idx(table->record[0], 0,
-                                    (uchar *)table->field[0]->ptr, HA_WHOLE_KEY,
-                                    HA_READ_KEY_EXACT))
+  if (! table->file->index_read_idx_map(table->record[0], 0,
+                                        (uchar *)table->field[0]->ptr,
+                                        HA_WHOLE_KEY,
+                                        HA_READ_KEY_EXACT))
   {
     int error;
     if ((error= table->file->ha_delete_row(table->record[0])))
@@ -2769,8 +2774,10 @@ static void plugin_opt_set_limits(struct my_option *options,
     options->arg_type= OPT_ARG;
 }
 
+extern "C" my_bool get_one_plugin_option(int optid, const struct my_option *,
+                                         char *);
 
-static my_bool get_one_option(int optid __attribute__((unused)),
+my_bool get_one_plugin_option(int optid __attribute__((unused)),
                               const struct my_option *opt,
                               char *argument)
 {
@@ -3078,7 +3085,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
     DBUG_RETURN(-1);
   }
 
-  error= handle_options(argc, &argv, opts, get_one_option);
+  error= handle_options(argc, &argv, opts, get_one_plugin_option);
   (*argc)++; /* add back one for the program name */
 
   if (error)
@@ -3140,7 +3147,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
     DBUG_RETURN(0);
   }
 
-  if (enabled_saved)
+  if (enabled_saved && global_system_variables.log_warnings)
     sql_print_information("Plugin '%s' disabled by command line option",
                           tmp->name.str);
   DBUG_RETURN(1);
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 6f3851b66a1..f7b9e720155 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -11152,10 +11152,10 @@ int safe_index_read(JOIN_TAB *tab)
 {
   int error;
   TABLE *table= tab->table;
-  if ((error=table->file->index_read(table->record[0],
-                                     tab->ref.key_buff,
-                                     make_prev_keypart_map(tab->ref.key_parts),
-                                     HA_READ_KEY_EXACT)))
+  if ((error=table->file->index_read_map(table->record[0],
+                                         tab->ref.key_buff,
+                                         make_prev_keypart_map(tab->ref.key_parts),
+                                         HA_READ_KEY_EXACT)))
     return report_error(table, error);
   return 0;
 }
@@ -11291,10 +11291,10 @@ join_read_const(JOIN_TAB *tab)
       error=HA_ERR_KEY_NOT_FOUND;
     else
     {
-      error=table->file->index_read_idx(table->record[0],tab->ref.key,
-                                        (uchar*) tab->ref.key_buff,
-                                        make_prev_keypart_map(tab->ref.key_parts),
-                                        HA_READ_KEY_EXACT);
+      error=table->file->index_read_idx_map(table->record[0],tab->ref.key,
+                                            (uchar*) tab->ref.key_buff,
+                                            make_prev_keypart_map(tab->ref.key_parts),
+                                            HA_READ_KEY_EXACT);
     }
     if (error)
     {
@@ -11335,10 +11335,10 @@ join_read_key(JOIN_TAB *tab)
       table->status=STATUS_NOT_FOUND;
       return -1;
     }
-    error=table->file->index_read(table->record[0],
-                                  tab->ref.key_buff,
-                                  make_prev_keypart_map(tab->ref.key_parts),
-                                  HA_READ_KEY_EXACT);
+    error=table->file->index_read_map(table->record[0],
+                                      tab->ref.key_buff,
+                                      make_prev_keypart_map(tab->ref.key_parts),
+                                      HA_READ_KEY_EXACT);
     if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
   }
@@ -11364,10 +11364,10 @@ join_read_always_key(JOIN_TAB *tab)
   }
   if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
     return -1;
-  if ((error=table->file->index_read(table->record[0],
-                                     tab->ref.key_buff,
-                                     make_prev_keypart_map(tab->ref.key_parts),
-                                     HA_READ_KEY_EXACT)))
+  if ((error=table->file->index_read_map(table->record[0],
+                                         tab->ref.key_buff,
+                                         make_prev_keypart_map(tab->ref.key_parts),
+                                         HA_READ_KEY_EXACT)))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
@@ -11392,8 +11392,9 @@ join_read_last_key(JOIN_TAB *tab)
     table->file->ha_index_init(tab->ref.key, tab->sorted);
   if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
     return -1;
-  if ((error=table->file->index_read_last(table->record[0],
-                tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts))))
+  if ((error=table->file->index_read_last_map(table->record[0],
+                                              tab->ref.key_buff,
+                                              make_prev_keypart_map(tab->ref.key_parts))))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       return report_error(table, error);
@@ -11934,9 +11935,10 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     if (item->maybe_null)
       group->buff[-1]= (char) group->field->is_null();
   }
-  if (!table->file->index_read(table->record[1],
-                               join->tmp_table_param.group_buff, HA_WHOLE_KEY,
-                               HA_READ_KEY_EXACT))
+  if (!table->file->index_read_map(table->record[1],
+                                   join->tmp_table_param.group_buff,
+                                   HA_WHOLE_KEY,
+                                   HA_READ_KEY_EXACT))
   {						/* Update old record */
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index 911372d5f4e..a780c561ffe 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -516,9 +516,10 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server)
                          system_charset_info);
 
   /* read index until record is that specified in server_name */
-  if ((error= table->file->index_read_idx(table->record[0], 0,
-                                          (uchar *)table->field[0]->ptr, HA_WHOLE_KEY,
-                                          HA_READ_KEY_EXACT)))
+  if ((error= table->file->index_read_idx_map(table->record[0], 0,
+                                              (uchar *)table->field[0]->ptr,
+                                              HA_WHOLE_KEY,
+                                              HA_READ_KEY_EXACT)))
   {
     /* if not found, err */
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@@ -858,9 +859,10 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server)
                          server->server_name_length,
                          system_charset_info);
 
-  if ((error= table->file->index_read_idx(table->record[0], 0,
-                                          (uchar *)table->field[0]->ptr, ~(longlong)0,
-                                          HA_READ_KEY_EXACT)))
+  if ((error= table->file->index_read_idx_map(table->record[0], 0,
+                                              (uchar *)table->field[0]->ptr,
+                                              ~(longlong)0,
+                                              HA_READ_KEY_EXACT)))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       table->file->print_error(error, MYF(0));
@@ -914,9 +916,10 @@ delete_server_record(TABLE *table,
   /* set the field that's the PK to the value we're looking for */
   table->field[0]->store(server_name, server_name_length,
                          system_charset_info);
 
-  if ((error= table->file->index_read_idx(table->record[0], 0,
-                                          (uchar *)table->field[0]->ptr, HA_WHOLE_KEY,
-                                          HA_READ_KEY_EXACT)))
+  if ((error= table->file->index_read_idx_map(table->record[0], 0,
+                                              (uchar *)table->field[0]->ptr,
+                                              HA_WHOLE_KEY,
+                                              HA_READ_KEY_EXACT)))
   {
     if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
       table->file->print_error(error, MYF(0));
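All of the index_read-family calls above move to the *_map variants, which identify the used key prefix with a key_part_map bitmap (bit n set means key part n participates in the lookup) rather than a byte length; HA_WHOLE_KEY is simply the all-bits-set map. A sketch of the bitmap arithmetic; prev_keypart_map() below is an illustrative stand-in for the server's make_prev_keypart_map(), assuming the usual "first n parts" semantics:

#include <cstdio>

typedef unsigned long key_part_map;   // stand-in for the server typedef

// Presumed equivalent of make_prev_keypart_map(): a bitmap covering key
// parts 0..n-1, i.e. "search on the first n parts of the index".
static key_part_map prev_keypart_map(unsigned n)
{
  return (((key_part_map) 1) << n) - 1;
}

int main()
{
  // A three-part index (a, b, c): a lookup on (a, b) uses parts 0 and 1.
  std::printf("first 2 parts -> 0x%lx\n", prev_keypart_map(2));   // 0x3
  // "Whole key" is just every bit set: all defined parts participate.
  std::printf("whole key     -> 0x%lx\n", (key_part_map) ~0UL);
  return 0;
}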
"1" : "0", 1); + } if (table->s->key_block_size) { char *end; @@ -2952,49 +2957,70 @@ static int fill_schema_table_from_frm(THD *thd,TABLE *table, LEX_STRING *table_name, enum enum_schema_tables schema_table_idx) { - TABLE_SHARE share; + TABLE_SHARE *share; TABLE tbl; TABLE_LIST table_list; - char path[FN_REFLEN]; - uint res; + uint res= 0; + int error; + char key[MAX_DBKEY_LENGTH]; + uint key_length; + bzero((char*) &table_list, sizeof(TABLE_LIST)); bzero((char*) &tbl, sizeof(TABLE)); - (void) build_table_filename(path, sizeof(path), db_name->str, - table_name->str, "", 0); - init_tmp_table_share(&share, "", 0, "", path); - if (!(res= open_table_def(thd, &share, OPEN_VIEW))) + + table_list.table_name= table_name->str; + table_list.db= db_name->str; + key_length= create_table_def_key(thd, key, &table_list, 0); + pthread_mutex_lock(&LOCK_open); + share= get_table_share(thd, &table_list, key, + key_length, OPEN_VIEW, &error); + if (!share) { - share.tmp_table= NO_TMP_TABLE; - tbl.s= &share; - table_list.table= &tbl; - if (schema_table->i_s_requested_object & OPEN_TABLE_FROM_SHARE) + res= 0; + goto err; + } + + if (share->is_view) + { + if (schema_table->i_s_requested_object & OPEN_TABLE_ONLY) { - if (share.is_view || - open_table_from_share(thd, &share, table_name->str, 0, - (READ_KEYINFO | COMPUTE_TYPES | - EXTRA_RECORD | OPEN_FRM_FILE_ONLY), - thd->open_options, &tbl, FALSE)) - { - share.tmp_table= INTERNAL_TMP_TABLE; - free_table_share(&share); - return (share.is_view && - !(schema_table->i_s_requested_object & - ~(OPEN_TABLE_FROM_SHARE|OPTIMIZE_I_S_TABLE))); - } + /* skip view processing */ + res= 0; + goto err1; } - table_list.view= (st_lex*) share.is_view; + else if (schema_table->i_s_requested_object & OPEN_VIEW_FULL) + { + /* + tell get_all_tables() to fall back to + open_normal_and_derived_tables() + */ + res= 1; + goto err1; + } + } + + if (share->is_view || + !open_table_from_share(thd, share, table_name->str, 0, + (READ_KEYINFO | COMPUTE_TYPES | + EXTRA_RECORD | OPEN_FRM_FILE_ONLY), + thd->open_options, &tbl, FALSE)) + { + tbl.s= share; + table_list.table= &tbl; + table_list.view= (st_lex*) share->is_view; res= schema_table->process_table(thd, &table_list, table, res, db_name, table_name); - share.tmp_table= INTERNAL_TMP_TABLE; - if (schema_table->i_s_requested_object & OPEN_TABLE_FROM_SHARE) - closefrm(&tbl, true); - else - free_table_share(&share); + closefrm(&tbl, true); + goto err; } - if (res) - thd->clear_error(); - return 0; +err1: + release_table_share(share, RELEASE_NORMAL); + +err: + pthread_mutex_unlock(&LOCK_open); + thd->clear_error(); + return res; } @@ -3033,7 +3059,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) enum enum_schema_tables schema_table_idx; List<LEX_STRING> db_names; List_iterator_fast<LEX_STRING> it(db_names); - COND *partial_cond; + COND *partial_cond= 0; uint derived_tables= lex->derived_tables; int error= 1; Open_tables_state open_tables_state_backup; @@ -3072,20 +3098,35 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) DBUG_PRINT("INDEX VALUES",("db_name='%s', table_name='%s'", lookup_field_vals.db_value.str, lookup_field_vals.table_value.str)); - if (lookup_field_vals.db_value.length && - !lookup_field_vals.wild_db_value && - lookup_field_vals.table_value.length && - !lookup_field_vals.wild_table_value) - partial_cond= 0; - else - partial_cond= make_cond_for_info_schema(cond, tables); - if (lookup_field_vals.db_value.length && !lookup_field_vals.wild_db_value) + if (!lookup_field_vals.wild_db_value && 
!lookup_field_vals.wild_table_value) + { + /* + if lookup value is empty string then + it's impossible table name or db name + */ + if (lookup_field_vals.db_value.str && + !lookup_field_vals.db_value.str[0] || + lookup_field_vals.table_value.str && + !lookup_field_vals.table_value.str[0]) + { + error= 0; + goto err; + } + } + + if (lookup_field_vals.db_value.length && + !lookup_field_vals.wild_db_value) tables->has_db_lookup_value= TRUE; if (lookup_field_vals.table_value.length && !lookup_field_vals.wild_table_value) tables->has_table_lookup_value= TRUE; + if (tables->has_db_lookup_value && tables->has_table_lookup_value) + partial_cond= 0; + else + partial_cond= make_cond_for_info_schema(cond, tables); + tables->table_open_method= table_open_method= get_table_open_method(tables, schema_table, schema_table_idx); @@ -3250,7 +3291,7 @@ bool store_schema_shemata(THD* thd, TABLE *table, LEX_STRING *db_name, } -int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond) +int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond) { /* TODO: fill_schema_shemata() is called when new client is connected. @@ -3276,6 +3317,23 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond) &with_i_schema)) DBUG_RETURN(1); + /* + If we have lookup db value we should check that the database exists + */ + if(lookup_field_vals.db_value.str && !lookup_field_vals.wild_db_value) + { + char path[FN_REFLEN+16]; + uint path_len; + MY_STAT stat_info; + if (!lookup_field_vals.db_value.str[0]) + DBUG_RETURN(0); + path_len= build_table_filename(path, sizeof(path), + lookup_field_vals.db_value.str, "", "", 0); + path[path_len-1]= 0; + if (!my_stat(path,&stat_info,MYF(0))) + DBUG_RETURN(0); + } + List_iterator_fast<LEX_STRING> it(db_names); while ((db_name=it++)) { @@ -3393,6 +3451,12 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, ptr=strxmov(ptr, " row_format=", ha_row_type[(uint) share->row_type], NullS); + if (share->transactional != HA_CHOICE_UNDEF) + { + ptr= strxmov(ptr, " TRANSACTIONAL=", + (share->transactional == HA_CHOICE_YES ? 
"1" : "0"), + NullS); + } #ifdef WITH_PARTITION_STORAGE_ENGINE if (show_table->s->db_type() == partition_hton && show_table->part_info != NULL && @@ -3434,7 +3498,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, case ROW_TYPE_COMPACT: tmp_buff= "Compact"; break; - case ROW_TYPE_PAGES: + case ROW_TYPE_PAGE: tmp_buff= "Paged"; break; } @@ -6398,7 +6462,7 @@ ST_SCHEMA_TABLE schema_tables[]= create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1, 0, 0}, {"COLUMNS", columns_fields_info, create_schema_table, get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0, - OPEN_TABLE_FROM_SHARE|OPTIMIZE_I_S_TABLE}, + OPTIMIZE_I_S_TABLE|OPEN_VIEW_FULL}, {"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table, fill_schema_column_privileges, 0, 0, -1, -1, 0, 0}, {"ENGINES", engines_fields_info, create_schema_table, @@ -6428,7 +6492,7 @@ ST_SCHEMA_TABLE schema_tables[]= {"ROUTINES", proc_fields_info, create_schema_table, fill_schema_proc, make_proc_old_format, 0, -1, -1, 0, 0}, {"SCHEMATA", schema_fields_info, create_schema_table, - fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0, 0}, + fill_schema_schemata, make_schemata_old_format, 0, 1, -1, 0, 0}, {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table, fill_schema_schema_privileges, 0, 0, -1, -1, 0, 0}, {"SESSION_STATUS", variables_fields_info, create_schema_table, @@ -6437,7 +6501,7 @@ ST_SCHEMA_TABLE schema_tables[]= fill_variables, make_old_format, 0, -1, -1, 0, 0}, {"STATISTICS", stat_fields_info, create_schema_table, get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0, - OPEN_TABLE_ONLY|OPEN_TABLE_FROM_SHARE|OPTIMIZE_I_S_TABLE}, + OPEN_TABLE_ONLY|OPTIMIZE_I_S_TABLE}, {"STATUS", variables_fields_info, create_schema_table, fill_status, make_old_format, 0, -1, -1, 1, 0}, {"TABLES", tables_fields_info, create_schema_table, diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 6c7dea6bf22..a8eb7360339 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -795,10 +795,8 @@ copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, const uchar *from_end= (const uchar*) from+from_length; char *to_start= to; uchar *to_end= (uchar*) to+to_length; - int (*mb_wc)(struct charset_info_st *, my_wc_t *, const uchar *, - const uchar *) = from_cs->cset->mb_wc; - int (*wc_mb)(struct charset_info_st *, my_wc_t, uchar *s, uchar *e)= - to_cs->cset->wc_mb; + my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc; + my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb; uint error_count= 0; while (1) @@ -940,10 +938,8 @@ well_formed_copy_nchars(CHARSET_INFO *to_cs, { int cnvres; my_wc_t wc; - int (*mb_wc)(struct charset_info_st *, my_wc_t *, - const uchar *, const uchar *)= from_cs->cset->mb_wc; - int (*wc_mb)(struct charset_info_st *, my_wc_t, - uchar *s, uchar *e)= to_cs->cset->wc_mb; + my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc; + my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb; const uchar *from_end= (const uchar*) from + from_length; uchar *to_end= (uchar*) to + to_length; char *to_start= to; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index eb81e7647eb..b5dbc64e50b 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -5282,6 +5282,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, } if (!(used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE)) create_info->key_block_size= table->s->key_block_size; + if (!(used_fields & HA_CREATE_USED_TRANSACTIONAL)) + create_info->transactional= table->s->transactional; if (!create_info->tablespace && 
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index eb81e7647eb..b5dbc64e50b 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -5282,6 +5282,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
   }
   if (!(used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE))
     create_info->key_block_size= table->s->key_block_size;
+  if (!(used_fields & HA_CREATE_USED_TRANSACTIONAL))
+    create_info->transactional= table->s->transactional;
 
   if (!create_info->tablespace &&
       create_info->storage_media != HA_SM_MEMORY)
   {
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 10bb7844d88..19582af38f4 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -551,9 +551,10 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
     goto err;
   table->use_all_columns();
   table->field[0]->store(exact_name_str, exact_name_len, &my_charset_bin);
-  if (!table->file->index_read_idx(table->record[0], 0,
-                                   (uchar*) table->field[0]->ptr, HA_WHOLE_KEY,
-                                   HA_READ_KEY_EXACT))
+  if (!table->file->index_read_idx_map(table->record[0], 0,
+                                       (uchar*) table->field[0]->ptr,
+                                       HA_WHOLE_KEY,
+                                       HA_READ_KEY_EXACT))
   {
     int error;
     if ((error = table->file->ha_delete_row(table->record[0])))
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index ddd63da9adb..af6dd590679 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -857,6 +857,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
 %token  OUT_SYM                       /* SQL-2003-R */
 %token  OWNER_SYM
 %token  PACK_KEYS_SYM
+%token  PAGE_SYM
 %token  PARAM_MARKER
 %token  PARSER_SYM
 %token  PARTIAL                       /* SQL-2003-N */
@@ -1009,6 +1010,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
 %token  TO_SYM                        /* SQL-2003-R */
 %token  TRAILING                      /* SQL-2003-R */
 %token  TRANSACTION_SYM
+%token  TRANSACTIONAL_SYM
 %token  TRIGGERS_SYM
 %token  TRIGGER_SYM                   /* SQL-2003-R */
 %token  TRIM                          /* SQL-2003-N */
@@ -4364,6 +4366,12 @@ create_table_option:
             Lex->create_info.used_fields|= HA_CREATE_USED_KEY_BLOCK_SIZE;
             Lex->create_info.key_block_size= $3;
           }
+        | TRANSACTIONAL_SYM opt_equal ulong_num
+          {
+            Lex->create_info.used_fields|= HA_CREATE_USED_TRANSACTIONAL;
+            Lex->create_info.transactional= ($3 != 0 ? HA_CHOICE_YES :
+                                             HA_CHOICE_NO);
+          }
         ;
 
 default_charset:
@@ -4442,7 +4450,8 @@ row_types:
         | DYNAMIC_SYM    { $$= ROW_TYPE_DYNAMIC; }
         | COMPRESSED_SYM { $$= ROW_TYPE_COMPRESSED; }
         | REDUNDANT_SYM  { $$= ROW_TYPE_REDUNDANT; }
-        | COMPACT_SYM    { $$= ROW_TYPE_COMPACT; };
+        | COMPACT_SYM    { $$= ROW_TYPE_COMPACT; }
+        | PAGE_SYM       { $$= ROW_TYPE_PAGE; };
 
 merge_insert_types:
         NO_SYM           { $$= MERGE_INSERT_DISABLED; }
@@ -10073,6 +10082,7 @@ keyword_sp:
         | ONE_SHOT_SYM             {}
         | ONE_SYM                  {}
         | PACK_KEYS_SYM            {}
+        | PAGE_SYM                 {}
         | PARTIAL                  {}
         | PARTITIONING_SYM         {}
         | PARTITIONS_SYM           {}
@@ -10142,6 +10152,7 @@ keyword_sp:
         | TEXT_SYM                 {}
        | THAN_SYM                 {}
        | TRANSACTION_SYM          {}
+       | TRANSACTIONAL_SYM        {}
        | TRIGGERS_SYM             {}
        | TIMESTAMP                {}
        | TIMESTAMP_ADD            {}
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index 9ffc5fd127f..c03365cfc2b 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -265,10 +265,8 @@ uint strconvert(CHARSET_INFO *from_cs, const char *from,
   my_wc_t wc;
   char *to_start= to;
   uchar *to_end= (uchar*) to + to_length - 1;
-  int (*mb_wc)(struct charset_info_st *, my_wc_t *, const uchar *,
-               const uchar *)= from_cs->cset->mb_wc;
-  int (*wc_mb)(struct charset_info_st *, my_wc_t, uchar *s, uchar *e)=
-    to_cs->cset->wc_mb;
+  my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
+  my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
   uint error_count= 0;
 
   while (1)
diff --git a/sql/table.cc b/sql/table.cc
index 27f9ccc418e..c1d8e3abe94 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -51,11 +51,13 @@ inline bool is_system_table_name(const char *name, uint length);
 
 Object_creation_ctx *Object_creation_ctx::set_n_backup(THD *thd)
 {
-  Object_creation_ctx *backup_ctx= create_backup_ctx(thd);
+  Object_creation_ctx *backup_ctx;
+  DBUG_ENTER("Object_creation_ctx::set_n_backup");
 
+  backup_ctx= create_backup_ctx(thd);
   change_env(thd);
 
-  return backup_ctx;
+  DBUG_RETURN(backup_ctx);
 }
 
 void Object_creation_ctx::restore_env(THD *thd,
                                      Object_creation_ctx *backup_ctx)
@@ -84,7 +86,7 @@ Default_object_creation_ctx::Default_object_creation_ctx(
 { }
 
 Object_creation_ctx *
-Default_object_creation_ctx::create_backup_ctx(THD *thd)
+Default_object_creation_ctx::create_backup_ctx(THD *thd) const
 {
   return new Default_object_creation_ctx(thd);
 }
@@ -703,7 +705,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
   if (!head[32])				// New frm file in 3.23
   {
     share->avg_row_length= uint4korr(head+34);
-    share-> row_type= (row_type) head[40];
+    share->transactional= (ha_choice) head[39];
+    share->row_type= (row_type) head[40];
     share->table_charset= get_charset((uint) head[38],MYF(0));
     share->null_field_first= 1;
   }
@@ -1750,7 +1753,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
   }
 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
-  if (share->partition_info_len)
+  if (share->partition_info_len && outparam->file)
  {
   /*
     In this execution we must avoid calling thd->change_item_tree since
@@ -2433,6 +2436,7 @@ File create_frm(THD *thd, const char *name, const char *db,
     int4store(fileinfo+34,create_info->avg_row_length);
     fileinfo[38]= (create_info->default_table_charset ?
 		   create_info->default_table_charset->number : 0);
+    fileinfo[39]= (uchar) create_info->transactional;
     fileinfo[40]= (uchar) create_info->row_type;
     /* Next few bytes were for RAID support */
     fileinfo[41]= 0;
@@ -4565,11 +4569,11 @@ Item_subselect *TABLE_LIST::containing_subselect()
     FALSE no errors found
     TRUE found and reported an error.
 */
-bool TABLE_LIST::process_index_hints(TABLE *table)
+bool TABLE_LIST::process_index_hints(TABLE *tbl)
 {
   /* initialize the result variables */
-  table->keys_in_use_for_query= table->keys_in_use_for_group_by=
-    table->keys_in_use_for_order_by= table->s->keys_in_use;
+  tbl->keys_in_use_for_query= tbl->keys_in_use_for_group_by=
+    tbl->keys_in_use_for_order_by= tbl->s->keys_in_use;
 
   /* index hint list processing */
   if (index_hints)
@@ -4621,8 +4625,8 @@ bool TABLE_LIST::process_index_hints(TABLE *table)
         Check if an index with the given name exists and get his offset in
         the keys bitmask for the table
       */
-      if (table->s->keynames.type_names == 0 ||
-          (pos= find_type(&table->s->keynames, hint->key_name.str,
+      if (tbl->s->keynames.type_names == 0 ||
+          (pos= find_type(&tbl->s->keynames, hint->key_name.str,
                           hint->key_name.length, 1)) <= 0)
       {
        my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), hint->key_name.str, alias);
@@ -4658,7 +4662,7 @@ bool TABLE_LIST::process_index_hints(TABLE *table)
        !index_order[INDEX_HINT_FORCE].is_clear_all() ||
        !index_group[INDEX_HINT_FORCE].is_clear_all())
    {
-      table->force_index= TRUE;
+      tbl->force_index= TRUE;
      index_join[INDEX_HINT_USE].merge(index_join[INDEX_HINT_FORCE]);
      index_order[INDEX_HINT_USE].merge(index_order[INDEX_HINT_FORCE]);
      index_group[INDEX_HINT_USE].merge(index_group[INDEX_HINT_FORCE]);
@@ -4666,20 +4670,20 @@ bool TABLE_LIST::process_index_hints(TABLE *table)
 
    /* apply USE INDEX */
    if (!index_join[INDEX_HINT_USE].is_clear_all() || have_empty_use_join)
-      table->keys_in_use_for_query.intersect(index_join[INDEX_HINT_USE]);
+      tbl->keys_in_use_for_query.intersect(index_join[INDEX_HINT_USE]);
    if (!index_order[INDEX_HINT_USE].is_clear_all() || have_empty_use_order)
-      table->keys_in_use_for_order_by.intersect (index_order[INDEX_HINT_USE]);
+      tbl->keys_in_use_for_order_by.intersect (index_order[INDEX_HINT_USE]);
    if (!index_group[INDEX_HINT_USE].is_clear_all() || have_empty_use_group)
-      table->keys_in_use_for_group_by.intersect (index_group[INDEX_HINT_USE]);
+      tbl->keys_in_use_for_group_by.intersect (index_group[INDEX_HINT_USE]);
 
    /* apply IGNORE INDEX */
-    table->keys_in_use_for_query.subtract (index_join[INDEX_HINT_IGNORE]);
-    table->keys_in_use_for_order_by.subtract (index_order[INDEX_HINT_IGNORE]);
-    table->keys_in_use_for_group_by.subtract (index_group[INDEX_HINT_IGNORE]);
+    tbl->keys_in_use_for_query.subtract (index_join[INDEX_HINT_IGNORE]);
+    tbl->keys_in_use_for_order_by.subtract (index_order[INDEX_HINT_IGNORE]);
+    tbl->keys_in_use_for_group_by.subtract (index_group[INDEX_HINT_IGNORE]);
  }
 
  /* make sure covering_keys don't include indexes disabled with a hint */
-  table->covering_keys.intersect(table->keys_in_use_for_query);
+  tbl->covering_keys.intersect(tbl->keys_in_use_for_query);
  return 0;
 }
diff --git a/sql/table.h b/sql/table.h
index ffe7350f250..79d8cfbdff9 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -310,6 +310,7 @@ typedef struct st_table_share
   }
   enum row_type row_type;		/* How rows are stored */
   enum tmp_table_type tmp_table;
+  enum ha_choice transactional;
 
   uint ref_count;                       /* How many TABLE objects uses this */
   uint open_count;			/* Number of tables in open list */
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 14192d06978..9eb38e97827 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1854,8 +1854,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   */
   (void)table->file->ha_index_init(0, 1);
 
-  if (table->file->index_read(table->record[0], table->field[0]->ptr,
-                              HA_WHOLE_KEY, HA_READ_KEY_EXACT))
+  if (table->file->index_read_map(table->record[0], table->field[0]->ptr,
+                                  HA_WHOLE_KEY, HA_READ_KEY_EXACT))
   {
 #ifdef EXTRA_DEBUG
     /*
@@ -1881,8 +1881,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   table->field[0]->store((longlong) tzid, TRUE);
   (void)table->file->ha_index_init(0, 1);
 
-  if (table->file->index_read(table->record[0], table->field[0]->ptr,
-                              HA_WHOLE_KEY, HA_READ_KEY_EXACT))
+  if (table->file->index_read_map(table->record[0], table->field[0]->ptr,
+                                  HA_WHOLE_KEY, HA_READ_KEY_EXACT))
   {
     sql_print_error("Can't find description of time zone '%u'", tzid);
     goto end;
@@ -1908,8 +1908,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   table->field[0]->store((longlong) tzid, TRUE);
   (void)table->file->ha_index_init(0, 1);
 
-  res= table->file->index_read(table->record[0], table->field[0]->ptr,
-                               (key_part_map)1, HA_READ_KEY_EXACT);
+  res= table->file->index_read_map(table->record[0], table->field[0]->ptr,
+                                   (key_part_map)1, HA_READ_KEY_EXACT);
   while (!res)
   {
     ttid= (uint)table->field[1]->val_int();
@@ -1979,8 +1979,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
   table->field[0]->store((longlong) tzid, TRUE);
   (void)table->file->ha_index_init(0, 1);
 
-  res= table->file->index_read(table->record[0], table->field[0]->ptr,
-                               (key_part_map)1, HA_READ_KEY_EXACT);
+  res= table->file->index_read_map(table->record[0], table->field[0]->ptr,
+                                   (key_part_map)1, HA_READ_KEY_EXACT);
   while (!res)
   {
     ttime= (my_time_t)table->field[1]->val_int();
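The table.cc and table.h hunks persist the new TRANSACTIONAL choice in the previously spare .frm header byte 39, one byte before row_type at offset 40, as a tri-state so that "never specified" stays distinguishable from an explicit 0. A sketch of the round trip; the enumerator values below are assumed for illustration, not quoted from handler.h:

#include <cstdio>

// Illustrative mirror of the server's ha_choice tri-state.
enum ha_choice_ish { CHOICE_UNDEF= 0, CHOICE_NO, CHOICE_YES };

int main()
{
  unsigned char fileinfo[64]= {0};          // stand-in for the .frm header

  // create_frm() side: store the tri-state into header byte 39,
  // right before the row_type byte at offset 40.
  ha_choice_ish transactional= CHOICE_YES;
  fileinfo[39]= (unsigned char) transactional;

  // open_binary_frm() side: read it back when the table is reopened.
  ha_choice_ish restored= (ha_choice_ish) fileinfo[39];
  std::printf("TRANSACTIONAL restored as %d (2 == yes)\n", restored);
  return 0;
}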
diff --git a/sql/unireg.h b/sql/unireg.h
index b368eee6f0e..f0b4a88c7f8 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -155,11 +155,40 @@
 #define OPEN_VIEW		8192	/* Allow open on view */
 #define OPEN_VIEW_NO_PARSE	16384	/* Open frm only if it's a view,
                                            but do not parse view itself */
-#define OPEN_FRM_FILE_ONLY     32768   /* Open frm file only */
-#define OPEN_TABLE_ONLY        OPEN_FRM_FILE_ONLY*2    /* Open view only */
-#define OPEN_VIEW_ONLY         OPEN_TABLE_ONLY*2       /* Open table only */
-#define OPEN_TABLE_FROM_SHARE  OPEN_VIEW_ONLY*2        /* For I_S tables*/
-#define OPTIMIZE_I_S_TABLE     OPEN_TABLE_FROM_SHARE*2 /* For I_S tables*/
+/*
+  This flag is used in function get_all_tables() which fills
+  I_S tables with data which are retrieved from frm files and storage engine
+  The flag means that we need to open FRM file only to get necessary data.
+*/
+#define OPEN_FRM_FILE_ONLY     32768
+/*
+  This flag is used in function get_all_tables() which fills
+  I_S tables with data which are retrieved from frm files and storage engine
+  The flag means that we need to process tables only to get necessary data.
+  Views are not processed.
+*/
+#define OPEN_TABLE_ONLY        OPEN_FRM_FILE_ONLY*2
+/*
+  This flag is used in function get_all_tables() which fills
+  I_S tables with data which are retrieved from frm files and storage engine
+  The flag means that we need to process views only to get necessary data.
+  Tables are not processed.
+*/
+#define OPEN_VIEW_ONLY         OPEN_TABLE_ONLY*2
+/*
+  This flag is used in function get_all_tables() which fills
+  I_S tables with data which are retrieved from frm files and storage engine.
+  The flag means that we need to open a view using
+  open_normal_and_derived_tables() function.
+*/
+#define OPEN_VIEW_FULL         OPEN_VIEW_ONLY*2
+/*
+  This flag is used in function get_all_tables() which fills
+  I_S tables with data which are retrieved from frm files and storage engine.
+  The flag means that I_S table uses optimization algorithm.
+*/
+#define OPTIMIZE_I_S_TABLE     OPEN_VIEW_FULL*2
+
 #define SC_INFO_LENGTH 4		/* Form format constant */
 #define TE_INFO_LENGTH 3
 #define MTYP_NOEMPTY_BIT 128
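The rewritten unireg.h block keeps the file's established convention of defining each OPEN_* flag as the previous one times two, so every flag is a distinct bit that callers such as fill_schema_table_from_frm() test against i_s_requested_object with a bitwise AND. Mirrored here with illustrative names (and parenthesized macros for safety):

#include <cstdio>

#define FLAG_FRM_FILE_ONLY 32768
#define FLAG_TABLE_ONLY    (FLAG_FRM_FILE_ONLY*2)
#define FLAG_VIEW_ONLY     (FLAG_TABLE_ONLY*2)
#define FLAG_VIEW_FULL     (FLAG_VIEW_ONLY*2)
#define FLAG_OPTIMIZE      (FLAG_VIEW_FULL*2)

int main()
{
  unsigned requested= FLAG_TABLE_ONLY | FLAG_OPTIMIZE;

  // Membership tests are plain bitwise ANDs, exactly as the patched
  // I_S code checks its open-method flags.
  if (requested & FLAG_TABLE_ONLY)
    std::printf("process base tables, skip views\n");
  if (!(requested & FLAG_VIEW_FULL))
    std::printf("no full view open needed\n");
  return 0;
}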