Diffstat (limited to 'storage')
40 files changed, 334 insertions, 802 deletions
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp index 0c56b3e6614..9b83a1c93a5 100644 --- a/storage/connect/tabtbl.cpp +++ b/storage/connect/tabtbl.cpp @@ -650,7 +650,7 @@ bool TDBTBM::IsLocal(PTABLE tbp) return ((!stricmp(tdbp->Host, "localhost") || !strcmp(tdbp->Host, "127.0.0.1")) && - tdbp->Port == (int)GetDefaultPort()); + (int) tdbp->Port == (int)GetDefaultPort()); } // end of IsLocal /***********************************************************************/ diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 83e64c854d6..e3ac8fb7605 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -5041,7 +5041,7 @@ btr_cur_del_mark_set_clust_rec( << rec_printer(rec, offsets).str()); if (dict_index_is_online_ddl(index)) { - row_log_table_delete(rec, entry, index, offsets, NULL); + row_log_table_delete(rec, index, offsets, NULL); } row_upd_rec_sys_fields(rec, page_zip, index, offsets, trx, roll_ptr); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 84ab2015348..53f92927b28 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -4161,7 +4161,8 @@ buf_page_get_gen( ulint retries = 0; buf_pool_t* buf_pool = buf_pool_get(page_id); - ut_ad(mtr->is_active()); + ut_ad((mtr == NULL) == (mode == BUF_EVICT_IF_IN_POOL)); + ut_ad(!mtr || mtr->is_active()); ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH) || (rw_latch == RW_SX_LATCH) @@ -4173,29 +4174,31 @@ buf_page_get_gen( #ifdef UNIV_DEBUG switch (mode) { + case BUF_EVICT_IF_IN_POOL: + /* After DISCARD TABLESPACE, the tablespace would not exist, + but in IMPORT TABLESPACE, PageConverter::operator() must + replace any old pages, which were not evicted during DISCARD. + Skip the assertion on space_page_size. 
*/ + break; + default: + ut_error; case BUF_GET_NO_LATCH: ut_ad(rw_latch == RW_NO_LATCH); - break; + /* fall through */ case BUF_GET: case BUF_GET_IF_IN_POOL: case BUF_PEEK_IF_IN_POOL: case BUF_GET_IF_IN_POOL_OR_WATCH: case BUF_GET_POSSIBLY_FREED: - break; - default: - ut_error; + bool found; + const page_size_t& space_page_size + = fil_space_get_page_size(page_id.space(), &found); + ut_ad(found); + ut_ad(page_size.equals_to(space_page_size)); } - - bool found; - const page_size_t& space_page_size - = fil_space_get_page_size(page_id.space(), &found); - - ut_ad(found); - - ut_ad(page_size.equals_to(space_page_size)); #endif /* UNIV_DEBUG */ - ut_ad(!ibuf_inside(mtr) + ut_ad(!mtr || !ibuf_inside(mtr) || ibuf_page_low(page_id, page_size, FALSE, file, line, NULL)); buf_pool->stat.n_page_gets++; @@ -4283,13 +4286,15 @@ loop: rw_lock_x_unlock(hash_lock); } - if (mode == BUF_GET_IF_IN_POOL - || mode == BUF_PEEK_IF_IN_POOL - || mode == BUF_GET_IF_IN_POOL_OR_WATCH) { - + switch (mode) { + case BUF_GET_IF_IN_POOL: + case BUF_GET_IF_IN_POOL_OR_WATCH: + case BUF_PEEK_IF_IN_POOL: + case BUF_EVICT_IF_IN_POOL: +#ifdef UNIV_SYNC_DEBUG ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)); ut_ad(!rw_lock_own(hash_lock, RW_LOCK_S)); - +#endif /* UNIV_SYNC_DEBUG */ return(NULL); } @@ -4383,8 +4388,10 @@ loop: got_block: - if (mode == BUF_GET_IF_IN_POOL || mode == BUF_PEEK_IF_IN_POOL) { - + switch (mode) { + case BUF_GET_IF_IN_POOL: + case BUF_PEEK_IF_IN_POOL: + case BUF_EVICT_IF_IN_POOL: buf_page_t* fix_page = &fix_block->page; BPageMutex* fix_mutex = buf_page_get_mutex(fix_page); mutex_enter(fix_mutex); @@ -4416,6 +4423,20 @@ got_block: os_thread_sleep(WAIT_FOR_WRITE); goto loop; } + + if (UNIV_UNLIKELY(mode == BUF_EVICT_IF_IN_POOL)) { +evict_from_pool: + ut_ad(!fix_block->page.oldest_modification); + buf_pool_mutex_enter(buf_pool); + buf_block_unfix(fix_block); + + if (!buf_LRU_free_page(&fix_block->page, true)) { + ut_ad(0); + } + + buf_pool_mutex_exit(buf_pool); + return(NULL); + } break; case BUF_BLOCK_ZIP_PAGE: @@ -4448,6 +4469,10 @@ got_block: goto loop; } + if (UNIV_UNLIKELY(mode == BUF_EVICT_IF_IN_POOL)) { + goto evict_from_pool; + } + /* Buffer-fix the block so that it cannot be evicted or relocated while we are attempting to allocate an uncompressed page. */ diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 44a3c19235b..043328902ee 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -3879,24 +3879,16 @@ FlushObserver::notify_remove( void FlushObserver::flush() { - buf_remove_t buf_remove; - - if (m_interrupted) { - buf_remove = BUF_REMOVE_FLUSH_NO_WRITE; - } else { - buf_remove = BUF_REMOVE_FLUSH_WRITE; - - if (m_stage != NULL) { - ulint pages_to_flush = - buf_flush_get_dirty_pages_count( - m_space_id, this); - - m_stage->begin_phase_flush(pages_to_flush); - } + if (!m_interrupted && m_stage) { + m_stage->begin_phase_flush(buf_flush_get_dirty_pages_count( + m_space_id, this)); } - /* Flush or remove dirty pages. */ - buf_LRU_flush_or_remove_pages(m_space_id, buf_remove, m_trx); + /* MDEV-14317 FIXME: Discard all changes to only those pages + that will be freed by the clean-up of the ALTER operation. + (Maybe, instead of buf_pool->flush_list, use a dedicated list + for pages on which redo logging has been disabled.) */ + buf_LRU_flush_or_remove_pages(m_space_id, m_trx); /* Wait for all dirty pages were flushed. 
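The new BUF_EVICT_IF_IN_POOL mode never returns a block: it only removes a clean cached copy of the page from the buffer pool, so callers invoke buf_page_get_gen() purely for that side effect and may pass mtr = NULL. A minimal sketch of such a call, modelled on the one PageConverter::operator() gains later in this patch (the wrapper name is illustrative):

	/* Evict a stale cached copy of a page, e.g. one that survived
	DISCARD TABLESPACE.  BUF_EVICT_IF_IN_POOL always yields NULL
	and takes no latch. */
	static void evict_if_cached(const page_id_t&	page_id,
				    const page_size_t&	page_size)
	{
		buf_page_get_gen(page_id, page_size, RW_NO_LATCH, NULL,
				 BUF_EVICT_IF_IN_POOL,
				 __FILE__, __LINE__, NULL, NULL);
	}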
*/ for (ulint i = 0; i < srv_buf_pool_instances; i++) { diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 2137760e815..9d0d9627d26 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -586,8 +586,8 @@ rescan: /* If flush observer is NULL, flush page for space id, or flush page for flush observer. */ - if ((observer != NULL && observer != bpage->flush_observer) - || (observer == NULL && id != bpage->id.space())) { + if (observer ? (observer != bpage->flush_observer) + : (id != bpage->id.space())) { /* Skip this block, as it does not belong to the target space. */ @@ -657,24 +657,27 @@ rescan: return(all_freed ? DB_SUCCESS : DB_FAIL); } -/******************************************************************//** -Remove or flush all the dirty pages that belong to a given tablespace +/** Remove or flush all the dirty pages that belong to a given tablespace inside a specific buffer pool instance. The pages will remain in the LRU list and will be evicted from the LRU list as they age and move towards -the tail of the LRU list. */ +the tail of the LRU list. +@param[in,out] buf_pool buffer pool +@param[in] id tablespace identifier +@param[in] observer flush observer, + or NULL if the files should not be written to +@param[in] trx transaction (to check for interrupt), + or NULL if the files should not be written to +*/ static void buf_flush_dirty_pages( -/*==================*/ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint id, /*!< in: space id */ - FlushObserver* observer, /*!< in: flush observer */ - bool flush, /*!< in: flush to disk if true otherwise - remove the pages without flushing */ - const trx_t* trx) /*!< to check if the operation must - be interrupted */ + buf_pool_t* buf_pool, + ulint id, + FlushObserver* observer, + const trx_t* trx) { dberr_t err; + bool flush = trx != NULL; do { buf_pool_mutex_enter(buf_pool); @@ -708,238 +711,30 @@ buf_flush_dirty_pages( || buf_pool_get_dirty_pages_count(buf_pool, id, observer) == 0); } -/******************************************************************//** -Remove all pages that belong to a given tablespace inside a specific -buffer pool instance when we are DISCARDing the tablespace. */ -static -void -buf_LRU_remove_all_pages( -/*=====================*/ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint id) /*!< in: space id */ -{ - buf_page_t* bpage; - ibool all_freed; - -scan_again: - buf_pool_mutex_enter(buf_pool); - - all_freed = TRUE; - - for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); - bpage != NULL; - /* No op */) { - - rw_lock_t* hash_lock; - buf_page_t* prev_bpage; - BPageMutex* block_mutex; - - ut_a(buf_page_in_file(bpage)); - ut_ad(bpage->in_LRU_list); - - prev_bpage = UT_LIST_GET_PREV(LRU, bpage); - - /* bpage->id.space() and bpage->io_fix are protected by - buf_pool->mutex and the block_mutex. It is safe to check - them while holding buf_pool->mutex only. */ - - if (bpage->id.space() != id) { - /* Skip this block, as it does not belong to - the space that is being invalidated. 
*/ - goto next_page; - } else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) { - /* We cannot remove this page during this scan - yet; maybe the system is currently reading it - in, or flushing the modifications to the file */ - - all_freed = FALSE; - goto next_page; - } else { - hash_lock = buf_page_hash_lock_get(buf_pool, bpage->id); - - rw_lock_x_lock(hash_lock); - - block_mutex = buf_page_get_mutex(bpage); - - mutex_enter(block_mutex); - - if (bpage->buf_fix_count > 0) { - - mutex_exit(block_mutex); - - rw_lock_x_unlock(hash_lock); - - /* We cannot remove this page during - this scan yet; maybe the system is - currently reading it in, or flushing - the modifications to the file */ - - all_freed = FALSE; - - goto next_page; - } - } - - ut_ad(mutex_own(block_mutex)); - - DBUG_PRINT("ib_buf", ("evict page %u:%u" - " state %u", - bpage->id.space(), - bpage->id.page_no(), - bpage->state)); -#ifdef BTR_CUR_HASH_ADAPT - if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { - /* Do nothing, because the adaptive hash index - covers uncompressed pages only. */ - } else if (((buf_block_t*) bpage)->index) { - buf_pool_mutex_exit(buf_pool); - - rw_lock_x_unlock(hash_lock); - - mutex_exit(block_mutex); - - /* Note that the following call will acquire - and release block->lock X-latch. - Note that the table cannot be evicted during - the execution of ALTER TABLE...DISCARD TABLESPACE - because MySQL is keeping the table handle open. */ - - btr_search_drop_page_hash_when_freed( - bpage->id, bpage->size); - - goto scan_again; - } else { - /* This debug check uses a dirty read that could - theoretically cause false positives while - buf_pool_clear_hash_index() is executing, - if the writes to block->index=NULL and - block->n_pointers=0 are reordered. - (Other conflicting access paths to the adaptive hash - index should not be possible, because when a - tablespace is being discarded or dropped, there must - be no concurrect access to the contained tables.) */ - assert_block_ahi_empty((buf_block_t*) bpage); - } -#endif /* BTR_CUR_HASH_ADAPT */ - - if (bpage->oldest_modification != 0) { - - buf_flush_remove(bpage); - } - - ut_ad(!bpage->in_flush_list); - - /* Remove from the LRU list. */ - - if (buf_LRU_block_remove_hashed(bpage, true)) { - buf_LRU_block_free_hashed_page((buf_block_t*) bpage); - } else { - ut_ad(block_mutex == &buf_pool->zip_mutex); - } - - ut_ad(!mutex_own(block_mutex)); - - /* buf_LRU_block_remove_hashed() releases the hash_lock */ - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)); - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_S)); - -next_page: - bpage = prev_bpage; - } - - buf_pool_mutex_exit(buf_pool); - - if (!all_freed) { - os_thread_sleep(20000); - - goto scan_again; - } -} - -/******************************************************************//** -Remove pages belonging to a given tablespace inside a specific -buffer pool instance when we are deleting the data file(s) of that -tablespace. The pages still remain a part of LRU and are evicted from -the list as they age towards the tail of the LRU only if buf_remove -is BUF_REMOVE_FLUSH_NO_WRITE. */ -static +/** Empty the flush list for all pages belonging to a tablespace. 
+@param[in] id tablespace identifier +@param[in] trx transaction, for checking for user interrupt; + or NULL if nothing is to be written +@param[in] drop_ahi whether to drop the adaptive hash index */ void -buf_LRU_remove_pages( -/*=================*/ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint id, /*!< in: space id */ - buf_remove_t buf_remove, /*!< in: remove or flush strategy */ - const trx_t* trx) /*!< to check if the operation must - be interrupted */ +buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx, bool drop_ahi) { FlushObserver* observer = (trx == NULL) ? NULL : trx->flush_observer; + /* Pages in the system tablespace must never be discarded. */ + ut_ad(id || trx); - switch (buf_remove) { - case BUF_REMOVE_ALL_NO_WRITE: - buf_LRU_remove_all_pages(buf_pool, id); - break; - - case BUF_REMOVE_FLUSH_NO_WRITE: - /* Pass trx as NULL to avoid interruption check. */ - buf_flush_dirty_pages(buf_pool, id, observer, false, NULL); - break; - - case BUF_REMOVE_FLUSH_WRITE: - buf_flush_dirty_pages(buf_pool, id, observer, true, trx); - - if (observer == NULL) { - /* Ensure that all asynchronous IO is completed. */ - os_aio_wait_until_no_pending_writes(); - fil_flush(id); + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_pool_t* buf_pool = buf_pool_from_array(i); + if (drop_ahi) { + buf_LRU_drop_page_hash_for_tablespace(buf_pool, id); } - - break; + buf_flush_dirty_pages(buf_pool, id, observer, trx); } -} -/******************************************************************//** -Flushes all dirty pages or removes all pages belonging -to a given tablespace. A PROBLEM: if readahead is being started, what -guarantees that it will not try to read in pages after this operation -has completed? */ -void -buf_LRU_flush_or_remove_pages( -/*==========================*/ - ulint id, /*!< in: space id */ - buf_remove_t buf_remove, /*!< in: remove or flush strategy */ - const trx_t* trx) /*!< to check if the operation must - be interrupted */ -{ - ulint i; - - /* Before we attempt to drop pages one by one we first - attempt to drop page hash index entries in batches to make - it more efficient. The batching attempt is a best effort - attempt and does not guarantee that all pages hash entries - will be dropped. We get rid of remaining page hash entries - one by one below. */ - for (i = 0; i < srv_buf_pool_instances; i++) { - buf_pool_t* buf_pool; - - buf_pool = buf_pool_from_array(i); -#ifdef BTR_CUR_HASH_ADAPT - switch (buf_remove) { - case BUF_REMOVE_ALL_NO_WRITE: - buf_LRU_drop_page_hash_for_tablespace(buf_pool, id); - break; - - case BUF_REMOVE_FLUSH_NO_WRITE: - /* It is a DROP TABLE for a single table - tablespace. No AHI entries exist because - we already dealt with them when freeing up - extents. */ - case BUF_REMOVE_FLUSH_WRITE: - /* We allow read-only queries against the - table, there is no need to drop the AHI entries. */ - break; - } -#endif /* BTR_CUR_HASH_ADAPT */ - buf_LRU_remove_pages(buf_pool, id, buf_remove, trx); + if (trx && !observer && !trx_is_interrupted(trx)) { + /* Ensure that all asynchronous IO is completed. 
*/ + os_aio_wait_until_no_pending_writes(); + fil_flush(id); } } diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index 071218c4060..b07140b8dd7 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -1630,7 +1630,7 @@ dict_table_rename_in_cache( return(DB_OUT_OF_MEMORY); } - fil_delete_tablespace(table->space, BUF_REMOVE_ALL_NO_WRITE); + fil_delete_tablespace(table->space, true); /* Delete any temp file hanging around. */ if (os_file_status(filepath, &exists, &ftype) diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 73132754fdf..90a3baa6f83 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2449,7 +2449,7 @@ fil_recreate_tablespace( /* Step-1: Invalidate buffer pool pages belonging to the tablespace to re-create. */ - buf_LRU_flush_or_remove_pages(space_id, BUF_REMOVE_ALL_NO_WRITE, 0); + buf_LRU_flush_or_remove_pages(space_id, NULL); /* Remove all insert buffer entries for the tablespace */ ibuf_delete_for_discarded_space(space_id); @@ -2907,7 +2907,7 @@ fil_close_tablespace( completely and permanently. The flag stop_new_ops also prevents fil_flush() from being applied to this tablespace. */ - buf_LRU_flush_or_remove_pages(id, BUF_REMOVE_FLUSH_WRITE, trx); + buf_LRU_flush_or_remove_pages(id, trx); /* If the free is successful, the X lock will be released before the space memory data structure is freed. */ @@ -2959,17 +2959,12 @@ fil_table_accessible(const dict_table_t* table) } } -/** Deletes an IBD tablespace, either general or single-table. -The tablespace must be cached in the memory cache. This will delete the -datafile, fil_space_t & fil_node_t entries from the file_system_t cache. -@param[in] space_id Tablespace id -@param[in] buf_remove Specify the action to take on the pages -for this table in the buffer pool. -@return DB_SUCCESS or error */ +/** Delete a tablespace and associated .ibd file. +@param[in] id tablespace identifier +@param[in] drop_ahi whether to drop the adaptive hash index +@return DB_SUCCESS or error */ dberr_t -fil_delete_tablespace( - ulint id, - buf_remove_t buf_remove) +fil_delete_tablespace(ulint id, bool drop_ahi) { char* path = 0; fil_space_t* space = 0; @@ -3012,7 +3007,7 @@ fil_delete_tablespace( To deal with potential read requests, we will check the ::stop_new_ops flag in fil_io(). */ - buf_LRU_flush_or_remove_pages(id, buf_remove, 0); + buf_LRU_flush_or_remove_pages(id, NULL, drop_ahi); /* If it is a delete then also delete any generated files, otherwise when we drop the database the remove directory will fail. */ @@ -3103,7 +3098,7 @@ fil_truncate_tablespace( /* Step-2: Invalidate buffer pool pages belonging to the tablespace to re-create. Remove all insert buffer entries for the tablespace */ - buf_LRU_flush_or_remove_pages(space_id, BUF_REMOVE_ALL_NO_WRITE, 0); + buf_LRU_flush_or_remove_pages(space_id, NULL); /* Step-3: Truncate the tablespace and accordingly update the fil_space_t handler that is used to access this tablespace. 
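With the buf_remove_t enum removed (see buf0types.h below), the strategy is now encoded in the trailing arguments of buf_LRU_flush_or_remove_pages(): pages are written out only when a non-NULL trx is passed (it doubles as the interrupt check), and drop_ahi requests dropping adaptive hash index entries first. Roughly, the old modes map onto the new calls as follows (a sketch; note that unlike the old BUF_REMOVE_ALL_NO_WRITE, pages now stay in the LRU list and age out normally):

	/* was BUF_REMOVE_FLUSH_WRITE: flush dirty pages to the file */
	buf_LRU_flush_or_remove_pages(space_id, trx);

	/* was BUF_REMOVE_FLUSH_NO_WRITE: discard dirty pages, no writes */
	buf_LRU_flush_or_remove_pages(space_id, NULL);

	/* was BUF_REMOVE_ALL_NO_WRITE: additionally drop AHI entries */
	buf_LRU_flush_or_remove_pages(space_id, NULL, true);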
*/ @@ -3199,7 +3194,7 @@ fil_reinit_space_header_for_table( from disabling AHI during the scan */ btr_search_s_lock_all(); DEBUG_SYNC_C("buffer_pool_scan"); - buf_LRU_flush_or_remove_pages(id, BUF_REMOVE_ALL_NO_WRITE, 0); + buf_LRU_flush_or_remove_pages(id, NULL); btr_search_s_unlock_all(); row_mysql_lock_data_dictionary(trx); @@ -3292,7 +3287,7 @@ fil_discard_tablespace( { dberr_t err; - switch (err = fil_delete_tablespace(id, BUF_REMOVE_ALL_NO_WRITE)) { + switch (err = fil_delete_tablespace(id, true)) { case DB_SUCCESS: break; @@ -4348,8 +4343,19 @@ fil_ibd_discover( /* Look for a remote file-per-table tablespace. */ - df_rem_per.set_name(db); - if (df_rem_per.open_link_file() == DB_SUCCESS) { + switch (srv_operation) { + case SRV_OPERATION_BACKUP: + case SRV_OPERATION_RESTORE_DELTA: + ut_ad(0); + break; + case SRV_OPERATION_RESTORE_EXPORT: + case SRV_OPERATION_RESTORE: + break; + case SRV_OPERATION_NORMAL: + df_rem_per.set_name(db); + if (df_rem_per.open_link_file() != DB_SUCCESS) { + break; + } /* An ISL file was found with contents. */ if (df_rem_per.open_read_only(false) != DB_SUCCESS @@ -4439,6 +4445,18 @@ fil_ibd_load( return(FIL_LOAD_OK); } + if (srv_operation == SRV_OPERATION_RESTORE) { + /* Replace absolute DATA DIRECTORY file paths with + short names relative to the backup directory. */ + if (const char* name = strrchr(filename, OS_PATH_SEPARATOR)) { + while (--name > filename + && *name != OS_PATH_SEPARATOR); + if (name > filename) { + filename = name + 1; + } + } + } + Datafile file; file.set_filepath(filename); file.open_read_only(false); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 10d54498feb..96aca74c217 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -19640,7 +19640,7 @@ wsrep_innobase_kill_one_trx( wsrep_thd_awake(thd, signal); } else { /* abort currently executing query */ - DBUG_PRINT("wsrep",("sending KILL_QUERY to: %ld", + DBUG_PRINT("wsrep",("sending KILL_QUERY to: %lu", thd_get_thread_id(thd))); WSREP_DEBUG("kill query for: %ld", thd_get_thread_id(thd)); @@ -19784,7 +19784,8 @@ wsrep_fake_trx_id( mutex_enter(&trx_sys->mutex); trx_id_t trx_id = trx_sys_get_new_trx_id(); mutex_exit(&trx_sys->mutex); - WSREP_DEBUG("innodb fake trx id: %lu thd: %s", trx_id, wsrep_thd_query(thd)); + WSREP_DEBUG("innodb fake trx id: " TRX_ID_FMT " thd: %s", + trx_id, wsrep_thd_query(thd)); wsrep_ws_handle_for_trx(wsrep_thd_ws_handle(thd), trx_id); } diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 7b9980c22bb..67174336a7c 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -427,6 +427,26 @@ innobase_fulltext_exist( return(false); } +/** Determine whether indexed virtual columns exist in a table. +@param[in] table table definition +@return whether indexes exist on virtual columns */ +static bool innobase_indexed_virtual_exist(const TABLE* table) +{ + const KEY* const end = &table->key_info[table->s->keys]; + + for (const KEY* key = table->key_info; key < end; key++) { + const KEY_PART_INFO* const key_part_end = key->key_part + + key->user_defined_key_parts; + for (const KEY_PART_INFO* key_part = key->key_part; + key_part < key_part_end; key_part++) { + if (!key_part->field->stored_in_db()) + return true; + } + } + + return false; +} + /** Determine if spatial indexes exist in a given table. 
@param table MySQL table @return whether spatial indexes exist on the table */ @@ -1134,7 +1154,8 @@ next_column: & Alter_inplace_info::ADD_PK_INDEX) || innobase_need_rebuild(ha_alter_info, table)) && (innobase_fulltext_exist(altered_table) - || innobase_spatial_exist(altered_table))) { + || innobase_spatial_exist(altered_table) + || innobase_indexed_virtual_exist(altered_table))) { /* Refuse to rebuild the table online, if FULLTEXT OR SPATIAL indexes are to survive the rebuild. */ online = false; @@ -1150,6 +1171,10 @@ next_column: ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS); + } else if (!innobase_fulltext_exist(altered_table)) { + /* MDEV-14341 FIXME: Remove this limitation. */ + ha_alter_info->unsupported_reason = + "online rebuild with indexed virtual columns"; } else { ha_alter_info->unsupported_reason = innobase_get_err_msg( @@ -5308,7 +5333,7 @@ not_instant_add_column: if (alt_opt.encryption != opt.encryption || alt_opt.encryption_key_id != opt.encryption_key_id) { - key_id = alt_opt.encryption_key_id; + key_id = uint32_t(alt_opt.encryption_key_id); mode = fil_encryption_t(alt_opt.encryption); } } diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index c276a4ce137..b0d9ccbde7d 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -4964,21 +4964,36 @@ ibuf_check_bitmap_on_import( const trx_t* trx, /*!< in: transaction */ ulint space_id) /*!< in: tablespace identifier */ { - ulint size; ulint page_no; ut_ad(space_id); ut_ad(trx->mysql_thd); - bool found; - const page_size_t& page_size - = fil_space_get_page_size(space_id, &found); - - if (!found) { + FilSpace space(space_id); + if (!space()) { return(DB_TABLE_NOT_FOUND); } - size = fil_space_get_size(space_id); + const page_size_t page_size(space->flags); + /* fil_space_t::size and fil_space_t::free_limit would still be 0 + at this point. So, we will have to read page 0. */ + ut_ad(!space->free_limit); + ut_ad(!space->size); + + mtr_t mtr; + ulint size; + mtr.start(); + if (buf_block_t* sp = buf_page_get(page_id_t(space_id, 0), page_size, + RW_S_LATCH, &mtr)) { + size = std::min( + mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT + + sp->frame), + mach_read_from_4(FSP_HEADER_OFFSET + FSP_SIZE + + sp->frame)); + } else { + size = 0; + } + mtr.commit(); if (size == 0) { return(DB_TABLE_NOT_FOUND); @@ -4993,7 +5008,6 @@ ibuf_check_bitmap_on_import( the space, as usual. */ for (page_no = 0; page_no < size; page_no += page_size.physical()) { - mtr_t mtr; page_t* bitmap_page; ulint i; diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 516898066aa..4a54c30629b 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -65,6 +65,7 @@ struct fil_addr_t; #define BUF_GET_POSSIBLY_FREED 16 /*!< Like BUF_GET, but do not mind if the file page has been freed. 
*/ +#define BUF_EVICT_IF_IN_POOL 20 /*!< evict a clean block if found */ /* @} */ /** @name Modes for buf_page_get_known_nowait */ /* @{ */ diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h index 3cc01473da1..54c001ce478 100644 --- a/storage/innobase/include/buf0lru.h +++ b/storage/innobase/include/buf0lru.h @@ -50,18 +50,14 @@ These are low-level functions /** Minimum LRU list length for which the LRU_old pointer is defined */ #define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */ -/******************************************************************//** -Flushes all dirty pages or removes all pages belonging -to a given tablespace. A PROBLEM: if readahead is being started, what -guarantees that it will not try to read in pages after this operation -has completed? */ +/** Empty the flush list for all pages belonging to a tablespace. +@param[in] id tablespace identifier +@param[in] trx transaction, for checking for user interrupt; + or NULL if nothing is to be written +@param[in] drop_ahi whether to drop the adaptive hash index */ +UNIV_INTERN void -buf_LRU_flush_or_remove_pages( -/*==========================*/ - ulint id, /*!< in: space id */ - buf_remove_t buf_remove, /*!< in: remove or flush strategy */ - const trx_t* trx); /*!< to check if the operation must - be interrupted */ +buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx, bool drop_ahi=false); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /********************************************************************//** diff --git a/storage/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h index 102b831ec61..719699f5ee2 100644 --- a/storage/innobase/include/buf0types.h +++ b/storage/innobase/include/buf0types.h @@ -59,17 +59,6 @@ enum buf_flush_t { BUF_FLUSH_N_TYPES /*!< index of last element + 1 */ }; -/** Algorithm to remove the pages for a tablespace from the buffer pool. -See buf_LRU_flush_or_remove_pages(). */ -enum buf_remove_t { - BUF_REMOVE_ALL_NO_WRITE, /*!< Remove all pages from the buffer - pool, don't write or sync to disk */ - BUF_REMOVE_FLUSH_NO_WRITE, /*!< Remove only, from the flush list, - don't write or sync to disk */ - BUF_REMOVE_FLUSH_WRITE /*!< Flush dirty pages to disk only - don't remove from the buffer pool */ -}; - /** Flags for io_fix types */ enum buf_io_fix { BUF_IO_NONE = 0, /**< no pending I/O */ diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 12395a3f060..695c490ea94 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -940,17 +940,12 @@ bool fil_table_accessible(const dict_table_t* table) MY_ATTRIBUTE((warn_unused_result, nonnull)); -/** Deletes an IBD tablespace, either general or single-table. -The tablespace must be cached in the memory cache. This will delete the -datafile, fil_space_t & fil_node_t entries from the file_system_t cache. -@param[in] space_id Tablespace id -@param[in] buf_remove Specify the action to take on the pages -for this table in the buffer pool. -@return true if success */ +/** Delete a tablespace and associated .ibd file. +@param[in] id tablespace identifier +@param[in] drop_ahi whether to drop the adaptive hash index +@return DB_SUCCESS or error */ dberr_t -fil_delete_tablespace( - ulint id, - buf_remove_t buf_remove); +fil_delete_tablespace(ulint id, bool drop_ahi = false); /** Truncate the tablespace to needed size. 
@param[in] space_id id of tablespace to truncate diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index 58802e23e77..062e4f8d8ab 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -989,29 +989,6 @@ rec_convert_dtuple_to_temp( rec_comp_status_t status = REC_STATUS_ORDINARY) MY_ATTRIBUTE((nonnull)); -/** Determine the converted size of virtual column data in a temporary file. -@see rec_convert_dtuple_to_temp_v() -@param[in] index clustered index -@param[in] v clustered index record augmented with the values - of virtual columns -@return size in bytes */ -ulint -rec_get_converted_size_temp_v(const dict_index_t* index, const dtuple_t* v) - MY_ATTRIBUTE((warn_unused_result, nonnull)); - -/** Write indexed virtual column data into a temporary file. -@see rec_get_converted_size_temp_v() -@param[out] rec serialized record -@param[in] index clustered index -@param[in] v_entry clustered index record augmented with the values - of virtual columns */ -void -rec_convert_dtuple_to_temp_v( - byte* rec, - const dict_index_t* index, - const dtuple_t* v_entry) - MY_ATTRIBUTE((nonnull)); - /**************************************************************//** Copies the first n fields of a physical record to a new physical record in a buffer. diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h index 11b7b8e9ad3..df9920d9bcc 100644 --- a/storage/innobase/include/row0log.h +++ b/storage/innobase/include/row0log.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -132,7 +133,6 @@ row_log_table_delete( /*=================*/ const rec_t* rec, /*!< in: clustered index leaf page record, page X-latched */ - const dtuple_t* ventry, /*!< in: dtuple holding virtual column info */ dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */ @@ -151,12 +151,8 @@ row_log_table_update( dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */ - const dtuple_t* old_pk, /*!< in: row_log_table_get_pk() + const dtuple_t* old_pk);/*!< in: row_log_table_get_pk() before the update */ - const dtuple_t* new_v_row,/*!< in: dtuple contains the new virtual - columns */ - const dtuple_t* old_v_row);/*!< in: dtuple contains the old virtual - columns */ /******************************************************//** Constructs the old PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR @@ -185,7 +181,6 @@ row_log_table_insert( /*=================*/ const rec_t* rec, /*!< in: clustered index leaf page record, page X-latched */ - const dtuple_t* ventry, /*!< in: dtuple holding virtual column info */ dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets);/*!< in: rec_get_offsets(rec,index) */ diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h index a6889696036..ed45e1de82e 100644 --- a/storage/innobase/include/trx0rec.h +++ b/storage/innobase/include/trx0rec.h @@ -276,15 +276,13 @@ trx_undo_rec_get_col_val( @param[in] table the table @param[in] ptr undo log pointer @param[in,out] row the dtuple to fill 
-@param[in] in_purge called by purge thread -@param[in] col_map online rebuild column map */ +@param[in] in_purge whether this is called by purge */ void trx_undo_read_v_cols( const dict_table_t* table, const byte* ptr, const dtuple_t* row, - bool in_purge, - const ulint* col_map); + bool in_purge); /** Read virtual column index from undo log if the undo log contains such info, and verify the column is still indexed, and output its position diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index c6b9def79a1..e556f6bef6d 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -5382,13 +5382,21 @@ fallback: } while (err == EINTR && srv_shutdown_state == SRV_SHUTDOWN_NONE); - if (err) { + switch (err) { + case 0: + return true; + default: ib::error() << "preallocating " << size << " bytes for file " << name << " failed with error " << err; + /* fall through */ + case EINTR: + errno = err; + return false; + case EINVAL: + /* fall back to the code below */ + break; } - errno = err; - return(!err); # endif /* HAVE_POSIX_ALLOCATE */ #endif /* _WIN32*/ @@ -5410,14 +5418,9 @@ fallback: memset(buf, 0, buf_size); os_offset_t current_size = os_file_get_size(file); - bool write_progress_info = - (size - current_size >= (os_offset_t) 100 << 20); - - if (write_progress_info) { - ib::info() << "Progress in MB:"; - } - while (current_size < size) { + while (current_size < size + && srv_shutdown_state == SRV_SHUTDOWN_NONE) { ulint n_bytes; if (size - current_size < (os_offset_t) buf_size) { @@ -5433,32 +5436,15 @@ fallback: request, name, file, buf, current_size, n_bytes); if (err != DB_SUCCESS) { - - ut_free(buf2); - return(false); - } - - /* Print about progress for each 100 MB written */ - if (write_progress_info && - ((current_size + n_bytes) / (100 << 20) - != current_size / (100 << 20))) { - - fprintf(stderr, " %lu00", - (ulong) ((current_size + n_bytes) - / (100 << 20))); + break; } current_size += n_bytes; } - if (write_progress_info) { - - fprintf(stderr, "\n"); - } - ut_free(buf2); - return(os_file_flush(file)); + return(current_size >= size && os_file_flush(file)); } /** Truncates a file to a specified size in bytes. diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index 70710a1e6ca..8fb24855e97 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -840,6 +840,7 @@ rec_get_offsets_func( ut_ad(is_user_rec || n == 1); ut_ad(!is_user_rec || leaf || index->is_dummy || dict_index_is_ibuf(index) + || n == n_fields /* dict_stats_analyze_index_level() */ || n == dict_index_get_n_unique_in_tree_nonleaf(index) + 1); ut_ad(!is_user_rec || !leaf || index->is_dummy @@ -1199,49 +1200,6 @@ rec_get_converted_size_comp_prefix_low( return(extra_size + data_size); } -/** Determine the converted size of virtual column data in a temporary file. 
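The reworked error handling in the os0file.cc hunk above distinguishes three posix_fallocate() outcomes instead of treating every nonzero result as fatal. Isolated from the surrounding function, the pattern is (a sketch; fd and size stand in for the real variables):

	int	err;
	do {
		err = posix_fallocate(fd, 0, size);
	} while (err == EINTR
		 && srv_shutdown_state == SRV_SHUTDOWN_NONE);

	switch (err) {
	case 0:
		return(true);	/* the file was extended */
	default:
		ib::error() << "preallocating " << size
			<< " bytes failed with error " << err;
		/* fall through */
	case EINTR:
		errno = err;	/* posix_fallocate() does not set errno */
		return(false);
	case EINVAL:
		break;		/* unsupported: fall back to writing zeroes */
	}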
-@see rec_convert_dtuple_to_temp_v() -@param[in] index clustered index -@param[in] v clustered index record augmented with the values - of virtual columns -@return size in bytes */ -ulint -rec_get_converted_size_temp_v(const dict_index_t* index, const dtuple_t* v) -{ - ut_ad(dict_index_is_clust(index)); - - /* length marker */ - ulint data_size = 2; - const ulint n_v_fields = dtuple_get_n_v_fields(v); - - for (ulint i = 0; i < n_v_fields; i++) { - const dict_v_col_t* col - = dict_table_get_nth_v_col(index->table, i); - - /* Only those indexed needs to be logged */ - if (!col->m_col.ord_part) { - continue; - } - - data_size += mach_get_compressed_size(i + REC_MAX_N_FIELDS); - const dfield_t* vfield = dtuple_get_nth_v_field(v, col->v_pos); - ulint flen = vfield->len; - - if (flen != UNIV_SQL_NULL) { - flen = ut_min( - flen, - static_cast<ulint>( - DICT_MAX_FIELD_LEN_BY_FORMAT( - index->table))); - data_size += flen; - } - - data_size += mach_get_compressed_size(flen); - } - - return(data_size); -} - /**********************************************************//** Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT. @return total size */ @@ -1650,68 +1608,6 @@ rec_convert_dtuple_to_rec_comp( } } -/** Write indexed virtual column data into a temporary file. -@see rec_get_converted_size_temp_v() -@param[out] rec serialized record -@param[in] index clustered index -@param[in] v_entry clustered index record augmented with the values - of virtual columns */ -void -rec_convert_dtuple_to_temp_v( - byte* rec, - const dict_index_t* index, - const dtuple_t* v_entry) -{ - ut_ad(dict_index_is_clust(index)); - const ulint num_v = dtuple_get_n_v_fields(v_entry); - - /* reserve 2 bytes for writing length */ - byte* ptr = rec; - ptr += 2; - - /* Now log information on indexed virtual columns */ - for (ulint col_no = 0; col_no < num_v; col_no++) { - dfield_t* vfield; - ulint flen; - - const dict_v_col_t* col - = dict_table_get_nth_v_col(index->table, col_no); - - if (col->m_col.ord_part) { - ulint pos = col_no; - - pos += REC_MAX_N_FIELDS; - - ptr += mach_write_compressed(ptr, pos); - - vfield = dtuple_get_nth_v_field( - v_entry, col->v_pos); - - flen = vfield->len; - - if (flen != UNIV_SQL_NULL) { - /* The virtual column can only be in sec - index, and index key length is bound by - DICT_MAX_FIELD_LEN_BY_FORMAT */ - flen = ut_min( - flen, - static_cast<ulint>( - DICT_MAX_FIELD_LEN_BY_FORMAT( - index->table))); - } - - ptr += mach_write_compressed(ptr, flen); - - if (flen != UNIV_SQL_NULL) { - ut_memcpy(ptr, dfield_get_data(vfield), flen); - ptr += flen; - } - } - } - - mach_write_to_2(rec, ptr - rec); -} - /*********************************************************//** Builds a new-style physical record out of a data tuple and stores it beginning from the start of the given buffer. 
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 757fbd28a88..9ea0bdd949c 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -1544,18 +1544,16 @@ PageConverter::PageConverter( : AbstractCallback(trx), m_cfg(cfg), + m_index(cfg->m_indexes), + m_current_lsn(log_get_lsn()), m_page_zip_ptr(0), - m_heap(0) UNIV_NOTHROW + m_rec_iter(), + m_offsets_(), m_offsets(m_offsets_), + m_heap(0), + m_cluster_index(dict_table_get_first_index(cfg->m_table)) UNIV_NOTHROW { - m_index = m_cfg->m_indexes; - - m_current_lsn = log_get_lsn(); ut_a(m_current_lsn > 0); - - m_offsets = m_offsets_; rec_offs_init(m_offsets_); - - m_cluster_index = dict_table_get_first_index(m_cfg->m_table); } /** Adjust the BLOB reference for a single column that is externally stored @@ -2024,7 +2022,7 @@ PageConverter::operator() ( we can work on them */ if ((err = update_page(block, page_type)) != DB_SUCCESS) { - return(err); + break; } /* Note: For compressed pages this function will write to the @@ -2063,9 +2061,15 @@ PageConverter::operator() ( << " at offset " << offset << " looks corrupted in file " << m_filepath; - return(DB_CORRUPTION); + err = DB_CORRUPTION; } + /* If we already had and old page with matching number + in the buffer pool, evict it now, because + we no longer evict the pages on DISCARD TABLESPACE. */ + buf_page_get_gen(block->page.id, get_page_size(), + RW_NO_LATCH, NULL, BUF_EVICT_IF_IN_POOL, + __FILE__, __LINE__, NULL, NULL); return(err); } @@ -3668,8 +3672,7 @@ row_import_for_mysql( The only dirty pages generated should be from the pessimistic purge of delete marked records that couldn't be purged in Phase I. */ - buf_LRU_flush_or_remove_pages( - prebuilt->table->space, BUF_REMOVE_FLUSH_WRITE, trx); + buf_LRU_flush_or_remove_pages(prebuilt->table->space, trx); if (trx_is_interrupted(trx)) { ib::info() << "Phase III - Flush interrupted"; diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 3729256c03b..2200a2092bd 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2509,8 +2509,7 @@ row_ins_index_entry_big_rec( if (error == DB_SUCCESS && dict_index_is_online_ddl(index)) { - row_log_table_insert(btr_pcur_get_rec(&pcur), entry, - index, offsets); + row_log_table_insert(btr_pcur_get_rec(&pcur), index, offsets); } mtr.commit(); @@ -2727,7 +2726,7 @@ err_exit: entry_heap, entry, thr, &mtr); if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) { - row_log_table_insert(btr_cur_get_rec(cursor), entry, + row_log_table_insert(btr_cur_get_rec(cursor), index, offsets); } @@ -2788,7 +2787,7 @@ do_insert: if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) { row_log_table_insert( - insert_rec, entry, index, offsets); + insert_rec, index, offsets); } mtr_commit(&mtr); diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index be4a7138ac7..76ca4e8b940 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -217,11 +217,6 @@ struct row_log_t { byte* crypt_head; /*!< reader context; temporary buffer used in encryption, decryption or NULL */ - ulint n_old_col; - /*!< number of non-virtual column in - old table */ - ulint n_old_vcol; - /*!< number of virtual column in old table */ const char* path; /*!< where to create temporary file during log operation */ }; @@ -609,7 +604,6 @@ row_log_table_delete( /*=================*/ const rec_t* rec, /*!< in: clustered index leaf page record, page X-latched */ - const 
dtuple_t* ventry, /*!< in: dtuple holding virtual column info */ dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */ @@ -739,11 +733,6 @@ row_log_table_delete( } } - /* Check if we need to log virtual column data */ - if (ventry->n_v_fields > 0) { - mrec_size += rec_get_converted_size_temp_v(new_index, ventry); - } - if (byte* b = row_log_table_open(index->online_log, mrec_size, &avail_size)) { *b++ = ROW_T_DELETE; @@ -786,12 +775,6 @@ row_log_table_delete( b += ext_size; } - /* log virtual columns */ - if (ventry->n_v_fields > 0) { - rec_convert_dtuple_to_temp_v(b, new_index, ventry); - b += mach_read_from_2(b); - } - row_log_table_close(index, b, mrec_size, avail_size); } @@ -808,10 +791,6 @@ row_log_table_low_redundant( const rec_t* rec, /*!< in: clustered index leaf page record in ROW_FORMAT=REDUNDANT, page X-latched */ - const dtuple_t* ventry, /*!< in: dtuple holding virtual - column info or NULL */ - const dtuple_t* o_ventry,/*!< in: old dtuple holding virtual - column info or NULL */ dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ bool insert, /*!< in: true if insert, @@ -831,7 +810,6 @@ row_log_table_low_redundant( ulint avail_size; mem_heap_t* heap = NULL; dtuple_t* tuple; - ulint num_v = ventry ? dtuple_get_n_v_fields(ventry) : 0; const ulint n_fields = rec_get_n_fields_old(rec); ut_ad(!page_is_comp(page_align(rec))); @@ -842,13 +820,9 @@ row_log_table_low_redundant( ut_ad(dict_index_is_clust(new_index)); heap = mem_heap_create(DTUPLE_EST_ALLOC(n_fields)); - tuple = dtuple_create_with_vcol(heap, n_fields, num_v); + tuple = dtuple_create(heap, n_fields); dict_index_copy_types(tuple, index, n_fields); - if (num_v) { - dict_table_copy_v_types(tuple, index->table); - } - dtuple_set_n_fields_cmp(tuple, dict_index_get_n_unique(index)); if (rec_get_1byte_offs_flag(rec)) { @@ -888,19 +862,8 @@ row_log_table_low_redundant( size++; extra_size++; } - ulint v_size = num_v - ? 
rec_get_converted_size_temp_v(index, ventry) : 0; - - mrec_size = ROW_LOG_HEADER_SIZE + size + v_size + (extra_size >= 0x80); - if (num_v) { - if (o_ventry) { - mrec_size += rec_get_converted_size_temp_v( - index, o_ventry); - } - } else if (index->table->n_v_cols) { - mrec_size += 2; - } + mrec_size = ROW_LOG_HEADER_SIZE + size + (extra_size >= 0x80); if (insert || index->online_log->same_pk) { ut_ad(!old_pk); @@ -957,22 +920,6 @@ row_log_table_low_redundant( b + extra_size, index, tuple->fields, tuple->n_fields, status); b += size; - ut_ad(!num_v == !v_size); - if (num_v) { - rec_convert_dtuple_to_temp_v(b, new_index, ventry); - b += v_size; - if (o_ventry) { - rec_convert_dtuple_to_temp_v( - b, new_index, o_ventry); - b += mach_read_from_2(b); - } - } else if (index->table->n_v_cols) { - /* The table contains virtual columns, but nothing - has changed for them, so just mark a 2 bytes length - field */ - mach_write_to_2(b, 2); - b += 2; - } row_log_table_close(index, b, mrec_size, avail_size); } @@ -988,9 +935,6 @@ row_log_table_low( /*==============*/ const rec_t* rec, /*!< in: clustered index leaf page record, page X-latched */ - const dtuple_t* ventry, /*!< in: dtuple holding virtual column info */ - const dtuple_t* o_ventry,/*!< in: dtuple holding old virtual column - info */ dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */ @@ -1036,8 +980,6 @@ row_log_table_low( with no information on virtual columns */ ut_ad(!old_pk || !insert); ut_ad(!old_pk || old_pk->n_v_fields == 0); - ut_ad(!o_ventry || !insert); - ut_ad(!o_ventry || ventry); if (dict_index_is_corrupted(index) || !dict_index_is_online_ddl(index) @@ -1047,8 +989,7 @@ row_log_table_low( if (!rec_offs_comp(offsets)) { row_log_table_low_redundant( - rec, ventry, o_ventry, index, insert, - old_pk, new_index); + rec, index, insert, old_pk, new_index); return; } @@ -1065,20 +1006,6 @@ row_log_table_low( + (extra_size >= 0x80) + rec_offs_size(offsets) - omit_size + index->is_instant(); - if (ventry && ventry->n_v_fields > 0) { - mrec_size += rec_get_converted_size_temp_v(new_index, ventry); - - if (o_ventry) { - mrec_size += rec_get_converted_size_temp_v( - new_index, o_ventry); - } - } else if (index->table->n_v_cols) { - /* Always leave 2 bytes length marker for virtual column - data logging even if there is none of them is indexed if table - has virtual columns */ - mrec_size += 2; - } - if (insert || index->online_log->same_pk) { ut_ad(!old_pk); old_pk_extra_size = old_pk_size = 0; @@ -1133,23 +1060,6 @@ row_log_table_low( memcpy(b, rec, rec_offs_data_size(offsets)); b += rec_offs_data_size(offsets); - if (ventry && ventry->n_v_fields > 0) { - rec_convert_dtuple_to_temp_v(b, new_index, ventry); - b += mach_read_from_2(b); - - if (o_ventry) { - rec_convert_dtuple_to_temp_v( - b, new_index, o_ventry); - b += mach_read_from_2(b); - } - } else if (index->table->n_v_cols) { - /* The table contains virtual columns, but nothing - has changed for them, so just mark a 2 bytes length - field */ - mach_write_to_2(b, 2); - b += 2; - } - row_log_table_close(index, b, mrec_size, avail_size); } } @@ -1165,15 +1075,10 @@ row_log_table_update( dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */ - const dtuple_t* old_pk, /*!< in: row_log_table_get_pk() + const dtuple_t* old_pk) /*!< in: row_log_table_get_pk() before the update */ - const dtuple_t* new_v_row,/*!< in: dtuple 
contains the new virtual - columns */ - const dtuple_t* old_v_row)/*!< in: dtuple contains the old virtual - columns */ { - row_log_table_low(rec, new_v_row, old_v_row, index, offsets, - false, old_pk); + row_log_table_low(rec, index, offsets, false, old_pk); } /** Gets the old table column of a PRIMARY KEY column. @@ -1477,12 +1382,11 @@ row_log_table_insert( /*=================*/ const rec_t* rec, /*!< in: clustered index leaf page record, page X-latched */ - const dtuple_t* ventry, /*!< in: dtuple holding virtual column info */ dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets)/*!< in: rec_get_offsets(rec,index) */ { - row_log_table_low(rec, ventry, NULL, index, offsets, true, NULL); + row_log_table_low(rec, index, offsets, true, NULL); } /******************************************************//** @@ -1579,7 +1483,6 @@ row_log_table_apply_convert_mrec( reason of failure */ { dtuple_t* row; - ulint num_v = dict_table_get_n_v_cols(log->table); *error = DB_SUCCESS; @@ -1593,8 +1496,7 @@ row_log_table_apply_convert_mrec( dfield_get_type(dtuple_get_nth_field(row, i))); } } else { - row = dtuple_create_with_vcol( - heap, dict_table_get_n_cols(log->table), num_v); + row = dtuple_create(heap, dict_table_get_n_cols(log->table)); dict_table_copy_types(row, log->table); } @@ -1719,14 +1621,6 @@ blob_done: dfield_get_type(dfield))); } - /* read the virtual column data if any */ - if (num_v) { - byte* b = const_cast<byte*>(mrec) - + rec_offs_data_size(offsets); - trx_undo_read_v_cols(log->table, b, row, false, - &(log->col_map[log->n_old_col])); - } - return(row); } @@ -1867,8 +1761,6 @@ row_log_table_apply_delete_low( /*===========================*/ btr_pcur_t* pcur, /*!< in/out: B-tree cursor, will be trashed */ - const dtuple_t* ventry, /*!< in: dtuple holding - virtual column info */ const ulint* offsets, /*!< in: offsets on pcur */ const row_ext_t* save_ext, /*!< in: saved external field info, or NULL */ @@ -1894,9 +1786,6 @@ row_log_table_apply_delete_low( ROW_COPY_DATA, index, btr_pcur_get_rec(pcur), offsets, NULL, NULL, NULL, save_ext ? NULL : &ext, heap); - if (ventry) { - dtuple_copy_v_fields(row, ventry); - } if (!save_ext) { save_ext = ext; @@ -1988,20 +1877,15 @@ row_log_table_apply_delete( mtr_t mtr; btr_pcur_t pcur; ulint* offsets; - ulint num_v = new_table->n_v_cols; ut_ad(rec_offs_n_fields(moffsets) == dict_index_get_n_unique(index) + 2); ut_ad(!rec_offs_any_extern(moffsets)); /* Convert the row to a search tuple. */ - old_pk = dtuple_create_with_vcol(heap, index->n_uniq, num_v); + old_pk = dtuple_create(heap, index->n_uniq); dict_index_copy_types(old_pk, index, index->n_uniq); - if (num_v) { - dict_table_copy_v_types(old_pk, index->table); - } - for (ulint i = 0; i < index->n_uniq; i++) { ulint len; const void* field; @@ -2089,14 +1973,7 @@ all_done: } } - if (num_v) { - byte* b = (byte*)mrec + rec_offs_data_size(moffsets) - + ext_size; - trx_undo_read_v_cols(log->table, b, old_pk, false, - &(log->col_map[log->n_old_col])); - } - - return(row_log_table_apply_delete_low(&pcur, old_pk, + return(row_log_table_apply_delete_low(&pcur, offsets, save_ext, heap, &mtr)); } @@ -2309,13 +2186,12 @@ func_exit_committed: /* Some BLOBs are missing, so we are interpreting this ROW_T_UPDATE as ROW_T_DELETE (see *1). */ error = row_log_table_apply_delete_low( - &pcur, old_pk, cur_offsets, NULL, heap, &mtr); + &pcur, cur_offsets, NULL, heap, &mtr); goto func_exit_committed; } - /** It allows to create tuple with virtual column information. 
*/ dtuple_t* entry = row_build_index_entry_low( - row, NULL, index, heap, ROW_BUILD_FOR_INSERT); + row, NULL, index, heap, ROW_BUILD_NORMAL); upd_t* update = row_upd_build_difference_binary( index, entry, btr_pcur_get_rec(&pcur), cur_offsets, false, NULL, heap, dup->table); @@ -2348,7 +2224,7 @@ func_exit_committed: } error = row_log_table_apply_delete_low( - &pcur, old_pk, cur_offsets, NULL, heap, &mtr); + &pcur, cur_offsets, NULL, heap, &mtr); ut_ad(mtr.has_committed()); if (error == DB_SUCCESS) { @@ -2543,13 +2419,6 @@ row_log_table_apply_op( next_mrec = mrec + rec_offs_data_size(offsets); - if (log->table->n_v_cols) { - if (next_mrec + 2 > mrec_end) { - return(NULL); - } - next_mrec += mach_read_from_2(next_mrec); - } - if (next_mrec > mrec_end) { return(NULL); } else { @@ -2581,13 +2450,6 @@ row_log_table_apply_op( rec_offs_set_n_fields(offsets, new_index->n_uniq + 2); rec_init_offsets_temp(mrec, new_index, offsets); next_mrec = mrec + rec_offs_data_size(offsets) + ext_size; - if (log->table->n_v_cols) { - if (next_mrec + 2 > mrec_end) { - return(NULL); - } - - next_mrec += mach_read_from_2(next_mrec); - } if (next_mrec > mrec_end) { return(NULL); @@ -2632,7 +2494,6 @@ row_log_table_apply_op( definition of the columns belonging to PRIMARY KEY is not changed, the log will only contain DB_TRX_ID,new_row. */ - ulint num_v = new_index->table->n_v_cols; if (dup->index->online_log->same_pk) { ut_ad(new_index->n_uniq == dup->index->n_uniq); @@ -2667,14 +2528,9 @@ row_log_table_apply_op( return(NULL); } - old_pk = dtuple_create_with_vcol( - heap, new_index->n_uniq, num_v); + old_pk = dtuple_create(heap, new_index->n_uniq); dict_index_copy_types( old_pk, new_index, old_pk->n_fields); - if (num_v) { - dict_table_copy_v_types( - old_pk, new_index->table); - } /* Copy the PRIMARY KEY fields from mrec to old_pk. */ for (ulint i = 0; i < new_index->n_uniq; i++) { @@ -2715,16 +2571,10 @@ row_log_table_apply_op( /* Copy the PRIMARY KEY fields and DB_TRX_ID, DB_ROLL_PTR from mrec to old_pk. 
*/ - old_pk = dtuple_create_with_vcol( - heap, new_index->n_uniq + 2, num_v); + old_pk = dtuple_create(heap, new_index->n_uniq + 2); dict_index_copy_types(old_pk, new_index, old_pk->n_fields); - if (num_v) { - dict_table_copy_v_types( - old_pk, new_index->table); - } - for (ulint i = 0; i < dict_index_get_n_unique(new_index) + 2; i++) { @@ -2777,31 +2627,6 @@ row_log_table_apply_op( } } - /* Read virtual column info from log */ - if (num_v) { - ulint o_v_size = 0; - ulint n_v_size = 0; - n_v_size = mach_read_from_2(next_mrec); - next_mrec += n_v_size; - if (next_mrec > mrec_end) { - return(NULL); - } - - /* if there is more than 2 bytes length info */ - if (n_v_size > 2) { - trx_undo_read_v_cols( - log->table, const_cast<byte*>( - next_mrec), old_pk, false, - &(log->col_map[log->n_old_col])); - o_v_size = mach_read_from_2(next_mrec); - } - - next_mrec += o_v_size; - if (next_mrec > mrec_end) { - return(NULL); - } - } - ut_ad(next_mrec <= mrec_end); log->head.total += next_mrec - mrec_start; dtuple_set_n_fields_cmp(old_pk, new_index->n_uniq); @@ -3344,8 +3169,6 @@ row_log_allocate( log->head.blocks = log->head.bytes = 0; log->head.total = 0; log->path = path; - log->n_old_col = index->table->n_cols; - log->n_old_vcol = index->table->n_v_cols; dict_index_set_online_status(index, ONLINE_INDEX_CREATION); index->online_log = log; diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 3f7c500c7f5..c729e5b95f4 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -2505,10 +2505,7 @@ err_exit: /* We already have .ibd file here. it should be deleted. */ if (dict_table_is_file_per_table(table) - && fil_delete_tablespace( - table->space, - BUF_REMOVE_FLUSH_NO_WRITE) - != DB_SUCCESS) { + && fil_delete_tablespace(table->space) != DB_SUCCESS) { ib::error() << "Not able to delete tablespace " << table->space << " of table " @@ -3177,9 +3174,6 @@ row_discard_tablespace( 4) FOREIGN KEY operations: if table->n_foreign_key_checks_running > 0, we do not allow the discard. */ - /* Play safe and remove all insert buffer entries, though we should - have removed them already when DISCARD TABLESPACE was called */ - ibuf_delete_for_discarded_space(table->space); table_id_t new_id; @@ -3544,8 +3538,7 @@ row_drop_single_table_tablespace( ib::info() << "Removed datafile " << filepath << " for table " << tablename; - } else if (fil_delete_tablespace(space_id, BUF_REMOVE_FLUSH_NO_WRITE) - != DB_SUCCESS) { + } else if (fil_delete_tablespace(space_id) != DB_SUCCESS) { ib::error() << "We removed the InnoDB internal data" " dictionary entry of table " << tablename diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc index 54583956107..ccf58b9e73f 100644 --- a/storage/innobase/row/row0quiesce.cc +++ b/storage/innobase/row/row0quiesce.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -536,8 +537,7 @@ row_quiesce_table_start( } if (!trx_is_interrupted(trx)) { - buf_LRU_flush_or_remove_pages( - table->space, BUF_REMOVE_FLUSH_WRITE, trx); + buf_LRU_flush_or_remove_pages(table->space, trx); if (trx_is_interrupted(trx)) { diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc index 6628007909e..58707fb21c5 100644 --- a/storage/innobase/row/row0uins.cc +++ b/storage/innobase/row/row0uins.cc @@ -117,7 +117,7 @@ row_undo_ins_remove_clust_rec( mem_heap_t* heap = NULL; const ulint* offsets = rec_get_offsets( rec, index, NULL, true, ULINT_UNDEFINED, &heap); - row_log_table_delete(rec, node->row, index, offsets, NULL); + row_log_table_delete(rec, index, offsets, NULL); mem_heap_free(heap); } @@ -457,7 +457,7 @@ close_table: } if (node->table->n_v_cols) { trx_undo_read_v_cols(node->table, ptr, - node->row, false, NULL); + node->row, false); } } else { diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index ecf6b76a593..0b1d800212f 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -334,18 +334,16 @@ row_undo_mod_clust( switch (node->rec_type) { case TRX_UNDO_DEL_MARK_REC: row_log_table_insert( - btr_pcur_get_rec(pcur), node->row, - index, offsets); + btr_pcur_get_rec(pcur), index, offsets); break; case TRX_UNDO_UPD_EXIST_REC: row_log_table_update( btr_pcur_get_rec(pcur), index, offsets, - rebuilt_old_pk, node->undo_row, node->row); + rebuilt_old_pk); break; case TRX_UNDO_UPD_DEL_REC: row_log_table_delete( - btr_pcur_get_rec(pcur), node->row, - index, offsets, sys); + btr_pcur_get_rec(pcur), index, offsets, sys); break; default: ut_ad(0); diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index 26ee2849be3..8e59fe7f113 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -2914,18 +2914,9 @@ row_upd_clust_rec( if (err == DB_SUCCESS) { success: if (dict_index_is_online_ddl(index)) { - dtuple_t* new_v_row = NULL; - dtuple_t* old_v_row = NULL; - - if (!(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) { - new_v_row = node->upd_row; - old_v_row = node->update->old_vrow; - } - row_log_table_update( btr_cur_get_rec(btr_cur), - index, offsets, rebuilt_old_pk, new_v_row, - old_v_row); + index, offsets, rebuilt_old_pk); } } diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 25a5a964375..988ddf1a759 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1097,21 +1097,22 @@ srv_undo_tablespaces_init(bool create_new_db) mtr_commit(&mtr); /* Step-2: Flush the dirty pages from the buffer pool. */ + trx_t* trx = trx_allocate_for_background(); + for (undo::undo_spaces_t::const_iterator it = undo::Truncate::s_fix_up_spaces.begin(); it != undo::Truncate::s_fix_up_spaces.end(); ++it) { - buf_LRU_flush_or_remove_pages( - TRX_SYS_SPACE, BUF_REMOVE_FLUSH_WRITE, NULL); + buf_LRU_flush_or_remove_pages(TRX_SYS_SPACE, trx); - buf_LRU_flush_or_remove_pages( - *it, BUF_REMOVE_FLUSH_WRITE, NULL); + buf_LRU_flush_or_remove_pages(*it, trx); /* Remove the truncate redo log file. 
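After this patch the table-rebuild log no longer carries virtual column values at all; when indexed virtual columns exist, the online rebuild is refused instead (see the MDEV-14341 hunk in handler0alter.cc above). The row0uins.cc, row0umod.cc and row0upd.cc hunks show the resulting call shapes; for reference, the narrowed logging API is now:

	row_log_table_insert(rec, index, offsets);
	row_log_table_delete(rec, index, offsets, sys);
	row_log_table_update(rec, index, offsets, old_pk);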
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 50a321ea68b..7f9476832ba 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -2366,7 +2366,7 @@ trx_undo_prev_version_build(
 		ut_ad(index->table->n_v_cols);
 		trx_undo_read_v_cols(index->table, ptr, *vrow,
-				     v_status & TRX_UNDO_PREV_IN_PURGE, NULL);
+				     v_status & TRX_UNDO_PREV_IN_PURGE);
 	}
 
 	return(true);
@@ -2375,16 +2375,14 @@ trx_undo_prev_version_build(
 /** Read virtual column value from undo log
 @param[in]	table		the table
 @param[in]	ptr		undo log pointer
-@param[in,out]	row		the row struct to fill
-@param[in]	in_purge	called by purge thread
-@param[in]	col_map		online rebuild column map */
+@param[in,out]	row		the dtuple to fill
+@param[in]	in_purge	whether this is called by purge */
 void
 trx_undo_read_v_cols(
 	const dict_table_t*	table,
 	const byte*		ptr,
 	const dtuple_t*		row,
-	bool			in_purge,
-	const ulint*		col_map)
+	bool			in_purge)
 {
 	const byte*	end_ptr;
 	bool		first_v_col = true;
@@ -2424,21 +2422,10 @@ trx_undo_read_v_cols(
 		}
 
 		if (is_virtual) {
-			ulint		col_no;
 			dict_v_col_t*	vcol = dict_table_get_nth_v_col(
 						table, field_no);
 
-			if (!col_map) {
-				col_no = vcol->v_pos;
-			} else {
-				col_no = col_map[vcol->v_pos];
-			}
-
-			if (col_no == ULINT_UNDEFINED) {
-				continue;
-			}
-
-			dfield = dtuple_get_nth_v_field(row, col_no);
+			dfield = dtuple_get_nth_v_field(row, vcol->v_pos);
 
 			if (!in_purge
 			    || dfield_get_type(dfield)->mtype == DATA_MISSING) {
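The trx0rec.cc change removes the last col_map consumer: trx_undo_read_v_cols() used to translate each virtual column through an optional online-rebuild map, skipping entries mapped to ULINT_UNDEFINED, and now always stores the value at the column's own position, vcol->v_pos. The difference, reduced to a toy with hypothetical names and plain STL containers instead of dtuple_t:

```cpp
#include <cstddef>
#include <vector>

static const size_t NOT_MAPPED = ~static_cast<size_t>(0); /* like ULINT_UNDEFINED */

/* Old scheme: an optional map could relocate or drop a column. */
static void store_mapped(std::vector<int>& row, size_t v_pos, int value,
			 const size_t* col_map)
{
	const size_t target = col_map ? col_map[v_pos] : v_pos;
	if (target == NOT_MAPPED) {
		return;	/* column does not exist in the rebuilt table */
	}
	row[target] = value;
}

/* New scheme: the value always lands at its own position. */
static void store_direct(std::vector<int>& row, size_t v_pos, int value)
{
	row[v_pos] = value;
}
```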
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 5a0c81d3e3e..2092e024e28 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -274,7 +274,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
 {
   int kfile,open_mode,save_errno;
   uint i,j,len,errpos,head_length,base_pos,keys, realpath_err,
-    key_parts,unique_key_parts,fulltext_keys,uniques;
+    key_parts,base_key_parts,unique_key_parts,fulltext_keys,uniques;
   uint internal_table= MY_TEST(open_flags & HA_OPEN_INTERNAL_TABLE);
   uint file_version;
   size_t info_length;
@@ -404,21 +404,11 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
       Allocate space for header information and for data that is too
      big to keep on stack
    */
-    if (!my_multi_malloc(MY_WME,
-                         &disk_cache, info_length+128,
-                         &rec_per_key_part,
-                         (sizeof(*rec_per_key_part) * HA_MAX_POSSIBLE_KEY *
-                          HA_MAX_KEY_SEG),
-                         &nulls_per_key_part,
-                         (sizeof(*nulls_per_key_part) * HA_MAX_POSSIBLE_KEY *
-                          HA_MAX_KEY_SEG),
-                         NullS))
+    if (!(disk_cache= my_malloc(info_length+128, MYF(MY_WME))))
    {
      my_errno=ENOMEM;
      goto err;
    }
-    share_buff.state.rec_per_key_part= rec_per_key_part;
-    share_buff.state.nulls_per_key_part= nulls_per_key_part;
 
    end_pos=disk_cache+info_length;
    errpos= 3;
@@ -431,7 +421,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
    keys=    (uint) share->state.header.keys;
    uniques= (uint) share->state.header.uniques;
    fulltext_keys= (uint) share->state.header.fulltext_keys;
-    key_parts= mi_uint2korr(share->state.header.key_parts);
+    base_key_parts= key_parts= mi_uint2korr(share->state.header.key_parts);
    unique_key_parts= mi_uint2korr(share->state.header.unique_key_parts);
    if (len != MARIA_STATE_INFO_SIZE)
    {
@@ -441,7 +431,8 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
    }
    share->state_diff_length=len-MARIA_STATE_INFO_SIZE;
 
-    _ma_state_info_read(disk_cache, &share->state);
+    if (!_ma_state_info_read(disk_cache, &share->state))
+      goto err;
    len= mi_uint2korr(share->state.header.base_info_length);
    if (len != MARIA_BASE_INFO_SIZE)
    {
@@ -582,9 +573,9 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
    share->open_file_name.length= strlen(name);
    if (!my_multi_malloc(MY_WME,
 			 &share,sizeof(*share),
-			 &share->state.rec_per_key_part,
+			 &rec_per_key_part,
 			 sizeof(double) * key_parts,
-			 &share->state.nulls_per_key_part,
+			 &nulls_per_key_part,
 			 sizeof(long)* key_parts,
 			 &share->keyinfo,keys*sizeof(MARIA_KEYDEF),
 			 &share->uniqueinfo,uniques*sizeof(MARIA_UNIQUEDEF),
@@ -609,11 +600,16 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
      goto err;
    errpos= 4;
 
-    *share=share_buff;
-    memcpy((char*) share->state.rec_per_key_part,
-	   (char*) rec_per_key_part, sizeof(double)*key_parts);
-    memcpy((char*) share->state.nulls_per_key_part,
-	   (char*) nulls_per_key_part, sizeof(long)*key_parts);
+    *share= share_buff;
+    share->state.rec_per_key_part= rec_per_key_part;
+    share->state.nulls_per_key_part= nulls_per_key_part;
+
+    memcpy((char*) rec_per_key_part,
+           (char*) share_buff.state.rec_per_key_part,
+           sizeof(double)*base_key_parts);
+    memcpy((char*) nulls_per_key_part,
+           (char*) share_buff.state.nulls_per_key_part,
+           sizeof(long)*base_key_parts);
    memcpy((char*) share->state.key_root,
 	   (char*) key_root, sizeof(my_off_t)*keys);
    strmov(share->unique_file_name.str, name_buff);
@@ -911,6 +907,10 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
 				  (keys ? MARIA_INDEX_BLOCK_MARGIN * share->block_size * keys : 0));
    my_free(disk_cache);
+    my_free(share_buff.state.rec_per_key_part);
+    disk_cache= 0;
+    share_buff.state.rec_per_key_part= 0;
+
    _ma_setup_functions(share);
    max_data_file_length= share->base.max_data_file_length;
    if ((*share->once_init)(share, info.dfile.file))
@@ -1092,6 +1092,7 @@ err:
    /* fall through */
  case 3:
    my_free(disk_cache);
+    my_free(share_buff.state.rec_per_key_part);
    /* fall through */
  case 1:
    mysql_file_close(kfile,MYF(0));
@@ -1507,6 +1508,16 @@ static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state)
  keys=     (uint) state->header.keys;
  key_parts= mi_uint2korr(state->header.key_parts);
 
+  /* Allocate memory for key parts if not already done */
+  if (!state->rec_per_key_part &&
+      !my_multi_malloc(MY_WME,
+                       &state->rec_per_key_part,
+                       sizeof(*state->rec_per_key_part) * key_parts,
+                       &state->nulls_per_key_part,
+                       sizeof(*state->nulls_per_key_part) * key_parts,
+                       NullS))
+    DBUG_RETURN(0);
+
  state->open_count = mi_uint2korr(ptr);		ptr+= 2;
  state->changed= mi_uint2korr(ptr);			ptr+= 2;
  state->create_rename_lsn= lsn_korr(ptr);		ptr+= LSN_STORE_SIZE;
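The Aria change above moves ownership of the rec_per_key_part/nulls_per_key_part statistics: _ma_state_info_read() now allocates both arrays with a single my_multi_malloc() call, sized from the key_parts count it just read rather than the HA_MAX_POSSIBLE_KEY worst case, and maria_open() later adopts them into the share. my_multi_malloc() hands out several arrays carved from one allocation, which is why the error paths free only the first pointer. A self-contained sketch of that allocation idea; alloc_key_stats() is hypothetical, not the MariaDB function:

```cpp
#include <cstdlib>
#include <cstring>

/* Hypothetical stand-in for the my_multi_malloc() call above: one
allocation backs both arrays, so freeing rec_per_key_part releases
nulls_per_key_part as well. */
static bool alloc_key_stats(double** rec_per_key_part,
			    long** nulls_per_key_part, size_t key_parts)
{
	const size_t rec_bytes   = sizeof(double) * key_parts;
	const size_t nulls_bytes = sizeof(long) * key_parts;
	char* block = static_cast<char*>(calloc(1, rec_bytes + nulls_bytes));

	if (block == NULL) {
		return false;
	}

	*rec_per_key_part = reinterpret_cast<double*>(block);
	/* double is at least as strictly aligned as long on the usual
	platforms, so the tail of the block suits the long array. */
	*nulls_per_key_part = reinterpret_cast<long*>(block + rec_bytes);
	return true;
}
```

Copying with base_key_parts (the on-disk count) rather than key_parts keeps the later memcpy within the bounds of what the state buffer actually holds.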
diff --git a/storage/perfschema/pfs_autosize.cc b/storage/perfschema/pfs_autosize.cc
index 6f267cb4599..fd428cd6004 100644
--- a/storage/perfschema/pfs_autosize.cc
+++ b/storage/perfschema/pfs_autosize.cc
@@ -124,7 +124,7 @@ PFS_sizing_data small_data=
   /* Account / user / host */
   10, 5, 20,
   /* History sizes */
-  5, 100, 5, 100, 5, 100,
+  10, 100, 10, 100, 10, 100,
   /* Digests */
   1000,
   /* Session connect attrs. */
@@ -140,7 +140,7 @@ PFS_sizing_data medium_data=
   /* Account / user / host */
   100, 100, 100,
   /* History sizes */
-  10, 1000, 10, 1000, 10, 1000,
+  20, 1000, 20, 1000, 20, 1000,
   /* Digests */
   5000,
   /* Session connect attrs. */
@@ -156,7 +156,7 @@ PFS_sizing_data large_data=
   /* Account / user / host */
   100, 100, 100,
   /* History sizes */
-  10, 10000, 10, 10000, 10, 10000,
+  20, 10000, 20, 10000, 20, 10000,
   /* Digests */
   10000,
   /* Session connect attrs. */
diff --git a/storage/perfschema/pfs_server.cc b/storage/perfschema/pfs_server.cc
index 7577154515d..ee965c0e7da 100644
--- a/storage/perfschema/pfs_server.cc
+++ b/storage/perfschema/pfs_server.cc
@@ -67,8 +67,10 @@ initialize_performance_schema(PFS_global_param *param)
       The performance schema is disabled in the startup command line.
       All the instrumentation is turned off.
     */
+    pfs_enabled= 0;
     return NULL;
   }
+  pfs_enabled= TRUE;
 
   init_timers();
 
diff --git a/storage/perfschema/pfs_server.h b/storage/perfschema/pfs_server.h
index bc0c69e86b9..dd092713d8e 100644
--- a/storage/perfschema/pfs_server.h
+++ b/storage/perfschema/pfs_server.h
@@ -49,7 +49,7 @@
   #define PFS_MAX_SETUP_OBJECT 100
 #endif
 #ifndef PFS_MAX_STAGE_CLASS
-  #define PFS_MAX_STAGE_CLASS 150
+  #define PFS_MAX_STAGE_CLASS 160
 #endif
 #ifndef PFS_STATEMENTS_STACK_SIZE
   #define PFS_STATEMENTS_STACK_SIZE 10
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
index d2d3befdf04..e566691af28 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
@@ -21,6 +21,7 @@ b CHAR(30),
 PRIMARY KEY(pk) COMMENT "cf1",
 KEY(a)
 ) COLLATE 'latin1_bin' PARTITION BY KEY() PARTITIONS 4;
+connect other,localhost,root,,;
 set session transaction isolation level repeatable read;
 select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
 STAT_TYPE	VALUE
@@ -29,6 +30,7 @@ start transaction with consistent snapshot;
 select * from information_schema.rocksdb_dbstats where stat_type='DB_NUM_SNAPSHOTS';
 STAT_TYPE	VALUE
 DB_NUM_SNAPSHOTS	1
+connection default;
 set rocksdb_bulk_load=1;
 set rocksdb_bulk_load_size=100000;
 LOAD DATA INFILE <input_file> INTO TABLE t1;
@@ -79,4 +81,5 @@ count(b)
 5000000
 longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
 test.bulk_load.tmp
+disconnect other;
 DROP TABLE t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result
index b83f0a474cc..db21c3c01d4 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/read_only_tx.result
@@ -1,11 +1,14 @@
 DROP TABLE IF EXISTS t1;
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection con1;
 CREATE TABLE t1 (id INT, value int, PRIMARY KEY (id), INDEX (value)) ENGINE=RocksDB;
 INSERT INTO t1 VALUES (1,1);
 select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put';
 select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
-START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
-File	Position	Gtid_executed
-master-bin.000001	734	uuid:1-3
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection con2;
+connection con1;
 select case when variable_value-@p < 1000 then 'true' else variable_value-@p end from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put';
 case when variable_value-@p < 1000 then 'true' else variable_value-@p end
 true
@@ -27,10 +30,15 @@ id	value
 1	10001
 2	2
 BEGIN;
+connection con2;
+connection con1;
 SELECT COUNT(*) FROM t1;
 COUNT(*)
 9998
 COMMIT;
+connection default;
+disconnect con1;
+disconnect con2;
 OPTIMIZE TABLE t1;
 Table	Op	Msg_type	Msg_text
 test.t1	optimize	status	OK
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test
index 84fe0046e7b..d715eb7df7a 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/add_index_inplace_crash.test
@@ -105,3 +105,13 @@ SHOW CREATE TABLE t1;
 SELECT COUNT(*) FROM t1;
 
 DROP TABLE t1;
+
+# Cleanup temporary #sql files. In the future the server will remove these
+# automatically, but for now we need to delete them explicitly.
+
+--disable_query_log
+--disable_result_log
+let $datadir=`select @@datadir`;
+--remove_files_wildcard $datadir/test #sql*
+--enable_result_log
+--enable_query_log
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
index 3af1d99ff22..118d8598de3 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
+++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
@@ -67,6 +67,8 @@ lock_wait_timeout_stats: MDEV-13404
 compact_deletes: MDEV-12663 : rocksdb.compact_deletes times out and causes other tests to fail
 blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api test fails
 
+unique_check: wrong error number
+autoinc_vars_thread: debug sync point wait timed out
 
 # Enabling these didn't seem to cause any trouble:
 # autoinc_vars_thread : MDEV-12474 Regularly fails on buildbot
@@ -80,7 +82,5 @@ blind_delete_without_tx_api: MDEV-12286: rocksdb.blind_delete_without_tx_api tes
 ## information_schema : MariaRocks: requires GTIDs
 mysqlbinlog_gtid_skip_empty_trans_rocksdb : MariaRocks: requires GTIDs
-read_only_tx : MariaRocks: requires GTIDs
+#read_only_tx : MariaRocks: requires GTIDs
 rpl_row_triggers : MariaRocks: requires GTIDs
-
-
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt
index 52f4895dc2f..221b35c672a 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt
+++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx-master.opt
@@ -1 +1 @@
---rocksdb_default_cf_options=write_buffer_size=16k --log-bin --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log-slave-updates
+--rocksdb_default_cf_options=write_buffer_size=16k --log-bin --binlog_format=row --log-slave-updates
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test
index 52f65095d33..3a1025a3623 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/read_only_tx.test
@@ -2,9 +2,9 @@
 --source include/have_rocksdb.inc
 --source include/count_sessions.inc
 --disable_warnings
---source include/have_gtid.inc
+#--source include/have_gtid.inc
 --enable_warnings
--- let $uuid = `select @@server_uuid;`
+#-- let $uuid = `select @@server_uuid;`
 
 --disable_warnings
 DROP TABLE IF EXISTS t1;
@@ -20,8 +20,8 @@ INSERT INTO t1 VALUES (1,1);
 # Read-only, long-running transaction. SingleDelete/Put shouldn't increase much.
 select variable_value into @p from information_schema.global_status where variable_name='rocksdb_number_sst_entry_put';
 select variable_value into @s from information_schema.global_status where variable_name='rocksdb_number_sst_entry_singledelete';
--- replace_result $uuid uuid
-START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
+#-- replace_result $uuid uuid
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
 connection con2;
 
 --disable_query_log
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result b/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result
index dff746fa280..1a09412ea08 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/checkpoint_lock.result
@@ -17,7 +17,7 @@ flush logs;;
 connection conn1;
 select DB, command, state, info from information_schema.processlist where id != connection_id();
 DB	command	state	info
-test	Query	init	flush logs
+test	Query	Init	flush logs
 set tokudb_checkpoint_lock=0;
 connection default;
 disconnect conn1;