Diffstat (limited to 'storage')
-rw-r--r--  storage/archive/ha_archive.cc | 2
-rw-r--r--  storage/connect/filamvct.cpp | 22
-rw-r--r--  storage/connect/tabvct.cpp | 6
-rw-r--r--  storage/csv/ha_tina.cc | 2
-rw-r--r--  storage/federated/ha_federated.cc | 2
-rw-r--r--  storage/federatedx/ha_federatedx.cc | 2
-rw-r--r--  storage/heap/hp_hash.c | 2
-rw-r--r--  storage/innobase/btr/btr0defragment.cc | 4
-rw-r--r--  storage/innobase/buf/buf0lru.cc | 11
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 7
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 198
-rw-r--r--  storage/innobase/handler/ha_innodb.h | 3
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 12
-rw-r--r--  storage/innobase/include/buf0lru.h | 8
-rw-r--r--  storage/innobase/include/dict0mem.h | 4
-rw-r--r--  storage/innobase/include/fil0fil.ic | 2
-rw-r--r--  storage/innobase/include/fsp0fsp.h | 2
-rw-r--r--  storage/innobase/include/row0mysql.h | 39
-rw-r--r--  storage/innobase/include/row0sel.h | 10
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 2
-rw-r--r--  storage/innobase/log/log0recv.cc | 22
-rw-r--r--  storage/innobase/os/os0file.cc | 12
-rw-r--r--  storage/innobase/row/row0import.cc | 28
-rw-r--r--  storage/innobase/row/row0ins.cc | 20
-rw-r--r--  storage/innobase/row/row0merge.cc | 27
-rw-r--r--  storage/innobase/row/row0mysql.cc | 50
-rw-r--r--  storage/innobase/row/row0sel.cc | 70
-rw-r--r--  storage/innobase/row/row0upd.cc | 23
-rw-r--r--  storage/innobase/row/row0vers.cc | 18
-rw-r--r--  storage/innobase/srv/srv0start.cc | 4
-rw-r--r--  storage/maria/ma_control_file.c | 2
-rw-r--r--  storage/maria/ma_key.c | 2
-rw-r--r--  storage/maria/ma_loghandler.c | 225
-rw-r--r--  storage/maria/ma_open.c | 8
-rw-r--r--  storage/mroonga/data/install.sql.in | 6
-rw-r--r--  storage/myisam/ha_myisam.cc | 2
-rw-r--r--  storage/myisam/mi_key.c | 2
-rw-r--r--  storage/myisam/mi_open.c | 8
-rw-r--r--  storage/rocksdb/build_rocksdb.cmake | 17
-rw-r--r--  storage/tokudb/CMakeLists.txt | 3
-rw-r--r--  storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/ft-ops.cc | 8
-rw-r--r--  storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc | 16
-rw-r--r--  storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc | 116
-rw-r--r--  storage/tokudb/PerconaFT/portability/memory.h | 2
-rw-r--r--  storage/tokudb/PerconaFT/portability/toku_debug_sync.h | 3
-rw-r--r--  storage/tokudb/PerconaFT/portability/toku_pthread.h | 12
-rw-r--r--  storage/tokudb/PerconaFT/src/CMakeLists.txt | 2
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh | 0
-rwxr-xr-x [-rw-r--r--]  storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh | 0
-rw-r--r--  storage/tokudb/PerconaFT/util/dmt.cc | 4
-rw-r--r--  storage/tokudb/PerconaFT/util/omt.h | 8
-rw-r--r--  storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result | 68
-rw-r--r--  storage/tokudb/tokudb.cnf.in | 1
-rw-r--r--  storage/tokudb/tokudb.conf.in | 1
-rw-r--r--  storage/tokudb/tokudb_thread.h | 49
66 files changed, 784 insertions(+), 397 deletions(-)
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 42d2c219ebf..487c0038239 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -1909,7 +1909,7 @@ maria_declare_plugin(archive)
&archive_storage_engine,
"ARCHIVE",
"Brian Aker, MySQL AB",
- "Archive storage engine",
+ "gzip-compresses tables for a low storage footprint",
PLUGIN_LICENSE_GPL,
archive_db_init, /* Plugin Init */
NULL, /* Plugin Deinit */
diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp
index 244acfdc5c8..a660461e9ee 100644
--- a/storage/connect/filamvct.cpp
+++ b/storage/connect/filamvct.cpp
@@ -515,7 +515,8 @@ bool VCTFAM::AllocateBuffer(PGLOBAL g)
for (; cp; cp = (PVCTCOL)cp->Next)
cp->Blk = AllocValBlock(g, NewBlock + Nrec * cp->Deplac,
cp->Buf_Type, Nrec, cp->Format.Length,
- cp->Format.Prec, chk);
+ cp->Format.Prec, chk, true,
+ cp->IsUnsigned());
return InitInsert(g); // Initialize inserting
} else {
@@ -549,7 +550,8 @@ bool VCTFAM::AllocateBuffer(PGLOBAL g)
for (; cp; cp = (PVCTCOL)cp->Next)
if (!cp->IsSpecial()) // Not a pseudo column
cp->Blk = AllocValBlock(g, NULL, cp->Buf_Type, Nrec,
- cp->Format.Length, cp->Format.Prec);
+ cp->Format.Length, cp->Format.Prec,
+ true, true, cp->IsUnsigned());
} //endif mode
@@ -1516,7 +1518,8 @@ bool VCMFAM::AllocateBuffer(PGLOBAL g)
for (cp = (PVCTCOL)Tdbp->GetColumns(); cp; cp = (PVCTCOL)cp->Next)
if (!cp->IsSpecial()) { // Not a pseudo column
cp->Blk = AllocValBlock(g, (void*)1, cp->Buf_Type, Nrec,
- cp->Format.Length, cp->Format.Prec);
+ cp->Format.Length, cp->Format.Prec,
+ true, true, cp->IsUnsigned());
cp->AddStatus(BUF_MAPPED);
} // endif IsSpecial
@@ -2067,7 +2070,7 @@ bool VECFAM::AllocateBuffer(PGLOBAL g)
for (cp = (PVCTCOL)tdbp->Columns; cp; cp = (PVCTCOL)cp->Next)
cp->Blk = AllocValBlock(g, To_Bufs[cp->Index - 1],
cp->Buf_Type, Nrec, cp->Format.Length,
- cp->Format.Prec, chk);
+ cp->Format.Prec, chk, true, cp->IsUnsigned());
return InitInsert(g);
} else {
@@ -2116,7 +2119,8 @@ bool VECFAM::AllocateBuffer(PGLOBAL g)
for (cp = (PVCTCOL)tdbp->Columns; cp; cp = (PVCTCOL)cp->Next)
if (!cp->IsSpecial()) // Not a pseudo column
cp->Blk = AllocValBlock(g, NULL, cp->Buf_Type, Nrec,
- cp->Format.Length, cp->Format.Prec);
+ cp->Format.Length, cp->Format.Prec,
+ true, true, cp->IsUnsigned());
} // endif mode
@@ -2887,7 +2891,8 @@ bool VMPFAM::AllocateBuffer(PGLOBAL g)
for (cp = (PVCTCOL)Tdbp->GetColumns(); cp; cp = (PVCTCOL)cp->Next)
if (!cp->IsSpecial()) { // Not a pseudo column
cp->Blk = AllocValBlock(g, (void*)1, cp->Buf_Type, Nrec,
- cp->Format.Length, cp->Format.Prec);
+ cp->Format.Length, cp->Format.Prec,
+ true, true, cp->IsUnsigned());
cp->AddStatus(BUF_MAPPED);
} // endif IsSpecial
@@ -3669,7 +3674,7 @@ bool BGVFAM::AllocateBuffer(PGLOBAL g)
for (; cp; cp = (PVCTCOL)cp->Next)
cp->Blk = AllocValBlock(g, NewBlock + Nrec * cp->Deplac,
cp->Buf_Type, Nrec, cp->Format.Length,
- cp->Format.Prec, chk);
+ cp->Format.Prec, chk, true, cp->IsUnsigned());
InitInsert(g); // Initialize inserting
@@ -3717,7 +3722,8 @@ bool BGVFAM::AllocateBuffer(PGLOBAL g)
for (; cp; cp = (PVCTCOL)cp->Next)
if (!cp->IsSpecial()) // Not a pseudo column
cp->Blk = AllocValBlock(g, NULL, cp->Buf_Type, Nrec,
- cp->Format.Length, cp->Format.Prec);
+ cp->Format.Length, cp->Format.Prec,
+ true, true, cp->IsUnsigned());
} //endif mode
diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp
index 11b344ef652..40d020202ea 100644
--- a/storage/connect/tabvct.cpp
+++ b/storage/connect/tabvct.cpp
@@ -456,13 +456,11 @@ bool VCTCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
if (tdbp->Txfp->GetAmType() == TYPE_AM_VMP && ok) {
Blk = AllocValBlock(g, (void*)1, Buf_Type, tdbp->Txfp->Nrec,
- Format.Length,
- Format.Prec, check);
+ Format.Length, Format.Prec, check, true, Unsigned);
Status |= BUF_MAPPED; // Will point into mapped file
} else
Blk = AllocValBlock(g, NULL, Buf_Type, tdbp->Txfp->Nrec,
- Format.Length,
- Format.Prec, check);
+ Format.Length, Format.Prec, check, true, Unsigned);
} // endif Mode
return false;
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index f465ee2e947..0e092e2fd90 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -1788,7 +1788,7 @@ maria_declare_plugin(csv)
&csv_storage_engine,
"CSV",
"Brian Aker, MySQL AB",
- "CSV storage engine",
+ "Stores tables as CSV files",
PLUGIN_LICENSE_GPL,
tina_init_func, /* Plugin Init */
tina_done_func, /* Plugin Deinit */
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index 98d4ef4d7f9..4aed06885e5 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -3410,7 +3410,7 @@ maria_declare_plugin(federated)
&federated_storage_engine,
"FEDERATED",
"Patrick Galbraith and Brian Aker, MySQL AB",
- "Federated MySQL storage engine",
+ "Allows to access tables on other MariaDB servers",
PLUGIN_LICENSE_GPL,
federated_db_init, /* Plugin Init */
federated_done, /* Plugin Deinit */
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index 57cf66f4b18..74d547cb674 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -3678,7 +3678,7 @@ maria_declare_plugin(federatedx)
&federatedx_storage_engine,
"FEDERATED",
"Patrick Galbraith",
- "FederatedX pluggable storage engine",
+ "Allows to access tables on other MariaDB servers, supports transactions and more",
PLUGIN_LICENSE_GPL,
federatedx_db_init, /* Plugin Init */
federatedx_done, /* Plugin Deinit */
diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c
index 847483ba9bf..46ac0270300 100644
--- a/storage/heap/hp_hash.c
+++ b/storage/heap/hp_hash.c
@@ -980,7 +980,7 @@ void heap_update_auto_increment(HP_INFO *info, const uchar *record)
switch (info->s->auto_key_type) {
case HA_KEYTYPE_INT8:
- s_value= (longlong) *(char*)key;
+ s_value= (longlong) *(const signed char*) key;
break;
case HA_KEYTYPE_BINARY:
value=(ulonglong) *(uchar*) key;
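
The cast fix above matters because ISO C leaves the signedness of plain
char implementation-defined: on targets where it is unsigned (common on
ARM and PowerPC ABIs), the old cast never sign-extended negative INT8 key
values. A minimal sketch of the difference, assuming such a target:

    #include <stdio.h>

    int main(void)
    {
        unsigned char key_byte = 0xFF;           /* an INT8 key holding -1 */
        /* Where plain char is unsigned, this yields 255, not -1: */
        long long via_char  = (long long) *(char*) &key_byte;
        /* signed char guarantees sign extension on every target: */
        long long via_schar = (long long) *(signed char*) &key_byte;
        printf("%lld %lld\n", via_char, via_schar);
        return 0;
    }
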
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index 0e11b6af8a4..6f36198f129 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -778,7 +778,7 @@ DECLARE_THREAD(btr_defragment_thread)(void*)
err = dict_stats_save_defrag_stats(index);
if (err != DB_SUCCESS) {
ib::error() << "Saving defragmentation stats for table "
- << index->table->name.m_name
+ << index->table->name
<< " index " << index->name()
<< " failed with error " << err;
} else {
@@ -786,7 +786,7 @@ DECLARE_THREAD(btr_defragment_thread)(void*)
if (err != DB_SUCCESS) {
ib::error() << "Saving defragmentation summary for table "
- << index->table->name.m_name
+ << index->table->name
<< " index " << index->name()
<< " failed with error " << err;
}
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 252f37b4495..bf0891840f8 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -353,9 +353,10 @@ next_page:
ut_free(page_arr);
}
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out] table table */
-void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out] table table
+@return whether anything was dropped */
+bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
{
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
@@ -366,13 +367,15 @@ void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
}
}
- return;
+ return false;
drop_ahi:
ulint id = table->space_id;
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
id);
}
+
+ return true;
}
#endif /* BTR_CUR_HASH_ADAPT */
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 9415624465f..31b78f94ab6 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -569,7 +569,7 @@ dict_table_close_and_drop(
if (err != DB_SUCCESS) {
ib::error() << "At " << __FILE__ << ":" << __LINE__
<< " row_merge_drop_table returned error: " << err
- << " table: " << table->name.m_name;
+ << " table: " << table->name;
}
}
@@ -2589,12 +2589,11 @@ dict_index_remove_from_cache_low(
zero. See also: dict_table_can_be_evicted() */
do {
- if (!btr_search_info_get_ref_count(info, index)) {
+ if (!btr_search_info_get_ref_count(info, index)
+ || !buf_LRU_drop_page_hash_for_tablespace(table)) {
break;
}
- buf_LRU_drop_page_hash_for_tablespace(table);
-
ut_a(++retries < 10000);
} while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
#endif /* BTR_CUR_HASH_ADAPT */
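
The rewritten loop above folds the new boolean return value into the exit
condition: iteration stops either when the adaptive hash index holds no
more references to the index, or when a full pass over the buffer pool
dropped nothing, in which case further passes cannot make progress. An
equivalent spelled-out form, as a sketch (the shutdown/eviction guard of
the do-while is omitted):

    for (ulint retries = 0;;) {
        if (!btr_search_info_get_ref_count(info, index)) {
            break;  /* no AHI references remain */
        }
        if (!buf_LRU_drop_page_hash_for_tablespace(table)) {
            break;  /* nothing was dropped; retrying cannot help */
        }
        ut_a(++retries < 10000);
    }
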
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 66a28ea56b7..a74fa32da58 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -2445,7 +2445,11 @@ innobase_mysql_tmpfile(
}
return hDup;
#else
+#ifdef F_DUPFD_CLOEXEC
+ int fd2 = fcntl(fd, F_DUPFD_CLOEXEC, 0);
+#else
int fd2 = dup(fd);
+#endif
my_close(fd, MYF(MY_WME));
if (fd2 < 0) {
set_my_errno(errno);
@@ -3080,6 +3084,83 @@ AUTOCOMMIT==0 or we are inside BEGIN ... COMMIT. Thus transactions no longer
put restrictions on the use of the query cache.
*/
+/** Check if mysql can allow the transaction to read from/store to
+the query cache.
+@param[in] table table object
+@param[in] trx transaction object
+@return whether the storing or retrieving from the query cache is permitted */
+static bool innobase_query_caching_table_check_low(
+ const dict_table_t* table,
+ trx_t* trx)
+{
+ /* The following conditions decide whether the query cache
+ may be used for retrieval or for storing:
+
+ (1) There should not be any locks on the table.
+ (2) Some other trx shouldn't have invalidated the cache before
+ this transaction started.
+ (3) A read view shouldn't exist. If one exists, then its
+ low_limit_id should be greater than or equal to the transaction
+ that invalidated the cache for this particular table.
+
+ For read-only transaction: should satisfy (1) and (3)
+ For read-write transaction: should satisfy (1), (2), (3) */
+
+ if (lock_table_get_n_locks(table)) {
+ return false;
+ }
+
+ if (trx->id && trx->id < table->query_cache_inv_trx_id) {
+ return false;
+ }
+
+ return !trx->read_view.is_open()
+ || trx->read_view.low_limit_id()
+ >= table->query_cache_inv_trx_id;
+}
+
+/** Checks if MySQL at the moment is allowed for this table to retrieve a
+consistent read result, or store it to the query cache.
+@param[in,out] trx transaction
+@param[in] norm_name concatenation of database name,
+ '/' char, table name
+@return whether storing or retrieving from the query cache is permitted */
+static bool innobase_query_caching_table_check(
+ trx_t* trx,
+ const char* norm_name)
+{
+ dict_table_t* table = dict_table_open_on_name(
+ norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
+
+ if (table == NULL) {
+ return false;
+ }
+
+ /* Start the transaction if it is not started yet */
+ trx_start_if_not_started(trx, false);
+
+ bool allow = innobase_query_caching_table_check_low(table, trx);
+
+ dict_table_close(table, FALSE, FALSE);
+
+ if (allow) {
+ /* If the isolation level is high, assign a read view for the
+ transaction if it does not yet have one */
+
+ if (trx->isolation_level >= TRX_ISO_REPEATABLE_READ
+ && !srv_read_only_mode
+ && !trx->read_view.is_open()) {
+
+ /* Start the transaction if it is not started yet */
+ trx_start_if_not_started(trx, false);
+
+ trx->read_view.open(trx);
+ }
+ }
+
+ return allow;
+}
+
/******************************************************************//**
The MySQL query cache uses this to check from InnoDB if the query cache at
the moment is allowed to operate on an InnoDB table. The SQL query must
@@ -3155,7 +3236,7 @@ innobase_query_caching_of_table_permitted(
innobase_register_trx(innodb_hton_ptr, thd, trx);
- return(row_search_check_if_query_cache_permitted(trx, norm_name));
+ return innobase_query_caching_table_check(trx, norm_name);
}
/*****************************************************************//**
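
Conditions (1)-(3) from the comment in the new
innobase_query_caching_table_check_low() reduce to one short-circuit
predicate. A standalone sketch with simplified stand-in types (not the
real InnoDB declarations):

    #include <cstdint>
    #include <cstdio>

    struct view_t  { bool open; std::uint64_t low_limit_id; };
    struct table_t { unsigned n_locks; std::uint64_t query_cache_inv_trx_id; };

    static bool query_cache_permitted(const table_t& t, std::uint64_t trx_id,
                                      const view_t& view)
    {
        if (t.n_locks)                            /* (1) table is locked */
            return false;
        if (trx_id && trx_id < t.query_cache_inv_trx_id)
            return false;                         /* (2) cache invalidated
                                                     after this trx began */
        return !view.open                         /* (3) no read view, or the
                                                     view saw the invalidating
                                                     transaction */
            || view.low_limit_id >= t.query_cache_inv_trx_id;
    }

    int main()
    {
        table_t t = { 0, 50 };
        view_t  v = { true, 60 };
        std::printf("%d\n", query_cache_permitted(t, 55, v)); /* prints 1 */
        return 0;
    }
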
@@ -8222,13 +8303,12 @@ report_error:
error, m_prebuilt->table->flags, m_user_thd);
#ifdef WITH_WSREP
- if (!error_result &&
- wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE &&
- wsrep_on(m_user_thd) &&
- !wsrep_consistency_check(m_user_thd) &&
- !wsrep_thd_ignore_table(m_user_thd)) {
- if (wsrep_append_keys(m_user_thd, false, record, NULL))
- {
+ if (!error_result
+ && wsrep_on(m_user_thd)
+ && wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE
+ && !wsrep_consistency_check(m_user_thd)
+ && !wsrep_thd_ignore_table(m_user_thd)) {
+ if (wsrep_append_keys(m_user_thd, false, record, NULL)) {
DBUG_PRINT("wsrep", ("row key failed"));
error_result = HA_ERR_INTERNAL_ERROR;
goto wsrep_error;
@@ -18723,8 +18803,10 @@ wsrep_innobase_kill_one_trx(
thd_get_thread_id(thd),
victim_trx->id);
- WSREP_DEBUG("Aborting query: %s",
- (thd && wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void");
+ WSREP_DEBUG("Aborting query: %s conf %d trx: %lu",
+ (thd && wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void",
+ wsrep_thd_conflict_state(thd, FALSE),
+ wsrep_thd_ws_handle(thd)->trx_id);
wsrep_thd_LOCK(thd);
DBUG_EXECUTE_IF("sync.wsrep_after_BF_victim_lock",
@@ -18787,7 +18869,7 @@ wsrep_innobase_kill_one_trx(
wsrep_t *wsrep= get_wsrep();
rcode = wsrep->abort_pre_commit(
wsrep, bf_seqno,
- (wsrep_trx_id_t)victim_trx->id
+ (wsrep_trx_id_t)wsrep_thd_ws_handle(thd)->trx_id
);
switch (rcode) {
@@ -18912,12 +18994,14 @@ wsrep_abort_transaction(
my_bool signal)
{
DBUG_ENTER("wsrep_innobase_abort_thd");
- trx_t* victim_trx = thd_to_trx(victim_thd);
- trx_t* bf_trx = (bf_thd) ? thd_to_trx(bf_thd) : NULL;
- WSREP_DEBUG("abort transaction: BF: %s victim: %s",
- wsrep_thd_query(bf_thd),
- wsrep_thd_query(victim_thd));
+ trx_t* victim_trx = thd_to_trx(victim_thd);
+ trx_t* bf_trx = (bf_thd) ? thd_to_trx(bf_thd) : NULL;
+
+ WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %d",
+ wsrep_thd_query(bf_thd),
+ wsrep_thd_query(victim_thd),
+ wsrep_thd_conflict_state(victim_thd, FALSE));
if (victim_trx) {
lock_mutex_enter();
@@ -20700,6 +20784,77 @@ innobase_get_field_from_update_vector(
return (NULL);
}
+
+/**
+ Allocate a heap and record for calculating virtual fields
+ Used mainly for virtual fields in indexes
+
+@param[in] thd MariaDB THD
+@param[in] index Index in use
+@param[out] heap Heap that holds temporary row
+@param[in,out] table MariaDB table
+@param[out] record Pointer to allocated MariaDB record
+@param[out] storage Internal storage for blobs etc
+
+@return FALSE ok
+@return TRUE malloc failure
+*/
+
+bool innobase_allocate_row_for_vcol(
+ THD * thd,
+ dict_index_t* index,
+ mem_heap_t** heap,
+ TABLE** table,
+ byte** record,
+ VCOL_STORAGE** storage)
+{
+ TABLE *maria_table;
+ String *blob_value_storage;
+ if (!*table)
+ *table= innobase_find_mysql_table_for_vc(thd, index->table);
+ maria_table= *table;
+ if (!*heap && !(*heap= mem_heap_create(srv_page_size)))
+ {
+ *storage= 0;
+ return TRUE;
+ }
+ *record= static_cast<byte*>(mem_heap_alloc(*heap,
+ maria_table->s->reclength));
+ *storage= static_cast<VCOL_STORAGE*>
+ (mem_heap_alloc(*heap, sizeof(**storage)));
+ blob_value_storage= static_cast<String*>
+ (mem_heap_alloc(*heap,
+ maria_table->s->virtual_not_stored_blob_fields *
+ sizeof(String)));
+ if (!*record || !*storage || !blob_value_storage)
+ {
+ *storage= 0;
+ return TRUE;
+ }
+ (*storage)->maria_table= maria_table;
+ (*storage)->innobase_record= *record;
+ (*storage)->maria_record= maria_table->field[0]->record_ptr();
+ (*storage)->blob_value_storage= blob_value_storage;
+
+ maria_table->move_fields(maria_table->field, *record,
+ (*storage)->maria_record);
+ maria_table->remember_blob_values(blob_value_storage);
+
+ return FALSE;
+}
+
+
+/** Free memory allocated by innobase_allocate_row_for_vcol() */
+
+void innobase_free_row_for_vcol(VCOL_STORAGE *storage)
+{
+ TABLE *maria_table= storage->maria_table;
+ maria_table->move_fields(maria_table->field, storage->maria_record,
+ storage->innobase_record);
+ maria_table->restore_blob_values(storage->blob_value_storage);
+}
+
+
/** Get the computed value by supplying the base column values.
@param[in,out] row the data row
@param[in] col virtual column
@@ -20725,12 +20880,12 @@ innobase_get_computed_value(
const dict_field_t* ifield,
THD* thd,
TABLE* mysql_table,
+ byte* mysql_rec,
const dict_table_t* old_table,
upd_t* parent_update,
dict_foreign_t* foreign)
{
byte rec_buf2[REC_VERSION_56_MAX_INDEX_COL_LEN];
- byte* mysql_rec;
byte* buf;
dfield_t* field;
ulint len;
@@ -20743,6 +20898,7 @@ innobase_get_computed_value(
ut_ad(index->table->vc_templ);
ut_ad(thd != NULL);
+ ut_ad(mysql_table);
const mysql_row_templ_t*
vctempl = index->table->vc_templ->vtempl[
@@ -20760,14 +20916,6 @@ innobase_get_computed_value(
buf = rec_buf2;
}
- if (!mysql_table) {
- mysql_table = innobase_find_mysql_table_for_vc(thd, index->table);
- }
-
- ut_ad(mysql_table);
-
- mysql_rec = mysql_table->record[0];
-
for (ulint i = 0; i < col->num_base; i++) {
dict_col_t* base_col = col->base_col[i];
const dfield_t* row_field = NULL;
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index d4fda9d43c5..f2bb8aa5ef2 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -749,6 +749,9 @@ public:
ulint flags() const
{ return(m_flags); }
+ /** Update table flags. */
+ void flags_set(ulint flags) { m_flags |= flags; }
+
/** Get table flags2. */
ulint flags2() const
{ return(m_flags2); }
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index fbec55f97f4..3f251b2f6f6 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -6436,6 +6436,13 @@ check_if_ok_to_rename:
goto err_exit_no_heap;
}
+ if (info.flags2() & DICT_TF2_USE_FILE_PER_TABLE) {
+ /* Preserve the DATA DIRECTORY attribute, because it
+ currently cannot be changed during ALTER TABLE. */
+ info.flags_set(m_prebuilt->table->flags
+ & 1U << DICT_TF_POS_DATA_DIR);
+ }
+
max_col_len = DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(info.flags());
/* Check each index's column length to make sure they do not
@@ -9974,12 +9981,11 @@ foreign_fail:
error = row_merge_drop_table(trx, ctx->old_table);
if (error != DB_SUCCESS) {
- ib::error() << "Inplace alter table " << ctx->old_table->name.m_name
+ ib::error() << "Inplace alter table " << ctx->old_table->name
<< " dropping copy of the old table failed error "
<< error
<< ". tmp_name " << (ctx->tmp_name ? ctx->tmp_name : "N/A")
- << " new_table " << (ctx->new_table ? ctx->new_table->name.m_name
- : "N/A");
+ << " new_table " << ctx->new_table->name;
}
trx_commit_for_mysql(trx);
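
The flags_set() call in the first hunk preserves exactly one bit from the
old table flags across the rebuild. The idiom in isolation, with an
assumed bit position purely for illustration:

    #include <cstdio>

    int main()
    {
        const unsigned pos_data_dir = 6;  /* assumed DICT_TF_POS_DATA_DIR */
        unsigned old_flags = (1U << pos_data_dir) | 0x21;
        unsigned new_flags = 0x05;
        /* Keep only the DATA DIRECTORY bit from the old flag set: */
        new_flags |= old_flags & (1U << pos_data_dir);
        std::printf("%#x\n", new_flags);  /* prints 0x45 */
        return 0;
    }
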
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index f811b5a6811..dd7129a86ac 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -53,9 +53,11 @@ These are low-level functions
#ifdef BTR_CUR_HASH_ADAPT
struct dict_table_t;
-/** Drop the adaptive hash index for a tablespace.
-@param[in,out] table table */
-void buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table);
+/** Try to drop the adaptive hash index for a tablespace.
+@param[in,out] table table
+@return whether anything was dropped */
+bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
+ MY_ATTRIBUTE((warn_unused_result,nonnull));
#else
# define buf_LRU_drop_page_hash_for_tablespace(table)
#endif /* BTR_CUR_HASH_ADAPT */
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 154a503f1b3..948b244a7fc 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -1737,8 +1737,8 @@ struct dict_table_t {
/** Transactions whose view low limit is greater than this number are
not allowed to store to the MySQL query cache or retrieve from it.
When a trx with undo logs commits, it sets this to the value of the
- current time. */
- trx_id_t query_cache_inv_id;
+ transaction id. */
+ trx_id_t query_cache_inv_trx_id;
/** Transaction id that last touched the table definition. Either when
loading the definition or CREATE TABLE, or ALTER TABLE (prepare,
diff --git a/storage/innobase/include/fil0fil.ic b/storage/innobase/include/fil0fil.ic
index 1dd4c64f73e..2a7d06e243f 100644
--- a/storage/innobase/include/fil0fil.ic
+++ b/storage/innobase/include/fil0fil.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2015, 2017, MariaDB Corporation.
+Copyright (c) 2015, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 161d7a30ea4..3222f1c761a 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -686,7 +686,7 @@ fsp_flags_convert_from_101(ulint flags)
/* Bits 13..16 are the wrong position for PAGE_SSIZE, and they
should contain one of the values 3,4,6,7, that is, be of the form
- 0b0011 or 0b01xx (except 0b0110).
+ 0b0011 or 0b01xx (except 0b0101).
In correct versions, these bits should be 0bc0se
where c is the MariaDB COMPRESSED flag
and e is the MySQL 5.7 ENCRYPTION flag
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 7c0b5d3ece9..4d8b055e13f 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -879,6 +879,44 @@ struct SysIndexCallback {
virtual void operator()(mtr_t* mtr, btr_pcur_t* pcur) throw() = 0;
};
+
+/** Storage for calculating virtual columns */
+
+class String;
+struct VCOL_STORAGE
+{
+ TABLE *maria_table;
+ byte *innobase_record;
+ byte *maria_record;
+ String *blob_value_storage;
+};
+
+/**
+ Allocate a heap and record for calculating virtual fields
+ Used mainly for virtual fields in indexes
+
+@param[in] thd MariaDB THD
+@param[in] index Index in use
+@param[out] heap Heap that holds temporary row
+@param[in,out] table MariaDB table
+@param[out] record Pointer to allocated MariaDB record
+@param[out] storage Internal storage for blobs etc
+
+@return FALSE ok
+@return TRUE malloc failure
+*/
+
+bool innobase_allocate_row_for_vcol(
+ THD * thd,
+ dict_index_t* index,
+ mem_heap_t** heap,
+ TABLE** table,
+ byte** record,
+ VCOL_STORAGE** storage);
+
+/** Free memory allocated by innobase_allocate_row_for_vcol() */
+void innobase_free_row_for_vcol(VCOL_STORAGE *storage);
+
/** Get the computed value by supplying the base column values.
@param[in,out] row the data row
@param[in] col virtual column
@@ -903,6 +941,7 @@ innobase_get_computed_value(
const dict_field_t* ifield,
THD* thd,
TABLE* mysql_table,
+ byte* mysql_rec,
const dict_table_t* old_table,
upd_t* parent_update,
dict_foreign_t* foreign);
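
A usage sketch of the new helper pair, modeled on the call sites later in
this patch (row0ins.cc, row0merge.cc); error handling is trimmed and the
surrounding variables (thd, index, row, v_col, heap) are assumed from the
caller:

    mem_heap_t*   v_heap       = NULL;
    TABLE*        mysql_table  = NULL;  /* looked up by the helper if NULL */
    VCOL_STORAGE* vcol_storage = NULL;
    byte*         record;

    if (innobase_allocate_row_for_vcol(thd, index, &v_heap, &mysql_table,
                                       &record, &vcol_storage)) {
        return DB_OUT_OF_MEMORY;        /* TRUE means malloc failure */
    }

    /* record now backs every virtual-column evaluation: */
    dfield_t* vfield = innobase_get_computed_value(
        row, v_col, index, &v_heap, heap, NULL, thd, mysql_table,
        record, NULL, NULL, NULL);

    if (vcol_storage)
        innobase_free_row_for_vcol(vcol_storage); /* restore field pointers */
    if (v_heap)
        mem_heap_free(v_heap);
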
diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h
index 366c24acec8..ef0ccbbda9f 100644
--- a/storage/innobase/include/row0sel.h
+++ b/storage/innobase/include/row0sel.h
@@ -212,16 +212,6 @@ row_count_rtree_recs(
ulint* n_rows); /*!< out: number of entries
seen in the consistent read */
-/*******************************************************************//**
-Checks if MySQL at the moment is allowed for this table to retrieve a
-consistent read result, or store it to the query cache.
-@return whether storing or retrieving from the query cache is permitted */
-bool
-row_search_check_if_query_cache_permitted(
-/*======================================*/
- trx_t* trx, /*!< in: transaction object */
- const char* norm_name); /*!< in: concatenation of database name,
- '/' char, table name */
/** Read the max AUTOINC value from an index.
@param[in] index index starting with an AUTO_INCREMENT column
@return the largest AUTO_INCREMENT value
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index b5494ef7313..4987d60dd5a 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -4325,7 +4325,7 @@ lock_release(
block the use of the MySQL query cache for
all currently active transactions. */
- table->query_cache_inv_id = max_trx_id;
+ table->query_cache_inv_trx_id = max_trx_id;
}
lock_table_dequeue(lock);
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index b2c66bd389b..1f1829370c3 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -1684,6 +1684,11 @@ recv_recover_page(bool just_read_in, buf_block_t* block)
ut_ad(recv_needed_recovery);
+ if (UNIV_UNLIKELY(srv_print_verbose_log == 2)) {
+ fprintf(stderr, "Applying log to page %u:%u\n",
+ recv_addr->space, recv_addr->page_no);
+ }
+
DBUG_LOG("ib_log", "Applying log to page " << block->page.id);
recv_addr->state = RECV_BEING_PROCESSED;
@@ -1779,6 +1784,13 @@ recv_recover_page(bool just_read_in, buf_block_t* block)
start_lsn = recv->start_lsn;
}
+ if (UNIV_UNLIKELY(srv_print_verbose_log == 2)) {
+ fprintf(stderr, "apply " LSN_PF ":"
+ " %d len " ULINTPF " page %u:%u\n",
+ recv->start_lsn, recv->type, recv->len,
+ recv_addr->space, recv_addr->page_no);
+ }
+
DBUG_LOG("ib_log", "apply " << recv->start_lsn << ": "
<< get_mlog_string(recv->type)
<< " len " << recv->len
@@ -2327,6 +2339,16 @@ loop:
compile_time_assert(SIZE_OF_MLOG_CHECKPOINT == 1 + 8);
lsn = mach_read_from_8(ptr + 1);
+ if (UNIV_UNLIKELY(srv_print_verbose_log == 2)) {
+ fprintf(stderr,
+ "MLOG_CHECKPOINT(" LSN_PF ") %s at "
+ LSN_PF "\n", lsn,
+ lsn != checkpoint_lsn ? "ignored"
+ : recv_sys->mlog_checkpoint_lsn
+ ? "reread" : "read",
+ recv_sys->recovered_lsn);
+ }
+
DBUG_PRINT("ib_log",
("MLOG_CHECKPOINT(" LSN_PF ") %s at "
LSN_PF,
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index da3b2822fa0..5d9ad7bab67 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -2703,7 +2703,7 @@ os_file_create_simple_func(
bool retry;
do {
- file = open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag | O_CLOEXEC, os_innodb_umask);
if (file == -1) {
*success = false;
@@ -3005,7 +3005,7 @@ os_file_create_func(
bool retry;
do {
- file = open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag | O_CLOEXEC, os_innodb_umask);
if (file == -1) {
const char* operation;
@@ -3139,7 +3139,7 @@ os_file_create_simple_no_error_handling_func(
return(OS_FILE_CLOSED);
}
- file = open(name, create_flag, os_innodb_umask);
+ file = open(name, create_flag | O_CLOEXEC, os_innodb_umask);
*success = (file != -1);
@@ -4990,12 +4990,12 @@ os_file_write_func(
<< offset << ", " << n
<< " bytes should have been written,"
" only " << n_bytes << " were written."
- " Operating system error number " << errno << "."
+ " Operating system error number " << IF_WIN(GetLastError(),errno) << "."
" Check that your OS and file system"
" support files of this size."
" Check also that the disk is not full"
" or a disk quota exceeded.";
-
+#ifndef _WIN32
if (strerror(errno) != NULL) {
ib::error()
@@ -5004,7 +5004,7 @@ os_file_write_func(
}
ib::info() << OPERATING_SYSTEM_ERROR_MSG;
-
+#endif
os_has_said_disk_full = true;
}
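
The O_CLOEXEC additions in this file, together with the F_DUPFD_CLOEXEC
hunk in ha_innodb.cc above, close the window in which a forked child could
inherit InnoDB file descriptors across exec. The portable pattern, as a
sketch assuming POSIX fcntl():

    #include <fcntl.h>
    #include <unistd.h>

    /* Duplicate fd with close-on-exec set; atomic where supported. */
    static int dup_cloexec(int fd)
    {
    #ifdef F_DUPFD_CLOEXEC
        return fcntl(fd, F_DUPFD_CLOEXEC, 0);   /* no fork/exec race window */
    #else
        int fd2 = dup(fd);
        if (fd2 >= 0)
            (void) fcntl(fd2, F_SETFD, FD_CLOEXEC); /* best effort, racy */
        return fd2;
    #endif
    }
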
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index 415951e921f..0357804f617 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -3873,6 +3873,23 @@ row_import_for_mysql(
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;);
+ /* On DISCARD TABLESPACE, we did not drop any adaptive hash
+ index entries. If we replaced the discarded tablespace with a
+ smaller one here, there could still be some adaptive hash
+ index entries that point to cached garbage pages in the buffer
+ pool, because PageConverter::operator() only evicted those
+ pages that were replaced by the imported pages. We must
+ discard all remaining adaptive hash index entries, because the
+ adaptive hash index must be a subset of the table contents;
+ false positives are not tolerated. */
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ break;
+ }
+ }
+
if (err != DB_SUCCESS) {
char table_name[MAX_FULL_NAME_LEN + 1];
@@ -3891,17 +3908,6 @@ row_import_for_mysql(
return(row_import_cleanup(prebuilt, trx, err));
}
- /* On DISCARD TABLESPACE, we did not drop any adaptive hash
- index entries. If we replaced the discarded tablespace with a
- smaller one here, there could still be some adaptive hash
- index entries that point to cached garbage pages in the buffer
- pool, because PageConverter::operator() only evicted those
- pages that were replaced by the imported pages. We must
- discard all remaining adaptive hash index entries, because the
- adaptive hash index must be a subset of the table contents;
- false positives are not tolerated. */
- buf_LRU_drop_page_hash_for_tablespace(table);
-
row_mysql_lock_data_dictionary(trx);
/* If the table is stored in a remote tablespace, we need to
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 97b79b705b2..470d5a9bd15 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -944,6 +944,9 @@ row_ins_foreign_fill_virtual(
rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &cascade->heap);
mem_heap_t* v_heap = NULL;
+ TABLE* mysql_table= NULL;
+ VCOL_STORAGE* vcol_storage= NULL;
+ byte* record;
upd_t* update = cascade->update;
ulint n_v_fld = index->table->n_v_def;
ulint n_diff;
@@ -963,6 +966,14 @@ row_ins_foreign_fill_virtual(
innobase_init_vc_templ(index->table);
}
+ if (innobase_allocate_row_for_vcol(thd, index, &v_heap,
+ &mysql_table,
+ &record, &vcol_storage))
+ {
+ *err = DB_OUT_OF_MEMORY;
+ goto func_exit;
+ }
+
for (ulint i = 0; i < n_v_fld; i++) {
dict_v_col_t* col = dict_table_get_nth_v_col(
@@ -976,8 +987,8 @@ row_ins_foreign_fill_virtual(
dfield_t* vfield = innobase_get_computed_value(
update->old_vrow, col, index,
- &v_heap, update->heap, NULL, thd, NULL,
- NULL, NULL, NULL);
+ &v_heap, update->heap, NULL, thd, mysql_table,
+ record, NULL, NULL, NULL);
if (vfield == NULL) {
*err = DB_COMPUTE_VALUE_FAILED;
@@ -1007,7 +1018,8 @@ row_ins_foreign_fill_virtual(
dfield_t* new_vfield = innobase_get_computed_value(
update->old_vrow, col, index,
&v_heap, update->heap, NULL, thd,
- NULL, NULL, node->update, foreign);
+ mysql_table, record, NULL,
+ node->update, foreign);
if (new_vfield == NULL) {
*err = DB_COMPUTE_VALUE_FAILED;
@@ -1025,6 +1037,8 @@ row_ins_foreign_fill_virtual(
func_exit:
if (v_heap) {
+ if (vcol_storage)
+ innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(v_heap);
}
}
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index abf6a0850ab..be3c081562e 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -532,6 +532,8 @@ row_merge_buf_add(
ulint bucket = 0;
doc_id_t write_doc_id;
ulint n_row_added = 0;
+ VCOL_STORAGE* vcol_storage= 0;
+ byte* record;
DBUG_ENTER("row_merge_buf_add");
if (buf->n_tuples >= buf->max_tuples) {
@@ -604,14 +606,21 @@ row_merge_buf_add(
dict_index_t* clust_index
= dict_table_get_first_index(new_table);
+ if (!vcol_storage &&
+ innobase_allocate_row_for_vcol(trx->mysql_thd, clust_index, v_heap, &my_table, &record, &vcol_storage)) {
+ *err = DB_OUT_OF_MEMORY;
+ goto error;
+ }
+
row_field = innobase_get_computed_value(
row, v_col, clust_index,
v_heap, NULL, ifield, trx->mysql_thd,
- my_table, old_table, NULL, NULL);
+ my_table, record, old_table, NULL,
+ NULL);
if (row_field == NULL) {
*err = DB_COMPUTE_VALUE_FAILED;
- DBUG_RETURN(0);
+ goto error;
}
dfield_copy(field, row_field);
} else {
@@ -647,7 +656,7 @@ row_merge_buf_add(
ib::warn() << "FTS Doc ID is"
" zero. Record"
" skipped";
- DBUG_RETURN(0);
+ goto error;
}
}
@@ -795,7 +804,7 @@ row_merge_buf_add(
/* If this is FTS index, we already populated the sort buffer, return
here */
if (index->type & DICT_FTS) {
- DBUG_RETURN(n_row_added);
+ goto end;
}
#ifdef UNIV_DEBUG
@@ -829,7 +838,7 @@ row_merge_buf_add(
/* Reserve bytes for the end marker of row_merge_block_t. */
if (buf->total_size + data_size >= srv_sort_buf_size) {
- DBUG_RETURN(0);
+ goto error;
}
buf->total_size += data_size;
@@ -848,7 +857,15 @@ row_merge_buf_add(
mem_heap_empty(conv_heap);
}
+end:
+ if (vcol_storage)
+ innobase_free_row_for_vcol(vcol_storage);
DBUG_RETURN(n_row_added);
+
+error:
+ if (vcol_storage)
+ innobase_free_row_for_vcol(vcol_storage);
+ DBUG_RETURN(0);
}
/*************************************************************//**
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index b603292705a..49ac2638ef8 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -63,6 +63,7 @@ Created 9/17/2000 Heikki Tuuri
#include "trx0rec.h"
#include "trx0roll.h"
#include "trx0undo.h"
+#include "srv0start.h"
#include "row0ext.h"
#include "srv0start.h"
#include "ut0new.h"
@@ -3525,12 +3526,36 @@ row_drop_table_for_mysql(
/* make sure background stats thread is not running on the table */
ut_ad(!(table->stats_bg_flag & BG_STAT_IN_PROGRESS));
- /* Delete the link file if used. */
- if (DICT_TF_HAS_DATA_DIR(table->flags)) {
- RemoteDatafile::delete_link_file(name);
- }
-
if (!table->no_rollback()) {
+ if (table->space != fil_system.sys_space) {
+ /* On DISCARD TABLESPACE, we would not drop the
+ adaptive hash index entries. If the tablespace is
+ missing here, delete-marking the record in SYS_INDEXES
+ would not free any pages in the buffer pool. Thus,
+ dict_index_remove_from_cache() would hang due to
+ adaptive hash index entries existing in the buffer
+ pool. To prevent this hang, and also to guarantee
+ that btr_search_drop_page_hash_when_freed() will avoid
+ calling btr_search_drop_page_hash_index() while we
+ hold the InnoDB dictionary lock, we will drop any
+ adaptive hash index entries upfront. */
+ while (buf_LRU_drop_page_hash_for_tablespace(table)) {
+ if (trx_is_interrupted(trx)
+ || srv_shutdown_state
+ != SRV_SHUTDOWN_NONE) {
+ err = DB_INTERRUPTED;
+ table->to_be_dropped = false;
+ dict_table_close(table, true, false);
+ goto funct_exit;
+ }
+ }
+
+ /* Delete the link file if used. */
+ if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+ RemoteDatafile::delete_link_file(name);
+ }
+ }
+
dict_stats_recalc_pool_del(table);
dict_stats_defrag_pool_del(table, NULL);
if (btr_defragment_thread_active) {
@@ -3702,21 +3727,6 @@ defer:
rw_lock_x_unlock(dict_index_get_lock(index));
}
- if (table->space_id != TRX_SYS_SPACE) {
- /* On DISCARD TABLESPACE, we would not drop the
- adaptive hash index entries. If the tablespace is
- missing here, delete-marking the record in SYS_INDEXES
- would not free any pages in the buffer pool. Thus,
- dict_index_remove_from_cache() would hang due to
- adaptive hash index entries existing in the buffer
- pool. To prevent this hang, and also to guarantee
- that btr_search_drop_page_hash_when_freed() will avoid
- calling btr_search_drop_page_hash_index() while we
- hold the InnoDB dictionary lock, we will drop any
- adaptive hash index entries upfront. */
- buf_LRU_drop_page_hash_for_tablespace(table);
- }
-
/* Deleting a row from SYS_INDEXES table will invoke
dict_drop_index_tree(). */
info = pars_info_create();
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 3c081657e35..67fc30d526e 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -178,6 +178,8 @@ row_sel_sec_rec_is_for_clust_rec(
ulint* clust_offs = clust_offsets_;
ulint* sec_offs = sec_offsets_;
ibool is_equal = TRUE;
+ VCOL_STORAGE* vcol_storage= 0;
+ byte* record;
rec_offs_init(clust_offsets_);
rec_offs_init(sec_offsets_);
@@ -225,6 +227,17 @@ row_sel_sec_rec_is_for_clust_rec(
dfield_t* vfield;
row_ext_t* ext;
+ if (!vcol_storage)
+ {
+ TABLE *mysql_table= thr->prebuilt->m_mysql_table;
+ innobase_allocate_row_for_vcol(thr_get_trx(thr)->mysql_thd,
+ clust_index,
+ &heap,
+ &mysql_table,
+ &record,
+ &vcol_storage);
+ }
+
v_col = reinterpret_cast<const dict_v_col_t*>(col);
row = row_build(ROW_COPY_POINTERS,
@@ -236,8 +249,8 @@ row_sel_sec_rec_is_for_clust_rec(
row, v_col, clust_index,
&heap, NULL, NULL,
thr_get_trx(thr)->mysql_thd,
- thr->prebuilt->m_mysql_table, NULL,
- NULL, NULL);
+ thr->prebuilt->m_mysql_table,
+ record, NULL, NULL, NULL);
clust_len = vfield->len;
clust_field = static_cast<byte*>(vfield->data);
@@ -325,6 +338,8 @@ inequal:
func_exit:
if (UNIV_LIKELY_NULL(heap)) {
+ if (UNIV_LIKELY_NULL(vcol_storage))
+ innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(heap);
}
return(is_equal);
@@ -5864,57 +5879,6 @@ func_exit:
}
/*******************************************************************//**
-Checks if MySQL at the moment is allowed for this table to retrieve a
-consistent read result, or store it to the query cache.
-@return whether storing or retrieving from the query cache is permitted */
-bool
-row_search_check_if_query_cache_permitted(
-/*======================================*/
- trx_t* trx, /*!< in: transaction object */
- const char* norm_name) /*!< in: concatenation of database name,
- '/' char, table name */
-{
- dict_table_t* table = dict_table_open_on_name(
- norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
-
- if (table == NULL) {
-
- return(false);
- }
-
- /* Start the transaction if it is not started yet */
-
- trx_start_if_not_started(trx, false);
-
- /* If there are locks on the table or some trx has invalidated the
- cache before this transaction started then this transaction cannot
- read/write from/to the cache.
-
- If a read view has not been created for the transaction then it doesn't
- really matter what this transaction sees. If a read view was created
- then the view low_limit_id is the max trx id that this transaction
- saw at the time of the read view creation. */
-
- const bool ret = lock_table_get_n_locks(table) == 0
- && ((trx->id != 0 && trx->id >= table->query_cache_inv_id)
- || !trx->read_view.is_open()
- || trx->read_view.low_limit_id()
- >= table->query_cache_inv_id);
- if (ret) {
- /* If the isolation level is high, assign a read view for the
- transaction if it does not yet have one */
-
- if (trx->isolation_level >= TRX_ISO_REPEATABLE_READ) {
- trx->read_view.open(trx);
- }
- }
-
- dict_table_close(table, FALSE, FALSE);
-
- return(ret);
-}
-
-/*******************************************************************//**
Read the AUTOINC column from the current row. If the value is less than
0 and the type is not unsigned then we reset the value to 0.
@return value read from the column */
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index bca7464bc66..39206e66d75 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1043,6 +1043,7 @@ row_upd_build_sec_rec_difference_binary(
return(update);
}
+
/** Builds an update vector from those fields, excluding the roll ptr and
trx id fields, which in an index entry differ from a record that has
the equal ordering fields. NOTE: we compare the fields as binary strings!
@@ -1142,6 +1143,9 @@ row_upd_build_difference_binary(
if (n_v_fld > 0) {
row_ext_t* ext;
mem_heap_t* v_heap = NULL;
+ byte* record;
+ VCOL_STORAGE* vcol_storage;
+
THD* thd;
if (trx == NULL) {
@@ -1152,6 +1156,10 @@ row_upd_build_difference_binary(
ut_ad(!update->old_vrow);
+ innobase_allocate_row_for_vcol(thd, index, &v_heap,
+ &mysql_table,
+ &record, &vcol_storage);
+
for (i = 0; i < n_v_fld; i++) {
const dict_v_col_t* col
= dict_table_get_nth_v_col(index->table, i);
@@ -1170,7 +1178,7 @@ row_upd_build_difference_binary(
dfield_t* vfield = innobase_get_computed_value(
update->old_vrow, col, index,
- &v_heap, heap, NULL, thd, mysql_table,
+ &v_heap, heap, NULL, thd, mysql_table, record,
NULL, NULL, NULL);
if (!dfield_data_is_binary_equal(
@@ -1196,6 +1204,8 @@ row_upd_build_difference_binary(
}
if (v_heap) {
+ if (vcol_storage)
+ innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(v_heap);
}
}
@@ -2122,6 +2132,12 @@ row_upd_store_v_row(
{
mem_heap_t* heap = NULL;
dict_index_t* index = dict_table_get_first_index(node->table);
+ byte* record= 0;
+ VCOL_STORAGE *vcol_storage= 0;
+
+ if (!update)
+ innobase_allocate_row_for_vcol(thd, index, &heap, &mysql_table,
+ &record, &vcol_storage);
for (ulint col_no = 0; col_no < dict_table_get_n_v_cols(node->table);
col_no++) {
@@ -2174,7 +2190,7 @@ row_upd_store_v_row(
innobase_get_computed_value(
node->row, col, index,
&heap, node->heap, NULL,
- thd, mysql_table, NULL,
+ thd, mysql_table, record, NULL,
NULL, NULL);
}
}
@@ -2182,8 +2198,11 @@ row_upd_store_v_row(
}
if (heap) {
+ if (vcol_storage)
+ innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(heap);
}
+
}
/** Stores to the heap the row on which the node->pcur is positioned.
diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc
index d585ef3a9d3..bfaa2721746 100644
--- a/storage/innobase/row/row0vers.cc
+++ b/storage/innobase/row/row0vers.cc
@@ -434,6 +434,19 @@ row_vers_build_clust_v_col(
mem_heap_t* heap)
{
mem_heap_t* local_heap = NULL;
+ VCOL_STORAGE *vcol_storage= NULL;
+ THD* thd= current_thd;
+ TABLE* maria_table= 0;
+ byte* record= 0;
+
+ ut_ad(dict_index_has_virtual(index));
+
+ innobase_allocate_row_for_vcol(thd, index,
+ &local_heap,
+ &maria_table,
+ &record,
+ &vcol_storage);
+
for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
const dict_field_t* ind_field = dict_index_get_nth_field(
index, i);
@@ -446,15 +459,18 @@ row_vers_build_clust_v_col(
innobase_get_computed_value(
row, col, clust_index, &local_heap,
- heap, NULL, current_thd, NULL, NULL,
+ heap, NULL, thd, maria_table, record, NULL,
NULL, NULL);
}
}
if (local_heap) {
+ if (vcol_storage)
+ innobase_free_row_for_vcol(vcol_storage);
mem_heap_free(local_heap);
}
}
+
/** Build latest virtual column data from undo log
@param[in] in_purge whether this is the purge thread
@param[in] rec clustered index record
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 89070214a1b..08b21bcdd7d 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -834,6 +834,10 @@ srv_undo_tablespaces_init(bool create_new_db)
ut_a(srv_undo_tablespaces <= TRX_SYS_N_RSEGS);
ut_a(!create_new_db || srv_operation == SRV_OPERATION_NORMAL);
+ if (srv_undo_tablespaces == 1) { /* 1 is not allowed, make it 0 */
+ srv_undo_tablespaces = 0;
+ }
+
memset(undo_tablespace_ids, 0x0, sizeof(undo_tablespace_ids));
/* Create the undo spaces only if we are creating a new
diff --git a/storage/maria/ma_control_file.c b/storage/maria/ma_control_file.c
index 94f9ad46a48..9c289ba234f 100644
--- a/storage/maria/ma_control_file.c
+++ b/storage/maria/ma_control_file.c
@@ -277,7 +277,7 @@ CONTROL_FILE_ERROR ma_control_file_open(my_bool create_if_missing,
" file is probably in use by another process";
uint new_cf_create_time_size, new_cf_changeable_size, new_block_size;
my_off_t file_size;
- int open_flags= O_BINARY | /*O_DIRECT |*/ O_RDWR;
+ int open_flags= O_BINARY | /*O_DIRECT |*/ O_RDWR | O_CLOEXEC;
int error= CONTROL_FILE_UNKNOWN_ERROR;
DBUG_ENTER("ma_control_file_open");
diff --git a/storage/maria/ma_key.c b/storage/maria/ma_key.c
index 703ce118843..dbdde1d5963 100644
--- a/storage/maria/ma_key.c
+++ b/storage/maria/ma_key.c
@@ -726,7 +726,7 @@ ulonglong ma_retrieve_auto_increment(const uchar *key, uint8 key_type)
switch (key_type) {
case HA_KEYTYPE_INT8:
- s_value= (longlong) *(const char*)key;
+ s_value= (longlong) *(const signed char*) key;
break;
case HA_KEYTYPE_BINARY:
value=(ulonglong) *key;
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index c81c7735df5..158904f6692 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -78,6 +78,32 @@ typedef union
uchar buffer[TRANSLOG_PAGE_SIZE];
} TRANSLOG_PAGE_SIZE_BUFF;
+#define MAX_TRUNSLOG_USED_BUFFERS 3
+
+typedef struct
+{
+ struct st_translog_buffer *buff[MAX_TRUNSLOG_USED_BUFFERS];
+ uint8 wrt_ptr;
+ uint8 unlck_ptr;
+} TRUNSLOG_USED_BUFFERS;
+
+static void
+used_buffs_init(TRUNSLOG_USED_BUFFERS *buffs)
+{
+ buffs->unlck_ptr= buffs->wrt_ptr= 0;
+}
+
+static void
+used_buffs_add(TRUNSLOG_USED_BUFFERS *buffs,
+ struct st_translog_buffer *buff);
+
+static void
+used_buffs_register_unlock(TRUNSLOG_USED_BUFFERS *buffs,
+ struct st_translog_buffer *buff);
+
+static void
+used_buffs_urgent_unlock(TRUNSLOG_USED_BUFFERS *buffs);
+
/* min chunk length */
#define TRANSLOG_MIN_CHUNK 3
/*
@@ -156,7 +182,28 @@ struct st_translog_buffer
TRANSLOG_FILE *file;
/* Threads which are waiting for buffer filling/freeing */
mysql_cond_t waiting_filling_buffer;
- /* Number of records which are in copy progress */
+ /*
+ Number of records which are in copy progress.
+
+ Controlled via translog_buffer_increase_writers() and
+ translog_buffer_decrease_writers().
+
+ 1 Simple case: the increase and the decrease are both done in
+ translog_force_current_buffer_to_finish(), within the same procedure.
+
+ 2 Simple case: translog_write_variable_record_1group():
+ translog_advance_pointer() increases the writer count of the buffer
+ and translog_buffer_decrease_writers() decreases it.
+
+ Usual case:
+ 1) translog_advance_pointer() (i.e. reserving space for a future
+ write) increases the writer count for all buffers where space was
+ reserved. Simplest case: all space is reserved in one buffer;
+ complex case: the end of the first buffer, the whole second buffer,
+ and the beginning of the third buffer.
+ 2) When we finish writing, translog_chaser_page_next() will be
+ called and will unlock the buffer by decreasing the number of writers.
+ */
uint copy_to_buffer_in_progress;
/* list of waiting buffer ready threads */
struct st_my_thread_var *waiting_flush;
@@ -214,6 +261,7 @@ struct st_translog_buffer
struct st_buffer_cursor
{
+ TRUNSLOG_USED_BUFFERS buffs;
/* pointer into the buffer */
uchar *ptr;
/* current buffer */
@@ -935,7 +983,7 @@ static File create_logfile_by_number_no_cache(uint32 file_no)
/* TODO: add O_DIRECT to open flags (when buffer is aligned) */
if ((file= mysql_file_create(key_file_translog,
translog_filename_by_fileno(file_no, path),
- 0, O_BINARY | O_RDWR, MYF(MY_WME))) < 0)
+ 0, O_BINARY | O_RDWR | O_CLOEXEC, MYF(MY_WME))) < 0)
{
DBUG_PRINT("error", ("Error %d during creating file '%s'", errno, path));
translog_stop_writing();
@@ -973,7 +1021,7 @@ static File open_logfile_by_number_no_cache(uint32 file_no)
/* TODO: use mysql_file_create() */
if ((file= mysql_file_open(key_file_translog,
translog_filename_by_fileno(file_no, path),
- log_descriptor.open_flags,
+ log_descriptor.open_flags | O_CLOEXEC,
MYF(MY_WME))) < 0)
{
DBUG_PRINT("error", ("Error %d during opening file '%s'", errno, path));
@@ -1648,15 +1696,12 @@ static my_bool translog_create_new_file()
DBUG_PRINT("info", ("file_no: %lu", (ulong)file_no));
if (translog_write_file_header())
- DBUG_RETURN(1);
+ goto error;
if (ma_control_file_write_and_force(last_checkpoint_lsn, file_no,
max_trid_in_control_file,
recovery_failures))
- {
- translog_stop_writing();
- DBUG_RETURN(1);
- }
+ goto error;
DBUG_RETURN(0);
@@ -1697,10 +1742,6 @@ static void translog_buffer_lock(struct st_translog_buffer *buffer)
SYNOPSIS
translog_buffer_unlock()
buffer This buffer which should be unlocked
-
- RETURN
- 0 OK
- 1 Error
*/
static void translog_buffer_unlock(struct st_translog_buffer *buffer)
@@ -1894,7 +1935,10 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon,
(uint) cursor->buffer->size,
(uint) (cursor->ptr -cursor->buffer->buffer),
(uint) cursor->current_page_fill, (uint) left));
- DBUG_ASSERT(LSN_FILE_NO(*horizon) == LSN_FILE_NO(cursor->buffer->offset));
+ DBUG_ASSERT(LSN_FILE_NO(*horizon) == LSN_FILE_NO(cursor->buffer->offset)
+ || translog_status == TRANSLOG_UNINITED);
+ if (LSN_FILE_NO(*horizon) != LSN_FILE_NO(cursor->buffer->offset))
+ DBUG_VOID_RETURN; // everything is wrong; do not write, to avoid more problems
translog_check_cursor(cursor);
if (cursor->protected)
{
@@ -3243,7 +3287,7 @@ static my_bool translog_get_last_page_addr(TRANSLOG_ADDRESS *addr,
File fd;
if ((fd= mysql_file_open(key_file_translog,
translog_filename_by_fileno(file_no, path),
- O_RDONLY, (no_errors ? MYF(0) : MYF(MY_WME)))) < 0)
+ O_RDONLY | O_CLOEXEC, (no_errors ? MYF(0) : MYF(MY_WME)))) < 0)
{
my_errno= errno;
DBUG_PRINT("error", ("Error %d during opening file #%d",
@@ -4588,6 +4632,7 @@ static my_bool translog_chaser_page_next(TRANSLOG_ADDRESS *horizon,
{
translog_buffer_lock(buffer_to_flush);
translog_buffer_decrease_writers(buffer_to_flush);
+ used_buffs_register_unlock(&cursor->buffs, buffer_to_flush);
if (!rc)
rc= translog_buffer_flush(buffer_to_flush);
translog_buffer_unlock(buffer_to_flush);
@@ -4692,7 +4737,8 @@ translog_write_variable_record_chunk3_page(struct st_translog_parts *parts,
1 Error
*/
-static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
+static my_bool translog_advance_pointer(int pages, uint16 last_page_data,
+ TRUNSLOG_USED_BUFFERS *buffs)
{
translog_size_t last_page_offset= (log_descriptor.page_overhead +
last_page_data);
@@ -4709,6 +4755,8 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
(uint) last_page_data));
translog_lock_assert_owner();
+ used_buffs_init(buffs);
+
if (pages == -1)
{
/*
@@ -4786,8 +4834,10 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
translog_wait_for_buffer_free(new_buffer);
#ifndef DBUG_OFF
/* We keep the handler locked so nobody can start this new buffer */
- DBUG_ASSERT(offset == new_buffer->offset && new_buffer->file == NULL &&
- (file == NULL ? ver : (uint8)(ver + 1)) == new_buffer->ver);
+ DBUG_ASSERT((offset == new_buffer->offset && new_buffer->file == NULL &&
+ (file == NULL ? ver : (uint8)(ver + 1)) ==
+ new_buffer->ver) ||
+ translog_status == TRANSLOG_READONLY);
}
#endif
@@ -4808,6 +4858,8 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
DBUG_ASSERT(log_descriptor.bc.buffer->buffer_no ==
log_descriptor.bc.buffer_no);
translog_buffer_increase_writers(log_descriptor.bc.buffer);
+ // register for case of error
+ used_buffs_add(buffs, log_descriptor.bc.buffer);
if (file_end_offset <= buffer_end_offset)
{
@@ -4818,6 +4870,10 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
(ulong) LSN_FILE_NO(log_descriptor.horizon)));
if (translog_create_new_file())
{
+ struct st_translog_buffer *ob= log_descriptor.bc.buffer;
+ translog_buffer_unlock(ob);
+ used_buffs_urgent_unlock(buffs);
+ translog_buffer_lock(ob);
DBUG_RETURN(1);
}
}
@@ -4839,6 +4895,7 @@ end:
log_descriptor.bc.ptr+= offset;
log_descriptor.bc.buffer->size+= offset;
translog_buffer_increase_writers(log_descriptor.bc.buffer);
+ used_buffs_add(buffs, log_descriptor.bc.buffer);
log_descriptor.horizon+= offset; /* offset increasing */
log_descriptor.bc.current_page_fill= last_page_offset;
DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu) "
@@ -4859,6 +4916,56 @@ end:
DBUG_RETURN(0);
}
+static void
+used_buffs_add(TRUNSLOG_USED_BUFFERS *buffs,
+ struct st_translog_buffer *buff)
+{
+ DBUG_ENTER("used_buffs_add");
+ DBUG_PRINT("enter", ("ADD buffs: %p unlk %u (%p) wrt_ptr: %u (%p)"
+ " buff %p (%u)",
+ buffs,
+ buffs->unlck_ptr, buffs->buff[buffs->unlck_ptr],
+ buffs->wrt_ptr, buffs->buff[buffs->wrt_ptr],
+ buff, buff->buffer_no));
+ DBUG_ASSERT(buffs->wrt_ptr < MAX_TRUNSLOG_USED_BUFFERS);
+ buffs->buff[buffs->wrt_ptr++]= buff;
+ DBUG_VOID_RETURN;
+}
+
+static void
+used_buffs_register_unlock(TRUNSLOG_USED_BUFFERS *buffs,
+ struct st_translog_buffer *buff
+ __attribute__((unused)) )
+{
+ DBUG_ENTER("used_buffs_register_unlock");
+ DBUG_PRINT("enter", ("SUB buffs: %p unlk %u (%p) wrt_ptr: %u (%p)"
+ " buff %p (%u)",
+ buffs,
+ buffs->unlck_ptr, buffs->buff[buffs->unlck_ptr],
+ buffs->wrt_ptr, buffs->buff[buffs->wrt_ptr],
+ buff, buff->buffer_no));
+ DBUG_ASSERT(buffs->buff[buffs->unlck_ptr] == buff);
+ buffs->unlck_ptr++;
+ DBUG_VOID_RETURN;
+}
+static void used_buffs_urgent_unlock(TRUNSLOG_USED_BUFFERS *buffs)
+{
+ uint i;
+ DBUG_ENTER("used_buffs_urgent_unlock");
+ translog_lock();
+ translog_stop_writing();
+ translog_unlock();
+ for (i= buffs->unlck_ptr; i < buffs->wrt_ptr; i++)
+ {
+ struct st_translog_buffer *buf= buffs->buff[i];
+ translog_buffer_lock(buf);
+ translog_buffer_decrease_writers(buf);
+ translog_buffer_unlock(buf);
+ buffs->buff[i]= NULL;
+ }
+ used_buffs_init(buffs);
+ DBUG_VOID_RETURN;
+}
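
A sketch of how this bookkeeping is driven (hypothetical caller, not part
of the commit): used_buffs_add() records every buffer whose writer count
was increased, used_buffs_register_unlock() advances unlck_ptr as buffers
are released on the normal path, and the urgent variant releases whatever
is still outstanding, i.e. buff[i] for unlck_ptr <= i < wrt_ptr, on an
error path:

    TRUNSLOG_USED_BUFFERS buffs;
    used_buffs_init(&buffs);

    translog_buffer_increase_writers(buf);        /* reserve space in buf */
    used_buffs_add(&buffs, buf);                  /* wrt_ptr: 0 -> 1 */

    if (write_failed)                             /* hypothetical flag */
    {
        used_buffs_urgent_unlock(&buffs);         /* drops buff[0..1) */
    }
    else
    {
        translog_buffer_lock(buf);
        translog_buffer_decrease_writers(buf);
        used_buffs_register_unlock(&buffs, buf);  /* unlck_ptr: 0 -> 1 */
        translog_buffer_unlock(buf);
    }
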
/*
Get page rest
@@ -4997,6 +5104,11 @@ translog_write_variable_record_1group(LSN *lsn,
lsn, hook_arg)))
{
translog_unlock();
+ if (buffer_to_flush != NULL)
+ {
+ translog_buffer_flush(buffer_to_flush);
+ translog_buffer_unlock(buffer_to_flush);
+ }
DBUG_RETURN(1);
}
cursor= log_descriptor.bc;
@@ -5027,8 +5139,9 @@ translog_write_variable_record_1group(LSN *lsn,
(log_descriptor.page_capacity_chunk_2 - 1),
record_rest, parts->record_length));
/* record_rest + 3 is chunk type 3 overhead + record_rest */
- rc|= translog_advance_pointer((int)(full_pages + additional_chunk3_page),
- (record_rest ? record_rest + 3 : 0));
+ rc= translog_advance_pointer((int)(full_pages + additional_chunk3_page),
+ (record_rest ? record_rest + 3 : 0),
+ &cursor.buffs);
log_descriptor.bc.buffer->last_lsn= *lsn;
DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p",
LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn),
@@ -5047,7 +5160,11 @@ translog_write_variable_record_1group(LSN *lsn,
translog_buffer_unlock(buffer_to_flush);
}
if (rc)
+ {
+ // translog_advance_pointer already released its writer references on failure, so the assert holds
+ DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr);
DBUG_RETURN(1);
+ }
translog_write_variable_record_1group_header(parts, type, short_trid,
header_length, chunk0_header);
@@ -5062,7 +5179,7 @@ translog_write_variable_record_1group(LSN *lsn,
for (i= 0; i < full_pages; i++)
{
if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor))
- DBUG_RETURN(1);
+ goto error;
DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon),
@@ -5075,7 +5192,7 @@ translog_write_variable_record_1group(LSN *lsn,
log_descriptor.
page_capacity_chunk_2 - 2,
&horizon, &cursor))
- DBUG_RETURN(1);
+ goto error;
DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon)));
@@ -5085,17 +5202,22 @@ translog_write_variable_record_1group(LSN *lsn,
if (translog_write_variable_record_chunk3_page(parts,
record_rest,
&horizon, &cursor))
- DBUG_RETURN(1);
- DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
- (uint) LSN_FILE_NO(log_descriptor.horizon),
- (uint) LSN_OFFSET(log_descriptor.horizon),
- (uint) LSN_FILE_NO(horizon),
- (uint) LSN_OFFSET(horizon)));
+ goto error;
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
+ (uint) LSN_FILE_NO(log_descriptor.horizon),
+ (uint) LSN_OFFSET(log_descriptor.horizon),
+ (uint) LSN_FILE_NO(horizon),
+ (uint) LSN_OFFSET(horizon)));
translog_buffer_lock(cursor.buffer);
translog_buffer_decrease_writers(cursor.buffer);
+ used_buffs_register_unlock(&cursor.buffs, cursor.buffer);
translog_buffer_unlock(cursor.buffer);
- DBUG_RETURN(rc);
+ DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr);
+ DBUG_RETURN(0);
+error:
+ used_buffs_urgent_unlock(&cursor.buffs);
+ DBUG_RETURN(1);
}
@@ -5149,7 +5271,8 @@ translog_write_variable_record_1chunk(LSN *lsn,
lsn, hook_arg)))
{
translog_unlock();
- DBUG_RETURN(1);
+ rc= 1;
+ goto err;
}
rc= translog_write_parts_on_page(&log_descriptor.horizon,
@@ -5165,6 +5288,7 @@ translog_write_variable_record_1chunk(LSN *lsn,
check if we switched buffers and need to process the old one (the current
buffer is already unlocked, so we will not delay other threads)
*/
+err:
if (buffer_to_flush != NULL)
{
if (!rc)
@@ -5505,9 +5629,11 @@ translog_write_variable_record_mgroup(LSN *lsn,
uint file_of_the_first_group;
int pages_to_skip;
struct st_translog_buffer *buffer_of_last_lsn;
+ my_bool external_buffer_to_flush= TRUE;
DBUG_ENTER("translog_write_variable_record_mgroup");
translog_lock_assert_owner();
+ used_buffs_init(&cursor.buffs);
chunk2_header[0]= TRANSLOG_CHUNK_NOHDR;
if (my_init_dynamic_array(&groups,
@@ -5515,6 +5641,11 @@ translog_write_variable_record_mgroup(LSN *lsn,
10, 10, MYF(0)))
{
translog_unlock();
+ if (buffer_to_flush != NULL)
+ {
+ translog_buffer_flush(buffer_to_flush);
+ translog_buffer_unlock(buffer_to_flush);
+ }
DBUG_PRINT("error", ("init array failed"));
DBUG_RETURN(1);
}
@@ -5541,6 +5672,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
translog_mark_file_unfinished(file_of_the_first_group);
do
{
+ DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr);
group.addr= horizon= log_descriptor.horizon;
cursor= log_descriptor.bc;
cursor.chaser= 1;
@@ -5573,21 +5705,26 @@ translog_write_variable_record_mgroup(LSN *lsn,
(ulong)(parts->record_length - (first_page - 1 +
buffer_rest) -
done)));
- rc|= translog_advance_pointer((int)full_pages, 0);
+ rc= translog_advance_pointer((int)full_pages, 0, &cursor.buffs);
translog_unlock();
if (buffer_to_flush != NULL)
{
- translog_buffer_decrease_writers(buffer_to_flush);
+ if (!external_buffer_to_flush)
+ translog_buffer_decrease_writers(buffer_to_flush);
if (!rc)
rc= translog_buffer_flush(buffer_to_flush);
translog_buffer_unlock(buffer_to_flush);
buffer_to_flush= NULL;
}
+ external_buffer_to_flush= FALSE;
+
if (rc)
{
DBUG_PRINT("error", ("flush of unlock buffer failed"));
+ // translog_advance_pointer already released its writer references on failure, so the assert holds
+ DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr);
goto err;
}
@@ -5624,6 +5761,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
}
translog_buffer_lock(cursor.buffer);
translog_buffer_decrease_writers(cursor.buffer);
+ used_buffs_register_unlock(&cursor.buffs, cursor.buffer);
translog_buffer_unlock(cursor.buffer);
translog_lock();
@@ -5638,6 +5776,11 @@ translog_write_variable_record_mgroup(LSN *lsn,
first_page= translog_get_current_page_rest();
}
buffer_rest= translog_get_current_group_size();
+
+ if (buffer_to_flush)
+ used_buffs_register_unlock(&cursor.buffs,
+ buffer_to_flush); // will be unlocked
+
} while ((translog_size_t)(first_page + buffer_rest) <
(translog_size_t)(parts->record_length - done));
@@ -5733,17 +5876,21 @@ translog_write_variable_record_mgroup(LSN *lsn,
(ulong) full_pages *
log_descriptor.page_capacity_chunk_2,
chunk3_pages, (uint) chunk3_size, (uint) record_rest));
+
+ DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr);
rc= translog_advance_pointer(pages_to_skip + (int)(chunk0_pages - 1),
record_rest + header_fixed_part +
(groups.elements -
((page_capacity -
header_fixed_part) / (7 + 1)) *
- (chunk0_pages - 1)) * (7 + 1));
+ (chunk0_pages - 1)) * (7 + 1),
+ &cursor.buffs);
buffer_of_last_lsn= log_descriptor.bc.buffer;
translog_unlock();
if (buffer_to_flush != NULL)
{
+ DBUG_ASSERT(!external_buffer_to_flush);
translog_buffer_decrease_writers(buffer_to_flush);
if (!rc)
rc= translog_buffer_flush(buffer_to_flush);
@@ -5907,8 +6054,10 @@ translog_write_variable_record_mgroup(LSN *lsn,
} while (chunk0_pages != 0);
translog_buffer_lock(cursor.buffer);
translog_buffer_decrease_writers(cursor.buffer);
+ used_buffs_register_unlock(&cursor.buffs, cursor.buffer);
translog_buffer_unlock(cursor.buffer);
rc= 0;
+ DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr);
if (translog_set_lsn_for_files(file_of_the_first_group, LSN_FILE_NO(*lsn),
*lsn, FALSE))
@@ -5917,17 +6066,22 @@ translog_write_variable_record_mgroup(LSN *lsn,
translog_mark_file_finished(file_of_the_first_group);
delete_dynamic(&groups);
- DBUG_RETURN(rc);
+ DBUG_RETURN(0);
err_unlock:
translog_unlock();
err:
+
+ if (cursor.buffs.unlck_ptr != cursor.buffs.wrt_ptr)
+ used_buffs_urgent_unlock(&cursor.buffs);
+
if (buffer_to_flush != NULL)
{
/* Prevent leaving the buffer locked forever in case of error */
- translog_buffer_decrease_writers(buffer_to_flush);
+ if (!external_buffer_to_flush)
+ translog_buffer_decrease_writers(buffer_to_flush);
if (!rc)
rc= translog_buffer_flush(buffer_to_flush);
translog_buffer_unlock(buffer_to_flush);
@@ -7481,7 +7635,8 @@ static void translog_force_current_buffer_to_finish()
DBUG_ASSERT(log_descriptor.bc.ptr != NULL);
DBUG_ASSERT(LSN_FILE_NO(log_descriptor.horizon) ==
- LSN_FILE_NO(old_buffer->offset));
+ LSN_FILE_NO(old_buffer->offset) ||
+ translog_status == TRANSLOG_READONLY);
translog_check_cursor(&log_descriptor.bc);
DBUG_ASSERT(left < TRANSLOG_PAGE_SIZE);
if (left)
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 4139409d477..13f7a64e786 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -334,13 +334,13 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
});
DEBUG_SYNC_C("mi_open_kfile");
if ((kfile=mysql_file_open(key_file_kfile, name_buff,
- (open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW,
+ (open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
MYF(MY_NOSYMLINKS))) < 0)
{
if ((errno != EROFS && errno != EACCES) ||
mode != O_RDONLY ||
(kfile=mysql_file_open(key_file_kfile, name_buff,
- (open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW,
+ (open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
MYF(MY_NOSYMLINKS))) < 0)
goto err;
}
@@ -1960,7 +1960,7 @@ int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share)
DEBUG_SYNC_C("mi_open_datafile");
info->dfile.file= share->bitmap.file.file=
mysql_file_open(key_file_dfile, share->data_file_name.str,
- share->mode | O_SHARE, MYF(flags));
+ share->mode | O_SHARE | O_CLOEXEC, MYF(flags));
return info->dfile.file >= 0 ? 0 : 1;
}
@@ -1974,7 +1974,7 @@ int _ma_open_keyfile(MARIA_SHARE *share)
mysql_mutex_lock(&share->intern_lock);
share->kfile.file= mysql_file_open(key_file_kfile,
share->unique_file_name.str,
- share->mode | O_SHARE | O_NOFOLLOW,
+ share->mode | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
MYF(MY_WME | MY_NOSYMLINKS));
mysql_mutex_unlock(&share->intern_lock);
return (share->kfile.file < 0);
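Every mysql_file_open() in Aria (and in MyISAM further down) now passes O_CLOEXEC, so table and log descriptors are closed automatically across exec() instead of leaking into child processes the server may spawn. A standalone sketch of the same idea using plain open() (the server wraps this in mysql_file_open(); the helper below is illustrative only):

    #include <fcntl.h>

    /* open a data file so the descriptor cannot survive fork()+exec() */
    int open_table_file(const char *path, int read_only)
    {
      int flags= (read_only ? O_RDONLY : O_RDWR) | O_CLOEXEC;
      return open(path, flags);   /* the kernel closes it on exec() */
    }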
diff --git a/storage/mroonga/data/install.sql.in b/storage/mroonga/data/install.sql.in
index d7d5f3c4ad6..0a2f308aef4 100644
--- a/storage/mroonga/data/install.sql.in
+++ b/storage/mroonga/data/install.sql.in
@@ -1,6 +1,6 @@
-DELETE IGNORE FROM mysql.plugin WHERE dl = 'ha_mroonga@MRN_PLUGIN_SUFFIX@';
-
-INSTALL PLUGIN Mroonga SONAME 'ha_mroonga@MRN_PLUGIN_SUFFIX@';
+SET @inst=IF(EXISTS(SELECT * FROM mysql.plugin WHERE NAME='mroonga'),'DO 1', "INSTALL PLUGIN mroonga SONAME 'ha_mroonga'");
+PREPARE s FROM @inst;
+EXECUTE s;
DROP FUNCTION IF EXISTS last_insert_grn_id;
CREATE FUNCTION last_insert_grn_id RETURNS INTEGER
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 164851974d6..2ad81f85e7e 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -2582,7 +2582,7 @@ maria_declare_plugin(myisam)
&myisam_storage_engine,
"MyISAM",
"MySQL AB",
- "MyISAM storage engine",
+ "Non-transactional engine with good performance and small data footprint",
PLUGIN_LICENSE_GPL,
myisam_init, /* Plugin Init */
NULL, /* Plugin Deinit */
diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c
index 43babb2968b..4bd01dcbfa0 100644
--- a/storage/myisam/mi_key.c
+++ b/storage/myisam/mi_key.c
@@ -551,7 +551,7 @@ ulonglong retrieve_auto_increment(MI_INFO *info,const uchar *record)
switch (keyseg->type) {
case HA_KEYTYPE_INT8:
- s_value= (longlong) *(char*)key;
+ s_value= (longlong) *(const signed char*) key;
break;
case HA_KEYTYPE_BINARY:
value=(ulonglong) *(uchar*) key;
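The cast to const signed char * matters because plain char may be unsigned on some ABIs (ARM and PowerPC Linux, for example): there, *(char*)key would zero-extend a negative INT8 key byte instead of sign-extending it. A small illustration:

    #include <stdio.h>

    int main(void)
    {
      unsigned char key[1]= { 0xFF };   /* a stored INT8 value of -1 */
      long long maybe_wrong= (long long) *(char *) key;          /* 255 where char is unsigned */
      long long right= (long long) *(const signed char *) key;   /* always -1 */
      printf("%lld %lld\n", maybe_wrong, right);
      return 0;
    }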
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index a50fe0879c3..4e8920395d5 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -139,13 +139,13 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
DEBUG_SYNC_C("mi_open_kfile");
if ((kfile= mysql_file_open(mi_key_file_kfile, name_buff,
- (open_mode= O_RDWR) | O_SHARE | O_NOFOLLOW,
+ (open_mode= O_RDWR) | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
MYF(MY_NOSYMLINKS))) < 0)
{
if ((errno != EROFS && errno != EACCES) ||
mode != O_RDONLY ||
(kfile= mysql_file_open(mi_key_file_kfile, name_buff,
- (open_mode= O_RDONLY) | O_SHARE| O_NOFOLLOW,
+ (open_mode= O_RDONLY) | O_SHARE| O_NOFOLLOW | O_CLOEXEC,
MYF(MY_NOSYMLINKS))) < 0)
goto err;
}
@@ -1270,7 +1270,7 @@ int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share)
myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS: 0);
DEBUG_SYNC_C("mi_open_datafile");
info->dfile= mysql_file_open(mi_key_file_dfile, share->data_file_name,
- share->mode | O_SHARE, MYF(flags));
+ share->mode | O_SHARE | O_CLOEXEC, MYF(flags));
return info->dfile >= 0 ? 0 : 1;
}
@@ -1279,7 +1279,7 @@ int mi_open_keyfile(MYISAM_SHARE *share)
{
if ((share->kfile= mysql_file_open(mi_key_file_kfile,
share->unique_file_name,
- share->mode | O_SHARE | O_NOFOLLOW,
+ share->mode | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
MYF(MY_NOSYMLINKS | MY_WME))) < 0)
return 1;
return 0;
diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake
index c76f711463e..8f01024be63 100644
--- a/storage/rocksdb/build_rocksdb.cmake
+++ b/storage/rocksdb/build_rocksdb.cmake
@@ -64,10 +64,20 @@ if(SNAPPY_FOUND AND (NOT WITH_ROCKSDB_SNAPPY STREQUAL "OFF"))
list(APPEND THIRDPARTY_LIBS ${SNAPPY_LIBRARIES})
endif()
+include(CheckFunctionExists)
if(ZSTD_FOUND AND (NOT WITH_ROCKSDB_ZSTD STREQUAL "OFF"))
- add_definitions(-DZSTD)
- include_directories(${ZSTD_INCLUDE_DIR})
- list(APPEND THIRDPARTY_LIBS ${ZSTD_LIBRARY})
+ SET(CMAKE_REQUIRED_LIBRARIES zstd)
+ CHECK_FUNCTION_EXISTS(ZDICT_trainFromBuffer ZSTD_VALID)
+ UNSET(CMAKE_REQUIRED_LIBRARIES)
+ if (WITH_ROCKSDB_ZSTD STREQUAL "ON" AND NOT ZSTD_VALID)
+ MESSAGE(FATAL_ERROR
+ "WITH_ROCKSDB_ZSTD is ON and ZSTD library was found, but the version needs to be >= 1.1.3")
+ endif()
+ if (ZSTD_VALID)
+ add_definitions(-DZSTD)
+ include_directories(${ZSTD_INCLUDE_DIR})
+ list(APPEND THIRDPARTY_LIBS ${ZSTD_LIBRARY})
+ endif()
endif()
add_definitions(-DZLIB)
@@ -119,7 +129,6 @@ int main() {
endif()
endif()
-include(CheckFunctionExists)
CHECK_FUNCTION_EXISTS(malloc_usable_size HAVE_MALLOC_USABLE_SIZE)
if(HAVE_MALLOC_USABLE_SIZE)
add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE)
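CHECK_FUNCTION_EXISTS(ZDICT_trainFromBuffer ZSTD_VALID) compiles and links a tiny probe against libzstd; since, per the FATAL_ERROR message above, ZDICT_trainFromBuffer only links against zstd >= 1.1.3, a successful link doubles as a version check. The probe CMake generates is roughly equivalent to this (a sketch, not the literal generated file):

    /* deliberately minimal prototype: only successful linking matters */
    char ZDICT_trainFromBuffer(void);

    int main(void)
    {
      return (int) ZDICT_trainFromBuffer();
    }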
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index 16f7ab4ce75..8facf6c3f10 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -54,7 +54,7 @@ ELSEIF(LIBJEMALLOC STREQUAL jemalloc)
GET_FILENAME_COMPONENT(LIBJEMALLOC_PATH ${LIBJEMALLOC_SO} REALPATH CACHE)
ENDIF()
-IF(LIBJEMALLOC_PATH AND RPM MATCHES fedora28) # TODO check for jemalloc version
+IF(LIBJEMALLOC_PATH AND (RPM OR DEB))
UNSET(LIBJEMALLOC)
GET_DIRECTORY_PROPERTY(V DIRECTORY ${CMAKE_SOURCE_DIR} DEFINITION CPACK_RPM_tokudb-engine_PACKAGE_REQUIRES)
SET(CPACK_RPM_tokudb-engine_PACKAGE_REQUIRES "${V} jemalloc" PARENT_SCOPE)
@@ -75,6 +75,7 @@ IF(INSTALL_SYSCONFDIR)
COMPONENT tokudb-engine)
ENDIF()
+MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-shadow")
MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-vla" DEBUG)
MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-implicit-fallthrough")
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
index 385723aebc7..50d35ee4906 100644
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
@@ -149,7 +149,7 @@ set_cflags_if_supported(
-Wmissing-prototypes
-Wmissing-declarations
-Wpointer-arith
- -Wshadow
+ #-Wshadow   # disabled: it makes the build fail with GCC 8
${OPTIONAL_CFLAGS}
## other flags to try:
#-Wunsafe-loop-optimizations
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc
index a98768158dd..aefd6f0ec22 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -822,22 +822,22 @@ int toku_ftnode_fetch_callback(CACHEFILE UU(cachefile),
fprintf(
stderr,
"%s:%d:toku_ftnode_fetch_callback - "
- "file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
+ "file[%s], blocknum[%lld], toku_deserialize_ftnode_from "
"failed with a checksum error.\n",
__FILE__,
__LINE__,
toku_cachefile_fname_in_env(cachefile),
- blocknum.b);
+ (longlong)blocknum.b);
} else {
fprintf(
stderr,
"%s:%d:toku_ftnode_fetch_callback - "
- "file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
+ "file[%s], blocknum[%lld], toku_deserialize_ftnode_from "
"failed with %d.\n",
__FILE__,
__LINE__,
toku_cachefile_fname_in_env(cachefile),
- blocknum.b,
+ (longlong)blocknum.b,
r);
}
// make absolutely sure we crash before doing anything else.
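blocknum.b is a 64-bit integer, and %ld only matches it where long happens to be 64 bits; on 32-bit builds and on Windows the old format strings were undefined behaviour. Casting to (longlong) and printing with %lld, as every hunk in these TokuDB files does, is the portable form. A condensed illustration (PRId64 shown as the C99 alternative):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
      int64_t blocknum= 42;
      printf("blocknum[%lld]\n", (long long) blocknum);   /* the form used here */
      printf("blocknum[%" PRId64 "]\n", blocknum);        /* C99 alternative */
      return 0;
    }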
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
index b24d72a5dff..0d6573972d7 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc
@@ -656,20 +656,20 @@ exit:
fprintf(stderr, \
"%s:%d toku_deserialize_ft_from: " \
"filename[%s] " \
- "r[%d] max_acceptable_lsn[%lu]" \
- "r0[%d] checkpoint_lsn_0[%lu] checkpoint_count_0[%lu] " \
- "r1[%d] checkpoint_lsn_1[%lu] checkpoint_count_1[%lu]\n", \
+ "r[%d] max_acceptable_lsn[%llu]" \
+ "r0[%d] checkpoint_lsn_0[%llu] checkpoint_count_0[%llu] " \
+ "r1[%d] checkpoint_lsn_1[%llu] checkpoint_count_1[%llu]\n", \
__FILE__, \
__LINE__, \
fn, \
r, \
- max_acceptable_lsn.lsn, \
+ (ulonglong)max_acceptable_lsn.lsn, \
r0, \
- checkpoint_lsn_0.lsn, \
- checkpoint_count_0, \
+ (ulonglong)checkpoint_lsn_0.lsn, \
+ (ulonglong)checkpoint_count_0, \
r1, \
- checkpoint_lsn_1.lsn, \
- checkpoint_count_1);
+ (ulonglong)checkpoint_lsn_1.lsn, \
+ (ulonglong)checkpoint_count_1);
int toku_deserialize_ft_from(int fd,
const char *fn,
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
index 22a562ae24c..f3b31eb31be 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
@@ -1170,11 +1170,11 @@ int verify_ftnode_sub_block(struct sub_block *sb,
fprintf(
stderr,
"%s:%d:verify_ftnode_sub_block - "
- "file[%s], blocknum[%ld], stored_xsum[%u] != actual_xsum[%u]\n",
+ "file[%s], blocknum[%lld], stored_xsum[%u] != actual_xsum[%u]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
stored_xsum,
actual_xsum);
dump_bad_block((Bytef *) sb->uncompressed_ptr, sb->uncompressed_size);
@@ -1197,11 +1197,11 @@ static int deserialize_ftnode_info(struct sub_block *sb, FTNODE node) {
fprintf(
stderr,
"%s:%d:deserialize_ftnode_info - "
- "file[%s], blocknum[%ld], verify_ftnode_sub_block failed with %d\n",
+ "file[%s], blocknum[%lld], verify_ftnode_sub_block failed with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
r);
dump_bad_block(static_cast<unsigned char *>(sb->uncompressed_ptr),
sb->uncompressed_size);
@@ -1253,11 +1253,11 @@ static int deserialize_ftnode_info(struct sub_block *sb, FTNODE node) {
fprintf(
stderr,
"%s:%d:deserialize_ftnode_info - "
- "file[%s], blocknum[%ld], data_size[%d] != rb.ndone[%d]\n",
+ "file[%s], blocknum[%lld], data_size[%d] != rb.ndone[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
data_size,
rb.ndone);
dump_bad_block(rb.buf, rb.size);
@@ -1388,12 +1388,12 @@ static int deserialize_ftnode_partition(
if (r != 0) {
fprintf(stderr,
"%s:%d:deserialize_ftnode_partition - "
- "file[%s], blocknum[%ld], "
+ "file[%s], blocknum[%lld], "
"verify_ftnode_sub_block failed with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
r);
goto exit;
}
@@ -1410,12 +1410,12 @@ static int deserialize_ftnode_partition(
if (ch != FTNODE_PARTITION_MSG_BUFFER) {
fprintf(stderr,
"%s:%d:deserialize_ftnode_partition - "
- "file[%s], blocknum[%ld], ch[%d] != "
+ "file[%s], blocknum[%lld], ch[%d] != "
"FTNODE_PARTITION_MSG_BUFFER[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
ch,
FTNODE_PARTITION_MSG_BUFFER);
dump_bad_block(rb.buf, rb.size);
@@ -1433,12 +1433,12 @@ static int deserialize_ftnode_partition(
if (ch != FTNODE_PARTITION_DMT_LEAVES) {
fprintf(stderr,
"%s:%d:deserialize_ftnode_partition - "
- "file[%s], blocknum[%ld], ch[%d] != "
+ "file[%s], blocknum[%lld], ch[%d] != "
"FTNODE_PARTITION_DMT_LEAVES[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
ch,
FTNODE_PARTITION_DMT_LEAVES);
dump_bad_block(rb.buf, rb.size);
@@ -1457,11 +1457,11 @@ static int deserialize_ftnode_partition(
if (rb.ndone != rb.size) {
fprintf(stderr,
"%s:%d:deserialize_ftnode_partition - "
- "file[%s], blocknum[%ld], rb.ndone[%d] != rb.size[%d]\n",
+ "file[%s], blocknum[%lld], rb.ndone[%d] != rb.size[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
rb.ndone,
rb.size);
dump_bad_block(rb.buf, rb.size);
@@ -1485,12 +1485,12 @@ static int decompress_and_deserialize_worker(struct rbuf curr_rbuf,
const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
fprintf(stderr,
"%s:%d:decompress_and_deserialize_worker - "
- "file[%s], blocknum[%ld], read_and_decompress_sub_block failed "
+ "file[%s], blocknum[%lld], read_and_decompress_sub_block failed "
"with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
r);
dump_bad_block(curr_rbuf.buf, curr_rbuf.size);
goto exit;
@@ -1502,12 +1502,12 @@ static int decompress_and_deserialize_worker(struct rbuf curr_rbuf,
const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
fprintf(stderr,
"%s:%d:decompress_and_deserialize_worker - "
- "file[%s], blocknum[%ld], deserialize_ftnode_partition failed "
+ "file[%s], blocknum[%lld], deserialize_ftnode_partition failed "
"with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
r);
dump_bad_block(curr_rbuf.buf, curr_rbuf.size);
goto exit;
@@ -1582,11 +1582,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], rb->size[%u] < 24\n",
+ "file[%s], blocknum[%lld], rb->size[%u] < 24\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
rb->size);
dump_bad_block(rb->buf, rb->size);
// TODO: What error do we return here?
@@ -1602,12 +1602,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], unrecognized magic number "
+ "file[%s], blocknum[%lld], unrecognized magic number "
"%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
static_cast<const uint8_t*>(magic)[0],
static_cast<const uint8_t*>(magic)[1],
static_cast<const uint8_t*>(magic)[2],
@@ -1627,12 +1627,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], node->layout_version_read_from_disk[%d] "
+ "file[%s], blocknum[%lld], node->layout_version_read_from_disk[%d] "
"< FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
node->layout_version_read_from_disk,
FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES);
dump_bad_block(rb->buf, rb->size);
@@ -1667,11 +1667,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], needed_size[%d] > rb->size[%d]\n",
+ "file[%s], blocknum[%lld], needed_size[%d] > rb->size[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
needed_size,
rb->size);
dump_bad_block(rb->buf, rb->size);
@@ -1695,11 +1695,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], stored_checksum[%d] != checksum[%d]\n",
+ "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
stored_checksum,
checksum);
dump_bad_block(rb->buf, rb->size);
@@ -1717,12 +1717,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], rb->size[%d] - rb->ndone[%d] < "
+ "file[%s], blocknum[%lld], rb->size[%d] - rb->ndone[%d] < "
"sb_node_info.compressed_size[%d] + 8\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
rb->size,
rb->ndone,
sb_node_info.compressed_size);
@@ -1744,11 +1744,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], sb_node_info.xsum[%d] != actual_xsum[%d]\n",
+ "file[%s], blocknum[%lld], sb_node_info.xsum[%d] != actual_xsum[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
sb_node_info.xsum,
actual_xsum);
dump_bad_block(rb->buf, rb->size);
@@ -1774,12 +1774,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], deserialize_ftnode_info failed with "
+ "file[%s], blocknum[%lld], deserialize_ftnode_info failed with "
"%d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
r);
dump_bad_block(
static_cast<unsigned char *>(sb_node_info.uncompressed_ptr),
@@ -1812,12 +1812,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
fprintf(
stderr,
"%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
- "file[%s], blocknum[%ld], toku_ftnode_pf_callback failed with "
+ "file[%s], blocknum[%lld], toku_ftnode_pf_callback failed with "
"%d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
r);
dump_bad_block(rb->buf, rb->size);
goto cleanup;
@@ -2164,12 +2164,12 @@ static int deserialize_and_upgrade_ftnode(FTNODE node,
const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
fprintf(stderr,
"%s:%d:deserialize_and_upgrade_ftnode - "
- "file[%s], blocknum[%ld], "
+ "file[%s], blocknum[%lld], "
"read_and_decompress_block_from_fd_into_rbuf failed with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
r);
goto exit;
}
@@ -2190,12 +2190,12 @@ static int deserialize_and_upgrade_ftnode(FTNODE node,
const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
fprintf(stderr,
"%s:%d:deserialize_and_upgrade_ftnode - "
- "file[%s], blocknum[%ld], version[%d] > "
+ "file[%s], blocknum[%lld], version[%d] > "
"FT_LAYOUT_VERSION_14[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
version,
FT_LAYOUT_VERSION_14);
dump_bad_block(rb.buf, rb.size);
@@ -2278,12 +2278,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
memcmp(magic, "tokunode", 8) != 0) {
fprintf(stderr,
"%s:%d:deserialize_ftnode_from_rbuf - "
- "file[%s], blocknum[%ld], unrecognized magic number "
+ "file[%s], blocknum[%lld], unrecognized magic number "
"%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
static_cast<const uint8_t *>(magic)[0],
static_cast<const uint8_t *>(magic)[1],
static_cast<const uint8_t *>(magic)[2],
@@ -2309,12 +2309,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
if (r != 0) {
fprintf(stderr,
"%s:%d:deserialize_ftnode_from_rbuf - "
- "file[%s], blocknum[%ld], deserialize_and_upgrade_ftnode "
+ "file[%s], blocknum[%lld], deserialize_and_upgrade_ftnode "
"failed with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
r);
dump_bad_block(rb->buf, rb->size);
goto cleanup;
@@ -2355,11 +2355,11 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
fprintf(
stderr,
"%s:%d:deserialize_ftnode_from_rbuf - "
- "file[%s], blocknum[%ld], stored_checksum[%d] != checksum[%d]\n",
+ "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
stored_checksum,
checksum);
dump_bad_block(rb->buf, rb->size);
@@ -2377,12 +2377,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
fprintf(
stderr,
"%s:%d:deserialize_ftnode_from_rbuf - "
- "file[%s], blocknum[%ld], read_and_decompress_sub_block failed "
+ "file[%s], blocknum[%lld], read_and_decompress_sub_block failed "
"with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
r);
dump_bad_block(
static_cast<unsigned char *>(sb_node_info.uncompressed_ptr),
@@ -2398,12 +2398,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
fprintf(
stderr,
"%s:%d:deserialize_ftnode_from_rbuf - "
- "file[%s], blocknum[%ld], deserialize_ftnode_info failed with "
+ "file[%s], blocknum[%lld], deserialize_ftnode_info failed with "
"%d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
r);
dump_bad_block(rb->buf, rb->size);
goto cleanup;
@@ -2470,12 +2470,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
fprintf(
stderr,
"%s:%d:deserialize_ftnode_from_rbuf - "
- "file[%s], blocknum[%ld], childnum[%d], "
+ "file[%s], blocknum[%lld], childnum[%d], "
"decompress_and_deserialize_worker failed with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
i,
r);
dump_bad_block(rb->buf, rb->size);
@@ -2490,13 +2490,13 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
fprintf(
stderr,
"%s:%d:deserialize_ftnode_from_rbuf - "
- "file[%s], blocknum[%ld], childnum[%d], "
+ "file[%s], blocknum[%lld], childnum[%d], "
"check_and_copy_compressed_sub_block_worker failed with "
"%d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
i,
r);
dump_bad_block(rb->buf, rb->size);
@@ -2641,12 +2641,12 @@ int toku_deserialize_bp_from_compressed(FTNODE node,
const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
fprintf(stderr,
"%s:%d:toku_deserialize_bp_from_compressed - "
- "file[%s], blocknum[%ld], "
+ "file[%s], blocknum[%lld], "
"deserialize_ftnode_partition failed with %d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- node->blocknum.b,
+ (longlong)node->blocknum.b,
r);
dump_bad_block(static_cast<unsigned char *>(curr_sb->compressed_ptr),
curr_sb->compressed_size);
@@ -2689,12 +2689,12 @@ static int deserialize_ftnode_from_fd(int fd,
fprintf(
stderr,
"%s:%d:deserialize_ftnode_from_fd - "
- "file[%s], blocknum[%ld], deserialize_ftnode_from_rbuf failed with "
+ "file[%s], blocknum[%lld], deserialize_ftnode_from_rbuf failed with "
"%d\n",
__FILE__,
__LINE__,
fname ? fname : "unknown",
- blocknum.b,
+ (longlong)blocknum.b,
r);
dump_bad_block(rb.buf, rb.size);
}
diff --git a/storage/tokudb/PerconaFT/portability/memory.h b/storage/tokudb/PerconaFT/portability/memory.h
index 5ae652d39fc..851e4d69e03 100644
--- a/storage/tokudb/PerconaFT/portability/memory.h
+++ b/storage/tokudb/PerconaFT/portability/memory.h
@@ -107,7 +107,7 @@ size_t toku_malloc_usable_size(void *p) __attribute__((__visibility__("default")
#define XMALLOC(v) CAST_FROM_VOIDP(v, toku_xmalloc(sizeof(*v)))
#define XMALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xmalloc((n)*sizeof(*v)))
#define XCALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xcalloc((n), (sizeof(*v))))
-#define XCALLOC(v) XCALLOC_N(1,(v))
+#define XCALLOC(v) XCALLOC_N(1,v)
#define XREALLOC(v,s) CAST_FROM_VOIDP(v, toku_xrealloc(v, s))
#define XREALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xrealloc(v, (n)*sizeof(*v)))
diff --git a/storage/tokudb/PerconaFT/portability/toku_debug_sync.h b/storage/tokudb/PerconaFT/portability/toku_debug_sync.h
index cbe72b16912..cf0788f6f2b 100644
--- a/storage/tokudb/PerconaFT/portability/toku_debug_sync.h
+++ b/storage/tokudb/PerconaFT/portability/toku_debug_sync.h
@@ -63,9 +63,6 @@ inline void toku_debug_sync(struct tokutxn *txn, const char *sync_point_name) {
void *client_extra;
THD *thd;
- if (likely(!opt_debug_sync_timeout))
- return;
-
toku_txn_get_client_id(txn, &client_id, &client_extra);
thd = reinterpret_cast<THD *>(client_extra);
DEBUG_SYNC(thd, sync_point_name);
diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.h b/storage/tokudb/PerconaFT/portability/toku_pthread.h
index e3bd3bce598..a0dfcc246a7 100644
--- a/storage/tokudb/PerconaFT/portability/toku_pthread.h
+++ b/storage/tokudb/PerconaFT/portability/toku_pthread.h
@@ -162,10 +162,20 @@ typedef struct toku_mutex_aligned {
#define ZERO_COND_INITIALIZER \
{ 0 }
#elif defined(__APPLE__)
+#if TOKU_PTHREAD_DEBUG
+#define ZERO_COND_INITIALIZER \
+ { \
+ { 0 , { 0 } }, \
+ nullptr, \
+ 0 \
+ }
+#else
#define ZERO_COND_INITIALIZER \
{ \
- { 0 } \
+ { 0 , { 0 } }, \
+ nullptr \
}
+#endif
#else // __linux__, at least
#define ZERO_COND_INITIALIZER \
{}
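The macOS initializer now spells out one brace level per nested member (plus the extra fields in the TOKU_PTHREAD_DEBUG build), presumably because clang's -Wmissing-braces complains when an aggregate containing a struct is initialized with a single flat { 0 }. A self-contained sketch with an invented layout (Apple's real pthread_cond_t and TokuFT's toku_cond_t differ):

    #include <stddef.h>

    typedef struct { long sig; char opaque[40]; } fake_pthread_cond_t;
    typedef struct { fake_pthread_cond_t pcond; void *dbg; } fake_toku_cond_t;

    /* explicit nesting, mirroring the new ZERO_COND_INITIALIZER shape */
    static fake_toku_cond_t c1= { { 0, { 0 } }, NULL };

    /* static fake_toku_cond_t c2= { 0 };  -- legal C, but clang suggests
       braces around the initialization of the subobject */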
diff --git a/storage/tokudb/PerconaFT/src/CMakeLists.txt b/storage/tokudb/PerconaFT/src/CMakeLists.txt
index 65bf4814cf8..bae37389004 100644
--- a/storage/tokudb/PerconaFT/src/CMakeLists.txt
+++ b/storage/tokudb/PerconaFT/src/CMakeLists.txt
@@ -18,7 +18,7 @@ set(tokudb_srcs
## make the shared library
add_library(${LIBTOKUDB} SHARED ${tokudb_srcs})
add_dependencies(${LIBTOKUDB} install_tdb_h generate_log_code)
-target_link_libraries(${LIBTOKUDB} LINK_PRIVATE locktree_static ft_static util_static lzma snappy ${LIBTOKUPORTABILITY})
+target_link_libraries(${LIBTOKUDB} LINK_PRIVATE locktree_static ft_static util_static lzma snappy dbug ${LIBTOKUPORTABILITY})
target_link_libraries(${LIBTOKUDB} LINK_PUBLIC ${ZLIB_LIBRARY} )
## make the static library
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh
index f0195ecadd3..f0195ecadd3 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile
index ec64c622026..ec64c622026 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess
index 7501b1bee01..7501b1bee01 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath
index c492a93b663..c492a93b663 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub
index a39437d0158..a39437d0158 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp
index df8eea7e4ce..df8eea7e4ce 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh
index 6781b987bdb..6781b987bdb 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh
index b36c4ad366c..b36c4ad366c 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing
index 28055d2ae6f..28055d2ae6f 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash
index 35ea4dae973..35ea4dae973 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh
index ff0cb304df4..ff0cb304df4 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh
diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh
index 7dd9a3901bf..7dd9a3901bf 100644..100755
--- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh
+++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh
diff --git a/storage/tokudb/PerconaFT/util/dmt.cc b/storage/tokudb/PerconaFT/util/dmt.cc
index b5b94982487..642c9367d7e 100644
--- a/storage/tokudb/PerconaFT/util/dmt.cc
+++ b/storage/tokudb/PerconaFT/util/dmt.cc
@@ -80,8 +80,8 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fix
paranoid_invariant(numvalues > 0);
void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize);
paranoid_invariant_notnull(ptr);
- uint8_t * const CAST_FROM_VOIDP(dest, ptr);
- const uint8_t * const CAST_FROM_VOIDP(src, mem);
+ uint8_t * CAST_FROM_VOIDP(dest, ptr);
+ const uint8_t * CAST_FROM_VOIDP(src, mem);
if (pad_bytes == 0) {
paranoid_invariant(aligned_memsize == mem_length);
memcpy(dest, src, aligned_memsize);
diff --git a/storage/tokudb/PerconaFT/util/omt.h b/storage/tokudb/PerconaFT/util/omt.h
index c7ed2ca546f..36946401381 100644
--- a/storage/tokudb/PerconaFT/util/omt.h
+++ b/storage/tokudb/PerconaFT/util/omt.h
@@ -127,7 +127,7 @@ public:
paranoid_invariant(index != NODE_NULL);
m_index = index;
}
-} __attribute__((__packed__,aligned(4)));
+};
template<>
class subtree_templated<true> {
@@ -184,7 +184,7 @@ public:
inline void disable_bit(void) {
m_bitfield &= MASK_INDEX;
}
-} __attribute__((__packed__)) ;
+};
template<typename omtdata_t, bool subtree_supports_marks>
class omt_node_templated {
@@ -197,7 +197,7 @@ public:
// this needs to be in both implementations because we don't have
// a "static if" the caller can use
inline void clear_stolen_bits(void) {}
-} __attribute__((__packed__,aligned(4)));
+};
template<typename omtdata_t>
class omt_node_templated<omtdata_t, true> {
@@ -234,7 +234,7 @@ public:
this->unset_marked_bit();
this->unset_marks_below_bit();
}
-} __attribute__((__packed__,aligned(4)));
+};
}
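Dropping __attribute__((__packed__,aligned(4))) lets the compiler lay the omt nodes out naturally; packed class layouts trip warnings on newer compilers, and taking the address of a member of a packed struct yields a potentially misaligned pointer, which clang and GCC 9+ diagnose (-Waddress-of-packed-member) and which is genuinely hazardous on strict-alignment targets. The hazard in miniature:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct __attribute__((packed)) pair {
      uint8_t  tag;
      uint32_t value;          /* at offset 1: misaligned */
    };

    int main(void)
    {
      struct pair p= { 1, 42 };
      /* uint32_t *vp= &p.value;   -- clang/GCC 9+: address-of-packed-member */
      uint32_t v;
      memcpy(&v, (const char *) &p + offsetof(struct pair, value), sizeof v);
      printf("%u\n", v);
      return 0;
    }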
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
index 7f8b9dd5104..a5060d9e3bf 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
@@ -483,72 +483,72 @@ SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
******************** CREATE USER ********************
CREATE USER 'user_test_rpl'@'localhost' IDENTIFIED BY PASSWORD '*1111111111111111111111111111111111111111';
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 N
connection slave;
USE test_rpl;
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 N
connection master;
******************** GRANT ********************
GRANT SELECT ON *.* TO 'user_test_rpl'@'localhost';
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 Y
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 Y
connection slave;
USE test_rpl;
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 Y
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 Y
connection master;
******************** REVOKE ********************
REVOKE SELECT ON *.* FROM 'user_test_rpl'@'localhost';
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 N
connection slave;
USE test_rpl;
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 N
connection master;
******************** SET PASSWORD ********************
SET PASSWORD FOR 'user_test_rpl'@'localhost' = '*0000000000000000000000000000000000000000';
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *0000000000000000000000000000000000000000 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl mysql_native_password *0000000000000000000000000000000000000000 N
connection slave;
USE test_rpl;
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl *0000000000000000000000000000000000000000 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl mysql_native_password *0000000000000000000000000000000000000000 N
connection master;
******************** RENAME USER ********************
RENAME USER 'user_test_rpl'@'localhost' TO 'user_test_rpl_2'@'localhost';
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl_2 *0000000000000000000000000000000000000000 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl_2 mysql_native_password *0000000000000000000000000000000000000000 N
connection slave;
USE test_rpl;
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
-localhost user_test_rpl_2 *0000000000000000000000000000000000000000 N
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
+localhost user_test_rpl_2 mysql_native_password *0000000000000000000000000000000000000000 N
connection master;
******************** DROP USER ********************
DROP USER 'user_test_rpl_2'@'localhost';
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
connection slave;
USE test_rpl;
-SELECT host, user, password, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password select_priv
+SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
+host user password plugin authentication_string select_priv
connection master;
INSERT INTO t1 VALUES(100, 'test');
diff --git a/storage/tokudb/tokudb.cnf.in b/storage/tokudb/tokudb.cnf.in
index de9b5b711ee..ff7f0a5f5f6 100644
--- a/storage/tokudb/tokudb.cnf.in
+++ b/storage/tokudb/tokudb.cnf.in
@@ -5,5 +5,4 @@
plugin-load-add=ha_tokudb.so
[mysqld_safe]
-# it might be necessary to uncomment the following line if jemalloc >= 5.0.0
@cnf_malloc_lib@
diff --git a/storage/tokudb/tokudb.conf.in b/storage/tokudb/tokudb.conf.in
index d22f6686d91..a5ff055f44c 100644
--- a/storage/tokudb/tokudb.conf.in
+++ b/storage/tokudb/tokudb.conf.in
@@ -1,3 +1,2 @@
[Service]
-# it might be necessary to uncomment the following line if jemalloc >= 5.0.0
@systemd_env@
diff --git a/storage/tokudb/tokudb_thread.h b/storage/tokudb/tokudb_thread.h
index 0be5583ffb2..bd89f4dfd9c 100644
--- a/storage/tokudb/tokudb_thread.h
+++ b/storage/tokudb/tokudb_thread.h
@@ -111,7 +111,6 @@ public:
// wait for the event to become signalled
void wait(void);
- int wait(ulonglong microseconds);
// signal the event
void signal(void);
@@ -152,7 +151,6 @@ public:
// wait for the semaphore to become signalled
E_WAIT wait(void);
- E_WAIT wait(ulonglong microseconds);
// signal the semaphore to increase the count
// return true if signalled, false if ignored due to count
@@ -372,28 +370,6 @@ inline void event_t::wait(void) {
assert_debug(r == 0);
return;
}
-inline int event_t::wait(ulonglong microseconds) {
- timespec waittime = time::offset_timespec(microseconds);
- int r = pthread_mutex_timedlock(&_mutex, &waittime);
- if (r == ETIMEDOUT) return ETIMEDOUT;
- assert_debug(r == 0);
- while (_signalled == false && _pulsed == false) {
- r = pthread_cond_timedwait(&_cond, &_mutex, &waittime);
- if (r == ETIMEDOUT) {
- r = pthread_mutex_unlock(&_mutex);
- assert_debug(r == 0);
- return ETIMEDOUT;
- }
- assert_debug(r == 0);
- }
- if (_manual_reset == false)
- _signalled = false;
- if (_pulsed)
- _pulsed = false;
- r = pthread_mutex_unlock(&_mutex);
- assert_debug(r == 0);
- return 0;
-}
inline void event_t::signal(void) {
int r MY_ATTRIBUTE((unused)) = pthread_mutex_lock(&_mutex);
assert_debug(r == 0);
@@ -479,31 +455,6 @@ inline semaphore_t::E_WAIT semaphore_t::wait(void) {
assert_debug(r == 0);
return ret;
}
-inline semaphore_t::E_WAIT semaphore_t::wait(ulonglong microseconds) {
- E_WAIT ret;
- timespec waittime = time::offset_timespec(microseconds);
- int r = pthread_mutex_timedlock(&_mutex, &waittime);
- if (r == ETIMEDOUT) return E_TIMEDOUT;
- assert_debug(r == 0);
- while (_signalled == 0 && _interrupted == false) {
- r = pthread_cond_timedwait(&_cond, &_mutex, &waittime);
- if (r == ETIMEDOUT) {
- r = pthread_mutex_unlock(&_mutex);
- assert_debug(r == 0);
- return E_TIMEDOUT;
- }
- assert_debug(r == 0);
- }
- if (_interrupted) {
- ret = E_INTERRUPTED;
- } else {
- _signalled--;
- ret = E_SIGNALLED;
- }
- r = pthread_mutex_unlock(&_mutex);
- assert_debug(r == 0);
- return ret;
-}
inline bool semaphore_t::signal(void) {
bool ret = false;
int r MY_ATTRIBUTE((unused)) = pthread_mutex_lock(&_mutex);