summaryrefslogtreecommitdiff
path: root/storage/xtradb/handler
diff options
context:
space:
mode:
authorVicențiu Ciorbaru <vicentiu@mariadb.org>2017-05-16 14:16:11 +0300
committerVicențiu Ciorbaru <vicentiu@mariadb.org>2017-05-16 14:16:11 +0300
commit360a4a037271d65ab6471f7ab3f9b6a893d90a31 (patch)
treee3ecdbb7930396ec2f7069a1d81b7ff169a288c3 /storage/xtradb/handler
parent8d69ce7b821a88b5ba8749a08489839050283a63 (diff)
downloadmariadb-git-360a4a037271d65ab6471f7ab3f9b6a893d90a31.tar.gz
5.6.36-82.0
Diffstat (limited to 'storage/xtradb/handler')
-rw-r--r--storage/xtradb/handler/ha_innodb.cc179
-rw-r--r--storage/xtradb/handler/ha_innodb.h6
-rw-r--r--storage/xtradb/handler/handler0alter.cc6
3 files changed, 118 insertions, 73 deletions
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 05a77436a12..d1f6c3b499a 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2012, Facebook Inc.
@@ -701,6 +701,32 @@ innobase_is_fake_change(
THD* thd); /*!< in: MySQL thread handle of the user for
whom the transaction is being committed */
+/** Empty free list algorithm.
+Checks if buffer pool is big enough to enable backoff algorithm.
+InnoDB empty free list algorithm backoff requires free pages
+from LRU for the best performance.
+buf_LRU_buf_pool_running_out cancels query if 1/4 of
+buffer pool belongs to LRU or freelist.
+At the same time buf_flush_LRU_list_batch
+keeps up to BUF_LRU_MIN_LEN in LRU.
+In order to avoid deadlock backoff requires buffer pool
+to be at least 4*BUF_LRU_MIN_LEN,
+but flush performance is bad because of thrashing
+and additional BUF_LRU_MIN_LEN pages are requested.
+@param[in] algorithm desired algorithm from srv_empty_free_list_t
+@return true if it's possible to enable backoff. */
+static inline
+bool
+innodb_empty_free_list_algorithm_allowed(
+ srv_empty_free_list_t algorithm)
+{
+ long long buf_pool_pages = srv_buf_pool_size / srv_page_size
+ / srv_buf_pool_instances;
+
+ return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1)
+ || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF);
+}
+
/** Get the list of foreign keys referencing a specified table
table.
@param thd The thread handle
@@ -970,6 +996,10 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_x_lock_spin_rounds, SHOW_LONGLONG},
{"x_lock_spin_waits",
(char*) &export_vars.innodb_x_lock_spin_waits, SHOW_LONGLONG},
+ {"secondary_index_triggered_cluster_reads",
+ (char*) &export_vars.innodb_sec_rec_cluster_reads, SHOW_LONG},
+ {"secondary_index_triggered_cluster_reads_avoided",
+ (char*) &export_vars.innodb_sec_rec_cluster_reads_avoided, SHOW_LONG},
{NullS, NullS, SHOW_LONG}
};
@@ -1385,28 +1415,6 @@ innobase_drop_zip_dict(
/*!< in/out: zip dictionary name length */
/*************************************************************//**
-Checks if buffer pool is big enough to enable backoff algorithm.
-InnoDB empty free list algorithm backoff requires free pages
-from LRU for the best performance.
-buf_LRU_buf_pool_running_out cancels query if 1/4 of
-buffer pool belongs to LRU or freelist.
-At the same time buf_flush_LRU_list_batch
-keeps up to BUF_LRU_MIN_LEN in LRU.
-In order to avoid deadlock baclkoff requires buffer pool
-to be at least 4*BUF_LRU_MIN_LEN,
-but flush peformance is bad because of trashing
-and additional BUF_LRU_MIN_LEN pages are requested.
-@return true if it's possible to enable backoff. */
-static
-bool
-innodb_empty_free_list_algorithm_backoff_allowed(
- srv_empty_free_list_t
- algorithm, /*!< in: desired algorithm
- from srv_empty_free_list_t */
- long long buf_pool_pages); /*!< in: total number
- of pages inside buffer pool */
-
-/*************************************************************//**
Removes old archived transaction log files.
@return true on error */
static bool innobase_purge_archive_logs(
@@ -3446,7 +3454,8 @@ innobase_init(
innobase_hton->flush_logs = innobase_flush_logs;
innobase_hton->show_status = innobase_show_status;
innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS |
- HTON_SUPPORTS_ONLINE_BACKUPS | HTON_SUPPORTS_FOREIGN_KEYS;
+ HTON_SUPPORTS_ONLINE_BACKUPS | HTON_SUPPORTS_FOREIGN_KEYS |
+ HTON_SUPPORTS_COMPRESSED_COLUMNS;
innobase_hton->release_temporary_latches =
innobase_release_temporary_latches;
@@ -3915,10 +3924,9 @@ innobase_change_buffering_inited_ok:
innobase_commit_concurrency_init_default();
/* Do not enable backoff algorithm for small buffer pool. */
- if (!innodb_empty_free_list_algorithm_backoff_allowed(
+ if (!innodb_empty_free_list_algorithm_allowed(
static_cast<srv_empty_free_list_t>(
- srv_empty_free_list_algorithm),
- innobase_buffer_pool_size / srv_page_size)) {
+ srv_empty_free_list_algorithm))) {
sql_print_information(
"InnoDB: innodb_empty_free_list_algorithm "
"has been changed to legacy "
@@ -4160,6 +4168,10 @@ innobase_create_zip_dict(
if (UNIV_UNLIKELY(high_level_read_only)) {
DBUG_RETURN(HA_CREATE_ZIP_DICT_READ_ONLY);
}
+
+ if (UNIV_UNLIKELY(THDVAR(NULL, fake_changes))) {
+ DBUG_RETURN(HA_CREATE_ZIP_DICT_FAKE_CHANGES);
+ }
if (UNIV_UNLIKELY(*name_len > ZIP_DICT_MAX_NAME_LENGTH)) {
*name_len = ZIP_DICT_MAX_NAME_LENGTH;
@@ -4178,6 +4190,15 @@ innobase_create_zip_dict(
case DB_DUPLICATE_KEY:
result = HA_CREATE_ZIP_DICT_ALREADY_EXISTS;
break;
+ case DB_OUT_OF_MEMORY:
+ result = HA_CREATE_ZIP_DICT_OUT_OF_MEMORY;
+ break;
+ case DB_OUT_OF_FILE_SPACE:
+ result = HA_CREATE_ZIP_DICT_OUT_OF_FILE_SPACE;
+ break;
+ case DB_TOO_MANY_CONCURRENT_TRXS:
+ result = HA_CREATE_ZIP_DICT_TOO_MANY_CONCURRENT_TRXS;
+ break;
default:
ut_ad(0);
result = HA_CREATE_ZIP_DICT_UNKNOWN_ERROR;
@@ -4204,6 +4225,10 @@ innobase_drop_zip_dict(
DBUG_RETURN(HA_DROP_ZIP_DICT_READ_ONLY);
}
+ if (UNIV_UNLIKELY(THDVAR(NULL, fake_changes))) {
+ DBUG_RETURN(HA_DROP_ZIP_DICT_FAKE_CHANGES);
+ }
+
switch (dict_drop_zip_dict(name, *name_len)) {
case DB_SUCCESS:
result = HA_DROP_ZIP_DICT_OK;
@@ -4214,6 +4239,15 @@ innobase_drop_zip_dict(
case DB_ROW_IS_REFERENCED:
result = HA_DROP_ZIP_DICT_IS_REFERENCED;
break;
+ case DB_OUT_OF_MEMORY:
+ result = HA_DROP_ZIP_DICT_OUT_OF_MEMORY;
+ break;
+ case DB_OUT_OF_FILE_SPACE:
+ result = HA_DROP_ZIP_DICT_OUT_OF_FILE_SPACE;
+ break;
+ case DB_TOO_MANY_CONCURRENT_TRXS:
+ result = HA_DROP_ZIP_DICT_TOO_MANY_CONCURRENT_TRXS;
+ break;
default:
ut_ad(0);
result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR;
@@ -6032,6 +6066,8 @@ table_opened:
prebuilt->default_rec = table->s->default_values;
ut_ad(prebuilt->default_rec);
+ prebuilt->mysql_handler = this;
+
/* Looks like MySQL-3.23 sometimes has primary key number != 0 */
primary_key = table->s->primary_key;
key_used_on_scan = primary_key;
@@ -7181,9 +7217,31 @@ build_template_field(
ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED);
if (dict_index_is_clust(index)) {
+ templ->rec_field_is_prefix = false;
templ->rec_field_no = templ->clust_rec_field_no;
+ templ->rec_prefix_field_no = ULINT_UNDEFINED;
} else {
- templ->rec_field_no = dict_index_get_nth_col_pos(index, i);
+ /* If we're in a secondary index, keep track of the original
+ index position even if this is just a prefix index; we will use
+ this later to avoid a cluster index lookup in some cases.*/
+
+ templ->rec_field_no = dict_index_get_nth_col_pos(index, i,
+ &templ->rec_prefix_field_no);
+ templ->rec_field_is_prefix
+ = (templ->rec_field_no == ULINT_UNDEFINED)
+ && (templ->rec_prefix_field_no != ULINT_UNDEFINED);
+#ifdef UNIV_DEBUG
+ if (templ->rec_prefix_field_no != ULINT_UNDEFINED)
+ {
+ const dict_field_t* field = dict_index_get_nth_field(
+ index,
+ templ->rec_prefix_field_no);
+ ut_ad(templ->rec_field_is_prefix
+ == (field->prefix_len != 0));
+ } else {
+ ut_ad(!templ->rec_field_is_prefix);
+ }
+#endif
}
if (field->real_maybe_null()) {
@@ -7373,7 +7431,8 @@ ha_innobase::build_template(
} else {
templ->icp_rec_field_no
= dict_index_get_nth_col_pos(
- prebuilt->index, i);
+ prebuilt->index, i,
+ NULL);
}
if (dict_index_is_clust(prebuilt->index)) {
@@ -7403,7 +7462,7 @@ ha_innobase::build_template(
templ->icp_rec_field_no
= dict_index_get_nth_col_or_prefix_pos(
- prebuilt->index, i, TRUE);
+ prebuilt->index, i, TRUE, NULL);
ut_ad(templ->icp_rec_field_no
!= ULINT_UNDEFINED);
@@ -11288,7 +11347,8 @@ ha_innobase::delete_table(
extension, in contrast to ::create */
normalize_table_name(norm_name, name);
- if (srv_read_only_mode) {
+ if (srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) {
DBUG_RETURN(HA_ERR_TABLE_READONLY);
} else if (row_is_magic_monitor_table(norm_name)
&& check_global_access(thd, PROCESS_ACL)) {
@@ -15301,16 +15361,21 @@ and SYS_ZIP_DICT_COLS for all columns marked with
COLUMN_FORMAT_TYPE_COMPRESSED flag and updates
zip_dict_name / zip_dict_data for those which have associated
compression dictionaries.
+
+@param part_name Full table name (including partition part).
+ Must be non-NULL only if called from ha_partition.
*/
UNIV_INTERN
void
-ha_innobase::update_field_defs_with_zip_dict_info()
+ha_innobase::update_field_defs_with_zip_dict_info(const char *part_name)
{
DBUG_ENTER("update_field_defs_with_zip_dict_info");
ut_ad(!mutex_own(&dict_sys->mutex));
char norm_name[FN_REFLEN];
- normalize_table_name(norm_name, table_share->normalized_path.str);
+ normalize_table_name(norm_name,
+ part_name != 0 ? part_name :
+ table_share->normalized_path.str);
dict_table_t* ib_table = dict_table_open_on_name(
norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
@@ -16687,15 +16752,17 @@ innodb_buffer_pool_evict_uncompressed(void)
ut_ad(block->page.in_LRU_list);
mutex_enter(&block->mutex);
- if (!buf_LRU_free_page(&block->page, false)) {
- mutex_exit(&block->mutex);
- all_evicted = false;
- } else {
- mutex_exit(&block->mutex);
+ all_evicted = buf_LRU_free_page(&block->page, false);
+ mutex_exit(&block->mutex);
+
+ if (all_evicted) {
+
mutex_enter(&buf_pool->LRU_list_mutex);
- }
+ block = UT_LIST_GET_LAST(buf_pool->unzip_LRU);
+ } else {
- block = prev_block;
+ block = prev_block;
+ }
}
mutex_exit(&buf_pool->LRU_list_mutex);
@@ -17498,32 +17565,6 @@ innodb_status_output_update(
}
/*************************************************************//**
-Empty free list algorithm.
-Checks if buffer pool is big enough to enable backoff algorithm.
-InnoDB empty free list algorithm backoff requires free pages
-from LRU for the best performance.
-buf_LRU_buf_pool_running_out cancels query if 1/4 of
-buffer pool belongs to LRU or freelist.
-At the same time buf_flush_LRU_list_batch
-keeps up to BUF_LRU_MIN_LEN in LRU.
-In order to avoid deadlock baclkoff requires buffer pool
-to be at least 4*BUF_LRU_MIN_LEN,
-but flush peformance is bad because of trashing
-and additional BUF_LRU_MIN_LEN pages are requested.
-@return true if it's possible to enable backoff. */
-static
-bool
-innodb_empty_free_list_algorithm_backoff_allowed(
- srv_empty_free_list_t algorithm, /*!< in: desired algorithm
- from srv_empty_free_list_t */
- long long buf_pool_pages) /*!< in: total number
- of pages inside buffer pool */
-{
- return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1)
- || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF);
-}
-
-/*************************************************************//**
Empty free list algorithm. This function is registered as
a callback with MySQL.
@return 0 for valid algorithm */
@@ -17564,13 +17605,11 @@ innodb_srv_empty_free_list_algorithm_validate(
return(1);
algorithm = static_cast<srv_empty_free_list_t>(algo);
- if (!innodb_empty_free_list_algorithm_backoff_allowed(
- algorithm,
- innobase_buffer_pool_size / srv_page_size)) {
+ if (!innodb_empty_free_list_algorithm_allowed(algorithm)) {
sql_print_warning(
"InnoDB: innodb_empty_free_list_algorithm "
"= 'backoff' requires at least"
- " 20MB buffer pool.\n");
+ " 20MB buffer pool instances.\n");
return(1);
}
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 39de6b179ab..99953a22121 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -295,8 +295,12 @@ class ha_innobase: public handler
COLUMN_FORMAT_TYPE_COMPRESSED flag and updates
zip_dict_name / zip_dict_data for those which have associated
compression dictionaries.
+
+ @param part_name Full table name (including partition part).
+ Must be non-NULL only if called from ha_partition.
*/
- virtual void update_field_defs_with_zip_dict_info();
+ virtual void update_field_defs_with_zip_dict_info(
+ const char* part_name);
private:
/** Builds a 'template' to the prebuilt struct.
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 098d93d4529..d6576fe27d5 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -1148,7 +1148,8 @@ innobase_rec_to_mysql(
field->reset();
- ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE);
+ ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE,
+ NULL);
if (ipos == ULINT_UNDEFINED
|| rec_offs_nth_extern(offsets, ipos)) {
@@ -1196,7 +1197,8 @@ innobase_fields_to_mysql(
field->reset();
- ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE);
+ ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE,
+ NULL);
if (ipos == ULINT_UNDEFINED
|| dfield_is_ext(&fields[ipos])