path: root/sql/sql_select.cc
author    Monty <monty@mariadb.org>  2017-06-30 17:56:58 +0300
committer Monty <monty@mariadb.org>  2017-06-30 22:31:37 +0300
commit    dd8474b1dc556d0ea9491d1908a2d1237818e8c1 (patch)
tree      9ab8535fbac53bf3a644482a41c59eaca1371754 /sql/sql_select.cc
parent    9f484b63f1b61e6ade1481cfb8465f8fe208386d (diff)
download  mariadb-git-dd8474b1dc556d0ea9491d1908a2d1237818e8c1.tar.gz
Added tmp_disk_table_size to limit size of Aria temp tables in tmpdir
- Added variable tmp_disk_table_size
- Added variable tmp_memory_table_size as an alias for tmp_table_size
- Changed internal variable tmp_table_size to tmp_memory_table_size
- create_info.data_file_length is now set from tmp_disk_table_size
- Fixed that Aria doesn't reset max_data_file_length for internal tables
- Added a status flag for when the table is full, so that we can detect this on the next insert. This ensures that the table is always 'correct', but the error is reported one row after the row that grew the table too big.
- Removed some mutex locks for internal temporary tables
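For context, here is a minimal standalone sketch (simplified, hypothetical names; not the actual server structs) of how the in-memory row limit is derived once tmp_table_size becomes tmp_memory_table_size: heap tables are additionally clamped by max_heap_table_size, mirroring the max_rows hunk in the diff below.

```cpp
#include <algorithm>
#include <cstdint>

// Hypothetical, simplified stand-in for thd->variables.
struct SessionVars {
  uint64_t tmp_memory_table_size;   // was: tmp_table_size
  uint64_t max_heap_table_size;
};

// Maximum number of rows an internal in-memory temp table may hold,
// loosely mirroring the max_rows computation in create_tmp_table().
static uint64_t tmp_table_max_rows(const SessionVars &vars,
                                   bool is_heap_engine,
                                   uint64_t reclength)
{
  if (vars.tmp_memory_table_size == ~(uint64_t) 0)   // "no limit" sentinel
    return ~(uint64_t) 0;
  uint64_t byte_limit= is_heap_engine
    ? std::min(vars.tmp_memory_table_size, vars.max_heap_table_size)
    : vars.tmp_memory_table_size;
  uint64_t rows= byte_limit / reclength;
  return rows ? rows : 1;                            // always allow one row
}
```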
Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--  sql/sql_select.cc  19
1 file changed, 7 insertions(+), 12 deletions(-)
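The other half of the change caps the on-disk temp table via tmp_disk_table_size instead of the previous "unlimited" data_file_length, and marks the table as full so the error surfaces on the following insert, as the commit message describes. A hedged sketch of that behaviour (hypothetical names, not the real storage-engine API):

```cpp
#include <cstdint>

// Hypothetical temp-table state, loosely mirroring the diff below.
struct TmpTableState {
  uint64_t data_file_length;   // cap taken from tmp_disk_table_size
  uint64_t bytes_used;
  bool     is_full;            // set once the cap is exceeded
};

// Previously: data_file_length= ~(ulonglong) 0, i.e. the table could
// never report "full".  Now the cap comes from the session variable.
static void init_tmp_table(TmpTableState *t, uint64_t tmp_disk_table_size)
{
  t->data_file_length= tmp_disk_table_size;
  t->bytes_used= 0;
  t->is_full= false;
}

// Sketch of the "full" flag: the row that grows the table past the cap
// still succeeds; the *next* insert is the one that fails.
static bool write_row(TmpTableState *t, uint64_t row_bytes)
{
  if (t->is_full)
    return false;                              // report "table is full"
  t->bytes_used+= row_bytes;
  if (t->bytes_used > t->data_file_length)
    t->is_full= true;                          // error surfaces next insert
  return true;
}
```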
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 6c56f0115ca..540271bc961 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -16703,7 +16703,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (blob_count || using_unique_constraint
|| (thd->variables.big_tables && !(select_options & SELECT_SMALL_RESULT))
|| (select_options & TMP_TABLE_FORCE_MYISAM)
- || thd->variables.tmp_table_size == 0)
+ || thd->variables.tmp_memory_table_size == 0)
{
share->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON);
table->file= get_new_handler(share, &table->mem_root,
@@ -16867,14 +16867,14 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
param->recinfo= recinfo; // Pointer to after last field
store_record(table,s->default_values); // Make empty default record
- if (thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit
+ if (thd->variables.tmp_memory_table_size == ~ (ulonglong) 0) // No limit
share->max_rows= ~(ha_rows) 0;
else
share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
- MY_MIN(thd->variables.tmp_table_size,
+ MY_MIN(thd->variables.tmp_memory_table_size,
thd->variables.max_heap_table_size) :
- thd->variables.tmp_table_size) /
- share->reclength);
+ thd->variables.tmp_memory_table_size) /
+ share->reclength);
set_if_bigger(share->max_rows,1); // For dummy start options
/*
Push the LIMIT clause to the temporary table creation, so that we
@@ -17412,10 +17412,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
}
bzero((char*) &create_info,sizeof(create_info));
-
- /* Use long data format, to ensure we never get a 'table is full' error */
- if (!(options & SELECT_SMALL_RESULT))
- create_info.data_file_length= ~(ulonglong) 0;
+ create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
/*
The logic for choosing the record format:
@@ -17611,9 +17608,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
MI_CREATE_INFO create_info;
bzero((char*) &create_info,sizeof(create_info));
-
- if (!(options & SELECT_SMALL_RESULT))
- create_info.data_file_length= ~(ulonglong) 0;
+ create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
if ((error=mi_create(share->table_name.str, share->keys, &keydef,
(uint) (*recinfo-start_recinfo),