author    Sergei Golubchik <serg@mariadb.org>    2018-05-19 16:30:36 +0200
committer Sergei Golubchik <serg@mariadb.org>    2018-05-19 22:05:55 +0200
commit    91dfb6141f45aed5cf3fe585d8c5db86f9ddbfe9 (patch)
tree      2a21bd15ac2547c12f0f60a8a5f680f0c996cd9b /sql/sql_table.cc
parent    f76a17e355f4ee031f3cabbbd679643d85347834 (diff)
parent    c1b5d2801e81443c8ad02823e12d54f662e4c801 (diff)
Merge branch '10.0' into 10.1
Diffstat (limited to 'sql/sql_table.cc')
-rw-r--r--  sql/sql_table.cc | 32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index ed11fa6ec60..72488fc870f 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -9414,9 +9414,7 @@ bool mysql_trans_prepare_alter_copy_data(THD *thd)
This needs to be done before external_lock.
*/
- if (ha_enable_transaction(thd, FALSE))
- DBUG_RETURN(TRUE);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(ha_enable_transaction(thd, FALSE) != 0);
}
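
[Editor's note] The first hunk collapses an if/return-TRUE/return-FALSE ladder into a single boolean return. A minimal sketch of the same simplification; the stub below is a hypothetical stand-in for ha_enable_transaction(), which returns nonzero on failure as in the handler API:

    // Hypothetical stand-in for ha_enable_transaction();
    // returns nonzero on failure.
    static int enable_transaction(bool on);

    // Before the patch: a branch exists only to forward the result.
    static bool prepare_verbose()
    {
      if (enable_transaction(false))
        return true;
      return false;
    }

    // After the patch: return the condition directly; "!= 0" keeps
    // the int-to-bool conversion explicit.
    static bool prepare_direct()
    {
      return enable_transaction(false) != 0;
    }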
@@ -9472,6 +9470,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
ha_rows examined_rows;
ha_rows found_rows;
bool auto_increment_field_copied= 0;
+ bool cleanup_done= 0;
ulonglong save_sql_mode= thd->variables.sql_mode;
ulonglong prev_insert_id, time_to_report_progress;
Field **dfield_ptr= to->default_field;
@@ -9480,15 +9479,23 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
/* Two or 3 stages; Sorting, copying data and update indexes */
thd_progress_init(thd, 2 + MY_TEST(order));
- if (mysql_trans_prepare_alter_copy_data(thd))
- DBUG_RETURN(-1);
-
if (!(copy= new Copy_field[to->s->fields]))
DBUG_RETURN(-1); /* purecov: inspected */
+ if (mysql_trans_prepare_alter_copy_data(thd))
+ {
+ delete [] copy;
+ DBUG_RETURN(-1);
+ }
+
/* We need external lock before we can disable/enable keys */
if (to->file->ha_external_lock(thd, F_WRLCK))
+ {
+ /* Undo call to mysql_trans_prepare_alter_copy_data() */
+ ha_enable_transaction(thd, TRUE);
+ delete [] copy;
DBUG_RETURN(-1);
+ }
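
[Editor's note] This hunk reorders setup so that every failure path unwinds exactly what was already acquired: the Copy_field array is now allocated first, both error paths free it, and the external-lock failure additionally re-enables transactions to undo mysql_trans_prepare_alter_copy_data(). A condensed sketch of the same acquire-in-order, release-in-reverse pattern; all names below are hypothetical stand-ins, not the real sql_table.cc symbols:

    #include <new>  // std::nothrow

    static int  prepare_trans();       // like mysql_trans_prepare_alter_copy_data()
    static void undo_trans();          // like ha_enable_transaction(thd, TRUE)
    static int  take_external_lock();  // like ha_external_lock(thd, F_WRLCK)

    static int copy_setup(unsigned nfields)
    {
      char *copy= new (std::nothrow) char[nfields];  // step 1
      if (!copy)
        return -1;                     // nothing acquired yet

      if (prepare_trans())             // step 2
      {
        delete [] copy;                // undo step 1
        return -1;
      }

      if (take_external_lock())        // step 3
      {
        undo_trans();                  // undo step 2
        delete [] copy;                // undo step 1
        return -1;
      }
      return 0;                        // caller now owns 'copy'
    }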
alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff);
@@ -9498,7 +9505,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
from->file->info(HA_STATUS_VARIABLE);
to->file->ha_start_bulk_insert(from->file->stats.records,
ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
-
List_iterator<Create_field> it(create);
Create_field *def;
copy_end=copy;
@@ -9702,7 +9708,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
}
end_read_record(&info);
free_io_cache(from);
- delete [] copy;
THD_STAGE_INFO(thd, stage_enabling_keys);
thd_progress_next_stage(thd);
@@ -9717,6 +9722,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
to->file->print_error(my_errno,MYF(0));
error= 1;
}
+ cleanup_done= 1;
to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
if (mysql_trans_commit_alter_copy_data(thd))
@@ -9728,6 +9734,16 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
*copied= found_count;
*deleted=delete_count;
to->file->ha_release_auto_increment();
+ delete [] copy;
+
+ if (!cleanup_done)
+ {
+ /* This happens if we get an error during initialization of data */
+ DBUG_ASSERT(error);
+ to->file->ha_end_bulk_insert();
+ ha_enable_transaction(thd, TRUE);
+ }
+
if (to->file->ha_external_lock(thd,F_UNLCK))
error=1;
if (error < 0 && to->file->extra(HA_EXTRA_PREPARE_FOR_RENAME))
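
[Editor's note] The cleanup_done flag added in the last three hunks makes the function converge on a single exit path: delete [] copy moves from the copy loop to the tail, and if control reaches the tail without the normal teardown having run (an error during initialization), ha_end_bulk_insert() and ha_enable_transaction() are performed there instead. A condensed, hypothetical shape of that guard; the stubs below only mimic the real handler calls:

    #include <cassert>

    static int  init_copy();          // e.g. the sorting/read-record setup
    static int  do_copy_rows();       // the main copy loop
    static void end_bulk_insert();    // like to->file->ha_end_bulk_insert()
    static void enable_trans();       // like ha_enable_transaction(thd, TRUE)

    static int copy_with_guard()
    {
      bool cleanup_done= false;
      int  error= 0;

      if (init_copy())
      {
        error= 1;
        goto err;                     // skips the normal teardown below
      }

      error= do_copy_rows();
      end_bulk_insert();              // normal teardown
      enable_trans();
      cleanup_done= true;

    err:
      if (!cleanup_done)
      {
        assert(error);                // only an init failure lands here
        end_bulk_insert();
        enable_trans();
      }
      return error;
    }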