summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorIgor Babaev <igor@askmonty.org>2012-12-20 10:58:40 -0800
committerIgor Babaev <igor@askmonty.org>2012-12-20 10:58:40 -0800
commitca2cdaad86750509764256ff8086e031b4870b24 (patch)
treebda13b02b556072c72a247b2c6f13f2ec9722ffe /sql
parent6c3de76ad5cb8683ab8b049e0bbba670115d304a (diff)
downloadmariadb-git-ca2cdaad86750509764256ff8086e031b4870b24.tar.gz
The patch for the task mdev-539.
The patch lifts the limitation of the current implementation of ALTER TABLE that does not allow building unique/primary indexes by sort for the MyISAM and Aria engines.
Diffstat (limited to 'sql')
-rw-r--r--sql/ha_partition.cc3
-rw-r--r--sql/ha_partition.h2
-rw-r--r--sql/handler.h6
-rw-r--r--sql/sql_table.cc3
4 files changed, 8 insertions, 6 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 463a1678449..b87192db69f 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -3809,6 +3809,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
SYNOPSIS
start_bulk_insert()
rows Number of rows to insert
+ flags Flags to control index creation
RETURN VALUE
NONE
@@ -3816,7 +3817,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
DESCRIPTION
rows == 0 means we will probably insert many rows
*/
-void ha_partition::start_bulk_insert(ha_rows rows)
+void ha_partition::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_partition::start_bulk_insert");
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 71408324c1b..fff586f52e4 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -372,7 +372,7 @@ public:
virtual int delete_row(const uchar * buf);
virtual int delete_all_rows(void);
virtual int truncate();
- virtual void start_bulk_insert(ha_rows rows);
+ virtual void start_bulk_insert(ha_rows rows, uint flags);
virtual int end_bulk_insert();
private:
ha_rows guess_bulk_insert_rows();
diff --git a/sql/handler.h b/sql/handler.h
index a7141246993..f1f2ac6469f 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1952,11 +1952,11 @@ public:
/** to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
int ha_repair(THD* thd, HA_CHECK_OPT* check_opt);
- void ha_start_bulk_insert(ha_rows rows)
+ void ha_start_bulk_insert(ha_rows rows, uint flags= 0)
{
DBUG_ENTER("handler::ha_start_bulk_insert");
estimation_rows_to_insert= rows;
- start_bulk_insert(rows);
+ start_bulk_insert(rows, flags);
DBUG_VOID_RETURN;
}
int ha_end_bulk_insert()
@@ -2861,7 +2861,7 @@ private:
DBUG_ASSERT(!(ha_table_flags() & HA_CAN_REPAIR));
return HA_ADMIN_NOT_IMPLEMENTED;
}
- virtual void start_bulk_insert(ha_rows rows) {}
+ virtual void start_bulk_insert(ha_rows rows, uint flags) {}
virtual int end_bulk_insert() { return 0; }
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
enum ha_rkey_function find_flag)
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 72bc3b6dc69..ea82937aef1 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -7351,7 +7351,8 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
MODE_STRICT_ALL_TABLES));
from->file->info(HA_STATUS_VARIABLE);
- to->file->ha_start_bulk_insert(from->file->stats.records);
+ to->file->ha_start_bulk_insert(from->file->stats.records,
+ ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
errpos= 3;
copy_end=copy;