-rw-r--r--  include/thr_lock.h | 4
-rw-r--r--  mysql-test/include/partition_1.inc | 747
-rw-r--r--  mysql-test/include/partition_10.inc | 73
-rw-r--r--  mysql-test/include/partition_11.inc | 34
-rw-r--r--  mysql-test/include/partition_12.inc | 65
-rw-r--r--  mysql-test/include/partition_layout.inc | 13
-rw-r--r--  mysql-test/r/ndb_autodiscover.result | 2
-rw-r--r--  mysql-test/r/ndb_bitfield.result | 2
-rw-r--r--  mysql-test/r/ndb_gis.result | 4
-rw-r--r--  mysql-test/r/ndb_partition_key.result | 9
-rw-r--r--  mysql-test/r/partition.result | 129
-rw-r--r--  mysql-test/r/partition_02myisam.result | 1724
-rw-r--r--  mysql-test/r/partition_03ndb.result | 1361
-rw-r--r--  mysql-test/r/partition_error.result | 5
-rw-r--r--  mysql-test/r/partition_mgm_err.result | 41
-rw-r--r--  mysql-test/t/disabled.def | 1
-rw-r--r--  mysql-test/t/ndb_partition_key.test | 16
-rw-r--r--  mysql-test/t/partition.test | 146
-rw-r--r--  mysql-test/t/partition_02myisam.test | 25
-rw-r--r--  mysql-test/t/partition_03ndb.test | 26
-rw-r--r--  mysql-test/t/partition_error.test | 12
-rw-r--r--  mysql-test/t/partition_mgm_err.test | 25
-rw-r--r--  mysys/thr_lock.c | 211
-rw-r--r--  sql/ha_archive.cc | 2
-rw-r--r--  sql/ha_berkeley.cc | 2
-rw-r--r--  sql/ha_blackhole.cc | 2
-rw-r--r--  sql/ha_federated.cc | 2
-rw-r--r--  sql/ha_heap.cc | 2
-rw-r--r--  sql/ha_innodb.cc | 2
-rw-r--r--  sql/ha_myisam.cc | 2
-rw-r--r--  sql/ha_myisammrg.cc | 2
-rw-r--r--  sql/ha_ndbcluster.cc | 554
-rw-r--r--  sql/ha_ndbcluster.h | 20
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 4
-rw-r--r--  sql/ha_partition.cc | 2861
-rw-r--r--  sql/ha_partition.h | 88
-rw-r--r--  sql/handler.cc | 7
-rw-r--r--  sql/handler.h | 163
-rw-r--r--  sql/lex.h | 3
-rw-r--r--  sql/lock.cc | 20
-rw-r--r--  sql/log.cc | 2
-rw-r--r--  sql/mysql_priv.h | 63
-rw-r--r--  sql/share/errmsg.txt | 74
-rw-r--r--  sql/sql_base.cc | 162
-rw-r--r--  sql/sql_lex.h | 42
-rw-r--r--  sql/sql_partition.cc | 2774
-rw-r--r--  sql/sql_show.cc | 4
-rw-r--r--  sql/sql_table.cc | 965
-rw-r--r--  sql/sql_yacc.yy | 271
-rw-r--r--  sql/table.cc | 48
-rw-r--r--  sql/table.h | 2
-rw-r--r--  sql/unireg.cc | 21
-rw-r--r--  storage/csv/ha_tina.cc | 2
-rw-r--r--  storage/example/ha_example.cc | 2
-rw-r--r--  storage/ndb/include/kernel/ndb_limits.h | 1
-rw-r--r--  storage/ndb/include/kernel/signaldata/AlterTable.hpp | 64
-rw-r--r--  storage/ndb/include/kernel/signaldata/DiAddTab.hpp | 1
-rw-r--r--  storage/ndb/include/kernel/signaldata/DictTabInfo.hpp | 25
-rw-r--r--  storage/ndb/include/kernel/signaldata/LqhFrag.hpp | 1
-rw-r--r--  storage/ndb/include/ndbapi/NdbDictionary.hpp | 70
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp | 29
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 181
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 25
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 3
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 5
-rw-r--r--  storage/ndb/src/ndbapi/NdbBlob.cpp | 7
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionary.cpp | 102
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 580
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp | 29
-rw-r--r--  storage/ndb/test/ndbapi/test_event.cpp | 2
-rw-r--r--  storage/ndb/tools/restore/Restore.cpp | 2
71 files changed, 11870 insertions, 2100 deletions
diff --git a/include/thr_lock.h b/include/thr_lock.h
index 251d8e7c9cf..c3a7909175f 100644
--- a/include/thr_lock.h
+++ b/include/thr_lock.h
@@ -143,10 +143,12 @@ void thr_unlock(THR_LOCK_DATA *data);
enum enum_thr_lock_result thr_multi_lock(THR_LOCK_DATA **data,
uint count, THR_LOCK_OWNER *owner);
void thr_multi_unlock(THR_LOCK_DATA **data,uint count);
-void thr_abort_locks(THR_LOCK *lock);
+void thr_abort_locks(THR_LOCK *lock, bool upgrade_lock);
my_bool thr_abort_locks_for_thread(THR_LOCK *lock, pthread_t thread);
void thr_print_locks(void); /* For debugging */
my_bool thr_upgrade_write_delay_lock(THR_LOCK_DATA *data);
+void thr_downgrade_write_lock(THR_LOCK_DATA *data,
+ enum thr_lock_type new_lock_type);
my_bool thr_reschedule_write_lock(THR_LOCK_DATA *data);
#ifdef __cplusplus
}
diff --git a/mysql-test/include/partition_1.inc b/mysql-test/include/partition_1.inc
new file mode 100644
index 00000000000..a6a14fde4c6
--- /dev/null
+++ b/mysql-test/include/partition_1.inc
@@ -0,0 +1,747 @@
+# include/partition_1.inc
+#
+# Partitioning tests
+#
+# Attention: The variable
+# $engine -- Storage engine to be tested.
+# must be set within the script sourcing this file.
+#
+--disable_abort_on_error
+SET AUTOCOMMIT= 1;
+
+##### Disabled testcases, because of open bugs #####
+--echo
+--echo #------------------------------------------------------------------------
+--echo # There are several testcases disabled because of the open bugs
+--echo # #15407 , #15408 , #15890 , #15961 , #13447 , #15966 , #15968, #16370
+--echo #------------------------------------------------------------------------
+# Bug#15407 Partitions: crash if subpartition
+let $fixed_bug15407= 0;
+# Bug#15408 Partitions: subpartition names are not unique
+let $fixed_bug15408= 0;
+# Bug#15890 Partitions: Strange interpretation of partition number
+let $fixed_bug15890= 0;
+# Bug#15961 Partitions: Creation of subpart. table without subpart. rule not rejected
+let $fixed_bug15961= 0;
+# Bug#13447 Partitions: crash with alter table
+let $fixed_bug13447= 0;
+# Bug#15966 Partitions: crash if session default engine <> engine used in create table
+let $fixed_bug15966= 0;
+# Bug#15968 Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
+let $fixed_bug15968= 0;
+# Bug #16370 Partitions: subpartitions names not mentioned in SHOW CREATE TABLE output
+let $fixed_bug16370= 0;
+
+##### Option, for displaying files #####
+#
+# Attention: Displaying the directory content via "ls var/master-data/test/t*"
+# is probably not portable.
+# let $ls= 0; disables the execution of "ls ....."
+let $ls= 0;
+
+################################################################################
+# Partitioning syntax
+#
+# CREATE TABLE .... (column-list ..)
+# PARTITION BY
+# KEY '(' ( column-list ) ')'
+# | RANGE '(' ( expr ) ')'
+# | LIST '(' ( expr ) ')'
+# | HASH '(' ( expr ) ')'
+# [PARTITIONS num ]
+# [SUBPARTITION BY
+# KEY '(' ( column-list ) ')'
+# | HASH '(' ( expr ) ')'
+# [SUBPARTITIONS num ]
+# ]
+# [ '('
+# ( PARTITION logical-name
+# [ VALUES LESS THAN '(' ( expr | MAX_VALUE ) ')' ]
+# [ VALUES IN '(' (expr)+ ')' ]
+# [ TABLESPACE tablespace-name ]
+# [ [ STORAGE ] ENGINE [ '=' ] storage-engine-name ]
+# [ NODEGROUP nodegroup-id ]
+# [ '('
+# ( SUBPARTITION logical-name
+# [ TABLESPACE tablespace-name ]
+# [ STORAGE ENGINE = storage-engine-name ]
+# [ NODEGROUP nodegroup-id ]
+# )+
+# ')'
+# )+
+# ')'
+# ]
+################################################################################
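+#
+# For illustration only (not executed by this test): a statement exercising
+# most of the syntax sketched above could look as follows; the table name
+# t_example and the tablespace name ts1 are made up for this sketch.
+#
+# CREATE TABLE t_example ( f1 INTEGER, f2 char(20))
+# PARTITION BY RANGE (f1)
+# SUBPARTITION BY HASH (f1)
+# SUBPARTITIONS 2
+# ( PARTITION part1 VALUES LESS THAN (1000) TABLESPACE ts1
+#     (SUBPARTITION subpart11, SUBPARTITION subpart12),
+#   PARTITION part2 VALUES LESS THAN MAXVALUE
+#     (SUBPARTITION subpart21, SUBPARTITION subpart22)
+# );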
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 0. Setting of auxiliary variables + Creation of an auxiliary table
+--echo # needed in all testcases
+--echo #------------------------------------------------------------------------
+let $max_row= `SELECT @max_row`;
+let $max_row_div2= `SELECT @max_row DIV 2`;
+let $max_row_div3= `SELECT @max_row DIV 3`;
+let $max_row_div4= `SELECT @max_row DIV 4`;
+let $max_int_4= 2147483647;
+--disable_warnings
+DROP TABLE IF EXISTS t0_template;
+--enable_warnings
+CREATE TABLE t0_template ( f1 INTEGER, f2 char(20), PRIMARY KEY(f1))
+ENGINE = MEMORY;
+--echo # Logging of <max_row> INSERTs into t0_template suppressed
+--disable_query_log
+let $num= $max_row;
+while ($num)
+{
+ eval INSERT INTO t0_template SET f1 = $num, f2 = '---$num---';
+
+ dec $num;
+}
+--enable_query_log
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 1. Some syntax checks
+--echo #------------------------------------------------------------------------
+--echo # 1.1 Subpartitioned table without subpartitioning rule must be rejected
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+if ($fixed_bug15961)
+{
+# Bug#15961 Partitions: Creation of subpart. table without subpart. rule not rejected
+--error 9999
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+( PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11));
+}
+--echo # FIXME Implement testcases, where it is checked that all create and
+--echo # alter table statements
+--echo # - with missing mandatory parameters are rejected
+--echo # - with optional parameters are accepted
+--echo # - with wrong combinations of optional parameters are rejected
+--echo # - ............
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 2. Checks where the engine is assigned on all supported (CREATE TABLE
+--echo # statement) positions + basic operations on the tables
+--echo # Storage engine mixups are currently (2005-12-23) not supported
+--echo #------------------------------------------------------------------------
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo # 2.1 non partitioned table (for comparison)
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine;
+# MLML Full size (as check of check routine)
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.2 Assignment of storage engine just after column list only
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine
+ PARTITION BY HASH(f1) PARTITIONS 2;
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.3 Assignment of storage engine just after partition or subpartition
+--echo # name only
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY HASH(f1)
+ ( PARTITION part1 STORAGE ENGINE = $engine,
+ PARTITION part2 STORAGE ENGINE = $engine
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+ (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+ PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.4 Some but not all named partitions or subpartitions get a storage
+--echo # engine assigned
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY HASH(f1)
+ ( PARTITION part1 STORAGE ENGINE = $engine,
+ PARTITION part2
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY HASH(f1)
+ ( PARTITION part1 ,
+ PARTITION part2 STORAGE ENGINE = $engine
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+ (SUBPARTITION subpart11,
+ SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+ PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+ (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+ PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21,
+ SUBPARTITION subpart22 )
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.5 Storage engine assignment after partition name + after name of
+--echo # subpartitions belonging to another partition
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN ($max_row_div2) ENGINE = $engine
+ (SUBPARTITION subpart11,
+ SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+ (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+ PARTITION part2 VALUES LESS THAN ($max_int_4) ENGINE = $engine
+ (SUBPARTITION subpart21,
+ SUBPARTITION subpart22)
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.6 Precedence of storage engine assignments
+--echo # 2.6.1 Storage engine assignment after column list + after partition
+--echo # or subpartition name
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine
+ PARTITION BY HASH(f1)
+ ( PARTITION part1 STORAGE ENGINE = $engine,
+ PARTITION part2 STORAGE ENGINE = $engine
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+ (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+ PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+--echo # 2.6.2 Storage engine assignment after partition name + after
+--echo # subpartition name
+# in the partition part + in the subpartition part
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN ($max_row_div2) STORAGE ENGINE = $engine
+ (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+ PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+ );
+--source include/partition_10.inc
+DROP TABLE t1;
+
+--echo # 2.7 Session default engine differs from engine used within create table
+eval SET SESSION storage_engine=$engine_other;
+if ($fixed_bug15966)
+{
+# Bug#15966 Partitions: crash if session default engine <> engine used in create table
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) ( PARTITION part1 ENGINE = $engine);
+--source include/partition_10.inc
+DROP TABLE t1;
+# Bug#15966 Partitions: crash if session default engine <> engine used in create table
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+ PARTITION BY RANGE(f1)
+ SUBPARTITION BY HASH(f1)
+ ( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+ SUBPARTITION subpart12 STORAGE ENGINE = $engine));
+--source include/partition_10.inc
+DROP TABLE t1;
+}
+eval SET SESSION storage_engine=$engine;
+
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 3. Check assigning the number of partitions and subpartitions
+--echo # with and without named partitions/subpartitions
+--echo #------------------------------------------------------------------------
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+--echo # 3.1 (positive) without partition/subpartition number assignment
+--echo # 3.1.1 no partition number, no named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1);
+--source include/partition_10.inc
+DROP TABLE t1;
+--echo # 3.1.2 no partition number, named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2);
+--source include/partition_10.inc
+DROP TABLE t1;
+# Attention: Several combinations are impossible
+# If subpartitioning exists
+# - partitioning algorithm must be RANGE or LIST
+# This implies the assignment of named partitions.
+# - subpartitioning algorithm must be HASH or KEY
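+# For illustration (not executed): one such impossible combination is
+# subpartitioning a HASH partitioned table, e.g.
+#   CREATE TABLE t_bad ( f1 INTEGER)
+#   PARTITION BY HASH(f1) SUBPARTITION BY KEY(f1);
+# which must be rejected, whereas RANGE + HASH, as used throughout this
+# file, is valid. The table name t_bad is made up for this sketch.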
+--echo # 3.1.3 variations on no partition/subpartition number, named partitions,
+--echo # different subpartitions are/are not named
+#
+# Partition name -- "properties"
+# part1 -- first/non last
+# part2 -- non first/non last
+# part3 -- non first/ last
+#
+# Testpattern:
+# named subpartitions in
+# Partition part1 part2 part3
+# N N N
+# N N Y
+# N Y N
+# N Y Y
+# Y N N
+# Y N Y
+# Y Y N
+# Y Y Y
+--disable_query_log
+let $part0= CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1);
+#
+eval SET @aux = '(PARTITION part1 VALUES LESS THAN ($max_row_div2),';
+let $part1_N= `SELECT @AUX`;
+eval SET @aux = '(PARTITION part1 VALUES LESS THAN ($max_row_div2)
+(SUBPARTITION subpart11 , SUBPARTITION subpart12 ),';
+let $part1_Y= `SELECT @AUX`;
+#
+eval SET @aux = 'PARTITION part2 VALUES LESS THAN ($max_row),';
+let $part2_N= `SELECT @AUX`;
+eval SET @aux = 'PARTITION part2 VALUES LESS THAN ($max_row)
+(SUBPARTITION subpart21 , SUBPARTITION subpart22 ),';
+let $part2_Y= `SELECT @AUX`;
+#
+eval SET @aux = 'PARTITION part3 VALUES LESS THAN ($max_int_4))';
+let $part3_N= `SELECT @AUX`;
+eval SET @aux = 'PARTITION part3 VALUES LESS THAN ($max_int_4)
+(SUBPARTITION subpart31 , SUBPARTITION subpart32 ))';
+let $part3_Y= `SELECT @AUX`;
+--enable_query_log
+
+eval $part0 $part1_N $part2_N $part3_N ;
+DROP TABLE t1;
+# Bug#15407 Partitions: crash if subpartition
+if ($fixed_bug15407)
+{
+eval $part0 $part1_N $part2_N $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_N $part2_Y $part3_N ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_N $part2_Y $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_Y $part2_N $part3_N ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_Y $part2_N $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_Y $part2_Y $part3_N ;
+--source include/partition_10.inc
+DROP TABLE t1;
+}
+eval $part0 $part1_Y $part2_Y $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+
+--echo # 3.2 partition/subpartition numbers good and bad values and notations
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+--echo # 3.2.1 partition/subpartition numbers INTEGER notation
+# ML: "positive/negative" is my private judgement. It need no to correspond
+# with the server response.
+# (positive) number = 2
+let $part_number= 2;
+--source include/partition_11.inc
+# (positive) special case number = 1
+let $part_number= 1;
+--source include/partition_11.inc
+# (negative) 0 is nonsense
+let $part_number= 0;
+--source include/partition_11.inc
+# (negative) -1 is nonsense
+let $part_number= -1;
+--source include/partition_11.inc
+# (negative) 1000000 is too large
+let $part_number= 1000000;
+--source include/partition_11.inc
+
+if ($fixed_bug15890)
+{
+--echo # 3.2.2 partition/subpartition numbers DECIMAL notation
+# (positive) number = 2.0
+let $part_number= 2.0;
+--source include/partition_11.inc
+# (negative) -2.0 is nonsense
+let $part_number= -2.0;
+--source include/partition_11.inc
+# (negative) case number = 0.0 is nonsense
+let $part_number= 0.0;
+--source include/partition_11.inc
+# Bug#15890 Partitions: Strange interpretation of partition number
+# (negative) number = 1.5 is nonsense
+let $part_number= 1.5;
+--source include/partition_11.inc
+# (negative) number is too large
+let $part_number= 999999999999999999999999999999.999999999999999999999999999999;
+--source include/partition_11.inc
+# (negative) number is nearly zero
+let $part_number= 0.000000000000000000000000000001;
+--source include/partition_11.inc
+
+--echo # 3.2.3 partition/subpartition numbers FLOAT notation
+##### FLOAT notation
+# (positive) number = 2.0E+0
+let $part_number= 2.0E+0;
+--source include/partition_11.inc
+# Bug#15890 Partitions: Strange interpretation of partition number
+# (positive) number = 0.2E+1
+let $part_number= 0.2E+1;
+--source include/partition_11.inc
+# (negative) -2.0E+0 is nonsense
+let $part_number= -2.0E+0;
+--source include/partition_11.inc
+# (negative) 0.15E+1 is nonsense
+let $part_number= 0.15E+1;
+--source include/partition_11.inc
+# (negative) 0.0E+300 is zero
+let $part_number= 0.0E+300;
+--source include/partition_11.inc
+# Bug#15890 Partitions: Strange interpretation of partition number
+# (negative) 1E+300 is too large
+let $part_number= 1E+300;
+--source include/partition_11.inc
+# (negative) 1E-300 is nearly zero
+let $part_number= 1E-300;
+--source include/partition_11.inc
+}
+
+--echo # 3.2.4 partition/subpartition numbers STRING notation
+##### STRING notation
+# (negative?) case number = '2'
+let $part_number= '2';
+--source include/partition_11.inc
+# (negative?) case number = '2.0'
+let $part_number= '2.0';
+--source include/partition_11.inc
+# (negative?) case number = '0.2E+1'
+let $part_number= '0.2E+1';
+--source include/partition_11.inc
+# (negative) String starts with a digit, but 'A' follows
+let $part_number= '2A';
+--source include/partition_11.inc
+# (negative) String starts with 'A', but a digit follows
+let $part_number= 'A2';
+--source include/partition_11.inc
+# (negative) empty string
+let $part_number= '';
+--source include/partition_11.inc
+# (negative) string without any digits
+let $part_number= 'GARBAGE';
+--source include/partition_11.inc
+
+--echo # 3.2.5 partition/subpartition numbers other notations
+# (negative) String starts with a digit, but 'A' follows
+let $part_number= 2A;
+--source include/partition_11.inc
+# (negative) String starts with 'A', but a digit follows
+let $part_number= A2;
+--source include/partition_11.inc
+# (negative) string without any digits
+let $part_number= GARBAGE;
+--source include/partition_11.inc
+
+# (negative?) double quotes
+let $part_number= "2";
+--source include/partition_11.inc
+# (negative) String starts with a digit, but 'A' follows
+let $part_number= "2A";
+--source include/partition_11.inc
+# (negative) String starts with 'A', but a digit follows
+let $part_number= "A2";
+--source include/partition_11.inc
+# (negative) string without any digits
+let $part_number= "GARBAGE";
+--source include/partition_11.inc
+
+--echo # 3.3 Mixups of assigned partition/subpartition numbers and names
+--echo # 3.3.1 (positive) number of partition/subpartition
+--echo # = number of named partition/subpartition
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1, PARTITION part2 ) ;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+--source include/partition_layout.inc
+DROP TABLE t1;
+--echo # 3.3.2 (positive) number of partition/subpartition ,
+--echo # 0 (= no) named partition/subpartition
+--echo # already checked above
+--echo # 3.3.3 (negative) number of partitions/subpartitions
+--echo # > number of named partitions/subpartitions
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1 ) ;
+# Wrong number of named subpartitions in first partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11 ),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+# Wrong number of named subpartitions in non first/non last partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2000)
+ (SUBPARTITION subpart21 ),
+ PARTITION part3 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart31, SUBPARTITION subpart32)
+);
+# Wrong number of named subpartitions in last partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21 )
+);
+--echo # 3.3.4 (negative) number of partitions < number of named partitions
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 1 ( PARTITION part1, PARTITION part2 ) ;
+# Wrong number of named subpartitions in first partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+# Wrong number of named subpartitions in non first/non last partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2000)
+ (SUBPARTITION subpart21 ),
+ PARTITION part3 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart31, SUBPARTITION subpart32)
+);
+# Wrong number of named subpartitions in last partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 4. Checks of logical partition/subpartition name
+--echo # file name clashes during CREATE TABLE
+--echo #------------------------------------------------------------------------
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo # 4.1 (negative) A partition name used more than once
+--error ER_SAME_NAME_PARTITION
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part1);
+#
+if ($fixed_bug15408)
+{
+# Bug#15408 Partitions: subpartition names are not unique
+--error ER_SAME_NAME_PARTITION
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart11)
+);
+}
+--echo # FIXME Implement testcases with filename problems
+--echo # existing file of other table --- partition/subpartition file name
+--echo # partition/subpartition file name --- file of the same table
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 5. Alter table experiments
+--echo #------------------------------------------------------------------------
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+--echo # 5.1 alter table add partition
+--echo # 5.1.1 (negative) add partition to non partitioned table
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20));
+--source include/partition_layout.inc
+# MyISAM gets ER_PARTITION_MGMT_ON_NONPARTITIONED and NDB 1005
+# The error code of NDB differs, because all NDB tables are partitioned even
+# if the CREATE TABLE does not contain a partitioning clause.
+--error ER_PARTITION_MGMT_ON_NONPARTITIONED,1005
+ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+--source include/partition_layout.inc
+DROP TABLE t1;
+
+--echo # 5.1.2 Add one partition to a table with one partition
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1);
+--source include/partition_layout.inc
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
+--disable_query_log
+eval SELECT $engine = 'NDB' INTO @aux;
+let $my_exit= `SELECT @aux`;
+if ($my_exit)
+{
+ exit;
+}
+--enable_query_log
+ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+--source include/partition_12.inc
+DROP TABLE t1;
+
+--echo # 5.1.3 Several times add one partition to a table with some partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3);
+--source include/partition_layout.inc
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
+# Partition name before first existing partition name
+ALTER TABLE t1 ADD PARTITION (PARTITION part0);
+--source include/partition_12.inc
+DELETE FROM t1;
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
+# Partition name between existing partition names
+ALTER TABLE t1 ADD PARTITION (PARTITION part2);
+--source include/partition_12.inc
+DELETE FROM t1;
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
+if ($fixed_bug13447)
+{
+# Partition name after all existing partition names
+# Bug#13447 Partitions: crash with alter table
+ALTER TABLE t1 ADD PARTITION (PARTITION part4);
+}
+--source include/partition_12.inc
+DROP TABLE t1;
+
+--echo # 5.1.4 Add several partitions to a table with some partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3);
+--source include/partition_layout.inc
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
+if ($fixed_bug13447)
+{
+# Bug#13447 Partitions: crash with alter table
+ALTER TABLE t1 ADD PARTITION (PARTITION part0, PARTITION part2, PARTITION part4);
+}
+--source include/partition_12.inc
+DROP TABLE t1;
+
+--echo # 5.1.5 (negative) Add partitions to a table with some partitions
+--echo # clash on new and already existing partition names
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2, PARTITION part3);
+# Clash on first/non last partition name
+--error ER_SAME_NAME_PARTITION
+ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+# Clash on non first/non last partition name
+--error ER_SAME_NAME_PARTITION
+ALTER TABLE t1 ADD PARTITION (PARTITION part2);
+# Clash on non first/last partition name
+--error ER_SAME_NAME_PARTITION
+ALTER TABLE t1 ADD PARTITION (PARTITION part3);
+# Clash on all partition names
+--error ER_SAME_NAME_PARTITION
+ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part2, PARTITION part3);
+DROP TABLE t1;
+
+# FIXME Is there any way to add a subpartition to an already existing partition
+
+--echo # 5.2 alter table add subpartition
+--echo # 5.2.1 Add one subpartition to a table with subpartitioning rule and
+--echo # no explicit defined subpartitions
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+(PARTITION part1 VALUES LESS THAN ($max_row_div2));
+if ($fixed_bug16370)
+{
+--source include/partition_layout.inc
+}
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1;
+eval ALTER TABLE t1 ADD PARTITION (PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21));
+if ($fixed_bug16370)
+{
+--source include/partition_12.inc
+}
+DROP TABLE t1;
diff --git a/mysql-test/include/partition_10.inc b/mysql-test/include/partition_10.inc
new file mode 100644
index 00000000000..74b0fdf7f6a
--- /dev/null
+++ b/mysql-test/include/partition_10.inc
@@ -0,0 +1,73 @@
+# include/partition_10.inc
+#
+# Do some basic checks on a table.
+#
+# FIXME: Do not write the statements and results if the SQL return code is 0
+# and the result set is as expected. Write a message that everything is
+# as expected instead.
+#
+# All SELECTs are written so that we get my_value = 1 when everything
+# is as expected.
+#
+
+--source include/partition_layout.inc
+
+####### Variations with multiple records
+# Select on empty table
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+# (mass) Insert of $max_row records
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row;
+# Select
+eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+# (mass) Update $max_row_div4 * 2 + 1 records
+eval UPDATE t1 SET f1 = f1 + $max_row
+WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 AND $max_row_div2 + $max_row_div4;
+# Select
+eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row_div2 + $max_row_div4 + $max_row )
+ AS my_value FROM t1;
+# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+# (mass) Delete $max_row_div4 * 2 + 1 records
+eval DELETE FROM t1
+WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 + $max_row AND $max_row_div2 + $max_row_div4 + $max_row;
+# Select
+eval SELECT (COUNT(*) = $max_row - $max_row_div4 - $max_row_div4 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+
+####### Variations with single records
+# Insert one record at beginning
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+# Select this record
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+# Insert one record at end
+eval INSERT INTO t1 SET f1 = $max_row + 1, f2 = '#######';
+# Select this record
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Update one record
+eval UPDATE t1 SET f1 = $max_row + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+# Select
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+# Bug #15968: Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
+eval UPDATE t1 SET f1 = 0 - 1, f2 = 'ZZZZZZZ'
+ WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Select
+SELECT COUNT(*) AS my_value FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Delete
+eval DELETE FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+DELETE FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Select
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+
+# Truncate
+TRUNCATE t1;
+# Select on empty table
+SELECT COUNT(*) = 0 AS my_value FROM t1;
diff --git a/mysql-test/include/partition_11.inc b/mysql-test/include/partition_11.inc
new file mode 100644
index 00000000000..7ed4d882aa0
--- /dev/null
+++ b/mysql-test/include/partition_11.inc
@@ -0,0 +1,34 @@
+# include/partition_11.inc
+#
+# Try to create a table with the given partition number
+#
+
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS $part_number;
+--disable_query_log
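+# $mysql_errno holds the numeric return code of the previous statement;
+# $run becomes 1 only if the CREATE TABLE above succeeded (no error).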
+eval SET @my_errno= $mysql_errno ;
+let $run= `SELECT @my_errno = 0`;
+--enable_query_log
+#
+# If this operation was successful, check + drop this table
+if ($run)
+{
+ --source include/partition_10.inc
+ eval DROP TABLE t1;
+}
+#### Try to create a table with the given subpartition number
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS $part_number
+(PARTITION part1 VALUES LESS THAN ($max_row_div2), PARTITION part2 VALUES LESS THAN ($max_int_4));
+--disable_query_log
+eval SET @my_errno= $mysql_errno ;
+let $run= `SELECT @my_errno = 0`;
+--enable_query_log
+#
+# If this operation was successful, check + drop this table
+if ($run)
+{
+ --source include/partition_10.inc
+ eval DROP TABLE t1;
+}
diff --git a/mysql-test/include/partition_12.inc b/mysql-test/include/partition_12.inc
new file mode 100644
index 00000000000..2a5610b82e1
--- /dev/null
+++ b/mysql-test/include/partition_12.inc
@@ -0,0 +1,65 @@
+# include/partition_12.inc
+#
+# Do some basic operations on a table if the SQL command executed just
+# before sourcing this file was successful.
+#
+
+--source include/partition_layout.inc
+
+####### Variations with multiple records
+ # (mass) Insert max_row_div2 + 1 records
+ eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN $max_row_div2 AND $max_row;
+ # Select
+ eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+ # DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+ # (mass) Update $max_row_div4 * 2 + 1 records
+ eval UPDATE t1 SET f1 = f1 + $max_row
+ WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 AND $max_row_div2 + $max_row_div4;
+ # Select
+ eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row_div2 + $max_row_div4 + $max_row )
+ AS my_value FROM t1;
+ # DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+ # (mass) Delete $max_row_div4 * 2 + 1 records
+ eval DELETE FROM t1
+ WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 + $max_row AND $max_row_div2 + $max_row_div4 + $max_row;
+ # Select
+ eval SELECT (COUNT(*) = $max_row - $max_row_div4 - $max_row_div4 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+ # DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+
+####### Variations with single records
+# Insert one record at beginning
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+# Select this record
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+# Insert one record at end
+eval INSERT INTO t1 SET f1 = $max_row + 1, f2 = '#######';
+# Select this record
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Update one record
+eval UPDATE t1 SET f1 = $max_row + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+# Select
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+# Bug #15968: Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
+eval UPDATE t1 SET f1 = 0 - 1, f2 = 'ZZZZZZZ'
+ WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Select
+SELECT COUNT(*) AS my_value FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Delete
+eval DELETE FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+DELETE FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Select
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+
+# Truncate
+TRUNCATE t1;
+# Select on empty table
+SELECT COUNT(*) = 0 AS my_value FROM t1;
diff --git a/mysql-test/include/partition_layout.inc b/mysql-test/include/partition_layout.inc
new file mode 100644
index 00000000000..0a59c23fafd
--- /dev/null
+++ b/mysql-test/include/partition_layout.inc
@@ -0,0 +1,13 @@
+# include/partition_layout.inc
+#
+# Print partitioning-related information about the table t1
+#
+
+eval SHOW CREATE TABLE t1;
+
+# Optional listing of the files belonging to the table t1
+# (most probably has issues with path separators and case sensitivity)
+if ($ls)
+{
+ --exec ls var/master-data/test/t1*
+}
diff --git a/mysql-test/r/ndb_autodiscover.result b/mysql-test/r/ndb_autodiscover.result
index 813e37e8892..cb85c4ac873 100644
--- a/mysql-test/r/ndb_autodiscover.result
+++ b/mysql-test/r/ndb_autodiscover.result
@@ -110,7 +110,7 @@ t3 CREATE TABLE `t3` (
`id` int(11) NOT NULL,
`name` char(255) default NULL,
PRIMARY KEY (`id`)
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
select * from t3;
id name
1 Explorer
diff --git a/mysql-test/r/ndb_bitfield.result b/mysql-test/r/ndb_bitfield.result
index 0b7d09dd9b9..b719a2c220e 100644
--- a/mysql-test/r/ndb_bitfield.result
+++ b/mysql-test/r/ndb_bitfield.result
@@ -9,7 +9,7 @@ t1 CREATE TABLE `t1` (
`pk1` int(11) NOT NULL,
`b` bit(64) default NULL,
PRIMARY KEY (`pk1`)
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
insert into t1 values
(0,b'1111111111111111111111111111111111111111111111111111111111111111'),
(1,b'1000000000000000000000000000000000000000000000000000000000000000'),
diff --git a/mysql-test/r/ndb_gis.result b/mysql-test/r/ndb_gis.result
index f49572b893b..5f8eb299093 100644
--- a/mysql-test/r/ndb_gis.result
+++ b/mysql-test/r/ndb_gis.result
@@ -13,7 +13,7 @@ Table Create Table
gis_point CREATE TABLE `gis_point` (
`fid` int(11) default NULL,
`g` point default NULL
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
SHOW FIELDS FROM gis_point;
Field Type Null Key Default Extra
fid int(11) YES NULL
@@ -471,7 +471,7 @@ Table Create Table
gis_point CREATE TABLE `gis_point` (
`fid` int(11) default NULL,
`g` point default NULL
-) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
SHOW FIELDS FROM gis_point;
Field Type Null Key Default Extra
fid int(11) YES NULL
diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result
index 415b9d37b4d..3cfbca63c1f 100644
--- a/mysql-test/r/ndb_partition_key.result
+++ b/mysql-test/r/ndb_partition_key.result
@@ -80,3 +80,12 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY USING HASH (`a`,`b`,`c`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (b)
DROP TABLE t1;
+CREATE TABLE t1 (a int not null primary key)
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+drop table t1;
+CREATE TABLE t1 (a int not null primary key);
+ALTER TABLE t1
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+drop table t1;
diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result
index 58f02681682..7ceee9c17ff 100644
--- a/mysql-test/r/partition.result
+++ b/mysql-test/r/partition.result
@@ -65,6 +65,8 @@ partitions 3
(partition x1 tablespace ts1,
partition x2 tablespace ts2,
partition x3 tablespace ts3);
+CREATE TABLE t2 LIKE t1;
+drop table t2;
drop table t1;
CREATE TABLE t1 (
a int not null,
@@ -108,6 +110,127 @@ insert into t1 values (3);
insert into t1 values (4);
UNLOCK TABLES;
drop table t1;
+CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE)
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (3),
+PARTITION p1 VALUES LESS THAN (7),
+PARTITION p2 VALUES LESS THAN (9),
+PARTITION p3 VALUES LESS THAN (11));
+INSERT INTO t1 VALUES
+(1, 'desk organiser', '2003-10-15'),
+(2, 'CD player', '1993-11-05'),
+(3, 'TV set', '1996-03-10'),
+(4, 'bookcase', '1982-01-10'),
+(5, 'exercise bike', '2004-05-09'),
+(6, 'sofa', '1987-06-05'),
+(7, 'popcorn maker', '2001-11-22'),
+(8, 'acquarium', '1992-08-04'),
+(9, 'study desk', '1984-09-16'),
+(10, 'lava lamp', '1998-12-25');
+SELECT * from t1 ORDER BY a;
+a name purchased
+1 desk organiser 2003-10-15
+2 CD player 1993-11-05
+3 TV set 1996-03-10
+4 bookcase 1982-01-10
+5 exercise bike 2004-05-09
+6 sofa 1987-06-05
+7 popcorn maker 2001-11-22
+8 acquarium 1992-08-04
+9 study desk 1984-09-16
+10 lava lamp 1998-12-25
+ALTER TABLE t1 DROP PARTITION p0;
+SELECT * from t1 ORDER BY a;
+a name purchased
+3 TV set 1996-03-10
+4 bookcase 1982-01-10
+5 exercise bike 2004-05-09
+6 sofa 1987-06-05
+7 popcorn maker 2001-11-22
+8 acquarium 1992-08-04
+9 study desk 1984-09-16
+10 lava lamp 1998-12-25
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4,5,6));
+insert into t1 values (1),(2),(3),(4),(5),(6);
+select * from t1;
+a
+1
+2
+3
+4
+5
+6
+truncate t1;
+select * from t1;
+a
+truncate t1;
+select * from t1;
+a
+drop table t1;
+CREATE TABLE t1 (a int, b int, primary key(a,b))
+PARTITION BY KEY(b,a) PARTITIONS 4;
+insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+select * from t1 where a = 4;
+a b
+4 4
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+PARTITIONS 1
+(PARTITION x1 VALUES IN (1) ENGINE=MEMORY);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) default NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1 PARTITION BY LIST (a) (PARTITION x1 VALUES IN (1) ENGINE = MEMORY)
+drop table t1;
+CREATE TABLE t1 (a int, unique(a))
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+REPLACE t1 SET a = 4;
+ERROR HY000: Table has no partition for value 4
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3));
+insert into t1 values (2), (3);
+insert into t1 values (4);
+ERROR HY000: Table has no partition for value 4
+insert into t1 values (1);
+ERROR HY000: Table has no partition for value 1
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY HASH(a)
+PARTITIONS 5;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (a) PARTITIONS 5
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY RANGE (a)
+(PARTITION x1 VALUES LESS THAN (2));
+insert into t1 values (1);
+update t1 set a = 5;
+ERROR HY000: Table has no partition for value 5
+drop table t1;
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+drop table t1;
+CREATE TABLE `t1` (
+`id` int(11) default NULL
+) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
+SELECT * FROM t1;
+id
+drop table t1;
CREATE TABLE `t1` (
`id` int(11) default NULL
) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
@@ -119,8 +242,8 @@ create table t1
partition by range (a)
( partition p0 values less than(10),
partition p1 values less than (20),
-partition p2 values less than maxvalue);
-alter table t1 reorganise partition p2 into (partition p2 values less than (30));
+partition p2 values less than (25));
+alter table t1 reorganize partition p2 into (partition p2 values less than (30));
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -139,7 +262,7 @@ PARTITION x6 VALUES LESS THAN (14),
PARTITION x7 VALUES LESS THAN (16),
PARTITION x8 VALUES LESS THAN (18),
PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x1 VALUES LESS THAN (6));
show create table t1;
Table Create Table
diff --git a/mysql-test/r/partition_02myisam.result b/mysql-test/r/partition_02myisam.result
new file mode 100644
index 00000000000..647e9379360
--- /dev/null
+++ b/mysql-test/r/partition_02myisam.result
@@ -0,0 +1,1724 @@
+SET SESSION storage_engine='MYISAM';
+SET @max_row = 200;
+SET AUTOCOMMIT= 1;
+
+#------------------------------------------------------------------------
+# There are several testcases disabled because of the open bugs
+# #15407 , #15408 , #15890 , #15961 , #13447 , #15966 , #15968, #16370
+#------------------------------------------------------------------------
+
+#------------------------------------------------------------------------
+# 0. Setting of auxiliary variables + Creation of an auxiliary table
+# needed in all testcases
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t0_template;
+CREATE TABLE t0_template ( f1 INTEGER, f2 char(20), PRIMARY KEY(f1))
+ENGINE = MEMORY;
+# Logging of <max_row> INSERTs into t0_template suppressed
+
+#------------------------------------------------------------------------
+# 1. Some syntax checks
+#------------------------------------------------------------------------
+# 1.1 Subpartitioned table without subpartitioning rule must be rejected
+DROP TABLE IF EXISTS t1;
+# FIXME Implement testcases, where it is checked that all create and
+# alter table statements
+# - with missing mandatory parameters are rejected
+# - with optional parameters are accepted
+# - with wrong combinations of optional parameters are rejected
+# - ............
+
+#------------------------------------------------------------------------
+# 2. Checks where the engine is assigned on all supported (CREATE TABLE
+# statement) positions + basic operations on the tables
+# Storage engine mixups are currently (2005-12-23) not supported
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t1;
+# 2.1 non partitioned table (for comparison)
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.2 Assignment of storage engine just after column list only
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM'
+PARTITION BY HASH(f1) PARTITIONS 2;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.3 Assignment of storage engine just after partition or subpartition
+# name only
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1)
+( PARTITION part1 STORAGE ENGINE = 'MYISAM',
+PARTITION part2 STORAGE ENGINE = 'MYISAM'
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (100)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM')
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.4 Some but not all named partitions or subpartitions get a storage
+# engine assigned
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1)
+( PARTITION part1 STORAGE ENGINE = 'MYISAM',
+PARTITION part2
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1)
+( PARTITION part1 ,
+PARTITION part2 STORAGE ENGINE = 'MYISAM'
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (100)
+(SUBPARTITION subpart11,
+SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM')
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (100)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21,
+SUBPARTITION subpart22 )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.5 Storage engine assignment after a partition name + after the names
+# of subpartitions belonging to another partition
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (100) ENGINE = 'MYISAM'
+(SUBPARTITION subpart11,
+SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM')
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (100)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'),
+PARTITION part2 VALUES LESS THAN (2147483647) ENGINE = 'MYISAM'
+(SUBPARTITION subpart21,
+SUBPARTITION subpart22)
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.6 Precedence of storage engine assignments
+# 2.6.1 Storage engine assignment after column list + after partition
+# or subpartition name
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM'
+PARTITION BY HASH(f1)
+( PARTITION part1 STORAGE ENGINE = 'MYISAM',
+PARTITION part2 STORAGE ENGINE = 'MYISAM'
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM'
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (100)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM')
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.6.2 Storage engine assignment after partition name + after
+# subpartition name
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (100) STORAGE ENGINE = 'MYISAM'
+(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM',
+SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM')
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.7 Session default engine differs from the engine used within CREATE TABLE
+SET SESSION storage_engine='MEMORY';
+SET SESSION storage_engine='MYISAM';
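+# A minimal sketch (assumption, not recorded test output): the session
+# default engine only applies where no engine is named explicitly, e.g.
+#   SET SESSION storage_engine='MEMORY';
+#   CREATE TABLE t_demo ( f1 INTEGER ) PARTITION BY HASH(f1);
+# would be expected to create MEMORY partitions, while an explicit
+# ENGINE = 'MYISAM' clause overrides the session default (t_demo is a
+# hypothetical name).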
+
+#------------------------------------------------------------------------
+# 3. Check assigning the number of partitions and subpartitions
+# with and without named partitions/subpartitions
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t1;
+# 3.1 (positive) without partition/subpartition number assignment
+# 3.1.1 no partition number, no named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1)
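+# Note, a sketch under an assumption (not recorded output): with neither
+# a PARTITIONS clause nor named partitions, the server is expected to
+# default to a single HASH partition, i.e. the statement above should
+# behave like
+#   CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+#   PARTITION BY HASH(f1) PARTITIONS 1;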
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 3.1.2 no partition number, named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 3.1.3 variations with no partition/subpartition number and named
+# partitions, where the subpartitions are or are not named
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (200), PARTITION part3 VALUES LESS THAN (2147483647)) ;
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (100)
+(SUBPARTITION subpart11 , SUBPARTITION subpart12 ), PARTITION part2 VALUES LESS THAN (200)
+(SUBPARTITION subpart21 , SUBPARTITION subpart22 ), PARTITION part3 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart31 , SUBPARTITION subpart32 )) ;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (200) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM), PARTITION part3 VALUES LESS THAN (2147483647) (SUBPARTITION subpart31 ENGINE = MyISAM, SUBPARTITION subpart32 ENGINE = MyISAM))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 3.2 good and bad values and notations for partition/subpartition numbers
+DROP TABLE IF EXISTS t1;
+# 3.2.1 partition/subpartition numbers in INTEGER notation
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 2
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 2 (PARTITION part1 VALUES LESS THAN (100) , PARTITION part2 VALUES LESS THAN (2147483647) )
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 1
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 1
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 1 (PARTITION part1 VALUES LESS THAN (100) , PARTITION part2 VALUES LESS THAN (2147483647) )
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 0;
+ERROR HY000: Number of partitions = 0 is not an allowed value
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 0
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR HY000: Number of subpartitions = 0 is not an allowed value
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS -1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS -1
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 1000000;
+ERROR HY000: Too many partitions were defined
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 1000000
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR HY000: Too many partitions were defined
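+# Note (assumption): the server enforces an upper bound on the total
+# number of (sub)partitions per table, so 1000000 is rejected here while
+# the small counts used above are accepted.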
+# 3.2.4 partition/subpartition numbers in STRING notation
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '2';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '2'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '2.0';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '2.0'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN ' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '0.2E+1';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''0.2E+1'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '0.2E+1'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''0.2E+1'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS TH' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '2A';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '2A'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 'A2';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 'A2'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '''' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS ''
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '''
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 'GARBAGE';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 'GARBAGE'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE'
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS T' at line 3
+# 3.2.5 partition/subpartition numbers in other notations
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2A;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2A' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 2A
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2A
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS A2;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS A2
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS GARBAGE;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS GARBAGE
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THA' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "2";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "2"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "2A";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "2A"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "A2";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "A2"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "GARBAGE";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "GARBAGE"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE"
+(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS T' at line 3
+# 3.3 Mixups of assigned partition/subpartition numbers and names
+# 3.3.1 (positive) number of partitions/subpartitions
+# = number of named partitions/subpartitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1, PARTITION part2 ) ;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
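+# Observation: since the partition count matches the explicit partition
+# list, SHOW CREATE TABLE prints only the named partitions and drops the
+# now redundant PARTITIONS 2 clause from the statement above.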
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM))
+DROP TABLE t1;
+# 3.3.2 (positive) number of partitions/subpartitions given,
+# 0 (= no) named partitions/subpartitions
+# already checked above
+# 3.3.3 (negative) number of partitions/subpartitions
+# > number of named partitions/subpartitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1 ) ;
+ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11 ),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPAR' at line 5
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 ),
+PARTITION part3 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart31, SUBPARTITION subpart32)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+PARTITION part3 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart31, SUBPAR' at line 7
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 )
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ')
+)' at line 7
+# 3.3.4 (negative) number of partitions < number of named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 1 ( PARTITION part1, PARTITION part2 ) ;
+ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPAR' at line 5
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 ),
+PARTITION part3 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart31, SUBPARTITION subpart32)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 ' at line 5
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPAR' at line 5
+
+#------------------------------------------------------------------------
+# 4. Checks of logical partition/subpartition names and
+# file name clashes during CREATE TABLE
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t1;
+# 4.1 (negative) A partition name used more than once
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part1);
+ERROR HY000: Duplicate partition name part1
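+# A minimal sketch (assumption, not recorded output): partition names
+# need only be unique within a single table, so reusing part1 in another
+# table, e.g.
+#   CREATE TABLE t2 ( f1 INTEGER ) PARTITION BY HASH(f1) (PARTITION part1);
+# would be expected to succeed (t2 is a hypothetical name here).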
+# FIXME Implement test cases with file name problems
+# existing file of other table --- partition/subpartition file name
+# partition/subpartition file name --- file of the same table
+
+#------------------------------------------------------------------------
+# 5. Alter table experiments
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t1;
+# 5.1 alter table add partition
+# 5.1.1 (negative) add partition to non partitioned table
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+Got one of the listed errors
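+# Note: "Got one of the listed errors" is the output mysqltest prints
+# when the .test file accepts several error codes for the preceding
+# statement instead of one specific code.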
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+# 5.1.2 Add one partition to a table with one partition
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1;
+ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION p0 ENGINE = MyISAM, PARTITION part1 ENGINE = MyISAM)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 5.1.3 Repeatedly add one partition to a table with some partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1;
+ALTER TABLE t1 ADD PARTITION (PARTITION part0);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM, PARTITION part0 ENGINE = MyISAM)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1;
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1;
+ALTER TABLE t1 ADD PARTITION (PARTITION part2);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM, PARTITION part0 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1;
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM, PARTITION part0 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 5.1.4 Add several partitions to a table with some partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM)
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 5.1.5 (negative) Add partitions to a table with some partitions,
+# where new and already existing partition names clash
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2, PARTITION part3);
+ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+ERROR HY000: Duplicate partition name part1
+ALTER TABLE t1 ADD PARTITION (PARTITION part2);
+ERROR HY000: Duplicate partition name part2
+ALTER TABLE t1 ADD PARTITION (PARTITION part3);
+ERROR HY000: Duplicate partition name part3
+ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part2, PARTITION part3);
+ERROR HY000: Duplicate partition name part1
+DROP TABLE t1;
+# 5.2 alter table add subpartition
+# 5.2.1 Add one subpartition to a table with a subpartitioning rule and
+# no explicitly defined subpartitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+(PARTITION part1 VALUES LESS THAN (100));
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1;
+ALTER TABLE t1 ADD PARTITION (PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21));
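+# Note (assumption, not recorded output): for RANGE partitioning, ADD
+# PARTITION is expected to succeed only with a VALUES LESS THAN bound
+# above the current maximum (here 2147483647 > 100), and the new
+# partition may carry its own subpartition list as shown above.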
+DROP TABLE t1;
diff --git a/mysql-test/r/partition_03ndb.result b/mysql-test/r/partition_03ndb.result
new file mode 100644
index 00000000000..28339cc7435
--- /dev/null
+++ b/mysql-test/r/partition_03ndb.result
@@ -0,0 +1,1361 @@
+SET SESSION storage_engine='NDB' ;
+SET @max_row = 200;
+SET AUTOCOMMIT= 1;
+#------------------------------------------------------------------------
+# 0. Creation of an auxiliary table needed in all testcases
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t0_template;
+CREATE TABLE t0_template ( f1 INTEGER, f2 char(20), PRIMARY KEY(f1))
+ENGINE = MEMORY;
+# Logging of 200 INSERTs into t0_template suppressed
+#------------------------------------------------------------------------
+# 1. Some syntax checks
+#------------------------------------------------------------------------
+# 1.1 Subpartitioned table without subpartitioning rule must be rejected
+DROP TABLE IF EXISTS t1;
+#------------------------------------------------------------------------
+# 2. Checks where the engine is set on all supported CREATE TABLE
+# statement positions + basic operations on the tables
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t1;
+# 2.1 table (non-partitioned) for comparison
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB' ;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.2 table with engine setting just after column list
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB'
+PARTITION BY HASH(f1) PARTITIONS 2;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.3 table with engine setting in the named partition part
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1)
+( PARTITION part1 STORAGE ENGINE = 'NDB' ,
+PARTITION part2 STORAGE ENGINE = 'NDB'
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.4 table with engine setting in the named subpartition part
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.5 Ugly "incomplete" storage engine assignments
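+# Partitions declared below without an explicit engine clause inherit the
+# table's default engine (here the session default, NDB), which is why
+# SHOW CREATE TABLE reports ENGINE = ndbcluster for every partition.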
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1)
+( PARTITION part1 STORAGE ENGINE = 'NDB' ,
+PARTITION part2
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1)
+( PARTITION part1 ,
+PARTITION part2 STORAGE ENGINE = 'NDB'
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11,
+SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 ,
+SUBPARTITION subpart22 )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.6 Ugly "over determined" storage engine assignments
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB'
+PARTITION BY HASH(f1)
+( PARTITION part1 STORAGE ENGINE = 'NDB' ,
+PARTITION part2 STORAGE ENGINE = 'NDB'
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB'
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000) STORAGE ENGINE = 'NDB'
+(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.7 Ugly storage engine assignment mixups
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000) ENGINE = 'NDB'
+(SUBPARTITION subpart11 ,
+SUBPARTITION subpart12 ),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1)
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' ,
+SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ),
+PARTITION part2 VALUES LESS THAN (2000) ENGINE = 'NDB'
+(SUBPARTITION subpart21 ,
+SUBPARTITION subpart22 )
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 2.8 Session default engine differs from engine used within CREATE TABLE
+SET SESSION storage_engine='MEMORY';
+SET SESSION storage_engine='NDB' ;
+#------------------------------------------------------------------------
+# 3. Check number of partitions and subpartitions
+#------------------------------------------------------------------------
+DROP TABLE IF EXISTS t1;
+# 3.1 (positive) without partition/subpartition number assignment
+# 3.1.1 no partition number, no named partitions, no subpartitions mentioned
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 3.1.2 no partition number, named partitions, no subpartitions mentioned
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster)
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 3.1.3 variations on no partition/subpartition number, named partitions,
+# different subpartitions are/are not named
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2000), PARTITION part3 VALUES LESS THAN (2147483647)) ;
+DROP TABLE t1;
+# FIXME several subtestcases of 3.1.3 are disabled because of server crashes
+# Bug#15407 Partitions: crash if subpartition
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11 , SUBPARTITION subpart12 ), PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 , SUBPARTITION subpart22 ), PARTITION part3 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 , SUBPARTITION subpart22 )) ;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster), PARTITION part3 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+# 3.2 partition/subpartition numbers: good and bad values and notations
+DROP TABLE IF EXISTS t1;
+# 3.2.1 partition/subpartition numbers INTEGER notation
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 2
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 2 (PARTITION part1 VALUES LESS THAN (1000) , PARTITION part2 VALUES LESS THAN (2147483647) )
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 1
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 1
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 1 (PARTITION part1 VALUES LESS THAN (1000) , PARTITION part2 VALUES LESS THAN (2147483647) )
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+UPDATE t1 SET f1 = f1 + 200
+WHERE f1 BETWEEN 100 - 50 AND 100 + 50;
+SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 )
+AS my_value FROM t1;
+my_value
+1
+DELETE FROM t1
+WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200;
+SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200)
+AS my_value FROM t1;
+my_value
+1
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+my_value
+1
+INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######';
+my_value
+1
+UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+my_value
+1
+DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ';
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+my_value
+1
+TRUNCATE t1;
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+my_value
+1
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 0;
+ERROR HY000: Number of partitions = 0 is not an allowed value
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 0
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR HY000: Number of subpartitions = 0 is not an allowed value
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS -1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS -1
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 1000000;
+ERROR HY000: Too many partitions were defined
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 1000000
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR HY000: Too many partitions were defined
+# 3.2.4 partition/subpartition numbers STRING notation
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '2';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '2'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '2.0';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '2.0'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '0.2E+1';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''0.2E+1'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '0.2E+1'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''0.2E+1'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS T' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '2A';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS '2A'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 'A2';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 'A2'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS '';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '''' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS ''
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '''
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 'GARBAGE';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE'' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 'GARBAGE'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE'
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS ' at line 3
+# 3.2.5 partition/subpartition numbers other notations
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2A;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2A' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS 2A
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2A
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS A2;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS A2
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS GARBAGE;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS GARBAGE
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS TH' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "2";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "2"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "2A";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "2A"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "A2";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "A2"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS "GARBAGE";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE"' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS "GARBAGE"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE"
+(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS ' at line 3
+# 3.3 Mixups of numbers and names of assigned partitions/subpartitions
+# 3.3.1 (positive) number of partitions/subpartitions = number of named partitions/subpartitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1, PARTITION part2 ) ;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster)
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) default NULL,
+ `f2` char(20) default NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster))
+DROP TABLE t1;
+# 3.3.2 (positive) number of partitions/subpartitions, 0 (= no) named partitions/subpartitions
+# already checked above
+# 3.3.3 (negative) number of partitions > number of named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1 ) ;
+ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 2
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11 ),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21, SUBPAR' at line 5
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2000)
+(SUBPARTITION subpart21 ),
+PARTITION part3 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart31, SUBPARTITION subpart32)
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '),
+PARTITION part3 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart31, SUBPAR' at line 7
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+(SUBPARTITION subpart11, SUBPARTITION subpart12),
+PARTITION part2 VALUES LESS THAN (2147483647)
+(SUBPARTITION subpart21 )
+);
+ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ')
+)' at line 7
+#------------------------------------------------------------------------
+# 4. Checks of logical partition/subpartition name and
+# file name clashes during CREATE TABLE
+#------------------------------------------------------------------------
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part1);
+ERROR HY000: Duplicate partition name part1
+#------------------------------------------------------------------------
+# 5. Alter table experiments
+#------------------------------------------------------------------------
+# 5.1 alter table add partition
+# 5.1.1 (negative) add partition to a non-partitioned table
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20));
+# FIXME Why do the error numbers of MyISAM (1482) and NDB (1005) differ?
+ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+Got one of the listed errors
+DROP TABLE t1;
+# 5.1.2 Add one partition to a table with one partition
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1);
+INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100;
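
The hunk above ends inside subtest 5.1.2. The core shape of section 5.1's ALTER TABLE experiments, reduced to a sketch based on the corresponding MyISAM results earlier in this diff (t_example and the partition names are illustrative):

# adding a named partition to a HASH-partitioned table
CREATE TABLE t_example (f1 INTEGER, f2 CHAR(20))
PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3);
ALTER TABLE t_example ADD PARTITION (PARTITION part2);
# reusing an existing name fails with "Duplicate partition name part1"
ALTER TABLE t_example ADD PARTITION (PARTITION part1);
DROP TABLE t_example;
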
diff --git a/mysql-test/r/partition_error.result b/mysql-test/r/partition_error.result
index 90faa3b20b8..1a0b1dd9b3a 100644
--- a/mysql-test/r/partition_error.result
+++ b/mysql-test/r/partition_error.result
@@ -1,3 +1,4 @@
+drop table if exists t1;
partition by list (a)
partitions 3
(partition x1 values in (1,2,9,4) tablespace ts1,
@@ -544,6 +545,10 @@ partitions 2
partition x2 values in (5));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '4,
partition x2 values in (5))' at line 8
+CREATE TABLE t1 (a int)
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (x1));
+ERROR 42S22: Unknown column 'x1' in 'partition function'
CREATE TABLE t1(a int)
PARTITION BY RANGE (a) (PARTITION p1 VALUES LESS THAN(5));
insert into t1 values (10);
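
The new negative case pins down how identifiers inside a partition boundary are resolved: VALUES LESS THAN needs a constant expression, and a bare name such as x1 is looked up as a column of the table, hence the 42S22 unknown-column error. A contrast sketch (t_bad and t_good are illustrative names):

# rejected: x1 is resolved as a column, and the table has no such column
CREATE TABLE t_bad (a INT)
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (x1));
# accepted: the boundary is a constant
CREATE TABLE t_good (a INT)
PARTITION BY RANGE (a)
(PARTITION p0 VALUES LESS THAN (10));
DROP TABLE t_good;
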
diff --git a/mysql-test/r/partition_mgm_err.result b/mysql-test/r/partition_mgm_err.result
index 01709e726bd..3c2c50fc6f8 100644
--- a/mysql-test/r/partition_mgm_err.result
+++ b/mysql-test/r/partition_mgm_err.result
@@ -1,3 +1,4 @@
+drop table if exists t1;
CREATE TABLE t1 (a int, b int)
PARTITION BY RANGE (a)
(PARTITION x0 VALUES LESS THAN (2),
@@ -10,48 +11,52 @@ PARTITION x6 VALUES LESS THAN (14),
PARTITION x7 VALUES LESS THAN (16),
PARTITION x8 VALUES LESS THAN (18),
PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (2),
PARTITION x11 VALUES LESS THAN (5));
-ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
ALTER TABLE t1 DROP PARTITION x0, x1, x2, x3, x3;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 DROP PARTITION x0, x1, x2, x10;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 DROP PARTITION x10, x1, x2, x1;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3;
-ERROR HY000: Error in list of partitions to change
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
+ERROR HY000: Error in list of partitions to DROP
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
(PARTITION x11 VALUES LESS THAN (22));
ERROR HY000: More partitions to reorganise than there are partitions
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x3 VALUES LESS THAN (6));
-ERROR HY000: All partitions must have unique names in the table
-ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO
+ERROR HY000: Duplicate partition name x3
+ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO
(PARTITION x11 VALUES LESS THAN (2));
ERROR HY000: When reorganising a set of partitions they must be in consecutive order
-ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO
(PARTITION x11 VALUES LESS THAN (4));
-ERROR HY000: Error in list of partitions to change
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ERROR HY000: Error in list of partitions to REORGANIZE
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (5));
-ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (4),
PARTITION x11 VALUES LESS THAN (2));
+ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
+(PARTITION x01 VALUES LESS THAN (6),
+PARTITION x11 VALUES LESS THAN (4));
ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
DROP TABLE t1;
CREATE TABLE t1 (a int)
PARTITION BY KEY (a)
PARTITIONS 2;
ALTER TABLE t1 ADD PARTITION (PARTITION p1);
-ERROR HY000: All partitions must have unique names in the table
+ERROR HY000: Duplicate partition name p1
DROP TABLE t1;
CREATE TABLE t1 (a int)
PARTITION BY KEY (a)
(PARTITION x0, PARTITION x1, PARTITION x2, PARTITION x3, PARTITION x3);
-ERROR HY000: All partitions must have unique names in the table
+ERROR HY000: Duplicate partition name x3
CREATE TABLE t1 (a int)
PARTITION BY RANGE (a)
SUBPARTITION BY KEY (a)
@@ -100,7 +105,7 @@ PARTITION x1 VALUES LESS THAN (8));
ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
ERROR HY000: For RANGE partitions each partition must be defined
ALTER TABLE t1 DROP PARTITION x2;
-ERROR HY000: Error in list of partitions to change
+ERROR HY000: Error in list of partitions to DROP
ALTER TABLE t1 COALESCE PARTITION 1;
ERROR HY000: COALESCE PARTITION can only be used on HASH/KEY partitions
ALTER TABLE t1 DROP PARTITION x1;
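The reworded REORGANIZE errors above encode the underlying rule: for RANGE
partitioning, the replacement partitions must cover exactly the range of the
partitions being reorganized, except that the last partition of the table may
extend its upper bound. A minimal SQL sketch of the one permitted range change
(table and partition names are illustrative, not taken from the tests):

    CREATE TABLE t_example (a INT)
    PARTITION BY RANGE (a)
    (PARTITION p0 VALUES LESS THAN (10),
     PARTITION p1 VALUES LESS THAN (20));
    -- p1 is the last partition, so extending its bound is allowed:
    ALTER TABLE t_example REORGANIZE PARTITION p1 INTO
    (PARTITION p1 VALUES LESS THAN (30));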
diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def
index 88157e4f2c9..21d92f834b8 100644
--- a/mysql-test/t/disabled.def
+++ b/mysql-test/t/disabled.def
@@ -19,6 +19,7 @@ innodb_concurrent : Results are not deterministic, Elliot will fix (BUG#3300)
subselect : Bug#15706
ps_7ndb : dbug assert in RBR mode when executing test suite
rpl_ddl : Bug#15963 SBR does not show "Definer" correctly
+partition_03ndb : Bug#16385
events : Affects flush test case. A table lock not released somewhere
ndb_binlog_basic : Results are not deterministic, Tomas will fix
rpl_ndb_basic : Bug#16228
diff --git a/mysql-test/t/ndb_partition_key.test b/mysql-test/t/ndb_partition_key.test
index 76c36924618..7f6120fe094 100644
--- a/mysql-test/t/ndb_partition_key.test
+++ b/mysql-test/t/ndb_partition_key.test
@@ -63,3 +63,19 @@ insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1);
show create table t1;
DROP TABLE t1;
+
+#
+# Bug #13155: Problem in Create Table using SHOW CREATE TABLE syntax
+#
+CREATE TABLE t1 (a int not null primary key)
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+
+drop table t1;
+
+CREATE TABLE t1 (a int not null primary key);
+ALTER TABLE t1
+PARTITION BY KEY(a)
+(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB);
+
+drop table t1;
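Both statements above target Bug #13155: the output of SHOW CREATE TABLE for a
KEY-partitioned NDB table must itself be valid CREATE TABLE syntax. A manual
round-trip sketch (t2 and the exact printed form are assumptions, not test
output):

    SHOW CREATE TABLE t1;
    -- replaying the printed definition under a new name must parse cleanly:
    CREATE TABLE t2 (a int not null primary key)
    PARTITION BY KEY (a)
    (PARTITION p0 ENGINE = ndbcluster, PARTITION p1 ENGINE = ndbcluster);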
diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test
index 8b1c3f58071..deb95b7fb5c 100644
--- a/mysql-test/t/partition.test
+++ b/mysql-test/t/partition.test
@@ -8,6 +8,7 @@
--disable_warnings
drop table if exists t1;
--enable_warnings
+
#
# Partition by key no partition defined => OK
#
@@ -97,6 +98,9 @@ partitions 3
partition x2 tablespace ts2,
partition x3 tablespace ts3);
+CREATE TABLE t2 LIKE t1;
+
+drop table t2;
drop table t1;
#
@@ -163,6 +167,141 @@ UNLOCK TABLES;
drop table t1;
#
+# Bug #13644 DROP PARTITION NULL's DATE column
+#
+CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE)
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (3),
+ PARTITION p1 VALUES LESS THAN (7),
+ PARTITION p2 VALUES LESS THAN (9),
+ PARTITION p3 VALUES LESS THAN (11));
+INSERT INTO t1 VALUES
+(1, 'desk organiser', '2003-10-15'),
+(2, 'CD player', '1993-11-05'),
+(3, 'TV set', '1996-03-10'),
+(4, 'bookcase', '1982-01-10'),
+(5, 'exercise bike', '2004-05-09'),
+(6, 'sofa', '1987-06-05'),
+(7, 'popcorn maker', '2001-11-22'),
+(8, 'acquarium', '1992-08-04'),
+(9, 'study desk', '1984-09-16'),
+(10, 'lava lamp', '1998-12-25');
+
+SELECT * from t1 ORDER BY a;
+ALTER TABLE t1 DROP PARTITION p0;
+SELECT * from t1 ORDER BY a;
+
+drop table t1;
+
+#
+# Bug #13442; Truncate Partitioned table doesn't work
+#
+
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4,5,6));
+
+insert into t1 values (1),(2),(3),(4),(5),(6);
+select * from t1;
+truncate t1;
+select * from t1;
+truncate t1;
+select * from t1;
+drop table t1;
+
+#
+# Bug #13445 Partition by KEY method crashes server
+#
+CREATE TABLE t1 (a int, b int, primary key(a,b))
+PARTITION BY KEY(b,a) PARTITIONS 4;
+
+insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6);
+select * from t1 where a = 4;
+
+drop table t1;
+
+#
+# Bug #13438: Engine clause in PARTITION clause causes crash
+#
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+PARTITIONS 1
+(PARTITION x1 VALUES IN (1) ENGINE=MEMORY);
+
+show create table t1;
+drop table t1;
+
+#
+# Bug #13440: REPLACE causes crash in partitioned table
+#
+CREATE TABLE t1 (a int, unique(a))
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+REPLACE t1 SET a = 4;
+drop table t1;
+
+#
+# Bug #14365: Crash if value too small in list partitioned table
+#
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3));
+
+insert into t1 values (2), (3);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+insert into t1 values (4);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+insert into t1 values (1);
+drop table t1;
+
+#
+# Bug 14327: PARTITIONS clause gets lost in SHOW CREATE TABLE
+#
+CREATE TABLE t1 (a int)
+PARTITION BY HASH(a)
+PARTITIONS 5;
+
+SHOW CREATE TABLE t1;
+
+drop table t1;
+
+#
+# Bug #13446: Update to value outside of list values doesn't give error
+#
+CREATE TABLE t1 (a int)
+PARTITION BY RANGE (a)
+(PARTITION x1 VALUES LESS THAN (2));
+
+insert into t1 values (1);
+--error ER_NO_PARTITION_FOR_GIVEN_VALUE
+update t1 set a = 5;
+
+drop table t1;
+
+#
+# Bug #13441: Analyze on partitioned table didn't work
+#
+CREATE TABLE t1 (a int)
+PARTITION BY LIST (a)
+(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
+
+analyze table t1;
+
+drop table t1;
+
+#
+# BUG 14524
+#
+CREATE TABLE `t1` (
+ `id` int(11) default NULL
+) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ;
+SELECT * FROM t1;
+
+drop table t1;
+
+#
# BUG 14524
#
CREATE TABLE `t1` (
@@ -180,9 +319,9 @@ create table t1
partition by range (a)
( partition p0 values less than(10),
partition p1 values less than (20),
- partition p2 values less than maxvalue);
+ partition p2 values less than (25));
-alter table t1 reorganise partition p2 into (partition p2 values less than (30));
+alter table t1 reorganize partition p2 into (partition p2 values less than (30));
show create table t1;
drop table t1;
@@ -199,7 +338,8 @@ PARTITION BY RANGE (a)
PARTITION x8 VALUES LESS THAN (18),
PARTITION x9 VALUES LESS THAN (20));
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x1 VALUES LESS THAN (6));
show create table t1;
drop table t1;
+
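The final REORGANIZE above reuses the name x1 for its replacement partition;
this is legal because the old x1 is itself in the set being reorganized away,
so no duplicate name remains afterwards (contrast ER_SAME_NAME_PARTITION in
partition_mgm_err.test, where the reused name x3 survives outside the set).
Sketch of the effect (bounds inferred from the pattern of the test table,
shown for illustration only):

    -- before: x0 < 2, x1 < 4, x2 < 6, x3 < 8, ..., x9 < 20
    ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
    (PARTITION x1 VALUES LESS THAN (6));
    -- after:  x1 < 6, x3 < 8, ..., x9 < 20 (same total range, fewer partitions)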
diff --git a/mysql-test/t/partition_02myisam.test b/mysql-test/t/partition_02myisam.test
new file mode 100644
index 00000000000..107d0b89cea
--- /dev/null
+++ b/mysql-test/t/partition_02myisam.test
@@ -0,0 +1,25 @@
+###############################################
+# #
+# Partition tests MyISAM tables #
+# #
+###############################################
+
+#
+# NOTE: PLEASE DO NOT ADD NON-MYISAM-SPECIFIC TESTCASES HERE!
+#       NON-STORAGE-ENGINE-SPECIFIC TESTCASES SHOULD BE ADDED IN
+#       THE SOURCED FILES ONLY.
+#
+
+# Storage engine to be tested
+let $engine= 'MYISAM';
+eval SET SESSION storage_engine=$engine;
+
+
+# Other storage engine <> storage engine to be tested
+let $engine_other= 'MEMORY';
+# Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments
+# on partitioned tables
+# Attention: At the moment the result files fit @max_row = 200 only
+SET @max_row = 200;
+
+-- source include/partition_1.inc
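The pattern above keeps the sourced tests engine-neutral: SET SESSION
storage_engine makes every CREATE TABLE in partition_1.inc default to $engine,
so the include file needs no ENGINE clause at all. A sketch of a statement the
include file could contain (shape assumed, not quoted from partition_1.inc):

    CREATE TABLE t1 (f1 INT, f2 CHAR(20))
    PARTITION BY HASH(f1) PARTITIONS 2;
    # picks up ENGINE=$engine from the session default set above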
diff --git a/mysql-test/t/partition_03ndb.test b/mysql-test/t/partition_03ndb.test
new file mode 100644
index 00000000000..3190ab9dfc7
--- /dev/null
+++ b/mysql-test/t/partition_03ndb.test
@@ -0,0 +1,26 @@
+###############################################
+# #
+# Partition tests NDB tables #
+# #
+###############################################
+
+#
+# NOTE: PLEASE DO NOT ADD NON-NDB-SPECIFIC TESTCASES HERE!
+#       NON-STORAGE-ENGINE-SPECIFIC TESTCASES SHOULD BE ADDED IN
+#       THE SOURCED FILES ONLY.
+#
+
+# Storage engine to be tested
+let $engine= 'NDB';
+-- source include/have_ndb.inc
+eval SET SESSION storage_engine=$engine;
+
+
+# Other storage engine <> storage engine to be tested
+let $engine_other= 'MEMORY';
+# Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments
+# on partitioned tables
+# Attention: At the moment the result files fit @max_row = 200 only
+SET @max_row = 200;
+
+-- source include/partition_1.inc
diff --git a/mysql-test/t/partition_error.test b/mysql-test/t/partition_error.test
index ea12bbc5207..03a2ab41807 100644
--- a/mysql-test/t/partition_error.test
+++ b/mysql-test/t/partition_error.test
@@ -4,6 +4,10 @@
#
-- source include/have_partition.inc
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
#
# Partition by key stand-alone error
#
@@ -728,6 +732,14 @@ partitions 2
partition x2 values in (5));
#
+# Bug #13439: Crash when LESS THAN (non-literal)
+#
+--error 1054
+CREATE TABLE t1 (a int)
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (x1));
+
+#
# No partition for the given value
#
CREATE TABLE t1(a int)
diff --git a/mysql-test/t/partition_mgm_err.test b/mysql-test/t/partition_mgm_err.test
index 92848fc135e..c12f1c05c05 100644
--- a/mysql-test/t/partition_mgm_err.test
+++ b/mysql-test/t/partition_mgm_err.test
@@ -4,6 +4,10 @@
#
-- source include/have_partition.inc
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
#
# Try faulty DROP PARTITION and COALESCE PARTITION
#
@@ -21,7 +25,7 @@ PARTITION BY RANGE (a)
PARTITION x9 VALUES LESS THAN (20));
--error ER_REORG_OUTSIDE_RANGE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (2),
PARTITION x11 VALUES LESS THAN (5));
@@ -38,30 +42,35 @@ ALTER TABLE t1 DROP PARTITION x10, x1, x2, x1;
ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3;
--error ER_REORG_PARTITION_NOT_EXIST
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO
(PARTITION x11 VALUES LESS THAN (22));
--error ER_SAME_NAME_PARTITION
-ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
(PARTITION x3 VALUES LESS THAN (6));
--error ER_CONSECUTIVE_REORG_PARTITIONS
-ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO
(PARTITION x11 VALUES LESS THAN (2));
--error ER_DROP_PARTITION_NON_EXISTENT
-ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO
(PARTITION x11 VALUES LESS THAN (4));
--error ER_REORG_OUTSIDE_RANGE
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (5));
---error ER_RANGE_NOT_INCREASING_ERROR
-ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO
+--error ER_REORG_OUTSIDE_RANGE
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
(PARTITION x01 VALUES LESS THAN (4),
PARTITION x11 VALUES LESS THAN (2));
+--error ER_RANGE_NOT_INCREASING_ERROR
+ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO
+(PARTITION x01 VALUES LESS THAN (6),
+ PARTITION x11 VALUES LESS THAN (4));
+
DROP TABLE t1;
CREATE TABLE t1 (a int)
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index f5a8b618949..4b3e03750c8 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -1009,7 +1009,7 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count)
TL_WRITE_ONLY to abort any new accesses to the lock
*/
-void thr_abort_locks(THR_LOCK *lock)
+void thr_abort_locks(THR_LOCK *lock, bool upgrade_lock)
{
THR_LOCK_DATA *data;
DBUG_ENTER("thr_abort_locks");
@@ -1031,7 +1031,7 @@ void thr_abort_locks(THR_LOCK *lock)
lock->read_wait.last= &lock->read_wait.data;
lock->write_wait.last= &lock->write_wait.data;
lock->read_wait.data=lock->write_wait.data=0;
- if (lock->write.data)
+ if (upgrade_lock && lock->write.data)
lock->write.data->type=TL_WRITE_ONLY;
pthread_mutex_unlock(&lock->mutex);
DBUG_VOID_RETURN;
@@ -1089,6 +1089,213 @@ my_bool thr_abort_locks_for_thread(THR_LOCK *lock, pthread_t thread)
}
+/*
+ Downgrade a WRITE_* to a lower WRITE level
+ SYNOPSIS
+ thr_downgrade_write_lock()
+ in_data Lock data of thread downgrading its lock
+ new_lock_type New write lock type
+ RETURN VALUE
+ NONE
+ DESCRIPTION
+  This can be used to downgrade a lock already owned. When the downgrade
+  occurs, other waiters, both readers and writers, can be allowed to
+  start.
+  The previous lock is often TL_WRITE_ONLY but can also be
+  TL_WRITE or TL_WRITE_ALLOW_READ. The normal downgrade variants are
+  TL_WRITE_ONLY => TL_WRITE_ALLOW_READ after a short exclusive lock
+  TL_WRITE_ALLOW_READ => TL_WRITE_ALLOW_WRITE after discovering that the
+  operation didn't need such a high lock.
+  TL_WRITE_ONLY => TL_WRITE after a short exclusive lock while holding a
+  write table lock
+  TL_WRITE_ONLY => TL_WRITE_ALLOW_WRITE after a short exclusive lock after
+  having earlier downgraded the lock to TL_WRITE_ALLOW_WRITE
+  The implementation is conservative and would rather refrain from starting
+  waiters than go down unknown paths; the common cases are handled.
+
+ NOTE:
+ In its current implementation it is only allowed to downgrade from
+ TL_WRITE_ONLY. In this case there are no waiters. Thus no wake up
+ logic is required.
+*/
+
+void thr_downgrade_write_lock(THR_LOCK_DATA *in_data,
+ enum thr_lock_type new_lock_type)
+{
+ THR_LOCK *lock=in_data->lock;
+ THR_LOCK_DATA *data, *next;
+ enum thr_lock_type old_lock_type= in_data->type;
+ bool start_writers= FALSE;
+ bool start_readers= FALSE;
+  DBUG_ENTER("thr_downgrade_write_lock");
+
+ pthread_mutex_lock(&lock->mutex);
+ DBUG_ASSERT(old_lock_type == TL_WRITE_ONLY);
+ DBUG_ASSERT(old_lock_type > new_lock_type);
+ in_data->type= new_lock_type;
+ check_locks(lock,"after downgrading lock",0);
+#if 0
+ switch (old_lock_type)
+ {
+ case TL_WRITE_ONLY:
+ case TL_WRITE:
+ case TL_WRITE_LOW_PRIORITY:
+ /*
+ Previous lock was exclusive we are now ready to start up most waiting
+ threads.
+ */
+ switch (new_lock_type)
+ {
+ case TL_WRITE_ALLOW_READ:
+ /* Still cannot start WRITE operations. Can only start readers. */
+ start_readers= TRUE;
+ break;
+ case TL_WRITE:
+ case TL_WRITE_LOW_PRIORITY:
+ /*
+ Still cannot start anything, but new requests are no longer
+ aborted.
+ */
+ break;
+ case TL_WRITE_ALLOW_WRITE:
+ /*
+ We can start both writers and readers.
+ */
+ start_writers= TRUE;
+ start_readers= TRUE;
+ break;
+ case TL_WRITE_CONCURRENT_INSERT:
+ case TL_WRITE_DELAYED:
+ /*
+          This routine is not designed for those. The lock will be
+          downgraded but no waiters will be started. This is not optimal
+          but should still be correct behaviour.
+ */
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ break;
+ case TL_WRITE_DELAYED:
+ case TL_WRITE_CONCURRENT_INSERT:
+ /*
+        This routine is not designed for those. The lock will be
+        downgraded but no waiters will be started. This is not optimal
+        but should still be correct behaviour.
+ */
+ break;
+ case TL_WRITE_ALLOW_READ:
+ DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE);
+ /*
+ Previously writers were not allowed to start, now it is ok to
+ start them again. Readers are already allowed so no reason to
+ handle them.
+ */
+ start_writers= TRUE;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ if (start_writers)
+ {
+ /*
+      At this time the only active writer can be ourselves. Thus we need
+      not worry about other concurrent write operations being active on
+      the table; we only need to worry about starting waiting operations.
+      We also only come here with TL_WRITE_ALLOW_WRITE as the new
+      lock type, thus we can also start other writers of the same type.
+      If we find a lock at exclusive level >= TL_WRITE_LOW_PRIORITY we
+      don't start any more operations, since that would mean those
+      operations have to wait for operations started afterwards.
+ */
+ DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE);
+ for (data=lock->write_wait.data; data ; data= next)
+ {
+ /*
+ All WRITE requests compatible with new lock type are also
+ started
+ */
+ next= data->next;
+ if (start_writers && data->type == new_lock_type)
+ {
+ pthread_cond_t *cond= data->cond;
+ /*
+ It is ok to start this waiter.
+ Move from being first in wait queue to be last in write queue.
+ */
+ if (((*data->prev)= data->next))
+ data->next->prev= data->prev;
+ else
+ lock->write_wait.last= data->prev;
+ data->prev= lock->write.last;
+ lock->write.last= &data->next;
+ data->next= 0;
+ check_locks(lock, "Started write lock after downgrade",0);
+ data->cond= 0;
+ pthread_cond_signal(cond);
+ }
+ else
+ {
+ /*
+ We found an incompatible lock, we won't start any more write
+ requests to avoid letting writers pass other writers in the
+ queue.
+ */
+ start_writers= FALSE;
+ if (data->type >= TL_WRITE_LOW_PRIORITY)
+ {
+ /*
+ We have an exclusive writer in the queue so we won't start
+ readers either.
+ */
+ start_readers= FALSE;
+ }
+ }
+ }
+ }
+ if (start_readers)
+ {
+ DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE ||
+ new_lock_type == TL_WRITE_ALLOW_READ);
+ /*
+ When we come here we know that the write locks are
+      TL_WRITE_ALLOW_WRITE or TL_WRITE_ALLOW_READ. This means that reads
+      are ok.
+ */
+ for (data=lock->read_wait.data; data ; data=next)
+ {
+ next= data->next;
+ /*
+ All reads are ok to start now except TL_READ_NO_INSERT when
+ write lock is TL_WRITE_ALLOW_READ.
+ */
+ if (new_lock_type != TL_WRITE_ALLOW_READ ||
+ data->type != TL_READ_NO_INSERT)
+ {
+ pthread_cond_t *cond= data->cond;
+ if (((*data->prev)= data->next))
+ data->next->prev= data->prev;
+ else
+ lock->read_wait.last= data->prev;
+ data->prev= lock->read.last;
+ lock->read.last= &data->next;
+ data->next= 0;
+
+ if (data->type == TL_READ_NO_INSERT)
+ lock->read_no_write_count++;
+ check_locks(lock, "Started read lock after downgrade",0);
+ data->cond= 0;
+ pthread_cond_signal(cond);
+ }
+ }
+ }
+ check_locks(lock,"after starting waiters after downgrading lock",0);
+#endif
+ pthread_mutex_unlock(&lock->mutex);
+ DBUG_VOID_RETURN;
+}
/* Upgrade a WRITE_DELAY lock to a WRITE_LOCK */
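Taken together, the two thr_lock.c changes support a short exclusive phase
without ever releasing the lock: thr_abort_locks(lock, TRUE) aborts waiters and
upgrades the holder to TL_WRITE_ONLY, thr_abort_locks(lock, FALSE) aborts
waiters while leaving the holder's lock type unchanged, and
thr_downgrade_write_lock() then relaxes an owned TL_WRITE_ONLY lock in place.
A hypothetical caller-side sequence (share and lock_data stand for an engine's
lock structures; they are not names from this patch, and the wake-up logic
above is still compiled out under #if 0):

    thr_abort_locks(&share->lock, TRUE);   /* abort waiters, upgrade holder */
    /* ... perform the exclusive phase, e.g. swap in new partition files ... */
    thr_downgrade_write_lock(&lock_data, TL_WRITE_ALLOW_WRITE);
    /* new requests are accepted again; only TL_WRITE_ONLY downgrades are
       supported, so no waiters need waking here */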
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index fea4005d131..06130f31504 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -170,6 +170,8 @@ handlerton archive_hton = {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter interface */
HTON_NO_FLAGS
};
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index fb9ed2de117..e9168487cf4 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -149,6 +149,8 @@ handlerton berkeley_hton = {
NULL, /* Start Consistent Snapshot */
berkeley_flush_logs, /* Flush logs */
berkeley_show_status, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME
};
diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc
index 38e03d4d1f7..71b4ef3c9dc 100644
--- a/sql/ha_blackhole.cc
+++ b/sql/ha_blackhole.cc
@@ -57,6 +57,8 @@ handlerton blackhole_hton= {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_CAN_RECREATE
};
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index b218b52bfd9..0dbbf8e1175 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -394,6 +394,8 @@ handlerton federated_hton= {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_ALTER_NOT_SUPPORTED
};
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index bcb0bf07774..2fe4bc7aeb5 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -54,6 +54,8 @@ handlerton heap_hton= {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_CAN_RECREATE
};
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 75c1c380a42..9aee8a63508 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -235,6 +235,8 @@ handlerton innobase_hton = {
innobase_start_trx_and_assign_read_view, /* Start Consistent Snapshot */
innobase_flush_logs, /* Flush logs */
innobase_show_status, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
HTON_NO_FLAGS
};
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 87bc2148b03..8f73f77967a 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -86,6 +86,8 @@ handlerton myisam_hton= {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_CAN_RECREATE
};
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 601fe94bf11..36de3dc64e0 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -64,6 +64,8 @@ handlerton myisammrg_hton= {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_CAN_RECREATE
};
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index a9c4ea9da9e..38b24e8eace 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -52,6 +52,8 @@ static const int parallelism= 0;
// createable against NDB from this handler
static const int max_transactions= 3; // should really be 2 but there is one transaction too many allocated when lock table is used
+static uint ndbcluster_partition_flags();
+static uint ndbcluster_alter_table_flags(uint flags);
static bool ndbcluster_init(void);
static int ndbcluster_end(ha_panic_function flag);
static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type);
@@ -72,6 +74,23 @@ static handler *ndbcluster_create_handler(TABLE_SHARE *table)
return new ha_ndbcluster(table);
}
+static uint ndbcluster_partition_flags()
+{
+ return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
+ HA_CAN_PARTITION_UNIQUE | HA_USE_AUTO_PARTITION);
+}
+
+static uint ndbcluster_alter_table_flags(uint flags)
+{
+ if (flags & ALTER_DROP_PARTITION)
+ return 0;
+ else
+ return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX |
+ HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX |
+ HA_PARTITION_FUNCTION_SUPPORTED);
+
+}
+
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
#define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0
@@ -117,10 +136,6 @@ static int rename_share(NDB_SHARE *share, const char *new_key);
#endif
static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len);
-static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len);
-static int unpackfrm(const void **data, uint *len,
- const void* pack_data);
-
static int ndb_get_table_statistics(Ndb*, const char *,
struct Ndb_statistics *);
@@ -348,7 +363,7 @@ struct Ndb_local_table_statistics {
void ha_ndbcluster::set_rec_per_key()
{
DBUG_ENTER("ha_ndbcluster::get_status_const");
- for (uint i=0 ; i < table->s->keys ; i++)
+ for (uint i=0 ; i < table_share->keys ; i++)
{
table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1;
}
@@ -447,7 +462,7 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
*/
void
-ha_ndbcluster::invalidate_dictionary_cache(TABLE *table, Ndb *ndb,
+ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
const char *tabname, bool global)
{
NDBDICT *dict= ndb->getDictionary();
@@ -470,16 +485,16 @@ ha_ndbcluster::invalidate_dictionary_cache(TABLE *table, Ndb *ndb,
}
else
dict->removeCachedTable(tabname);
- table->s->version=0L; /* Free when thread is ready */
+ share->version=0L; /* Free when thread is ready */
DBUG_VOID_RETURN;
}
void ha_ndbcluster::invalidate_dictionary_cache(bool global)
{
NDBDICT *dict= get_ndb()->getDictionary();
- invalidate_dictionary_cache(table, get_ndb(), m_tabname, global);
+ invalidate_dictionary_cache(table_share, get_ndb(), m_tabname, global);
/* Invalidate indexes */
- for (uint i= 0; i < table->s->keys; i++)
+ for (uint i= 0; i < table_share->keys; i++)
{
NDBINDEX *index = (NDBINDEX *) m_index[i].index;
NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index;
@@ -549,7 +564,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
if (res == HA_ERR_FOUND_DUPP_KEY)
{
if (m_rows_to_insert == 1)
- m_dupkey= table->s->primary_key;
+ m_dupkey= table_share->primary_key;
else
{
/* We are batching inserts, offending key is not available */
@@ -788,7 +803,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
for (int loop= 0; loop <= 1; loop++)
{
uint32 offset= 0;
- for (uint i= 0; i < table->s->fields; i++)
+ for (uint i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
NdbValue value= m_value[i];
@@ -892,10 +907,10 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
*/
bool ha_ndbcluster::uses_blob_value()
{
- if (table->s->blob_fields == 0)
+ if (table_share->blob_fields == 0)
return FALSE;
{
- uint no_fields= table->s->fields;
+ uint no_fields= table_share->fields;
int i;
// They always put blobs at the end..
for (i= no_fields - 1; i >= 0; i--)
@@ -1423,7 +1438,7 @@ static void shrink_varchar(Field* field, const byte* & ptr, char* buf)
int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key)
{
- KEY* key_info= table->key_info + table->s->primary_key;
+ KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
DBUG_ENTER("set_primary_key");
@@ -1445,7 +1460,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key)
int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record)
{
- KEY* key_info= table->key_info + table->s->primary_key;
+ KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part;
KEY_PART_INFO* end= key_part+key_info->key_parts;
DBUG_ENTER("set_primary_key_from_record");
@@ -1490,7 +1505,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
DBUG_ENTER("define_read_attrs");
// Define attributes to read
- for (i= 0; i < table->s->fields; i++)
+ for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
if (ha_get_bit_in_read_set(i+1) ||
@@ -1505,11 +1520,11 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
}
}
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
DBUG_PRINT("info", ("Getting hidden key"));
// Scanning table with no primary key
- int hidden_no= table->s->fields;
+ int hidden_no= table_share->fields;
#ifndef DBUG_OFF
const NDBTAB *tab= (const NDBTAB *) m_table;
if (!tab->getColumn(hidden_no))
@@ -1529,7 +1544,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
uint32 part_id)
{
- uint no_fields= table->s->fields;
+ uint no_fields= table_share->fields;
NdbConnection *trans= m_active_trans;
NdbOperation *op;
@@ -1547,7 +1562,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
if (m_use_partition_function)
op->setPartitionId(part_id);
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
@@ -1587,7 +1602,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data,
uint32 old_part_id)
{
- uint no_fields= table->s->fields, i;
+ uint no_fields= table_share->fields, i;
NdbTransaction *trans= m_active_trans;
NdbOperation *op;
DBUG_ENTER("complemented_pk_read");
@@ -2135,13 +2150,13 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_ENTER("write_row");
- if (!m_use_write && m_ignore_dup_key && table->s->primary_key != MAX_KEY)
+ if (!m_use_write && m_ignore_dup_key && table_share->primary_key != MAX_KEY)
{
int peek_res= peek_row(record);
if (!peek_res)
{
- m_dupkey= table->s->primary_key;
+ m_dupkey= table_share->primary_key;
DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
}
if (peek_res != HA_ERR_KEY_NOT_FOUND)
@@ -2171,7 +2186,7 @@ int ha_ndbcluster::write_row(byte *record)
op->setPartitionId(part_id);
}
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// Table has hidden primary key
Ndb *ndb= get_ndb();
@@ -2184,7 +2199,7 @@ int ha_ndbcluster::write_row(byte *record)
ndb->getNdbError().status == NdbError::TemporaryError);
if (auto_value == NDB_FAILED_AUTO_INCREMENT)
ERR_RETURN(ndb->getNdbError());
- if (set_hidden_key(op, table->s->fields, (const byte*)&auto_value))
+ if (set_hidden_key(op, table_share->fields, (const byte*)&auto_value))
ERR_RETURN(op->getNdbError());
}
else
@@ -2208,7 +2223,7 @@ int ha_ndbcluster::write_row(byte *record)
// Set non-key attribute(s)
bool set_blob_value= FALSE;
- for (i= 0; i < table->s->fields; i++)
+ for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) &&
@@ -2349,8 +2364,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
}
/* Check for update of primary key for special handling */
- if ((table->s->primary_key != MAX_KEY) &&
- (key_cmp(table->s->primary_key, old_data, new_data)) ||
+ if ((table_share->primary_key != MAX_KEY) &&
+ (key_cmp(table_share->primary_key, old_data, new_data)) ||
(old_part_id != new_part_id))
{
int read_res, insert_res, delete_res, undo_res;
@@ -2424,14 +2439,14 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
if (m_use_partition_function)
op->setPartitionId(new_part_id);
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
// Require that the PK for this record has previously been
// read into m_value
- uint no_fields= table->s->fields;
+ uint no_fields= table_share->fields;
const NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec);
DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH);
@@ -2450,7 +2465,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
m_rows_changed++;
// Set non-key attribute(s)
- for (i= 0; i < table->s->fields; i++)
+ for (i= 0; i < table_share->fields; i++)
{
Field *field= table->field[i];
if (ha_get_bit_in_write_set(i+1) &&
@@ -2529,11 +2544,11 @@ int ha_ndbcluster::delete_row(const byte *record)
no_uncommitted_rows_update(-1);
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
- uint no_fields= table->s->fields;
+ uint no_fields= table_share->fields;
const NdbRecAttr* rec= m_value[no_fields].rec;
DBUG_ASSERT(rec != NULL);
@@ -2656,10 +2671,10 @@ void ha_ndbcluster::unpack_record(byte *buf)
ndb_unpack_record(table, m_value, 0, buf);
#ifndef DBUG_OFF
// Read and print all values that was fetched
- if (table->s->primary_key == MAX_KEY)
+ if (table_share->primary_key == MAX_KEY)
{
// Table with hidden primary key
- int hidden_no= table->s->fields;
+ int hidden_no= table_share->fields;
const NDBTAB *tab= (const NDBTAB *) m_table;
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
const NdbRecAttr* rec= m_value[hidden_no].rec;
@@ -2686,7 +2701,7 @@ void ha_ndbcluster::print_results()
char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH];
String type(buf_type, sizeof(buf_type), &my_charset_bin);
String val(buf_val, sizeof(buf_val), &my_charset_bin);
- for (uint f= 0; f < table->s->fields; f++)
+ for (uint f= 0; f < table_share->fields; f++)
{
/* Use DBUG_PRINT since DBUG_FILE cannot be filtered out */
char buf[2000];
@@ -2953,7 +2968,7 @@ int ha_ndbcluster::rnd_init(bool scan)
DBUG_RETURN(-1);
}
}
- index_init(table->s->primary_key, 0);
+ index_init(table_share->primary_key, 0);
DBUG_RETURN(0);
}
@@ -3051,9 +3066,9 @@ void ha_ndbcluster::position(const byte *record)
byte *buff;
DBUG_ENTER("position");
- if (table->s->primary_key != MAX_KEY)
+ if (table_share->primary_key != MAX_KEY)
{
- key_info= table->key_info + table->s->primary_key;
+ key_info= table->key_info + table_share->primary_key;
key_part= key_info->key_part;
end= key_part + key_info->key_parts;
buff= ref;
@@ -3095,7 +3110,7 @@ void ha_ndbcluster::position(const byte *record)
{
// No primary key, get hidden key
DBUG_PRINT("info", ("Getting hidden key"));
- int hidden_no= table->s->fields;
+ int hidden_no= table_share->fields;
const NdbRecAttr* rec= m_value[hidden_no].rec;
memcpy(ref, (const void*)rec->aRef(), ref_length);
#ifndef DBUG_OFF
@@ -4057,7 +4072,7 @@ int ha_ndbcluster::create(const char *name,
caller.
Do Ndb specific stuff, such as create a .ndb file
*/
- if ((my_errno= write_ndb_file()))
+ if ((my_errno= write_ndb_file(name)))
DBUG_RETURN(my_errno);
#ifdef HAVE_NDB_BINLOG
if (ndb_binlog_thread_running > 0)
@@ -4164,21 +4179,11 @@ int ha_ndbcluster::create(const char *name,
// Check partition info
partition_info *part_info= form->part_info;
- if (part_info)
- {
- int error;
- if ((error= set_up_partition_info(part_info, form, (void*)&tab)))
- {
- DBUG_RETURN(error);
- }
- }
- else
+ if ((my_errno= set_up_partition_info(part_info, form, (void*)&tab)))
{
- ndb_set_fragmentation(tab, form, pk_length);
+ DBUG_RETURN(my_errno);
}
-
-
if ((my_errno= check_ndb_connection()))
DBUG_RETURN(my_errno);
@@ -4199,7 +4204,7 @@ int ha_ndbcluster::create(const char *name,
my_errno= create_indexes(ndb, form);
if (!my_errno)
- my_errno= write_ndb_file();
+ my_errno= write_ndb_file(name);
else
{
/*
@@ -4921,9 +4926,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
primary key to be written in the ref variable
*/
- if (table->s->primary_key != MAX_KEY)
+ if (table_share->primary_key != MAX_KEY)
{
- key= table->key_info+table->s->primary_key;
+ key= table->key_info+table_share->primary_key;
ref_length= key->key_length;
DBUG_PRINT("info", (" ref_length: %d", ref_length));
}
@@ -4945,10 +4950,23 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
if (!res)
info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
-
DBUG_RETURN(res);
}
+/*
+ Set partition info
+
+ SYNOPSIS
+ set_part_info()
+      part_info       Partition info to be used by the handler object
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Set up partition info when handler object created
+*/
+
void ha_ndbcluster::set_part_info(partition_info *part_info)
{
m_part_info= part_info;
@@ -5570,6 +5588,8 @@ static bool ndbcluster_init()
h.panic= ndbcluster_end; /* Panic call */
h.show_status= ndbcluster_show_status; /* Show status */
h.alter_tablespace= ndbcluster_alter_tablespace; /* Alter tablespace */
+ h.partition_flags= ndbcluster_partition_flags; /* Partition flags */
+ h.alter_table_flags=ndbcluster_alter_table_flags; /* Alter table flags */
#ifdef HAVE_NDB_BINLOG
ndbcluster_binlog_init_handlerton();
#endif
@@ -5721,6 +5741,20 @@ static int ndbcluster_end(ha_panic_function type)
DBUG_RETURN(0);
}
+void ha_ndbcluster::print_error(int error, myf errflag)
+{
+ DBUG_ENTER("ha_ndbcluster::print_error");
+ DBUG_PRINT("enter", ("error = %d", error));
+
+ if (error == HA_ERR_NO_PARTITION_FOUND)
+ my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
+ (int)m_part_info->part_expr->val_int());
+ else
+ handler::print_error(error, errflag);
+ DBUG_VOID_RETURN;
+}
+
+
/*
Static error print function called from
static handler method ndbcluster_commit
@@ -5747,8 +5781,10 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op)
*/
void ha_ndbcluster::set_dbname(const char *path_name, char *dbname)
{
- char *end, *ptr;
-
+ char *end, *ptr, *tmp_name;
+ char tmp_buff[FN_REFLEN];
+
+ tmp_name= tmp_buff;
/* Scan name from the end */
ptr= strend(path_name)-1;
while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
@@ -5760,18 +5796,19 @@ void ha_ndbcluster::set_dbname(const char *path_name, char *dbname)
ptr--;
}
uint name_len= end - ptr;
- memcpy(dbname, ptr + 1, name_len);
- dbname[name_len]= '\0';
+ memcpy(tmp_name, ptr + 1, name_len);
+ tmp_name[name_len]= '\0';
#ifdef __WIN__
/* Put to lower case */
- ptr= dbname;
+ ptr= tmp_name;
while (*ptr != '\0') {
*ptr= tolower(*ptr);
ptr++;
}
#endif
+ filename_to_tablename(tmp_name, dbname, FN_REFLEN);
}
/*
@@ -5790,8 +5827,10 @@ void ha_ndbcluster::set_dbname(const char *path_name)
void
ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
{
- char *end, *ptr;
-
+ char *end, *ptr, *tmp_name;
+ char tmp_buff[FN_REFLEN];
+
+ tmp_name= tmp_buff;
/* Scan name from the end */
end= strend(path_name)-1;
ptr= end;
@@ -5799,17 +5838,18 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
ptr--;
}
uint name_len= end - ptr;
- memcpy(tabname, ptr + 1, end - ptr);
- tabname[name_len]= '\0';
+ memcpy(tmp_name, ptr + 1, end - ptr);
+ tmp_name[name_len]= '\0';
#ifdef __WIN__
/* Put to lower case */
- ptr= tabname;
+ ptr= tmp_name;
while (*ptr != '\0') {
*ptr= tolower(*ptr);
ptr++;
}
#endif
+ filename_to_tablename(tmp_name, tabname, FN_REFLEN);
}
/*
@@ -6576,104 +6616,6 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
}
-/*
- Internal representation of the frm blob
-
-*/
-
-struct frm_blob_struct
-{
- struct frm_blob_header
- {
- uint ver; // Version of header
- uint orglen; // Original length of compressed data
- uint complen; // Compressed length of data, 0=uncompressed
- } head;
- char data[1];
-};
-
-
-
-static int packfrm(const void *data, uint len,
- const void **pack_data, uint *pack_len)
-{
- int error;
- ulong org_len, comp_len;
- uint blob_len;
- frm_blob_struct* blob;
- DBUG_ENTER("packfrm");
- DBUG_PRINT("enter", ("data: 0x%lx len: %d", data, len));
-
- error= 1;
- org_len= len;
- if (my_compress((byte*)data, &org_len, &comp_len))
- goto err;
-
- DBUG_PRINT("info", ("org_len: %d comp_len: %d", org_len, comp_len));
- DBUG_DUMP("compressed", (char*)data, org_len);
-
- error= 2;
- blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len;
- if (!(blob= (frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME))))
- goto err;
-
- // Store compressed blob in machine independent format
- int4store((char*)(&blob->head.ver), 1);
- int4store((char*)(&blob->head.orglen), comp_len);
- int4store((char*)(&blob->head.complen), org_len);
-
- // Copy frm data into blob, already in machine independent format
- memcpy(blob->data, data, org_len);
-
- *pack_data= blob;
- *pack_len= blob_len;
- error= 0;
-
- DBUG_PRINT("exit",
- ("pack_data: 0x%lx pack_len: %d", *pack_data, *pack_len));
-err:
- DBUG_RETURN(error);
-
-}
-
-
-static int unpackfrm(const void **unpack_data, uint *unpack_len,
- const void *pack_data)
-{
- const frm_blob_struct *blob= (frm_blob_struct*)pack_data;
- byte *data;
- ulong complen, orglen, ver;
- DBUG_ENTER("unpackfrm");
- DBUG_PRINT("enter", ("pack_data: 0x%lx", pack_data));
-
- complen= uint4korr((char*)&blob->head.complen);
- orglen= uint4korr((char*)&blob->head.orglen);
- ver= uint4korr((char*)&blob->head.ver);
-
- DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
- ver,complen,orglen));
- DBUG_DUMP("blob->data", (char*) blob->data, complen);
-
- if (ver != 1)
- DBUG_RETURN(1);
- if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
- DBUG_RETURN(2);
- memcpy(data, blob->data, complen);
-
- if (my_uncompress(data, &complen, &orglen))
- {
- my_free((char*)data, MYF(0));
- DBUG_RETURN(3);
- }
-
- *unpack_data= data;
- *unpack_len= complen;
-
- DBUG_PRINT("exit", ("frmdata: 0x%lx, len: %d", *unpack_data, *unpack_len));
-
- DBUG_RETURN(0);
-}
-
static
int
ndb_get_table_statistics(Ndb* ndb, const char * table,
@@ -6756,17 +6698,17 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
that the table with this name is a ndb table
*/
-int ha_ndbcluster::write_ndb_file()
+int ha_ndbcluster::write_ndb_file(const char *name)
{
File file;
bool error=1;
char path[FN_REFLEN];
DBUG_ENTER("write_ndb_file");
- DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname));
+ DBUG_PRINT("enter", ("name: %s", name));
(void)strxnmov(path, FN_REFLEN-1,
- mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS);
+ mysql_data_home,"/",name,ha_ndb_ext,NullS);
if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0)
{
@@ -6790,7 +6732,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
int res;
KEY* key_info= table->key_info + active_index;
NDB_INDEX_TYPE index_type= get_index_type(active_index);
- ulong reclength= table->s->reclength;
+ ulong reclength= table_share->reclength;
NdbOperation* op;
if (uses_blob_value())
@@ -6997,7 +6939,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
int res;
int range_no;
- ulong reclength= table->s->reclength;
+ ulong reclength= table_share->reclength;
const NdbOperation* op= m_current_multi_operation;
for (;multi_range_curr < m_multi_range_defined; multi_range_curr++)
{
@@ -7146,7 +7088,7 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr)
Field **field, **end;
NdbValue *value= m_value;
- end= table->field + table->s->fields;
+ end= table->field + table_share->fields;
for (field= table->field; field < end; field++, value++)
{
@@ -8867,12 +8809,117 @@ int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows)
uint reported_frags;
uint no_fragments= get_no_fragments(max_rows);
uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
- adjusted_frag_count(no_fragments, no_nodes, reported_frags);
+ if (adjusted_frag_count(no_fragments, no_nodes, reported_frags))
+ {
+ push_warning(current_thd,
+ MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ "Ndb might have problems storing the max amount of rows specified");
+ }
return (int)reported_frags;
}
/*
+ Set-up auto-partitioning for NDB Cluster
+
+ SYNOPSIS
+ set_auto_partitions()
+ part_info Partition info struct to set-up
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Set-up auto partitioning scheme for tables that didn't define any
+ partitioning. We'll use PARTITION BY KEY() in this case which
+ translates into partition by primary key if a primary key exists
+ and partition by hidden key otherwise.
+*/
+
+void ha_ndbcluster::set_auto_partitions(partition_info *part_info)
+{
+ DBUG_ENTER("ha_ndbcluster::set_auto_partitions");
+ part_info->list_of_part_fields= TRUE;
+ part_info->part_type= HASH_PARTITION;
+ switch (opt_ndb_distribution_id)
+ {
+ case ND_KEYHASH:
+ part_info->linear_hash_ind= FALSE;
+ break;
+ case ND_LINHASH:
+ part_info->linear_hash_ind= TRUE;
+ break;
+ }
+ DBUG_VOID_RETURN;
+}
+
+
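+/*
+  Illustrative effect (an assumption, not part of this patch): with this
+  hook a plain CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=NDB behaves as
+  if the user had written PARTITION BY KEY(a), or PARTITION BY LINEAR
+  KEY(a) when opt_ndb_distribution_id is ND_LINHASH.
+*/
+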
+int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info)
+{
+ NDBTAB *tab= (NDBTAB*)tab_ref;
+ int32 *range_data= (int32*)my_malloc(part_info->no_parts*sizeof(int32),
+ MYF(0));
+ uint i;
+ int error= 0;
+ DBUG_ENTER("set_range_data");
+
+ if (!range_data)
+ {
+ mem_alloc_error(part_info->no_parts*sizeof(int32));
+ DBUG_RETURN(1);
+ }
+ for (i= 0; i < part_info->no_parts; i++)
+ {
+ longlong range_val= part_info->range_int_array[i];
+ if (range_val < INT_MIN32 || range_val > INT_MAX32)
+ {
+ my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
+ error= 1;
+ goto error;
+ }
+ range_data[i]= (int32)range_val;
+ }
+ tab->setRangeListData(range_data, sizeof(int32)*part_info->no_parts);
+error:
+ my_free((char*)range_data, MYF(0));
+ DBUG_RETURN(error);
+}
+
+int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info)
+{
+ NDBTAB *tab= (NDBTAB*)tab_ref;
+ int32 *list_data= (int32*)my_malloc(part_info->no_list_values * 2
+ * sizeof(int32), MYF(0));
+ uint32 *part_id, i;
+ int error= 0;
+ DBUG_ENTER("set_list_data");
+
+ if (!list_data)
+ {
+ mem_alloc_error(part_info->no_list_values*2*sizeof(int32));
+ DBUG_RETURN(1);
+ }
+ for (i= 0; i < part_info->no_list_values; i++)
+ {
+ LIST_PART_ENTRY *list_entry= &part_info->list_array[i];
+ longlong list_val= list_entry->list_value;
+ if (list_val < INT_MIN32 || list_val > INT_MAX32)
+ {
+ my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB");
+ error= 1;
+ goto error;
+ }
+ list_data[2*i]= (int32)list_val;
+ part_id= (uint32*)&list_data[2*i+1];
+ *part_id= list_entry->partition_id;
+ }
+ tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->no_list_values);
+error:
+ my_free((char*)list_data, MYF(0));
+ DBUG_RETURN(error);
+}
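+
+/*
+  Layout sketch inferred from the loop above (illustration only): for
+  PARTITION BY LIST (a) (PARTITION p0 VALUES IN (3,5),
+  PARTITION p1 VALUES IN (7)) the int32 array handed to
+  setRangeListData() holds (value, partition id) pairs:
+  { 3, id(p0), 5, id(p0), 7, id(p1) }.
+*/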
+
+/*
User defined partitioning set-up. We need to check how many fragments the
user wants defined and which node groups to put those into. Later we also
want to attach those partitions to a tablespace.
@@ -8889,12 +8936,18 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
TABLE *table,
void *tab_par)
{
- DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
- ushort node_group[MAX_PARTITIONS];
- ulong ng_index= 0, i, j;
+ uint16 frag_data[MAX_PARTITIONS];
+ char *ts_names[MAX_PARTITIONS];
+ ulong ts_index= 0, fd_index= 0, i, j;
NDBTAB *tab= (NDBTAB*)tab_par;
NDBTAB::FragmentType ftype= NDBTAB::UserDefined;
partition_element *part_elem;
+ bool first= TRUE;
+ uint ts_id, ts_version, part_count= 0, tot_ts_name_len;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ int error;
+ char *name_ptr;
+ DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
if (part_info->part_type == HASH_PARTITION &&
part_info->list_of_part_fields == TRUE)
@@ -8913,93 +8966,60 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
col->setPartitionKey(TRUE);
}
}
- List_iterator<partition_element> part_it(part_info->partitions);
- for (i= 0; i < part_info->no_parts; i++)
+ else if (part_info->part_type == RANGE_PARTITION)
{
- part_elem= part_it++;
- if (!is_sub_partitioned(part_info))
+ if ((error= set_range_data((void*)tab, part_info)))
{
- node_group[ng_index++]= part_elem->nodegroup_id;
- //Here we should insert tablespace id based on tablespace name
- }
- else
- {
- List_iterator<partition_element> sub_it(part_elem->subpartitions);
- for (j= 0; j < part_info->no_subparts; j++)
- {
- part_elem= sub_it++;
- node_group[ng_index++]= part_elem->nodegroup_id;
- //Here we should insert tablespace id based on tablespace name
- }
+ DBUG_RETURN(error);
}
}
+ else if (part_info->part_type == LIST_PARTITION)
{
- uint no_nodes= g_ndb_cluster_connection->no_db_nodes();
- if (ng_index > 4 * no_nodes)
+ if ((error= set_list_data((void*)tab, part_info)))
{
- DBUG_RETURN(1300);
+ DBUG_RETURN(error);
}
}
- tab->setNodeGroupIds(&node_group, ng_index);
tab->setFragmentType(ftype);
- DBUG_RETURN(0);
-}
-
-
-/*
- This routine is used to set-up fragmentation when the user has only specified
- ENGINE = NDB and no user defined partitioning what so ever. Thus all values
- will be based on default values. We will choose Linear Hash or Hash with
- perfect spread dependent on a session variable defined in MySQL.
-*/
-
-static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
-{
- NDBTAB::FragmentType ftype= NDBTAB::DistrKeyHash;
- ushort node_group[MAX_PARTITIONS];
- uint no_nodes= g_ndb_cluster_connection->no_db_nodes(), no_fragments, i;
- DBUG_ENTER("ndb_set_fragmentation");
-
- if (form->s->max_rows == (ha_rows) 0)
- {
- no_fragments= no_nodes;
- }
- else
+ i= 0;
+ tot_ts_name_len= 0;
+ do
{
- /*
- Ensure that we get enough fragments to handle all rows and ensure that
- the table is fully distributed by keeping the number of fragments a
- multiple of the number of nodes.
- */
- uint fragments= get_no_fragments(form->s->max_rows);
- if (adjusted_frag_count(fragments, no_nodes, no_fragments))
+ uint ng;
+ part_elem= part_it++;
+ if (!is_sub_partitioned(part_info))
{
- push_warning(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
- "Ndb might have problems storing the max amount of rows specified");
+ ng= part_elem->nodegroup_id;
+ if (first && ng == UNDEF_NODEGROUP)
+ ng= 0;
+ ts_names[fd_index]= part_elem->tablespace_name;
+ frag_data[fd_index++]= ng;
}
- }
- /*
- Always start with node group 0 and continue with next node group from
- there
- */
- node_group[0]= 0;
- for (i= 1; i < no_fragments; i++)
- node_group[i]= UNDEF_NODEGROUP;
- switch (opt_ndb_distribution_id)
- {
- case ND_KEYHASH:
- ftype= NDBTAB::DistrKeyHash;
- break;
- case ND_LINHASH:
- ftype= NDBTAB::DistrKeyLin;
- break;
- }
- tab.setFragmentType(ftype);
- tab.setNodeGroupIds(&node_group, no_fragments);
- DBUG_VOID_RETURN;
+ else
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ j= 0;
+ do
+ {
+ part_elem= sub_it++;
+ ng= part_elem->nodegroup_id;
+ if (first && ng == UNDEF_NODEGROUP)
+ ng= 0;
+ ts_names[fd_index]= part_elem->tablespace_name;
+ frag_data[fd_index++]= ng;
+ } while (++j < part_info->no_subparts);
+ }
+ first= FALSE;
+ } while (++i < part_info->no_parts);
+ tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions);
+ tab->setMaxRows(table->s->max_rows);
+ tab->setTablespaceNames(ts_names, fd_index*sizeof(char*));
+ tab->setFragmentCount(fd_index);
+ tab->setFragmentData(&frag_data, fd_index*2);
+ DBUG_RETURN(0);
}
+
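+/*
+  Example of the arrays built above (illustration only): two RANGE
+  partitions with two subpartitions each yield one uint16 node group id
+  per leaf fragment, e.g. frag_data = { 0, 0, UNDEF_NODEGROUP,
+  UNDEF_NODEGROUP } when none were specified (UNDEF entries of the first
+  partition are mapped to node group 0), while ts_names holds the
+  matching tablespace name pointers.
+*/
+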
bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
@@ -9252,3 +9272,41 @@ ndberror:
DBUG_RETURN(1);
}
+
+bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
+{
+ Ndb *ndb;
+ NDBDICT *dict;
+ const NDBTAB *tab;
+ int err;
+ DBUG_ENTER("ha_ndbcluster::get_no_parts");
+
+ set_dbname(name);
+ set_tabname(name);
+ do
+ {
+ if (check_ndb_connection())
+ {
+ err= HA_ERR_NO_CONNECTION;
+ break;
+ }
+ ndb= get_ndb();
+ dict= ndb->getDictionary();
+ if (!(tab= dict->getTable(m_tabname)))
+ ERR_BREAK(dict->getNdbError(), err);
+ // Check if thread has stale local cache
+ if (tab->getObjectStatus() == NdbDictionary::Object::Invalid)
+ {
+ invalidate_dictionary_cache(FALSE);
+ if (!(tab= dict->getTable(m_tabname)))
+ ERR_BREAK(dict->getNdbError(), err);
+ }
+ *no_parts= tab->getFragmentCount();
+ DBUG_RETURN(FALSE);
+ } while (1);
+
+end:
+ print_error(err, MYF(0));
+ DBUG_RETURN(TRUE);
+}
+
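With partition_flags and alter_table_flags moved from handler methods into
handlerton function pointers (see the ha_ndbcluster.h hunk below), capability
checks no longer require a handler instance. A caller-side sketch (the call
site is assumed; the flag and hook names come from this patch):

    handlerton *hton= table->s->db_type;
    uint pflags= hton->partition_flags ? hton->partition_flags() : 0;
    if (pflags & HA_USE_AUTO_PARTITION)
      file->set_auto_partitions(part_info); /* NDB: PARTITION BY KEY() default */
    if (hton->alter_table_flags &&
        !(hton->alter_table_flags(alter_flags) & HA_FAST_CHANGE_PARTITION))
      use_copy_algorithm= TRUE;             /* no fast partition change */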
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index ea35af908d8..71f87557049 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -561,22 +561,13 @@ class ha_ndbcluster: public handler
int extra_opt(enum ha_extra_function operation, ulong cache_size);
int external_lock(THD *thd, int lock_type);
int start_stmt(THD *thd, thr_lock_type lock_type);
+ void print_error(int error, myf errflag);
const char * table_type() const;
const char ** bas_ext() const;
ulong table_flags(void) const;
- ulong alter_table_flags(void) const
- {
- return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX |
- HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX);
- }
int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys);
int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys);
int final_drop_index(TABLE *table_arg);
- ulong partition_flags(void) const
- {
- return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY |
- HA_CAN_PARTITION_UNIQUE);
- }
void set_part_info(partition_info *part_info);
ulong index_flags(uint idx, uint part, bool all_parts) const;
uint max_supported_record_length() const;
@@ -588,6 +579,9 @@ class ha_ndbcluster: public handler
int delete_table(const char *name);
int create(const char *name, TABLE *form, HA_CREATE_INFO *info);
int get_default_no_partitions(ulonglong max_rows);
+ bool get_no_parts(const char *name, uint *no_parts);
+ void set_auto_partitions(partition_info *part_info);
+
THR_LOCK_DATA **store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
@@ -657,7 +651,7 @@ static void set_tabname(const char *pathname, char *tabname);
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
- static void invalidate_dictionary_cache(TABLE *table, Ndb *ndb,
+ static void invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb,
const char *tabname, bool global);
private:
@@ -694,6 +688,8 @@ private:
uint set_up_partition_info(partition_info *part_info,
TABLE *table,
void *tab);
+ int set_range_data(void *tab, partition_info* part_info);
+ int set_list_data(void *tab, partition_info* part_info);
int complemented_pk_read(const byte *old_data, byte *new_data,
uint32 old_part_id);
int pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id);
@@ -743,7 +739,7 @@ private:
char *update_table_comment(const char * comment);
- int write_ndb_file();
+ int write_ndb_file(const char *name);
int check_ndb_connection(THD* thd= current_thd);
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index c80b2b27d8d..a807140d2e7 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -260,7 +260,7 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table)
break;
}
if ((error= open_table_from_share(thd, table_share, "", 0,
- (uint) READ_ALL, 0, table)))
+ (uint) READ_ALL, 0, table, FALSE)))
{
sql_print_error("Unable to open table for %s, error=%d(%d)",
share->key, error, my_errno);
@@ -1219,7 +1219,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
pOp->getReqNodeId() != g_ndb_cluster_connection->node_id())
{
ndb->setDatabaseName(share->table->s->db.str);
- ha_ndbcluster::invalidate_dictionary_cache(share->table,
+ ha_ndbcluster::invalidate_dictionary_cache(share->table->s,
ndb,
share->table->s->table_name.str,
TRUE);
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index d7549c1a95b..3d3f4f8d971 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -67,6 +67,8 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
****************************************************************************/
static handler *partition_create_handler(TABLE_SHARE *share);
+static uint partition_flags();
+static uint alter_table_flags(uint flags);
handlerton partition_hton = {
MYSQL_HANDLERTON_INTERFACE_VERSION,
@@ -96,15 +98,68 @@ handlerton partition_hton = {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ partition_flags, /* Partition flags */
+  alter_table_flags,                            /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_NOT_USER_SELECTABLE | HTON_HIDDEN
};
+/*
+ Create new partition handler
+
+ SYNOPSIS
+ partition_create_handler()
+ table Table object
+
+ RETURN VALUE
+ New partition object
+*/
+
static handler *partition_create_handler(TABLE_SHARE *share)
{
return new ha_partition(share);
}
+/*
+ HA_CAN_PARTITION:
+ Used by storage engines that can handle partitioning without this
+ partition handler
+ (Partition, NDB)
+
+ HA_CAN_UPDATE_PARTITION_KEY:
+ Set if the handler can update fields that are part of the partition
+ function.
+
+ HA_CAN_PARTITION_UNIQUE:
+ Set if the handler can handle unique indexes where the fields of the
+ unique key are not part of the fields of the partition function. Thus
+ a unique key can be set on all fields.
+
+  HA_USE_AUTO_PARTITION:
+ Set if the handler sets all tables to be partitioned by default.
+*/
+
+static uint partition_flags()
+{
+ return HA_CAN_PARTITION;
+}
+
+static uint alter_table_flags(uint flags __attribute__((unused)))
+{
+ return (HA_PARTITION_FUNCTION_SUPPORTED |
+ HA_FAST_CHANGE_PARTITION);
+}
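
For orientation, a minimal sketch (not part of the patch) of how another
engine could surface the same two callbacks through its handlerton slots;
the example_* names are hypothetical and the flag values simply reuse the
constants documented above:

  static uint example_partition_flags()
  {
    /* Engine partitions natively and allows unique keys on any fields */
    return HA_CAN_PARTITION | HA_CAN_PARTITION_UNIQUE;
  }

  static uint example_alter_table_flags(uint flags __attribute__((unused)))
  {
    /* Partition functions supported; partitions changed without full copy */
    return (HA_PARTITION_FUNCTION_SUPPORTED |
            HA_FAST_CHANGE_PARTITION);
  }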
+
+/*
+ Constructor method
+
+ SYNOPSIS
+ ha_partition()
+    share Table share object
+
+ RETURN VALUE
+ NONE
+*/
ha_partition::ha_partition(TABLE_SHARE *share)
:handler(&partition_hton, share), m_part_info(NULL), m_create_handler(FALSE),
@@ -116,6 +171,17 @@ ha_partition::ha_partition(TABLE_SHARE *share)
}
+/*
+ Constructor method
+
+ SYNOPSIS
+ ha_partition()
+ part_info Partition info
+
+ RETURN VALUE
+ NONE
+*/
+
ha_partition::ha_partition(partition_info *part_info)
:handler(&partition_hton, NULL), m_part_info(part_info),
m_create_handler(TRUE),
@@ -129,13 +195,28 @@ ha_partition::ha_partition(partition_info *part_info)
}
+/*
+ Initialise handler object
+
+ SYNOPSIS
+ init_handler_variables()
+
+ RETURN VALUE
+ NONE
+*/
+
void ha_partition::init_handler_variables()
{
active_index= MAX_KEY;
+ m_mode= 0;
+ m_open_test_lock= 0;
m_file_buffer= NULL;
m_name_buffer_ptr= NULL;
m_engine_array= NULL;
m_file= NULL;
+ m_reorged_file= NULL;
+ m_reorged_parts= 0;
+ m_added_file= NULL;
m_tot_parts= 0;
m_has_transactions= 0;
m_pkey_is_clustered= 0;
@@ -172,6 +253,16 @@ void ha_partition::init_handler_variables()
}
+/*
+ Destructor method
+
+ SYNOPSIS
+ ~ha_partition()
+
+ RETURN VALUE
+ NONE
+*/
+
ha_partition::~ha_partition()
{
DBUG_ENTER("ha_partition::~ha_partition()");
@@ -189,6 +280,17 @@ ha_partition::~ha_partition()
/*
+ Initialise partition handler object
+
+ SYNOPSIS
+ ha_initialise()
+
+ RETURN VALUE
+ 1 Error
+ 0 Success
+
+ DESCRIPTION
+
The partition handler is only a layer on top of other engines. Thus it
can't really perform anything without the underlying handlers. Thus we
add this method as part of the allocation of a handler object.
@@ -218,6 +320,7 @@ ha_partition::~ha_partition()
sort will be performed using the underlying handlers.
5) primary_key_is_clustered, has_transactions and low_byte_first is
calculated here.
+
*/
int ha_partition::ha_initialise()
@@ -244,7 +347,7 @@ int ha_partition::ha_initialise()
}
else if (get_from_handler_file(table_share->normalized_path.str))
{
- my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error
+ mem_alloc_error(2);
DBUG_RETURN(1);
}
/*
@@ -289,47 +392,119 @@ int ha_partition::ha_initialise()
MODULE meta data changes
****************************************************************************/
/*
- This method is used to calculate the partition name, service routine to
- the del_ren_cre_table method.
+ Create partition names
+
+ SYNOPSIS
+ create_partition_name()
+ out:out Created partition name string
+ in1 First part
+ in2 Second part
+    name_variant Normal, temporary or renamed partition name
+    translate Whether to run in2 through tablename_to_filename
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ This method is used to calculate the partition name, service routine to
+ the del_ren_cre_table method.
*/
-static void create_partition_name(char *out, const char *in1, const char *in2)
+#define NORMAL_PART_NAME 0
+#define TEMP_PART_NAME 1
+#define RENAMED_PART_NAME 2
+static void create_partition_name(char *out, const char *in1,
+ const char *in2, uint name_variant,
+ bool translate)
{
- strxmov(out, in1, "_", in2, NullS);
+ char transl_part_name[FN_REFLEN];
+ const char *transl_part;
+
+ if (translate)
+ {
+ tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+ transl_part= transl_part_name;
+ }
+ else
+ transl_part= in2;
+ if (name_variant == NORMAL_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, NullS);
+ else if (name_variant == TEMP_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
+ else if (name_variant == RENAMED_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
}
/*
- This method is used to calculate the partition name, service routine to
+ Create subpartition name
+
+ SYNOPSIS
+ create_subpartition_name()
+ out:out Created partition name string
+ in1 First part
+ in2 Second part
+ in3 Third part
+ name_variant Normal, temporary or renamed partition name
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ This method is used to calculate the subpartition name, service routine to
the del_ren_cre_table method.
*/
static void create_subpartition_name(char *out, const char *in1,
- const char *in2, const char *in3)
+ const char *in2, const char *in3,
+ uint name_variant)
{
- strxmov(out, in1, "_", in2, "_", in3, NullS);
+ char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
+
+ tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+ tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
+ if (name_variant == NORMAL_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, NullS);
+ else if (name_variant == TEMP_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, "#TMP#", NullS);
+ else if (name_variant == RENAMED_PART_NAME)
+ strxmov(out, in1, "#P#", transl_part_name,
+ "#SP#", transl_subpart_name, "#REN#", NullS);
}
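
To make the naming scheme concrete, here is what the two routines above
produce for a hypothetical table at path "./test/t1" with partition "p0"
and subpartition "sp0":

  /*
    create_partition_name():
      NORMAL_PART_NAME    ./test/t1#P#p0
      TEMP_PART_NAME      ./test/t1#P#p0#TMP#
      RENAMED_PART_NAME   ./test/t1#P#p0#REN#

    create_subpartition_name():
      NORMAL_PART_NAME    ./test/t1#P#p0#SP#sp0
      TEMP_PART_NAME      ./test/t1#P#p0#SP#sp0#TMP#
      RENAMED_PART_NAME   ./test/t1#P#p0#SP#sp0#REN#
  */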
/*
- Used to delete a table. By the time delete_table() has been called all
- opened references to this table will have been closed (and your globally
- shared references released. The variable name will just be the name of
- the table. You will need to remove any files you have created at this
- point.
-
- If you do not implement this, the default delete_table() is called from
- handler.cc and it will delete all files with the file extentions returned
- by bas_ext().
+ Delete a table
- Called from handler.cc by delete_table and ha_create_table(). Only used
- during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
- the storage engine.
+ SYNOPSIS
+ delete_table()
+ name Full path of table name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Used to delete a table. By the time delete_table() has been called all
+ opened references to this table will have been closed (and your globally
+    shared references released). The variable name will just be the name of
+ the table. You will need to remove any files you have created at this
+ point.
+
+ If you do not implement this, the default delete_table() is called from
+    handler.cc and it will delete all files with the file extensions returned
+ by bas_ext().
+
+ Called from handler.cc by delete_table and ha_create_table(). Only used
+ during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
+ the storage engine.
*/
int ha_partition::delete_table(const char *name)
{
int error;
DBUG_ENTER("ha_partition::delete_table");
+
if ((error= del_ren_cre_table(name, NULL, NULL, NULL)))
DBUG_RETURN(error);
DBUG_RETURN(handler::delete_table(name));
@@ -337,19 +512,32 @@ int ha_partition::delete_table(const char *name)
/*
- Renames a table from one name to another from alter table call.
+ Rename a table
+
+ SYNOPSIS
+ rename_table()
+ from Full path of old table name
+ to Full path of new table name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Renames a table from one name to another from alter table call.
- If you do not implement this, the default rename_table() is called from
- handler.cc and it will delete all files with the file extentions returned
- by bas_ext().
+ If you do not implement this, the default rename_table() is called from
+    handler.cc and it will rename all files with the file extensions returned
+ by bas_ext().
- Called from sql_table.cc by mysql_rename_table().
+ Called from sql_table.cc by mysql_rename_table().
*/
int ha_partition::rename_table(const char *from, const char *to)
{
int error;
DBUG_ENTER("ha_partition::rename_table");
+
if ((error= del_ren_cre_table(from, to, NULL, NULL)))
DBUG_RETURN(error);
DBUG_RETURN(handler::rename_table(from, to));
@@ -357,11 +545,22 @@ int ha_partition::rename_table(const char *from, const char *to)
/*
- create_handler_files is called to create any handler specific files
- before opening the file with openfrm to later call ::create on the
- file object.
- In the partition handler this is used to store the names of partitions
- and types of engines in the partitions.
+ Create the handler file (.par-file)
+
+ SYNOPSIS
+ create_handler_files()
+ name Full path of table name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ create_handler_files is called to create any handler specific files
+ before opening the file with openfrm to later call ::create on the
+ file object.
+ In the partition handler this is used to store the names of partitions
+ and types of engines in the partitions.
*/
int ha_partition::create_handler_files(const char *name)
@@ -372,7 +571,6 @@ int ha_partition::create_handler_files(const char *name)
We need to update total number of parts since we might write the handler
file as part of a partition management command
*/
- m_tot_parts= get_tot_partitions(m_part_info);
if (create_handler_file(name))
{
my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0));
@@ -383,14 +581,27 @@ int ha_partition::create_handler_files(const char *name)
/*
- create() is called to create a table. The variable name will have the name
- of the table. When create() is called you do not need to worry about
- opening the table. Also, the FRM file will have already been created so
- adjusting create_info will not do you any good. You can overwrite the frm
- file at this point if you wish to change the table definition, but there
- are no methods currently provided for doing that.
+ Create a partitioned table
- Called from handle.cc by ha_create_table().
+ SYNOPSIS
+ create()
+ name Full path of table name
+ table_arg Table object
+ create_info Create info generated for CREATE TABLE
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ create() is called to create a table. The variable name will have the name
+ of the table. When create() is called you do not need to worry about
+ opening the table. Also, the FRM file will have already been created so
+ adjusting create_info will not do you any good. You can overwrite the frm
+ file at this point if you wish to change the table definition, but there
+ are no methods currently provided for doing that.
+
+ Called from handler.cc by ha_create_table().
*/
int ha_partition::create(const char *name, TABLE *table_arg,
@@ -410,23 +621,838 @@ int ha_partition::create(const char *name, TABLE *table_arg,
DBUG_RETURN(0);
}
+
+/*
+ Drop partitions as part of ALTER TABLE of partitions
+
+ SYNOPSIS
+ drop_partitions()
+ path Complete path of db and table name
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+
+ DESCRIPTION
+ Use part_info object on handler object to deduce which partitions to
+ drop (each partition has a state attached to it)
+*/
+
int ha_partition::drop_partitions(const char *path)
{
List_iterator<partition_element> part_it(m_part_info->partitions);
+ List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
+ char part_name_buff[FN_REFLEN];
+ uint no_parts= m_part_info->partitions.elements;
+ uint part_count= 0;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ uint name_variant;
+ int error= 1;
+ bool reorged_parts= (m_reorged_parts > 0);
+ bool temp_partitions= (m_part_info->temp_partitions.elements > 0);
+ DBUG_ENTER("ha_partition::drop_partitions");
+
+ if (temp_partitions)
+ no_parts= m_part_info->temp_partitions.elements;
+ do
+ {
+ partition_element *part_elem;
+ if (temp_partitions)
+ {
+ /*
+ We need to remove the reorganised partitions that were put in the
+ temp_partitions-list.
+ */
+ part_elem= temp_it++;
+ DBUG_ASSERT(part_elem->part_state == PART_TO_BE_DROPPED);
+ }
+ else
+ part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_DROPPED ||
+ part_elem->part_state == PART_IS_CHANGED)
+ {
+ handler *file;
+ /*
+ This part is to be dropped, meaning the part or all its subparts.
+ */
+ name_variant= NORMAL_PART_NAME;
+ if (part_elem->part_state == PART_IS_CHANGED ||
+ (part_elem->part_state == PART_TO_BE_DROPPED && temp_partitions))
+ name_variant= RENAMED_PART_NAME;
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint j= 0, part;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ part= i * no_subparts + j;
+ create_subpartition_name(part_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name, name_variant);
+ if (reorged_parts)
+ file= m_reorged_file[part_count++];
+ else
+ file= m_file[part];
+ DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
+ error= file->delete_table((const char *) part_name_buff);
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ create_partition_name(part_name_buff, path,
+ part_elem->partition_name, name_variant,
+ TRUE);
+ if (reorged_parts)
+ file= m_reorged_file[part_count++];
+ else
+ file= m_file[i];
+ DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
+ error= file->delete_table((const char *) part_name_buff);
+ }
+ if (part_elem->part_state == PART_IS_CHANGED)
+ part_elem->part_state= PART_NORMAL;
+ else
+ part_elem->part_state= PART_IS_DROPPED;
+ }
+ } while (++i < no_parts);
+ DBUG_RETURN(error);
+}
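
The state handling at the bottom of the loop is easiest to follow as a
transition table; this restates the logic above without adding anything:

  /*
    part_state transitions applied by drop_partitions():

      PART_TO_BE_DROPPED  -- files deleted -->         PART_IS_DROPPED
      PART_IS_CHANGED     -- renamed copy deleted -->  PART_NORMAL

    A PART_IS_CHANGED partition only loses its #REN# copy here and
    continues to exist, hence it returns to the normal state.
  */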
+
+
+/*
+ Rename partitions as part of ALTER TABLE of partitions
+
+ SYNOPSIS
+ rename_partitions()
+ path Complete path of db and table name
+
+ RETURN VALUE
+ TRUE Failure
+ FALSE Success
+
+ DESCRIPTION
+ When reorganising partitions, adding hash partitions and coalescing
+ partitions it can be necessary to rename partitions while holding
+ an exclusive lock on the table.
+ Which partitions to rename is given by state of partitions found by the
+ partition info struct referenced from the handler object
+*/
+
+int ha_partition::rename_partitions(const char *path)
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ List_iterator<partition_element> temp_it(m_part_info->temp_partitions);
char part_name_buff[FN_REFLEN];
+ char norm_name_buff[FN_REFLEN];
+ uint no_parts= m_part_info->partitions.elements;
+ uint part_count= 0;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ uint j= 0;
+ int error= 1;
+ uint temp_partitions= m_part_info->temp_partitions.elements;
+ handler *file;
+ partition_element *part_elem, *sub_elem;
+ DBUG_ENTER("ha_partition::rename_partitions");
+
+ if (temp_partitions)
+ {
+ do
+ {
+ part_elem= temp_it++;
+ if (m_is_sub_partitioned)
+ {
+        List_iterator<partition_element> sub_it(part_elem->subpartitions);
+        j= 0;
+        do
+ {
+ sub_elem= sub_it++;
+ file= m_reorged_file[part_count++];
+ create_subpartition_name(part_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ RENAMED_PART_NAME);
+ create_subpartition_name(norm_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ NORMAL_PART_NAME);
+ DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+ norm_name_buff, part_name_buff));
+ error= file->rename_table((const char *) norm_name_buff,
+ (const char *) part_name_buff);
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ file= m_reorged_file[part_count++];
+ create_partition_name(part_name_buff, path,
+ part_elem->partition_name, RENAMED_PART_NAME,
+ TRUE);
+ create_partition_name(norm_name_buff, path,
+ part_elem->partition_name, NORMAL_PART_NAME,
+ TRUE);
+ DBUG_PRINT("info", ("Rename partition from %s to %s",
+ norm_name_buff, part_name_buff));
+ error= file->rename_table((const char *) norm_name_buff,
+ (const char *) part_name_buff);
+ }
+ } while (++i < temp_partitions);
+ }
+ i= 0;
+ do
+ {
+ part_elem= part_it++;
+ if (part_elem->part_state == PART_IS_CHANGED ||
+ (part_elem->part_state == PART_IS_ADDED && temp_partitions))
+ {
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint part;
+
+ j= 0;
+ do
+ {
+ sub_elem= sub_it++;
+ part= i * no_subparts + j;
+ create_subpartition_name(norm_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ NORMAL_PART_NAME);
+ if (part_elem->part_state == PART_IS_CHANGED)
+ {
+ file= m_reorged_file[part_count++];
+ create_subpartition_name(part_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ RENAMED_PART_NAME);
+ DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+ norm_name_buff, part_name_buff));
+ error= file->rename_table((const char *) norm_name_buff,
+ (const char *) part_name_buff);
+ }
+ file= m_new_file[part];
+ create_subpartition_name(part_name_buff, path,
+ part_elem->partition_name,
+ sub_elem->partition_name,
+ TEMP_PART_NAME);
+ DBUG_PRINT("info", ("Rename subpartition from %s to %s",
+ part_name_buff, norm_name_buff));
+ error= file->rename_table((const char *) part_name_buff,
+ (const char *) norm_name_buff);
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ create_partition_name(norm_name_buff, path,
+ part_elem->partition_name, NORMAL_PART_NAME,
+ TRUE);
+ if (part_elem->part_state == PART_IS_CHANGED)
+ {
+ file= m_reorged_file[part_count++];
+ create_partition_name(part_name_buff, path,
+ part_elem->partition_name, RENAMED_PART_NAME,
+ TRUE);
+ DBUG_PRINT("info", ("Rename partition from %s to %s",
+ norm_name_buff, part_name_buff));
+ error= file->rename_table((const char *) norm_name_buff,
+ (const char *) part_name_buff);
+ }
+ file= m_new_file[i];
+ create_partition_name(part_name_buff, path,
+ part_elem->partition_name, TEMP_PART_NAME,
+ TRUE);
+ DBUG_PRINT("info", ("Rename partition from %s to %s",
+ part_name_buff, norm_name_buff));
+ error= file->rename_table((const char *) part_name_buff,
+ (const char *) norm_name_buff);
+ }
+ }
+ } while (++i < no_parts);
+ DBUG_RETURN(error);
+}
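
The ordering that rename_partitions() implements amounts to a two-step
protocol per changed partition; sketched below with a hypothetical
partition "p0" and no subpartitioning:

  /*
    1. Move the old data aside:      t1#P#p0       ->  t1#P#p0#REN#
    2. Move the new data in place:   t1#P#p0#TMP#  ->  t1#P#p0

    The #REN# copy is removed later by drop_partitions(), so a complete
    copy of p0 exists under a well-known name at every point in time.
  */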
+
+
+#define OPTIMIZE_PARTS 1
+#define ANALYZE_PARTS 2
+#define CHECK_PARTS 3
+#define REPAIR_PARTS 4
+
+/*
+ Optimize table
+
+ SYNOPSIS
+ optimize()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::optimize(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::optimize");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ OPTIMIZE_PARTS, TRUE));
+}
+
+
+/*
+ Analyze table
+
+ SYNOPSIS
+ analyze()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::analyze");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ ANALYZE_PARTS, TRUE));
+}
+
+
+/*
+ Check table
+
+ SYNOPSIS
+ check()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::check(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::check");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ CHECK_PARTS, TRUE));
+}
+
+
+/*
+ Repair table
+
+ SYNOPSIS
+ repair()
+ thd Thread object
+ check_opt Check/analyze/repair/optimize options
+
+ RETURN VALUES
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt)
+{
+ DBUG_ENTER("ha_partition::repair");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ REPAIR_PARTS, TRUE));
+}
+
+/*
+ Optimize partitions
+
+ SYNOPSIS
+ optimize_partitions()
+    thd Thread object
+
+  RETURN VALUE
+    >0 Failure
+    0 Success
+
+  DESCRIPTION
+ Call optimize on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::optimize_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::optimize_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ OPTIMIZE_PARTS, FALSE));
+}
+
+/*
+ Analyze partitions
+
+ SYNOPSIS
+ analyze_partitions()
+    thd Thread object
+
+  RETURN VALUE
+    >0 Failure
+    0 Success
+
+  DESCRIPTION
+ Call analyze on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::analyze_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::analyze_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ ANALYZE_PARTS, FALSE));
+}
+
+/*
+ Check partitions
+
+ SYNOPSIS
+ check_partitions()
+    thd Thread object
+
+  RETURN VALUE
+    >0 Failure
+    0 Success
+
+  DESCRIPTION
+ Call check on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::check_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::check_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ CHECK_PARTS, FALSE));
+}
+
+/*
+ Repair partitions
+
+ SYNOPSIS
+ repair_partitions()
+    thd Thread object
+
+  RETURN VALUE
+    >0 Failure
+    0 Success
+
+  DESCRIPTION
+ Call repair on each partition marked with partition state PART_CHANGED
+*/
+
+int ha_partition::repair_partitions(THD *thd)
+{
+ DBUG_ENTER("ha_partition::repair_partitions");
+
+ DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt,
+ REPAIR_PARTS, FALSE));
+}
+
+
+/*
+ Handle optimize/analyze/check/repair of one partition
+
+ SYNOPSIS
+ handle_opt_part()
+ thd Thread object
+ check_opt Options
+ file Handler object of partition
+ flag Optimize/Analyze/Check/Repair flag
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+*/
+
+static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
+ handler *file, uint flag)
+{
+ int error;
+ DBUG_ENTER("handle_opt_part");
+ DBUG_PRINT("enter", ("flag = %u", flag));
+
+ if (flag == OPTIMIZE_PARTS)
+ error= file->optimize(thd, check_opt);
+ else if (flag == ANALYZE_PARTS)
+ error= file->analyze(thd, check_opt);
+ else if (flag == CHECK_PARTS)
+ error= file->check(thd, check_opt);
+ else if (flag == REPAIR_PARTS)
+ error= file->repair(thd, check_opt);
+ else
+ {
+ DBUG_ASSERT(FALSE);
+ error= 1;
+ }
+ if (error == HA_ADMIN_ALREADY_DONE)
+ error= 0;
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Handle optimize/analyze/check/repair of partitions
+
+ SYNOPSIS
+ handle_opt_partitions()
+ thd Thread object
+ check_opt Options
+ flag Optimize/Analyze/Check/Repair flag
+ all_parts All partitions or only a subset
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+*/
+
+int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+ uint flag, bool all_parts)
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
uint no_parts= m_part_info->no_parts;
- uint no_subparts= m_part_info->no_subparts, i= 0;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ LEX *lex= thd->lex;
+ int error;
+ DBUG_ENTER("ha_partition::handle_opt_partitions");
+ DBUG_PRINT("enter", ("all_parts %u, flag= %u", all_parts, flag));
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (all_parts || part_elem->part_state == PART_CHANGED)
+ {
+ handler *file;
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ uint j= 0, part;
+ do
+ {
+ partition_element *sub_elem= sub_it++;
+ part= i * no_subparts + j;
+ DBUG_PRINT("info", ("Optimize subpartition %u",
+ part));
+ if ((error= handle_opt_part(thd, check_opt, m_file[part], flag)))
+ {
+ my_error(ER_GET_ERRNO, MYF(0), error);
+ DBUG_RETURN(TRUE);
+ }
+ } while (++j < no_subparts);
+ }
+ else
+ {
+ DBUG_PRINT("info", ("Optimize partition %u", i));
+ if ((error= handle_opt_part(thd, check_opt, m_file[i], flag)))
+ {
+ my_error(ER_GET_ERRNO, MYF(0), error);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ } while (++i < no_parts);
+ DBUG_RETURN(FALSE);
+}
+
+/*
+ Prepare by creating a new partition
+
+ SYNOPSIS
+ prepare_new_partition()
+ table Table object
+ create_info Create info from CREATE TABLE
+ file Handler object of new partition
+ part_name partition name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::prepare_new_partition(TABLE *table,
+ HA_CREATE_INFO *create_info,
+ handler *file, const char *part_name)
+{
+ int error;
+ bool create_flag= FALSE;
+ bool open_flag= FALSE;
+ DBUG_ENTER("prepare_new_partition");
+
+ if ((error= file->create(part_name, table, create_info)))
+ goto error;
+ create_flag= TRUE;
+ if ((error= file->ha_open(table, part_name, m_mode, m_open_test_lock)))
+ goto error;
+ if ((error= file->external_lock(current_thd, m_lock_type)))
+ goto error;
+
+ DBUG_RETURN(0);
+error:
+ if (create_flag)
+ VOID(file->delete_table(part_name));
+ print_error(error, MYF(0));
+ DBUG_RETURN(error);
+}
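
prepare_new_partition() follows a staged-acquire pattern: each completed
step is recorded so the error path undoes exactly that much and no more.
A compressed restatement of the shape (not part of the patch):

  /*
    if (create(...))         goto error;   else created= TRUE;
    if (ha_open(...))        goto error;
    if (external_lock(...))  goto error;
    return 0;
  error:
    if (created)
      delete_table(...);                   undo completed steps only
    return error;
  */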
+
+
+/*
+ Cleanup by removing all created partitions after error
+
+ SYNOPSIS
+ cleanup_new_partition()
+ part_count Number of partitions to remove
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ TODO:
+ We must ensure that in the case that we get an error during the process
+ that we call external_lock with F_UNLCK, close the table and delete the
+    table in the case where we have been successful with
+    prepare_new_partition. We solve this by keeping an array of successful
+    calls to prepare_new_partition
+ which can then be used to undo the call.
+*/
+
+void ha_partition::cleanup_new_partition(uint part_count)
+{
+ handler **save_m_file= m_file;
+ DBUG_ENTER("ha_partition::cleanup_new_partition");
+
+ if (m_added_file && m_added_file[0])
+ {
+ m_file= m_added_file;
+ m_added_file= NULL;
+
+ external_lock(current_thd, F_UNLCK);
+ /* delete_table also needed, a bit more complex */
+ close();
+
+ m_added_file= m_file;
+ m_file= save_m_file;
+ }
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Implement the partition changes defined by ALTER TABLE of partitions
+
+ SYNOPSIS
+ change_partitions()
+ create_info HA_CREATE_INFO object describing all
+ fields and indexes in table
+ path Complete path of db and table name
+ out: copied Output parameter where number of copied
+ records are added
+ out: deleted Output parameter where number of deleted
+ records are added
+ pack_frm_data Reference to packed frm file
+ pack_frm_len Length of packed frm file
+
+ RETURN VALUE
+ >0 Failure
+ 0 Success
+
+ DESCRIPTION
+ Add and copy if needed a number of partitions, during this operation
+ no other operation is ongoing in the server. This is used by
+ ADD PARTITION all types as well as by REORGANIZE PARTITION. For
+ one-phased implementations it is used also by DROP and COALESCE
+ PARTITIONs.
+ One-phased implementation needs the new frm file, other handlers will
+ get zero length and a NULL reference here.
+*/
+
+int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
+ const char *path,
+ ulonglong *copied,
+ ulonglong *deleted,
+ const void *pack_frm_data
+ __attribute__((unused)),
+ uint pack_frm_len
+ __attribute__((unused)))
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ List_iterator <partition_element> t_it(m_part_info->temp_partitions);
+ char part_name_buff[FN_REFLEN];
+ uint no_parts= m_part_info->partitions.elements;
+ uint no_subparts= m_part_info->no_subparts;
+ uint i= 0;
+ uint no_remain_partitions, part_count;
+ handler **new_file_array;
int error= 1;
- DBUG_ENTER("ha_partition::drop_partitions()");
+ bool first;
+ bool copy_parts= FALSE;
+ uint temp_partitions= m_part_info->temp_partitions.elements;
+ THD *thd= current_thd;
+ DBUG_ENTER("ha_partition::change_partitions");
+
+ m_reorged_parts= 0;
+ if (!is_sub_partitioned(m_part_info))
+ no_subparts= 1;
+
+ /*
+ Step 1:
+ Calculate number of reorganised partitions and allocate space for
+ their handler references.
+ */
+ if (temp_partitions)
+ {
+ m_reorged_parts= temp_partitions * no_subparts;
+ }
+ else
+ {
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_CHANGED ||
+ part_elem->part_state == PART_REORGED_DROPPED)
+ {
+ m_reorged_parts+= no_subparts;
+ }
+ } while (++i < no_parts);
+ }
+ if (m_reorged_parts &&
+      !(m_reorged_file= (handler**)sql_calloc(sizeof(handler*)*
+        (m_reorged_parts + 1))))
+  {
+    mem_alloc_error(sizeof(handler*)*(m_reorged_parts+1));
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ Step 2:
+ Calculate number of partitions after change and allocate space for
+ their handler references.
+ */
+ no_remain_partitions= 0;
+ if (temp_partitions)
+ {
+ no_remain_partitions= no_parts * no_subparts;
+ }
+ else
+ {
+ part_it.rewind();
+ i= 0;
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_NORMAL ||
+ part_elem->part_state == PART_TO_BE_ADDED ||
+ part_elem->part_state == PART_CHANGED)
+ {
+ no_remain_partitions+= no_subparts;
+ }
+ } while (++i < no_parts);
+ }
+ if (!(new_file_array= (handler**)sql_calloc(sizeof(handler*)*
+ (2*(no_remain_partitions + 1)))))
+ {
+ mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1));
+ DBUG_RETURN(TRUE);
+ }
+ m_added_file= &new_file_array[no_remain_partitions + 1];
+
+ /*
+ Step 3:
+ Fill m_reorged_file with handler references and NULL at the end
+ */
+ if (m_reorged_parts)
+ {
+ i= 0;
+ part_count= 0;
+ first= TRUE;
+ part_it.rewind();
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_CHANGED ||
+ part_elem->part_state == PART_REORGED_DROPPED)
+ {
+ memcpy((void*)&m_reorged_file[part_count],
+ (void*)&m_file[i*no_subparts],
+ sizeof(handler*)*no_subparts);
+ part_count+= no_subparts;
+ }
+ else if (first && temp_partitions &&
+ part_elem->part_state == PART_TO_BE_ADDED)
+ {
+ /*
+ When doing an ALTER TABLE REORGANIZE PARTITION a number of
+ partitions is to be reorganised into a set of new partitions.
+ The reorganised partitions are in this case in the temp_partitions
+ list. We copy all of them in one batch and thus we only do this
+ until we find the first partition with state PART_TO_BE_ADDED
+ since this is where the new partitions go in and where the old
+ ones used to be.
+ */
+ first= FALSE;
+        /* m_reorged_parts already includes the subpartition factor */
+        memcpy((void*)m_reorged_file, &m_file[i*no_subparts],
+               sizeof(handler*)*m_reorged_parts);
+ }
+ } while (++i < no_parts);
+ }
+ /*
+ Step 4:
+ Fill new_array_file with handler references. Create the handlers if
+ needed.
+ */
+ i= 0;
+ part_count= 0;
+ part_it.rewind();
do
{
partition_element *part_elem= part_it++;
- if (part_elem->part_state == PART_IS_DROPPED)
+ if (part_elem->part_state == PART_NORMAL)
+ {
+ memcpy((void*)&new_file_array[part_count], (void*)&m_file[i],
+ sizeof(handler*)*no_subparts);
+ part_count+= no_subparts;
+ }
+ else if (part_elem->part_state == PART_CHANGED ||
+ part_elem->part_state == PART_TO_BE_ADDED)
+ {
+ uint j= 0;
+ do
+ {
+ if (!(new_file_array[part_count++]= get_new_handler(table->s,
+ thd->mem_root,
+ part_elem->engine_type)))
+ {
+ mem_alloc_error(sizeof(handler));
+ DBUG_RETURN(TRUE);
+ }
+ } while (++j < no_subparts);
+ }
+ } while (++i < no_parts);
+
+ /*
+ Step 5:
+ Create the new partitions and also open, lock and call external_lock
+ on them to prepare them for copy phase and also for later close
+ calls
+ */
+ i= 0;
+ part_count= 0;
+ part_it.rewind();
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_ADDED ||
+ part_elem->part_state == PART_CHANGED)
{
/*
- This part is to be dropped, meaning the part or all its subparts.
+        A new partition needs to be created. PART_TO_BE_ADDED means an
+ entirely new partition and PART_CHANGED means a changed partition
+ that will still exist with either more or less data in it.
*/
+ uint name_variant= NORMAL_PART_NAME;
+ if (part_elem->part_state == PART_CHANGED ||
+ (part_elem->part_state == PART_TO_BE_ADDED && temp_partitions))
+ name_variant= TEMP_PART_NAME;
if (is_sub_partitioned(m_part_info))
{
List_iterator<partition_element> sub_it(part_elem->subpartitions);
@@ -436,44 +1462,202 @@ int ha_partition::drop_partitions(const char *path)
partition_element *sub_elem= sub_it++;
create_subpartition_name(part_name_buff, path,
part_elem->partition_name,
- sub_elem->partition_name);
+ sub_elem->partition_name,
+ name_variant);
part= i * no_subparts + j;
- DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
- error= m_file[part]->delete_table((const char *) part_name_buff);
+ DBUG_PRINT("info", ("Add subpartition %s", part_name_buff));
+ if ((error= prepare_new_partition(table, create_info,
+ new_file_array[part],
+ (const char *)part_name_buff)))
+ {
+ cleanup_new_partition(part_count);
+ DBUG_RETURN(TRUE);
+ }
+ m_added_file[part_count++]= new_file_array[part];
} while (++j < no_subparts);
}
else
{
create_partition_name(part_name_buff, path,
- part_elem->partition_name);
- DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
- error= m_file[i]->delete_table((const char *) part_name_buff);
+ part_elem->partition_name, name_variant,
+ TRUE);
+ DBUG_PRINT("info", ("Add partition %s", part_name_buff));
+ if ((error= prepare_new_partition(table, create_info,
+ new_file_array[i],
+ (const char *)part_name_buff)))
+ {
+ cleanup_new_partition(part_count);
+ DBUG_RETURN(TRUE);
+ }
+ m_added_file[part_count++]= new_file_array[i];
}
}
} while (++i < no_parts);
- DBUG_RETURN(error);
+
+ /*
+ Step 6:
+ State update to prepare for next write of the frm file.
+ */
+ i= 0;
+ part_it.rewind();
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_TO_BE_ADDED)
+ part_elem->part_state= PART_IS_ADDED;
+ else if (part_elem->part_state == PART_CHANGED)
+ part_elem->part_state= PART_IS_CHANGED;
+ else if (part_elem->part_state == PART_REORGED_DROPPED)
+ part_elem->part_state= PART_TO_BE_DROPPED;
+ } while (++i < no_parts);
+ for (i= 0; i < temp_partitions; i++)
+ {
+ partition_element *part_elem= t_it++;
+ DBUG_ASSERT(part_elem->part_state == PART_TO_BE_REORGED);
+ part_elem->part_state= PART_TO_BE_DROPPED;
+ }
+ m_new_file= new_file_array;
+ DBUG_RETURN(copy_partitions(copied, deleted));
}
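
Since change_partitions() spans several hunks, an outline of its six
steps may help when reading the code above (restatement only):

  /*
    1. Count reorganised partitions, allocate m_reorged_file[].
    2. Count partitions remaining after the change, allocate
       new_file_array[] at twice that size; the upper half becomes
       m_added_file[].
    3. Fill m_reorged_file[] with the handlers being replaced or dropped.
    4. Fill new_file_array[]: reuse existing handlers for PART_NORMAL,
       create fresh ones for PART_CHANGED and PART_TO_BE_ADDED.
    5. create/open/external_lock every new partition through
       prepare_new_partition(), recording each in m_added_file[].
    6. Advance part_state for the coming frm rewrite, then hand over
       to copy_partitions().
  */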
+
+/*
+ Copy partitions as part of ALTER TABLE of partitions
+
+ SYNOPSIS
+ copy_partitions()
+ out:copied Number of records copied
+ out:deleted Number of records deleted
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ change_partitions has done all the preparations, now it is time to
+ actually copy the data from the reorganised partitions to the new
+ partitions.
+*/
+
+int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
+{
+ uint reorg_part= 0;
+ int result= 0;
+ DBUG_ENTER("ha_partition::copy_partitions");
+
+ while (reorg_part < m_reorged_parts)
+ {
+ handler *file= m_reorged_file[reorg_part];
+ uint32 new_part;
+
+ late_extra_cache(reorg_part);
+ if ((result= file->ha_rnd_init(1)))
+ goto error;
+ while (TRUE)
+ {
+ if ((result= file->rnd_next(m_rec0)))
+ {
+ if (result == HA_ERR_RECORD_DELETED)
+ continue; //Probably MyISAM
+ if (result != HA_ERR_END_OF_FILE)
+ goto error;
+ /*
+ End-of-file reached, break out to continue with next partition or
+ end the copy process.
+ */
+ break;
+ }
+ /* Found record to insert into new handler */
+ if (m_part_info->get_partition_id(m_part_info, &new_part))
+ {
+ /*
+ This record is in the original table but will not be in the new
+ table since it doesn't fit into any partition any longer due to
+ changed partitioning ranges or list values.
+ */
+        (*deleted)++;
+ }
+ else
+ {
+ /* Copy record to new handler */
+        (*copied)++;
+ if ((result= m_new_file[new_part]->write_row(m_rec0)))
+ goto error;
+ }
+ }
+ late_extra_no_cache(reorg_part);
+ file->rnd_end();
+ reorg_part++;
+ }
+ DBUG_RETURN(FALSE);
+error:
+ print_error(result, MYF(0));
+ DBUG_RETURN(TRUE);
+}
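
The copied/deleted statistics are accumulated through the pointer
arguments (hence the (*copied)++ form above), so a caller can sum them
across partitions. A hypothetical caller, sketched:

  ulonglong copied= 0, deleted= 0;
  /* part_handler and the surrounding error handling are assumptions */
  if (part_handler->change_partitions(create_info, path,
                                      &copied, &deleted, NULL, 0))
  {
    /* error already reported via print_error() inside the handler */
  }
  else
    DBUG_PRINT("info", ("copied: %llu, deleted: %llu", copied, deleted));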
+
+
+/*
+ Update create info as part of ALTER TABLE
+
+ SYNOPSIS
+ update_create_info()
+ create_info Create info from ALTER TABLE
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Method empty so far
+*/
+
void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
{
return;
}
+/*
+ Change comments specific to handler
+
+ SYNOPSIS
+ update_table_comment()
+ comment Original comment
+
+ RETURN VALUE
+ new comment
+
+ DESCRIPTION
+ No comment changes so far
+*/
+
char *ha_partition::update_table_comment(const char *comment)
{
- return (char*) comment; // Nothing to change
+ return (char*) comment; /* Nothing to change */
}
/*
- Common routine to handle delete_table and rename_table.
- The routine uses the partition handler file to get the
- names of the partition instances. Both these routines
- are called after creating the handler without table
- object and thus the file is needed to discover the
- names of the partitions and the underlying storage engines.
+ Handle delete, rename and create table
+
+ SYNOPSIS
+ del_ren_cre_table()
+ from Full path of old table
+ to Full path of new table
+ table_arg Table object
+ create_info Create info
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Common routine to handle delete_table and rename_table.
+ The routine uses the partition handler file to get the
+ names of the partition instances. Both these routines
+ are called after creating the handler without table
+ object and thus the file is needed to discover the
+ names of the partitions and the underlying storage engines.
*/
uint ha_partition::del_ren_cre_table(const char *from,
@@ -481,7 +1665,8 @@ uint ha_partition::del_ren_cre_table(const char *from,
TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
- int save_error= 0, error;
+ int save_error= 0;
+ int error;
char from_buff[FN_REFLEN], to_buff[FN_REFLEN];
char *name_buffer_ptr;
uint i;
@@ -496,10 +1681,12 @@ uint ha_partition::del_ren_cre_table(const char *from,
i= 0;
do
{
- create_partition_name(from_buff, from, name_buffer_ptr);
+ create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
if (to != NULL)
{ // Rename branch
- create_partition_name(to_buff, to, name_buffer_ptr);
+ create_partition_name(to_buff, to, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
error= (*file)->rename_table((const char*) from_buff,
(const char*) to_buff);
}
@@ -518,12 +1705,23 @@ uint ha_partition::del_ren_cre_table(const char *from,
DBUG_RETURN(save_error);
}
+/*
+ Find partition based on partition id
+
+ SYNOPSIS
+ find_partition_element()
+ part_id Partition id of partition looked for
+
+ RETURN VALUE
+ >0 Reference to partition_element
+ 0 Partition not found
+*/
partition_element *ha_partition::find_partition_element(uint part_id)
{
uint i;
uint curr_part_id= 0;
- List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+ List_iterator_fast <partition_element> part_it(m_part_info->partitions);
for (i= 0; i < m_part_info->no_parts; i++)
{
@@ -549,18 +1747,32 @@ partition_element *ha_partition::find_partition_element(uint part_id)
}
+/*
+ Set up table share object before calling create on underlying handler
+
+ SYNOPSIS
+ set_up_table_before_create()
+ table Table object
+ info Create info
+ part_id Partition id of partition to set-up
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Set up
+ 1) Comment on partition
+ 2) MAX_ROWS, MIN_ROWS on partition
+ 3) Index file name on partition
+ 4) Data file name on partition
+*/
+
void ha_partition::set_up_table_before_create(TABLE *table,
HA_CREATE_INFO *info,
uint part_id)
{
- /*
- Set up
- 1) Comment on partition
- 2) MAX_ROWS, MIN_ROWS on partition
- 3) Index file name on partition
- 4) Data file name on partition
- */
partition_element *part_elem= find_partition_element(part_id);
+
if (!part_elem)
return; // Fatal error
table->s->max_rows= part_elem->part_max_rows;
@@ -571,53 +1783,95 @@ void ha_partition::set_up_table_before_create(TABLE *table,
/*
- Routine used to add two names with '_' in between then. Service routine
- to create_handler_file
- Include the NULL in the count of characters since it is needed as separator
- between the partition names.
+ Add two names together
+
+ SYNOPSIS
+ name_add()
+ out:dest Destination string
+ first_name First name
+ sec_name Second name
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+    Routine used to add two names with '#SP#' in between them. Service
+    routine to create_handler_file.
+    Include the NULL in the count of characters since it is needed as a
+    separator between the partition names.
*/
static uint name_add(char *dest, const char *first_name, const char *sec_name)
{
- return (uint) (strxmov(dest, first_name, "_", sec_name, NullS) -dest) + 1;
+ return (uint) (strxmov(dest, first_name, "#SP#", sec_name, NullS) -dest) + 1;
}
/*
- Method used to create handler file with names of partitions, their
- engine types and the number of partitions.
+ Create the special .par file
+
+ SYNOPSIS
+ create_handler_file()
+ name Full path of table name
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Method used to create handler file with names of partitions, their
+ engine types and the number of partitions.
*/
bool ha_partition::create_handler_file(const char *name)
{
partition_element *part_elem, *subpart_elem;
uint i, j, part_name_len, subpart_name_len;
- uint tot_partition_words, tot_name_len;
+ uint tot_partition_words, tot_name_len, no_parts;
+ uint tot_parts= 0;
uint tot_len_words, tot_len_byte, chksum, tot_name_words;
char *name_buffer_ptr;
uchar *file_buffer, *engine_array;
bool result= TRUE;
char file_name[FN_REFLEN];
+ char part_name[FN_REFLEN];
+ char subpart_name[FN_REFLEN];
File file;
- List_iterator_fast < partition_element > part_it(m_part_info->partitions);
+ List_iterator_fast <partition_element> part_it(m_part_info->partitions);
DBUG_ENTER("create_handler_file");
- DBUG_PRINT("info", ("table name = %s", name));
+ no_parts= m_part_info->partitions.elements;
+ DBUG_PRINT("info", ("table name = %s, no_parts = %u", name,
+ no_parts));
tot_name_len= 0;
- for (i= 0; i < m_part_info->no_parts; i++)
+ for (i= 0; i < no_parts; i++)
{
part_elem= part_it++;
- part_name_len= strlen(part_elem->partition_name);
+ if (part_elem->part_state != PART_NORMAL &&
+ part_elem->part_state != PART_IS_ADDED &&
+ part_elem->part_state != PART_IS_CHANGED)
+ continue;
+ tablename_to_filename(part_elem->partition_name, part_name,
+ FN_REFLEN);
+ part_name_len= strlen(part_name);
if (!m_is_sub_partitioned)
+ {
tot_name_len+= part_name_len + 1;
+ tot_parts++;
+ }
else
{
- List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+ List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
for (j= 0; j < m_part_info->no_subparts; j++)
{
subpart_elem= sub_it++;
- subpart_name_len= strlen(subpart_elem->partition_name);
- tot_name_len+= part_name_len + subpart_name_len + 2;
+ tablename_to_filename(subpart_elem->partition_name,
+ subpart_name,
+ FN_REFLEN);
+ subpart_name_len= strlen(subpart_name);
+ tot_name_len+= part_name_len + subpart_name_len + 5;
+ tot_parts++;
}
}
}
@@ -634,7 +1888,7 @@ bool ha_partition::create_handler_file(const char *name)
All padding bytes are zeroed
*/
- tot_partition_words= (m_tot_parts + 3) / 4;
+ tot_partition_words= (tot_parts + 3) / 4;
tot_name_words= (tot_name_len + 3) / 4;
tot_len_words= 4 + tot_partition_words + tot_name_words;
tot_len_byte= 4 * tot_len_words;
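
A worked example of the sizing arithmetic, with invented numbers:

  /*
    tot_parts           = 5
    tot_partition_words = (5 + 3) / 4   = 2    engine bytes, padded
    tot_name_len        = 23
    tot_name_words      = (23 + 3) / 4  = 6    name bytes, padded
    tot_len_words       = 4 + 2 + 6     = 12
    tot_len_byte        = 4 * 12        = 48   bytes written in total
  */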
@@ -643,25 +1897,34 @@ bool ha_partition::create_handler_file(const char *name)
engine_array= (file_buffer + 12);
name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4));
part_it.rewind();
- for (i= 0; i < m_part_info->no_parts; i++)
+ for (i= 0; i < no_parts; i++)
{
part_elem= part_it++;
+ if (part_elem->part_state != PART_NORMAL &&
+ part_elem->part_state != PART_IS_ADDED &&
+ part_elem->part_state != PART_IS_CHANGED)
+ continue;
if (!m_is_sub_partitioned)
{
- name_buffer_ptr= strmov(name_buffer_ptr, part_elem->partition_name)+1;
+ tablename_to_filename(part_elem->partition_name, part_name, FN_REFLEN);
+ name_buffer_ptr= strmov(name_buffer_ptr, part_name)+1;
*engine_array= (uchar) ha_legacy_type(part_elem->engine_type);
DBUG_PRINT("info", ("engine: %u", *engine_array));
engine_array++;
}
else
{
- List_iterator_fast<partition_element> sub_it(part_elem->subpartitions);
+ List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
for (j= 0; j < m_part_info->no_subparts; j++)
{
subpart_elem= sub_it++;
+ tablename_to_filename(part_elem->partition_name, part_name,
+ FN_REFLEN);
+ tablename_to_filename(subpart_elem->partition_name, subpart_name,
+ FN_REFLEN);
name_buffer_ptr+= name_add(name_buffer_ptr,
- part_elem->partition_name,
- subpart_elem->partition_name);
+ part_name,
+ subpart_name);
*engine_array= (uchar) ha_legacy_type(part_elem->engine_type);
engine_array++;
}
@@ -669,7 +1932,7 @@ bool ha_partition::create_handler_file(const char *name)
}
chksum= 0;
int4store(file_buffer, tot_len_words);
- int4store(file_buffer + 8, m_tot_parts);
+ int4store(file_buffer + 8, tot_parts);
int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len);
for (i= 0; i < tot_len_words; i++)
chksum^= uint4korr(file_buffer + 4 * i);
@@ -693,6 +1956,15 @@ bool ha_partition::create_handler_file(const char *name)
DBUG_RETURN(result);
}
+/*
+ Clear handler variables and free some memory
+
+ SYNOPSIS
+ clear_handler_file()
+
+ RETURN VALUE
+ NONE
+*/
void ha_partition::clear_handler_file()
{
@@ -703,6 +1975,16 @@ void ha_partition::clear_handler_file()
m_engine_array= NULL;
}
+/*
+ Create underlying handler objects
+
+ SYNOPSIS
+ create_handlers()
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+*/
bool ha_partition::create_handlers()
{
@@ -736,10 +2018,20 @@ bool ha_partition::create_handlers()
DBUG_RETURN(FALSE);
}
+/*
+ Create underlying handler objects from partition info
+
+ SYNOPSIS
+ new_handlers_from_part_info()
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+*/
bool ha_partition::new_handlers_from_part_info()
{
- uint i, j;
+ uint i, j, part_count;
partition_element *part_elem;
uint alloc_len= (m_tot_parts + 1) * sizeof(handler*);
List_iterator_fast <partition_element> part_it(m_part_info->partitions);
@@ -747,23 +2039,22 @@ bool ha_partition::new_handlers_from_part_info()
DBUG_ENTER("ha_partition::new_handlers_from_part_info");
if (!(m_file= (handler **) sql_alloc(alloc_len)))
- goto error;
+ {
+ mem_alloc_error(alloc_len);
+ goto error_end;
+ }
bzero(m_file, alloc_len);
DBUG_ASSERT(m_part_info->no_parts > 0);
i= 0;
+ part_count= 0;
/*
Don't know the size of the underlying storage engine, invent a number of
bytes allocated for error message if allocation fails
*/
- alloc_len= 128;
do
{
part_elem= part_it++;
- if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
- part_elem->engine_type)))
- goto error;
- DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type)));
if (m_is_sub_partitioned)
{
for (j= 0; j < m_part_info->no_subparts; j++)
@@ -771,9 +2062,18 @@ bool ha_partition::new_handlers_from_part_info()
if (!(m_file[i]= get_new_handler(table_share, thd->mem_root,
part_elem->engine_type)))
goto error;
- DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type)));
+ DBUG_PRINT("info", ("engine_type: %u",
+ (uint) ha_legacy_type(part_elem->engine_type)));
}
}
+ else
+ {
+ if (!(m_file[part_count++]= get_new_handler(table_share, thd->mem_root,
+ part_elem->engine_type)))
+ goto error;
+ DBUG_PRINT("info", ("engine_type: %u",
+ (uint) ha_legacy_type(part_elem->engine_type)));
+ }
} while (++i < m_part_info->no_parts);
if (part_elem->engine_type == &myisam_hton)
{
@@ -782,14 +2082,26 @@ bool ha_partition::new_handlers_from_part_info()
}
DBUG_RETURN(FALSE);
error:
- my_error(ER_OUTOFMEMORY, MYF(0), alloc_len);
+ mem_alloc_error(sizeof(handler));
+error_end:
DBUG_RETURN(TRUE);
}
/*
- Open handler file to get partition names, engine types and number of
- partitions.
+ Get info about partition engines and their names from the .par file
+
+ SYNOPSIS
+ get_from_handler_file()
+ name Full path of table name
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ Open handler file to get partition names, engine types and number of
+ partitions.
*/
bool ha_partition::get_from_handler_file(const char *name)
@@ -825,6 +2137,7 @@ bool ha_partition::get_from_handler_file(const char *name)
if (chksum)
goto err2;
m_tot_parts= uint4korr((file_buffer) + 8);
+ DBUG_PRINT("info", ("No of parts = %u", m_tot_parts));
tot_partition_words= (m_tot_parts + 3) / 4;
if (!(engine_array= (handlerton **) my_malloc(m_tot_parts * sizeof(handlerton*),MYF(0))))
goto err2;
@@ -854,17 +2167,31 @@ err1:
DBUG_RETURN(TRUE);
}
+
/****************************************************************************
MODULE open/close object
****************************************************************************/
/*
- Used for opening tables. The name will be the name of the file.
- A table is opened when it needs to be opened. For instance
- when a request comes in for a select on the table (tables are not
- open and closed for each request, they are cached).
+ Open handler object
- Called from handler.cc by handler::ha_open(). The server opens all tables
- by calling ha_open() which then calls the handler specific open().
+ SYNOPSIS
+ open()
+ name Full path of table name
+ mode Open mode flags
+ test_if_locked ?
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+
+ DESCRIPTION
+ Used for opening tables. The name will be the name of the file.
+ A table is opened when it needs to be opened. For instance
+ when a request comes in for a select on the table (tables are not
+ open and closed for each request, they are cached).
+
+ Called from handler.cc by handler::ha_open(). The server opens all tables
+ by calling ha_open() which then calls the handler specific open().
*/
int ha_partition::open(const char *name, int mode, uint test_if_locked)
@@ -877,6 +2204,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
DBUG_ENTER("ha_partition::open");
ref_length= 0;
+ m_mode= mode;
+ m_open_test_lock= test_if_locked;
m_part_field_array= m_part_info->full_part_field_array;
if (get_from_handler_file(name))
DBUG_RETURN(1);
@@ -912,7 +2241,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
file= m_file;
do
{
- create_partition_name(name_buff, name, name_buffer_ptr);
+ create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
if ((error= (*file)->ha_open(table, (const char*) name_buff, mode,
test_if_locked)))
goto err_handler;
@@ -934,7 +2264,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
/*
Initialise priority queue, initialised to reading forward.
*/
- if ((error= init_queue(&queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
+ if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
0, key_rec_cmp, (void*)this)))
goto err_handler;
/*
@@ -952,28 +2282,45 @@ err_handler:
DBUG_RETURN(error);
}
+
/*
- Closes a table. We call the free_share() function to free any resources
- that we have allocated in the "shared" structure.
+ Close handler object
- Called from sql_base.cc, sql_select.cc, and table.cc.
- In sql_select.cc it is only used to close up temporary tables or during
- the process where a temporary table is converted over to being a
- myisam table.
- For sql_base.cc look at close_data_tables().
+ SYNOPSIS
+ close()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Called from sql_base.cc, sql_select.cc, and table.cc.
+ In sql_select.cc it is only used to close up temporary tables or during
+ the process where a temporary table is converted over to being a
+ myisam table.
+ For sql_base.cc look at close_data_tables().
*/
int ha_partition::close(void)
{
handler **file;
+ bool first= TRUE;
DBUG_ENTER("ha_partition::close");
- delete_queue(&queue);
+ delete_queue(&m_queue);
file= m_file;
+
+repeat:
do
{
(*file)->close();
} while (*(++file));
+ if (first && m_added_file && m_added_file[0])
+ {
+ file= m_added_file;
+ first= FALSE;
+ goto repeat;
+ }
DBUG_RETURN(0);
}
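
close() and external_lock() below now share a two-pass traversal: the
regular m_file array first and then, while an ALTER is in flight, the
m_added_file array. Extracted as a standalone sketch (the visit callback
is an assumption for illustration):

  static void for_each_handler(handler **regular, handler **added,
                               void (*visit)(handler *))
  {
    handler **file= regular;
    bool first= TRUE;

  repeat:
    do
    {
      visit(*file);                  /* close, lock, ... */
    } while (*(++file));
    if (first && added && added[0])
    {
      file= added;                   /* second pass: partitions being added */
      first= FALSE;
      goto repeat;
    }
  }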
@@ -988,30 +2335,47 @@ int ha_partition::close(void)
*/
/*
- First you should go read the section "locking functions for mysql" in
- lock.cc to understand this.
- This create a lock on the table. If you are implementing a storage engine
- that can handle transactions look at ha_berkely.cc to see how you will
- want to goo about doing this. Otherwise you should consider calling
- flock() here.
- Originally this method was used to set locks on file level to enable
- several MySQL Servers to work on the same data. For transactional
- engines it has been "abused" to also mean start and end of statements
- to enable proper rollback of statements and transactions. When LOCK
- TABLES has been issued the start_stmt method takes over the role of
- indicating start of statement but in this case there is no end of
- statement indicator(?).
+ Set external locks on table
- Called from lock.cc by lock_external() and unlock_external(). Also called
- from sql_table.cc by copy_data_between_tables().
+ SYNOPSIS
+ external_lock()
+ thd Thread object
+ lock_type Type of external lock
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ First you should go read the section "locking functions for mysql" in
+ lock.cc to understand this.
+    This creates a lock on the table. If you are implementing a storage engine
+ that can handle transactions look at ha_berkeley.cc to see how you will
+ want to go about doing this. Otherwise you should consider calling
+ flock() here.
+ Originally this method was used to set locks on file level to enable
+ several MySQL Servers to work on the same data. For transactional
+ engines it has been "abused" to also mean start and end of statements
+ to enable proper rollback of statements and transactions. When LOCK
+ TABLES has been issued the start_stmt method takes over the role of
+ indicating start of statement but in this case there is no end of
+ statement indicator(?).
+
+ Called from lock.cc by lock_external() and unlock_external(). Also called
+ from sql_table.cc by copy_data_between_tables().
*/
int ha_partition::external_lock(THD *thd, int lock_type)
{
uint error;
handler **file;
+ bool first= TRUE;
DBUG_ENTER("ha_partition::external_lock");
+
file= m_file;
+ m_lock_type= lock_type;
+
+repeat:
do
{
if ((error= (*file)->external_lock(thd, lock_type)))
@@ -1020,7 +2384,13 @@ int ha_partition::external_lock(THD *thd, int lock_type)
goto err_handler;
}
} while (*(++file));
- m_lock_type= lock_type; // For the future (2009?)
+ if (first && m_added_file && m_added_file[0])
+ {
+ DBUG_ASSERT(lock_type == F_UNLCK);
+ file= m_added_file;
+ first= FALSE;
+ goto repeat;
+ }
DBUG_RETURN(0);
err_handler:
@@ -1031,36 +2401,49 @@ err_handler:
/*
- The idea with handler::store_lock() is the following:
-
- The statement decided which locks we should need for the table
- for updates/deletes/inserts we get WRITE locks, for SELECT... we get
- read locks.
-
- Before adding the lock into the table lock handler (see thr_lock.c)
- mysqld calls store lock with the requested locks. Store lock can now
- modify a write lock to a read lock (or some other lock), ignore the
- lock (if we don't want to use MySQL table locks at all) or add locks
- for many tables (like we do when we are using a MERGE handler).
-
- Berkeley DB for partition changes all WRITE locks to TL_WRITE_ALLOW_WRITE
- (which signals that we are doing WRITES, but we are still allowing other
- reader's and writer's.
-
- When releasing locks, store_lock() are also called. In this case one
- usually doesn't have to do anything.
-
- store_lock is called when holding a global mutex to ensure that only
- one thread at a time changes the locking information of tables.
-
- In some exceptional cases MySQL may send a request for a TL_IGNORE;
- This means that we are requesting the same lock as last time and this
- should also be ignored. (This may happen when someone does a flush
- table when we have opened a part of the tables, in which case mysqld
- closes and reopens the tables and tries to get the same locks at last
- time). In the future we will probably try to remove this.
+ Get the lock(s) for the table and perform conversion of locks if needed
- Called from lock.cc by get_lock_data().
+ SYNOPSIS
+ store_lock()
+ thd Thread object
+ to Lock object array
+ lock_type Table lock type
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ The idea with handler::store_lock() is the following:
+
+ The statement decided which locks we should need for the table
+ for updates/deletes/inserts we get WRITE locks, for SELECT... we get
+ read locks.
+
+ Before adding the lock into the table lock handler (see thr_lock.c)
+ mysqld calls store lock with the requested locks. Store lock can now
+ modify a write lock to a read lock (or some other lock), ignore the
+ lock (if we don't want to use MySQL table locks at all) or add locks
+ for many tables (like we do when we are using a MERGE handler).
+
+    Berkeley DB, for example, changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+    (which signals that we are doing WRITES, but we are still allowing other
+    readers and writers).
+
+ When releasing locks, store_lock() is also called. In this case one
+ usually doesn't have to do anything.
+
+ store_lock is called when holding a global mutex to ensure that only
+ one thread at a time changes the locking information of tables.
+
+ In some exceptional cases MySQL may send a request for a TL_IGNORE;
+ This means that we are requesting the same lock as last time and this
+ should also be ignored. (This may happen when someone does a flush
+ table when we have opened a part of the tables, in which case mysqld
+ closes and reopens the tables and tries to get the same locks as last
+ time). In the future we will probably try to remove this.
+
+ Called from lock.cc by get_lock_data().
*/
THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
@@ -1069,6 +2452,7 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
{
handler **file;
DBUG_ENTER("ha_partition::store_lock");
+
file= m_file;
do
{
@@ -1077,12 +2461,29 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
DBUG_RETURN(to);
}
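
A minimal sketch of the forwarding loop this comment describes, assuming the
body elided from this hunk simply delegates to every underlying handler (each
call appends that partition's lock data and advances the output pointer):

    THR_LOCK_DATA **forward_store_lock(THD *thd, handler **m_file,
                                       THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type)
    {
      handler **file= m_file;
      do
      {
        to= (*file)->store_lock(thd, to, lock_type);  // append and advance
      } while (*(++file));
      return to;
    }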
+/*
+ Start a statement when table is locked
+
+ SYNOPSIS
+ start_stmt()
+ thd Thread object
+ lock_type Type of external lock
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+    This method is called instead of external_lock() when the table is locked
+ before the statement is executed.
+*/
int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
{
int error= 0;
handler **file;
DBUG_ENTER("ha_partition::start_stmt");
+
file= m_file;
do
{
@@ -1094,22 +2495,41 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
/*
- Returns the number of store locks needed in call to store lock.
- We return number of partitions since we call store_lock on each
- underlying handler. Assists the above functions in allocating
- sufficient space for lock structures.
+ Get number of lock objects returned in store_lock
+
+ SYNOPSIS
+ lock_count()
+
+ RETURN VALUE
+ Number of locks returned in call to store_lock
+
+ DESCRIPTION
+ Returns the number of store locks needed in call to store lock.
+ We return number of partitions since we call store_lock on each
+ underlying handler. Assists the above functions in allocating
+ sufficient space for lock structures.
*/
uint ha_partition::lock_count() const
{
DBUG_ENTER("ha_partition::lock_count");
+
DBUG_RETURN(m_no_locks);
}
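
Caller-side sketch of how lock_count() is used (modelled loosely on
get_lock_data() in lock.cc and simplified; thd, table_count and tables[] are
assumed): the server sums lock_count() over all tables to size the array that
store_lock() then fills.

    uint slots= 0;
    for (uint i= 0; i < table_count; i++)
      slots+= tables[i]->file->lock_count();        // m_no_locks here
    THR_LOCK_DATA **locks= (THR_LOCK_DATA**)
      my_malloc(sizeof(THR_LOCK_DATA*) * slots, MYF(MY_WME));
    THR_LOCK_DATA **pos= locks;
    for (uint i= 0; i < table_count; i++)
      pos= tables[i]->file->store_lock(thd, pos, lock_type);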
/*
- Record currently processed was not in the result set of the statement
- and is thus unlocked. Used for UPDATE and DELETE queries.
+ Unlock last accessed row
+
+ SYNOPSIS
+ unlock_row()
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Record currently processed was not in the result set of the statement
+ and is thus unlocked. Used for UPDATE and DELETE queries.
*/
void ha_partition::unlock_row()
@@ -1124,37 +2544,49 @@ void ha_partition::unlock_row()
****************************************************************************/
/*
- write_row() inserts a row. buf() is a byte array of data, normally record[0].
+ Insert a row to the table
- You can use the field information to extract the data from the native byte
- array type.
+ SYNOPSIS
+ write_row()
+ buf The row in MySQL Row Format
- Example of this would be:
- for (Field **field=table->field ; *field ; field++)
- {
- ...
- }
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ write_row() inserts a row. buf() is a byte array of data, normally
+ record[0].
+
+ You can use the field information to extract the data from the native byte
+ array type.
+
+ Example of this would be:
+ for (Field **field=table->field ; *field ; field++)
+ {
+ ...
+ }
- See ha_tina.cc for an partition of extracting all of the data as strings.
- ha_berekly.cc has an partition of how to store it intact by "packing" it
- for ha_berkeley's own native storage type.
+    See ha_tina.cc for an example of extracting all of the data as strings.
+    ha_berkeley.cc has an example of how to store it intact by "packing" it
+ for ha_berkeley's own native storage type.
- See the note for update_row() on auto_increments and timestamps. This
- case also applied to write_row().
+ See the note for update_row() on auto_increments and timestamps. This
+    case also applies to write_row().
- Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
- sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
+ Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
+ sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc.
- ADDITIONAL INFO:
+ ADDITIONAL INFO:
- Most handlers set timestamp when calling write row if any such fields
- exists. Since we are calling an underlying handler we assume the
- underlying handler will assume this responsibility.
+      Most handlers set timestamp when calling write row if any such fields
+      exist. Since we are calling an underlying handler we assume the
+      underlying handler will assume this responsibility.
- Underlying handlers will also call update_auto_increment to calculate
- the new auto increment value. We will catch the call to
- get_auto_increment and ensure this increment value is maintained by
- only one of the underlying handlers.
+ Underlying handlers will also call update_auto_increment to calculate
+ the new auto increment value. We will catch the call to
+ get_auto_increment and ensure this increment value is maintained by
+ only one of the underlying handlers.
*/
int ha_partition::write_row(byte * buf)
@@ -1180,7 +2612,7 @@ int ha_partition::write_row(byte * buf)
}
#endif
if (unlikely(error))
- DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+ DBUG_RETURN(error);
m_last_part= part_id;
DBUG_PRINT("info", ("Insert in partition %d", part_id));
DBUG_RETURN(m_file[part_id]->write_row(buf));
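
Condensed sketch of the routing shown above; get_part_id_for_row() is a
hypothetical stand-in for the partition-function evaluation elided from this
hunk:

    int write_row_sketch(byte *buf)
    {
      uint32 part_id;
      int error= get_part_id_for_row(buf, &part_id);  // hypothetical helper
      if (unlikely(error))
        return error;              // no partition matched, or other failure
      m_last_part= part_id;
      return m_file[part_id]->write_row(buf);
    }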
@@ -1188,23 +2620,34 @@ int ha_partition::write_row(byte * buf)
/*
- Yes, update_row() does what you expect, it updates a row. old_data will
- have the previous row record in it, while new_data will have the newest
- data in it.
- Keep in mind that the server can do updates based on ordering if an
- ORDER BY clause was used. Consecutive ordering is not guarenteed.
-
- Currently new_data will not have an updated auto_increament record, or
- and updated timestamp field. You can do these for partition by doing these:
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
- if (table->next_number_field && record == table->record[0])
- update_auto_increment();
-
- Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
- new_data is always record[0]
- old_data is normally record[1] but may be anything
+ Update an existing row
+ SYNOPSIS
+ update_row()
+ old_data Old record in MySQL Row Format
+ new_data New record in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Yes, update_row() does what you expect, it updates a row. old_data will
+ have the previous row record in it, while new_data will have the newest
+ data in it.
+ Keep in mind that the server can do updates based on ordering if an
+    ORDER BY clause was used. Consecutive ordering is not guaranteed.
+
+    Currently new_data will not have an updated auto_increment record, or
+    an updated timestamp field. You can do this for example by doing:
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
+ if (table->next_number_field && record == table->record[0])
+ update_auto_increment();
+
+ Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
+ new_data is always record[0]
+ old_data is normally record[1] but may be anything
*/
int ha_partition::update_row(const byte *old_data, byte *new_data)
@@ -1249,21 +2692,31 @@ int ha_partition::update_row(const byte *old_data, byte *new_data)
/*
- This will delete a row. buf will contain a copy of the row to be deleted.
- The server will call this right after the current row has been read
- (from either a previous rnd_xxx() or index_xxx() call).
- If you keep a pointer to the last row or can access a primary key it will
- make doing the deletion quite a bit easier.
- Keep in mind that the server does no guarentee consecutive deletions.
- ORDER BY clauses can be used.
-
- Called in sql_acl.cc and sql_udf.cc to manage internal table information.
- Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
- it is used for removing duplicates while in insert it is used for REPLACE
- calls.
-
- buf is either record[0] or record[1]
+ Remove an existing row
+ SYNOPSIS
+    delete_row()
+ buf Deleted row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error Code
+ 0 Success
+
+ DESCRIPTION
+ This will delete a row. buf will contain a copy of the row to be deleted.
+ The server will call this right after the current row has been read
+ (from either a previous rnd_xxx() or index_xxx() call).
+ If you keep a pointer to the last row or can access a primary key it will
+ make doing the deletion quite a bit easier.
+    Keep in mind that the server does not guarantee consecutive deletions.
+ ORDER BY clauses can be used.
+
+ Called in sql_acl.cc and sql_udf.cc to manage internal table information.
+ Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select
+ it is used for removing duplicates while in insert it is used for REPLACE
+ calls.
+
+ buf is either record[0] or record[1]
*/
int ha_partition::delete_row(const byte *buf)
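
A hedged sketch of the caller pattern the comment describes, with
matches_predicate() standing in for the WHERE evaluation: the server reads a
row and then immediately asks for its deletion.

    h->rnd_init(TRUE);
    while (h->rnd_next(table->record[0]) == 0)
    {
      if (matches_predicate(table))           // hypothetical predicate
        h->delete_row(table->record[0]);      // delete the row just read
    }
    h->rnd_end();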
@@ -1282,15 +2735,25 @@ int ha_partition::delete_row(const byte *buf)
/*
- Used to delete all rows in a table. Both for cases of truncate and
- for cases where the optimizer realizes that all rows will be
- removed as a result of a SQL statement.
+ Delete all rows in a table
- Called from item_sum.cc by Item_func_group_concat::clear(),
- Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
- Called from sql_delete.cc by mysql_delete().
- Called from sql_select.cc by JOIN::reinit().
- Called from sql_union.cc by st_select_lex_unit::exec().
+ SYNOPSIS
+ delete_all_rows()
+
+ RETURN VALUE
+ >0 Error Code
+ 0 Success
+
+ DESCRIPTION
+ Used to delete all rows in a table. Both for cases of truncate and
+ for cases where the optimizer realizes that all rows will be
+ removed as a result of a SQL statement.
+
+ Called from item_sum.cc by Item_func_group_concat::clear(),
+ Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
+ Called from sql_delete.cc by mysql_delete().
+ Called from sql_select.cc by JOIN::reinit().
+ Called from sql_union.cc by st_select_lex_unit::exec().
*/
int ha_partition::delete_all_rows()
@@ -1298,6 +2761,7 @@ int ha_partition::delete_all_rows()
int error;
handler **file;
DBUG_ENTER("ha_partition::delete_all_rows");
+
file= m_file;
do
{
@@ -1307,14 +2771,26 @@ int ha_partition::delete_all_rows()
DBUG_RETURN(0);
}
+
/*
- rows == 0 means we will probably insert many rows
+ Start a large batch of insert rows
+
+ SYNOPSIS
+ start_bulk_insert()
+ rows Number of rows to insert
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ rows == 0 means we will probably insert many rows
*/
void ha_partition::start_bulk_insert(ha_rows rows)
{
handler **file;
DBUG_ENTER("ha_partition::start_bulk_insert");
+
if (!rows)
{
      /* Avoid allocating big caches in all underlying handlers */
@@ -1330,6 +2806,17 @@ void ha_partition::start_bulk_insert(ha_rows rows)
}
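
Usage sketch for the bulk-insert pair, assuming a handler instance h and an
estimated row count (0 meaning "probably many rows", per the description
above):

    h->start_bulk_insert(estimated_rows);  // may enable write caching
    for (ha_rows i= 0; i < n; i++)
      h->write_row(row_buf[i]);            // row_buf[]: assumed row buffers
    int error= h->end_bulk_insert();       // flush caches, report errors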
+/*
+ Finish a large batch of insert rows
+
+ SYNOPSIS
+ end_bulk_insert()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
int ha_partition::end_bulk_insert()
{
int error= 0;
@@ -1347,6 +2834,7 @@ int ha_partition::end_bulk_insert()
DBUG_RETURN(error);
}
+
/****************************************************************************
MODULE full table scan
****************************************************************************/
@@ -1358,18 +2846,22 @@ int ha_partition::end_bulk_insert()
scan 0 Initialize for random reads through rnd_pos()
1 Initialize for random scan through rnd_next()
- NOTES
- rnd_init() is called when the server wants the storage engine to do a
- table scan or when the server wants to access data through rnd_pos.
+ RETURN VALUE
+ >0 Error code
+ 0 Success
- When scan is used we will scan one handler partition at a time.
- When preparing for rnd_pos we will init all handler partitions.
- No extra cache handling is needed when scannning is not performed.
+ DESCRIPTION
+ rnd_init() is called when the server wants the storage engine to do a
+ table scan or when the server wants to access data through rnd_pos.
- Before initialising we will call rnd_end to ensure that we clean up from
- any previous incarnation of a table scan.
- Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
- sql_table.cc, and sql_update.cc.
+ When scan is used we will scan one handler partition at a time.
+ When preparing for rnd_pos we will init all handler partitions.
+    No extra cache handling is needed when scanning is not performed.
+
+ Before initialising we will call rnd_end to ensure that we clean up from
+ any previous incarnation of a table scan.
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
*/
int ha_partition::rnd_init(bool scan)
@@ -1423,10 +2915,22 @@ err:
}
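
The two initialisation modes described above, seen from the caller (a hedged
filesort-style sketch; collection of saved_ref values is elided):

    h->rnd_init(1);                 // scan: one partition at a time
    while (h->rnd_next(buf) == 0)
      h->position(buf);             // remember h->ref for this row
    h->rnd_end();

    h->rnd_init(0);                 // prepare all partitions for rnd_pos
    h->rnd_pos(buf, saved_ref);     // re-fetch rows in sorted ref order
    h->rnd_end();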
+/*
+ End of a table scan
+
+ SYNOPSIS
+ rnd_end()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
int ha_partition::rnd_end()
{
handler **file;
DBUG_ENTER("ha_partition::rnd_end");
+
switch (m_scan_value) {
case 2: // Error
break;
@@ -1458,18 +2962,22 @@ int ha_partition::rnd_end()
rnd_next()
buf buffer that should be filled with data
- This is called for each row of the table scan. When you run out of records
- you should return HA_ERR_END_OF_FILE.
- The Field structure for the table is the key to getting data into buf
- in a manner that will allow the server to understand it.
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This is called for each row of the table scan. When you run out of records
+ you should return HA_ERR_END_OF_FILE.
+ The Field structure for the table is the key to getting data into buf
+ in a manner that will allow the server to understand it.
- Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
- sql_table.cc, and sql_update.cc.
+ Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc,
+ sql_table.cc, and sql_update.cc.
*/
int ha_partition::rnd_next(byte *buf)
{
- DBUG_ASSERT(m_scan_value);
uint part_id= m_part_spec.start_part; // Cache of this variable
handler *file= m_file[part_id];
int result= HA_ERR_END_OF_FILE;
@@ -1528,37 +3036,38 @@ end:
}
-inline void store_part_id_in_pos(byte *pos, uint part_id)
-{
- int2store(pos, part_id);
-}
+/*
+ Save position of current row
-inline uint get_part_id_from_pos(const byte *pos)
-{
- return uint2korr(pos);
-}
+ SYNOPSIS
+ position()
+ record Current record in MySQL Row Format
-/*
- position() is called after each call to rnd_next() if the data needs
- to be ordered. You can do something like the following to store
- the position:
- ha_store_ptr(ref, ref_length, current_position);
+ RETURN VALUE
+ NONE
- The server uses ref to store data. ref_length in the above case is
- the size needed to store current_position. ref is just a byte array
- that the server will maintain. If you are using offsets to mark rows, then
- current_position should be the offset. If it is a primary key like in
- BDB, then it needs to be a primary key.
+ DESCRIPTION
+ position() is called after each call to rnd_next() if the data needs
+ to be ordered. You can do something like the following to store
+ the position:
+ ha_store_ptr(ref, ref_length, current_position);
- Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
+ The server uses ref to store data. ref_length in the above case is
+ the size needed to store current_position. ref is just a byte array
+ that the server will maintain. If you are using offsets to mark rows, then
+ current_position should be the offset. If it is a primary key like in
+ BDB, then it needs to be a primary key.
+
+ Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc.
*/
void ha_partition::position(const byte *record)
{
handler *file= m_file[m_last_part];
DBUG_ENTER("ha_partition::position");
+
file->position(record);
- store_part_id_in_pos(ref, m_last_part);
+ int2store(ref, m_last_part);
memcpy((ref + PARTITION_BYTES_IN_POS), file->ref,
(ref_length - PARTITION_BYTES_IN_POS));
@@ -1571,12 +3080,24 @@ void ha_partition::position(const byte *record)
}
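
The ref layout used above: the first PARTITION_BYTES_IN_POS (two) bytes hold
the partition id, followed by the underlying handler's own reference.
Packing and unpacking under that assumption:

    /* pack: partition id first, then the per-partition position */
    int2store(ref, m_last_part);
    memcpy(ref + PARTITION_BYTES_IN_POS, file->ref,
           ref_length - PARTITION_BYTES_IN_POS);

    /* unpack, as rnd_pos() does below */
    uint part_id= uint2korr(ref);
    handler *part_file= m_file[part_id];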
/*
- This is like rnd_next, but you are given a position to use
- to determine the row. The position will be of the type that you stored in
- ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
- or position you saved when position() was called.
- Called from filesort.cc records.cc sql_insert.cc sql_select.cc
- sql_update.cc.
+ Read row using position
+
+ SYNOPSIS
+ rnd_pos()
+ out:buf Row read in MySQL Row Format
+    pos                     Position of read row
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This is like rnd_next, but you are given a position to use
+ to determine the row. The position will be of the type that you stored in
+ ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
+ or position you saved when position() was called.
+ Called from filesort.cc records.cc sql_insert.cc sql_select.cc
+ sql_update.cc.
*/
int ha_partition::rnd_pos(byte * buf, byte *pos)
@@ -1585,7 +3106,7 @@ int ha_partition::rnd_pos(byte * buf, byte *pos)
handler *file;
DBUG_ENTER("ha_partition::rnd_pos");
- part_id= get_part_id_from_pos((const byte *) pos);
+ part_id= uint2korr((const byte *) pos);
DBUG_ASSERT(part_id < m_tot_parts);
file= m_file[part_id];
m_last_part= part_id;
@@ -1613,8 +3134,20 @@ int ha_partition::rnd_pos(byte * buf, byte *pos)
*/
/*
- index_init is always called before starting index scans (except when
- starting through index_read_idx and using read_range variants).
+ Initialise handler before start of index scan
+
+ SYNOPSIS
+ index_init()
+ inx Index number
+    sorted           Whether rows are to be returned in sorted order
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_init is always called before starting index scans (except when
+ starting through index_read_idx and using read_range variants).
*/
int ha_partition::index_init(uint inx, bool sorted)
@@ -1645,8 +3178,18 @@ int ha_partition::index_init(uint inx, bool sorted)
/*
- index_end is called at the end of an index scan to clean up any
- things needed to clean up.
+ End of index scan
+
+ SYNOPSIS
+ index_end()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_end is called at the end of an index scan to clean up any
+ things needed to clean up.
*/
int ha_partition::index_end()
@@ -1671,25 +3214,49 @@ int ha_partition::index_end()
/*
- index_read starts a new index scan using a start key. The MySQL Server
- will check the end key on its own. Thus to function properly the
- partitioned handler need to ensure that it delivers records in the sort
- order of the MySQL Server.
- index_read can be restarted without calling index_end on the previous
- index scan and without calling index_init. In this case the index_read
- is on the same index as the previous index_scan. This is particularly
- used in conjuntion with multi read ranges.
+ Read one record in an index scan and start an index scan
+
+ SYNOPSIS
+ index_read()
+ buf Read row in MySQL Row Format
+ key Key parts in consecutive order
+ key_len Total length of key parts
+ find_flag What type of key condition is used
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_read starts a new index scan using a start key. The MySQL Server
+ will check the end key on its own. Thus to function properly the
+    partitioned handler needs to ensure that it delivers records in the sort
+ order of the MySQL Server.
+ index_read can be restarted without calling index_end on the previous
+ index scan and without calling index_init. In this case the index_read
+ is on the same index as the previous index_scan. This is particularly
+    used in conjunction with multi read ranges.
*/
int ha_partition::index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag)
{
DBUG_ENTER("ha_partition::index_read");
+
end_range= 0;
DBUG_RETURN(common_index_read(buf, key, key_len, find_flag));
}
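
Caller-side sketch of an index scan built from the calls documented above
(key and key_len assumed prepared by the optimizer, process_row()
hypothetical):

    h->index_init(inx, TRUE /* sorted */);
    int error= h->index_read(buf, key, key_len, HA_READ_KEY_EXACT);
    while (!error)
    {
      process_row(buf);
      error= h->index_next_same(buf, key, key_len);
    }
    h->index_end();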
+/*
+ Common routine for a number of index_read variants
+
+ SYNOPSIS
+    common_index_read()
+
+ see index_read for rest
+*/
+
int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
enum ha_rkey_function find_flag)
{
@@ -1738,18 +3305,30 @@ int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len,
/*
- index_first() asks for the first key in the index.
- This is similar to index_read except that there is no start key since
- the scan starts from the leftmost entry and proceeds forward with
- index_next.
+ Start an index scan from leftmost record and return first record
+
+ SYNOPSIS
+ index_first()
+ buf Read row in MySQL Row Format
- Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
- and sql_select.cc.
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_first() asks for the first key in the index.
+ This is similar to index_read except that there is no start key since
+ the scan starts from the leftmost entry and proceeds forward with
+ index_next.
+
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ and sql_select.cc.
*/
int ha_partition::index_first(byte * buf)
{
DBUG_ENTER("ha_partition::index_first");
+
end_range= 0;
m_index_scan_type= partition_index_first;
DBUG_RETURN(common_first_last(buf));
@@ -1757,25 +3336,47 @@ int ha_partition::index_first(byte * buf)
/*
- index_last() asks for the last key in the index.
- This is similar to index_read except that there is no start key since
- the scan starts from the rightmost entry and proceeds forward with
- index_prev.
+ Start an index scan from rightmost record and return first record
+
+ SYNOPSIS
+ index_last()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ index_last() asks for the last key in the index.
+ This is similar to index_read except that there is no start key since
+ the scan starts from the rightmost entry and proceeds forward with
+ index_prev.
- Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
- and sql_select.cc.
+ Called from opt_range.cc, opt_sum.cc, sql_handler.cc,
+ and sql_select.cc.
*/
int ha_partition::index_last(byte * buf)
{
DBUG_ENTER("ha_partition::index_last");
+
m_index_scan_type= partition_index_last;
DBUG_RETURN(common_first_last(buf));
}
+/*
+ Common routine for index_first/index_last
+
+ SYNOPSIS
+    common_first_last()
+
+ see index_first for rest
+*/
+
int ha_partition::common_first_last(byte *buf)
{
int error;
+
if ((error= partition_scan_set_up(buf, FALSE)))
return error;
if (!m_ordered_scan_ongoing)
@@ -1783,10 +3384,18 @@ int ha_partition::common_first_last(byte *buf)
return handle_ordered_index_scan(buf);
}
+
/*
- Positions an index cursor to the index specified in key. Fetches the
- row if any. This is only used to read whole keys.
- TODO: Optimise this code to avoid index_init and index_end
+  Perform an index read using a specified index, where only one row is returned
+
+ SYNOPSIS
+ index_read_idx()
+ see index_read for rest of parameters and return values
+
+ DESCRIPTION
+ Positions an index cursor to the index specified in key. Fetches the
+ row if any. This is only used to read whole keys.
+ TODO: Optimise this code to avoid index_init and index_end
*/
int ha_partition::index_read_idx(byte * buf, uint index, const byte * key,
@@ -1795,32 +3404,60 @@ int ha_partition::index_read_idx(byte * buf, uint index, const byte * key,
{
int res;
DBUG_ENTER("ha_partition::index_read_idx");
+
index_init(index, 0);
res= index_read(buf, key, key_len, find_flag);
index_end();
DBUG_RETURN(res);
}
+
/*
- This is used in join_read_last_key to optimise away an ORDER BY.
- Can only be used on indexes supporting HA_READ_ORDER
+ Read last using key
+
+ SYNOPSIS
+ index_read_last()
+ buf Read row in MySQL Row Format
+ key Key
+ keylen Length of key
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This is used in join_read_last_key to optimise away an ORDER BY.
+ Can only be used on indexes supporting HA_READ_ORDER
*/
int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen)
{
DBUG_ENTER("ha_partition::index_read_last");
+
m_ordered= TRUE; // Safety measure
DBUG_RETURN(index_read(buf, key, keylen, HA_READ_PREFIX_LAST));
}
/*
- Used to read forward through the index.
+ Read next record in a forward index scan
+
+ SYNOPSIS
+ index_next()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Used to read forward through the index.
*/
int ha_partition::index_next(byte * buf)
{
DBUG_ENTER("ha_partition::index_next");
+
/*
TODO(low priority):
If we want partition to work with the HANDLER commands, we
@@ -1836,13 +3473,27 @@ int ha_partition::index_next(byte * buf)
/*
- This routine is used to read the next but only if the key is the same
- as supplied in the call.
+  Read next record with the same key
+
+ SYNOPSIS
+ index_next_same()
+ buf Read row in MySQL Row Format
+ key Key
+ keylen Length of key
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This routine is used to read the next but only if the key is the same
+ as supplied in the call.
*/
int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
{
DBUG_ENTER("ha_partition::index_next_same");
+
DBUG_ASSERT(keylen == m_start_key.length);
DBUG_ASSERT(m_index_scan_type != partition_index_last);
if (!m_ordered_scan_ongoing)
@@ -1850,13 +3501,26 @@ int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen)
DBUG_RETURN(handle_ordered_next(buf, TRUE));
}
+
/*
- Used to read backwards through the index.
+ Read next record when performing index scan backwards
+
+ SYNOPSIS
+ index_prev()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ Used to read backwards through the index.
*/
int ha_partition::index_prev(byte * buf)
{
DBUG_ENTER("ha_partition::index_prev");
+
/* TODO: read comment in index_next */
DBUG_ASSERT(m_index_scan_type != partition_index_first);
DBUG_RETURN(handle_ordered_prev(buf));
@@ -1864,10 +3528,24 @@ int ha_partition::index_prev(byte * buf)
/*
- We reimplement read_range_first since we don't want the compare_key
- check at the end. This is already performed in the partition handler.
- read_range_next is very much different due to that we need to scan
- all underlying handlers.
+ Start a read of one range with start and end key
+
+ SYNOPSIS
+ read_range_first()
+ start_key Specification of start key
+ end_key Specification of end key
+ eq_range_arg Is it equal range
+ sorted Should records be returned in sorted order
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ We reimplement read_range_first since we don't want the compare_key
+ check at the end. This is already performed in the partition handler.
+ read_range_next is very much different due to that we need to scan
+ all underlying handlers.
*/
int ha_partition::read_range_first(const key_range *start_key,
@@ -1876,6 +3554,7 @@ int ha_partition::read_range_first(const key_range *start_key,
{
int error;
DBUG_ENTER("ha_partition::read_range_first");
+
m_ordered= sorted;
eq_range= eq_range_arg;
end_range= 0;
@@ -1904,9 +3583,21 @@ int ha_partition::read_range_first(const key_range *start_key,
}
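
Hedged usage sketch for the range-read pair (start_key and end_key assumed
filled in by the optimizer):

    int error= h->read_range_first(&start_key, &end_key,
                                   FALSE /* eq_range */, TRUE /* sorted */);
    while (!error)
    {
      process_row(table->record[0]);        // hypothetical consumer
      error= h->read_range_next();
    }
    /* error == HA_ERR_END_OF_FILE when the range is exhausted */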
+/*
+ Read next record in read of a range with start and end key
+
+ SYNOPSIS
+ read_range_next()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
int ha_partition::read_range_next()
{
DBUG_ENTER("ha_partition::read_range_next");
+
if (m_ordered)
{
DBUG_RETURN(handler::read_range_next());
@@ -1915,6 +3606,22 @@ int ha_partition::read_range_next()
}
+/*
+ Common routine to set up scans
+
+  SYNOPSIS
+    partition_scan_set_up()
+    buf                  Buffer to later return record in
+    idx_read_flag        Is it index scan
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This is where we check which partitions to actually scan if not all
+    of them.
+*/
+
int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
{
DBUG_ENTER("ha_partition::partition_scan_set_up");
@@ -1959,16 +3666,29 @@ int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag)
Unordered Index Scan Routines
****************************************************************************/
/*
- These routines are used to scan partitions without considering order.
- This is performed in two situations.
- 1) In read_multi_range this is the normal case
- 2) When performing any type of index_read, index_first, index_last where
- all fields in the partition function is bound. In this case the index
- scan is performed on only one partition and thus it isn't necessary to
- perform any sort.
+ Common routine to handle index_next with unordered results
+
+ SYNOPSIS
+ handle_unordered_next()
+ out:buf Read row in MySQL Row Format
+ next_same Called from index_next_same
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+
+ DESCRIPTION
+ These routines are used to scan partitions without considering order.
+ This is performed in two situations.
+ 1) In read_multi_range this is the normal case
+ 2) When performing any type of index_read, index_first, index_last where
+       all fields in the partition function are bound. In this case the index
+ scan is performed on only one partition and thus it isn't necessary to
+ perform any sort.
*/
-int ha_partition::handle_unordered_next(byte *buf, bool next_same)
+int ha_partition::handle_unordered_next(byte *buf, bool is_next_same)
{
  handler *file= m_file[m_part_spec.start_part];
int error;
@@ -1978,7 +3698,7 @@ int ha_partition::handle_unordered_next(byte *buf, bool next_same)
We should consider if this should be split into two functions as
    is_next_same is always a local constant
*/
- if (next_same)
+ if (is_next_same)
{
if (!(error= file->index_next_same(buf, m_start_key.key,
m_start_key.length)))
@@ -2007,8 +3727,20 @@ int ha_partition::handle_unordered_next(byte *buf, bool next_same)
/*
- This routine is used to start the index scan on the next partition.
- Both initial start and after completing scan on one partition.
+ Handle index_next when changing to new partition
+
+ SYNOPSIS
+ handle_unordered_scan_next_partition()
+ buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+
+ DESCRIPTION
+ This routine is used to start the index scan on the next partition.
+ Both initial start and after completing scan on one partition.
*/
int ha_partition::handle_unordered_scan_next_partition(byte * buf)
@@ -2056,30 +3788,43 @@ int ha_partition::handle_unordered_scan_next_partition(byte * buf)
/*
- This part contains the logic to handle index scans that require ordered
- output. This includes all except those started by read_range_first with
- the flag ordered set to FALSE. Thus most direct index_read and all
- index_first and index_last.
-
- We implement ordering by keeping one record plus a key buffer for each
- partition. Every time a new entry is requested we will fetch a new
- entry from the partition that is currently not filled with an entry.
- Then the entry is put into its proper sort position.
+ Common routine to start index scan with ordered results
- Returning a record is done by getting the top record, copying the
- record to the request buffer and setting the partition as empty on
- entries.
+ SYNOPSIS
+ handle_ordered_index_scan()
+ out:buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+
+ DESCRIPTION
+ This part contains the logic to handle index scans that require ordered
+ output. This includes all except those started by read_range_first with
+ the flag ordered set to FALSE. Thus most direct index_read and all
+ index_first and index_last.
+
+ We implement ordering by keeping one record plus a key buffer for each
+ partition. Every time a new entry is requested we will fetch a new
+ entry from the partition that is currently not filled with an entry.
+ Then the entry is put into its proper sort position.
+
+ Returning a record is done by getting the top record, copying the
+ record to the request buffer and setting the partition as empty on
+ entries.
*/
int ha_partition::handle_ordered_index_scan(byte *buf)
{
- uint i, j= 0;
+ uint i;
+ uint j= 0;
bool found= FALSE;
bool reverse_order= FALSE;
DBUG_ENTER("ha_partition::handle_ordered_index_scan");
m_top_entry= NO_CURRENT_PART_ID;
- queue_remove_all(&queue);
+ queue_remove_all(&m_queue);
for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
{
int error;
@@ -2112,7 +3857,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf)
/*
Initialise queue without order first, simply insert
*/
- queue_element(&queue, j++)= (byte*)queue_buf(i);
+ queue_element(&m_queue, j++)= (byte*)queue_buf(i);
}
else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
@@ -2125,10 +3870,10 @@ int ha_partition::handle_ordered_index_scan(byte *buf)
We found at least one partition with data, now sort all entries and
after that read the first entry and copy it to the buffer to return in.
*/
- queue_set_max_at_top(&queue, reverse_order);
- queue_set_cmp_arg(&queue, (void*)m_curr_key_info);
- queue.elements= j;
- queue_fix(&queue);
+ queue_set_max_at_top(&m_queue, reverse_order);
+ queue_set_cmp_arg(&m_queue, (void*)m_curr_key_info);
+ m_queue.elements= j;
+ queue_fix(&m_queue);
return_top_record(buf);
DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
DBUG_RETURN(0);
@@ -2137,11 +3882,23 @@ int ha_partition::handle_ordered_index_scan(byte *buf)
}
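
The merge invariant as a standalone sketch (std::priority_queue purely for
illustration; the server code uses its own QUEUE with m_curr_key_info as the
comparison argument, and key_cmp() below is an assumed comparator):

    #include <queue>
    #include <vector>

    struct Entry { uint part_id; const byte *row; };
    auto cmp= [](const Entry &a, const Entry &b)
              { return key_cmp(a.row, b.row) > 0; };  // assumed comparator
    std::priority_queue<Entry, std::vector<Entry>, decltype(cmp)> q(cmp);

    // 1. Prime: push one row from every partition that has data.
    // 2. Pop the smallest entry and return it; read the next row from the
    //    same partition and push it back. A partition at end-of-file simply
    //    drops out of the queue (cf. handle_ordered_next() below).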
+/*
+ Return the top record in sort order
+
+ SYNOPSIS
+ return_top_record()
+ out:buf Row returned in MySQL Row Format
+
+ RETURN VALUE
+ NONE
+*/
+
void ha_partition::return_top_record(byte *buf)
{
uint part_id;
- byte *key_buffer= queue_top(&queue);
+ byte *key_buffer= queue_top(&m_queue);
byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS;
+
part_id= uint2korr(key_buffer);
memcpy(buf, rec_buffer, m_rec_length);
m_last_part= part_id;
@@ -2149,14 +3906,28 @@ void ha_partition::return_top_record(byte *buf)
}
-int ha_partition::handle_ordered_next(byte *buf, bool next_same)
+/*
+ Common routine to handle index_next with ordered results
+
+ SYNOPSIS
+ handle_ordered_next()
+ out:buf Read row in MySQL Row Format
+ next_same Called from index_next_same
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+*/
+
+int ha_partition::handle_ordered_next(byte *buf, bool is_next_same)
{
int error;
uint part_id= m_top_entry;
handler *file= m_file[part_id];
DBUG_ENTER("ha_partition::handle_ordered_next");
- if (!next_same)
+ if (!is_next_same)
error= file->index_next(rec_buf(part_id));
else
error= file->index_next_same(rec_buf(part_id), m_start_key.key,
@@ -2166,8 +3937,8 @@ int ha_partition::handle_ordered_next(byte *buf, bool next_same)
if (error == HA_ERR_END_OF_FILE)
{
/* Return next buffered row */
- queue_remove(&queue, (uint) 0);
- if (queue.elements)
+ queue_remove(&m_queue, (uint) 0);
+ if (m_queue.elements)
{
DBUG_PRINT("info", ("Record returned from partition %u (2)",
m_top_entry));
@@ -2177,25 +3948,39 @@ int ha_partition::handle_ordered_next(byte *buf, bool next_same)
}
DBUG_RETURN(error);
}
- queue_replaced(&queue);
+ queue_replaced(&m_queue);
return_top_record(buf);
DBUG_PRINT("info", ("Record returned from partition %u", m_top_entry));
DBUG_RETURN(0);
}
+/*
+ Common routine to handle index_prev with ordered results
+
+ SYNOPSIS
+ handle_ordered_prev()
+ out:buf Read row in MySQL Row Format
+
+ RETURN VALUE
+ HA_ERR_END_OF_FILE End of scan
+ 0 Success
+ other Error code
+*/
+
int ha_partition::handle_ordered_prev(byte *buf)
{
int error;
uint part_id= m_top_entry;
handler *file= m_file[part_id];
DBUG_ENTER("ha_partition::handle_ordered_prev");
+
if ((error= file->index_prev(rec_buf(part_id))))
{
if (error == HA_ERR_END_OF_FILE)
{
- queue_remove(&queue, (uint) 0);
- if (queue.elements)
+ queue_remove(&m_queue, (uint) 0);
+ if (m_queue.elements)
{
return_top_record(buf);
DBUG_PRINT("info", ("Record returned from partition %d (2)",
@@ -2205,17 +3990,34 @@ int ha_partition::handle_ordered_prev(byte *buf)
}
DBUG_RETURN(error);
}
- queue_replaced(&queue);
+ queue_replaced(&m_queue);
return_top_record(buf);
DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry));
DBUG_RETURN(0);
}
+/*
+ Set fields in partition functions in read set for underlying handlers
+
+ SYNOPSIS
+ include_partition_fields_in_used_fields()
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Some handlers only read fields as specified by the bitmap for the
+ read set. For partitioned handlers we always require that the
+ fields of the partition functions are read such that we can
+ calculate the partition id to place updated and deleted records.
+*/
+
void ha_partition::include_partition_fields_in_used_fields()
{
- DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
Field **ptr= m_part_field_array;
+ DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields");
+
do
{
ha_set_bit_in_read_set((*ptr)->fieldnr);
@@ -2234,57 +4036,68 @@ void ha_partition::include_partition_fields_in_used_fields()
*/
/*
- ::info() is used to return information to the optimizer.
- Currently this table handler doesn't implement most of the fields
- really needed. SHOW also makes use of this data
- Another note, if your handler doesn't proved exact record count,
- you will probably want to have the following in your code:
- if (records < 2)
- records = 2;
- The reason is that the server will optimize for cases of only a single
- record. If in a table scan you don't know the number of records
- it will probably be better to set records to two so you can return
- as many records as you need.
-
- Along with records a few more variables you may wish to set are:
- records
- deleted
- data_file_length
- index_file_length
- delete_length
- check_time
- Take a look at the public variables in handler.h for more information.
-
- Called in:
- filesort.cc
- ha_heap.cc
- item_sum.cc
- opt_sum.cc
- sql_delete.cc
- sql_delete.cc
- sql_derived.cc
- sql_select.cc
- sql_select.cc
- sql_select.cc
- sql_select.cc
- sql_select.cc
- sql_show.cc
- sql_show.cc
- sql_show.cc
- sql_show.cc
- sql_table.cc
- sql_union.cc
- sql_update.cc
-
- Some flags that are not implemented
- HA_STATUS_POS:
- This parameter is never used from the MySQL Server. It is checked in a
- place in MyISAM so could potentially be used by MyISAM specific programs.
- HA_STATUS_NO_LOCK:
- This is declared and often used. It's only used by MyISAM.
- It means that MySQL doesn't need the absolute latest statistics
- information. This may save the handler from doing internal locks while
- retrieving statistics data.
+ General method to gather info from handler
+
+ SYNOPSIS
+ info()
+ flag Specifies what info is requested
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ ::info() is used to return information to the optimizer.
+ Currently this table handler doesn't implement most of the fields
+ really needed. SHOW also makes use of this data
+    Another note, if your handler doesn't provide an exact record count,
+ you will probably want to have the following in your code:
+ if (records < 2)
+ records = 2;
+ The reason is that the server will optimize for cases of only a single
+ record. If in a table scan you don't know the number of records
+ it will probably be better to set records to two so you can return
+ as many records as you need.
+
+ Along with records a few more variables you may wish to set are:
+ records
+ deleted
+ data_file_length
+ index_file_length
+ delete_length
+ check_time
+ Take a look at the public variables in handler.h for more information.
+
+ Called in:
+ filesort.cc
+ ha_heap.cc
+ item_sum.cc
+ opt_sum.cc
+ sql_delete.cc
+ sql_delete.cc
+ sql_derived.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_select.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_show.cc
+ sql_table.cc
+ sql_union.cc
+ sql_update.cc
+
+ Some flags that are not implemented
+ HA_STATUS_POS:
+ This parameter is never used from the MySQL Server. It is checked in a
+ place in MyISAM so could potentially be used by MyISAM specific
+ programs.
+ HA_STATUS_NO_LOCK:
+ This is declared and often used. It's only used by MyISAM.
+ It means that MySQL doesn't need the absolute latest statistics
+ information. This may save the handler from doing internal locks while
+ retrieving statistics data.
*/
void ha_partition::info(uint flag)
@@ -2469,6 +4282,17 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
/*
+ General function to prepare handler for certain behavior
+
+ SYNOPSIS
+ extra()
+ operation Operation type for extra call
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
extra() is called whenever the server wishes to send a hint to
the storage engine. The MyISAM engine implements the most hints.
@@ -2814,8 +4638,18 @@ int ha_partition::extra(enum ha_extra_function operation)
/*
- This will in the future be called instead of extra(HA_EXTRA_RESET) as this
- is such a common call
+ Special extra call to reset extra parameters
+
+ SYNOPSIS
+ reset()
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+
+ DESCRIPTION
+ This will in the future be called instead of extra(HA_EXTRA_RESET) as this
+ is such a common call
*/
int ha_partition::reset(void)
@@ -2823,6 +4657,7 @@ int ha_partition::reset(void)
int result= 0, tmp;
handler **file;
DBUG_ENTER("ha_partition::reset");
+
file= m_file;
if (m_part_info)
bitmap_clear_all(&m_part_info->used_partitions);
@@ -2835,15 +4670,40 @@ int ha_partition::reset(void)
}
+/*
+ Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter
+
+ SYNOPSIS
+ extra_opt()
+ operation Must be HA_EXTRA_CACHE
+ cachesize Size of cache in full table scan
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
{
DBUG_ENTER("ha_partition::extra_opt()");
+
DBUG_ASSERT(HA_EXTRA_CACHE == operation);
prepare_extra_cache(cachesize);
DBUG_RETURN(0);
}
+/*
+ Call extra on handler with HA_EXTRA_CACHE and cachesize
+
+ SYNOPSIS
+ prepare_extra_cache()
+ cachesize Size of cache for full table scan
+
+ RETURN VALUE
+ NONE
+*/
+
void ha_partition::prepare_extra_cache(uint cachesize)
{
DBUG_ENTER("ha_partition::prepare_extra_cache()");
@@ -2859,11 +4719,24 @@ void ha_partition::prepare_extra_cache(uint cachesize)
}
+/*
+ Call extra on all partitions
+
+ SYNOPSIS
+ loop_extra()
+ operation extra operation type
+
+ RETURN VALUE
+ >0 Error code
+ 0 Success
+*/
+
int ha_partition::loop_extra(enum ha_extra_function operation)
{
int result= 0, tmp;
handler **file;
DBUG_ENTER("ha_partition::loop_extra()");
+
for (file= m_file; *file; file++)
{
if ((tmp= (*file)->extra(operation)))
@@ -2873,10 +4746,22 @@ int ha_partition::loop_extra(enum ha_extra_function operation)
}
+/*
+ Call extra(HA_EXTRA_CACHE) on next partition_id
+
+ SYNOPSIS
+ late_extra_cache()
+ partition_id Partition id to call extra on
+
+ RETURN VALUE
+ NONE
+*/
+
void ha_partition::late_extra_cache(uint partition_id)
{
handler *file;
DBUG_ENTER("ha_partition::late_extra_cache");
+
if (!m_extra_cache)
DBUG_VOID_RETURN;
file= m_file[partition_id];
@@ -2888,10 +4773,22 @@ void ha_partition::late_extra_cache(uint partition_id)
}
+/*
+ Call extra(HA_EXTRA_NO_CACHE) on next partition_id
+
+ SYNOPSIS
+ late_extra_no_cache()
+ partition_id Partition id to call extra on
+
+ RETURN VALUE
+ NONE
+*/
+
void ha_partition::late_extra_no_cache(uint partition_id)
{
handler *file;
DBUG_ENTER("ha_partition::late_extra_no_cache");
+
if (!m_extra_cache)
DBUG_VOID_RETURN;
file= m_file[partition_id];
@@ -2904,12 +4801,34 @@ void ha_partition::late_extra_no_cache(uint partition_id)
MODULE optimiser support
****************************************************************************/
+/*
+ Get keys to use for scanning
+
+ SYNOPSIS
+ keys_to_use_for_scanning()
+
+ RETURN VALUE
+ key_map of keys usable for scanning
+*/
+
const key_map *ha_partition::keys_to_use_for_scanning()
{
DBUG_ENTER("ha_partition::keys_to_use_for_scanning");
+
DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
}
+
+/*
+ Return time for a scan of the table
+
+ SYNOPSIS
+ scan_time()
+
+ RETURN VALUE
+ time for scan
+*/
+
double ha_partition::scan_time()
{
double scan_time= 0;
@@ -2923,28 +4842,53 @@ double ha_partition::scan_time()
/*
- This will be optimised later to include whether or not the index can
- be used with partitioning. To achieve we need to add another parameter
- that specifies how many of the index fields that are bound in the ranges.
- Possibly added as a new call to handlers.
+ Get time to read
+
+ SYNOPSIS
+ read_time()
+ index Index number used
+ ranges Number of ranges
+ rows Number of rows
+
+ RETURN VALUE
+ time for read
+
+ DESCRIPTION
+ This will be optimised later to include whether or not the index can
+    be used with partitioning. To achieve this we need to add another parameter
+ that specifies how many of the index fields that are bound in the ranges.
+ Possibly added as a new call to handlers.
*/
double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
{
DBUG_ENTER("ha_partition::read_time");
+
DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
}
/*
- Given a starting key, and an ending key estimate the number of rows that
- will exist between the two. end_key may be empty which in case determine
- if start_key matches any rows.
+ Find number of records in a range
+
+ SYNOPSIS
+ records_in_range()
+ inx Index number
+ min_key Start of range
+ max_key End of range
+
+ RETURN VALUE
+ Number of rows in range
- Called from opt_range.cc by check_quick_keys().
+ DESCRIPTION
+    Given a starting key and an ending key, estimate the number of rows that
+    will exist between the two. end_key may be empty, in which case we
+    determine if start_key matches any rows.
- monty: MUST be called for each range and added.
- Note that MySQL will assume that if this returns 0 there is no
- matching rows for the range!
+ Called from opt_range.cc by check_quick_keys().
+
+ monty: MUST be called for each range and added.
+          Note that MySQL will assume that if this returns 0 there are no
+          matching rows for the range!
*/
ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
@@ -2963,6 +4907,16 @@ ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
}
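
The truncated body accumulates per-partition estimates; a hedged sketch of
that summation over the underlying handlers:

    ha_rows estimate= 0;
    for (handler **file= m_file; *file; file++)
      estimate+= (*file)->records_in_range(inx, min_key, max_key);
    /* per the note above: only return 0 if no rows can match */
    return estimate;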
+/*
+ Estimate upper bound of number of rows
+
+ SYNOPSIS
+ estimate_rows_upper_bound()
+
+ RETURN VALUE
+ Number of rows
+*/
+
ha_rows ha_partition::estimate_rows_upper_bound()
{
ha_rows rows, tot_rows= 0;
@@ -2981,9 +4935,48 @@ ha_rows ha_partition::estimate_rows_upper_bound()
}
+/*
+ Is it ok to switch to a new engine for this table
+
+ SYNOPSIS
+    can_switch_engines()
+
+ RETURN VALUE
+ TRUE Ok
+ FALSE Not ok
+
+ DESCRIPTION
+ Used to ensure that tables with foreign key constraints are not moved
+ to engines without foreign key support.
+*/
+
+bool ha_partition::can_switch_engines()
+{
+ handler **file;
+ DBUG_ENTER("ha_partition::can_switch_engines");
+
+ file= m_file;
+ do
+ {
+ if (!(*file)->can_switch_engines())
+ DBUG_RETURN(FALSE);
+ } while (*(++file));
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ Is table cache supported
+
+ SYNOPSIS
+    table_cache_type()
+
+  RETURN VALUE
+    Type of table cache used (taken from the first underlying handler)
+*/
+
uint8 ha_partition::table_cache_type()
{
DBUG_ENTER("ha_partition::table_cache_type");
+
DBUG_RETURN(m_file[0]->table_cache_type());
}
@@ -2995,6 +4988,7 @@ uint8 ha_partition::table_cache_type()
const char *ha_partition::index_type(uint inx)
{
DBUG_ENTER("ha_partition::index_type");
+
DBUG_RETURN(m_file[0]->index_type(inx));
}
@@ -3002,8 +4996,11 @@ const char *ha_partition::index_type(uint inx)
void ha_partition::print_error(int error, myf errflag)
{
DBUG_ENTER("ha_partition::print_error");
+
/* Should probably look for my own errors first */
/* monty: needs to be called for the last used partition ! */
+ DBUG_PRINT("enter", ("error = %d", error));
+
if (error == HA_ERR_NO_PARTITION_FOUND)
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
m_part_info->part_expr->val_int());
@@ -3016,6 +5013,7 @@ void ha_partition::print_error(int error, myf errflag)
bool ha_partition::get_error_message(int error, String *buf)
{
DBUG_ENTER("ha_partition::get_error_message");
+
/* Should probably look for my own errors first */
/* monty: needs to be called for the last used partition ! */
DBUG_RETURN(m_file[0]->get_error_message(error, buf));
@@ -3040,7 +5038,8 @@ const char **ha_partition::bas_ext() const
{ return ha_partition_ext; }
-uint ha_partition::min_of_the_max_uint(uint (handler::*operator_func)(void) const) const
+uint ha_partition::min_of_the_max_uint(
+ uint (handler::*operator_func)(void) const) const
{
handler **file;
uint min_of_the_max= ((*m_file)->*operator_func)();
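
The pointer-to-member parameter lets one loop serve many per-handler limits;
a hedged usage sketch with two handler methods that fit this signature:

    uint parts= min_of_the_max_uint(&handler::max_supported_key_parts);
    uint keys=  min_of_the_max_uint(&handler::max_supported_keys);

That is, the partitioned table can only promise the minimum of what every
underlying handler supports.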
@@ -3088,6 +5087,7 @@ uint ha_partition::extra_rec_buf_length() const
{
handler **file;
uint max= (*m_file)->extra_rec_buf_length();
+
for (file= m_file, file++; *file; file++)
if (max < (*file)->extra_rec_buf_length())
max= (*file)->extra_rec_buf_length();
@@ -3099,6 +5099,7 @@ uint ha_partition::min_record_length(uint options) const
{
handler **file;
uint max= (*m_file)->min_record_length(options);
+
for (file= m_file, file++; *file; file++)
if (max < (*file)->min_record_length(options))
max= (*file)->min_record_length(options);
@@ -3110,10 +5111,23 @@ uint ha_partition::min_record_length(uint options) const
MODULE compare records
****************************************************************************/
/*
- We get two references and need to check if those records are the same.
- If they belong to different partitions we decide that they are not
- the same record. Otherwise we use the particular handler to decide if
- they are the same. Sort in partition id order if not equal.
+ Compare two positions
+
+ SYNOPSIS
+ cmp_ref()
+ ref1 First position
+ ref2 Second position
+
+ RETURN VALUE
+ <0 ref1 < ref2
+ 0 Equal
+ >0 ref1 > ref2
+
+ DESCRIPTION
+ We get two references and need to check if those records are the same.
+ If they belong to different partitions we decide that they are not
+ the same record. Otherwise we use the particular handler to decide if
+ they are the same. Sort in partition id order if not equal.
*/
int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
@@ -3122,9 +5136,10 @@ int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
my_ptrdiff_t diff1, diff2;
handler *file;
DBUG_ENTER("ha_partition::cmp_ref");
+
if ((ref1[0] == ref2[0]) && (ref1[1] == ref2[1]))
{
- part_id= get_part_id_from_pos(ref1);
+ part_id= uint2korr(ref1);
file= m_file[part_id];
DBUG_ASSERT(part_id < m_tot_parts);
DBUG_RETURN(file->cmp_ref((ref1 + PARTITION_BYTES_IN_POS),
@@ -3155,6 +5170,7 @@ int ha_partition::cmp_ref(const byte *ref1, const byte *ref2)
void ha_partition::restore_auto_increment()
{
DBUG_ENTER("ha_partition::restore_auto_increment");
+
DBUG_VOID_RETURN;
}
@@ -3169,6 +5185,7 @@ void ha_partition::restore_auto_increment()
ulonglong ha_partition::get_auto_increment()
{
DBUG_ENTER("ha_partition::get_auto_increment");
+
DBUG_RETURN(m_file[0]->get_auto_increment());
}
@@ -3204,6 +5221,7 @@ static int partition_init= 0;
/*
Function we use in the creation of our hash to get key.
*/
+
static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
my_bool not_used __attribute__ ((unused)))
{
@@ -3218,7 +5236,6 @@ static byte *partition_get_key(PARTITION_SHARE *share, uint *length,
function.
*/
-
static PARTITION_SHARE *get_share(const char *table_name, TABLE *table)
{
PARTITION_SHARE *share;
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 03acf217419..60f6a5ca15b 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -49,10 +49,15 @@ private:
partition_no_index_scan= 3
};
/* Data for the partition handler */
+ int m_mode; // Open mode
+ uint m_open_test_lock; // Open test_if_locked
char *m_file_buffer; // Buffer with names
char *m_name_buffer_ptr; // Pointer to first partition name
handlerton **m_engine_array; // Array of types of the handlers
handler **m_file; // Array of references to handler inst.
+ handler **m_new_file; // Array of references to new handlers
+ handler **m_reorged_file; // Reorganised partitions
+ handler **m_added_file; // Added parts kept for errors
partition_info *m_part_info; // local reference to partition
byte *m_start_key_ref; // Reference of start key in current
// index scan info
@@ -60,7 +65,7 @@ private:
byte *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
KEY *m_curr_key_info; // Current index
byte *m_rec0; // table->record[0]
- QUEUE queue; // Prio queue used by sorted read
+ QUEUE m_queue; // Prio queue used by sorted read
/*
Since the partition handler is a handler on top of other handlers, it
is necessary to keep information about what the underlying handler
@@ -71,6 +76,7 @@ private:
u_long m_table_flags;
u_long m_low_byte_first;
+ uint m_reorged_parts; // Number of reorganised parts
uint m_tot_parts; // Total number of partitions;
uint m_no_locks; // For engines like ha_blackhole, which needs no locks
uint m_last_part; // Last file that we update,write
@@ -172,21 +178,38 @@ public:
*/
virtual int delete_table(const char *from);
virtual int rename_table(const char *from, const char *to);
- virtual int create(const char *name, TABLE * form,
- HA_CREATE_INFO * create_info);
+ virtual int create(const char *name, TABLE *form,
+ HA_CREATE_INFO *create_info);
virtual int create_handler_files(const char *name);
- virtual void update_create_info(HA_CREATE_INFO * create_info);
+ virtual void update_create_info(HA_CREATE_INFO *create_info);
virtual char *update_table_comment(const char *comment);
+ virtual int change_partitions(HA_CREATE_INFO *create_info,
+ const char *path,
+ ulonglong *copied,
+ ulonglong *deleted,
+ const void *pack_frm_data,
+ uint pack_frm_len);
virtual int drop_partitions(const char *path);
+ virtual int rename_partitions(const char *path);
+ bool get_no_parts(const char *name, uint *no_parts)
+ {
+ DBUG_ENTER("ha_partition::get_no_parts");
+ *no_parts= m_tot_parts;
+ DBUG_RETURN(0);
+ }
private:
+ int copy_partitions(ulonglong *copied, ulonglong *deleted);
+ void cleanup_new_partition(uint part_count);
+ int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
+ handler *file, const char *part_name);
/*
delete_table, rename_table and create uses very similar logic which
is packed into this routine.
*/
uint del_ren_cre_table(const char *from,
const char *to= NULL,
- TABLE * table_arg= NULL,
- HA_CREATE_INFO * create_info= NULL);
+ TABLE *table_arg= NULL,
+ HA_CREATE_INFO *create_info= NULL);
/*
One method to create the table_name.par file containing the names of the
underlying partitions, their engine and the number of partitions.
@@ -647,30 +670,8 @@ public:
index scan module.
(NDB)
*/
- virtual ulong alter_table_flags(void) const
- {
- //return HA_ONLINE_ADD_EMPTY_PARTITION + HA_ONLINE_DROP_PARTITION;
- return HA_ONLINE_DROP_PARTITION;
- }
virtual ulong table_flags() const
{ return m_table_flags; }
- /*
- HA_CAN_PARTITION:
- Used by storage engines that can handle partitioning without this
- partition handler
- (Partition, NDB)
-
- HA_CAN_UPDATE_PARTITION_KEY:
- Set if the handler can update fields that are part of the partition
- function.
-
- HA_CAN_PARTITION_UNIQUE:
- Set if the handler can handle unique indexes where the fields of the
- unique key are not part of the fields of the partition function. Thus
- a unique key can be set on all fields.
- */
- virtual ulong partition_flags() const
- { return HA_CAN_PARTITION; }
/*
This is a bitmap of flags that says how the storage engine
@@ -834,6 +835,8 @@ public:
description of how the CREATE TABLE part to define FOREIGN KEY's is done.
free_foreign_key_create_info is used to free the memory area that provided
this description.
+ can_switch_engines checks if it is ok to switch to a new engine based on
+ the foreign key info in the table.
-------------------------------------------------------------------------
virtual char* get_foreign_key_create_info()
@@ -843,7 +846,7 @@ public:
List<FOREIGN_KEY_INFO> *f_key_list)
virtual uint referenced_by_foreign_key()
*/
-
+ virtual bool can_switch_engines();
/*
-------------------------------------------------------------------------
MODULE fulltext index
@@ -892,16 +895,35 @@ public:
-------------------------------------------------------------------------
MODULE admin MyISAM
-------------------------------------------------------------------------
+
+ -------------------------------------------------------------------------
+ OPTIMIZE TABLE, CHECK TABLE, ANALYZE TABLE and REPAIR TABLE are
+ mapped to a routine that handles looping over a given set of
+    partitions; these routines pass a flag indicating whether to execute
+    the operation on all partitions.
+ -------------------------------------------------------------------------
+ */
+ virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
+ virtual int optimize_partitions(THD *thd);
+ virtual int analyze_partitions(THD *thd);
+ virtual int check_partitions(THD *thd);
+ virtual int repair_partitions(THD *thd);
+
+ private:
+ int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
+ uint flags, bool all_parts);
+ public:
+ /*
+ -------------------------------------------------------------------------
Admin commands not supported currently (almost purely MyISAM routines)
This means that the following methods are not implemented:
-------------------------------------------------------------------------
- virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
    virtual int backup(THD* thd, HA_CHECK_OPT *check_opt);
virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt);
virtual bool check_and_repair(THD *thd);
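
The admin-command mapping described above can be pictured with a short
illustrative sketch. The real bodies live in sql/ha_partition.cc (not part
of this hunk), and OPT_FLAG below is a hypothetical stand-in for whatever
flag value the implementation passes; only the delegation shape is the
point:

    int ha_partition::optimize(THD *thd, HA_CHECK_OPT *check_opt)
    {
      /* all_parts == TRUE: apply the command to every partition */
      return handle_opt_partitions(thd, check_opt, OPT_FLAG, TRUE);
    }

    int ha_partition::optimize_partitions(THD *thd)
    {
      HA_CHECK_OPT check_opt;
      check_opt.init();
      /* all_parts == FALSE: only the partitions named in ALTER TABLE */
      return handle_opt_partitions(thd, &check_opt, OPT_FLAG, FALSE);
    }
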
diff --git a/sql/handler.cc b/sql/handler.cc
index 5fd27c87ead..868dc5a09e9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -63,7 +63,7 @@ const handlerton default_hton =
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL,
create_default,
- NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL,
HTON_NO_FLAGS
};
@@ -2160,7 +2160,8 @@ int ha_create_table(THD *thd, const char *path,
init_tmp_table_share(&share, db, 0, table_name, path);
if (open_table_def(thd, &share, 0) ||
- open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table))
+ open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table,
+ TRUE))
goto err;
if (update_create_info)
@@ -2237,7 +2238,7 @@ int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
{
DBUG_RETURN(3);
}
- if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table))
+ if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table, FALSE))
{
free_table_share(&share);
DBUG_RETURN(3);
diff --git a/sql/handler.h b/sql/handler.h
index e766797133d..9b870be4505 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -99,6 +99,7 @@
#define HA_CAN_PARTITION (1 << 0) /* Partition support */
#define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
#define HA_CAN_PARTITION_UNIQUE (1 << 2)
+#define HA_USE_AUTO_PARTITION (1 << 3)
/* bits in index_flags(index_number) for what you can do with index */
@@ -109,30 +110,58 @@
#define HA_ONLY_WHOLE_INDEX 16 /* Can't use part key searches */
#define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */
-/* bits in alter_table_flags */
-#define HA_ONLINE_ADD_EMPTY_PARTITION 0x00000001
-#define HA_ONLINE_DROP_PARTITION 0x00000002
+/*
+ bits in alter_table_flags:
+*/
/*
These bits are set if different kinds of indexes can be created
off-line without re-create of the table (but with a table lock).
*/
-#define HA_ONLINE_ADD_INDEX_NO_WRITES 0x00000004 /*add index w/lock*/
-#define HA_ONLINE_DROP_INDEX_NO_WRITES 0x00000008 /*drop index w/lock*/
-#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES 0x00000010 /*add unique w/lock*/
-#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES 0x00000020 /*drop uniq. w/lock*/
-#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES 0x00000040 /*add prim. w/lock*/
-#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES 0x00000080 /*drop prim. w/lock*/
+#define HA_ONLINE_ADD_INDEX_NO_WRITES (1L << 0) /*add index w/lock*/
+#define HA_ONLINE_DROP_INDEX_NO_WRITES (1L << 1) /*drop index w/lock*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES (1L << 2) /*add unique w/lock*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES (1L << 3) /*drop uniq. w/lock*/
+#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES (1L << 4) /*add prim. w/lock*/
+#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES (1L << 5) /*drop prim. w/lock*/
/*
These are set if different kinds of indexes can be created on-line
(without a table lock). If a handler is capable of one or more of
these, it should also set the corresponding *_NO_WRITES bit(s).
*/
-#define HA_ONLINE_ADD_INDEX 0x00000100 /*add index online*/
-#define HA_ONLINE_DROP_INDEX 0x00000200 /*drop index online*/
-#define HA_ONLINE_ADD_UNIQUE_INDEX 0x00000400 /*add unique online*/
-#define HA_ONLINE_DROP_UNIQUE_INDEX 0x00000800 /*drop uniq. online*/
-#define HA_ONLINE_ADD_PK_INDEX 0x00001000 /*add prim. online*/
-#define HA_ONLINE_DROP_PK_INDEX 0x00002000 /*drop prim. online*/
+#define HA_ONLINE_ADD_INDEX (1L << 6) /*add index online*/
+#define HA_ONLINE_DROP_INDEX (1L << 7) /*drop index online*/
+#define HA_ONLINE_ADD_UNIQUE_INDEX (1L << 8) /*add unique online*/
+#define HA_ONLINE_DROP_UNIQUE_INDEX (1L << 9) /*drop uniq. online*/
+#define HA_ONLINE_ADD_PK_INDEX (1L << 10)/*add prim. online*/
+#define HA_ONLINE_DROP_PK_INDEX (1L << 11)/*drop prim. online*/
+/*
+ HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is
+ supported at all.
+ HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
+  exist, but they are not necessarily done online.
+
+ HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
+ the new partition and to the old partitions when updating through the
+ old partitioning schema while performing a change of the partitioning.
+  This means that the table can be updated while the copy phase of the
+  change is performed. For no lock at all, a double write from new to
+  old must also exist; that is not required when only this flag is set.
+  This flag was actually removed even before it was introduced the
+  first time. The new idea is that handlers will handle the lock level
+  already in store_lock for ALTER TABLE partitions.
+
+ HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
+ care of changing the partitions online and in one phase. Thus all phases
+ needed to handle the change are implemented inside the storage engine.
+ The storage engine must also support auto-discovery since the frm file
+ is changed as part of the change and this change must be controlled by
+ the storage engine. A typical engine to support this is NDB (through
+ WL #2498).
+*/
+#define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
+#define HA_FAST_CHANGE_PARTITION (1L << 13)
+#define HA_PARTITION_ONE_PHASE (1L << 14)
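
With every constant now a uniform (1L << n) bit, capability tests reduce
to plain bit arithmetic. A hypothetical caller-side check, using the
alter_table_flags handlerton callback added further down in this patch:

    /* alter_table_flags is an optional handlerton callback */
    if (hton->alter_table_flags &&
        (hton->alter_table_flags(0) & HA_FAST_CHANGE_PARTITION))
    {
      /* take the optimised ALTER TABLE partition path */
    }
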
/*
Index scan will not return records in rowid order. Not guaranteed to be
@@ -140,7 +169,6 @@
*/
#define HA_KEY_SCAN_NOT_ROR 128
-
/* operations for disable/enable indexes */
#define HA_KEY_SWITCH_NONUNIQ 0
#define HA_KEY_SWITCH_ALL 1
@@ -540,6 +568,8 @@ typedef struct
int (*start_consistent_snapshot)(THD *thd);
bool (*flush_logs)();
bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
+ uint (*partition_flags)();
+ uint (*alter_table_flags)(uint flags);
int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info);
uint32 flags; /* global handler flags */
/*
@@ -604,10 +634,12 @@ enum partition_state {
PART_NORMAL= 0,
PART_IS_DROPPED= 1,
PART_TO_BE_DROPPED= 2,
- PART_DROPPING= 3,
- PART_IS_ADDED= 4,
- PART_ADDING= 5,
- PART_ADDED= 6
+ PART_TO_BE_ADDED= 3,
+ PART_TO_BE_REORGED= 4,
+ PART_REORGED_DROPPED= 5,
+ PART_CHANGED= 6,
+ PART_IS_CHANGED= 7,
+ PART_IS_ADDED= 8
};
typedef struct {
@@ -657,12 +689,12 @@ public:
typedef struct {
longlong list_value;
- uint partition_id;
+ uint32 partition_id;
} LIST_PART_ENTRY;
class partition_info;
-typedef bool (*get_part_id_func)(partition_info *part_info,
+typedef int (*get_part_id_func)(partition_info *part_info,
uint32 *part_id);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
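
Since get_part_id_func now returns int instead of bool, callers treat any
non-zero result as a handler error code to propagate upwards, rather than
a plain found/not-found flag. A minimal usage sketch (the error name in
the comment is illustrative):

    uint32 part_id;
    int error;
    if ((error= part_info->get_partition_id(part_info, &part_id)))
      return error;  /* e.g. a "no matching partition" handler error */
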
@@ -732,6 +764,8 @@ public:
char *part_func_string;
char *subpart_func_string;
+ uchar *part_state;
+
partition_element *curr_part_elem;
partition_element *current_partition;
/*
@@ -748,12 +782,12 @@ public:
partition_type subpart_type;
uint part_info_len;
+ uint part_state_len;
uint part_func_len;
uint subpart_func_len;
uint no_parts;
uint no_subparts;
- uint count_curr_parts;
uint count_curr_subparts;
uint part_error_code;
@@ -764,14 +798,24 @@ public:
uint no_subpart_fields;
uint no_full_part_fields;
+ /*
+ This variable is used to calculate the partition id when using
+    LINEAR KEY/HASH. This functionality is kept in the MySQL Server but
+    is mainly of use to handlers supporting partitioning.
+ */
uint16 linear_hash_mask;
bool use_default_partitions;
+ bool use_default_no_partitions;
bool use_default_subpartitions;
+ bool use_default_no_subpartitions;
+ bool default_partitions_setup;
bool defined_max_value;
bool list_of_part_fields;
bool list_of_subpart_fields;
bool linear_hash_ind;
+ bool fixed;
+ bool from_openfrm;
partition_info()
: get_partition_id(NULL), get_part_partition_id(NULL),
@@ -782,19 +826,27 @@ public:
list_array(NULL),
part_info_string(NULL),
part_func_string(NULL), subpart_func_string(NULL),
+ part_state(NULL),
curr_part_elem(NULL), current_partition(NULL),
default_engine_type(NULL),
part_result_type(INT_RESULT),
part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION),
- part_info_len(0), part_func_len(0), subpart_func_len(0),
+ part_info_len(0), part_state_len(0),
+ part_func_len(0), subpart_func_len(0),
no_parts(0), no_subparts(0),
- count_curr_parts(0), count_curr_subparts(0), part_error_code(0),
+ count_curr_subparts(0), part_error_code(0),
no_list_values(0), no_part_fields(0), no_subpart_fields(0),
no_full_part_fields(0), linear_hash_mask(0),
use_default_partitions(TRUE),
- use_default_subpartitions(TRUE), defined_max_value(FALSE),
+ use_default_no_partitions(TRUE),
+ use_default_subpartitions(TRUE),
+ use_default_no_subpartitions(TRUE),
+ default_partitions_setup(FALSE),
+ defined_max_value(FALSE),
list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
- linear_hash_ind(FALSE)
+ linear_hash_ind(FALSE),
+ fixed(FALSE),
+ from_openfrm(FALSE)
{
all_fields_in_PF.clear_all();
all_fields_in_PPF.clear_all();
@@ -842,6 +894,8 @@ uint get_tot_partitions(partition_info *part_info)
return part_info->no_parts *
(is_sub_partitioned(part_info) ? part_info->no_subparts : 1);
}
+
+
#endif
typedef struct st_ha_create_information
@@ -891,8 +945,8 @@ typedef struct st_ha_check_opt
#ifdef WITH_PARTITION_STORAGE_ENGINE
bool is_partition_in_list(char *part_name, List<char> list_part_names);
-bool is_partitions_in_table(partition_info *new_part_info,
- partition_info *old_part_info);
+char *are_partitions_in_table(partition_info *new_part_info,
+ partition_info *old_part_info);
bool check_reorganise_list(partition_info *new_part_info,
partition_info *old_part_info,
List<char> list_part_names);
@@ -906,12 +960,13 @@ int get_parts_for_update(const byte *old_data, byte *new_data,
uint32 *old_part_id, uint32 *new_part_id);
int get_part_for_delete(const byte *buf, const byte *rec0,
partition_info *part_info, uint32 *part_id);
-bool check_partition_info(partition_info *part_info,handlerton *eng_type,
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
handler *file, ulonglong max_rows);
-bool fix_partition_func(THD *thd, const char *name, TABLE *table);
+bool fix_partition_func(THD *thd, const char *name, TABLE *table,
+ bool create_table_ind);
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length, bool use_sql_alloc,
- bool add_default_info);
+ bool write_all);
bool partition_key_modified(TABLE *table, List<Item> &fields);
void get_partition_set(const TABLE *table, byte *buf, const uint index,
const key_range *key_spec,
@@ -921,7 +976,9 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
const key_range *key_spec,
part_id_range *part_spec);
bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
- uint part_info_len, TABLE *table,
+ uint part_info_len,
+ uchar *part_state, uint part_state_len,
+ TABLE *table, bool is_create_table_ind,
handlerton *default_db_type);
void make_used_partitions_str(partition_info *part_info, String *parts_str);
uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
@@ -1480,11 +1537,16 @@ public:
virtual const char *table_type() const =0;
virtual const char **bas_ext() const =0;
virtual ulong table_flags(void) const =0;
- virtual ulong alter_table_flags(void) const { return 0; }
#ifdef WITH_PARTITION_STORAGE_ENGINE
- virtual ulong partition_flags(void) const { return 0;}
virtual int get_default_no_partitions(ulonglong max_rows) { return 1;}
- virtual void set_part_info(partition_info *part_info) { return; }
+ virtual void set_auto_partitions(partition_info *part_info) { return; }
+ virtual bool get_no_parts(const char *name,
+ uint *no_parts)
+ {
+ *no_parts= 0;
+ return 0;
+ }
+ virtual void set_part_info(partition_info *part_info) {return;}
#endif
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
@@ -1530,19 +1592,26 @@ public:
virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0;
virtual int create_handler_files(const char *name) { return FALSE;}
- /*
- SYNOPSIS
- drop_partitions()
- path Complete path of db and table name
- RETURN VALUE
- TRUE Failure
- FALSE Success
- DESCRIPTION
- Drop a partition, during this operation no other activity is ongoing
- in this server on the table.
- */
+ virtual int change_partitions(HA_CREATE_INFO *create_info,
+ const char *path,
+ ulonglong *copied,
+ ulonglong *deleted,
+ const void *pack_frm_data,
+ uint pack_frm_len)
+ { return HA_ERR_WRONG_COMMAND; }
virtual int drop_partitions(const char *path)
{ return HA_ERR_WRONG_COMMAND; }
+ virtual int rename_partitions(const char *path)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int optimize_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int analyze_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int check_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+ virtual int repair_partitions(THD *thd)
+ { return HA_ERR_WRONG_COMMAND; }
+
/* lock_count() can be more than one if the table is a MERGE */
virtual uint lock_count(void) const { return 1; }
virtual THR_LOCK_DATA **store_lock(THD *thd,
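
All of the new partition hooks default to HA_ERR_WRONG_COMMAND, so the SQL
layer can probe for native support and fall back when it is absent. A
sketch of that caller-side convention (the fallback branch is shorthand,
not code from this patch):

    int error= file->drop_partitions(path);
    if (error == HA_ERR_WRONG_COMMAND)
    {
      /* engine has no native DROP PARTITION support;
         fall back to the generic copy-based ALTER TABLE */
    }
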
diff --git a/sql/lex.h b/sql/lex.h
index 29c693c2c74..e0b4855abc3 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -422,6 +422,7 @@ static SYMBOL symbols[] = {
{ "READ_WRITE", SYM(READ_WRITE_SYM)},
{ "READS", SYM(READS_SYM)},
{ "REAL", SYM(REAL)},
+ { "REBUILD", SYM(REBUILD_SYM)},
{ "RECOVER", SYM(RECOVER_SYM)},
{ "REDO_BUFFER_SIZE", SYM(REDO_BUFFER_SIZE_SYM)},
{ "REDOFILE", SYM(REDOFILE_SYM)},
@@ -434,7 +435,7 @@ static SYMBOL symbols[] = {
{ "RELEASE", SYM(RELEASE_SYM)},
{ "RELOAD", SYM(RELOAD)},
{ "RENAME", SYM(RENAME)},
- { "REORGANISE", SYM(REORGANISE_SYM)},
+ { "REORGANIZE", SYM(REORGANIZE_SYM)},
{ "REPAIR", SYM(REPAIR)},
{ "REPEATABLE", SYM(REPEATABLE_SYM)},
{ "REPLACE", SYM(REPLACE)},
diff --git a/sql/lock.cc b/sql/lock.cc
index 8e24c56799d..76c511e4ba0 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -351,9 +351,25 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table)
}
}
+/* Downgrade all locks on a table to a new WRITE level from WRITE_ONLY */
+
+void mysql_lock_downgrade_write(THD *thd, TABLE *table,
+ thr_lock_type new_lock_type)
+{
+ MYSQL_LOCK *locked;
+ TABLE *write_lock_used;
+ if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
+ {
+ for (uint i=0; i < locked->lock_count; i++)
+ thr_downgrade_write_lock(locked->locks[i], new_lock_type);
+ my_free((gptr) locked,MYF(0));
+ }
+}
+
+
/* abort all other threads waiting to get lock in table */
-void mysql_lock_abort(THD *thd, TABLE *table)
+void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock)
{
MYSQL_LOCK *locked;
TABLE *write_lock_used;
@@ -362,7 +378,7 @@ void mysql_lock_abort(THD *thd, TABLE *table)
if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used)))
{
for (uint i=0; i < locked->lock_count; i++)
- thr_abort_locks(locked->locks[i]->lock);
+ thr_abort_locks(locked->locks[i]->lock, upgrade_lock);
my_free((gptr) locked,MYF(0));
}
DBUG_VOID_RETURN;
diff --git a/sql/log.cc b/sql/log.cc
index 7232d3a24dd..e5da48196c8 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -88,6 +88,8 @@ handlerton binlog_hton = {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_NOT_USER_SELECTABLE | HTON_HIDDEN
};
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 026234caf34..43b6ed38668 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -595,6 +595,11 @@ struct Query_cache_query_flags
#define query_cache_invalidate_by_MyISAM_filename_ref NULL
#endif /*HAVE_QUERY_CACHE*/
+uint build_table_path(char *buff, size_t bufflen, const char *db,
+ const char *table, const char *ext);
+void write_bin_log(THD *thd, bool clear_error,
+ char const *query, ulong query_length);
+
bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent);
bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create);
bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent);
@@ -1035,6 +1040,22 @@ void remove_db_from_cache(const char *db);
void flush_tables();
bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+uint fast_alter_partition_table(THD *thd, TABLE *table,
+ ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ TABLE_LIST *table_list,
+ List<create_field> *create_list,
+ List<Key> *key_list, const char *db,
+ const char *table_name,
+ uint fast_alter_partition);
+uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ handlerton *old_db_type,
+ bool *partition_changed,
+ uint *fast_alter_partition);
+#endif
+
/* bits for last argument to remove_table_from_cache() */
#define RTFC_NO_FLAG 0x0000
#define RTFC_OWNED_BY_THD_FLAG 0x0001
@@ -1043,6 +1064,40 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
bool remove_table_from_cache(THD *thd, const char *db, const char *table,
uint flags);
+typedef struct st_lock_param_type
+{
+ ulonglong copied;
+ ulonglong deleted;
+ THD *thd;
+ HA_CREATE_INFO *create_info;
+ List<create_field> *create_list;
+ List<create_field> new_create_list;
+ List<Key> *key_list;
+ List<Key> new_key_list;
+ TABLE *table;
+ KEY *key_info_buffer;
+ const char *db;
+ const char *table_name;
+ const void *pack_frm_data;
+ enum thr_lock_type old_lock_type;
+ uint key_count;
+ uint db_options;
+ uint pack_frm_len;
+} ALTER_PARTITION_PARAM_TYPE;
+
+void mem_alloc_error(size_t size);
+int packfrm(const void *data, uint len,
+ const void **pack_data, uint *pack_len);
+int unpackfrm(const void **unpack_data, uint *unpack_len,
+ const void *pack_data);
+#define WFRM_INITIAL_WRITE 1
+#define WFRM_CREATE_HANDLER_FILES 2
+#define WFRM_PACK_FRM 4
+bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags);
+bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt);
+void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt);
+void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table);
+
bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE);
void copy_field_from_tmp_record(Field *field,int offset);
bool fill_record(THD *thd, Field **field, List<Item> &values,
@@ -1379,7 +1434,9 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock);
void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock);
void mysql_unlock_some_tables(THD *thd, TABLE **table,uint count);
void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table);
-void mysql_lock_abort(THD *thd, TABLE *table);
+void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock);
+void mysql_lock_downgrade_write(THD *thd, TABLE *table,
+ thr_lock_type new_lock_type);
bool mysql_lock_abort_for_thread(THD *thd, TABLE *table);
MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b);
TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle,
@@ -1431,9 +1488,7 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags);
void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg);
int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
uint db_stat, uint prgflag, uint ha_open_flags,
- TABLE *outparam);
-int openfrm(THD *thd, const char *name,const char *alias,uint filestat,
- uint prgflag, uint ha_open_flags, TABLE *outparam);
+ TABLE *outparam, bool is_create_table);
int readfrm(const char *name, const void** data, uint* length);
int writefrm(const char* name, const void* data, uint len);
int closefrm(TABLE *table, bool free_share);
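
The WFRM_* values are distinct bits (1, 2, 4), so one mysql_write_frm()
call can request several actions at once. An illustrative call site, not a
line from this patch:

    /* write the intermediate frm and keep a packed copy in lpt */
    if (mysql_write_frm(lpt, WFRM_INITIAL_WRITE | WFRM_PACK_FRM))
      goto err;
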
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index e735a87bab4..76f0cdeebe5 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5601,13 +5601,13 @@ ER_SP_RECURSION_LIMIT
eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s"
ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten"
ER_SP_PROC_TABLE_CORRUPT
- eng "Failed to load routine %s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
+ eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
ER_PARTITION_REQUIRES_VALUES_ERROR
- eng "%s PARTITIONING requires definition of VALUES %s for each partition"
- swe "%s PARTITIONering kräver definition av VALUES %s för varje partition"
+ eng "%-.64s PARTITIONING requires definition of VALUES %-.64s for each partition"
+ swe "%-.64s PARTITIONering kräver definition av VALUES %-.64s för varje partition"
ER_PARTITION_WRONG_VALUES_ERROR
- eng "Only %s PARTITIONING can use VALUES %s in partition definition"
- swe "Endast %s partitionering kan använda VALUES %s i definition av partitionen"
+ eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition"
+ swe "Endast %-.64s partitionering kan använda VALUES %-.64s i definition av partitionen"
ER_PARTITION_MAXVALUE_ERROR
eng "MAXVALUE can only be used in last partition definition"
swe "MAXVALUE kan bara användas i definitionen av den sista partitionen"
@@ -5636,11 +5636,11 @@ ER_INCONSISTENT_PARTITION_INFO_ERROR
eng "The partition info in the frm file is not consistent with what can be written into the frm file"
swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen"
ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
- eng "The %s function returns the wrong type"
- swe "%s-funktionen returnerar felaktig typ"
+ eng "The %-.64s function returns the wrong type"
+ swe "%-.64s-funktionen returnerar felaktig typ"
ER_PARTITIONS_MUST_BE_DEFINED_ERROR
- eng "For %s partitions each partition must be defined"
- swe "För %s partitionering så måste varje partition definieras"
+ eng "For %-.64s partitions each partition must be defined"
+ swe "För %-.64s partitionering så måste varje partition definieras"
ER_RANGE_NOT_INCREASING_ERROR
eng "VALUES LESS THAN value must be strictly increasing for each partition"
swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition"
@@ -5657,8 +5657,8 @@ ER_MIX_HANDLER_ERROR
eng "The mix of handlers in the partitions is not allowed in this version of MySQL"
swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MySQL"
ER_PARTITION_NOT_DEFINED_ERROR
- eng "For the partitioned engine it is necessary to define all %s"
- swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %s"
+ eng "For the partitioned engine it is necessary to define all %-.64s"
+ swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %-.64s"
ER_TOO_MANY_PARTITIONS_ERROR
eng "Too many partitions were defined"
swe "För många partitioner definierades"
@@ -5671,30 +5671,36 @@ ER_CANT_CREATE_HANDLER_FILE
ER_BLOB_FIELD_IN_PART_FUNC_ERROR
eng "A BLOB field is not allowed in partition function"
swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
-ER_CHAR_SET_IN_PART_FIELD_ERROR
- eng "VARCHAR only allowed if binary collation for partition functions"
- swe "VARCHAR endast tillåten med binär collation för partitioneringsfunktion"
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
- eng "A %s need to include all fields in the partition function"
- swe "En %s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
+ eng "A %-.64s need to include all fields in the partition function"
+ swe "En %-.64s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
ER_NO_PARTS_ERROR
- eng "Number of %s = 0 is not an allowed value"
- swe "Antal %s = 0 är inte ett tillåten värde"
+ eng "Number of %-.64s = 0 is not an allowed value"
+ swe "Antal %-.64s = 0 är inte ett tillåten värde"
ER_PARTITION_MGMT_ON_NONPARTITIONED
eng "Partition management on a not partitioned table is not possible"
swe "Partitioneringskommando på en opartitionerad tabell är inte möjligt"
+ER_FOREIGN_KEY_ON_PARTITIONED
+ eng "Foreign key condition is not yet supported in conjunction with partitioning"
+ swe "Foreign key villkor är inte ännu implementerad i kombination med partitionering"
ER_DROP_PARTITION_NON_EXISTENT
- eng "Error in list of partitions to change"
- swe "Fel i listan av partitioner att förändra"
+ eng "Error in list of partitions to %-.64s"
+ swe "Fel i listan av partitioner att %-.64s"
ER_DROP_LAST_PARTITION
eng "Cannot remove all partitions, use DROP TABLE instead"
swe "Det är inte tillåtet att ta bort alla partitioner, använd DROP TABLE istället"
ER_COALESCE_ONLY_ON_HASH_PARTITION
eng "COALESCE PARTITION can only be used on HASH/KEY partitions"
swe "COALESCE PARTITION kan bara användas på HASH/KEY partitioner"
+ER_REORG_HASH_ONLY_ON_SAME_NO
+ eng "REORGANISE PARTITION can only be used to reorganise partitions not to change their numbers"
+ swe "REORGANISE PARTITION kan bara användas för att omorganisera partitioner, inte för att ändra deras antal"
+ER_REORG_NO_PARAM_ERROR
+ eng "REORGANISE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs"
+ swe "REORGANISE PARTITION utan parametrar kan bara användas på auto-partitionerade tabeller som använder HASH partitionering"
ER_ONLY_ON_RANGE_LIST_PARTITION
- eng "%s PARTITION can only be used on RANGE/LIST partitions"
- swe "%s PARTITION kan bara användas på RANGE/LIST-partitioner"
+ eng "%-.64s PARTITION can only be used on RANGE/LIST partitions"
+ swe "%-.64s PARTITION kan bara användas på RANGE/LIST-partitioner"
ER_ADD_PARTITION_SUBPART_ERROR
eng "Trying to Add partition(s) with wrong number of subpartitions"
swe "ADD PARTITION med fel antal subpartitioner"
@@ -5708,19 +5714,25 @@ ER_REORG_PARTITION_NOT_EXIST
eng "More partitions to reorganise than there are partitions"
swe "Fler partitioner att reorganisera än det finns partitioner"
ER_SAME_NAME_PARTITION
- eng "All partitions must have unique names in the table"
- swe "Alla partitioner i tabellen måste ha unika namn"
+ eng "Duplicate partition name %-.64s"
+ swe "Duplicerat partitionsnamn %-.64s"
+ER_NO_BINLOG_ERROR
+ eng "It is not allowed to shut off binlog on this command"
+ swe "Det är inte tillåtet att stänga av binlog på detta kommando"
ER_CONSECUTIVE_REORG_PARTITIONS
eng "When reorganising a set of partitions they must be in consecutive order"
swe "När ett antal partitioner omorganiseras måste de vara i konsekutiv ordning"
ER_REORG_OUTSIDE_RANGE
- eng "The new partitions cover a bigger range then the reorganised partitions do"
- swe "De nya partitionerna täcker ett större intervall än de omorganiserade partitionerna"
-ER_DROP_PARTITION_FAILURE
- eng "Drop partition not supported in this version for this handler"
-ER_DROP_PARTITION_WHEN_FK_DEFINED
- eng "Cannot drop a partition when a foreign key constraint is defined on the table"
- swe "Kan inte ta bort en partition när en främmande nyckel är definierad på tabellen"
+ eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range"
+ swe "Reorganisering av rangepartitioner kan inte ändra den totala intervallet utom för den sista partitionen där intervallet kan utökas"
+ER_PARTITION_FUNCTION_FAILURE
+ eng "Partition function not supported in this version for this handler"
+ER_PART_STATE_ERROR
+ eng "Partition state cannot be defined from CREATE/ALTER TABLE"
+ swe "Partition state kan inte definieras från CREATE/ALTER TABLE"
+ER_LIMITED_PART_RANGE
+ eng "The %-.64s handler only supports 32 bit integers in VALUES"
+ swe "%-.64s stödjer endast 32 bitar i integers i VALUES"
ER_PLUGIN_IS_NOT_LOADED
eng "Plugin '%-.64s' is not loaded"
ER_WRONG_VALUE
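
The change from %s to %-.64s matters because these messages now
interpolate user-supplied identifiers: the precision truncates the
argument at 64 characters, so an oversized name cannot overrun the message
buffer. The pattern is used exactly this way later in this patch:

    /* from check_partition_info() in sql_partition.cc below */
    my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name);
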
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index d502562ec7c..c695fe40d7f 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2218,7 +2218,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
{
if (abort_locks)
{
- mysql_lock_abort(thd,table); // Close waiting threads
+ mysql_lock_abort(thd,table, TRUE); // Close waiting threads
mysql_lock_remove(thd, thd->locked_tables,table);
table->locked_by_flush=1; // Will be reopened with locks
}
@@ -2361,7 +2361,7 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name)
if (!strcmp(table->s->table_name.str, table_name) &&
!strcmp(table->s->db.str, db))
{
- mysql_lock_abort(thd,table);
+ mysql_lock_abort(thd,table, TRUE);
break;
}
}
@@ -2473,7 +2473,7 @@ retry:
HA_TRY_READ_ONLY),
(READ_KEYINFO | COMPUTE_TYPES |
EXTRA_RECORD),
- thd->open_options, entry)))
+ thd->open_options, entry, FALSE)))
{
if (error == 7) // Table def changed
{
@@ -2537,7 +2537,7 @@ retry:
HA_TRY_READ_ONLY),
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
ha_open_options | HA_OPEN_FOR_REPAIR,
- entry) || ! entry->file ||
+ entry, FALSE) || ! entry->file ||
(entry->file->is_crashed() && entry->file->check_and_repair(thd)))
{
/* Give right error message */
@@ -3366,7 +3366,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
HA_GET_INDEX),
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
ha_open_options,
- tmp_table))
+ tmp_table, FALSE))
{
/* No need to lock share->mutex as this is not needed for tmp tables */
free_table_share(share);
@@ -6069,3 +6069,155 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b)
{
return a->length == b->length && !strncmp(a->str, b->str, a->length);
}
+
+
+/*
+ SYNOPSIS
+ abort_and_upgrade_lock()
+ lpt Parameter passing struct
+ All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+ DESCRIPTION
+    Remember the old lock level (for possible downgrade later on), abort
+    all waiting threads and ensure that all currently held locks are
+    completed such that we own the lock exclusively and no other
+    interaction is ongoing.
+
+ thd Thread object
+ table Table object
+ db Database name
+ table_name Table name
+ old_lock_level Old lock level
+*/
+
+bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
+ int error= FALSE;
+ DBUG_ENTER("abort_and_upgrade_locks");
+
+ lpt->old_lock_type= lpt->table->reginfo.lock_type;
+ VOID(pthread_mutex_lock(&LOCK_open));
+ mysql_lock_abort(lpt->thd, lpt->table, TRUE);
+ VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags));
+ if (lpt->thd->killed)
+ {
+ lpt->thd->no_warnings_for_error= 0;
+ error= TRUE;
+ }
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ DBUG_RETURN(error);
+}
+
+
+/*
+ SYNOPSIS
+ close_open_tables_and_downgrade()
+  RETURN VALUES
+ NONE
+ DESCRIPTION
+ We need to ensure that any thread that has managed to open the table
+    but not yet encountered our lock on the table is also thrown out, to
+    ensure that no thread sees our frm changes prior to the final
+    version. The intermediate versions are only meant for use after a
+    crash and a later REPAIR TABLE.
+    We also downgrade locks after the upgrade to WRITE_ONLY.
+*/
+
+void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ VOID(pthread_mutex_lock(&LOCK_open));
+ remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name,
+ RTFC_WAIT_OTHER_THREAD_FLAG);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ mysql_lock_downgrade_write(lpt->thd, lpt->table, lpt->old_lock_type);
+}
+
+
+/*
+ SYNOPSIS
+ mysql_wait_completed_table()
+ lpt Parameter passing struct
+ my_table My table object
+    All parameters passed through the ALTER_PARTITION_PARAM_TYPE object
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+ DESCRIPTION
+ We have changed the frm file and now we want to wait for all users of
+ the old frm to complete before proceeding to ensure that no one
+ remains that uses the old frm definition.
+ Start by ensuring that all users of the table will be removed from cache
+    once they are done. Then abort all threads that have stumbled on
+    locks and haven't been started yet.
+
+ thd Thread object
+ table Table object
+ db Database name
+ table_name Table name
+*/
+
+void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table)
+{
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length;
+ TABLE *table;
+ DBUG_ENTER("mysql_wait_completed_table");
+
+ key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1;
+ VOID(pthread_mutex_lock(&LOCK_open));
+ HASH_SEARCH_STATE state;
+ for (table= (TABLE*) hash_first(&open_cache,(byte*) key,key_length,
+ &state) ;
+ table;
+ table= (TABLE*) hash_next(&open_cache,(byte*) key,key_length,
+ &state))
+ {
+ THD *in_use= table->in_use;
+ table->s->version= 0L;
+ if (!in_use)
+ {
+ relink_unused(table);
+ }
+ else
+ {
+ /* Kill delayed insert threads */
+ if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
+ ! in_use->killed)
+ {
+ in_use->killed= THD::KILL_CONNECTION;
+ pthread_mutex_lock(&in_use->mysys_var->mutex);
+ if (in_use->mysys_var->current_cond)
+ {
+ pthread_mutex_lock(in_use->mysys_var->current_mutex);
+ pthread_cond_broadcast(in_use->mysys_var->current_cond);
+ pthread_mutex_unlock(in_use->mysys_var->current_mutex);
+ }
+ pthread_mutex_unlock(&in_use->mysys_var->mutex);
+ }
+ /*
+ Now we must abort all tables locks used by this thread
+ as the thread may be waiting to get a lock for another table
+ */
+ for (TABLE *thd_table= in_use->open_tables;
+ thd_table ;
+ thd_table= thd_table->next)
+ {
+ if (thd_table->db_stat) // If table is open
+ mysql_lock_abort_for_thread(lpt->thd, thd_table);
+ }
+ }
+ }
+ /*
+ We start by removing all unused objects from the cache and marking
+ those in use for removal after completion. Now we also need to abort
+ all that are locked and are not progressing due to being locked
+ by our lock. We don't upgrade our lock here.
+ */
+ mysql_lock_abort(lpt->thd, my_table, FALSE);
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ DBUG_VOID_RETURN;
+}
+
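Taken together, the three helpers above form a small protocol around the
frm change. A condensed sketch of the intended call order in the
partition-ALTER code path (simplified; error handling and the actual frm
work are elided):

    ALTER_PARTITION_PARAM_TYPE *lpt;  /* set up by the caller */
    if (abort_and_upgrade_lock(lpt))  /* own the table exclusively */
      return TRUE;
    /* ... write the new frm, add/drop/rename partitions ... */
    mysql_wait_completed_table(lpt, lpt->table); /* flush old-frm users */
    close_open_tables_and_downgrade(lpt); /* restore the old lock level */
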
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 669cb7f8d47..28ba8fbf94a 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -665,23 +665,31 @@ public:
};
typedef class st_select_lex SELECT_LEX;
-#define ALTER_ADD_COLUMN 1
-#define ALTER_DROP_COLUMN 2
-#define ALTER_CHANGE_COLUMN 4
-#define ALTER_ADD_INDEX 8
-#define ALTER_DROP_INDEX 16
-#define ALTER_RENAME 32
-#define ALTER_ORDER 64
-#define ALTER_OPTIONS 128
-#define ALTER_CHANGE_COLUMN_DEFAULT 256
-#define ALTER_KEYS_ONOFF 512
-#define ALTER_CONVERT 1024
-#define ALTER_FORCE 2048
-#define ALTER_RECREATE 4096
-#define ALTER_ADD_PARTITION 8192
-#define ALTER_DROP_PARTITION 16384
-#define ALTER_COALESCE_PARTITION 32768
-#define ALTER_REORGANISE_PARTITION 65536
+#define ALTER_ADD_COLUMN (1L << 0)
+#define ALTER_DROP_COLUMN (1L << 1)
+#define ALTER_CHANGE_COLUMN (1L << 2)
+#define ALTER_ADD_INDEX (1L << 3)
+#define ALTER_DROP_INDEX (1L << 4)
+#define ALTER_RENAME (1L << 5)
+#define ALTER_ORDER (1L << 6)
+#define ALTER_OPTIONS (1L << 7)
+#define ALTER_CHANGE_COLUMN_DEFAULT (1L << 8)
+#define ALTER_KEYS_ONOFF (1L << 9)
+#define ALTER_CONVERT (1L << 10)
+#define ALTER_FORCE (1L << 11)
+#define ALTER_RECREATE (1L << 12)
+#define ALTER_ADD_PARTITION (1L << 13)
+#define ALTER_DROP_PARTITION (1L << 14)
+#define ALTER_COALESCE_PARTITION (1L << 15)
+#define ALTER_REORGANIZE_PARTITION (1L << 16)
+#define ALTER_PARTITION (1L << 17)
+#define ALTER_OPTIMIZE_PARTITION (1L << 18)
+#define ALTER_TABLE_REORG (1L << 19)
+#define ALTER_REBUILD_PARTITION (1L << 20)
+#define ALTER_ALL_PARTITION (1L << 21)
+#define ALTER_ANALYZE_PARTITION (1L << 22)
+#define ALTER_CHECK_PARTITION (1L << 23)
+#define ALTER_REPAIR_PARTITION (1L << 24)
typedef struct st_alter_info
{
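
Because the ALTER_* values are now disjoint bits, one flags word can be
tested for a whole family of operations at once. A hypothetical check:

    if (lex->alter_info.flags & (ALTER_OPTIMIZE_PARTITION |
                                 ALTER_ANALYZE_PARTITION |
                                 ALTER_CHECK_PARTITION |
                                 ALTER_REPAIR_PARTITION))
    {
      /* route to the per-partition admin code path */
    }
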
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 20d14f5f196..eec66a5553e 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -62,33 +62,33 @@ static const char *begin_paren_str= "(";
static const char *comma_str= ",";
static char buff[22];
-bool get_partition_id_list(partition_info *part_info,
+int get_partition_id_list(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_range(partition_info *part_info,
+int get_partition_id_range(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_hash_nosub(partition_info *part_info,
+int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_key_nosub(partition_info *part_info,
+int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_linear_hash_nosub(partition_info *part_info,
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_linear_key_nosub(partition_info *part_info,
+int get_partition_id_linear_key_nosub(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_range_sub_hash(partition_info *part_info,
+int get_partition_id_range_sub_hash(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_range_sub_key(partition_info *part_info,
+int get_partition_id_range_sub_key(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_range_sub_linear_key(partition_info *part_info,
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_list_sub_hash(partition_info *part_info,
+int get_partition_id_list_sub_hash(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_list_sub_key(partition_info *part_info,
+int get_partition_id_list_sub_key(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
uint32 *part_id);
-bool get_partition_id_list_sub_linear_key(partition_info *part_info,
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
uint32 *part_id);
uint32 get_partition_id_hash_sub(partition_info *part_info);
uint32 get_partition_id_key_sub(partition_info *part_info);
@@ -100,12 +100,15 @@ uint32 get_partition_id_linear_key_sub(partition_info *part_info);
/*
A routine used by the parser to decide whether we are specifying a full
partitioning or only partitions to add or to split.
+
SYNOPSIS
is_partition_management()
lex Reference to the lex object
+
RETURN VALUE
TRUE Yes, it is part of a management partition command
FALSE No, not a management partition command
+
DESCRIPTION
This needs to be outside of WITH_PARTITION_STORAGE_ENGINE since it is
used from the sql parser that doesn't have any #ifdef's
@@ -115,31 +118,34 @@ my_bool is_partition_management(LEX *lex)
{
return (lex->sql_command == SQLCOM_ALTER_TABLE &&
(lex->alter_info.flags == ALTER_ADD_PARTITION ||
- lex->alter_info.flags == ALTER_REORGANISE_PARTITION));
+ lex->alter_info.flags == ALTER_REORGANIZE_PARTITION));
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
- A support function to check if a partition name is in a list of strings
+ A support function to check if a name is in a list of strings
+
SYNOPSIS
- is_partition_in_list()
- part_name String searched for
- list_part_names A list of names searched in
+ is_name_in_list()
+ name String searched for
+ list_names A list of names searched in
+
RETURN VALUES
TRUE String found
FALSE String not found
*/
-bool is_partition_in_list(char *part_name,
- List<char> list_part_names)
+bool is_name_in_list(char *name,
+ List<char> list_names)
{
- List_iterator<char> part_names_it(list_part_names);
- uint no_names= list_part_names.elements;
+ List_iterator<char> names_it(list_names);
+ uint no_names= list_names.elements;
uint i= 0;
+
do
{
- char *list_name= part_names_it++;
- if (!(my_strcasecmp(system_charset_info, part_name, list_name)))
+ char *list_name= names_it++;
+ if (!(my_strcasecmp(system_charset_info, name, list_name)))
return TRUE;
} while (++i < no_names);
return FALSE;
@@ -149,47 +155,99 @@ bool is_partition_in_list(char *part_name,
/*
A support function to check partition names for duplication in a
partitioned table
+
SYNOPSIS
- is_partitions_in_table()
+ are_partitions_in_table()
new_part_info New partition info
old_part_info Old partition info
+
RETURN VALUES
NULL No duplicate names found
!= NULL Pointer to the duplicated name
+
DESCRIPTION
Handles the case where the new and the old partition info are the
same, in which case it checks that the list of partition names
doesn't contain any duplicates.
*/
-bool is_partitions_in_table(partition_info *new_part_info,
- partition_info *old_part_info)
+char *are_partitions_in_table(partition_info *new_part_info,
+ partition_info *old_part_info)
{
- uint no_new_parts= new_part_info->partitions.elements, new_count;
- uint no_old_parts= old_part_info->partitions.elements, old_count;
+ uint no_new_parts= new_part_info->partitions.elements;
+ uint no_old_parts= old_part_info->partitions.elements;
+ uint new_count, old_count;
List_iterator<partition_element> new_parts_it(new_part_info->partitions);
- bool same_part_info= (new_part_info == old_part_info);
- DBUG_ENTER("is_partitions_in_table");
+ bool is_same_part_info= (new_part_info == old_part_info);
+ DBUG_ENTER("are_partitions_in_table");
+ DBUG_PRINT("enter", ("%u", no_new_parts));
new_count= 0;
do
{
List_iterator<partition_element> old_parts_it(old_part_info->partitions);
char *new_name= (new_parts_it++)->partition_name;
+ DBUG_PRINT("info", ("%s", new_name));
new_count++;
old_count= 0;
do
{
char *old_name= (old_parts_it++)->partition_name;
old_count++;
- if (same_part_info && old_count == new_count)
+ if (is_same_part_info && old_count == new_count)
break;
if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
{
- DBUG_RETURN(TRUE);
+ DBUG_PRINT("info", ("old_name = %s, not ok", old_name));
+ DBUG_RETURN(old_name);
}
} while (old_count < no_old_parts);
} while (new_count < no_new_parts);
+ DBUG_RETURN(NULL);
+}
+
+/*
+  Set up defaults for partitions.
+
+  SYNOPSIS
+    partition_default_handling()
+      table      Table object
+      part_info  Partition info to set up
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+*/
+
+bool partition_default_handling(TABLE *table, partition_info *part_info)
+{
+ DBUG_ENTER("partition_default_handling");
+
+ if (part_info->use_default_no_partitions)
+ {
+ if (table->file->get_no_parts(table->s->normalized_path.str,
+ &part_info->no_parts))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (is_sub_partitioned(part_info) &&
+ part_info->use_default_no_subpartitions)
+ {
+ uint no_parts;
+ if (table->file->get_no_parts(table->s->normalized_path.str,
+ &no_parts))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_ASSERT(part_info->no_parts > 0);
+ part_info->no_subparts= no_parts / part_info->no_parts;
+ DBUG_ASSERT((no_parts % part_info->no_parts) == 0);
+ }
+ set_up_defaults_for_partitioning(part_info, table->file,
+ (ulonglong)0, (uint)0);
DBUG_RETURN(FALSE);
}
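
As a worked example (hypothetical numbers): if an auto-partitioned,
subpartitioned table's engine reports 12 fragments through get_no_parts()
and part_info->no_parts is 4, the code above derives
no_subparts = 12 / 4 = 3, and the DBUG_ASSERT verifies that the division
is exact.
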
@@ -240,7 +298,7 @@ bool check_reorganise_list(partition_info *new_part_info,
break;
if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
{
- if (!is_partition_in_list(old_name, list_part_names))
+ if (!is_name_in_list(old_name, list_part_names))
DBUG_RETURN(TRUE);
}
} while (old_count < no_old_parts);
@@ -252,23 +310,19 @@ bool check_reorganise_list(partition_info *new_part_info,
/*
A useful routine used by update_row for partition handlers to calculate
the partition ids of the old and the new record.
+
SYNOPSIS
get_part_for_update()
old_data Buffer of old record
new_data Buffer of new record
rec0 Reference to table->record[0]
part_info Reference to partition information
- part_field_array A NULL-terminated array of fields for partition
- function
- old_part_id The returned partition id of old record
- new_part_id The returned partition id of new record
+ out:old_part_id The returned partition id of old record
+ out:new_part_id The returned partition id of new record
+
RETURN VALUE
0 Success
> 0 Error code
- DESCRIPTION
- Dependent on whether buf is not record[0] we need to prepare the
- fields. Then we call the function pointer get_partition_id to
- calculate the partition ids.
*/
int get_parts_for_update(const byte *old_data, byte *new_data,
@@ -278,8 +332,8 @@ int get_parts_for_update(const byte *old_data, byte *new_data,
Field **part_field_array= part_info->full_part_field_array;
int error;
DBUG_ENTER("get_parts_for_update");
- DBUG_ASSERT(new_data == rec0);
+ DBUG_ASSERT(new_data == rec0);
set_field_ptr(part_field_array, old_data, rec0);
error= part_info->get_partition_id(part_info, old_part_id);
set_field_ptr(part_field_array, rec0, old_data);
@@ -321,17 +375,18 @@ int get_parts_for_update(const byte *old_data, byte *new_data,
/*
A useful routine used by delete_row for partition handlers to calculate
the partition id.
+
SYNOPSIS
get_part_for_delete()
buf Buffer of old record
rec0 Reference to table->record[0]
part_info Reference to partition information
- part_field_array A NULL-terminated array of fields for partition
- function
- part_id The returned partition id to delete from
+ out:part_id The returned partition id to delete from
+
RETURN VALUE
0 Success
> 0 Error code
+
DESCRIPTION
Depending on whether buf is record[0] or not, we need to prepare the
fields. Then we call the function pointer get_partition_id to
@@ -373,12 +428,15 @@ int get_part_for_delete(const byte *buf, const byte *rec0,
check what partition a certain value belongs to. At the same time it does
also check that the range constants are defined in increasing order and
that the expressions are constant integer expressions.
+
SYNOPSIS
check_range_constants()
- part_info
+ part_info Partition info
+
RETURN VALUE
TRUE An error occurred during creation of range constants
FALSE Successful creation of range constant mapping
+
DESCRIPTION
This routine is called from check_partition_info to get a quick error
before we come too far into the CREATE TABLE process. It is also called
@@ -389,8 +447,10 @@ int get_part_for_delete(const byte *buf, const byte *rec0,
static bool check_range_constants(partition_info *part_info)
{
partition_element* part_def;
- longlong current_largest_int= LONGLONG_MIN, part_range_value_int;
- uint no_parts= part_info->no_parts, i;
+ longlong current_largest_int= LONGLONG_MIN;
+ longlong part_range_value_int;
+ uint no_parts= part_info->no_parts;
+ uint i;
List_iterator<partition_element> it(part_info->partitions);
bool result= TRUE;
DBUG_ENTER("check_range_constants");
@@ -401,7 +461,7 @@ static bool check_range_constants(partition_info *part_info)
(longlong*)sql_alloc(no_parts * sizeof(longlong));
if (unlikely(part_info->range_int_array == NULL))
{
- my_error(ER_OUTOFMEMORY, MYF(0), no_parts*sizeof(longlong));
+ mem_alloc_error(no_parts * sizeof(longlong));
goto end;
}
i= 0;
@@ -432,10 +492,12 @@ end:
/*
A support routine for check_list_constants used by qsort to sort the
constant list expressions.
+
SYNOPSIS
list_part_cmp()
a First list constant to compare with
b Second list constant to compare with
+
RETURN VALUE
+1 a > b
0 a == b
@@ -444,9 +506,8 @@ end:
static int list_part_cmp(const void* a, const void* b)
{
- longlong a1, b1;
- a1= ((LIST_PART_ENTRY*)a)->list_value;
- b1= ((LIST_PART_ENTRY*)b)->list_value;
+ longlong a1= ((LIST_PART_ENTRY*)a)->list_value;
+ longlong b1= ((LIST_PART_ENTRY*)b)->list_value;
if (a1 < b1)
return -1;
else if (a1 > b1)
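
The comparator is intended for qsort() over the LIST_PART_ENTRY array that
check_list_constants() allocates; a usage sketch (the call itself is not
shown in this hunk):

    qsort((void*) part_info->list_array, no_list_values,
          sizeof(LIST_PART_ENTRY), &list_part_cmp);
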
@@ -461,12 +522,15 @@ static int list_part_cmp(const void* a, const void* b)
check what partition a certain value belongs to. At the same time it
also checks that there are no duplicates among the list constants and
that the list expressions are constant integer expressions.
+
SYNOPSIS
check_list_constants()
- part_info
+ part_info Partition info
+
RETURN VALUE
TRUE An error occurred during creation of list constants
FALSE Successful creation of list constant mapping
+
DESCRIPTION
This routine is called from check_partition_info to get a quick error
before we come too far into the CREATE TABLE process. It is also called
@@ -476,9 +540,12 @@ static int list_part_cmp(const void* a, const void* b)
static bool check_list_constants(partition_info *part_info)
{
- uint i, no_list_values= 0, no_parts, list_index= 0;
+ uint i, no_parts;
+ uint no_list_values= 0;
+ uint list_index= 0;
longlong *list_value;
- bool not_first, result= TRUE;
+ bool not_first;
+ bool result= TRUE;
longlong curr_value, prev_value;
partition_element* part_def;
List_iterator<partition_element> list_func_it(part_info->partitions);
@@ -516,7 +583,7 @@ static bool check_list_constants(partition_info *part_info)
(LIST_PART_ENTRY*)sql_alloc(no_list_values*sizeof(LIST_PART_ENTRY));
if (unlikely(part_info->list_array == NULL))
{
- my_error(ER_OUTOFMEMORY, MYF(0), no_list_values*sizeof(LIST_PART_ENTRY));
+ mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY));
goto end;
}
@@ -560,12 +627,16 @@ end:
/*
Create a memory area where default partition names are stored and fill it
up with the names.
+
SYNOPSIS
create_default_partition_names()
no_parts Number of partitions
+ start_no Starting partition number
subpart Is it subpartitions
+
RETURN VALUE
A pointer to the memory area of the default partition names
+
DESCRIPTION
A support routine for the partition code where default values are
generated.
@@ -575,17 +646,18 @@ end:
#define MAX_PART_NAME_SIZE 8
static char *create_default_partition_names(uint no_parts, uint start_no,
- bool subpart)
+ bool is_subpart)
{
char *ptr= sql_calloc(no_parts*MAX_PART_NAME_SIZE);
char *move_ptr= ptr;
uint i= 0;
DBUG_ENTER("create_default_partition_names");
+
if (likely(ptr != 0))
{
do
{
- if (subpart)
+ if (is_subpart)
my_sprintf(move_ptr, (move_ptr,"sp%u", (start_no + i)));
else
my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i)));
@@ -594,7 +666,7 @@ static char *create_default_partition_names(uint no_parts, uint start_no,
}
else
{
- my_error(ER_OUTOFMEMORY, MYF(0), no_parts*MAX_PART_NAME_SIZE);
+ mem_alloc_error(no_parts*MAX_PART_NAME_SIZE);
}
DBUG_RETURN(ptr);
}
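
For example, create_default_partition_names(4, 0, FALSE) fills the area
with the names p0, p1, p2 and p3, each in its own MAX_PART_NAME_SIZE slot;
with is_subpart set, the same call would yield sp0 through sp3.
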
@@ -604,14 +676,18 @@ static char *create_default_partition_names(uint no_parts, uint start_no,
Set up all the default partitions not set-up by the user in the SQL
statement. Also perform a number of checks that the user hasn't tried
to use default values where no defaults exist.
+
SYNOPSIS
set_up_default_partitions()
part_info The reference to all partition information
file A reference to a handler of the table
max_rows Maximum number of rows stored in the table
+ start_no Starting partition number
+
RETURN VALUE
TRUE Error, attempted default values not possible
FALSE Ok, default partitions set-up
+
DESCRIPTION
The routine uses the underlying handler of the partitioning to define
the default number of partitions. For some handlers this requires
@@ -643,7 +719,6 @@ static bool set_up_default_partitions(partition_info *part_info,
if (part_info->no_parts == 0)
part_info->no_parts= file->get_default_no_partitions(max_rows);
no_parts= part_info->no_parts;
- part_info->use_default_partitions= FALSE;
if (unlikely(no_parts > MAX_PARTITIONS))
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -657,16 +732,16 @@ static bool set_up_default_partitions(partition_info *part_info,
do
{
partition_element *part_elem= new partition_element();
- if (likely(part_elem != 0))
+ if (likely(part_elem != 0 &&
+ (!part_info->partitions.push_back(part_elem))))
{
- part_elem->engine_type= NULL;
+ part_elem->engine_type= part_info->default_engine_type;
part_elem->partition_name= default_name;
default_name+=MAX_PART_NAME_SIZE;
- part_info->partitions.push_back(part_elem);
}
else
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+ mem_alloc_error(sizeof(partition_element));
goto end;
}
} while (++i < no_parts);
@@ -680,14 +755,17 @@ end:
Set up all the default subpartitions not set-up by the user in the SQL
statement. Also perform a number of checks that the default partitioning
becomes an allowed partitioning scheme.
+
SYNOPSIS
set_up_default_subpartitions()
part_info The reference to all partition information
file A reference to a handler of the table
max_rows Maximum number of rows stored in the table
+
RETURN VALUE
TRUE Error, attempted default values not possible
FALSE Ok, default partitions set-up
+
DESCRIPTION
The routine uses the underlying handler of the partitioning to define
the default number of partitions. For some handlers this requires
@@ -711,7 +789,6 @@ static bool set_up_default_subpartitions(partition_info *part_info,
part_info->no_subparts= file->get_default_no_partitions(max_rows);
no_parts= part_info->no_parts;
no_subparts= part_info->no_subparts;
- part_info->use_default_subpartitions= FALSE;
if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS))
{
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
@@ -729,16 +806,16 @@ static bool set_up_default_subpartitions(partition_info *part_info,
do
{
partition_element *subpart_elem= new partition_element();
- if (likely(subpart_elem != 0))
+ if (likely(subpart_elem != 0 &&
+ (!part_elem->subpartitions.push_back(subpart_elem))))
{
- subpart_elem->engine_type= NULL;
+ subpart_elem->engine_type= part_info->default_engine_type;
subpart_elem->partition_name= name_ptr;
name_ptr+= MAX_PART_NAME_SIZE;
- part_elem->subpartitions.push_back(subpart_elem);
}
else
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+ mem_alloc_error(sizeof(partition_element));
goto end;
}
} while (++j < no_subparts);
@@ -750,18 +827,22 @@ end:
/*
- Set up defaults for partition or subpartition (cannot set-up for both,
- this will return an error.
+ Support routine for check_partition_info
+
SYNOPSIS
set_up_defaults_for_partitioning()
part_info The reference to all partition information
file A reference to a handler of the table
max_rows Maximum number of rows stored in the table
+ start_no Starting partition number
+
RETURN VALUE
TRUE Error, attempted default values not possible
FALSE Ok, default partitions set-up
+
DESCRIPTION
- Support routine for check_partition_info
+    Set up defaults for partition or subpartition (cannot set up both;
+    attempting to do so will return an error).
*/
bool set_up_defaults_for_partitioning(partition_info *part_info,
@@ -770,11 +851,15 @@ bool set_up_defaults_for_partitioning(partition_info *part_info,
{
DBUG_ENTER("set_up_defaults_for_partitioning");
- if (part_info->use_default_partitions)
- DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows,
- start_no));
- if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions)
- DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows));
+ if (!part_info->default_partitions_setup)
+ {
+ part_info->default_partitions_setup= TRUE;
+ if (part_info->use_default_partitions)
+ DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows,
+ start_no));
+ if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions)
+ DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows));
+ }
DBUG_RETURN(FALSE);
}
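
Note the effect of the new default_partitions_setup guard: it makes the
routine idempotent, so that it can be reached both from
partition_default_handling() and from check_partition_info() while the
default partition list is only built once.
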
@@ -782,21 +867,22 @@ bool set_up_defaults_for_partitioning(partition_info *part_info,
/*
Check that all partitions use the same storage engine.
This is currently a limitation in this version.
+
SYNOPSIS
check_engine_mix()
engine_array An array of engine identifiers
no_parts Total number of partitions
+
RETURN VALUE
TRUE Error, mixed engines
FALSE Ok, no mixed engines
+ DESCRIPTION
+    The current check verifies only that all handlers are the same.
+    Later this check will be more sophisticated.
*/
static bool check_engine_mix(handlerton **engine_array, uint no_parts)
{
- /*
- Current check verifies only that all handlers are the same.
- Later this check will be more sophisticated.
- */
uint i= 0;
bool result= FALSE;
DBUG_ENTER("check_engine_mix");
@@ -814,31 +900,35 @@ static bool check_engine_mix(handlerton **engine_array, uint no_parts)
/*
- We will check that the partition info requested is possible to set-up in
- this version. This routine is an extension of the parser one could say.
- If defaults were used we will generate default data structures for all
- partitions.
+ This code is used early in the CREATE TABLE and ALTER TABLE process.
+
SYNOPSIS
check_partition_info()
part_info The reference to all partition information
- db_type Default storage engine if no engine specified per
- partition.
file A reference to a handler of the table
max_rows Maximum number of rows stored in the table
+ engine_type Return value for used engine in partitions
+
RETURN VALUE
TRUE Error, something went wrong
FALSE Ok, full partition data structures are now generated
+
DESCRIPTION
- This code is used early in the CREATE TABLE and ALTER TABLE process.
+    We will check that the requested partition info is possible to set up
+    in this version. One could say this routine is an extension of the
+    parser. If defaults were used, we will generate default data
+    structures for all partitions.
+
*/
-bool check_partition_info(partition_info *part_info,handlerton *eng_type,
+bool check_partition_info(partition_info *part_info,handlerton **eng_type,
handler *file, ulonglong max_rows)
{
handlerton **engine_array= NULL;
- uint part_count= 0, i, no_parts, tot_partitions;
+ uint part_count= 0;
+ uint i, no_parts, tot_partitions;
bool result= TRUE;
- List_iterator<partition_element> part_it(part_info->partitions);
+ char *same_name;
DBUG_ENTER("check_partition_info");
if (unlikely(is_sub_partitioned(part_info) &&
@@ -858,9 +948,10 @@ bool check_partition_info(partition_info *part_info,handlerton *eng_type,
my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
goto end;
}
- if (unlikely(is_partitions_in_table(part_info, part_info)))
+  if ((same_name= are_partitions_in_table(part_info, part_info)))
{
- my_error(ER_SAME_NAME_PARTITION, MYF(0));
+ my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name);
goto end;
}
engine_array= (handlerton**)my_malloc(tot_partitions * sizeof(handlerton *),
@@ -869,36 +960,44 @@ bool check_partition_info(partition_info *part_info,handlerton *eng_type,
goto end;
i= 0;
no_parts= part_info->no_parts;
- do
{
- partition_element *part_elem= part_it++;
- if (!is_sub_partitioned(part_info))
- {
- if (part_elem->engine_type == NULL)
- part_elem->engine_type= eng_type;
- DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name));
- engine_array[part_count++]= part_elem->engine_type;
- }
- else
+ List_iterator<partition_element> part_it(part_info->partitions);
+ do
{
- uint j= 0, no_subparts= part_info->no_subparts;;
- List_iterator<partition_element> sub_it(part_elem->subpartitions);
- do
+ partition_element *part_elem= part_it++;
+ if (!is_sub_partitioned(part_info))
{
- part_elem= sub_it++;
if (part_elem->engine_type == NULL)
- part_elem->engine_type= eng_type;
- DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name));
+ part_elem->engine_type= part_info->default_engine_type;
+ DBUG_PRINT("info", ("engine = %d",
+ ha_legacy_type(part_elem->engine_type)));
engine_array[part_count++]= part_elem->engine_type;
- } while (++j < no_subparts);
- }
- } while (++i < part_info->no_parts);
+ }
+ else
+ {
+        uint j= 0, no_subparts= part_info->no_subparts;
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ do
+ {
+ part_elem= sub_it++;
+ if (part_elem->engine_type == NULL)
+ part_elem->engine_type= part_info->default_engine_type;
+ DBUG_PRINT("info", ("engine = %u",
+ ha_legacy_type(part_elem->engine_type)));
+ engine_array[part_count++]= part_elem->engine_type;
+ } while (++j < no_subparts);
+ }
+ } while (++i < part_info->no_parts);
+ }
if (unlikely(check_engine_mix(engine_array, part_count)))
{
my_error(ER_MIX_HANDLER_ERROR, MYF(0));
goto end;
}
+ if (eng_type)
+ *eng_type= (handlerton*)engine_array[0];
+
/*
We need to check all constant expressions that they are of the correct
type and that they are increasing for ranges and not overlapping for
@@ -918,51 +1017,54 @@ end:
/*
- A great number of functions below here is part of the fix_partition_func
- method. It is used to set up the partition structures for execution from
- openfrm. It is called at the end of the openfrm when the table struct has
- been set-up apart from the partition information.
- It involves:
- 1) Setting arrays of fields for the partition functions.
- 2) Setting up binary search array for LIST partitioning
- 3) Setting up array for binary search for RANGE partitioning
- 4) Setting up key_map's to assist in quick evaluation whether one
- can deduce anything from a given index of what partition to use
- 5) Checking whether a set of partitions can be derived from a range on
- a field in the partition function.
- As part of doing this there is also a great number of error controls.
- This is actually the place where most of the things are checked for
- partition information when creating a table.
- Things that are checked includes
- 1) No NULLable fields in partition function
- 2) All fields of partition function in Primary keys and unique indexes
- (if not supported)
- 3) No fields in partition function that are BLOB's or VARCHAR with a
- collation other than the binary collation.
-
+  This method is used to set up both the partition and subpartition
+  field arrays and is used for all types of partitioning.
+  It is part of the logic around fix_partition_func.
-
- Create an array of partition fields (NULL terminated). Before this method
- is called fix_fields or find_table_in_sef has been called to set
- GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
- function.
SYNOPSIS
set_up_field_array()
table TABLE object for which partition fields are set-up
sub_part Is the table subpartitioned as well
+
RETURN VALUE
TRUE Error, some field didn't meet requirements
FALSE Ok, partition field array set-up
+
DESCRIPTION
- This method is used to set-up both partition and subpartitioning
- field array and used for all types of partitioning.
- It is part of the logic around fix_partition_func.
+
+  A great number of functions below here are part of the fix_partition_func
+  method. It is used to set up the partition structures for execution from
+  openfrm. It is called at the end of openfrm when the table struct has
+  been set up apart from the partition information.
+  It involves:
+  1) Setting arrays of fields for the partition functions.
+  2) Setting up a binary search array for LIST partitioning.
+  3) Setting up an array for binary search for RANGE partitioning.
+  4) Setting up key_map's to assist in quickly evaluating whether one
+     can deduce anything about which partition to use from a given index.
+  5) Checking whether a set of partitions can be derived from a range on
+     a field in the partition function.
+  As part of doing this there is also a great number of error checks.
+  This is actually the place where most of the partition information
+  is checked when creating a table.
+  Things that are checked include:
+  1) All fields of the partition function are part of primary keys and
+     unique indexes (if these are not supported by the engine).
+
+  Create an array of partition fields (NULL terminated). Before this method
+  is called fix_fields or find_table_in_sef has been called to set
+  GET_FIXED_FIELDS_FLAG on all fields that are part of the partition
+  function.
*/
+
static bool set_up_field_array(TABLE *table,
- bool sub_part)
+ bool is_sub_part)
{
Field **ptr, *field, **field_array;
- uint no_fields= 0, size_field_array, i= 0;
+ uint no_fields= 0;
+ uint size_field_array;
+ uint i= 0;
partition_info *part_info= table->part_info;
int result= FALSE;
DBUG_ENTER("set_up_field_array");
@@ -973,11 +1075,19 @@ static bool set_up_field_array(TABLE *table,
if (field->flags & GET_FIXED_FIELDS_FLAG)
no_fields++;
}
+ if (no_fields == 0)
+ {
+ /*
+ We are using hidden key as partitioning field
+ */
+ DBUG_ASSERT(!is_sub_part);
+ DBUG_RETURN(result);
+ }
size_field_array= (no_fields+1)*sizeof(Field*);
field_array= (Field**)sql_alloc(size_field_array);
if (unlikely(!field_array))
{
- my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+ mem_alloc_error(size_field_array);
result= TRUE;
}
ptr= table->field;
@@ -997,11 +1107,6 @@ static bool set_up_field_array(TABLE *table,
1) Not be a BLOB of any type
A BLOB takes too long time to evaluate so we don't want it for
performance reasons.
- 2) Not be a VARCHAR other than VARCHAR with a binary collation
- A VARCHAR with character sets can have several values being
- equal with different number of spaces or NULL's. This is not a
- good ground for a safe and exact partition function. Thus it is
- not allowed in partition functions.
*/
if (unlikely(field->flags & BLOB_FLAG))
@@ -1009,17 +1114,11 @@ static bool set_up_field_array(TABLE *table,
my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0));
result= TRUE;
}
- else if (unlikely((!field->flags & BINARY_FLAG) &&
- field->real_type() == MYSQL_TYPE_VARCHAR))
- {
- my_error(ER_CHAR_SET_IN_PART_FIELD_ERROR, MYF(0));
- result= TRUE;
- }
}
}
}
field_array[no_fields]= 0;
- if (!sub_part)
+ if (!is_sub_part)
{
part_info->part_field_array= field_array;
part_info->no_part_fields= no_fields;
@@ -1036,13 +1135,16 @@ static bool set_up_field_array(TABLE *table,
/*
Create a field array including all fields of both the partitioning and the
subpartitioning functions.
+
SYNOPSIS
create_full_part_field_array()
table TABLE object for which partition fields are set-up
part_info Reference to partitioning data structure
+
RETURN VALUE
TRUE Memory allocation of field array failed
FALSE Ok
+
DESCRIPTION
If there is no subpartitioning then the same array is used as for the
partitioning. Otherwise a new array is built up using the flag
@@ -1075,7 +1177,7 @@ static bool create_full_part_field_array(TABLE *table,
field_array= (Field**)sql_alloc(size_field_array);
if (unlikely(!field_array))
{
- my_error(ER_OUTOFMEMORY, MYF(0), size_field_array);
+ mem_alloc_error(size_field_array);
result= TRUE;
goto end;
}
@@ -1096,21 +1198,25 @@ end:
/*
- These support routines is used to set/reset an indicator of all fields
- in a certain key. It is used in conjunction with another support routine
- that traverse all fields in the PF to find if all or some fields in the
- PF is part of the key. This is used to check primary keys and unique
- keys involve all fields in PF (unless supported) and to derive the
- key_map's used to quickly decide whether the index can be used to
- derive which partitions are needed to scan.
-
-
Clear flag GET_FIXED_FIELDS_FLAG in all fields of a key previously set by
set_indicator_in_key_fields (always used in pairs).
+
SYNOPSIS
clear_indicator_in_key_fields()
key_info Reference to find the key fields
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+    These support routines are used to set/reset an indicator on all fields
+    in a certain key. They are used in conjunction with another support
+    routine that traverses all fields in the PF to find out whether all or
+    some fields in the PF are part of the key. This is used to check that
+    primary keys and unique keys involve all fields in the PF (unless
+    supported) and to derive the key_map's used to quickly decide whether
+    the index can be used to derive which partitions need to be scanned.
*/
static void clear_indicator_in_key_fields(KEY *key_info)
@@ -1124,9 +1230,13 @@ static void clear_indicator_in_key_fields(KEY *key_info)
/*
Set flag GET_FIXED_FIELDS_FLAG in all fields of a key.
+
SYNOPSIS
set_indicator_in_key_fields
key_info Reference to find the key fields
+
+ RETURN VALUE
+ NONE
*/
static void set_indicator_in_key_fields(KEY *key_info)
@@ -1141,11 +1251,13 @@ static void set_indicator_in_key_fields(KEY *key_info)
/*
Check if all or some fields in partition field array is part of a key
previously used to tag key fields.
+
SYNOPSIS
check_fields_in_PF()
ptr Partition field array
- all_fields Is all fields of partition field array used in key
- some_fields Is some fields of partition field array used in key
+    out:all_fields   Are all fields of the partition field array used in key
+    out:some_fields  Are some fields of the partition field array used in key
+
RETURN VALUE
all_fields, some_fields
*/
@@ -1154,6 +1266,7 @@ static void check_fields_in_PF(Field **ptr, bool *all_fields,
bool *some_fields)
{
DBUG_ENTER("check_fields_in_PF");
+
*all_fields= TRUE;
*some_fields= FALSE;
do
@@ -1171,9 +1284,13 @@ static void check_fields_in_PF(Field **ptr, bool *all_fields,
/*
Clear flag GET_FIXED_FIELDS_FLAG in all fields of the table.
This routine is used for error handling purposes.
+
SYNOPSIS
clear_field_flag()
table TABLE object for which partition fields are set-up
+
+ RETURN VALUE
+ NONE
*/
static void clear_field_flag(TABLE *table)
@@ -1188,35 +1305,42 @@ static void clear_field_flag(TABLE *table)
/*
- This routine sets-up the partition field array for KEY partitioning, it
- also verifies that all fields in the list of fields is actually a part of
- the table.
+ find_field_in_table_sef finds the field given its name. All fields get
+ GET_FIXED_FIELDS_FLAG set.
+
SYNOPSIS
handle_list_of_fields()
it A list of field names for the partition function
table TABLE object for which partition fields are set-up
part_info Reference to partitioning data structure
sub_part Is the table subpartitioned as well
+
RETURN VALUE
TRUE Fields in list of fields not part of table
FALSE All fields ok and array created
+
DESCRIPTION
- find_field_in_table_sef finds the field given its name. All fields get
- GET_FIXED_FIELDS_FLAG set.
+    This routine sets up the partition field array for KEY partitioning; it
+    also verifies that all fields in the list of fields are actually part
+    of the table.
+
*/
+
static bool handle_list_of_fields(List_iterator<char> it,
TABLE *table,
partition_info *part_info,
- bool sub_part)
+ bool is_sub_part)
{
Field *field;
bool result;
char *field_name;
+ bool is_list_empty= TRUE;
DBUG_ENTER("handle_list_of_fields");
while ((field_name= it++))
{
+ is_list_empty= FALSE;
field= find_field_in_table_sef(table, field_name);
if (likely(field != 0))
field->flags|= GET_FIXED_FIELDS_FLAG;
@@ -1228,19 +1352,54 @@ static bool handle_list_of_fields(List_iterator<char> it,
goto end;
}
}
- result= set_up_field_array(table, sub_part);
+ if (is_list_empty)
+ {
+ uint primary_key= table->s->primary_key;
+ if (primary_key != MAX_KEY)
+ {
+ uint no_key_parts= table->key_info[primary_key].key_parts, i;
+ /*
+        In the case of an empty list we use the primary key as partition key.
+ */
+ for (i= 0; i < no_key_parts; i++)
+ {
+ Field *field= table->key_info[primary_key].key_part[i].field;
+ field->flags|= GET_FIXED_FIELDS_FLAG;
+ }
+ }
+ else
+ {
+ if (table->s->db_type->partition_flags &&
+ (table->s->db_type->partition_flags() & HA_USE_AUTO_PARTITION) &&
+ (table->s->db_type->partition_flags() & HA_CAN_PARTITION))
+ {
+ /*
+ This engine can handle automatic partitioning and there is no
+          primary key. In this case we rely on the engine to handle
+          partitioning based on a hidden key. Thus we allocate no
+          array of partitioning fields.
+ */
+ DBUG_RETURN(FALSE);
+ }
+ else
+ {
+ my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ result= set_up_field_array(table, is_sub_part);
end:
DBUG_RETURN(result);
}
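
The empty-list handling above amounts to a three-way decision. A minimal
sketch of that decision, with hypothetical helper names (not from the source):

enum part_key_source { PK_FIELDS, HIDDEN_KEY, NO_KEY_ERROR };

/* Condensed view of the PARTITION BY KEY() fallback logic above. */
static part_key_source key_partition_source(bool has_primary_key,
                                            bool engine_auto_partitions)
{
  if (has_primary_key)
    return PK_FIELDS;     /* use the primary key fields as partition key */
  if (engine_auto_partitions)
    return HIDDEN_KEY;    /* engine partitions on its own hidden key */
  return NO_KEY_ERROR;    /* reported as ER_FIELD_NOT_FOUND_PART_ERROR */
}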
/*
- This function is used to build an array of partition fields for the
- partitioning function and subpartitioning function. The partitioning
- function is an item tree that must reference at least one field in the
- table. This is checked first in the parser that the function doesn't
- contain non-cacheable parts (like a random function) and by checking
- here that the function isn't a constant function.
+ The function uses a new feature in fix_fields where the flag
+ GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
+    This flag must always be reset before returning from the function
+ since it is used for other purposes as well.
+
SYNOPSIS
fix_fields_part_func()
thd The thread object
@@ -1248,35 +1407,38 @@ end:
func_expr The item tree reference of the partition function
part_info Reference to partitioning data structure
sub_part Is the table subpartitioned as well
+
RETURN VALUE
TRUE An error occurred, something was wrong with the
partition function.
FALSE Ok, a partition field array was created
+
DESCRIPTION
- The function uses a new feature in fix_fields where the flag
- GET_FIXED_FIELDS_FLAG is set for all fields in the item tree.
- This field must always be reset before returning from the function
- since it is used for other purposes as well.
-*/
+ This function is used to build an array of partition fields for the
+ partitioning function and subpartitioning function. The partitioning
+ function is an item tree that must reference at least one field in the
+    table. The parser first checks that the function doesn't contain
+    non-cacheable parts (like a random function), and here we check
+    that the function isn't a constant function.
-static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
- Item* func_expr, partition_info *part_info,
- bool sub_part)
-{
- /*
Calculate the number of fields in the partition function.
Use it to allocate memory for the array of Field pointers.
Initialise array of field pointers. Use information set when
calling fix_fields and reset it immediately after.
The get_fields_in_item_tree activates setting of bit in flags
on the field object.
- */
+*/
+static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
+ Item* func_expr, partition_info *part_info,
+ bool is_sub_part)
+{
bool result= TRUE;
TABLE *table= tables->table;
TABLE_LIST *save_table_list, *save_first_table, *save_last_table;
int error;
Name_resolution_context *context;
+ const char *save_where;
DBUG_ENTER("fix_fields_part_func");
context= thd->lex->current_context();
@@ -1289,6 +1451,7 @@ static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
context->first_name_resolution_table= tables;
context->last_name_resolution_table= NULL;
func_expr->walk(&Item::change_context_processor, (byte*) context);
+ save_where= thd->where;
thd->where= "partition function";
error= func_expr->fix_fields(thd, (Item**)0);
context->table_list= save_table_list;
@@ -1300,13 +1463,14 @@ static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables,
clear_field_flag(table);
goto end;
}
+ thd->where= save_where;
if (unlikely(func_expr->const_item()))
{
my_error(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR, MYF(0));
clear_field_flag(table);
goto end;
}
- result= set_up_field_array(table, sub_part);
+ result= set_up_field_array(table, is_sub_part);
end:
table->get_fields_in_item_tree= FALSE;
table->map= 0; //Restore old value
@@ -1315,24 +1479,30 @@ end:
/*
- This function verifies that if there is a primary key that it contains
- all the fields of the partition function.
- This is a temporary limitation that will hopefully be removed after a
- while.
+ Check that the primary key contains all partition fields if defined
+
SYNOPSIS
check_primary_key()
table TABLE object for which partition fields are set-up
+
RETURN VALUES
TRUE                 Not all fields in partitioning function were part
of primary key
FALSE Ok, all fields of partitioning function were part
of primary key
+
+ DESCRIPTION
+    This function verifies that, if there is a primary key, it contains
+    all the fields of the partition function.
+ This is a temporary limitation that will hopefully be removed after a
+ while.
*/
static bool check_primary_key(TABLE *table)
{
uint primary_key= table->s->primary_key;
- bool all_fields, some_fields, result= FALSE;
+ bool all_fields, some_fields;
+ bool result= FALSE;
DBUG_ENTER("check_primary_key");
if (primary_key < MAX_KEY)
@@ -1352,25 +1522,33 @@ static bool check_primary_key(TABLE *table)
/*
- This function verifies that if there is a unique index that it contains
- all the fields of the partition function.
- This is a temporary limitation that will hopefully be removed after a
- while.
+  Check that unique keys contain all partition fields
+
SYNOPSIS
check_unique_keys()
table TABLE object for which partition fields are set-up
+
RETURN VALUES
TRUE                 Not all fields in partitioning function were part
of all unique keys
FALSE Ok, all fields of partitioning function were part
of unique keys
+
+ DESCRIPTION
+    This function verifies that, if there is a unique index, it contains
+    all the fields of the partition function.
+ This is a temporary limitation that will hopefully be removed after a
+ while.
*/
static bool check_unique_keys(TABLE *table)
{
- bool all_fields, some_fields, result= FALSE;
- uint keys= table->s->keys, i;
+ bool all_fields, some_fields;
+ bool result= FALSE;
+ uint keys= table->s->keys;
+ uint i;
DBUG_ENTER("check_unique_keys");
+
for (i= 0; i < keys; i++)
{
if (table->key_info[i].flags & HA_NOSAME) //Unique index
@@ -1434,9 +1612,11 @@ static bool check_unique_keys(TABLE *table)
indicating this to notify that we can also use ranges on the field
of the PF to deduce a set of partitions if the fields of the PF were
not all fully bound.
+
SYNOPSIS
check_range_capable_PF()
table TABLE object for which partition fields are set-up
+
DESCRIPTION
Support for this is not implemented yet.
*/
@@ -1444,35 +1624,76 @@ static bool check_unique_keys(TABLE *table)
void check_range_capable_PF(TABLE *table)
{
DBUG_ENTER("check_range_capable_PF");
+
DBUG_VOID_RETURN;
}
/*
+ Set up partition bitmap
+
+ SYNOPSIS
+ set_up_partition_bitmap()
+ thd Thread object
+ part_info Reference to partitioning data structure
+
+ RETURN VALUE
+ TRUE Memory allocation failure
+ FALSE Success
+
+ DESCRIPTION
+ Allocate memory for bitmap of the partitioned table
+ and initialise it.
+*/
+
+static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
+{
+ uint32 *bitmap_buf;
+  uint bitmap_bits= part_info->no_subparts ?
+                    (part_info->no_subparts * part_info->no_parts) :
+                    part_info->no_parts;
+ uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
+ DBUG_ENTER("set_up_partition_bitmap");
+
+ if (!(bitmap_buf= (uint32*)thd->alloc(bitmap_bytes)))
+ {
+ mem_alloc_error(bitmap_bytes);
+ DBUG_RETURN(TRUE);
+ }
+ bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
+ DBUG_RETURN(FALSE);
+}
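
The bitmap holds one bit per leaf partition, so a subpartitioned table needs
no_parts * no_subparts bits. A small sketch of the sizing rule used above:

/* One bit per leaf partition: 4 RANGE partitions with 2 HASH
   subpartitions each need 4 * 2 = 8 bits; without subpartitions
   only no_parts bits are needed. */
static unsigned partition_bitmap_bits(unsigned no_parts,
                                      unsigned no_subparts)
{
  return no_subparts ? (no_subparts * no_parts) : no_parts;
}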
+
+
+/*
Set up partition key maps
+
SYNOPSIS
set_up_partition_key_maps()
table TABLE object for which partition fields are set-up
part_info Reference to partitioning data structure
+
RETURN VALUES
None
+
DESCRIPTION
- This function sets up a couple of key maps to be able to quickly check
- if an index ever can be used to deduce the partition fields or even
- a part of the fields of the partition function.
- We set up the following key_map's.
- PF = Partition Function
- 1) All fields of the PF is set even by equal on the first fields in the
- key
- 2) All fields of the PF is set if all fields of the key is set
- 3) At least one field in the PF is set if all fields is set
- 4) At least one field in the PF is part of the key
+    This function sets up a couple of key maps to be able to quickly check
+    if an index can ever be used to deduce the partition fields, or even
+    a part of the fields of the partition function.
+    We set up the following key_map's.
+    PF = Partition Function
+    1) All fields of the PF are set even by equality on the first fields
+       of the key
+    2) All fields of the PF are set if all fields of the key are set
+    3) At least one field in the PF is set if all fields are set
+    4) At least one field in the PF is part of the key
*/
static void set_up_partition_key_maps(TABLE *table,
partition_info *part_info)
{
- uint keys= table->s->keys, i;
+ uint keys= table->s->keys;
+ uint i;
bool all_fields, some_fields;
DBUG_ENTER("set_up_partition_key_maps");
@@ -1507,17 +1728,26 @@ static void set_up_partition_key_maps(TABLE *table,
/*
- Set-up all function pointers for calculation of partition id,
- subpartition id and the upper part in subpartitioning. This is to speed up
- execution of get_partition_id which is executed once every record to be
- written and deleted and twice for updates.
+ Set up function pointers for partition function
+
SYNOPSIS
- set_up_partition_function_pointers()
+ set_up_partition_func_pointers()
part_info Reference to partitioning data structure
+
+ RETURN VALUE
+ NONE
+
+ DESCRIPTION
+ Set-up all function pointers for calculation of partition id,
+ subpartition id and the upper part in subpartitioning. This is to speed up
+    execution of get_partition_id, which is executed once for every record
+    to be written or deleted and twice for updates.
*/
static void set_up_partition_func_pointers(partition_info *part_info)
{
+ DBUG_ENTER("set_up_partition_func_pointers");
+
if (is_sub_partitioned(part_info))
{
if (part_info->part_type == RANGE_PARTITION)
@@ -1550,7 +1780,7 @@ static void set_up_partition_func_pointers(partition_info *part_info)
}
}
}
- else //LIST Partitioning
+ else /* LIST Partitioning */
{
part_info->get_part_partition_id= get_partition_id_list;
if (part_info->list_of_subpart_fields)
@@ -1581,7 +1811,7 @@ static void set_up_partition_func_pointers(partition_info *part_info)
}
}
}
- else //No subpartitioning
+ else /* No subpartitioning */
{
part_info->get_part_partition_id= NULL;
part_info->get_subpartition_id= NULL;
@@ -1589,7 +1819,7 @@ static void set_up_partition_func_pointers(partition_info *part_info)
part_info->get_partition_id= get_partition_id_range;
else if (part_info->part_type == LIST_PARTITION)
part_info->get_partition_id= get_partition_id_list;
- else //HASH partitioning
+ else /* HASH partitioning */
{
if (part_info->list_of_part_fields)
{
@@ -1607,21 +1837,27 @@ static void set_up_partition_func_pointers(partition_info *part_info)
}
}
}
+ DBUG_VOID_RETURN;
}
/*
For linear hashing we need a mask which is on the form 2**n - 1 where
2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7.
+
SYNOPSIS
set_linear_hash_mask()
part_info Reference to partitioning data structure
no_parts Number of parts in linear hash partitioning
+
+ RETURN VALUE
+ NONE
*/
static void set_linear_hash_mask(partition_info *part_info, uint no_parts)
{
uint mask;
+
for (mask= 1; mask < no_parts; mask<<=1)
;
part_info->linear_hash_mask= mask - 1;
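
A self-contained sketch of the two linear hashing steps, the mask computation
above and the fold-down done in get_part_id_from_linear_hash below (standard
C++ types rather than the server typedefs):

#include <cstdint>

static uint32_t linear_hash_mask(unsigned no_parts)
{
  unsigned mask;
  for (mask= 1; mask < no_parts; mask<<= 1)
    ;
  return mask - 1;                      /* 2**n - 1 with 2**n >= no_parts */
}

static uint32_t linear_hash_part_id(int64_t hash_value, uint32_t mask,
                                    unsigned no_parts)
{
  uint32_t part_id= (uint32_t)(hash_value & mask);
  if (part_id >= no_parts)              /* fold into the lower half */
    part_id&= ((mask + 1) >> 1) - 1;
  return part_id;
}

/* With no_parts = 6: mask = 7; hash 13 -> 13 & 7 = 5 (< 6, kept),
   hash 14 -> 14 & 7 = 6 (>= 6), folded with mask 3 to 14 & 3 = 2. */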
@@ -1631,13 +1867,16 @@ static void set_linear_hash_mask(partition_info *part_info, uint no_parts)
/*
This function calculates the partition id provided the result of the hash
function using linear hashing parameters, mask and number of partitions.
+
SYNOPSIS
get_part_id_from_linear_hash()
hash_value Hash value calculated by HASH function or KEY function
mask Mask calculated previously by set_linear_hash_mask
no_parts Number of partitions in HASH partitioned part
+
RETURN VALUE
part_id The calculated partition identity (starting at 0)
+
DESCRIPTION
The partition is calculated according to the theory of linear hashing.
See e.g. Linear hashing: a new tool for file and table addressing,
@@ -1649,6 +1888,7 @@ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
uint no_parts)
{
uint32 part_id= (uint32)(hash_value & mask);
+
if (part_id >= no_parts)
{
uint new_mask= ((mask + 1) >> 1) - 1;
@@ -1665,10 +1905,12 @@ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask,
thd The thread object
name The name of the partitioned table
table TABLE object for which partition fields are set-up
+ create_table_ind Indicator of whether openfrm was called as part of
+ CREATE or ALTER TABLE
RETURN VALUE
- TRUE
- FALSE
+ TRUE Error
+ FALSE Success
DESCRIPTION
The name parameter contains the full table name and is used to get the
@@ -1683,7 +1925,8 @@ NOTES
of an error that is not discovered until here.
*/
-bool fix_partition_func(THD *thd, const char *name, TABLE *table)
+bool fix_partition_func(THD *thd, const char* name, TABLE *table,
+ bool is_create_table_ind)
{
bool result= TRUE;
uint dir_length, home_dir_length;
@@ -1695,6 +1938,10 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table)
ulong save_set_query_id= thd->set_query_id;
DBUG_ENTER("fix_partition_func");
+ if (part_info->fixed)
+ {
+ DBUG_RETURN(FALSE);
+ }
thd->set_query_id= 0;
/*
Set-up the TABLE_LIST object to be a list with a single table
@@ -1714,6 +1961,13 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table)
db_name= &db_name_string[home_dir_length];
tables.db= db_name;
+ if (!is_create_table_ind)
+ {
+ if (partition_default_handling(table, part_info))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
if (is_sub_partitioned(part_info))
{
DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION);
@@ -1810,12 +2064,16 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table)
goto end;
if (unlikely(check_primary_key(table)))
goto end;
- if (unlikely((!table->file->partition_flags() & HA_CAN_PARTITION_UNIQUE) &&
+ if (unlikely((!(table->s->db_type->partition_flags &&
+ (table->s->db_type->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
check_unique_keys(table)))
goto end;
+ if (unlikely(set_up_partition_bitmap(thd, part_info)))
+ goto end;
check_range_capable_PF(table);
set_up_partition_key_maps(table, part_info);
set_up_partition_func_pointers(part_info);
+ part_info->fixed= TRUE;
result= FALSE;
end:
thd->set_query_id= save_set_query_id;
@@ -1834,6 +2092,7 @@ end:
static int add_write(File fptr, const char *buf, uint len)
{
uint len_written= my_write(fptr, (const byte*)buf, len, MYF(0));
+
if (likely(len == len_written))
return 0;
else
@@ -1878,6 +2137,7 @@ static int add_begin_parenthesis(File fptr)
static int add_part_key_word(File fptr, const char *key_string)
{
int err= add_string(fptr, key_string);
+
err+= add_space(fptr);
return err + add_begin_parenthesis(fptr);
}
@@ -1896,6 +2156,7 @@ static int add_partition(File fptr)
static int add_subpartition(File fptr)
{
int err= add_string(fptr, sub_str);
+
return err + add_partition(fptr);
}
@@ -1908,6 +2169,7 @@ static int add_partition_by(File fptr)
static int add_subpartition_by(File fptr)
{
int err= add_string(fptr, sub_str);
+
return err + add_partition_by(fptr);
}
@@ -1915,17 +2177,19 @@ static int add_key_partition(File fptr, List<char> field_list)
{
uint i, no_fields;
int err;
+
List_iterator<char> part_it(field_list);
err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
no_fields= field_list.elements;
i= 0;
- do
+ while (i < no_fields)
{
const char *field_str= part_it++;
err+= add_string(fptr, field_str);
if (i != (no_fields-1))
err+= add_comma(fptr);
- } while (++i < no_fields);
+ i++;
+ }
return err;
}
@@ -1939,6 +2203,7 @@ static int add_keyword_string(File fptr, const char *keyword,
const char *keystr)
{
int err= add_string(fptr, keyword);
+
err+= add_space(fptr);
err+= add_equal(fptr);
err+= add_space(fptr);
@@ -1949,6 +2214,7 @@ static int add_keyword_string(File fptr, const char *keyword,
static int add_keyword_int(File fptr, const char *keyword, longlong num)
{
int err= add_string(fptr, keyword);
+
err+= add_space(fptr);
err+= add_equal(fptr);
err+= add_space(fptr);
@@ -1959,14 +2225,15 @@ static int add_keyword_int(File fptr, const char *keyword, longlong num)
static int add_engine(File fptr, handlerton *engine_type)
{
const char *engine_str= engine_type->name;
+ DBUG_PRINT("info", ("ENGINE = %s", engine_str));
int err= add_string(fptr, "ENGINE = ");
return err + add_string(fptr, engine_str);
- return err;
}
static int add_partition_options(File fptr, partition_element *p_elem)
{
int err= 0;
+
if (p_elem->tablespace_name)
err+= add_keyword_string(fptr,"TABLESPACE",p_elem->tablespace_name);
if (p_elem->nodegroup_id != UNDEF_NODEGROUP)
@@ -1988,6 +2255,7 @@ static int add_partition_values(File fptr, partition_info *part_info,
partition_element *p_elem)
{
int err= 0;
+
if (part_info->part_type == RANGE_PARTITION)
{
err+= add_string(fptr, "VALUES LESS THAN ");
@@ -2024,16 +2292,19 @@ static int add_partition_values(File fptr, partition_info *part_info,
Generate the partition syntax from the partition data structure.
Useful for support of generating defaults, SHOW CREATE TABLE
and easy partition management.
+
SYNOPSIS
generate_partition_syntax()
part_info The partitioning data structure
buf_length A pointer to the returned buffer length
use_sql_alloc Allocate buffer from sql_alloc if true
otherwise use my_malloc
- add_default_info Add info generated by default
+    write_all                Write everything, including default values
+
RETURN VALUES
NULL error
buf, buf_length Buffer and its length
+
DESCRIPTION
Here we will generate the full syntax for the given command where all
defaults have been expanded. By so doing the it is also possible to
@@ -2057,39 +2328,37 @@ static int add_partition_values(File fptr, partition_info *part_info,
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length,
bool use_sql_alloc,
- bool add_default_info)
+ bool write_all)
{
- uint i,j, no_parts, no_subparts;
+ uint i,j, tot_no_parts, no_subparts, no_parts;
partition_element *part_elem;
+ partition_element *save_part_elem= NULL;
ulonglong buffer_length;
char path[FN_REFLEN];
int err= 0;
- DBUG_ENTER("generate_partition_syntax");
+ List_iterator<partition_element> part_it(part_info->partitions);
+ List_iterator<partition_element> temp_it(part_info->temp_partitions);
File fptr;
char *buf= NULL; //Return buffer
- const char *file_name;
-
- sprintf(path, "%s_%lx_%lx", "part_syntax", current_pid,
- current_thd->thread_id);
- fn_format(path,path,mysql_tmpdir,".psy", MY_REPLACE_EXT);
- file_name= &path[0];
- DBUG_PRINT("info", ("File name = %s", file_name));
- if (unlikely(((fptr= my_open(file_name,O_CREAT|O_RDWR, MYF(MY_WME))) == -1)))
+ uint use_temp= 0;
+ uint no_temp_parts= part_info->temp_partitions.elements;
+ bool write_part_state;
+ DBUG_ENTER("generate_partition_syntax");
+
+ write_part_state= (part_info->part_state && !part_info->part_state_len);
+ if (unlikely(((fptr= create_temp_file(path,mysql_tmpdir,"psy", 0,0))) < 0))
DBUG_RETURN(NULL);
-#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
-#else
- my_delete(file_name, MYF(0));
+#ifndef __WIN__
+ unlink(path);
#endif
err+= add_space(fptr);
err+= add_partition_by(fptr);
switch (part_info->part_type)
{
case RANGE_PARTITION:
- add_default_info= TRUE;
err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str);
break;
case LIST_PARTITION:
- add_default_info= TRUE;
err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str);
break;
case HASH_PARTITION:
@@ -2111,6 +2380,13 @@ char *generate_partition_syntax(partition_info *part_info,
part_info->part_func_len);
err+= add_end_parenthesis(fptr);
err+= add_space(fptr);
+ if ((!part_info->use_default_no_partitions) &&
+ part_info->use_default_partitions)
+ {
+ err+= add_string(fptr, "PARTITIONS ");
+ err+= add_int(fptr, part_info->no_parts);
+ err+= add_space(fptr);
+ }
if (is_sub_partitioned(part_info))
{
err+= add_subpartition_by(fptr);
@@ -2124,53 +2400,114 @@ char *generate_partition_syntax(partition_info *part_info,
part_info->subpart_func_len);
err+= add_end_parenthesis(fptr);
err+= add_space(fptr);
+ if ((!part_info->use_default_no_subpartitions) &&
+ part_info->use_default_subpartitions)
+ {
+ err+= add_string(fptr, "SUBPARTITIONS ");
+ err+= add_int(fptr, part_info->no_subparts);
+ err+= add_space(fptr);
+ }
}
- if (add_default_info)
- {
- err+= add_begin_parenthesis(fptr);
- List_iterator<partition_element> part_it(part_info->partitions);
no_parts= part_info->no_parts;
+ tot_no_parts= no_parts + no_temp_parts;
no_subparts= part_info->no_subparts;
- i= 0;
- do
+
+ if (write_all || (!part_info->use_default_partitions))
{
- part_elem= part_it++;
- err+= add_partition(fptr);
- err+= add_string(fptr, part_elem->partition_name);
- err+= add_space(fptr);
- err+= add_partition_values(fptr, part_info, part_elem);
- if (!is_sub_partitioned(part_info))
- err+= add_partition_options(fptr, part_elem);
- if (is_sub_partitioned(part_info))
+ err+= add_begin_parenthesis(fptr);
+ i= 0;
+ do
{
- err+= add_space(fptr);
- err+= add_begin_parenthesis(fptr);
- List_iterator<partition_element> sub_it(part_elem->subpartitions);
- j= 0;
- do
+ /*
+        We need to do some clever list manipulation here since we have two
+        different needs for our list processing, and here we pay some of the
+        cost of keeping the list processing simpler in the other parts of
+        the code.
+
+        For ALTER TABLE REORGANIZE PARTITIONS the main list holds the final
+        list of partitions, and the reorganised partitions are kept in the
+        temporary partition list. Thus when we find the first added partition
+        we insert the temporary list if there is such a list. If there is no
+        temporary list we are performing an ADD PARTITION.
+ */
+ if (use_temp && use_temp <= no_temp_parts)
+ {
+ part_elem= temp_it++;
+ DBUG_ASSERT(no_temp_parts);
+ no_temp_parts--;
+ }
+ else if (use_temp)
{
- part_elem= sub_it++;
- err+= add_subpartition(fptr);
+ DBUG_ASSERT(no_parts);
+ part_elem= save_part_elem;
+ use_temp= 0;
+ no_parts--;
+ }
+ else
+ {
+ part_elem= part_it++;
+ if ((part_elem->part_state == PART_TO_BE_ADDED ||
+ part_elem->part_state == PART_IS_ADDED) && no_temp_parts)
+ {
+ save_part_elem= part_elem;
+ part_elem= temp_it++;
+ no_temp_parts--;
+ use_temp= 1;
+ }
+ else
+ {
+ DBUG_ASSERT(no_parts);
+ no_parts--;
+ }
+ }
+
+ if (part_elem->part_state != PART_IS_DROPPED)
+ {
+ if (write_part_state)
+ {
+ uint32 part_state_id= part_info->part_state_len;
+ part_info->part_state[part_state_id]= (uchar)part_elem->part_state;
+ part_info->part_state_len= part_state_id+1;
+ }
+ err+= add_partition(fptr);
err+= add_string(fptr, part_elem->partition_name);
err+= add_space(fptr);
- err+= add_partition_options(fptr, part_elem);
- if (j != (no_subparts-1))
+ err+= add_partition_values(fptr, part_info, part_elem);
+ if (!is_sub_partitioned(part_info))
+ err+= add_partition_options(fptr, part_elem);
+ if (is_sub_partitioned(part_info) &&
+ (write_all || (!part_info->use_default_subpartitions)))
+ {
+ err+= add_space(fptr);
+ err+= add_begin_parenthesis(fptr);
+ List_iterator<partition_element> sub_it(part_elem->subpartitions);
+ j= 0;
+ do
+ {
+ part_elem= sub_it++;
+ err+= add_subpartition(fptr);
+ err+= add_string(fptr, part_elem->partition_name);
+ err+= add_space(fptr);
+ err+= add_partition_options(fptr, part_elem);
+ if (j != (no_subparts-1))
+ {
+ err+= add_comma(fptr);
+ err+= add_space(fptr);
+ }
+ else
+ err+= add_end_parenthesis(fptr);
+ } while (++j < no_subparts);
+ }
+ if (i != (tot_no_parts-1))
{
err+= add_comma(fptr);
err+= add_space(fptr);
}
- else
- err+= add_end_parenthesis(fptr);
- } while (++j < no_subparts);
- }
- if (i != (no_parts-1))
- {
- err+= add_comma(fptr);
- err+= add_space(fptr);
- }
- else
- err+= add_end_parenthesis(fptr);
- } while (++i < no_parts);
+ }
+ if (i == (tot_no_parts-1))
+ err+= add_end_parenthesis(fptr);
+ } while (++i < tot_no_parts);
+ DBUG_ASSERT(!no_parts && !no_temp_parts);
}
if (err)
goto close_file;
@@ -2198,19 +2535,7 @@ char *generate_partition_syntax(partition_info *part_info,
buf[*buf_length]= 0;
close_file:
- /*
- Delete the file before closing to ensure the file doesn't get synched
- to disk unnecessary. We only used the file system as a dynamic array
- implementation so we are not really interested in getting the file
- present on disk.
- This is not possible on Windows so here it has to be done after closing
- the file. Also on Unix we delete immediately after opening to ensure no
- other process can read the information written into the file.
- */
my_close(fptr, MYF(0));
-#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2)
- my_delete(file_name, MYF(0));
-#endif
DBUG_RETURN(buf);
}
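
For a RANGE-partitioned table with all defaults expanded, the buffer returned
here reads roughly as follows (illustrative, not verbatim server output):

 PARTITION BY RANGE (a)
 (PARTITION p0 VALUES LESS THAN (10) ENGINE = MyISAM,
  PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = MyISAM)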
@@ -2218,10 +2543,12 @@ close_file:
/*
Check if partition key fields are modified and if it can be handled by the
underlying storage engine.
+
SYNOPSIS
partition_key_modified
table TABLE object for which partition fields are set-up
fields                A list of the fields to be modified
+
RETURN VALUES
TRUE Need special handling of UPDATE
FALSE Normal UPDATE handling is ok
@@ -2233,9 +2560,11 @@ bool partition_key_modified(TABLE *table, List<Item> &fields)
partition_info *part_info= table->part_info;
Item_field *item_field;
DBUG_ENTER("partition_key_modified");
+
if (!part_info)
DBUG_RETURN(FALSE);
- if (table->file->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY)
+ if (table->s->db_type->partition_flags &&
+ (table->s->db_type->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY))
DBUG_RETURN(FALSE);
f.rewind();
while ((item_field=(Item_field*) f++))
@@ -2265,11 +2594,14 @@ bool partition_key_modified(TABLE *table, List<Item> &fields)
/*
Calculate hash value for KEY partitioning using an array of fields.
+
SYNOPSIS
calculate_key_value()
field_array An array of the fields in KEY partitioning
+
RETURN VALUE
hash_value calculated
+
DESCRIPTION
Uses the hash function on the character set of the field. Integer and
floating point fields use the binary character set by default.
@@ -2279,6 +2611,7 @@ static uint32 calculate_key_value(Field **field_array)
{
uint32 hashnr= 0;
ulong nr2= 4;
+
do
{
Field *field= *field_array;
@@ -2302,6 +2635,7 @@ static uint32 calculate_key_value(Field **field_array)
/*
A simple support function to calculate part_id given local part and
sub part.
+
SYNOPSIS
get_part_id_for_sub()
loc_part_id Local partition id
@@ -2319,10 +2653,12 @@ static uint32 get_part_id_for_sub(uint32 loc_part_id, uint32 sub_part_id,
/*
Calculate part_id for (SUB)PARTITION BY HASH
+
SYNOPSIS
get_part_id_hash()
no_parts Number of hash partitions
part_expr Item tree of hash function
+
RETURN VALUE
Calculated partition id
*/
@@ -2339,12 +2675,14 @@ static uint32 get_part_id_hash(uint no_parts,
/*
Calculate part_id for (SUB)PARTITION BY LINEAR HASH
+
SYNOPSIS
get_part_id_linear_hash()
part_info A reference to the partition_info struct where all the
desired information is given
no_parts Number of hash partitions
part_expr Item tree of hash function
+
RETURN VALUE
Calculated partition id
*/
@@ -2355,6 +2693,7 @@ static uint32 get_part_id_linear_hash(partition_info *part_info,
Item *part_expr)
{
DBUG_ENTER("get_part_id_linear_hash");
+
DBUG_RETURN(get_part_id_from_linear_hash(part_expr->val_int(),
part_info->linear_hash_mask,
no_parts));
@@ -2363,10 +2702,12 @@ static uint32 get_part_id_linear_hash(partition_info *part_info,
/*
Calculate part_id for (SUB)PARTITION BY KEY
+
SYNOPSIS
get_part_id_key()
field_array         Array of fields for PARTITION KEY
no_parts Number of KEY partitions
+
RETURN VALUE
Calculated partition id
*/
@@ -2376,18 +2717,21 @@ static uint32 get_part_id_key(Field **field_array,
uint no_parts)
{
DBUG_ENTER("get_part_id_key");
+
DBUG_RETURN(calculate_key_value(field_array) % no_parts);
}
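
For (LINEAR) KEY the partition id is simply the field hash reduced modulo the
number of partitions. A minimal sketch, with key_fields_hash standing in for
calculate_key_value():

#include <cstdint>

/* key_fields_hash is assumed to be the charset-aware hash computed
   over the KEY partitioning fields (calculate_key_value above). */
static uint32_t key_part_id(uint32_t key_fields_hash, unsigned no_parts)
{
  return key_fields_hash % no_parts;
}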
/*
Calculate part_id for (SUB)PARTITION BY LINEAR KEY
+
SYNOPSIS
get_part_id_linear_key()
part_info A reference to the partition_info struct where all the
desired information is given
field_array         Array of fields for PARTITION KEY
no_parts Number of KEY partitions
+
RETURN VALUE
Calculated partition id
*/
@@ -2398,6 +2742,7 @@ static uint32 get_part_id_linear_key(partition_info *part_info,
uint no_parts)
{
DBUG_ENTER("get_partition_id_linear_key");
+
DBUG_RETURN(get_part_id_from_linear_hash(calculate_key_value(field_array),
part_info->linear_hash_mask,
no_parts));
@@ -2407,15 +2752,18 @@ static uint32 get_part_id_linear_key(partition_info *part_info,
This function is used to calculate the partition id where all partition
fields have been prepared to point to a record where the partition field
values are bound.
+
SYNOPSIS
get_partition_id()
part_info A reference to the partition_info struct where all the
desired information is given
- part_id The partition id is returned through this pointer
+ out:part_id The partition id is returned through this pointer
+
RETURN VALUE
part_id
return TRUE means that the fields of the partition function didn't fit
into any partition and thus the values of the PF-fields are not allowed.
+
DESCRIPTION
A routine used from write_row, update_row and delete_row from any
handler supporting partitioning. It is also a support routine for
@@ -2445,15 +2793,18 @@ static uint32 get_part_id_linear_key(partition_info *part_info,
This function is used to calculate the main partition to use in the case of
subpartitioning and we don't know enough to get the partition identity in
total.
+
SYNOPSIS
get_part_partition_id()
part_info A reference to the partition_info struct where all the
desired information is given
- part_id The partition id is returned through this pointer
+ out:part_id The partition id is returned through this pointer
+
RETURN VALUE
part_id
return TRUE means that the fields of the partition function didn't fit
into any partition and thus the values of the PF-fields are not allowed.
+
DESCRIPTION
It is actually 6 different variants of this function which are called
@@ -2468,15 +2819,17 @@ static uint32 get_part_id_linear_key(partition_info *part_info,
*/
-bool get_partition_id_list(partition_info *part_info,
- uint32 *part_id)
+int get_partition_id_list(partition_info *part_info,
+ uint32 *part_id)
{
- DBUG_ENTER("get_partition_id_list");
LIST_PART_ENTRY *list_array= part_info->list_array;
- uint list_index;
+ int list_index;
longlong list_value;
- uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
+ int min_list_index= 0;
+ int max_list_index= part_info->no_list_values - 1;
longlong part_func_value= part_info->part_expr->val_int();
+ DBUG_ENTER("get_partition_id_list");
+
while (max_list_index >= min_list_index)
{
list_index= (max_list_index + min_list_index) >> 1;
@@ -2492,12 +2845,12 @@ bool get_partition_id_list(partition_info *part_info,
else
{
*part_id= (uint32)list_array[list_index].partition_id;
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
}
notfound:
*part_id= 0;
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
}
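
The loop above is a plain binary search over list values kept sorted in
ascending order. A standalone sketch with hypothetical types:

#include <cstdint>

struct list_entry { int64_t list_value; uint32_t partition_id; };

/* Returns 0 and sets *part_id on a hit; non-zero mirrors
   HA_ERR_NO_PARTITION_FOUND when the value maps to no partition. */
static int list_part_lookup(const list_entry *arr, int no_list_values,
                            int64_t value, uint32_t *part_id)
{
  int lo= 0, hi= no_list_values - 1;
  while (hi >= lo)
  {
    int mid= (lo + hi) >> 1;
    if (arr[mid].list_value < value)
      lo= mid + 1;
    else if (arr[mid].list_value > value)
      hi= mid - 1;
    else
    {
      *part_id= arr[mid].partition_id;
      return 0;
    }
  }
  return 1;
}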
@@ -2574,14 +2927,17 @@ notfound:
}
-bool get_partition_id_range(partition_info *part_info,
+int get_partition_id_range(partition_info *part_info,
uint32 *part_id)
{
- DBUG_ENTER("get_partition_id_int_range");
longlong *range_array= part_info->range_int_array;
uint max_partition= part_info->no_parts - 1;
- uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
+ uint min_part_id= 0;
+ uint max_part_id= max_partition;
+ uint loc_part_id;
longlong part_func_value= part_info->part_expr->val_int();
+ DBUG_ENTER("get_partition_id_int_range");
+
while (max_part_id > min_part_id)
{
loc_part_id= (max_part_id + min_part_id + 1) >> 1;
@@ -2598,8 +2954,8 @@ bool get_partition_id_range(partition_info *part_info,
if (loc_part_id == max_partition)
if (range_array[loc_part_id] != LONGLONG_MAX)
if (part_func_value >= range_array[loc_part_id])
- DBUG_RETURN(TRUE);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+ DBUG_RETURN(0);
}
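
RANGE lookup is the corresponding upper-bound search: partition i holds values
below range_array[i], with LONGLONG_MAX representing MAXVALUE. A sketch under
that assumption (the full loop body is elided in the hunk above):

#include <cstdint>
#include <climits>

/* Returns the partition holding value, or -1 when the value lies
   beyond the last VALUES LESS THAN boundary. */
static int range_part_lookup(const int64_t *range_array, unsigned no_parts,
                             int64_t value)
{
  unsigned lo= 0, hi= no_parts - 1;
  while (hi > lo)
  {
    unsigned mid= (lo + hi) >> 1;
    if (value >= range_array[mid])
      lo= mid + 1;
    else
      hi= mid;
  }
  return (value < range_array[lo]) ? (int)lo : -1;
}

/* Boundaries {10, 20, LLONG_MAX}: value 9 -> partition 0,
   value 15 -> partition 1, value 25 -> partition 2. */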
@@ -2685,191 +3041,209 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
}
-bool get_partition_id_hash_nosub(partition_info *part_info,
+int get_partition_id_hash_nosub(partition_info *part_info,
uint32 *part_id)
{
*part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr);
- return FALSE;
+ return 0;
}
-bool get_partition_id_linear_hash_nosub(partition_info *part_info,
+int get_partition_id_linear_hash_nosub(partition_info *part_info,
uint32 *part_id)
{
*part_id= get_part_id_linear_hash(part_info, part_info->no_parts,
part_info->part_expr);
- return FALSE;
+ return 0;
}
-bool get_partition_id_key_nosub(partition_info *part_info,
+int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id)
{
*part_id= get_part_id_key(part_info->part_field_array, part_info->no_parts);
- return FALSE;
+ return 0;
}
-bool get_partition_id_linear_key_nosub(partition_info *part_info,
+int get_partition_id_linear_key_nosub(partition_info *part_info,
uint32 *part_id)
{
*part_id= get_part_id_linear_key(part_info,
part_info->part_field_array,
part_info->no_parts);
- return FALSE;
+ return 0;
}
-bool get_partition_id_range_sub_hash(partition_info *part_info,
+int get_partition_id_range_sub_hash(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_range_sub_hash");
- if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
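
Each *_sub_* variant composes the same way: the RANGE or LIST lookup yields
the main partition, a hash yields the subpartition, and get_part_id_for_sub
combines them. The sketch below assumes the usual row-major combination:

/* Assumed combination used by get_part_id_for_sub: subpartitions of
   main partition p occupy global ids [p*no_subparts, (p+1)*no_subparts). */
static inline uint32_t part_id_for_sub(uint32_t loc_part_id,
                                       uint32_t sub_part_id,
                                       unsigned no_subparts)
{
  return loc_part_id * no_subparts + sub_part_id;
}

/* Example: 4 RANGE partitions x 2 HASH subpartitions; main partition 2,
   subpartition 1 -> global partition id 2 * 2 + 1 = 5. */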
-bool get_partition_id_range_sub_linear_hash(partition_info *part_info,
+int get_partition_id_range_sub_linear_hash(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_range_sub_linear_hash");
- if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_linear_hash(part_info, no_subparts,
part_info->subpart_expr);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
-bool get_partition_id_range_sub_key(partition_info *part_info,
+int get_partition_id_range_sub_key(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_range_sub_key");
- if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
-bool get_partition_id_range_sub_linear_key(partition_info *part_info,
+int get_partition_id_range_sub_linear_key(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_range_sub_linear_key");
- if (unlikely(get_partition_id_range(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_range(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_linear_key(part_info,
part_info->subpart_field_array,
no_subparts);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
-bool get_partition_id_list_sub_hash(partition_info *part_info,
+int get_partition_id_list_sub_hash(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_list_sub_hash");
- if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
-bool get_partition_id_list_sub_linear_hash(partition_info *part_info,
+int get_partition_id_list_sub_linear_hash(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_list_sub_linear_hash");
- if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
-bool get_partition_id_list_sub_key(partition_info *part_info,
+int get_partition_id_list_sub_key(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_range_sub_key");
- if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
-bool get_partition_id_list_sub_linear_key(partition_info *part_info,
+int get_partition_id_list_sub_linear_key(partition_info *part_info,
uint32 *part_id)
{
uint32 loc_part_id, sub_part_id;
uint no_subparts;
+ int error;
DBUG_ENTER("get_partition_id_list_sub_linear_key");
- if (unlikely(get_partition_id_list(part_info, &loc_part_id)))
+
+ if (unlikely((error= get_partition_id_list(part_info, &loc_part_id))))
{
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error);
}
no_subparts= part_info->no_subparts;
sub_part_id= get_part_id_linear_key(part_info,
part_info->subpart_field_array,
no_subparts);
*part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
/*
This function is used to calculate the subpartition id
+
SYNOPSIS
get_subpartition_id()
part_info A reference to the partition_info struct where all the
desired information is given
+
RETURN VALUE
- part_id
- The subpartition identity
+ part_id The subpartition identity
+
DESCRIPTION
A routine used in some SELECT's when only partial knowledge of the
partitions is available.
@@ -2912,11 +3286,13 @@ uint32 get_partition_id_linear_key_sub(partition_info *part_info)
/*
- Set an indicator on all partition fields that are set by the key
+ Set an indicator on all partition fields that are set by the key
+
SYNOPSIS
set_PF_fields_in_key()
key_info Information about the index
key_length Length of key
+
RETURN VALUE
TRUE Found partition field set by key
FALSE No partition field set by key
@@ -2957,9 +3333,11 @@ static bool set_PF_fields_in_key(KEY *key_info, uint key_length)
/*
We have found that at least one partition field was set by a key, now
check if a partition function has all its fields bound or not.
+
SYNOPSIS
check_part_func_bound()
ptr Array of fields NULL terminated (partition fields)
+
RETURN VALUE
TRUE All fields in partition function are set
FALSE Not all fields in partition function are set
@@ -2985,14 +3363,17 @@ static bool check_part_func_bound(Field **ptr)
/*
Get the id of the subpartitioning part by using the key buffer of the
index scan.
+
SYNOPSIS
get_sub_part_id_from_key()
table The table object
buf A buffer that can be used to evaluate the partition function
key_info The index object
key_spec A key_range containing key and key length
+
RETURN VALUES
part_id Subpartition id to use
+
DESCRIPTION
Use key buffer to set-up record in buf, move field pointers and
get the partition identity and restore field pointers afterwards.
@@ -3023,20 +3404,24 @@ static uint32 get_sub_part_id_from_key(const TABLE *table,byte *buf,
/*
Get the id of the partitioning part by using the key buffer of the
index scan.
+
SYNOPSIS
get_part_id_from_key()
table The table object
buf A buffer that can be used to evaluate the partition function
key_info The index object
key_spec A key_range containing key and key length
- part_id Partition to use
+ out:part_id Partition to use
+
RETURN VALUES
TRUE Partition to use not found
FALSE Ok, part_id indicates partition to use
+
DESCRIPTION
Use key buffer to set-up record in buf, move field pointers and
get the partition identity and restore field pointers afterwards.
*/
+
bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
const key_range *key_spec, uint32 *part_id)
{
@@ -3061,16 +3446,19 @@ bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info,
/*
Get the partitioning id of the full PF by using the key buffer of the
index scan.
+
SYNOPSIS
get_full_part_id_from_key()
table The table object
buf A buffer that is used to evaluate the partition function
key_info The index object
key_spec A key_range containing key and key length
- part_spec A partition id containing start part and end part
+ out:part_spec A partition id containing start part and end part
+
RETURN VALUES
part_spec
No partitions to scan is indicated by end_part > start_part when returning
+
DESCRIPTION
Use key buffer to set-up record in buf, move field pointers if needed and
get the partition identity and restore field pointers afterwards.
@@ -3104,14 +3492,16 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
/*
Get the set of partitions to use in query.
+
SYNOPSIS
get_partition_set()
table The table object
buf A buffer that can be used to evaluate the partition function
index The index of the key used, if MAX_KEY no index used
key_spec A key_range containing key and key length
- part_spec Contains start part, end part and indicator if bitmap is
+ out:part_spec Contains start part, end part and indicator if bitmap is
used for which partitions to scan
+
DESCRIPTION
This function is called to discover which partitions to use in an index
scan or a full table scan.
@@ -3121,6 +3511,7 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
If start_part > end_part at return it means no partition needs to be
scanned. If start_part == end_part it always means a single partition
needs to be scanned.
+
RETURN VALUE
part_spec
*/
@@ -3128,7 +3519,8 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
const key_range *key_spec, part_id_range *part_spec)
{
partition_info *part_info= table->part_info;
- uint no_parts= get_tot_partitions(part_info), i, part_id;
+ uint no_parts= get_tot_partitions(part_info);
+ uint i, part_id;
uint sub_part= no_parts;
uint32 part_part= no_parts;
KEY *key_info= NULL;
@@ -3170,7 +3562,8 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
else if (part_info->all_fields_in_PPF.is_set(index))
{
- if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part))
+ if (get_part_id_from_key(table,buf,key_info,
+ key_spec,(uint32*)&part_part))
{
/*
The value of the RANGE or LIST partitioning was outside of
@@ -3205,15 +3598,18 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
clear_indicator_in_key_fields(key_info);
DBUG_VOID_RETURN;
}
- else if (check_part_func_bound(part_info->part_field_array))
- sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
- else if (check_part_func_bound(part_info->subpart_field_array))
+ else if (is_sub_partitioned(part_info))
{
- if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part))
+ if (check_part_func_bound(part_info->subpart_field_array))
+ sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec);
+ else if (check_part_func_bound(part_info->part_field_array))
{
- part_spec->start_part= no_parts;
- clear_indicator_in_key_fields(key_info);
- DBUG_VOID_RETURN;
+ if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part))
+ {
+ part_spec->start_part= no_parts;
+ clear_indicator_in_key_fields(key_info);
+ DBUG_VOID_RETURN;
+ }
}
}
}
@@ -3282,10 +3678,10 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
| Forminfo 288 bytes |
-------------------------------
| Screen buffer, to make |
- | field names readable |
+ | field names readable |
-------------------------------
| Packed field info |
- | 17 + 1 + strlen(field_name) |
+ | 17 + 1 + strlen(field_name) |
| + 1 end of file character |
-------------------------------
| Partition info |
@@ -3294,15 +3690,20 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
Read the partition syntax from the frm file and parse it to get the
data structures of the partitioning.
+
SYNOPSIS
mysql_unpack_partition()
- file File reference of frm file
thd Thread object
+ part_buf Partition info from frm file
part_info_len Length of partition syntax
table Table object of partitioned table
+ create_table_ind Is it called from CREATE TABLE
+ default_db_type What is the default engine of the table
+
RETURN VALUE
TRUE Error
    FALSE Success
+
DESCRIPTION
Read the partition syntax from the current position in the frm file.
Initiate a LEX object, save the list of item tree objects to free after
@@ -3315,13 +3716,16 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index,
*/
bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
- uint part_info_len, TABLE* table,
+ uint part_info_len,
+ uchar *part_state, uint part_state_len,
+ TABLE* table, bool is_create_table_ind,
handlerton *default_db_type)
{
Item *thd_free_list= thd->free_list;
bool result= TRUE;
partition_info *part_info;
- LEX *old_lex= thd->lex, lex;
+ LEX *old_lex= thd->lex;
+ LEX lex;
DBUG_ENTER("mysql_unpack_partition");
thd->lex= &lex;
@@ -3344,13 +3748,59 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
we then save in the partition info structure.
*/
thd->free_list= NULL;
- lex.part_info= (partition_info*)1; //Indicate yyparse from this place
+ lex.part_info= new partition_info();/* Indicates yyparse from this place */
+ if (!lex.part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ goto end;
+ }
+ lex.part_info->part_state= part_state;
+ lex.part_info->part_state_len= part_state_len;
+ DBUG_PRINT("info", ("Parse: %s", part_buf));
if (yyparse((void*)thd) || thd->is_fatal_error)
{
free_items(thd->free_list);
goto end;
}
+ /*
+ The parsed syntax residing in the frm file can still contain defaults.
+ The reason is that the frm file is sometimes saved outside of this
+ MySQL Server and used in backup and restore of clusters or partitioned
+ tables. It is not certain that the restore will restore exactly the
+ same default partitioning.
+
+ The easiest manner of handling this is to simply continue using the
+ part_info we already built up during mysql_create_table if we are
+ in the process of creating a table. If the table already exists we
+ need to discover the number of partitions for the default parts. Since
+ the handler object hasn't been created here yet we need to postpone this
+ to the fix_partition_func method.
+ */
+
+ DBUG_PRINT("info", ("Successful parse"));
part_info= lex.part_info;
+  DBUG_PRINT("info", ("default engine = %d",
+                      ha_legacy_type(part_info->default_engine_type)));
+ if (is_create_table_ind)
+ {
+ if (old_lex->name)
+ {
+ /*
+ This code is executed when we do a CREATE TABLE t1 LIKE t2
+ old_lex->name contains the t2 and the table we are opening has
+ name t1.
+ */
+ Table_ident *ti= (Table_ident*)old_lex->name;
+ const char *db_name= ti->db.str ? ti->db.str : thd->db;
+ const char *table_name= ti->table.str;
+ handler *file;
+ if (partition_default_handling(table, part_info))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ part_info= old_lex->part_info;
+ }
table->part_info= part_info;
table->file->set_part_info(part_info);
if (part_info->default_engine_type == NULL)
@@ -3373,30 +3823,25 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
*/
uint part_func_len= part_info->part_func_len;
uint subpart_func_len= part_info->subpart_func_len;
- uint bitmap_bits= part_info->no_subparts?
- (part_info->no_subparts* part_info->no_parts):
- part_info->no_parts;
- uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
- uint32 *bitmap_buf;
- char *part_func_string, *subpart_func_string= NULL;
- if (!((part_func_string= thd->alloc(part_func_len))) ||
+ char *part_func_string= NULL;
+ char *subpart_func_string= NULL;
+ if ((part_func_len &&
+ !((part_func_string= thd->alloc(part_func_len)))) ||
(subpart_func_len &&
- !((subpart_func_string= thd->alloc(subpart_func_len)))) ||
- !((bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))))
+ !((subpart_func_string= thd->alloc(subpart_func_len)))))
{
- my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
+ mem_alloc_error(part_func_len);
free_items(thd->free_list);
part_info->item_free_list= 0;
goto end;
}
- memcpy(part_func_string, part_info->part_func_string, part_func_len);
+ if (part_func_len)
+ memcpy(part_func_string, part_info->part_func_string, part_func_len);
if (subpart_func_len)
memcpy(subpart_func_string, part_info->subpart_func_string,
subpart_func_len);
part_info->part_func_string= part_func_string;
part_info->subpart_func_string= subpart_func_string;
-
- bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
}
result= FALSE;
@@ -3405,16 +3850,1541 @@ end:
thd->lex= old_lex;
DBUG_RETURN(result);
}
+
+
+/*
+ SYNOPSIS
+ fast_alter_partition_error_handler()
+ lpt Container for parameters
+
+ RETURN VALUES
+ None
+
+ DESCRIPTION
+ Support routine to clean up after failures of on-line ALTER TABLE
+ for partition management.
+*/
+
+static void fast_alter_partition_error_handler(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ DBUG_ENTER("fast_alter_partition_error_handler");
+ /* TODO: WL 2826 Error handling */
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ SYNOPSIS
+ fast_end_partition()
+ thd Thread object
+ out:copied Number of records copied
+ out:deleted Number of records deleted
+ table_list Table list with the one table in it
+    is_empty            TRUE if nothing has been done
+ lpt Struct to be used by error handler
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Failure
+
+ DESCRIPTION
+ Support routine to handle the successful cases for partition
+ management.
+*/
+
+static int fast_end_partition(THD *thd, ulonglong copied,
+ ulonglong deleted,
+ TABLE_LIST *table_list, bool is_empty,
+ ALTER_PARTITION_PARAM_TYPE *lpt,
+ bool written_bin_log)
+{
+ int error;
+ DBUG_ENTER("fast_end_partition");
+
+ thd->proc_info="end";
+ if (!is_empty)
+ query_cache_invalidate3(thd, table_list, 0);
+ error= ha_commit_stmt(thd);
+ if (ha_commit(thd))
+ error= 1;
+ if (!error || is_empty)
+ {
+ char tmp_name[80];
+ if ((!is_empty) && (!written_bin_log) &&
+ (!thd->lex->no_write_to_binlog))
+ write_bin_log(thd, FALSE, thd->query, thd->query_length);
+ close_thread_tables(thd);
+ my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
+ (ulong) (copied + deleted),
+ (ulong) deleted,
+ (ulong) 0);
+ send_ok(thd,copied+deleted,0L,tmp_name);
+ DBUG_RETURN(FALSE);
+ }
+ fast_alter_partition_error_handler(lpt);
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ We need to check if engine used by all partitions can handle
+ partitioning natively.
+
+ SYNOPSIS
+ check_native_partitioned()
+ create_info Create info in CREATE TABLE
+ out:ret_val Return value
+ part_info Partition info
+ thd Thread object
+
+ RETURN VALUES
+    Value returned in bool ret_val
+ TRUE Native partitioning supported by engine
+ FALSE Need to use partition handler
+
+ Return value from function
+ TRUE Error
+ FALSE Success
+*/
+
+static bool check_native_partitioned(HA_CREATE_INFO *create_info, bool *ret_val,
+ partition_info *part_info, THD *thd)
+{
+ List_iterator<partition_element> part_it(part_info->partitions);
+ bool first= TRUE;
+ bool default_engine;
+ handlerton *engine_type= create_info->db_type;
+ uint i= 0;
+ handler *file;
+ DBUG_ENTER("check_native_partitioned");
+
+  default_engine= (create_info->used_fields & HA_CREATE_USED_ENGINE) ?
+                  FALSE : TRUE;        /* TRUE if no explicit ENGINE given */
+ DBUG_PRINT("info", ("engine_type = %u, default = %u",
+ ha_legacy_type(engine_type),
+ default_engine));
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (first && default_engine && part_elem->engine_type)
+ engine_type= part_elem->engine_type;
+ first= FALSE;
+
+ if (part_elem->engine_type != engine_type)
+ {
+ /*
+ Mixed engines not yet supported but when supported it will need
+ the partition handler
+ */
+ *ret_val= FALSE;
+ DBUG_RETURN(FALSE);
+ }
+ } while (++i < part_info->no_parts);
+ /*
+ All engines are of the same type. Check if this engine supports
+ native partitioning.
+ */
+ if (engine_type->partition_flags &&
+ (engine_type->partition_flags() & HA_CAN_PARTITION))
+ {
+ create_info->db_type= engine_type;
+ DBUG_PRINT("info", ("Changed to native partitioning"));
+ *ret_val= TRUE;
+  }
+  else
+    *ret_val= FALSE;                    /* Ensure out parameter is set */
+  DBUG_RETURN(FALSE);
+}
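+
+
+/*
+  Illustration only, not part of the original patch: an engine opts in to
+  native partitioning through the partition_flags hook probed above. A
+  minimal, hypothetical set-up could look as follows (all names here are
+  made up).
+*/
+
+static uint example_partition_flags()
+{
+  return HA_CAN_PARTITION;              /* Partitions handled natively */
+}
+
+/* ...and in the hypothetical engine's handlerton initialisation:       */
+/*   example_hton.partition_flags= example_partition_flags;             */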
+
+
+/*
+ Prepare for ALTER TABLE of partition structure
+
+ SYNOPSIS
+ prep_alter_part_table()
+ thd Thread object
+ table Table object
+ inout:alter_info Alter information
+ inout:create_info Create info for CREATE TABLE
+ old_db_type Old engine type
+ out:partition_changed Boolean indicating whether partition changed
+ out:fast_alter_partition Boolean indicating whether fast partition
+ change is requested
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+ partition_changed
+ fast_alter_partition
+
+ DESCRIPTION
+ This method handles all preparations for ALTER TABLE for partitioned
+ tables
+ We need to handle both partition management command such as Add Partition
+ and others here as well as an ALTER TABLE that completely changes the
+ partitioning and yet others that don't change anything at all. We start
+ by checking the partition management variants and then check the general
+ change patterns.
+*/
+
+uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ handlerton *old_db_type,
+ bool *partition_changed,
+ uint *fast_alter_partition)
+{
+ DBUG_ENTER("prep_alter_part_table");
+
+ if (alter_info->flags &
+ (ALTER_ADD_PARTITION | ALTER_DROP_PARTITION |
+ ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION |
+ ALTER_TABLE_REORG | ALTER_OPTIMIZE_PARTITION |
+ ALTER_CHECK_PARTITION | ALTER_ANALYZE_PARTITION |
+ ALTER_REPAIR_PARTITION | ALTER_REBUILD_PARTITION))
+ {
+ partition_info *tab_part_info= table->part_info;
+ if (!tab_part_info)
+ {
+ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ /*
+ We are going to manipulate the partition info on the table object
+ so we need to ensure that the data structure of the table object
+ is freed by setting version to 0. table->s->version= 0 forces a
+ flush of the table object in close_thread_tables().
+ */
+    /* Default when the engine provides no alter_table_flags hook */
+    uint flags= HA_PARTITION_FUNCTION_SUPPORTED;
+ table->s->version= 0L;
+ if (alter_info->flags == ALTER_TABLE_REORG)
+ {
+ uint new_part_no, curr_part_no;
+ ulonglong max_rows= table->s->max_rows;
+ if (tab_part_info->part_type != HASH_PARTITION ||
+ tab_part_info->use_default_no_partitions)
+ {
+ my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ new_part_no= table->file->get_default_no_partitions(max_rows);
+ curr_part_no= tab_part_info->no_parts;
+ if (new_part_no == curr_part_no)
+ {
+ /*
+ No change is needed, we will have the same number of partitions
+ after the change as before. Thus we can reply ok immediately
+ without any changes at all.
+ */
+ DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0), NULL,
+ TRUE, NULL, FALSE));
+ }
+ else if (new_part_no > curr_part_no)
+ {
+ /*
+ We will add more partitions, we use the ADD PARTITION without
+ setting the flag for no default number of partitions
+ */
+ alter_info->flags|= ALTER_ADD_PARTITION;
+ thd->lex->part_info->no_parts= new_part_no - curr_part_no;
+ }
+ else
+ {
+ /*
+ We will remove hash partitions, we use the COALESCE PARTITION
+ without setting the flag for no default number of partitions
+ */
+ alter_info->flags|= ALTER_COALESCE_PARTITION;
+ alter_info->no_parts= curr_part_no - new_part_no;
+ }
+ }
+ if (table->s->db_type->alter_table_flags &&
+ (!(flags= table->s->db_type->alter_table_flags(alter_info->flags))))
+ {
+ my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
+ DBUG_RETURN(1);
+ }
+ *fast_alter_partition= flags ^ HA_PARTITION_FUNCTION_SUPPORTED;
+ if (alter_info->flags & ALTER_ADD_PARTITION)
+ {
+ /*
+ We start by moving the new partitions to the list of temporary
+ partitions. We will then check that the new partitions fit in the
+ partitioning scheme as currently set-up.
+ Partitions are always added at the end in ADD PARTITION.
+ */
+ partition_info *alt_part_info= thd->lex->part_info;
+ uint no_new_partitions= alt_part_info->no_parts;
+ uint no_orig_partitions= tab_part_info->no_parts;
+ uint check_total_partitions= no_new_partitions + no_orig_partitions;
+ uint new_total_partitions= check_total_partitions;
+ /*
+ We allow quite a lot of values to be supplied by defaults, however we
+ must know the number of new partitions in this case.
+ */
+ if (thd->lex->no_write_to_binlog &&
+ tab_part_info->part_type != HASH_PARTITION)
+ {
+ my_error(ER_NO_BINLOG_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (no_new_partitions == 0)
+ {
+ my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (is_sub_partitioned(tab_part_info))
+ {
+ if (alt_part_info->no_subparts == 0)
+ alt_part_info->no_subparts= tab_part_info->no_subparts;
+ else if (alt_part_info->no_subparts != tab_part_info->no_subparts)
+ {
+ my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ check_total_partitions= new_total_partitions*
+ alt_part_info->no_subparts;
+ }
+ if (check_total_partitions > MAX_PARTITIONS)
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ alt_part_info->part_type= tab_part_info->part_type;
+ if (set_up_defaults_for_partitioning(alt_part_info,
+ table->file,
+ ULL(0),
+ tab_part_info->no_parts))
+ {
+ DBUG_RETURN(TRUE);
+ }
+/*
+Handling of on-line cases:
+
+ADD PARTITION for RANGE/LIST PARTITIONING:
+------------------------------------------
+For range and list partitions add partition is simply adding a
+new empty partition to the table. If the handler support this we
+will use the simple method of doing this. The figure below shows
+an example of this and the states involved in making this change.
+
+Existing partitions New added partitions
+------ ------ ------ ------ | ------ ------
+| | | | | | | | | | | | |
+| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 |
+------ ------ ------ ------ | ------ ------
+PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_TO_BE_ADDED*2
+PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED*2
+
+The first line is the states before adding the new partitions and the
+second line is after the new partitions are added. All the partitions are
+in the partitions list, no partitions are placed in the temp_partitions
+list.
+
+ADD PARTITION for HASH PARTITIONING
+-----------------------------------
+This little figure tries to show the various partitions involved when
+adding two new partitions to a linear hash based partitioned table with
+four partitions to start with, which lists are used and the states they
+pass through. Adding partitions to a normal hash based partitioned table is
+similar, except that there it is always all the existing partitions that
+are reorganised, not only a subset of them.
+
+Existing partitions New added partitions
+------ ------ ------ ------ | ------ ------
+| | | | | | | | | | | | |
+| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 |
+------ ------ ------ ------ | ------ ------
+PART_CHANGED PART_CHANGED PART_NORMAL PART_NORMAL PART_TO_BE_ADDED
+PART_IS_CHANGED*2 PART_NORMAL PART_NORMAL PART_IS_ADDED
+PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED
+
+Reorganised existing partitions
+------ ------
+| | | |
+| p0'| | p1'|
+------ ------
+
+p0 - p5 will be in the partitions list of partitions.
+p0' and p1' will actually not exist as separate objects; their presence can
+be deduced from the state of the partition, and the names of those
+partitions can be deduced the same way.
+
+After adding the partitions and copying the partition data to p0', p1',
+p4 and p5 from p0 and p1 the states change to adapt for the new situation
+where p0 and p1 are dropped and replaced by p0' and p1' and the new p4 and
+p5 are in the table again.
+
+The first line above shows the states of the partitions before we start
+adding and copying partitions, the second after completing the adding
+and copying and finally the third line after also dropping the partitions
+that are reorganised.
+*/
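+/*
+Illustration only (not part of the original patch): the ADD PARTITION
+cases above are triggered by statements such as
+  ALTER TABLE t1 ADD PARTITION (PARTITION p4 VALUES LESS THAN (400));
+for RANGE partitioning, and
+  ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
+for [LINEAR] HASH partitioning.
+*/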
+ if (*fast_alter_partition &&
+ tab_part_info->part_type == HASH_PARTITION)
+ {
+ uint part_no= 0, start_part= 1, start_sec_part= 1;
+ uint end_part= 0, end_sec_part= 0;
+ uint upper_2n= tab_part_info->linear_hash_mask + 1;
+ uint lower_2n= upper_2n >> 1;
+ bool all_parts= TRUE;
+ if (tab_part_info->linear_hash_ind &&
+ no_new_partitions < upper_2n)
+ {
+ /*
+ An analysis of which parts needs reorganisation shows that it is
+ divided into two intervals. The first interval is those parts
+ that are reorganised up until upper_2n - 1. From upper_2n and
+ onwards it starts again from partition 0 and goes on until
+ it reaches p(upper_2n - 1). If the last new partition reaches
+ beyond upper_2n - 1 then the first interval will end with
+ p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n).
+ If lower_2n partitions are added then p0 to p(lower_2n - 1) will
+            be reorganised, which means that the two intervals become one
+            interval at this point. Thus only when adding fewer than
+            lower_2n partitions and going beyond a total of upper_2n do we
+            actually get two intervals.
+
+ To exemplify this assume we have 6 partitions to start with and
+ add 1, 2, 3, 5, 6, 7, 8, 9 partitions.
+ The first to add after p5 is p6 = 110 in bit numbers. Thus we
+            can see that 10 = p2 will be the partition to reorganise if
+            only one partition is added.
+ If 2 partitions are added we reorganise [p2, p3]. Those two
+ cases are covered by the second if part below.
+ If 3 partitions are added we reorganise [p2, p3] U [p0,p0]. This
+ part is covered by the else part below.
+ If 5 partitions are added we get [p2,p3] U [p0, p2] = [p0, p3].
+ This is covered by the first if part where we need the max check
+ to here use lower_2n - 1.
+ If 7 partitions are added we get [p2,p3] U [p0, p4] = [p0, p4].
+ This is covered by the first if part but here we use the first
+ calculated end_part.
+ Finally with 9 new partitions we would also reorganise p6 if we
+ used the method below but we cannot reorganise more partitions
+ than what we had from the start and thus we simply set all_parts
+ to TRUE. In this case we don't get into this if-part at all.
+ */
+ all_parts= FALSE;
+ if (no_new_partitions >= lower_2n)
+ {
+ /*
+ In this case there is only one interval since the two intervals
+ overlap and this starts from zero to last_part_no - upper_2n
+ */
+ start_part= 0;
+ end_part= new_total_partitions - (upper_2n + 1);
+ end_part= max(lower_2n - 1, end_part);
+ }
+ else if (new_total_partitions <= upper_2n)
+ {
+ /*
+ Also in this case there is only one interval since we are not
+ going over a 2**n boundary
+ */
+ start_part= no_orig_partitions - lower_2n;
+ end_part= start_part + (no_new_partitions - 1);
+ }
+ else
+ {
+          /*
+            We have two non-overlapping intervals since we are not
+            passing a 2**n border and we do not have at least lower_2n
+            new parts that would ensure that the intervals become
+            overlapping.
+          */
+ start_part= no_orig_partitions - lower_2n;
+ end_part= upper_2n - 1;
+ start_sec_part= 0;
+ end_sec_part= new_total_partitions - (upper_2n + 1);
+ }
+ }
+ List_iterator<partition_element> tab_it(tab_part_info->partitions);
+ part_no= 0;
+ do
+ {
+ partition_element *p_elem= tab_it++;
+ if (all_parts ||
+ (part_no >= start_part && part_no <= end_part) ||
+ (part_no >= start_sec_part && part_no <= end_sec_part))
+ {
+ p_elem->part_state= PART_CHANGED;
+ }
+ } while (++part_no < no_orig_partitions);
+ }
+ /*
+ Need to concatenate the lists here to make it possible to check the
+ partition info for correctness using check_partition_info.
+ For on-line add partition we set the state of this partition to
+ PART_TO_BE_ADDED to ensure that it is known that it is not yet
+ usable (becomes usable when partition is created and the switch of
+      partition configuration is made).
+ */
+ {
+ List_iterator<partition_element> alt_it(alt_part_info->partitions);
+ uint part_count= 0;
+ do
+ {
+ partition_element *part_elem= alt_it++;
+ if (*fast_alter_partition)
+ part_elem->part_state= PART_TO_BE_ADDED;
+ if (tab_part_info->partitions.push_back(part_elem))
+ {
+ mem_alloc_error(1);
+ DBUG_RETURN(TRUE);
+ }
+ } while (++part_count < no_new_partitions);
+ tab_part_info->no_parts+= no_new_partitions;
+ }
+ /*
+ If we specify partitions explicitly we don't use defaults anymore.
+ Using ADD PARTITION also means that we don't have the default number
+        of partitions anymore. We also use this code for table
+        reorganisations, and in that case we don't set any default flags
+        to FALSE.
+ */
+ if (!(alter_info->flags & ALTER_TABLE_REORG))
+ {
+ if (!alt_part_info->use_default_partitions)
+ {
+ DBUG_PRINT("info", ("part_info= %x", tab_part_info));
+ tab_part_info->use_default_partitions= FALSE;
+ }
+ tab_part_info->use_default_no_partitions= FALSE;
+ }
+ }
+ else if (alter_info->flags == ALTER_DROP_PARTITION)
+ {
+ /*
+        Dropping a partition from RANGE or LIST partitioning is always
+        safe and can be made more or less immediate. It is necessary
+ however to ensure that the partition to be removed is safely removed
+ and that REPAIR TABLE can remove the partition if for some reason the
+ command to drop the partition failed in the middle.
+ */
+ uint part_count= 0;
+ uint no_parts_dropped= alter_info->partition_names.elements;
+ uint no_parts_found= 0;
+ List_iterator<partition_element> part_it(tab_part_info->partitions);
+ if (!(tab_part_info->part_type == RANGE_PARTITION ||
+ tab_part_info->part_type == LIST_PARTITION))
+ {
+ my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
+ DBUG_RETURN(TRUE);
+ }
+ if (no_parts_dropped >= tab_part_info->no_parts)
+ {
+ my_error(ER_DROP_LAST_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (is_name_in_list(part_elem->partition_name,
+ alter_info->partition_names))
+ {
+ /*
+ Set state to indicate that the partition is to be dropped.
+ */
+ no_parts_found++;
+ part_elem->part_state= PART_TO_BE_DROPPED;
+ }
+ } while (++part_count < tab_part_info->no_parts);
+ if (no_parts_found != no_parts_dropped)
+ {
+ my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
+ DBUG_RETURN(TRUE);
+ }
+ if (table->file->is_fk_defined_on_table_or_index(MAX_KEY))
+ {
+ my_error(ER_ROW_IS_REFERENCED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if ((alter_info->flags & ALTER_OPTIMIZE_PARTITION) ||
+ (alter_info->flags & ALTER_ANALYZE_PARTITION) ||
+ (alter_info->flags & ALTER_CHECK_PARTITION) ||
+ (alter_info->flags & ALTER_REPAIR_PARTITION) ||
+ (alter_info->flags & ALTER_REBUILD_PARTITION))
+ {
+ uint no_parts_opt= alter_info->partition_names.elements;
+ uint part_count= 0;
+ uint no_parts_found= 0;
+ List_iterator<partition_element> part_it(tab_part_info->partitions);
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if ((alter_info->flags & ALTER_ALL_PARTITION) ||
+ (is_name_in_list(part_elem->partition_name,
+ alter_info->partition_names)))
+ {
+ /*
+ Mark the partition as a partition to be "changed" by
+ analyzing/optimizing/rebuilding/checking/repairing
+ */
+ no_parts_found++;
+ part_elem->part_state= PART_CHANGED;
+ }
+ } while (++part_count < tab_part_info->no_parts);
+ if (no_parts_found != no_parts_opt &&
+ (!(alter_info->flags & ALTER_ALL_PARTITION)))
+ {
+ const char *ptr;
+ if (alter_info->flags & ALTER_OPTIMIZE_PARTITION)
+ ptr= "OPTIMIZE";
+ else if (alter_info->flags & ALTER_ANALYZE_PARTITION)
+ ptr= "ANALYZE";
+ else if (alter_info->flags & ALTER_CHECK_PARTITION)
+ ptr= "CHECK";
+ else if (alter_info->flags & ALTER_REPAIR_PARTITION)
+ ptr= "REPAIR";
+ else
+ ptr= "REBUILD";
+ my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), ptr);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (alter_info->flags & ALTER_COALESCE_PARTITION)
+ {
+ uint no_parts_coalesced= alter_info->no_parts;
+ uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced;
+ List_iterator<partition_element> part_it(tab_part_info->partitions);
+ if (tab_part_info->part_type != HASH_PARTITION)
+ {
+ my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (no_parts_coalesced == 0)
+ {
+ my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (no_parts_coalesced >= tab_part_info->no_parts)
+ {
+ my_error(ER_DROP_LAST_PARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+/*
+Online handling:
+COALESCE PARTITION:
+-------------------
+The figure below shows the manner in which partitions are handled when
+performing an on-line coalesce partition and which states they go through
+at start, after adding and copying partitions and finally after dropping
+the partitions to drop. The figure shows an example using four partitions
+to start with, using linear hash and coalescing one partition (always the
+last partition).
+
+With normal hash all remaining partitions would get a new reorganised part;
+with linear hash only a subset of them does, as the figure shows.
+
+Existing partitions Coalesced partition
+------ ------ ------ | ------
+| | | | | | | | |
+| p0 | | p1 | | p2 | | | p3 |
+------ ------ ------ | ------
+PART_NORMAL PART_CHANGED PART_NORMAL PART_REORGED_DROPPED
+PART_NORMAL PART_IS_CHANGED PART_NORMAL PART_TO_BE_DROPPED
+PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_DROPPED
+
+Reorganised existing partitions
+ ------
+ | |
+ | p1'|
+ ------
+
+p0 - p3 is in the partitions list.
+The p1' partition will actually not be in any list; it is deduced from the
+state of p1.
+*/
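+/*
+Illustration only (not part of the original patch): this case is triggered
+by a statement such as
+  ALTER TABLE t1 COALESCE PARTITION 1;
+which removes the last hash partition and redistributes its rows over the
+remaining partitions.
+*/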
+ {
+ uint part_count= 0, start_part= 1, start_sec_part= 1;
+ uint end_part= 0, end_sec_part= 0;
+ bool all_parts= TRUE;
+ if (*fast_alter_partition &&
+ tab_part_info->linear_hash_ind)
+ {
+ uint upper_2n= tab_part_info->linear_hash_mask + 1;
+ uint lower_2n= upper_2n >> 1;
+ all_parts= FALSE;
+ if (no_parts_coalesced >= lower_2n)
+ {
+ all_parts= TRUE;
+ }
+ else if (no_parts_remain >= lower_2n)
+ {
+ end_part= tab_part_info->no_parts - (lower_2n + 1);
+ start_part= no_parts_remain - lower_2n;
+ }
+ else
+ {
+ start_part= 0;
+ end_part= tab_part_info->no_parts - (lower_2n + 1);
+ end_sec_part= (lower_2n >> 1) - 1;
+ start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1));
+ }
+ }
+ do
+ {
+ partition_element *p_elem= part_it++;
+ if (*fast_alter_partition &&
+ (all_parts ||
+ (part_count >= start_part && part_count <= end_part) ||
+ (part_count >= start_sec_part && part_count <= end_sec_part)))
+ p_elem->part_state= PART_CHANGED;
+ if (++part_count > no_parts_remain)
+ {
+ if (*fast_alter_partition)
+ p_elem->part_state= PART_REORGED_DROPPED;
+ else
+ part_it.remove();
+ }
+ } while (part_count < tab_part_info->no_parts);
+ tab_part_info->no_parts= no_parts_remain;
+ }
+ if (!(alter_info->flags & ALTER_TABLE_REORG))
+ tab_part_info->use_default_no_partitions= FALSE;
+ }
+ else if (alter_info->flags == ALTER_REORGANIZE_PARTITION)
+ {
+ /*
+ Reorganise partitions takes a number of partitions that are next
+ to each other (at least for RANGE PARTITIONS) and then uses those
+ to create a set of new partitions. So data is copied from those
+        partitions into the new set of partitions. The new partitions can
+        have more or fewer values in their LIST value specifications; both
+        are allowed. The ranges can be different, but since a set of
+        consecutive partitions is changed they must cover the same total
+        range as the partitions they were changed from.
+ This command can be used on RANGE and LIST partitions.
+ */
+ uint no_parts_reorged= alter_info->partition_names.elements;
+ uint no_parts_new= thd->lex->part_info->partitions.elements;
+ partition_info *alt_part_info= thd->lex->part_info;
+ uint check_total_partitions;
+ if (no_parts_reorged > tab_part_info->no_parts)
+ {
+ my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (!(tab_part_info->part_type == RANGE_PARTITION ||
+ tab_part_info->part_type == LIST_PARTITION) &&
+ (no_parts_new != no_parts_reorged))
+ {
+ my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ check_total_partitions= tab_part_info->no_parts + no_parts_new;
+ check_total_partitions-= no_parts_reorged;
+ if (check_total_partitions > MAX_PARTITIONS)
+ {
+ my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+/*
+Online handling:
+REORGANIZE PARTITION:
+---------------------
+The figure exemplifies the handling of partitions, their state changes and
+how they are organised. It exemplifies four partitions where two of the
+partitions are reorganised (p1 and p2) into two new partitions (p4 and p5).
+The reason for this change could be to change range limits or list values,
+or, for hash partitions, simply to reorganise the partitions, which could
+also involve moving them to new disks or new node groups (MySQL Cluster).
+
+Existing partitions
+------ ------ ------ ------
+| | | | | | | |
+| p0 | | p1 | | p2 | | p3 |
+------ ------ ------ ------
+PART_NORMAL PART_TO_BE_REORGED PART_NORMAL
+PART_NORMAL PART_TO_BE_DROPPED PART_NORMAL
+PART_NORMAL PART_IS_DROPPED PART_NORMAL
+
+Reorganised new partitions (replacing p1 and p2)
+------ ------
+| | | |
+| p4 | | p5 |
+------ ------
+PART_TO_BE_ADDED
+PART_IS_ADDED
+PART_IS_ADDED
+
+All unchanged partitions and the new partitions are in the partitions list
+in the order they will have when the change is completed. The reorganised
+partitions are placed in the temp_partitions list. PART_IS_ADDED is only a
+temporary state not written in the frm file. It is used to ensure we write
+the generated partition syntax in a correct manner.
+*/
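+/*
+Illustration only (not part of the original patch): this case is triggered
+by a statement such as
+  ALTER TABLE t1 REORGANIZE PARTITION p1,p2 INTO
+    (PARTITION p4 VALUES LESS THAN (200),
+     PARTITION p5 VALUES LESS THAN (300));
+where p4 and p5 must cover the same total range as p1 and p2 did.
+*/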
+ {
+ List_iterator<partition_element> tab_it(tab_part_info->partitions);
+ uint part_count= 0;
+ bool found_first= FALSE;
+ bool found_last= FALSE;
+ bool is_last_partition_reorged;
+ uint drop_count= 0;
+ longlong tab_max_range= 0, alt_max_range= 0;
+ do
+ {
+ partition_element *part_elem= tab_it++;
+ is_last_partition_reorged= FALSE;
+ if (is_name_in_list(part_elem->partition_name,
+ alter_info->partition_names))
+ {
+ is_last_partition_reorged= TRUE;
+ drop_count++;
+ tab_max_range= part_elem->range_value;
+ if (*fast_alter_partition &&
+ tab_part_info->temp_partitions.push_back(part_elem))
+ {
+ mem_alloc_error(1);
+ DBUG_RETURN(TRUE);
+ }
+ if (*fast_alter_partition)
+ part_elem->part_state= PART_TO_BE_REORGED;
+ if (!found_first)
+ {
+ uint alt_part_count= 0;
+ found_first= TRUE;
+ List_iterator<partition_element>
+ alt_it(alt_part_info->partitions);
+ do
+ {
+ partition_element *alt_part_elem= alt_it++;
+ alt_max_range= alt_part_elem->range_value;
+ if (*fast_alter_partition)
+ alt_part_elem->part_state= PART_TO_BE_ADDED;
+ if (alt_part_count == 0)
+ tab_it.replace(alt_part_elem);
+ else
+ tab_it.after(alt_part_elem);
+ } while (++alt_part_count < no_parts_new);
+ }
+ else if (found_last)
+ {
+ my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ else
+ tab_it.remove();
+ }
+ else
+ {
+ if (found_first)
+ found_last= TRUE;
+ }
+ } while (++part_count < tab_part_info->no_parts);
+ if (drop_count != no_parts_reorged)
+ {
+ my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE");
+ DBUG_RETURN(TRUE);
+ }
+ if (tab_part_info->part_type == RANGE_PARTITION &&
+ ((is_last_partition_reorged &&
+ alt_max_range < tab_max_range) ||
+ (!is_last_partition_reorged &&
+ alt_max_range != tab_max_range)))
+ {
+ /*
+ For range partitioning the total resulting range before and
+ after the change must be the same except in one case. This is
+ when the last partition is reorganised, in this case it is
+ acceptable to increase the total range.
+ The reason is that it is not allowed to have "holes" in the
+ middle of the ranges and thus we should not allow to reorganise
+ to create "holes". Also we should not allow using REORGANIZE
+ to drop data.
+ */
+ my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ tab_part_info->no_parts= check_total_partitions;
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(FALSE);
+ }
+ *partition_changed= TRUE;
+ create_info->db_type= &partition_hton;
+ thd->lex->part_info= tab_part_info;
+ if (alter_info->flags == ALTER_ADD_PARTITION ||
+ alter_info->flags == ALTER_REORGANIZE_PARTITION)
+ {
+ if (check_partition_info(tab_part_info, (handlerton**)NULL,
+ table->file, ULL(0)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ else
+ {
+ /*
+ When thd->lex->part_info has a reference to a partition_info the
+ ALTER TABLE contained a definition of a partitioning.
+
+ Case I:
+ If there was a partition before and there is a new one defined.
+ We use the new partitioning. The new partitioning is already
+ defined in the correct variable so no work is needed to
+ accomplish this.
+ We do however need to update partition_changed to ensure that not
+ only the frm file is changed in the ALTER TABLE command.
+
+ Case IIa:
+ There was a partitioning before and there is no new one defined.
+ Also the user has not specified an explicit engine to use.
+
+ We use the old partitioning also for the new table. We do this
+ by assigning the partition_info from the table loaded in
+ open_ltable to the partition_info struct used by mysql_create_table
+ later in this method.
+
+ Case IIb:
+ There was a partitioning before and there is no new one defined.
+ The user has specified an explicit engine to use.
+
+ Since the user has specified an explicit engine to use we override
+ the old partitioning info and create a new table using the specified
+ engine. This is the reason for the extra check if old and new engine
+ is equal.
+ In this case the partition also is changed.
+
+ Case III:
+ There was no partitioning before altering the table, there is
+ partitioning defined in the altered table. Use the new partitioning.
+ No work needed since the partitioning info is already in the
+ correct variable.
+
+      Here we also detect the case where the new partitioning uses the
+      same partition function as the default (PARTITION BY KEY or
+      PARTITION BY LINEAR KEY with the list of fields equal to the primary
+      key fields, OR PARTITION BY [LINEAR] KEY() for tables without
+      primary key).
+      Also here the partitioning has changed and thus a new table must
+      be created.
+
+ Case IV:
+ There was no partitioning before and no partitioning defined.
+ Obviously no work needed.
+ */
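+    /*
+      Illustration only (not part of the original patch): Case IIb above
+      corresponds to e.g.
+        ALTER TABLE t1 ENGINE= MyISAM;
+      on a partitioned table whose current engine differs; the explicit
+      engine overrides the old partitioning.
+    */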
+ if (table->part_info)
+ {
+ if (!thd->lex->part_info &&
+ create_info->db_type == old_db_type)
+ thd->lex->part_info= table->part_info;
+ }
+ if (thd->lex->part_info)
+ {
+ /*
+        Need to cater for engine types that can handle partitioning
+        without using the partition handler.
+ */
+ if (thd->lex->part_info != table->part_info)
+ *partition_changed= TRUE;
+ if (create_info->db_type == &partition_hton)
+ {
+ if (table->part_info)
+ {
+ thd->lex->part_info->default_engine_type=
+ table->part_info->default_engine_type;
+ }
+ else
+ {
+ thd->lex->part_info->default_engine_type=
+ ha_checktype(thd, DB_TYPE_DEFAULT, FALSE, FALSE);
+ }
+ }
+ else
+ {
+ bool is_native_partitioned;
+ partition_info *part_info= thd->lex->part_info;
+ part_info->default_engine_type= create_info->db_type;
+ if (check_native_partitioned(create_info, &is_native_partitioned,
+ part_info, thd))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ if (!is_native_partitioned)
+ {
+ if (create_info->db_type == (handlerton*)&default_hton)
+ {
+ thd->lex->part_info->default_engine_type=
+ ha_checktype(thd, DB_TYPE_DEFAULT, FALSE, FALSE);
+ }
+ create_info->db_type= &partition_hton;
+ }
+ }
+ DBUG_PRINT("info", ("default_db_type = %s",
+ thd->lex->part_info->default_engine_type->name));
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
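+
+
+/*
+  Illustration only, not part of the original patch: a self-contained
+  sketch of the linear hash interval analysis used twice above (ADD and
+  COALESCE). Given the original and added number of partitions and the
+  2**n boundary (upper_2n), it computes which existing partitions get
+  reorganised, mirroring the start_part/end_part logic in
+  prep_alter_part_table. The function name is hypothetical.
+*/
+
+static void linear_hash_reorg_intervals(uint no_orig, uint no_new,
+                                        uint upper_2n,
+                                        uint *start1, uint *end1,
+                                        uint *start2, uint *end2)
+{
+  uint lower_2n= upper_2n >> 1;
+  uint no_total= no_orig + no_new;
+  *start2= 1;
+  *end2= 0;                             /* Second interval empty */
+  if (no_new >= lower_2n)
+  {
+    /* The two intervals overlap and collapse into one, starting at p0 */
+    *start1= 0;
+    *end1= max(lower_2n - 1, no_total - (upper_2n + 1));
+  }
+  else if (no_total <= upper_2n)
+  {
+    /* No 2**n boundary is crossed; a single interval suffices */
+    *start1= no_orig - lower_2n;
+    *end1= *start1 + (no_new - 1);
+  }
+  else
+  {
+    /* Boundary crossed with few new parts; two disjoint intervals */
+    *start1= no_orig - lower_2n;
+    *end1= upper_2n - 1;
+    *start2= 0;
+    *end2= no_total - (upper_2n + 1);
+  }
+}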
+
+
+/*
+ Change partitions, used to implement ALTER TABLE ADD/REORGANIZE/COALESCE
+ partitions. This method is used to implement both single-phase and multi-
+ phase implementations of ADD/REORGANIZE/COALESCE partitions.
+
+ SYNOPSIS
+ mysql_change_partitions()
+ lpt Struct containing parameters
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+
+ DESCRIPTION
+ Request handler to add partitions as set in states of the partition
+
+ Elements of the lpt parameters used:
+ create_info Create information used to create partitions
+ db Database name
+ table_name Table name
+ copied Output parameter where number of copied
+ records are added
+ deleted Output parameter where number of deleted
+ records are added
+*/
+
+static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ char path[FN_REFLEN+1];
+ DBUG_ENTER("mysql_change_partitions");
+
+ build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
+ DBUG_RETURN(lpt->table->file->change_partitions(lpt->create_info, path,
+ &lpt->copied,
+ &lpt->deleted,
+ lpt->pack_frm_data,
+ lpt->pack_frm_len));
+}
+
+
+/*
+ Rename partitions in an ALTER TABLE of partitions
+
+ SYNOPSIS
+ mysql_rename_partitions()
+ lpt Struct containing parameters
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+
+ DESCRIPTION
+ Request handler to rename partitions as set in states of the partition
+
+ Parameters used:
+ db Database name
+ table_name Table name
+*/
+
+static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ char path[FN_REFLEN+1];
+ DBUG_ENTER("mysql_rename_partitions");
+
+ build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
+ DBUG_RETURN(lpt->table->file->rename_partitions(path));
+}
+
+
+/*
+ Drop partitions in an ALTER TABLE of partitions
+
+ SYNOPSIS
+ mysql_drop_partitions()
+ lpt Struct containing parameters
+
+ RETURN VALUES
+ TRUE Failure
+ FALSE Success
+
+  DESCRIPTION
+ Drop the partitions marked with PART_TO_BE_DROPPED state and remove
+ those partitions from the list.
+
+ Parameters used:
+ table Table object
+ db Database name
+ table_name Table name
+*/
+
+static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
+{
+ char path[FN_REFLEN+1];
+ partition_info *part_info= lpt->table->part_info;
+ List_iterator<partition_element> part_it(part_info->partitions);
+ uint i= 0;
+ uint remove_count= 0;
+ DBUG_ENTER("mysql_drop_partitions");
+
+ build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
+ if (lpt->table->file->drop_partitions(path))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (part_elem->part_state == PART_IS_DROPPED)
+ {
+ part_it.remove();
+ remove_count++;
+ }
+ } while (++i < part_info->no_parts);
+ part_info->no_parts-= remove_count;
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Actually perform the change requested by ALTER TABLE of partitions
+ previously prepared.
+
+ SYNOPSIS
+ fast_alter_partition_table()
+ thd Thread object
+ table Table object
+ alter_info ALTER TABLE info
+ create_info Create info for CREATE TABLE
+ table_list List of the table involved
+ create_list The fields in the resulting table
+ key_list The keys in the resulting table
+ db Database name of new table
+ table_name Table name of new table
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ Perform all ALTER TABLE operations for partitioned tables that can be
+ performed fast without a full copy of the original table.
+*/
+
+uint fast_alter_partition_table(THD *thd, TABLE *table,
+ ALTER_INFO *alter_info,
+ HA_CREATE_INFO *create_info,
+ TABLE_LIST *table_list,
+ List<create_field> *create_list,
+ List<Key> *key_list, const char *db,
+ const char *table_name,
+ uint fast_alter_partition)
+{
+ /* Set-up struct used to write frm files */
+ ulonglong copied= 0;
+ ulonglong deleted= 0;
+ partition_info *part_info= table->part_info;
+ ALTER_PARTITION_PARAM_TYPE lpt_obj;
+ ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj;
+ bool written_bin_log= TRUE;
+ DBUG_ENTER("fast_alter_partition_table");
+
+ lpt->thd= thd;
+ lpt->create_info= create_info;
+ lpt->create_list= create_list;
+ lpt->key_list= key_list;
+ lpt->db_options= create_info->table_options;
+ if (create_info->row_type == ROW_TYPE_DYNAMIC)
+ lpt->db_options|= HA_OPTION_PACK_RECORD;
+ lpt->table= table;
+ lpt->key_info_buffer= 0;
+ lpt->key_count= 0;
+ lpt->db= db;
+ lpt->table_name= table_name;
+ lpt->copied= 0;
+ lpt->deleted= 0;
+ lpt->pack_frm_data= NULL;
+ lpt->pack_frm_len= 0;
+ thd->lex->part_info= part_info;
+
+ if (alter_info->flags & ALTER_OPTIMIZE_PARTITION ||
+ alter_info->flags & ALTER_ANALYZE_PARTITION ||
+ alter_info->flags & ALTER_CHECK_PARTITION ||
+ alter_info->flags & ALTER_REPAIR_PARTITION)
+ {
+ /*
+ In this case the user has specified that he wants a set of partitions
+ to be optimised and the partition engine can handle optimising
+ partitions natively without requiring a full rebuild of the
+ partitions.
+
+      In this case it is enough to call the respective handler method
+      (optimize_partitions and friends); there is no need to change frm
+      files or anything else.
+ */
+ written_bin_log= FALSE;
+ if (((alter_info->flags & ALTER_OPTIMIZE_PARTITION) &&
+ (table->file->optimize_partitions(thd))) ||
+ ((alter_info->flags & ALTER_ANALYZE_PARTITION) &&
+ (table->file->analyze_partitions(thd))) ||
+ ((alter_info->flags & ALTER_CHECK_PARTITION) &&
+ (table->file->check_partitions(thd))) ||
+ ((alter_info->flags & ALTER_REPAIR_PARTITION) &&
+ (table->file->repair_partitions(thd))))
+ {
+ fast_alter_partition_error_handler(lpt);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (fast_alter_partition & HA_PARTITION_ONE_PHASE)
+ {
+ /*
+ In the case where the engine supports one phase online partition
+ changes it is not necessary to have any exclusive locks. The
+ correctness is upheld instead by transactions being aborted if they
+ access the table after its partition definition has changed (if they
+ are still using the old partition definition).
+
+ The handler is in this case responsible to ensure that all users
+ start using the new frm file after it has changed. To implement
+ one phase it is necessary for the handler to have the master copy
+      of the frm file and use discovery mechanisms to renew it. Thus the
+      frm write will write the frm, pack the new frm and finally delete
+      it; the discovery mechanisms will then either restore the old frm
+      or install the new one after the change is activated.
+
+      Thus all open tables will be discovered to be old, at the latest as
+      soon as they try an operation using the old table definition. One
+ should ensure that this is checked already when opening a table,
+ even if it is found in the cache of open tables.
+
+ change_partitions will perform all operations and it is the duty of
+      the handler to ensure that the frm files in the system get updated
+      in synch with the changes made, and that proper error handling is
+      done if an error occurs.
+
+ If the MySQL Server crashes at this moment but the handler succeeds
+ in performing the change then the binlog is not written for the
+ change. There is no way to solve this as long as the binlog is not
+ transactional and even then it is hard to solve it completely.
+
+      The first approach here was to downgrade locks. Now a different
+      approach has been decided upon. The idea is that the handler will
+      have access to the
+ ALTER_INFO when store_lock arrives with TL_WRITE_ALLOW_READ. So if the
+ handler knows that this functionality can be handled with a lower lock
+ level it will set the lock level to TL_WRITE_ALLOW_WRITE immediately.
+ Thus the need to downgrade the lock disappears.
+ 1) Write the new frm, pack it and then delete it
+ 2) Perform the change within the handler
+ */
+ if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE | WFRM_PACK_FRM)) ||
+ (mysql_change_partitions(lpt)))
+ {
+ fast_alter_partition_error_handler(lpt);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if (alter_info->flags == ALTER_DROP_PARTITION)
+ {
+ /*
+ Now after all checks and setting state on dropped partitions we can
+ start the actual dropping of the partitions.
+
+ Drop partition is actually two things happening. The first is that
+ a lot of records are deleted. The second is that the behaviour of
+      subsequent updates, writes and deletes will change. The delete
+ part can be handled without any particular high lock level by
+ transactional engines whereas non-transactional engines need to
+ ensure that this change is done with an exclusive lock on the table.
+ The second part, the change of partitioning does however require
+ an exclusive lock to install the new partitioning as one atomic
+ operation. If this is not the case, it is possible for two
+ transactions to see the change in a different order than their
+ serialisation order. Thus we need an exclusive lock for both
+ transactional and non-transactional engines.
+
+ For LIST partitions it could be possible to avoid the exclusive lock
+ (and for RANGE partitions if they didn't rearrange range definitions
+ after a DROP PARTITION) if one ensured that failed accesses to the
+      dropped partitions were aborted for sure (thus only possible for
+ transactional engines).
+
+ 1) Lock the table in TL_WRITE_ONLY to ensure all other accesses to
+ the table have completed
+ 2) Write the new frm file where the partitions have changed but are
+ still remaining with the state PART_TO_BE_DROPPED
+ 3) Write the bin log
+ 4) Prepare MyISAM handlers for drop of partitions
+      5) Ensure that any users that have opened the table but not yet
+ reached the abort lock do that before downgrading the lock.
+ 6) Drop the partitions
+ 7) Write the frm file that the partition has been dropped
+      8) Wait until all accesses using the old frm file have completed
+ 9) Complete query
+ */
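+    /*
+      The void calls below are wrapped as (call, FALSE) so that they can
+      take part in the || chain; only the steps that return a bool can
+      abort the chain.
+    */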
+ if ((abort_and_upgrade_lock(lpt)) ||
+ (mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+ ((!thd->lex->no_write_to_binlog) &&
+ (write_bin_log(thd, FALSE,
+ thd->query, thd->query_length), FALSE)) ||
+ (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
+ (close_open_tables_and_downgrade(lpt), FALSE) ||
+ (mysql_drop_partitions(lpt)) ||
+ (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+ (mysql_wait_completed_table(lpt, table), FALSE))
+ {
+ fast_alter_partition_error_handler(lpt);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else if ((alter_info->flags & ALTER_ADD_PARTITION) &&
+ (part_info->part_type == RANGE_PARTITION ||
+ part_info->part_type == LIST_PARTITION))
+ {
+ /*
+ ADD RANGE/LIST PARTITIONS
+      In this case no tuples are removed and no tuples are added; the
+      operation merely adds a new, empty partition. It is still necessary
+      to perform the change as an atomic operation. Otherwise
+ someone reading without seeing the new partition could potentially
+ miss updates made by a transaction serialised before it that are
+ inserted into the new partition.
+
+ 1) Write the new frm file where state of added partitions is
+ changed to PART_TO_BE_ADDED
+ 2) Add the new partitions
+ 3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+ are still using the old partitioning scheme. Wait until all
+ ongoing users have completed before progressing.
+ 4) Write a new frm file of the table where the partitions are added
+ to the table.
+ 5) Write binlog
+      6) Wait until all accesses using the old frm file have completed
+ 7) Complete query
+ */
+ if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+ (mysql_change_partitions(lpt)) ||
+ (abort_and_upgrade_lock(lpt)) ||
+ (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+ ((!thd->lex->no_write_to_binlog) &&
+ (write_bin_log(thd, FALSE,
+ thd->query, thd->query_length), FALSE)) ||
+ (close_open_tables_and_downgrade(lpt), FALSE))
+ {
+ fast_alter_partition_error_handler(lpt);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ {
+ /*
+ ADD HASH PARTITION/
+ COALESCE PARTITION/
+ REBUILD PARTITION/
+ REORGANIZE PARTITION
+
+ In this case all records are still around after the change although
+ possibly organised into new partitions, thus by ensuring that all
+ updates go to both the old and the new partitioning scheme we can
+ actually perform this operation lock-free. The only exception to
+ this is when REORGANIZE PARTITION adds/drops ranges. In this case
+ there needs to be an exclusive lock during the time when the range
+ changes occur.
+ This is only possible if the handler can ensure double-write for a
+ period. The double write will ensure that it doesn't matter where the
+ data is read from since both places are updated for writes. If such
+ double writing is not performed then it is necessary to perform the
+ change with the usual exclusive lock. With double writes it is even
+ possible to perform writes in parallel with the reorganisation of
+ partitions.
+
+      Without the double write procedure we get the following sequence of
+      steps. The only difference when using double write is that we can
+      downgrade
+ the lock to TL_WRITE_ALLOW_WRITE. Double write in this case only
+ double writes from old to new. If we had double writing in both
+ directions we could perform the change completely without exclusive
+ lock for HASH partitions.
+ Handlers that perform double writing during the copy phase can actually
+ use a lower lock level. This can be handled inside store_lock in the
+ respective handler.
+
+ 1) Write the new frm file where state of added partitions is
+ changed to PART_TO_BE_ADDED and the reorganised partitions
+ are set in state PART_TO_BE_REORGED.
+ 2) Add the new partitions
+ Copy from the reorganised partitions to the new partitions
+ 3) Lock all partitions in TL_WRITE_ONLY to ensure that no users
+ are still using the old partitioning scheme. Wait until all
+ ongoing users have completed before progressing.
+ 4) Prepare MyISAM handlers for rename and delete of partitions
+ 5) Write a new frm file of the table where the partitions are
+ reorganised.
+ 6) Rename the reorged partitions such that they are no longer
+ used and rename those added to their real new names.
+ 7) Write bin log
+      8) Wait until all accesses using the old frm file have completed
+      9) Drop the reorganised partitions
+      10) Write a new frm file of the table where the partitions are
+          reorganised.
+      11) Wait until all accesses using the old frm file have completed
+      12) Complete query
+ */
+
+ if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) ||
+ (mysql_change_partitions(lpt)) ||
+ (abort_and_upgrade_lock(lpt)) ||
+ (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) ||
+ (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) ||
+ (mysql_rename_partitions(lpt)) ||
+ ((!thd->lex->no_write_to_binlog) &&
+ (write_bin_log(thd, FALSE,
+ thd->query, thd->query_length), FALSE)) ||
+ (close_open_tables_and_downgrade(lpt), FALSE) ||
+ (mysql_drop_partitions(lpt)) ||
+ (mysql_write_frm(lpt, 0UL)) ||
+ (mysql_wait_completed_table(lpt, table), FALSE))
+ {
+ fast_alter_partition_error_handler(lpt);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ /*
+ A final step is to write the query to the binlog and send ok to the
+ user
+ */
+ DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted,
+ table_list, FALSE, lpt,
+ written_bin_log));
+}
#endif
/*
+ Internal representation of the frm blob
+*/
+
+struct frm_blob_struct
+{
+ struct frm_blob_header
+ {
+ uint ver; /* Version of header */
+    uint orglen;   /* Original length of the data before compression */
+ uint complen; /* Compressed length of data, 0=uncompressed */
+ } head;
+ char data[1];
+};
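+
+/*
+  Illustration only (not part of the original patch): for a 9000 byte frm
+  that compresses to 3000 bytes, packfrm() below produces a blob of
+  12 + 3000 bytes with head.ver= 1, head.orglen= 9000 and
+  head.complen= 3000, all stored with int4store for machine independence.
+*/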
+
+
+/*
+ packfrm is a method used to compress the frm file for storage in a
+ handler. This method was developed for the NDB handler and has been moved
+  here to also serve other uses.
+
+ SYNOPSIS
+ packfrm()
+ data Data reference to frm file data
+ len Length of frm file data
+ out:pack_data Reference to the pointer to the packed frm data
+ out:pack_len Length of packed frm file data
+
+ RETURN VALUES
+ 0 Success
+ >0 Failure
+*/
+
+int packfrm(const void *data, uint len,
+ const void **pack_data, uint *pack_len)
+{
+ int error;
+ ulong org_len, comp_len;
+ uint blob_len;
+ frm_blob_struct *blob;
+ DBUG_ENTER("packfrm");
+ DBUG_PRINT("enter", ("data: %x, len: %d", data, len));
+
+ error= 1;
+ org_len= len;
+ if (my_compress((byte*)data, &org_len, &comp_len))
+ goto err;
+
+ DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len));
+ DBUG_DUMP("compressed", (char*)data, org_len);
+
+ error= 2;
+ blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len;
+ if (!(blob= (frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME))))
+ goto err;
+
+ // Store compressed blob in machine independent format
+ int4store((char*)(&blob->head.ver), 1);
+ int4store((char*)(&blob->head.orglen), comp_len);
+ int4store((char*)(&blob->head.complen), org_len);
+
+ // Copy frm data into blob, already in machine independent format
+ memcpy(blob->data, data, org_len);
+
+ *pack_data= blob;
+ *pack_len= blob_len;
+ error= 0;
+
+ DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len));
+err:
+ DBUG_RETURN(error);
+
+}
+
+/*
+ unpackfrm is a method used to decompress the frm file received from a
+ handler. This method was developed for the NDB handler and has been moved
+  here to also serve other uses, such as other clustered storage engines.
+
+ SYNOPSIS
+ unpackfrm()
+ pack_data Data reference to packed frm file data
+ out:unpack_data Reference to the pointer to the unpacked frm data
+ out:unpack_len Length of unpacked frm file data
+
+  RETURN VALUES
+ 0 Success
+ >0 Failure
+*/
+
+int unpackfrm(const void **unpack_data, uint *unpack_len,
+ const void *pack_data)
+{
+ const frm_blob_struct *blob= (frm_blob_struct*)pack_data;
+ byte *data;
+ ulong complen, orglen, ver;
+ DBUG_ENTER("unpackfrm");
+ DBUG_PRINT("enter", ("pack_data: %x", pack_data));
+
+ complen= uint4korr((char*)&blob->head.complen);
+ orglen= uint4korr((char*)&blob->head.orglen);
+ ver= uint4korr((char*)&blob->head.ver);
+
+ DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
+ ver,complen,orglen));
+ DBUG_DUMP("blob->data", (char*) blob->data, complen);
+
+ if (ver != 1)
+ DBUG_RETURN(1);
+ if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
+ DBUG_RETURN(2);
+ memcpy(data, blob->data, complen);
+
+ if (my_uncompress(data, &complen, &orglen))
+ {
+ my_free((char*)data, MYF(0));
+ DBUG_RETURN(3);
+ }
+
+ *unpack_data= data;
+ *unpack_len= complen;
+
+ DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len));
+ DBUG_RETURN(0);
+}
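+
+
+/*
+  Illustration only, not part of the original patch: a hypothetical
+  round-trip through packfrm()/unpackfrm(), similar to what a handler
+  does when storing and later retrieving an frm blob. Note that
+  packfrm() compresses the input buffer in place through my_compress().
+*/
+
+static int example_frm_roundtrip(void *frm, uint frm_len)
+{
+  const void *pack_data;
+  const void *unpack_data;
+  uint pack_len, unpack_len;
+  int error;
+
+  if ((error= packfrm(frm, frm_len, &pack_data, &pack_len)))
+    return error;                       /* 1 or 2 on failure */
+  /* pack_data/pack_len would now be stored inside the handler */
+  error= unpackfrm(&unpack_data, &unpack_len, pack_data);
+  my_free((char*)pack_data, MYF(0));
+  if (!error)
+    my_free((char*)unpack_data, MYF(0));
+  return error;                         /* 0 on success, >0 on failure */
+}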
+
+
+/*
Prepare for calling val_int on partition function by setting fields to
point to the record where the values of the PF-fields are stored.
+
SYNOPSIS
set_field_ptr()
ptr Array of fields to change ptr
new_buf New record pointer
old_buf Old record pointer
+
DESCRIPTION
Set ptr in field objects of field array to refer to new_buf record
instead of previously old_buf. Used before calling val_int and after
@@ -3424,10 +5394,10 @@ end:
*/
void set_field_ptr(Field **ptr, const byte *new_buf,
- const byte *old_buf)
+ const byte *old_buf)
{
my_ptrdiff_t diff= (new_buf - old_buf);
- DBUG_ENTER("set_nullable_field_ptr");
+ DBUG_ENTER("set_field_ptr");
do
{
@@ -3442,11 +5412,13 @@ void set_field_ptr(Field **ptr, const byte *new_buf,
point to the record where the values of the PF-fields are stored.
This variant works on a key_part reference.
It is not required that all fields are NOT NULL fields.
+
SYNOPSIS
set_key_field_ptr()
- key_part key part with a set of fields to change ptr
+ key_info key info with a set of fields to change ptr
new_buf New record pointer
old_buf Old record pointer
+
DESCRIPTION
Set ptr in field objects of field array to refer to new_buf record
instead of previously old_buf. Used before calling val_int and after
@@ -3459,7 +5431,8 @@ void set_key_field_ptr(KEY *key_info, const byte *new_buf,
const byte *old_buf)
{
KEY_PART_INFO *key_part= key_info->key_part;
- uint key_parts= key_info->key_parts, i= 0;
+ uint key_parts= key_info->key_parts;
+ uint i= 0;
my_ptrdiff_t diff= (new_buf - old_buf);
DBUG_ENTER("set_key_field_ptr");
@@ -3473,6 +5446,27 @@ void set_key_field_ptr(KEY *key_info, const byte *new_buf,
/*
+ SYNOPSIS
+ mem_alloc_error()
+    size                Size of memory attempted to allocate
+
+ RETURN VALUES
+ None
+
+ DESCRIPTION
+    A routine to use in the many places in the code where a memory
+    allocation error can happen; they all need a simple routine that
+    signals this error.
+*/
+
+void mem_alloc_error(size_t size)
+{
+ my_error(ER_OUTOFMEMORY, MYF(0), size);
+}
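+
+/*
+  Usage sketch (illustrative only; "buf" and "len" are hypothetical):
+
+    if (!(buf= (char*) my_malloc(len, MYF(0))))
+    {
+      mem_alloc_error(len);
+      DBUG_RETURN(TRUE);
+    }
+*/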
+
+
+/*
Fill the string comma-separated line of used partitions names
SYNOPSIS
make_used_partitions_str()
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 5c1ec8b3a49..b9bb8c766e6 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1243,8 +1243,8 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
char *part_syntax;
if (table->part_info &&
((part_syntax= generate_partition_syntax(table->part_info,
- &part_syntax_len,
- FALSE,FALSE))))
+ &part_syntax_len,
+ FALSE,FALSE))))
{
packet->append(part_syntax, part_syntax_len);
my_free(part_syntax, MYF(0));
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 778d1af8a15..8e3bbadebb0 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -41,67 +41,17 @@ static int copy_data_between_tables(TABLE *from,TABLE *to,
static bool prepare_blob_field(THD *thd, create_field *sql_field);
static bool check_engine(THD *thd, const char *table_name,
handlerton **new_engine);
+static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
+ List<create_field> *fields,
+ List<Key> *keys, bool tmp_table,
+ uint *db_options,
+ handler *file, KEY **key_info_buffer,
+ uint *key_count, int select_field_count);
-/*
- SYNOPSIS
- write_bin_log()
- thd Thread object
- clear_error is clear_error to be called
- RETURN VALUES
- NONE
- DESCRIPTION
- Write the binlog if open, routine used in multiple places in this
- file
-*/
-
-static void write_bin_log(THD *thd, bool clear_error,
- char const* query, ulong query_length)
-{
- if (mysql_bin_log.is_open())
- {
- if (clear_error)
- thd->clear_error();
- thd->binlog_query(THD::STMT_QUERY_TYPE,
- query, query_length, FALSE, FALSE);
- }
-}
-
-/*
- SYNOPSIS
- abort_and_upgrade_lock()
- thd Thread object
- table Table object
- db Database name
- table_name Table name
- old_lock_level Old lock level
- RETURN VALUES
- TRUE Failure
- FALSE Success
- DESCRIPTION
- Remember old lock level (for possible downgrade later on), abort all
- waiting threads and ensure that all keeping locks currently are
- completed such that we own the lock exclusively and no other interaction
- is ongoing.
-*/
-
-static bool abort_and_upgrade_lock(THD *thd, TABLE *table, const char *db,
- const char *table_name,
- uint *old_lock_level)
-{
- uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
- DBUG_ENTER("abort_and_upgrade_locks");
-
- *old_lock_level= table->reginfo.lock_type;
- mysql_lock_abort(thd, table);
- VOID(remove_table_from_cache(thd, db, table_name, flags));
- if (thd->killed)
- {
- thd->no_warnings_for_error= 0;
- DBUG_RETURN(TRUE);
- }
- DBUG_RETURN(FALSE);
-}
-
+static int mysql_copy_create_list(List<create_field> *orig_create_list,
+                                  List<create_field> *new_create_list);
+static int mysql_copy_key_list(List<Key> *orig_key,
+                               List<Key> *new_key);
#define MYSQL50_TABLE_NAME_PREFIX "#mysql50#"
#define MYSQL50_TABLE_NAME_PREFIX_LENGTH 9
@@ -192,6 +142,272 @@ uint build_tmptable_filename(char *buff, size_t bufflen,
#define ALTER_TABLE_INDEX_CHANGED 2
+/*
+ SYNOPSIS
+ mysql_copy_create_list()
+ orig_create_list Original list of created fields
+ inout::new_create_list Copy of original list
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Memory allocation error
+
+ DESCRIPTION
+    mysql_prepare_table destroys the create_list and in some cases we
+    need this list for other purposes. Thus we copy it specifically for
+    use by mysql_prepare_table.
+*/
+
+static int mysql_copy_create_list(List<create_field> *orig_create_list,
+                                  List<create_field> *new_create_list)
+{
+ List_iterator<create_field> prep_field_it(*orig_create_list);
+ create_field *prep_field;
+ DBUG_ENTER("mysql_copy_create_list");
+
+ while ((prep_field= prep_field_it++))
+ {
+ create_field *field= new create_field(*prep_field);
+ if (!field || new_create_list->push_back(field))
+ {
+ mem_alloc_error(2);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ SYNOPSIS
+ mysql_copy_key_list()
+ orig_key Original list of keys
+ inout::new_key Copy of original list
+
+ RETURN VALUES
+ FALSE Success
+ TRUE Memory allocation error
+
+ DESCRIPTION
+    mysql_prepare_table destroys the key list and in some cases we
+    need this list for other purposes. Thus we copy it specifically for
+    use by mysql_prepare_table.
+*/
+
+static int mysql_copy_key_list(List<Key> *orig_key,
+ List<Key> *new_key)
+{
+ List_iterator<Key> prep_key_it(*orig_key);
+ Key *prep_key;
+ DBUG_ENTER("mysql_copy_create_lists");
+
+ while ((prep_key= prep_key_it++))
+ {
+ List<key_part_spec> prep_columns;
+ List_iterator<key_part_spec> prep_col_it(prep_key->columns);
+ key_part_spec *prep_col;
+ Key *temp_key;
+
+ while ((prep_col= prep_col_it++))
+ {
+ key_part_spec *prep_key_part;
+      if (!(prep_key_part= new key_part_spec(*prep_col)))
+ {
+ mem_alloc_error(sizeof(key_part_spec));
+ DBUG_RETURN(TRUE);
+ }
+ if (prep_columns.push_back(prep_key_part))
+ {
+ mem_alloc_error(2);
+ DBUG_RETURN(TRUE);
+ }
+ }
+    if (!(temp_key= new Key(prep_key->type, prep_key->name,
+ prep_key->algorithm,
+ prep_key->generated,
+ prep_columns,
+ prep_key->parser_name)))
+ {
+ mem_alloc_error(sizeof(Key));
+ DBUG_RETURN(TRUE);
+ }
+ if (new_key->push_back(temp_key))
+ {
+ mem_alloc_error(2);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ SYNOPSIS
+ mysql_write_frm()
+ lpt Struct carrying many parameters needed for this
+ method
+ flags Flags as defined below
+ WFRM_INITIAL_WRITE If set we need to prepare table before
+ creating the frm file
+ WFRM_CREATE_HANDLER_FILES If set we need to create the handler file as
+ part of the creation of the frm file
+    WFRM_PACK_FRM              If set we should pack the frm file and
+                               then delete it
+
+ RETURN VALUES
+ TRUE Error
+ FALSE Success
+
+ DESCRIPTION
+ A support method that creates a new frm file and in this process it
+    regenerates the partition data. It also works for non-partitioned
+    tables since it only handles partition data if it exists.
+*/
+
+bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
+{
+ /*
+    Prepare the table for writing a new frm file where the partitions
+    in add/drop state have temporarily changed their state.
+    We set tmp_table to avoid errors on naming of the primary key index.
+ */
+ int error= 0;
+ char path[FN_REFLEN+1];
+ char frm_name[FN_REFLEN+1];
+ DBUG_ENTER("mysql_write_frm");
+
+ if (flags & WFRM_INITIAL_WRITE)
+ {
+ error= mysql_copy_create_list(lpt->create_list,
+ &lpt->new_create_list);
+ error+= mysql_copy_key_list(lpt->key_list,
+ &lpt->new_key_list);
+ if (error)
+ {
+ DBUG_RETURN(TRUE);
+ }
+ }
+ build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, "");
+ strxmov(frm_name, path, reg_ext, NullS);
+ if ((flags & WFRM_INITIAL_WRITE) &&
+ (mysql_prepare_table(lpt->thd, lpt->create_info, &lpt->new_create_list,
+ &lpt->new_key_list,/*tmp_table*/ 1, &lpt->db_options,
+ lpt->table->file, &lpt->key_info_buffer,
+ &lpt->key_count, /*select_field_count*/ 0)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ {
+ partition_info *part_info= lpt->table->part_info;
+ char *part_syntax_buf;
+ uint syntax_len, i;
+ bool any_unnormal_state= FALSE;
+
+ if (part_info)
+ {
+ uint max_part_state_len= part_info->partitions.elements +
+ part_info->temp_partitions.elements;
+ if (!(part_info->part_state= (uchar*)sql_alloc(max_part_state_len)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ part_info->part_state_len= 0;
+ if (!(part_syntax_buf= generate_partition_syntax(part_info,
+ &syntax_len,
+ TRUE, FALSE)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ for (i= 0; i < part_info->part_state_len; i++)
+ {
+ enum partition_state part_state=
+ (enum partition_state)part_info->part_state[i];
+ if (part_state != PART_NORMAL && part_state != PART_IS_ADDED)
+ any_unnormal_state= TRUE;
+ }
+ if (!any_unnormal_state)
+ {
+ part_info->part_state= NULL;
+ part_info->part_state_len= 0;
+ }
+ part_info->part_info_string= part_syntax_buf;
+ part_info->part_info_len= syntax_len;
+ }
+ }
+#endif
+ /*
+    We write the frm file holding the LOCK_open mutex since otherwise we
+    could overwrite the frm file while another thread is reading it in
+    open_table.
+ */
+ lpt->create_info->table_options= lpt->db_options;
+ VOID(pthread_mutex_lock(&LOCK_open));
+ if ((mysql_create_frm(lpt->thd, frm_name, lpt->db, lpt->table_name,
+ lpt->create_info, lpt->new_create_list, lpt->key_count,
+ lpt->key_info_buffer, lpt->table->file)) ||
+ ((flags & WFRM_CREATE_HANDLER_FILES) &&
+ lpt->table->file->create_handler_files(path)))
+ {
+ error= 1;
+ goto end;
+ }
+ if (flags & WFRM_PACK_FRM)
+ {
+ /*
+ We need to pack the frm file and after packing it we delete the
+ frm file to ensure it doesn't get used. This is only used for
+ handlers that have the main version of the frm file stored in the
+ handler.
+ */
+ const void *data= 0;
+ uint length= 0;
+ if (readfrm(path, &data, &length) ||
+ packfrm(data, length, &lpt->pack_frm_data, &lpt->pack_frm_len))
+ {
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)lpt->pack_frm_data, MYF(MY_ALLOW_ZERO_PTR));
+ mem_alloc_error(length);
+ error= 1;
+ goto end;
+ }
+ error= my_delete(frm_name, MYF(MY_WME));
+ }
+  /* The frm file has been updated to reflect the change about to happen. */
+end:
+ VOID(pthread_mutex_unlock(&LOCK_open));
+ DBUG_RETURN(error);
+}
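+
+/*
+  Hypothetical calling sequence (a sketch, not taken from this patch;
+  the real callers live in the partition ALTER TABLE code): write the
+  frm with the altered partition state first, then pack it for engines
+  that keep the main frm copy themselves:
+
+    if (mysql_write_frm(lpt, WFRM_INITIAL_WRITE) ||
+        mysql_write_frm(lpt, WFRM_PACK_FRM))
+      DBUG_RETURN(TRUE);
+*/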
+
+
+/*
+ SYNOPSIS
+ write_bin_log()
+ thd Thread object
+    clear_error         Whether thd->clear_error() should be called
+ query Query to log
+ query_length Length of query
+
+ RETURN VALUES
+ NONE
+
+ DESCRIPTION
+ Write the binlog if open, routine used in multiple places in this
+ file
+*/
+
+void write_bin_log(THD *thd, bool clear_error,
+ char const *query, ulong query_length)
+{
+ if (mysql_bin_log.is_open())
+ {
+ if (clear_error)
+ thd->clear_error();
+ thd->binlog_query(THD::STMT_QUERY_TYPE,
+ query, query_length, FALSE, FALSE);
+ }
+}
+
/*
delete (drop) tables.
@@ -1807,24 +2023,54 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
if (!(file=get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
create_info->db_type)))
{
- my_error(ER_OUTOFMEMORY, MYF(0), 128);//128 bytes invented
+ mem_alloc_error(sizeof(handler));
DBUG_RETURN(TRUE);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *part_info= thd->lex->part_info;
+ if (!part_info && create_info->db_type->partition_flags &&
+ (create_info->db_type->partition_flags() & HA_USE_AUTO_PARTITION))
+ {
+ /*
+ Table is not defined as a partitioned table but the engine handles
+ all tables as partitioned. The handler will set up the partition info
+ object with the default settings.
+ */
+ thd->lex->part_info= part_info= new partition_info();
+ if (!part_info)
+ {
+ mem_alloc_error(sizeof(partition_info));
+ DBUG_RETURN(TRUE);
+ }
+ file->set_auto_partitions(part_info);
+ }
if (part_info)
{
/*
- The table has been specified as a partitioned table.
- If this is part of an ALTER TABLE the handler will be the partition
- handler but we need to specify the default handler to use for
- partitions also in the call to check_partition_info. We transport
- this information in the default_db_type variable, it is either
- DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
+ The table has been specified as a partitioned table.
+ If this is part of an ALTER TABLE the handler will be the partition
+ handler but we need to specify the default handler to use for
+ partitions also in the call to check_partition_info. We transport
+ this information in the default_db_type variable, it is either
+ DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
+
+ Check that we don't use foreign keys in the table since it won't
+ work even with InnoDB beneath it.
*/
+ List_iterator<Key> key_iterator(keys);
+ Key *key;
handlerton *part_engine_type= create_info->db_type;
char *part_syntax_buf;
uint syntax_len;
+ handlerton *engine_type;
+ while ((key= key_iterator++))
+ {
+ if (key->type == Key::FOREIGN_KEY)
+ {
+ my_error(ER_CANNOT_ADD_FOREIGN, MYF(0));
+ goto err;
+ }
+ }
if (part_engine_type == &partition_hton)
{
/*
@@ -1832,16 +2078,29 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
default_engine_type was assigned from the engine set in the ALTER
TABLE command.
*/
- part_engine_type= ha_checktype(thd,
- ha_legacy_type(part_info->default_engine_type), 0, 0);
+ ;
}
else
{
- part_info->default_engine_type= create_info->db_type;
+ if (create_info->used_fields & HA_CREATE_USED_ENGINE)
+ {
+ part_info->default_engine_type= create_info->db_type;
+ }
+ else
+ {
+ if (part_info->default_engine_type == NULL)
+ {
+ part_info->default_engine_type= ha_checktype(thd,
+ DB_TYPE_DEFAULT, 0, 0);
+ }
+ }
}
- if (check_partition_info(part_info, part_engine_type,
- file, create_info->max_rows))
+ DBUG_PRINT("info", ("db_type = %d",
+ ha_legacy_type(part_info->default_engine_type)));
+ if (check_partition_info(part_info, &engine_type, file,
+ create_info->max_rows))
goto err;
+ part_info->default_engine_type= engine_type;
/*
We reverse the partitioning parser and generate a standard format
@@ -1849,19 +2108,29 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
*/
if (!(part_syntax_buf= generate_partition_syntax(part_info,
&syntax_len,
- TRUE,TRUE)))
+ TRUE, FALSE)))
goto err;
part_info->part_info_string= part_syntax_buf;
part_info->part_info_len= syntax_len;
- if ((!(file->partition_flags() & HA_CAN_PARTITION)) ||
+ if (create_info->db_type != engine_type)
+ {
+ delete file;
+ if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, engine_type)))
+ {
+ mem_alloc_error(sizeof(handler));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ if ((!(engine_type->partition_flags &&
+ engine_type->partition_flags() & HA_CAN_PARTITION)) ||
create_info->db_type == &partition_hton)
{
/*
The handler assigned to the table cannot handle partitioning.
Assign the partition handler as the handler of the table.
*/
- DBUG_PRINT("info", ("db_type: %d part_flag: %d",
- create_info->db_type,file->partition_flags()));
+ DBUG_PRINT("info", ("db_type: %d",
+ ha_legacy_type(create_info->db_type)));
delete file;
create_info->db_type= &partition_hton;
if (!(file= get_ha_partition(part_info)))
@@ -2252,7 +2521,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table,
VOID(table->file->extra(function));
/* Mark all tables that are in use as 'old' */
- mysql_lock_abort(thd, table); // end threads waiting on lock
+ mysql_lock_abort(thd, table, TRUE); /* end threads waiting on lock */
/* Wait until all there are no other threads that has this table open */
remove_table_from_cache(thd, table->s->db.str,
@@ -2405,7 +2674,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(0); // Can't open frm file
}
- if (open_table_from_share(thd, share, "", 0, 0, 0, &tmp_table))
+ if (open_table_from_share(thd, share, "", 0, 0, 0, &tmp_table, FALSE))
{
release_table_share(share, RELEASE_NORMAL);
pthread_mutex_unlock(&LOCK_open);
@@ -2664,7 +2933,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
pthread_mutex_lock(&LOCK_open);
const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open,
"Waiting to get writelock");
- mysql_lock_abort(thd,table->table);
+ mysql_lock_abort(thd,table->table, TRUE);
remove_table_from_cache(thd, table->table->s->db.str,
table->table->s->table_name.str,
RTFC_WAIT_OTHER_THREAD_FLAG |
@@ -3014,7 +3283,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
Table_ident *table_ident)
{
TABLE *tmp_table;
- char src_path[FN_REFLEN], dst_path[FN_REFLEN];
+ char src_path[FN_REFLEN], dst_path[FN_REFLEN], tmp_path[FN_REFLEN];
uint dst_path_length;
char *db= table->db;
char *table_name= table->table_name;
@@ -3120,6 +3389,19 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
creation, instead create the table directly (for both normal
and temporary tables).
*/
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ /*
+ For partitioned tables we need to copy the .par file as well since
+ it is used in open_table_def to even be able to create a new handler.
+ There is no way to find out here if the original table is a
+ partitioned table so we copy the file and ignore any errors.
+ */
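+  /*
+    Illustration (a sketch; assumes reg_ext is ".frm"): with
+    MY_REPLACE_EXT the fn_format calls below rewrite e.g.
+    "test/t1.frm" into "test/t1.par", so only the .par companion
+    file gets copied.
+  */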
+ fn_format(tmp_path, dst_path, reg_ext, ".par", MYF(MY_REPLACE_EXT));
+ strmov(dst_path, tmp_path);
+ fn_format(tmp_path, src_path, reg_ext, ".par", MYF(MY_REPLACE_EXT));
+ strmov(src_path, tmp_path);
+ my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE));
+#endif
dst_path[dst_path_length - reg_ext_length]= '\0'; // Remove .frm
err= ha_create_table(thd, dst_path, db, table_name, create_info, 1);
@@ -3547,10 +3829,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
handlerton *old_db_type, *new_db_type;
uint need_copy_table= 0;
#ifdef WITH_PARTITION_STORAGE_ENGINE
- bool online_add_empty_partition= FALSE;
- bool online_drop_partition= FALSE;
+ uint fast_alter_partition= 0;
bool partition_changed= FALSE;
- handlerton *default_engine_type;
#endif
List<create_field> prepared_create_list;
List<Key> prepared_key_list;
@@ -3642,413 +3922,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
create_info->db_type= old_db_type;
#ifdef WITH_PARTITION_STORAGE_ENGINE
- /*
- We need to handle both partition management command such as Add Partition
- and others here as well as an ALTER TABLE that completely changes the
- partitioning and yet others that don't change anything at all. We start
- by checking the partition management variants and then check the general
- change patterns.
- */
- if (alter_info->flags & (ALTER_ADD_PARTITION +
- ALTER_DROP_PARTITION + ALTER_COALESCE_PARTITION +
- ALTER_REORGANISE_PARTITION))
- {
- partition_info *tab_part_info= table->part_info;
- if (!tab_part_info)
- {
- my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
- DBUG_RETURN(TRUE);
- }
- default_engine_type= tab_part_info->default_engine_type;
- /*
- We are going to manipulate the partition info on the table object
- so we need to ensure that the data structure of the table object
- is freed by setting version to 0.
- */
- table->s->version= 0L;
- if (alter_info->flags == ALTER_ADD_PARTITION)
- {
- /*
- We start by moving the new partitions to the list of temporary
- partitions. We will then check that the new partitions fit in the
- partitioning scheme as currently set-up.
- Partitions are always added at the end in ADD PARTITION.
- */
- partition_info *alt_part_info= thd->lex->part_info;
- uint no_new_partitions= alt_part_info->no_parts;
- uint no_orig_partitions= tab_part_info->no_parts;
- uint check_total_partitions= no_new_partitions + no_orig_partitions;
- uint new_total_partitions= check_total_partitions;
- /*
- We allow quite a lot of values to be supplied by defaults, however we
- must know the number of new partitions in this case.
- */
- if (no_new_partitions == 0)
- {
- my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (is_sub_partitioned(tab_part_info))
- {
- if (alt_part_info->no_subparts == 0)
- alt_part_info->no_subparts= tab_part_info->no_subparts;
- else if (alt_part_info->no_subparts != tab_part_info->no_subparts)
- {
- my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
- }
- check_total_partitions= new_total_partitions*
- alt_part_info->no_subparts;
- }
- if (check_total_partitions > MAX_PARTITIONS)
- {
- my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
- }
- alt_part_info->part_type= tab_part_info->part_type;
- if (set_up_defaults_for_partitioning(alt_part_info,
- table->file,
- (ulonglong)0ULL,
- tab_part_info->no_parts))
- {
- DBUG_RETURN(TRUE);
- }
- /*
- Need to concatenate the lists here to make it possible to check the
- partition info for correctness using check_partition_info
- */
- {
- List_iterator<partition_element> alt_it(alt_part_info->partitions);
- uint part_count= 0;
- do
- {
- partition_element *part_elem= alt_it++;
- tab_part_info->partitions.push_back(part_elem);
- tab_part_info->temp_partitions.push_back(part_elem);
- } while (++part_count < no_new_partitions);
- tab_part_info->no_parts+= no_new_partitions;
- }
- {
- List_iterator<partition_element> tab_it(tab_part_info->partitions);
- partition_element *part_elem= tab_it++;
- if (is_sub_partitioned(tab_part_info))
- {
- List_iterator<partition_element> sub_it(part_elem->subpartitions);
- part_elem= sub_it++;
- }
- if (check_partition_info(tab_part_info, part_elem->engine_type,
- table->file, (ulonglong)0ULL))
- {
- DBUG_RETURN(TRUE);
- }
- }
- create_info->db_type= &partition_hton;
- thd->lex->part_info= tab_part_info;
- if (table->file->alter_table_flags() & HA_ONLINE_ADD_EMPTY_PARTITION &&
- (tab_part_info->part_type == RANGE_PARTITION ||
- tab_part_info->part_type == LIST_PARTITION))
- {
- /*
- For range and list partitions add partition is simply adding a new
- empty partition to the table. If the handler support this we will
- use the simple method of doing this. In this case we need to break
- out the new partitions from the list again and only keep them in the
- temporary list. Added partitions are always added at the end.
- */
- {
- List_iterator<partition_element> tab_it(tab_part_info->partitions);
- uint part_count= 0;
- do
- {
- tab_it++;
- } while (++part_count < no_orig_partitions);
- do
- {
- tab_it++;
- tab_it.remove();
- } while (++part_count < new_total_partitions);
- }
- tab_part_info->no_parts-= no_new_partitions;
- online_add_empty_partition= TRUE;
- }
- else
- {
- tab_part_info->temp_partitions.empty();
- }
- }
- else if (alter_info->flags == ALTER_DROP_PARTITION)
- {
- /*
- Drop a partition from a range partition and list partitioning is
- always safe and can be made more or less immediate. It is necessary
- however to ensure that the partition to be removed is safely removed
- and that REPAIR TABLE can remove the partition if for some reason the
- command to drop the partition failed in the middle.
- */
- uint part_count= 0;
- uint no_parts_dropped= alter_info->partition_names.elements;
- uint no_parts_found= 0;
- List_iterator<partition_element> part_it(tab_part_info->partitions);
- if (!(tab_part_info->part_type == RANGE_PARTITION ||
- tab_part_info->part_type == LIST_PARTITION))
- {
- my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP");
- DBUG_RETURN(TRUE);
- }
- if (no_parts_dropped >= tab_part_info->no_parts)
- {
- my_error(ER_DROP_LAST_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
- }
- do
- {
- partition_element *part_elem= part_it++;
- if (is_partition_in_list(part_elem->partition_name,
- alter_info->partition_names))
- {
- /*
- Remove the partition from the list and put it instead in the
- list of temporary partitions with a new state.
- */
- no_parts_found++;
- part_elem->part_state= PART_IS_DROPPED;
- }
- } while (++part_count < tab_part_info->no_parts);
- if (no_parts_found != no_parts_dropped)
- {
- my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (!(table->file->alter_table_flags() & HA_ONLINE_DROP_PARTITION))
- {
- my_error(ER_DROP_PARTITION_FAILURE, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (table->file->is_fk_defined_on_table_or_index(MAX_KEY))
- {
- my_error(ER_DROP_PARTITION_WHEN_FK_DEFINED, MYF(0));
- DBUG_RETURN(TRUE);
- }
- /*
- This code needs set-up of structures needed by mysql_create_table
- before it is called and thus we only set a boolean variable to be
- checked later down in the code when all needed data structures are
- prepared.
- */
- online_drop_partition= TRUE;
- }
- else if (alter_info->flags == ALTER_COALESCE_PARTITION)
- {
- /*
- In this version COALESCE PARTITION is implemented by simply removing
- a partition from the table and using the normal ALTER TABLE code
- and ensuring that copy to a new table occurs. Later on we can optimise
- this function for Linear Hash partitions. In that case we can avoid
- reorganising the entire table. For normal hash partitions it will
- be a complete reorganise anyways so that can only be made on-line
- if it still uses a copy table.
- */
- uint part_count= 0;
- uint no_parts_coalesced= alter_info->no_parts;
- uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced;
- List_iterator<partition_element> part_it(tab_part_info->partitions);
- if (tab_part_info->part_type != HASH_PARTITION)
- {
- my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (no_parts_coalesced == 0)
- {
- my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (no_parts_coalesced >= tab_part_info->no_parts)
- {
- my_error(ER_DROP_LAST_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
- }
- do
- {
- part_it++;
- if (++part_count > no_parts_remain)
- part_it.remove();
- } while (part_count < tab_part_info->no_parts);
- tab_part_info->no_parts= no_parts_remain;
- }
- else if (alter_info->flags == ALTER_REORGANISE_PARTITION)
- {
- /*
- Reorganise partitions takes a number of partitions that are next
- to each other (at least for RANGE PARTITIONS) and then uses those
- to create a set of new partitions. So data is copied from those
- partitions into the new set of partitions. Those new partitions
- can have more values in the LIST value specifications or less both
- are allowed. The ranges can be different but since they are
- changing a set of consecutive partitions they must cover the same
- range as those changed from.
- This command can be used on RANGE and LIST partitions.
- */
- uint no_parts_reorged= alter_info->partition_names.elements;
- uint no_parts_new= thd->lex->part_info->partitions.elements;
- partition_info *alt_part_info= thd->lex->part_info;
- uint check_total_partitions;
- if (no_parts_reorged > tab_part_info->no_parts)
- {
- my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (!(tab_part_info->part_type == RANGE_PARTITION ||
- tab_part_info->part_type == LIST_PARTITION))
- {
- my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "REORGANISE");
- DBUG_RETURN(TRUE);
- }
- if (check_reorganise_list(alt_part_info, tab_part_info,
- alter_info->partition_names))
- {
- my_error(ER_SAME_NAME_PARTITION, MYF(0));
- DBUG_RETURN(TRUE);
- }
- check_total_partitions= tab_part_info->no_parts + no_parts_new;
- check_total_partitions-= no_parts_reorged;
- if (check_total_partitions > MAX_PARTITIONS)
- {
- my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0));
- DBUG_RETURN(TRUE);
- }
- {
- List_iterator<partition_element> tab_it(tab_part_info->partitions);
- uint part_count= 0;
- bool found_first= FALSE, found_last= FALSE;
- uint drop_count= 0;
- longlong tab_max_range, alt_max_range;
- do
- {
- partition_element *part_elem= tab_it++;
- if (is_partition_in_list(part_elem->partition_name,
- alter_info->partition_names))
- {
- drop_count++;
- tab_max_range= part_elem->range_value;
- if (!found_first)
- {
- uint alt_part_count= 0;
- found_first= TRUE;
- List_iterator<partition_element> alt_it(alt_part_info->partitions);
- do
- {
- partition_element *alt_part_elem= alt_it++;
- alt_max_range= alt_part_elem->range_value;
- if (alt_part_count == 0)
- tab_it.replace(alt_part_elem);
- else
- tab_it.after(alt_part_elem);
- } while (++alt_part_count < no_parts_new);
- }
- else if (found_last)
- {
- my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0));
- DBUG_RETURN(TRUE);
- }
- else
- tab_it.remove();
- }
- else
- {
- if (found_first)
- found_last= TRUE;
- }
- } while (++part_count < tab_part_info->no_parts);
- if (drop_count != no_parts_reorged)
- {
- my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0));
- DBUG_RETURN(TRUE);
- }
- if (tab_part_info->part_type == RANGE_PARTITION &&
- alt_max_range > tab_max_range)
- {
- my_error(ER_REORG_OUTSIDE_RANGE, MYF(0));
- DBUG_RETURN(TRUE);
- }
- }
- }
- partition_changed= TRUE;
- tab_part_info->no_parts= tab_part_info->partitions.elements;
- create_info->db_type= &partition_hton;
- thd->lex->part_info= tab_part_info;
- if (alter_info->flags == ALTER_ADD_PARTITION ||
- alter_info->flags == ALTER_REORGANISE_PARTITION)
- {
- if (check_partition_info(tab_part_info, default_engine_type,
- table->file, (ulonglong)0ULL))
- {
- DBUG_RETURN(TRUE);
- }
- }
- }
- else
+ if (prep_alter_part_table(thd, table, alter_info, create_info, old_db_type,
+ &partition_changed, &fast_alter_partition))
{
- /*
- When thd->lex->part_info has a reference to a partition_info the
- ALTER TABLE contained a definition of a partitioning.
-
- Case I:
- If there was a partition before and there is a new one defined.
- We use the new partitioning. The new partitioning is already
- defined in the correct variable so no work is needed to
- accomplish this.
- We do however need to update partition_changed to ensure that not
- only the frm file is changed in the ALTER TABLE command.
-
- Case IIa:
- There was a partitioning before and there is no new one defined.
- Also the user has not specified an explicit engine to use.
-
- We use the old partitioning also for the new table. We do this
- by assigning the partition_info from the table loaded in
- open_ltable to the partition_info struct used by mysql_create_table
- later in this method.
-
- Case IIb:
- There was a partitioning before and there is no new one defined.
- The user has specified an explicit engine to use.
-
- Since the user has specified an explicit engine to use we override
- the old partitioning info and create a new table using the specified
- engine. This is the reason for the extra check if old and new engine
- is equal.
- In this case the partition also is changed.
-
- Case III:
- There was no partitioning before altering the table, there is
- partitioning defined in the altered table. Use the new partitioning.
- No work needed since the partitioning info is already in the
- correct variable.
- Also here partition has changed and thus a new table must be
- created.
-
- Case IV:
- There was no partitioning before and no partitioning defined.
- Obviously no work needed.
- */
- if (table->part_info)
- {
- if (!thd->lex->part_info &&
- create_info->db_type == old_db_type)
- thd->lex->part_info= table->part_info;
- }
- if (thd->lex->part_info)
- {
- /*
- Need to cater for engine types that can handle partition without
- using the partition handler.
- */
- if (thd->lex->part_info != table->part_info)
- partition_changed= TRUE;
- if (create_info->db_type != &partition_hton)
- thd->lex->part_info->default_engine_type= create_info->db_type;
- create_info->db_type= &partition_hton;
- }
+ DBUG_RETURN(TRUE);
}
#endif
if (check_engine(thd, new_name, &create_info->db_type))
@@ -4503,14 +4380,16 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (need_copy_table == ALTER_TABLE_INDEX_CHANGED)
{
int pk_changed= 0;
- ulong alter_flags= table->file->alter_table_flags();
+ ulong alter_flags= 0;
ulong needed_online_flags= 0;
ulong needed_fast_flags= 0;
KEY *key;
uint *idx_p;
uint *idx_end_p;
- DBUG_PRINT("info", ("alter_flags: %lu", alter_flags));
+ if (table->s->db_type->alter_table_flags)
+ alter_flags= table->s->db_type->alter_table_flags(alter_info->flags);
+ DBUG_PRINT("info", ("alter_flags: %lu", alter_flags));
/* Check dropped indexes. */
for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
idx_p < idx_end_p;
@@ -4610,103 +4489,13 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
create_info->frm_only= 1;
#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (partition_changed)
+ if (fast_alter_partition)
{
- if (online_drop_partition)
- {
- /*
- Now after all checks and setting state on dropped partitions we can
- start the actual dropping of the partitions.
- 1) Lock table in TL_WRITE_ONLY to ensure all other accesses on table
- are completed and no new ones are started until we have changed
- the frm file.
- 2) Write the new frm file where state of dropped partitions is
- changed to PART_IS_DROPPED
- 3) Perform the actual drop of the partition using the handler of the
- table.
- 4) Write a new frm file of the table where the partitions are dropped
- from the table.
-
- */
- uint old_lock_type;
- partition_info *part_info= table->part_info;
- char path[FN_REFLEN+1], noext_path[FN_REFLEN+1];
- uint syntax_len;
- char *part_syntax_buf;
-
- VOID(pthread_mutex_lock(&LOCK_open));
- if (abort_and_upgrade_lock(thd, table, db, table_name, &old_lock_type))
- {
- DBUG_RETURN(TRUE);
- }
- VOID(pthread_mutex_unlock(&LOCK_open));
- if (!(part_syntax_buf= generate_partition_syntax(part_info,
- &syntax_len,
- TRUE,TRUE)))
- {
- DBUG_RETURN(TRUE);
- }
- part_info->part_info_string= part_syntax_buf;
- part_info->part_info_len= syntax_len;
- build_table_filename(path, sizeof(path), db, table_name, reg_ext);
- if (mysql_create_frm(thd, path, db, table_name, create_info,
- prepared_create_list, key_count, key_info_buffer,
- table->file))
- {
- DBUG_RETURN(TRUE);
- }
- thd->lex->part_info= part_info;
- build_table_filename(path, sizeof(path), db, table_name, "");
- if (table->file->drop_partitions(path))
- {
- DBUG_RETURN(TRUE);
- }
- {
- List_iterator<partition_element> part_it(part_info->partitions);
- uint i= 0, remove_count= 0;
- do
- {
- partition_element *part_elem= part_it++;
- if (is_partition_in_list(part_elem->partition_name,
- alter_info->partition_names))
- {
- part_it.remove();
- remove_count++;
- }
- } while (++i < part_info->no_parts);
- part_info->no_parts-= remove_count;
- }
- if (!(part_syntax_buf= generate_partition_syntax(part_info,
- &syntax_len,
- TRUE,TRUE)))
- {
- DBUG_RETURN(TRUE);
- }
- part_info->part_info_string= part_syntax_buf;
- part_info->part_info_len= syntax_len;
- build_table_filename(path, sizeof(path), db, table_name, reg_ext);
- build_table_filename(noext_path, sizeof(noext_path), db, table_name, "");
- if (mysql_create_frm(thd, path, db, table_name, create_info,
- prepared_create_list, key_count, key_info_buffer,
- table->file) ||
- table->file->create_handler_files(noext_path))
- {
- DBUG_RETURN(TRUE);
- }
- thd->proc_info="end";
- query_cache_invalidate3(thd, table_list, 0);
- error= ha_commit_stmt(thd);
- if (ha_commit(thd))
- error= 1;
- if (!error)
- {
- close_thread_tables(thd);
- write_bin_log(thd, FALSE, thd->query, thd->query_length);
- send_ok(thd);
- DBUG_RETURN(FALSE);
- }
- DBUG_RETURN(error);
- }
+ DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info,
+ create_info, table_list,
+ &create_list, &key_list,
+ db, table_name,
+ fast_alter_partition));
}
#endif
@@ -5143,7 +4932,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
/* Mark in-use copies old */
remove_table_from_cache(thd,db,table_name,RTFC_NO_FLAG);
/* end threads waiting on lock */
- mysql_lock_abort(thd,table);
+ mysql_lock_abort(thd,table, TRUE);
}
VOID(quick_rm_table(old_db_type,db,old_name));
if (close_data_tables(thd,db,table_name) ||
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index fde014898f4..69c33a8e7d0 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -530,6 +530,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token READ_SYM
%token READ_WRITE_SYM
%token REAL
+%token REBUILD_SYM
%token RECOVER_SYM
%token REDO_BUFFER_SIZE_SYM
%token REDOFILE_SYM
@@ -542,7 +543,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token RELEASE_SYM
%token RELOAD
%token RENAME
-%token REORGANISE_SYM
+%token REORGANIZE_SYM
%token REPAIR
%token REPEATABLE_SYM
%token REPEAT_SYM
@@ -3331,9 +3332,13 @@ partitioning:
lex->part_info= new partition_info();
if (!lex->part_info)
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
+ mem_alloc_error(sizeof(partition_info));
YYABORT;
}
+ if (lex->sql_command == SQLCOM_ALTER_TABLE)
+ {
+ lex->alter_info.flags|= ALTER_PARTITION;
+ }
}
partition
;
@@ -3342,24 +3347,15 @@ partition_entry:
PARTITION_SYM
{
LEX *lex= Lex;
- if (lex->part_info)
- {
- /*
- We enter here when opening the frm file to translate
- partition info string into part_info data structure.
- */
- lex->part_info= new partition_info();
- if (!lex->part_info)
- {
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
- YYABORT;
- }
- }
- else
+ if (!lex->part_info)
{
yyerror(ER(ER_PARTITION_ENTRY_ERROR));
YYABORT;
}
+ /*
+ We enter here when opening the frm file to translate
+ partition info string into part_info data structure.
+ */
}
partition {}
;
@@ -3393,14 +3389,23 @@ opt_linear:
;
part_field_list:
+ /* empty */ {}
+ | part_field_item_list {}
+ ;
+
+part_field_item_list:
part_field_item {}
- | part_field_list ',' part_field_item {}
+ | part_field_item_list ',' part_field_item {}
;
part_field_item:
ident
{
- Lex->part_info->part_field_list.push_back($1.str);
+ if (Lex->part_info->part_field_list.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
}
;
@@ -3434,12 +3439,15 @@ opt_no_parts:
| PARTITIONS_SYM ulong_num
{
uint no_parts= $2;
+ LEX *lex= Lex;
if (no_parts == 0)
{
my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions");
YYABORT;
}
- Lex->part_info->no_parts= no_parts;
+
+ lex->part_info->no_parts= no_parts;
+ lex->part_info->use_default_no_partitions= FALSE;
}
;
@@ -3465,7 +3473,13 @@ sub_part_field_list:
sub_part_field_item:
ident
- { Lex->part_info->subpart_field_list.push_back($1.str); }
+ {
+ if (Lex->part_info->subpart_field_list.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
+ }
;
part_func_expr:
@@ -3489,12 +3503,14 @@ opt_no_subparts:
| SUBPARTITIONS_SYM ulong_num
{
uint no_parts= $2;
+ LEX *lex= Lex;
if (no_parts == 0)
{
my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions");
YYABORT;
}
- Lex->part_info->no_subparts= no_parts;
+ lex->part_info->no_subparts= no_parts;
+ lex->part_info->use_default_no_subpartitions= FALSE;
}
;
@@ -3505,21 +3521,21 @@ part_defs:
{
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
+ uint count_curr_parts= part_info->partitions.elements;
if (part_info->no_parts != 0)
{
if (part_info->no_parts !=
- part_info->count_curr_parts)
+ count_curr_parts)
{
yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR));
YYABORT;
}
}
- else if (part_info->count_curr_parts > 0)
+ else if (count_curr_parts > 0)
{
- part_info->no_parts= part_info->count_curr_parts;
+ part_info->no_parts= count_curr_parts;
}
part_info->count_curr_subparts= 0;
- part_info->count_curr_parts= 0;
}
;
@@ -3534,17 +3550,79 @@ part_definition:
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
partition_element *p_elem= new partition_element();
- if (!p_elem)
+ uint part_id= part_info->partitions.elements +
+ part_info->temp_partitions.elements;
+ enum partition_state part_state;
+
+ if (part_info->part_state)
+ part_state= (enum partition_state)part_info->part_state[part_id];
+ else
+ part_state= PART_NORMAL;
+ switch (part_state)
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
- YYABORT;
+ case PART_TO_BE_DROPPED:
+ /*
+            This part is currently being removed so we keep it in a
+            temporary list for REPAIR TABLE to be able to handle
+            failures during the drop partition process.
+ */
+ case PART_TO_BE_ADDED:
+ /*
+ This part is currently being added so we keep it in a
+ temporary list for REPAIR TABLE to be able to handle
+            failures during the add partition process.
+ */
+ if (!p_elem || part_info->temp_partitions.push_back(p_elem))
+ {
+ mem_alloc_error(sizeof(partition_element));
+ YYABORT;
+ }
+ break;
+ case PART_IS_ADDED:
+ /*
+ Part has been added and is now a normal partition
+ */
+ case PART_TO_BE_REORGED:
+ /*
+ This part is currently reorganised, it is still however
+ used so we keep it in the list of partitions. We do
+ however need the state to be able to handle REPAIR TABLE
+ after failures in the reorganisation process.
+ */
+ case PART_REORGED_DROPPED:
+ /*
+ This part is currently reorganised as part of a
+ COALESCE PARTITION and it will be dropped without a new
+ replacement partition after completing the reorganisation.
+ */
+ case PART_CHANGED:
+ /*
+ This part is currently split or merged as part of ADD
+ PARTITION for a hash partition or as part of COALESCE
+ PARTITION for a hash partitioned table.
+ */
+ case PART_IS_CHANGED:
+ /*
+ This part has been split or merged as part of ADD
+ PARTITION for a hash partition or as part of COALESCE
+ PARTITION for a hash partitioned table.
+ */
+ case PART_NORMAL:
+ if (!p_elem || part_info->partitions.push_back(p_elem))
+ {
+ mem_alloc_error(sizeof(partition_element));
+ YYABORT;
+ }
+ break;
+ default:
+ mem_alloc_error((part_id * 1000) + part_state);
+ YYABORT;
}
+ p_elem->part_state= part_state;
part_info->curr_part_elem= p_elem;
part_info->current_partition= p_elem;
part_info->use_default_partitions= FALSE;
- part_info->partitions.push_back(p_elem);
- p_elem->engine_type= NULL;
- part_info->count_curr_parts++;
+ part_info->use_default_no_partitions= FALSE;
}
part_name {}
opt_part_values {}
@@ -3554,7 +3632,12 @@ part_definition:
part_name:
ident_or_text
- { Lex->part_info->curr_part_elem->partition_name= $1.str; }
+ {
+ LEX *lex= Lex;
+ partition_info *part_info= lex->part_info;
+ partition_element *p_elem= part_info->curr_part_elem;
+ p_elem->partition_name= $1.str;
+ }
;
opt_part_values:
@@ -3643,13 +3726,13 @@ part_list_item:
part_bit_expr
{
longlong *value_ptr;
- if (!(value_ptr= (longlong*)sql_alloc(sizeof(longlong))))
+ if (!(value_ptr= (longlong*)sql_alloc(sizeof(longlong))) ||
+ ((*value_ptr= $1, FALSE) ||
+ Lex->part_info->curr_part_elem->list_val_list.push_back(value_ptr)))
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(longlong));
+ mem_alloc_error(sizeof(longlong));
YYABORT;
}
- *value_ptr= $1;
- Lex->part_info->curr_part_elem->list_val_list.push_back(value_ptr);
}
;
@@ -3659,20 +3742,23 @@ part_bit_expr:
Item *part_expr= $1;
bool not_corr_func;
LEX *lex= Lex;
+ THD *thd= YYTHD;
longlong item_value;
Name_resolution_context *context= &lex->current_select->context;
TABLE_LIST *save_list= context->table_list;
+ const char *save_where= thd->where;
context->table_list= 0;
- part_expr->fix_fields(YYTHD, (Item**)0);
- context->table_list= save_list;
- not_corr_func= !part_expr->const_item() ||
- !lex->safe_to_cache_query;
- if (not_corr_func)
+ thd->where= "partition function";
+ if (part_expr->fix_fields(YYTHD, (Item**)0) ||
+ ((context->table_list= save_list), FALSE) ||
+ (!part_expr->const_item()) ||
+ (!lex->safe_to_cache_query))
{
yyerror(ER(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR));
YYABORT;
}
+ thd->where= save_where;
if (part_expr->result_type() != INT_RESULT)
{
yyerror(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR));
@@ -3717,16 +3803,16 @@ sub_part_definition:
LEX *lex= Lex;
partition_info *part_info= lex->part_info;
partition_element *p_elem= new partition_element();
- if (!p_elem)
+ if (!p_elem ||
+ part_info->current_partition->subpartitions.push_back(p_elem))
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element));
+ mem_alloc_error(sizeof(partition_element));
YYABORT;
}
part_info->curr_part_elem= p_elem;
- part_info->current_partition->subpartitions.push_back(p_elem);
part_info->use_default_subpartitions= FALSE;
+ part_info->use_default_no_subpartitions= FALSE;
part_info->count_curr_subparts++;
- p_elem->engine_type= NULL;
}
sub_name opt_part_options {}
;
@@ -4794,7 +4880,7 @@ alter_commands:
| DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
| IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
| alter_list
- opt_partitioning
+ opt_partitioning
| partitioning
/*
This part was added for release 5.1 by Mikael Ronström.
@@ -4809,26 +4895,77 @@ alter_commands:
{
Lex->alter_info.flags|= ALTER_DROP_PARTITION;
}
- | COALESCE PARTITION_SYM ulong_num
+ | REBUILD_SYM PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_REBUILD_PARTITION;
+ lex->no_write_to_binlog= $3;
+ }
+ | OPTIMIZE PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_OPTIMIZE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+          opt_mi_check_type
+ | ANALYZE_SYM PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_ANALYZE_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_mi_check_type
+ | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_CHECK_PARTITION;
+ lex->check_opt.init();
+ }
+ opt_mi_check_type
+ | REPAIR PARTITION_SYM opt_no_write_to_binlog
+ all_or_alt_part_name_list
+ {
+ LEX *lex= Lex;
+ lex->alter_info.flags|= ALTER_REPAIR_PARTITION;
+ lex->no_write_to_binlog= $3;
+ lex->check_opt.init();
+ }
+ opt_mi_repair_type
+ | COALESCE PARTITION_SYM opt_no_write_to_binlog ulong_num
{
LEX *lex= Lex;
lex->alter_info.flags|= ALTER_COALESCE_PARTITION;
- lex->alter_info.no_parts= $3;
+ lex->no_write_to_binlog= $3;
+ lex->alter_info.no_parts= $4;
}
| reorg_partition_rule
;
+all_or_alt_part_name_list:
+ | ALL
+ {
+ Lex->alter_info.flags|= ALTER_ALL_PARTITION;
+ }
+ | alt_part_name_list
+ ;
+
add_partition_rule:
- ADD PARTITION_SYM
+ ADD PARTITION_SYM opt_no_write_to_binlog
{
LEX *lex= Lex;
lex->part_info= new partition_info();
if (!lex->part_info)
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
+ mem_alloc_error(sizeof(partition_info));
YYABORT;
}
lex->alter_info.flags|= ALTER_ADD_PARTITION;
+ lex->no_write_to_binlog= $3;
}
add_part_extra
{}
@@ -4838,7 +4975,7 @@ add_part_extra:
| '(' part_def_list ')'
{
LEX *lex= Lex;
- lex->part_info->no_parts= lex->part_info->count_curr_parts;
+ lex->part_info->no_parts= lex->part_info->partitions.elements;
}
| PARTITIONS_SYM ulong_num
{
@@ -4848,21 +4985,34 @@ add_part_extra:
;
reorg_partition_rule:
- REORGANISE_SYM PARTITION_SYM
+ REORGANIZE_SYM PARTITION_SYM opt_no_write_to_binlog
{
LEX *lex= Lex;
lex->part_info= new partition_info();
if (!lex->part_info)
{
- my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info));
+ mem_alloc_error(sizeof(partition_info));
YYABORT;
}
- lex->alter_info.flags|= ALTER_REORGANISE_PARTITION;
+ lex->no_write_to_binlog= $3;
}
- alt_part_name_list INTO '(' part_def_list ')'
+ reorg_parts_rule
+ ;
+
+reorg_parts_rule:
+ /* empty */
+ {
+ Lex->alter_info.flags|= ALTER_TABLE_REORG;
+ }
+ |
+ alt_part_name_list
+ {
+ Lex->alter_info.flags|= ALTER_REORGANIZE_PARTITION;
+ }
+ INTO '(' part_def_list ')'
{
LEX *lex= Lex;
- lex->part_info->no_parts= lex->part_info->count_curr_parts;
+ lex->part_info->no_parts= lex->part_info->partitions.elements;
}
;
@@ -4874,7 +5024,11 @@ alt_part_name_list:
alt_part_name_item:
ident
{
- Lex->alter_info.partition_names.push_back($1.str);
+ if (Lex->alter_info.partition_names.push_back($1.str))
+ {
+ mem_alloc_error(1);
+ YYABORT;
+ }
}
;
@@ -9262,6 +9416,7 @@ keyword_sp:
| RAID_CHUNKSIZE {}
| RAID_STRIPED_SYM {}
| RAID_TYPE {}
+ | REBUILD_SYM {}
| RECOVER_SYM {}
| REDO_BUFFER_SIZE_SYM {}
| REDOFILE_SYM {}
@@ -9269,7 +9424,7 @@ keyword_sp:
| RELAY_LOG_FILE_SYM {}
| RELAY_LOG_POS_SYM {}
| RELOAD {}
- | REORGANISE_SYM {}
+ | REORGANIZE_SYM {}
| REPEATABLE_SYM {}
| REPLICATION {}
| RESOURCES {}
diff --git a/sql/table.cc b/sql/table.cc
index b8811366524..352ca495a31 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -388,6 +388,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
#ifdef WITH_PARTITION_STORAGE_ENGINE
share->default_part_db_type=
ha_checktype(thd, (enum legacy_db_type) (uint) *(head+61), 0, 0);
+ DBUG_PRINT("info", ("default_part_db_type = %u", head[61]));
#endif
legacy_db_type= (enum legacy_db_type) (uint) *(head+3);
share->db_type= ha_checktype(thd, legacy_db_type, 0, 0);
@@ -525,7 +526,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
((uint2korr(head+14) == 0xffff ?
uint4korr(head+47) : uint2korr(head+14))));
- if ((n_length= uint2korr(head+55)))
+ if ((n_length= uint4korr(head+55)))
{
/* Read extra data segment */
char *buff, *next_chunk, *buff_end;
@@ -599,6 +600,38 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
#endif
next_chunk+= 5 + partition_info_len;
}
+ if (share->mysql_version > 50105 && next_chunk + 5 < buff_end)
+ {
+ /*
+        Partition state was introduced in version 5.1.5 to support
+        partition management
+ */
+ uint32 part_state_len= uint4korr(next_chunk);
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if ((share->part_state_len= part_state_len))
+ if (!(share->part_state=
+ (uchar*) memdup_root(&share->mem_root, next_chunk + 4,
+ part_state_len)))
+ {
+ my_free(buff, MYF(0));
+ goto err;
+ }
+#else
+ if (part_state_len)
+ {
+ DBUG_PRINT("info", ("WITH_PARTITION_STORAGE_ENGINE is not defined"));
+ my_free(buff, MYF(0));
+ goto err;
+ }
+#endif
+ next_chunk+= 4 + part_state_len;
+ }
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ else
+ {
+ share->part_state_len= 0;
+ share->part_state= NULL;
+ }
+#endif
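+  /*
+    Layout note (a reading of the code here and of the matching writes
+    in mysql_create_frm, unireg.cc): the partition chunk of the extra
+    data segment is 4 bytes of partition_info_len, then
+    partition_info_len + 1 bytes of partition info string, then 4 bytes
+    of part_state_len, then part_state_len bytes of partition state,
+    which is why next_chunk advances by 4 + part_state_len above.
+  */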
keyinfo= share->key_info;
for (i= 0; i < keys; i++, keyinfo++)
{
@@ -1223,7 +1256,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
uint db_stat, uint prgflag, uint ha_open_flags,
- TABLE *outparam)
+ TABLE *outparam, bool is_create_table)
{
int error;
uint records, i;
@@ -1379,13 +1412,17 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
{
if (mysql_unpack_partition(thd, share->partition_info,
share->partition_info_len,
- outparam, share->default_part_db_type))
+ (uchar*)share->part_state,
+ share->part_state_len,
+ outparam, is_create_table,
+ share->default_part_db_type))
goto err;
/*
Fix the partition functions and ensure they are not constant
functions
*/
- if (fix_partition_func(thd, share->normalized_path.str, outparam))
+ if (fix_partition_func(thd, share->normalized_path.str, outparam,
+ is_create_table))
goto err;
}
#endif
@@ -1503,6 +1540,7 @@ int closefrm(register TABLE *table, bool free_share)
if (table->part_info)
{
free_items(table->part_info->item_free_list);
+ table->part_info->item_free_list= 0;
table->part_info= 0;
}
#endif
@@ -1985,7 +2023,7 @@ File create_frm(THD *thd, const char *name, const char *db,
int4store(fileinfo+47, key_length);
tmp= MYSQL_VERSION_ID; // Store to avoid warning from int4store
int4store(fileinfo+51, tmp);
- int2store(fileinfo+55, create_info->extra_size);
+ int4store(fileinfo+55, create_info->extra_size);
bzero(fill,IO_SIZE);
for (; length > IO_SIZE ; length-= IO_SIZE)
{
diff --git a/sql/table.h b/sql/table.h
index 99b818ef47b..ac56a7840bf 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -201,6 +201,8 @@ typedef struct st_table_share
#ifdef WITH_PARTITION_STORAGE_ENGINE
const uchar *partition_info;
uint partition_info_len;
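+  /*
+    Raw partition state array from the frm extra data segment
+    (filled in by open_binary_frm, written by mysql_create_frm)
+  */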
+ const uchar *part_state;
+ uint part_state_len;
handlerton *default_part_db_type;
#endif
} TABLE_SHARE;
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 7b15e14bdaf..4200a36ab58 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -89,9 +89,6 @@ bool mysql_create_frm(THD *thd, const char *file_name,
partition_info *part_info= thd->lex->part_info;
#endif
DBUG_ENTER("mysql_create_frm");
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- thd->lex->part_info= NULL;
-#endif
DBUG_ASSERT(*fn_rext((char*)file_name)); // Check .frm extension
formnames.type_names=0;
@@ -134,10 +131,13 @@ bool mysql_create_frm(THD *thd, const char *file_name,
create_info->extra_size= (2 + str_db_type.length +
2 + create_info->connect_string.length);
/* Partition */
- create_info->extra_size+= 5;
+ create_info->extra_size+= 9;
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (part_info)
+ {
create_info->extra_size+= part_info->part_info_len;
+ create_info->extra_size+= part_info->part_state_len;
+ }
#endif
for (i= 0; i < keys; i++)
@@ -171,7 +171,10 @@ bool mysql_create_frm(THD *thd, const char *file_name,
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (part_info)
+ {
fileinfo[61]= (uchar) ha_legacy_type(part_info->default_engine_type);
+ DBUG_PRINT("info", ("part_db_type = %d", fileinfo[61]));
+ }
#endif
int2store(fileinfo+59,db_file->extra_rec_buf_length());
if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) ||
@@ -206,12 +209,18 @@ bool mysql_create_frm(THD *thd, const char *file_name,
my_write(file, (const byte*)part_info->part_info_string,
part_info->part_info_len + 1, MYF_RW))
goto err;
+ DBUG_PRINT("info", ("Part state len = %d", part_info->part_state_len));
+ int4store(buff, part_info->part_state_len);
+ if (my_write(file, (const byte*)buff, 4, MYF_RW) ||
+ my_write(file, (const byte*)part_info->part_state,
+ part_info->part_state_len, MYF_RW))
+ goto err;
}
else
#endif
{
- bzero(buff, 5);
- if (my_write(file, (byte*) buff, 5, MYF_RW))
+ bzero(buff, 9);
+ if (my_write(file, (byte*) buff, 9, MYF_RW))
goto err;
}
for (i= 0; i < keys; i++)
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index f258b1b1f99..c6bb689485d 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -88,6 +88,8 @@ handlerton tina_hton= {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter Tablespace */
HTON_CAN_RECREATE
};
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index f4b1276198d..30034496291 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -103,6 +103,8 @@ handlerton example_hton= {
NULL, /* Start Consistent Snapshot */
NULL, /* Flush logs */
NULL, /* Show status */
+ NULL, /* Partition flags */
+ NULL, /* Alter table flags */
NULL, /* Alter tablespace */
HTON_CAN_RECREATE
};
diff --git a/storage/ndb/include/kernel/ndb_limits.h b/storage/ndb/include/kernel/ndb_limits.h
index 3f46f3802c7..a4da7262341 100644
--- a/storage/ndb/include/kernel/ndb_limits.h
+++ b/storage/ndb/include/kernel/ndb_limits.h
@@ -64,6 +64,7 @@
#define MAX_NULL_BITS 4096
#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
#define MAX_NDB_PARTITIONS 1024
+#define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data
#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*
diff --git a/storage/ndb/include/kernel/signaldata/AlterTable.hpp b/storage/ndb/include/kernel/signaldata/AlterTable.hpp
index 572e97afbd6..260c8511bd4 100644
--- a/storage/ndb/include/kernel/signaldata/AlterTable.hpp
+++ b/storage/ndb/include/kernel/signaldata/AlterTable.hpp
@@ -63,6 +63,10 @@ private:
/*
n = Changed name
f = Changed frm
+ d = Changed fragment data
+ r = Changed range or list array
+ t = Changed tablespace name array
+ s = Changed tablespace id array
1111111111222222222233
01234567890123456789012345678901
@@ -70,6 +74,10 @@ private:
*/
#define NAME_SHIFT (0)
#define FRM_SHIFT (1)
+#define FRAG_DATA_SHIFT (2)
+#define RANGE_LIST_SHIFT (3)
+#define TS_NAME_SHIFT (4)
+#define TS_SHIFT (5)
/**
* Getters and setters
@@ -78,10 +86,30 @@ private:
static void setNameFlag(UintR & changeMask, Uint32 nameFlg);
static Uint8 getFrmFlag(const UintR & changeMask);
static void setFrmFlag(UintR & changeMask, Uint32 frmFlg);
+ static Uint8 getFragDataFlag(const UintR & changeMask);
+ static void setFragDataFlag(UintR & changeMask, Uint32 fragFlg);
+ static Uint8 getRangeListFlag(const UintR & changeMask);
+ static void setRangeListFlag(UintR & changeMask, Uint32 rangeFlg);
+ static Uint8 getTsNameFlag(const UintR & changeMask);
+ static void setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg);
+ static Uint8 getTsFlag(const UintR & changeMask);
+ static void setTsFlag(UintR & changeMask, Uint32 tsFlg);
};
inline
Uint8
+AlterTableReq::getTsFlag(const UintR & changeMask){
+ return (Uint8)((changeMask >> TS_SHIFT) & 1);
+}
+
+inline
+void
+AlterTableReq::setTsFlag(UintR & changeMask, Uint32 tsFlg){
+ changeMask |= (tsFlg << TS_SHIFT);
+}
+
+inline
+Uint8
AlterTableReq::getNameFlag(const UintR & changeMask){
return (Uint8)((changeMask >> NAME_SHIFT) & 1);
}
@@ -104,6 +132,42 @@ AlterTableReq::setFrmFlag(UintR & changeMask, Uint32 frmFlg){
changeMask |= (frmFlg << FRM_SHIFT);
}
+inline
+Uint8
+AlterTableReq::getFragDataFlag(const UintR & changeMask){
+ return (Uint8)((changeMask >> FRAG_DATA_SHIFT) & 1);
+}
+
+inline
+void
+AlterTableReq::setFragDataFlag(UintR & changeMask, Uint32 fragDataFlg){
+ changeMask |= (fragDataFlg << FRAG_DATA_SHIFT);
+}
+
+inline
+Uint8
+AlterTableReq::getRangeListFlag(const UintR & changeMask){
+ return (Uint8)((changeMask >> RANGE_LIST_SHIFT) & 1);
+}
+
+inline
+void
+AlterTableReq::setRangeListFlag(UintR & changeMask, Uint32 rangeFlg){
+ changeMask |= (rangeFlg << RANGE_LIST_SHIFT);
+}
+
+inline
+Uint8
+AlterTableReq::getTsNameFlag(const UintR & changeMask){
+ return (Uint8)((changeMask >> TS_NAME_SHIFT) & 1);
+}
+
+inline
+void
+AlterTableReq::setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg){
+ changeMask |= (tsNameFlg << TS_NAME_SHIFT);
+}
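+
+/*
+  Usage sketch (hypothetical, not from this patch): flag the range/list
+  array and tablespace id array as changed in a request's change mask:
+
+    UintR changeMask= 0;
+    AlterTableReq::setRangeListFlag(changeMask, 1);
+    AlterTableReq::setTsFlag(changeMask, 1);
+    // getRangeListFlag(changeMask) and getTsFlag(changeMask) both
+    // return 1 now.
+*/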
+
class AlterTableRef {
/**
diff --git a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp
index 6b17515eb6f..47456f11842 100644
--- a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp
@@ -32,6 +32,7 @@ class DiAddTabReq {
public:
STATIC_CONST( SignalLength = 9 );
SECTION( FRAGMENTATION = 0 );
+ SECTION( TS_RANGE = 0 );
private:
Uint32 connectPtr;
diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
index a46750228b6..c9a0e84de19 100644
--- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -122,6 +122,15 @@ public:
FragmentData = 130, // CREATE_FRAGMENTATION reply
TablespaceId = 131,
TablespaceVersion = 132,
+ TablespaceDataLen = 133,
+ TablespaceData = 134,
+ RangeListDataLen = 135,
+ RangeListData = 136,
+ ReplicaDataLen = 137,
+ ReplicaData = 138,
+ MaxRowsLow = 139,
+ MaxRowsHigh = 140,
+ DefaultNoPartFlag = 141,
RowGCIFlag = 150,
RowChecksumFlag = 151,
@@ -298,11 +307,25 @@ public:
Uint32 CustomTriggerId;
Uint32 TablespaceId;
Uint32 TablespaceVersion;
+ Uint32 MaxRowsLow;
+ Uint32 MaxRowsHigh;
+ Uint32 DefaultNoPartFlag;
+ /*
+ TODO RONM:
+    We need to replace FRM, Fragment Data, Tablespace Data and, in
+    particular, RangeListData with dynamic arrays
+ */
Uint32 FrmLen;
char FrmData[MAX_FRM_DATA_SIZE];
Uint32 FragmentCount;
+ Uint32 ReplicaDataLen;
+ Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES];
Uint32 FragmentDataLen;
- Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2];
+ Uint16 FragmentData[3*MAX_NDB_PARTITIONS];
+ Uint32 TablespaceDataLen;
+ Uint32 TablespaceData[2*MAX_NDB_PARTITIONS];
+ Uint32 RangeListDataLen;
+ char RangeListData[4*2*MAX_NDB_PARTITIONS*2];
Uint32 RowGCIFlag;
Uint32 RowChecksumFlag;
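
MaxRows travels as two 32-bit properties, presumably because SimpleProperties carries 32-bit values. A sketch of the split and reassembly, mirroring what createOrAlterTable and parseTableInfo do later in this patch:

  Uint64 maxRows = 10000000000ULL;              // example value
  Uint32 maxRowsHigh = (Uint32)(maxRows >> 32);
  Uint32 maxRowsLow  = (Uint32)(maxRows & 0xFFFFFFFF);
  /* receiving side */
  Uint64 restored = (((Uint64)maxRowsHigh) << 32) + maxRowsLow;
  assert(restored == maxRows);
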
diff --git a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
index cd3f8849552..4c77e337122 100644
--- a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp
@@ -49,6 +49,7 @@ private:
Uint32 nodeId;
Uint32 totalFragments;
Uint32 startGci;
+ Uint32 tablespaceId;
Uint32 logPartId;
};
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 2599a391318..d4c0c14dea4 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -669,10 +669,22 @@ public:
Uint32 getFrmLength() const;
/**
- * Get Node Group and Tablespace id's for fragments in table
+ * Get Fragment Data (id, state and node group)
*/
- const void *getNodeGroupIds() const;
- Uint32 getNodeGroupIdsLength() const;
+ const void *getFragmentData() const;
+ Uint32 getFragmentDataLen() const;
+
+ /**
+ * Get Range or List Array (value, partition)
+ */
+ const void *getRangeListData() const;
+ Uint32 getRangeListDataLen() const;
+
+ /**
+ * Get Tablespace Data (id, version)
+ */
+ const void *getTablespaceData() const;
+ Uint32 getTablespaceDataLen() const;
/** @} *******************************************************************/
@@ -722,6 +734,16 @@ public:
void setLogging(bool);
/**
+ * Set fragment count
+ */
+ void setFragmentCount(Uint32);
+
+ /**
+ * Get fragment count
+ */
+ Uint32 getFragmentCount() const;
+
+ /**
* Set fragmentation type
*/
void setFragmentType(FragmentType);
@@ -773,6 +795,19 @@ public:
virtual int getObjectVersion() const;
/**
+ * Set/Get Maximum number of rows in table (only used to calculate
+ * number of partitions).
+ */
+ void setMaxRows(Uint64 maxRows);
+ Uint64 getMaxRows();
+
+ /**
+ * Set/Get indicator if default number of partitions is used in table.
+ */
+ void setDefaultNoPartitionsFlag(Uint32 indicator);
+ Uint32 getDefaultNoPartitionsFlag();
+
+ /**
* Get object id
*/
virtual int getObjectId() const;
@@ -783,9 +818,34 @@ public:
void setFrm(const void* data, Uint32 len);
/**
- * Set node group for fragments
+ * Set array of fragment information containing
+ * Fragment Identity
+ * Node group identity
+ * Fragment State
+ */
+ void setFragmentData(const void* data, Uint32 len);
+
+ /**
+ * Set/Get tablespace names per fragment
+ */
+ void setTablespaceNames(const void* data, Uint32 len);
+ const void *getTablespaceNames();
+ Uint32 getTablespaceNamesLen();
+
+ /**
+ * Set tablespace information per fragment
+ * Contains a tablespace id and a tablespace version
+ */
+ void setTablespaceData(const void* data, Uint32 len);
+
+ /**
+ * Set array of information mapping range values and list values
+ * to fragments. This is essentially a sorted map consisting of
+ * pairs of value, fragment identity. For range partitions there is
+ * one pair per fragment. For list partitions it could be any number
+ * of pairs, at least as many as there are fragments.
*/
- void setNodeGroupIds(const void *data, Uint32 len);
+ void setRangeListData(const void* data, Uint32 len);
/**
* Set table object type
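
Taken together, the new setters let the caller describe a partitioned table up front. A hypothetical sketch (the table name, the values and the Uint16 encoding of the range array are illustrative assumptions, not an API contract):

  NdbDictionary::Table tab("t1");
  tab.setFragmentCount(4);
  tab.setMaxRows((Uint64)1 << 32);    // only drives the partition-count calculation
  tab.setDefaultNoPartitionsFlag(0);  // partitioning was specified explicitly
  /* one (value, fragment) pair per fragment for a range-partitioned table */
  Uint16 ranges[] = { 100, 0, 200, 1, 300, 2, 400, 3 };
  tab.setRangeListData(ranges, sizeof(ranges));
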
diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index 885c2a03d93..bc8622818b8 100644
--- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
@@ -44,11 +44,21 @@ DictTabInfo::TableMapping[] = {
DTIMAP(Table, CustomTriggerId, CustomTriggerId),
DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE),
DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen),
- DTIMAP(Table, FragmentCount, FragmentCount),
- DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES),
- DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen),
+ DTIMAP2(Table, FragmentCount, FragmentCount, 0, MAX_NDB_PARTITIONS),
+ DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, 2*MAX_FRAGMENT_DATA_BYTES),
+ DTIMAPB(Table, ReplicaData, ReplicaData, 0, 2*MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen),
+ DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, 6*MAX_NDB_PARTITIONS),
+ DTIMAPB(Table, FragmentData, FragmentData, 0, 6*MAX_NDB_PARTITIONS, FragmentDataLen),
+ DTIMAP2(Table, TablespaceDataLen, TablespaceDataLen, 0, 8*MAX_NDB_PARTITIONS),
+ DTIMAPB(Table, TablespaceData, TablespaceData, 0, 8*MAX_NDB_PARTITIONS, TablespaceDataLen),
+ DTIMAP2(Table, RangeListDataLen, RangeListDataLen, 0, 8*MAX_NDB_PARTITIONS),
+ DTIMAPB(Table, RangeListData, RangeListData, 0, 8*MAX_NDB_PARTITIONS, RangeListDataLen),
DTIMAP(Table, TablespaceId, TablespaceId),
DTIMAP(Table, TablespaceVersion, TablespaceVersion),
+ DTIMAP(Table, MaxRowsLow, MaxRowsLow),
+ DTIMAP(Table, MaxRowsHigh, MaxRowsHigh),
+ DTIMAP(Table, DefaultNoPartFlag, DefaultNoPartFlag),
DTIMAP(Table, RowGCIFlag, RowGCIFlag),
DTIMAP(Table, RowChecksumFlag, RowChecksumFlag),
DTIBREAK(AttributeName)
@@ -124,12 +134,21 @@ DictTabInfo::Table::init(){
DeleteTriggerId = RNIL;
CustomTriggerId = RNIL;
FrmLen = 0;
- memset(FrmData, 0, sizeof(FrmData));
- FragmentCount = 0;
FragmentDataLen = 0;
+ ReplicaDataLen = 0;
+ RangeListDataLen = 0;
+ TablespaceDataLen = 0;
+ memset(FrmData, 0, sizeof(FrmData));
memset(FragmentData, 0, sizeof(FragmentData));
+ memset(ReplicaData, 0, sizeof(ReplicaData));
+ memset(RangeListData, 0, sizeof(RangeListData));
+ memset(TablespaceData, 0, sizeof(TablespaceData));
+ FragmentCount = 0;
TablespaceId = RNIL;
TablespaceVersion = ~0;
+ MaxRowsLow = 0;
+ MaxRowsHigh = 0;
+ DefaultNoPartFlag = 1;
RowGCIFlag = ~0;
RowChecksumFlag = ~0;
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 2ebf06a0219..68f54ad9757 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -408,6 +408,9 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
union {
char tableName[MAX_TAB_NAME_SIZE];
char frmData[MAX_FRM_DATA_SIZE];
+ char rangeData[16*MAX_NDB_PARTITIONS];
+ char ngData[2*MAX_NDB_PARTITIONS];
+ char tsData[2*2*MAX_NDB_PARTITIONS];
char defaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE];
char attributeName[MAX_ATTR_NAME_SIZE];
};
@@ -434,13 +437,15 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
-
- if(!signal)
- {
- w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
- }
- else
+ w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow);
+ w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh);
+ w.add(DictTabInfo::DefaultNoPartFlag, tablePtr.p->defaultNoPartFlag);
+ w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
+
+ if(signal)
{
+    /* This branch is executed for GET_TABINFOREQ */
+
Uint32 * theData = signal->getDataPtrSend();
CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
req->senderRef = 0;
@@ -450,18 +455,16 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
req->primaryTableId = tablePtr.i;
EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
CreateFragmentationReq::SignalLength);
- if(signal->theData[0] == 0)
- {
- Uint16 *data = (Uint16*)&signal->theData[25];
- Uint32 count = 2 + data[0] * data[1];
- w.add(DictTabInfo::FragmentDataLen, 2*count);
- w.add(DictTabInfo::FragmentData, data, 2*count);
- ndbrequire(count > 0);
- }
- else
- {
- ndbrequire(false);
- }
+ ndbrequire(signal->theData[0] == 0);
+ Uint16 *data = (Uint16*)&signal->theData[25];
+ Uint32 count = 2 + data[0] * data[1];
+ w.add(DictTabInfo::ReplicaDataLen, 2*count);
+ w.add(DictTabInfo::ReplicaData, data, 2*count);
+ }
+ else
+ {
+    /* This part is executed for CREATE_TABLEREQ and ALTER_TABLEREQ */
+ ;
}
if (tablePtr.p->primaryTableId != RNIL){
@@ -480,10 +483,27 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w,
ConstRope frm(c_rope_pool, tablePtr.p->frmData);
frm.copy(frmData);
-
w.add(DictTabInfo::FrmLen, frm.size());
w.add(DictTabInfo::FrmData, frmData, frm.size());
+ {
+ jam();
+ ConstRope ts(c_rope_pool, tablePtr.p->tsData);
+ ts.copy(tsData);
+ w.add(DictTabInfo::TablespaceDataLen, ts.size());
+ w.add(DictTabInfo::TablespaceData, tsData, ts.size());
+
+ ConstRope ng(c_rope_pool, tablePtr.p->ngData);
+ ng.copy(ngData);
+ w.add(DictTabInfo::FragmentDataLen, ng.size());
+ w.add(DictTabInfo::FragmentData, ngData, ng.size());
+
+ ConstRope range(c_rope_pool, tablePtr.p->rangeData);
+ range.copy(rangeData);
+ w.add(DictTabInfo::RangeListDataLen, range.size());
+ w.add(DictTabInfo::RangeListData, rangeData, range.size());
+ }
+
if(tablePtr.p->m_tablespace_id != RNIL)
{
w.add(DictTabInfo::TablespaceId, tablePtr.p->m_tablespace_id);
@@ -1797,8 +1817,6 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->gciTableCreated = 0;
tablePtr.p->noOfAttributes = ZNIL;
tablePtr.p->noOfNullAttr = 0;
- tablePtr.p->ngLen = 0;
- memset(tablePtr.p->ngData, 0, sizeof(tablePtr.p->ngData));
tablePtr.p->fragmentCount = 0;
/*
tablePtr.p->lh3PageIndexBits = 0;
@@ -1811,6 +1829,9 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->minLoadFactor = 70;
tablePtr.p->noOfPrimkey = 1;
tablePtr.p->tupKeyLength = 1;
+ tablePtr.p->maxRowsLow = 0;
+ tablePtr.p->maxRowsHigh = 0;
+ tablePtr.p->defaultNoPartFlag = true;
tablePtr.p->m_bits = 0;
tablePtr.p->tableType = DictTabInfo::UserTable;
tablePtr.p->primaryTableId = RNIL;
@@ -3608,15 +3629,15 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){
Uint32 key = c_opRecordSequence + 1;
Uint32 *theData = signal->getDataPtrSend(), i;
- Uint16 *node_group= (Uint16*)&signal->theData[25];
+ Uint16 *frag_data= (Uint16*)&signal->theData[25];
CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
req->senderRef = reference();
req->senderData = key;
req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
- req->noOfFragments = parseRecord.tablePtr.p->ngLen >> 1;
+ req->noOfFragments = parseRecord.tablePtr.p->fragmentCount;
req->fragmentationType = parseRecord.tablePtr.p->fragmentType;
- for (i = 0; i < req->noOfFragments; i++)
- node_group[i] = parseRecord.tablePtr.p->ngData[i];
+ MEMCOPY_NO_WORDS(frag_data, c_fragData, c_fragDataLen);
+
if (parseRecord.tablePtr.p->isOrderedIndex()) {
jam();
// ordered index has same fragmentation as the table
@@ -4520,6 +4541,9 @@ int Dbdict::handleAlterTab(AlterTabReq * req,
ndbrequire(org.assign(tmp, src.size()));
}
+/*
+  TODO RONM: Some new code for FragmentData and RangeOrListData
+*/
if (supportedAlteration)
{
// Set new schema version
@@ -4727,11 +4751,12 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
packTableIntoPages(w, tabPtr);
SegmentedSectionPtr spDataPtr;
+ Ptr<SectionSegment> tmpTsPtr;
w.getPtr(spDataPtr);
signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO);
signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION);
-
+
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter);
createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ;
@@ -5109,6 +5134,9 @@ Dbdict::createTab_dih(Signal* signal,
req->schemaVersion = tabPtr.p->tableVersion;
req->primaryTableId = tabPtr.p->primaryTableId;
+/*
+  Need to fetch fragDataPtr from the table object instead
+*/
if(!fragDataPtr.isNull()){
signal->setSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
}
@@ -5203,6 +5231,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
Uint32 fragCount = req->totalFragments;
Uint32 requestInfo = req->requestInfo;
Uint32 startGci = req->startGci;
+ Uint32 tablespace_id= req->tablespaceId;
Uint32 logPart = req->logPartId;
ndbrequire(node == getOwnNodeId());
@@ -5258,6 +5287,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->tableType = tabPtr.p->tableType;
req->primaryTableId = tabPtr.p->primaryTableId;
req->tablespace_id= tabPtr.p->m_tablespace_id;
+ //req->tablespace_id= tablespace_id;
req->logPartId = logPart;
sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal,
LqhFragReq::SignalLength, JBB);
@@ -5740,8 +5770,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
it.first();
SimpleProperties::UnpackStatus status;
- DictTabInfo::Table tableDesc; tableDesc.init();
- status = SimpleProperties::unpack(it, &tableDesc,
+ c_tableDesc.init();
+ status = SimpleProperties::unpack(it, &c_tableDesc,
DictTabInfo::TableMapping,
DictTabInfo::TableMappingSize,
true, true);
@@ -5767,12 +5797,12 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
// Verify that table name is an allowed table name.
// TODO
/* ---------------------------------------------------------------- */
- const Uint32 tableNameLength = strlen(tableDesc.TableName) + 1;
- const Uint32 name_hash = Rope::hash(tableDesc.TableName, tableNameLength);
+ const Uint32 tableNameLength = strlen(c_tableDesc.TableName) + 1;
+ const Uint32 name_hash = Rope::hash(c_tableDesc.TableName, tableNameLength);
if(checkExist){
jam();
- tabRequire(get_object(tableDesc.TableName, tableNameLength) == 0,
+ tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0,
CreateTableRef::TableAlreadyExist);
}
@@ -5783,7 +5813,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
}
case DictTabInfo::AlterTableFromAPI:{
jam();
- tablePtr.i = getFreeTableRecord(tableDesc.PrimaryTableId);
+ tablePtr.i = getFreeTableRecord(c_tableDesc.PrimaryTableId);
/* ---------------------------------------------------------------- */
// Check if no free tables existed.
/* ---------------------------------------------------------------- */
@@ -5799,7 +5829,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
/* ---------------------------------------------------------------- */
// Get table id and check that table doesn't already exist
/* ---------------------------------------------------------------- */
- tablePtr.i = tableDesc.TableId;
+ tablePtr.i = c_tableDesc.TableId;
if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) {
ndbrequire(tablePtr.i == c_restartRecord.activeTable);
@@ -5821,7 +5851,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
/* ---------------------------------------------------------------- */
// Set table version
/* ---------------------------------------------------------------- */
- Uint32 tableVersion = tableDesc.TableVersion;
+ Uint32 tableVersion = c_tableDesc.TableVersion;
tablePtr.p->tableVersion = tableVersion;
break;
@@ -5834,7 +5864,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
{
Rope name(c_rope_pool, tablePtr.p->tableName);
- ndbrequire(name.assign(tableDesc.TableName, tableNameLength, name_hash));
+ ndbrequire(name.assign(c_tableDesc.TableName, tableNameLength, name_hash));
}
Ptr<DictObject> obj_ptr;
@@ -5842,7 +5872,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
jam();
ndbrequire(c_obj_hash.seize(obj_ptr));
obj_ptr.p->m_id = tablePtr.i;
- obj_ptr.p->m_type = tableDesc.TableType;
+ obj_ptr.p->m_type = c_tableDesc.TableType;
obj_ptr.p->m_name = tablePtr.p->tableName;
obj_ptr.p->m_ref_count = 0;
c_obj_hash.add(obj_ptr);
@@ -5850,42 +5880,54 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
#ifdef VM_TRACE
ndbout_c("Dbdict: name=%s,id=%u,obj_ptr_i=%d",
- tableDesc.TableName, tablePtr.i, tablePtr.p->m_obj_ptr_i);
+ c_tableDesc.TableName, tablePtr.i, tablePtr.p->m_obj_ptr_i);
#endif
}
- tablePtr.p->noOfAttributes = tableDesc.NoOfAttributes;
+ tablePtr.p->noOfAttributes = c_tableDesc.NoOfAttributes;
tablePtr.p->m_bits |=
- (tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0);
+ (c_tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0);
tablePtr.p->m_bits |=
- (tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0);
+ (c_tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0);
tablePtr.p->m_bits |=
- (tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0);
- tablePtr.p->minLoadFactor = tableDesc.MinLoadFactor;
- tablePtr.p->maxLoadFactor = tableDesc.MaxLoadFactor;
- tablePtr.p->fragmentType = (DictTabInfo::FragmentType)tableDesc.FragmentType;
- tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
- tablePtr.p->kValue = tableDesc.TableKValue;
- tablePtr.p->fragmentCount = tableDesc.FragmentCount;
- tablePtr.p->m_tablespace_id = tableDesc.TablespaceId;
+ (c_tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0);
+ tablePtr.p->minLoadFactor = c_tableDesc.MinLoadFactor;
+ tablePtr.p->maxLoadFactor = c_tableDesc.MaxLoadFactor;
+ tablePtr.p->fragmentType = (DictTabInfo::FragmentType)c_tableDesc.FragmentType;
+ tablePtr.p->tableType = (DictTabInfo::TableType)c_tableDesc.TableType;
+ tablePtr.p->kValue = c_tableDesc.TableKValue;
+ tablePtr.p->fragmentCount = c_tableDesc.FragmentCount;
+ tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId;
+ tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow;
+ tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh;
+ tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag;
{
Rope frm(c_rope_pool, tablePtr.p->frmData);
- ndbrequire(frm.assign(tableDesc.FrmData, tableDesc.FrmLen));
- }
-
- tablePtr.p->ngLen = tableDesc.FragmentDataLen;
- memcpy(tablePtr.p->ngData, tableDesc.FragmentData,
- tableDesc.FragmentDataLen);
-
- if(tableDesc.PrimaryTableId != RNIL) {
-
- tablePtr.p->primaryTableId = tableDesc.PrimaryTableId;
- tablePtr.p->indexState = (TableRecord::IndexState)tableDesc.IndexState;
- tablePtr.p->insertTriggerId = tableDesc.InsertTriggerId;
- tablePtr.p->updateTriggerId = tableDesc.UpdateTriggerId;
- tablePtr.p->deleteTriggerId = tableDesc.DeleteTriggerId;
- tablePtr.p->customTriggerId = tableDesc.CustomTriggerId;
+ ndbrequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen));
+ Rope range(c_rope_pool, tablePtr.p->rangeData);
+ ndbrequire(range.assign(c_tableDesc.RangeListData,
+ c_tableDesc.RangeListDataLen));
+ Rope fd(c_rope_pool, tablePtr.p->ngData);
+ ndbrequire(fd.assign((const char*)c_tableDesc.FragmentData,
+ c_tableDesc.FragmentDataLen));
+ Rope ts(c_rope_pool, tablePtr.p->tsData);
+ ndbrequire(ts.assign((const char*)c_tableDesc.TablespaceData,
+ c_tableDesc.TablespaceDataLen));
+ }
+
+ c_fragDataLen = c_tableDesc.FragmentDataLen;
+ memcpy(c_fragData, c_tableDesc.FragmentData,
+ c_tableDesc.FragmentDataLen);
+
+ if(c_tableDesc.PrimaryTableId != RNIL) {
+
+ tablePtr.p->primaryTableId = c_tableDesc.PrimaryTableId;
+ tablePtr.p->indexState = (TableRecord::IndexState)c_tableDesc.IndexState;
+ tablePtr.p->insertTriggerId = c_tableDesc.InsertTriggerId;
+ tablePtr.p->updateTriggerId = c_tableDesc.UpdateTriggerId;
+ tablePtr.p->deleteTriggerId = c_tableDesc.DeleteTriggerId;
+ tablePtr.p->customTriggerId = c_tableDesc.CustomTriggerId;
} else {
tablePtr.p->primaryTableId = RNIL;
tablePtr.p->indexState = TableRecord::IS_UNDEFINED;
@@ -5897,7 +5939,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->buildTriggerId = RNIL;
tablePtr.p->indexLocal = 0;
- handleTabInfo(it, parseP, tableDesc);
+ handleTabInfo(it, parseP, c_tableDesc);
if(parseP->errorCode != 0)
{
@@ -7460,10 +7502,9 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal)
// save name and index table properties
signal->getSection(ssPtr, CreateIndxReq::INDEX_NAME_SECTION);
SimplePropertiesSectionReader r1(ssPtr, getSectionSegmentPool());
- DictTabInfo::Table tableDesc;
- tableDesc.init();
+ c_tableDesc.init();
SimpleProperties::UnpackStatus status = SimpleProperties::unpack(
- r1, &tableDesc,
+ r1, &c_tableDesc,
DictTabInfo::TableMapping, DictTabInfo::TableMappingSize,
true, true);
if (status != SimpleProperties::Eof) {
@@ -7473,8 +7514,8 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal)
createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster);
return;
}
- memcpy(opPtr.p->m_indexName, tableDesc.TableName, MAX_TAB_NAME_SIZE);
- opPtr.p->m_storedIndex = tableDesc.TableLoggedFlag;
+ memcpy(opPtr.p->m_indexName, c_tableDesc.TableName, MAX_TAB_NAME_SIZE);
+ opPtr.p->m_storedIndex = c_tableDesc.TableLoggedFlag;
releaseSections(signal);
// master expects to hear from all
if (opPtr.p->m_isMaster)
@@ -13097,7 +13138,7 @@ Dbdict::getTableKeyList(TableRecordPtr tablePtr,
list.id[list.sz++] = attrPtr.p->attributeId;
}
}
- ndbrequire(list.sz == tablePtr.p->noOfPrimkey + 1);
+ ndbrequire(list.sz == (uint)(tablePtr.p->noOfPrimkey + 1));
ndbrequire(list.sz <= MAX_ATTRIBUTES_IN_INDEX + 1);
}
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 7ad5e0d8b49..710305b6af2 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -203,6 +203,8 @@ public:
*/
struct TableRecord {
TableRecord(){}
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
/* Table id (array index in DICT and other blocks) */
Uint32 tableId;
Uint32 m_obj_ptr_i;
@@ -269,6 +271,11 @@ public:
Uint8 maxLoadFactor;
/*
+ Flag to indicate default number of partitions
+ */
+ bool defaultNoPartFlag;
+
+ /*
* Used when shrinking to decide when to merge buckets. Hysteresis
* is thus possible. Should be smaller but not much smaller than
* maxLoadFactor
@@ -353,10 +360,9 @@ public:
/** frm data for this table */
RopeHandle frmData;
- /** Node Group and Tablespace id for this table */
- /** TODO Could preferrably be made dynamic size */
- Uint32 ngLen;
- Uint16 ngData[MAX_NDB_PARTITIONS];
+ RopeHandle tsData;
+ RopeHandle ngData;
+ RopeHandle rangeData;
Uint32 fragmentCount;
Uint32 m_tablespace_id;
@@ -365,6 +371,15 @@ public:
typedef Ptr<TableRecord> TableRecordPtr;
ArrayPool<TableRecord> c_tableRecordPool;
+ /** Node Group and Tablespace id+version + range or list data.
+ * This is only stored temporarily in DBDICT during an ongoing
+ * change.
+ * TODO RONM: Look into improvements of this
+ */
+ Uint32 c_fragDataLen;
+ Uint16 c_fragData[MAX_NDB_PARTITIONS];
+ Uint32 c_tsIdData[2*MAX_NDB_PARTITIONS];
+
/**
* Triggers. This is volatile data not saved on disk. Setting a
* trigger online creates the trigger in TC (if index) and LQH-TUP.
@@ -504,6 +519,8 @@ public:
CArray<SchemaPageRecord> c_schemaPageRecordArray;
+ DictTabInfo::Table c_tableDesc;
+
/**
* A page for create index table signal.
*/
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index be9f988cb7d..6ae79dd73e7 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -5785,6 +5785,9 @@ void Dblqh::execCOMPLETE(Signal* signal)
errorReport(signal, 1);
return;
}//if
+ if (ERROR_INSERTED(5042)) {
+ ndbrequire(false);
+ }
if (ERROR_INSERTED(5013)) {
CLEAR_ERROR_INSERT_VALUE;
sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 643863b31a1..1f393b036e3 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -534,6 +534,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
CreateFilegroupImplReq rep;
if(regTabPtr.p->m_no_of_disk_attributes)
{
+ ljam();
Tablespace_client tsman(0, c_tsman, 0, 0,
regFragPtr.p->m_tablespace_id);
ndbrequire(tsman.get_tablespace_info(&rep) == 0);
@@ -545,11 +546,14 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
if (regTabPtr.p->m_no_of_disk_attributes)
{
+ ljam();
if(!(getNodeState().getSystemRestartInProgress() &&
getNodeState().startLevel == NodeState::SL_STARTING &&
getNodeState().starting.startPhase <= 4))
{
Callback cb;
+ ljam();
+
cb.m_callbackData= fragOperPtr.i;
cb.m_callbackFunction =
safe_cast(&Dbtup::undo_createtable_callback);
@@ -562,6 +566,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
int res= lgman.get_log_buffer(signal, sz, &cb);
switch(res){
case 0:
+ ljam();
signal->theData[0] = 1;
return;
case -1:
diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp
index 8d098a9f493..2f5325bd844 100644
--- a/storage/ndb/src/ndbapi/NdbBlob.cpp
+++ b/storage/ndb/src/ndbapi/NdbBlob.cpp
@@ -76,11 +76,16 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm
BLOB tables use the same fragmentation as the original table
but may change the fragment type if it is UserDefined since it
must be hash based so that the kernel can handle it on its own.
+ It also uses the same tablespaces and it never uses any range or
+ list arrays.
*/
bt.m_primaryTableId = t->m_id;
+ bt.m_fd.clear();
+ bt.m_ts.clear();
+ bt.m_range.clear();
+ bt.setFragmentCount(t->getFragmentCount());
bt.m_tablespace_id = t->m_tablespace_id;
bt.m_tablespace_version = t->m_tablespace_version;
- bt.m_ng.clear();
switch (t->getFragmentType())
{
case NdbDictionary::Object::FragAllSmall:
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index 19069d2a16d..4a986054457 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -413,6 +413,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const {
return m_impl.m_noOfKeys;
}
+void
+NdbDictionary::Table::setMaxRows(Uint64 maxRows)
+{
+ m_impl.m_max_rows = maxRows;
+}
+
+Uint64
+NdbDictionary::Table::getMaxRows()
+{
+ return m_impl.m_max_rows;
+}
+
+void
+NdbDictionary::Table::setDefaultNoPartitionsFlag(Uint32 flag)
+{
+  m_impl.m_default_no_part_flag = flag;
+}
+
+Uint32
+NdbDictionary::Table::getDefaultNoPartitionsFlag()
+{
+ return m_impl.m_default_no_part_flag;
+}
+
const char*
NdbDictionary::Table::getPrimaryKey(int no) const {
int count = 0;
@@ -436,24 +460,86 @@ NdbDictionary::Table::getFrmLength() const {
}
void
+NdbDictionary::Table::setTablespaceNames(const void *data, Uint32 len)
+{
+ m_impl.setTablespaceNames(data, len);
+}
+
+const void*
+NdbDictionary::Table::getTablespaceNames()
+{
+ return m_impl.getTablespaceNames();
+}
+
+Uint32
+NdbDictionary::Table::getTablespaceNamesLen()
+{
+ return m_impl.getTablespaceNamesLen();
+}
+
+void
+NdbDictionary::Table::setFragmentCount(Uint32 count)
+{
+ m_impl.setFragmentCount(count);
+}
+
+Uint32
+NdbDictionary::Table::getFragmentCount() const
+{
+ return m_impl.getFragmentCount();
+}
+
+void
NdbDictionary::Table::setFrm(const void* data, Uint32 len){
m_impl.setFrm(data, len);
}
const void*
-NdbDictionary::Table::getNodeGroupIds() const {
- return m_impl.m_ng.get_data();
+NdbDictionary::Table::getFragmentData() const {
+ return m_impl.getFragmentData();
+}
+
+Uint32
+NdbDictionary::Table::getFragmentDataLen() const {
+ return m_impl.getFragmentDataLen();
+}
+
+void
+NdbDictionary::Table::setFragmentData(const void* data, Uint32 len)
+{
+ m_impl.setFragmentData(data, len);
+}
+
+const void*
+NdbDictionary::Table::getTablespaceData() const {
+ return m_impl.getTablespaceData();
+}
+
+Uint32
+NdbDictionary::Table::getTablespaceDataLen() const {
+ return m_impl.getTablespaceDataLen();
+}
+
+void
+NdbDictionary::Table::setTablespaceData(const void* data, Uint32 len)
+{
+ m_impl.setTablespaceData(data, len);
+}
+
+const void*
+NdbDictionary::Table::getRangeListData() const {
+ return m_impl.getRangeListData();
}
Uint32
-NdbDictionary::Table::getNodeGroupIdsLength() const {
- return m_impl.m_ng.length();
+NdbDictionary::Table::getRangeListDataLen() const {
+ return m_impl.getRangeListDataLen();
}
void
-NdbDictionary::Table::setNodeGroupIds(const void* data, Uint32 noWords)
+NdbDictionary::Table::setRangeListData(const void* data, Uint32 len)
{
- m_impl.m_ng.assign(data, 2*noWords);
+ m_impl.setRangeListData(data, len);
}
NdbDictionary::Object::Status
@@ -1523,7 +1609,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << " AT=MEDIUM_VAR";
break;
default:
- out << " AT=" << col.getArrayType() << "?";
+ out << " AT=" << (int)col.getArrayType() << "?";
break;
}
@@ -1535,7 +1621,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << " ST=DISK";
break;
default:
- out << " ST=" << col.getStorageType() << "?";
+ out << " ST=" << (int)col.getStorageType() << "?";
break;
}
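
Reading the fragment map back through the public getters could look as follows; three Uint16 words per fragment matches the FragmentData declaration in DictTabInfo.hpp, while the field order (identity, node group, state) is an assumption taken from the header comments:

  const NdbDictionary::Table* tab = dict->getTable("t1");
  const Uint16* fd = (const Uint16*)tab->getFragmentData();
  Uint32 entries = tab->getFragmentDataLen() / (3 * sizeof(Uint16));
  for (Uint32 i = 0; i < entries; i++)
    ndbout << "fragment " << fd[3*i] << " on node group " << fd[3*i+1] << endl;
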
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 3bb1e5838f0..7e429068f23 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -42,6 +42,7 @@
#include <AttributeHeader.hpp>
#include <my_sys.h>
#include <NdbEnv.h>
+#include <NdbMem.h>
#define DEBUG_PRINT 0
#define INCOMPATIBLE_VERSION -2
@@ -370,28 +371,47 @@ void
NdbTableImpl::init(){
m_changeMask= 0;
m_id= RNIL;
+ m_version = ~0;
+ m_status = NdbDictionary::Object::Invalid;
+ m_type = NdbDictionary::Object::TypeUndefined;
m_primaryTableId= RNIL;
+ m_internalName.clear();
+ m_externalName.clear();
+ m_newExternalName.clear();
+ m_mysqlName.clear();
m_frm.clear();
m_newFrm.clear();
- m_fragmentType= NdbDictionary::Object::DistrKeyHash;
+ m_ts_name.clear();
+ m_new_ts_name.clear();
+ m_ts.clear();
+ m_new_ts.clear();
+ m_fd.clear();
+ m_new_fd.clear();
+ m_range.clear();
+ m_new_range.clear();
+ m_fragmentType= NdbDictionary::Object::FragAllSmall;
m_hashValueMask= 0;
m_hashpointerValue= 0;
+ m_primaryTable.clear();
+ m_max_rows = 0;
+ m_default_no_part_flag = 1;
m_logging= true;
+ m_row_gci = true;
+ m_row_checksum = true;
m_kvalue= 6;
m_minLoadFactor= 78;
m_maxLoadFactor= 80;
m_keyLenInWords= 0;
m_fragmentCount= 0;
- m_dictionary= NULL;
m_index= NULL;
m_indexType= NdbDictionary::Object::TypeUndefined;
m_noOfKeys= 0;
m_noOfDistributionKeys= 0;
m_noOfBlobs= 0;
m_replicaCount= 0;
+ m_tablespace_name.clear();
m_tablespace_id = ~0;
- m_row_gci = true;
- m_row_checksum = true;
+ m_tablespace_version = ~0;
}
bool
@@ -401,63 +421,185 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const
if ((m_internalName.c_str() == NULL) ||
(strcmp(m_internalName.c_str(), "") == 0) ||
(obj.m_internalName.c_str() == NULL) ||
- (strcmp(obj.m_internalName.c_str(), "") == 0)) {
+ (strcmp(obj.m_internalName.c_str(), "") == 0))
+ {
// Shallow equal
- if(strcmp(getName(), obj.getName()) != 0){
+ if(strcmp(getName(), obj.getName()) != 0)
+ {
DBUG_PRINT("info",("name %s != %s",getName(),obj.getName()));
DBUG_RETURN(false);
}
- } else
+ }
+ else
+ {
// Deep equal
- if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0){
+ if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0)
{
DBUG_PRINT("info",("m_internalName %s != %s",
m_internalName.c_str(),obj.m_internalName.c_str()));
DBUG_RETURN(false);
}
}
- if(m_fragmentType != obj.m_fragmentType){
- DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,obj.m_fragmentType));
+ if (m_frm.length() != obj.m_frm.length() ||
+ (memcmp(m_frm.get_data(), obj.m_frm.get_data(), m_frm.length())))
+ {
+ DBUG_PRINT("info",("m_frm not equal"));
DBUG_RETURN(false);
}
- if(m_columns.size() != obj.m_columns.size()){
- DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),obj.m_columns.size()));
+ if (m_fd.length() != obj.m_fd.length() ||
+ (memcmp(m_fd.get_data(), obj.m_fd.get_data(), m_fd.length())))
+ {
+ DBUG_PRINT("info",("m_fd not equal"));
+ DBUG_RETURN(false);
+ }
+ if (m_ts.length() != obj.m_ts.length() ||
+ (memcmp(m_ts.get_data(), obj.m_ts.get_data(), m_ts.length())))
+ {
+ DBUG_PRINT("info",("m_ts not equal"));
+ DBUG_RETURN(false);
+ }
+ if (m_range.length() != obj.m_range.length() ||
+ (memcmp(m_range.get_data(), obj.m_range.get_data(), m_range.length())))
+ {
+ DBUG_PRINT("info",("m_range not equal"));
+ DBUG_RETURN(false);
+ }
+ if(m_fragmentType != obj.m_fragmentType)
+ {
+ DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,
+ obj.m_fragmentType));
+ DBUG_RETURN(false);
+ }
+ if(m_columns.size() != obj.m_columns.size())
+ {
+ DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),
+ obj.m_columns.size()));
DBUG_RETURN(false);
}
- for(unsigned i = 0; i<obj.m_columns.size(); i++){
- if(!m_columns[i]->equal(* obj.m_columns[i])){
+ for(unsigned i = 0; i<obj.m_columns.size(); i++)
+ {
+ if(!m_columns[i]->equal(* obj.m_columns[i]))
+ {
DBUG_PRINT("info",("m_columns [%d] != [%d]",i,i));
DBUG_RETURN(false);
}
}
- if(m_logging != obj.m_logging){
+ if(m_max_rows != obj.m_max_rows)
+ {
+ DBUG_PRINT("info",("m_max_rows %d != %d",(int32)m_max_rows,
+ (int32)obj.m_max_rows));
+ DBUG_RETURN(false);
+ }
+
+ if(m_default_no_part_flag != obj.m_default_no_part_flag)
+ {
+ DBUG_PRINT("info",("m_default_no_part_flag %d != %d",m_default_no_part_flag,
+ obj.m_default_no_part_flag));
+ DBUG_RETURN(false);
+ }
+
+ if(m_logging != obj.m_logging)
+ {
DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging));
DBUG_RETURN(false);
}
- if(m_kvalue != obj.m_kvalue){
+ if(m_row_gci != obj.m_row_gci)
+ {
+ DBUG_PRINT("info",("m_row_gci %d != %d",m_row_gci,obj.m_row_gci));
+ DBUG_RETURN(false);
+ }
+
+ if(m_row_checksum != obj.m_row_checksum)
+ {
+ DBUG_PRINT("info",("m_row_checksum %d != %d",m_row_checksum,
+ obj.m_row_checksum));
+ DBUG_RETURN(false);
+ }
+
+ if(m_kvalue != obj.m_kvalue)
+ {
DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue));
DBUG_RETURN(false);
}
- if(m_minLoadFactor != obj.m_minLoadFactor){
- DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,obj.m_minLoadFactor));
+ if(m_minLoadFactor != obj.m_minLoadFactor)
+ {
+ DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,
+ obj.m_minLoadFactor));
+ DBUG_RETURN(false);
+ }
+
+ if(m_maxLoadFactor != obj.m_maxLoadFactor)
+ {
+ DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,
+ obj.m_maxLoadFactor));
+ DBUG_RETURN(false);
+ }
+
+ if(m_tablespace_id != obj.m_tablespace_id)
+ {
+ DBUG_PRINT("info",("m_tablespace_id %d != %d",m_tablespace_id,
+ obj.m_tablespace_id));
DBUG_RETURN(false);
}
- if(m_maxLoadFactor != obj.m_maxLoadFactor){
- DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,obj.m_maxLoadFactor));
+ if(m_tablespace_version != obj.m_tablespace_version)
+ {
+ DBUG_PRINT("info",("m_tablespace_version %d != %d",m_tablespace_version,
+ obj.m_tablespace_version));
+ DBUG_RETURN(false);
+ }
+
+ if(m_id != obj.m_id)
+ {
+ DBUG_PRINT("info",("m_id %d != %d",m_id,obj.m_id));
DBUG_RETURN(false);
}
+ if(m_version != obj.m_version)
+ {
+ DBUG_PRINT("info",("m_version %d != %d",m_version,obj.m_version));
+ DBUG_RETURN(false);
+ }
+
+ if(m_type != obj.m_type)
+ {
+ DBUG_PRINT("info",("m_type %d != %d",m_type,obj.m_type));
+ DBUG_RETURN(false);
+ }
+
+ if (m_type == NdbDictionary::Object::UniqueHashIndex ||
+ m_type == NdbDictionary::Object::OrderedIndex)
+ {
+ if(m_primaryTableId != obj.m_primaryTableId)
+ {
+ DBUG_PRINT("info",("m_primaryTableId %d != %d",m_primaryTableId,
+ obj.m_primaryTableId));
+ DBUG_RETURN(false);
+ }
+ if (m_indexType != obj.m_indexType)
+ {
+ DBUG_PRINT("info",("m_indexType %d != %d",m_indexType,obj.m_indexType));
+ DBUG_RETURN(false);
+ }
+ if(strcmp(m_primaryTable.c_str(), obj.m_primaryTable.c_str()) != 0)
+ {
+ DBUG_PRINT("info",("m_primaryTable %s != %s",
+ m_primaryTable.c_str(),obj.m_primaryTable.c_str()));
+ DBUG_RETURN(false);
+ }
+ }
DBUG_RETURN(true);
}
void
NdbTableImpl::assign(const NdbTableImpl& org)
{
+ /* m_changeMask intentionally not copied */
+ m_primaryTableId = org.m_primaryTableId;
m_internalName.assign(org.m_internalName);
updateMysqlName();
// If the name has been explicitly set, use that name
@@ -467,10 +609,21 @@ NdbTableImpl::assign(const NdbTableImpl& org)
else
m_externalName.assign(org.m_externalName);
m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
- m_ng.assign(org.m_ng.get_data(), org.m_ng.length());
- m_fragmentType = org.m_fragmentType;
- m_fragmentCount = org.m_fragmentCount;
+ m_ts_name.assign(org.m_ts_name.get_data(), org.m_ts_name.length());
+ m_new_ts_name.assign(org.m_new_ts_name.get_data(),
+ org.m_new_ts_name.length());
+ m_ts.assign(org.m_ts.get_data(), org.m_ts.length());
+ m_new_ts.assign(org.m_new_ts.get_data(), org.m_new_ts.length());
+ m_fd.assign(org.m_fd.get_data(), org.m_fd.length());
+ m_new_fd.assign(org.m_new_fd.get_data(), org.m_new_fd.length());
+ m_range.assign(org.m_range.get_data(), org.m_range.length());
+ m_new_range.assign(org.m_new_range.get_data(), org.m_new_range.length());
+ m_fragmentType = org.m_fragmentType;
+ /*
+ m_columnHashMask, m_columnHash, m_hashValueMask, m_hashpointerValue
+ is state calculated by computeAggregates and buildColumnHash
+ */
for(unsigned i = 0; i<org.m_columns.size(); i++){
NdbColumnImpl * col = new NdbColumnImpl();
const NdbColumnImpl * iorg = org.m_columns[i];
@@ -478,19 +631,30 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_columns.push_back(col);
}
+ m_fragments = org.m_fragments;
+
+ m_max_rows = org.m_max_rows;
+ m_default_no_part_flag = org.m_default_no_part_flag;
m_logging = org.m_logging;
+ m_row_gci = org.m_row_gci;
+ m_row_checksum = org.m_row_checksum;
m_kvalue = org.m_kvalue;
m_minLoadFactor = org.m_minLoadFactor;
m_maxLoadFactor = org.m_maxLoadFactor;
+ m_keyLenInWords = org.m_keyLenInWords;
+ m_fragmentCount = org.m_fragmentCount;
if (m_index != 0)
delete m_index;
m_index = org.m_index;
-
- m_noOfDistributionKeys = org.m_noOfDistributionKeys;
+
+ m_primaryTable = org.m_primaryTable;
+ m_indexType = org.m_indexType;
+
m_noOfKeys = org.m_noOfKeys;
- m_keyLenInWords = org.m_keyLenInWords;
+ m_noOfDistributionKeys = org.m_noOfDistributionKeys;
m_noOfBlobs = org.m_noOfBlobs;
+ m_replicaCount = org.m_replicaCount;
m_id = org.m_id;
m_version = org.m_version;
@@ -575,6 +739,39 @@ NdbTableImpl::computeAggregates()
}
}
+const void*
+NdbTableImpl::getTablespaceNames() const
+{
+ if (m_new_ts_name.empty())
+ return m_ts_name.get_data();
+ else
+ return m_new_ts_name.get_data();
+}
+
+Uint32
+NdbTableImpl::getTablespaceNamesLen() const
+{
+ if (m_new_ts_name.empty())
+ return m_ts_name.length();
+ else
+ return m_new_ts_name.length();
+}
+
+void NdbTableImpl::setTablespaceNames(const void *data, Uint32 len)
+{
+ m_new_ts_name.assign(data, len);
+}
+
+void NdbTableImpl::setFragmentCount(Uint32 count)
+{
+ m_fragmentCount= count;
+}
+
+Uint32 NdbTableImpl::getFragmentCount() const
+{
+ return m_fragmentCount;
+}
+
void NdbTableImpl::setFrm(const void* data, Uint32 len)
{
m_newFrm.assign(data, len);
@@ -598,6 +795,75 @@ NdbTableImpl::getFrmLength() const
return m_newFrm.length();
}
+void NdbTableImpl::setFragmentData(const void* data, Uint32 len)
+{
+ m_new_fd.assign(data, len);
+}
+
+const void *
+NdbTableImpl::getFragmentData() const
+{
+ if (m_new_fd.empty())
+ return m_fd.get_data();
+ else
+ return m_new_fd.get_data();
+}
+
+Uint32
+NdbTableImpl::getFragmentDataLen() const
+{
+ if (m_new_fd.empty())
+ return m_fd.length();
+ else
+ return m_new_fd.length();
+}
+
+void NdbTableImpl::setTablespaceData(const void* data, Uint32 len)
+{
+ m_new_ts.assign(data, len);
+}
+
+const void *
+NdbTableImpl::getTablespaceData() const
+{
+ if (m_new_ts.empty())
+ return m_ts.get_data();
+ else
+ return m_new_ts.get_data();
+}
+
+Uint32
+NdbTableImpl::getTablespaceDataLen() const
+{
+ if (m_new_ts.empty())
+ return m_ts.length();
+ else
+ return m_new_ts.length();
+}
+
+void NdbTableImpl::setRangeListData(const void* data, Uint32 len)
+{
+ m_new_range.assign(data, len);
+}
+
+const void *
+NdbTableImpl::getRangeListData() const
+{
+ if (m_new_range.empty())
+ return m_range.get_data();
+ else
+ return m_new_range.get_data();
+}
+
+Uint32
+NdbTableImpl::getRangeListDataLen() const
+{
+ if (m_new_range.empty())
+ return m_range.length();
+ else
+ return m_new_range.length();
+}
+
void
NdbTableImpl::updateMysqlName()
{
@@ -1512,59 +1778,82 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
const Uint32 * data, Uint32 len,
bool fullyQualifiedNames)
{
- DBUG_ENTER("NdbDictInterface::parseTableInfo");
-
SimplePropertiesLinearReader it(data, len);
- DictTabInfo::Table tableDesc; tableDesc.init();
+ DictTabInfo::Table *tableDesc;
SimpleProperties::UnpackStatus s;
- s = SimpleProperties::unpack(it, &tableDesc,
+ DBUG_ENTER("NdbDictInterface::parseTableInfo");
+
+ tableDesc = (DictTabInfo::Table*)NdbMem_Allocate(sizeof(DictTabInfo::Table));
+ if (!tableDesc)
+ {
+ DBUG_RETURN(4000);
+ }
+ tableDesc->init();
+ s = SimpleProperties::unpack(it, tableDesc,
DictTabInfo::TableMapping,
DictTabInfo::TableMappingSize,
true, true);
if(s != SimpleProperties::Break){
+ NdbMem_Free((void*)tableDesc);
DBUG_RETURN(703);
}
- const char * internalName = tableDesc.TableName;
+ const char * internalName = tableDesc->TableName;
const char * externalName = Ndb::externalizeTableName(internalName, fullyQualifiedNames);
NdbTableImpl * impl = new NdbTableImpl();
- impl->m_id = tableDesc.TableId;
- impl->m_version = tableDesc.TableVersion;
+ impl->m_id = tableDesc->TableId;
+ impl->m_version = tableDesc->TableVersion;
impl->m_status = NdbDictionary::Object::Retrieved;
impl->m_internalName.assign(internalName);
impl->updateMysqlName();
impl->m_externalName.assign(externalName);
- impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen);
- impl->m_ng.assign(tableDesc.FragmentData, tableDesc.FragmentDataLen);
+ impl->m_frm.assign(tableDesc->FrmData, tableDesc->FrmLen);
+ impl->m_fd.assign(tableDesc->FragmentData, tableDesc->FragmentDataLen);
+ impl->m_range.assign(tableDesc->RangeListData, tableDesc->RangeListDataLen);
+ impl->m_fragmentCount = tableDesc->FragmentCount;
+
+ /*
+ We specifically don't get tablespace data and range/list arrays here
+ since those are known by the MySQL Server through analysing the
+ frm file.
+ Fragment Data contains the real node group mapping and the fragment
+ identities used for each fragment. At the moment we have no need for
+ this.
+ Frm file is needed for autodiscovery.
+ */
impl->m_fragmentType = (NdbDictionary::Object::FragmentType)
- getApiConstant(tableDesc.FragmentType,
+ getApiConstant(tableDesc->FragmentType,
fragmentTypeMapping,
(Uint32)NdbDictionary::Object::FragUndefined);
- impl->m_logging = tableDesc.TableLoggedFlag;
- impl->m_row_gci = tableDesc.RowGCIFlag;
- impl->m_row_checksum = tableDesc.RowChecksumFlag;
- impl->m_kvalue = tableDesc.TableKValue;
- impl->m_minLoadFactor = tableDesc.MinLoadFactor;
- impl->m_maxLoadFactor = tableDesc.MaxLoadFactor;
+ Uint64 max_rows = ((Uint64)tableDesc->MaxRowsHigh) << 32;
+ max_rows += tableDesc->MaxRowsLow;
+ impl->m_max_rows = max_rows;
+ impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag;
+ impl->m_logging = tableDesc->TableLoggedFlag;
+ impl->m_row_gci = tableDesc->RowGCIFlag;
+ impl->m_row_checksum = tableDesc->RowChecksumFlag;
+ impl->m_kvalue = tableDesc->TableKValue;
+ impl->m_minLoadFactor = tableDesc->MinLoadFactor;
+ impl->m_maxLoadFactor = tableDesc->MaxLoadFactor;
impl->m_indexType = (NdbDictionary::Object::Type)
- getApiConstant(tableDesc.TableType,
+ getApiConstant(tableDesc->TableType,
indexTypeMapping,
NdbDictionary::Object::TypeUndefined);
if(impl->m_indexType == NdbDictionary::Object::TypeUndefined){
} else {
const char * externalPrimary =
- Ndb::externalizeTableName(tableDesc.PrimaryTable, fullyQualifiedNames);
+ Ndb::externalizeTableName(tableDesc->PrimaryTable, fullyQualifiedNames);
impl->m_primaryTable.assign(externalPrimary);
}
Uint32 i;
- for(i = 0; i < tableDesc.NoOfAttributes; i++) {
+ for(i = 0; i < tableDesc->NoOfAttributes; i++) {
DictTabInfo::Attribute attrDesc; attrDesc.init();
s = SimpleProperties::unpack(it,
&attrDesc,
@@ -1573,6 +1862,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
true, true);
if(s != SimpleProperties::Break){
delete impl;
+ NdbMem_Free((void*)tableDesc);
DBUG_RETURN(703);
}
@@ -1583,6 +1873,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
// check type and compute attribute size and array size
if (! attrDesc.translateExtType()) {
delete impl;
+ NdbMem_Free((void*)tableDesc);
DBUG_RETURN(703);
}
col->m_type = (NdbDictionary::Column::Type)attrDesc.AttributeExtType;
@@ -1594,12 +1885,14 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
// charset is defined exactly for char types
if (col->getCharType() != (cs_number != 0)) {
delete impl;
+ NdbMem_Free((void*)tableDesc);
DBUG_RETURN(703);
}
if (col->getCharType()) {
col->m_cs = get_charset(cs_number, MYF(0));
if (col->m_cs == NULL) {
delete impl;
+ NdbMem_Free((void*)tableDesc);
DBUG_RETURN(743);
}
}
@@ -1627,17 +1920,17 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->computeAggregates();
- if(tableDesc.FragmentDataLen > 0)
+ if(tableDesc->ReplicaDataLen > 0)
{
- Uint16 replicaCount = tableDesc.FragmentData[0];
- Uint16 fragCount = tableDesc.FragmentData[1];
+ Uint16 replicaCount = tableDesc->ReplicaData[0];
+ Uint16 fragCount = tableDesc->ReplicaData[1];
impl->m_replicaCount = replicaCount;
impl->m_fragmentCount = fragCount;
DBUG_PRINT("info", ("replicaCount=%x , fragCount=%x",replicaCount,fragCount));
for(i = 0; i < (Uint32) (fragCount*replicaCount); i++)
{
- impl->m_fragments.push_back(tableDesc.FragmentData[i+2]);
+ impl->m_fragments.push_back(tableDesc->ReplicaData[i+2]);
}
Uint32 topBit = (1 << 31);
@@ -1649,17 +1942,18 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
}
else
{
- impl->m_fragmentCount = tableDesc.FragmentCount;
+ impl->m_fragmentCount = tableDesc->FragmentCount;
impl->m_replicaCount = 0;
impl->m_hashValueMask = 0;
impl->m_hashpointerValue = 0;
}
- impl->m_tablespace_id = tableDesc.TablespaceId;
- impl->m_tablespace_version = tableDesc.TablespaceVersion;
+ impl->m_tablespace_id = tableDesc->TablespaceId;
+ impl->m_tablespace_version = tableDesc->TablespaceVersion;
* ret = impl;
+ NdbMem_Free((void*)tableDesc);
DBUG_ASSERT(impl->m_fragmentCount > 0);
DBUG_RETURN(0);
}
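
For reference, the ReplicaData layout consumed above: word 0 is the replica count, word 1 the fragment count, followed by one Uint16 entry per (fragment, replica). A condensed decode, equivalent to the loop in parseTableInfo:

  const Uint16* rd = tableDesc->ReplicaData;
  Uint16 replicaCount = rd[0];
  Uint16 fragCount    = rd[1];
  for (Uint32 i = 0; i < (Uint32)(fragCount * replicaCount); i++)
    impl->m_fragments.push_back(rd[i + 2]);
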
@@ -1800,8 +2094,9 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
NdbTableImpl & impl,
bool alter)
{
- DBUG_ENTER("NdbDictInterface::createOrAlterTable");
unsigned i;
+ char *ts_names[MAX_NDB_PARTITIONS];
+ DBUG_ENTER("NdbDictInterface::createOrAlterTable");
impl.computeAggregates();
@@ -1827,7 +2122,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
impl.m_newExternalName.clear();
}
// Definition change (frm)
- if (!impl.m_newFrm.empty()) {
+ if (!impl.m_newFrm.empty())
+ {
if (alter)
{
AlterTableReq::setFrmFlag(impl.m_changeMask, true);
@@ -1835,6 +2131,55 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
impl.m_frm.assign(impl.m_newFrm.get_data(), impl.m_newFrm.length());
impl.m_newFrm.clear();
}
+ // Change FragmentData (fragment identity, state, tablespace id)
+ if (!impl.m_new_fd.empty())
+ {
+ if (alter)
+ {
+ AlterTableReq::setFragDataFlag(impl.m_changeMask, true);
+ }
+ impl.m_fd.assign(impl.m_new_fd.get_data(), impl.m_new_fd.length());
+ impl.m_new_fd.clear();
+ }
+ // Change Tablespace Name Data
+ if (!impl.m_new_ts_name.empty())
+ {
+ if (alter)
+ {
+ AlterTableReq::setTsNameFlag(impl.m_changeMask, true);
+ }
+ impl.m_ts_name.assign(impl.m_new_ts_name.get_data(),
+ impl.m_new_ts_name.length());
+ impl.m_new_ts_name.clear();
+ }
+ // Change Range/List Data
+ if (!impl.m_new_range.empty())
+ {
+ if (alter)
+ {
+ AlterTableReq::setRangeListFlag(impl.m_changeMask, true);
+ }
+ impl.m_range.assign(impl.m_new_range.get_data(),
+ impl.m_new_range.length());
+ impl.m_new_range.clear();
+ }
+ // Change Tablespace Data
+ if (!impl.m_new_ts.empty())
+ {
+ if (alter)
+ {
+ AlterTableReq::setTsFlag(impl.m_changeMask, true);
+ }
+ impl.m_ts.assign(impl.m_new_ts.get_data(),
+ impl.m_new_ts.length());
+ impl.m_new_ts.clear();
+ }
+
+
+ /*
+ TODO RONM: Here I need to insert checks for fragment array and
+ range or list array
+ */
//validate();
//aggregate();
@@ -1843,10 +2188,17 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
ndb.internalize_table_name(impl.m_externalName.c_str()));
impl.m_internalName.assign(internalName);
impl.updateMysqlName();
- DictTabInfo::Table tmpTab;
- tmpTab.init();
- BaseString::snprintf(tmpTab.TableName,
- sizeof(tmpTab.TableName),
+ DictTabInfo::Table *tmpTab;
+
+ tmpTab = (DictTabInfo::Table*)NdbMem_Allocate(sizeof(DictTabInfo::Table));
+ if (!tmpTab)
+ {
+ m_error.code = 4000;
+ DBUG_RETURN(-1);
+ }
+ tmpTab->init();
+ BaseString::snprintf(tmpTab->TableName,
+ sizeof(tmpTab->TableName),
internalName.c_str());
bool haveAutoIncrement = false;
@@ -1859,6 +2211,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
if (col->m_autoIncrement) {
if (haveAutoIncrement) {
m_error.code= 4335;
+ NdbMem_Free((void*)tmpTab);
DBUG_RETURN(-1);
}
haveAutoIncrement = true;
@@ -1877,35 +2230,88 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
// Check max length of frm data
if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){
m_error.code= 1229;
+ NdbMem_Free((void*)tmpTab);
DBUG_RETURN(-1);
}
- tmpTab.FrmLen = impl.m_frm.length();
- memcpy(tmpTab.FrmData, impl.m_frm.get_data(), impl.m_frm.length());
- tmpTab.FragmentDataLen = impl.m_ng.length();
- memcpy(tmpTab.FragmentData, impl.m_ng.get_data(), impl.m_ng.length());
-
- tmpTab.TableLoggedFlag = impl.m_logging;
- tmpTab.RowGCIFlag = impl.m_row_gci;
- tmpTab.RowChecksumFlag = impl.m_row_checksum;
- tmpTab.TableLoggedFlag = impl.m_logging;
- tmpTab.TableKValue = impl.m_kvalue;
- tmpTab.MinLoadFactor = impl.m_minLoadFactor;
- tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
- tmpTab.TableType = DictTabInfo::UserTable;
- tmpTab.PrimaryTableId = impl.m_primaryTableId;
- tmpTab.NoOfAttributes = sz;
-
- tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
- fragmentTypeMapping,
- DictTabInfo::AllNodesSmallTable);
- tmpTab.TableVersion = rand();
+ /*
+ TODO RONM: This needs to change to dynamic arrays instead
+ Frm Data, FragmentData, TablespaceData, RangeListData, TsNameData
+ */
+ tmpTab->FrmLen = impl.m_frm.length();
+ memcpy(tmpTab->FrmData, impl.m_frm.get_data(), impl.m_frm.length());
+
+ tmpTab->FragmentDataLen = impl.m_fd.length();
+ memcpy(tmpTab->FragmentData, impl.m_fd.get_data(), impl.m_fd.length());
+
+ tmpTab->TablespaceDataLen = impl.m_ts.length();
+ memcpy(tmpTab->TablespaceData, impl.m_ts.get_data(), impl.m_ts.length());
+
+ tmpTab->RangeListDataLen = impl.m_range.length();
+ memcpy(tmpTab->RangeListData, impl.m_range.get_data(),
+ impl.m_range.length());
+
+ memcpy(ts_names, impl.m_ts_name.get_data(),
+ impl.m_ts_name.length());
+
+ tmpTab->FragmentCount= impl.m_fragmentCount;
+ tmpTab->TableLoggedFlag = impl.m_logging;
+ tmpTab->RowGCIFlag = impl.m_row_gci;
+ tmpTab->RowChecksumFlag = impl.m_row_checksum;
+ tmpTab->TableKValue = impl.m_kvalue;
+ tmpTab->MinLoadFactor = impl.m_minLoadFactor;
+ tmpTab->MaxLoadFactor = impl.m_maxLoadFactor;
+ tmpTab->TableType = DictTabInfo::UserTable;
+ tmpTab->PrimaryTableId = impl.m_primaryTableId;
+ tmpTab->NoOfAttributes = sz;
+ tmpTab->MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
+ tmpTab->MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
+ tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag;
+
+ if (impl.m_ts_name.length())
+ {
+ char **ts_name_ptr= (char**)ts_names;
+ i= 0;
+ do
+ {
+ NdbTablespaceImpl tmp;
+ if (*ts_name_ptr)
+ {
+ if(get_filegroup(tmp, NdbDictionary::Object::Tablespace,
+ (const char*)*ts_name_ptr) == 0)
+ {
+ tmpTab->TablespaceData[2*i] = tmp.m_id;
+ tmpTab->TablespaceData[2*i + 1] = tmp.m_version;
+ }
+ else
+ {
+ NdbMem_Free((void*)tmpTab);
+ DBUG_RETURN(-1);
+ }
+ }
+ else
+ {
+ /*
+ No tablespace used, set tablespace id to NULL
+ */
+ tmpTab->TablespaceData[2*i] = RNIL;
+ tmpTab->TablespaceData[2*i + 1] = 0;
+ }
+ ts_name_ptr++;
+ } while (++i < tmpTab->FragmentCount);
+ tmpTab->TablespaceDataLen= 4*i;
+ }
- const char* tablespace_name= impl.m_tablespace_name.c_str();
+ tmpTab->FragmentType = getKernelConstant(impl.m_fragmentType,
+ fragmentTypeMapping,
+ DictTabInfo::AllNodesSmallTable);
+ tmpTab->TableVersion = rand();
+
+ const char *tablespace_name= impl.m_tablespace_name.c_str();
loop:
if(impl.m_tablespace_id != ~(Uint32)0)
{
- tmpTab.TablespaceId = impl.m_tablespace_id;
- tmpTab.TablespaceVersion = impl.m_tablespace_version;
+ tmpTab->TablespaceId = impl.m_tablespace_id;
+ tmpTab->TablespaceVersion = impl.m_tablespace_version;
}
else if(strlen(tablespace_name))
{
@@ -1913,13 +2319,14 @@ loop:
if(get_filegroup(tmp, NdbDictionary::Object::Tablespace,
tablespace_name) == 0)
{
- tmpTab.TablespaceId = tmp.m_id;
- tmpTab.TablespaceVersion = tmp.m_version;
+ tmpTab->TablespaceId = tmp.m_id;
+ tmpTab->TablespaceVersion = tmp.m_version;
}
else
{
// error set by get filegroup
- return -1;
+ NdbMem_Free((void*)tmpTab);
+ DBUG_RETURN(-1);
}
}
else
@@ -1937,13 +2344,14 @@ loop:
UtilBufferWriter w(m_buffer);
SimpleProperties::UnpackStatus s;
s = SimpleProperties::pack(w,
- &tmpTab,
+ tmpTab,
DictTabInfo::TableMapping,
DictTabInfo::TableMappingSize, true);
if(s != SimpleProperties::Eof){
abort();
}
+ NdbMem_Free((void*)tmpTab);
DBUG_PRINT("info",("impl.m_noOfDistributionKeys: %d impl.m_noOfKeys: %d distKeys: %d",
impl.m_noOfDistributionKeys, impl.m_noOfKeys, distKeys));
@@ -2053,7 +2461,7 @@ loop:
if(m_error.code == AlterTableRef::InvalidTableVersion) {
// Clear caches and try again
- return INCOMPATIBLE_VERSION;
+ DBUG_RETURN(INCOMPATIBLE_VERSION);
}
} else {
tSignal.theVerId_signalNumber = GSN_CREATE_TABLE_REQ;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index c69172cd489..9408d0036a6 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -120,9 +120,24 @@ public:
void init();
void setName(const char * name);
const char * getName() const;
+ void setFragmentCount(Uint32 count);
+ Uint32 getFragmentCount() const;
void setFrm(const void* data, Uint32 len);
const void * getFrmData() const;
Uint32 getFrmLength() const;
+ void setFragmentData(const void* data, Uint32 len);
+ const void * getFragmentData() const;
+ Uint32 getFragmentDataLen() const;
+ void setTablespaceNames(const void* data, Uint32 len);
+ Uint32 getTablespaceNamesLen() const;
+ const void * getTablespaceNames() const;
+ void setTablespaceData(const void* data, Uint32 len);
+ const void * getTablespaceData() const;
+ Uint32 getTablespaceDataLen() const;
+ void setRangeListData(const void* data, Uint32 len);
+ const void * getRangeListData() const;
+ Uint32 getRangeListDataLen() const;
+
const char * getMysqlName() const;
void updateMysqlName();
@@ -133,8 +148,15 @@ public:
BaseString m_mysqlName;
BaseString m_newExternalName; // Used for alter table
UtilBuffer m_frm;
- UtilBuffer m_newFrm; // Used for alter table
- UtilBuffer m_ng;
+ UtilBuffer m_newFrm; // Used for alter table
+ UtilBuffer m_ts_name; //Tablespace Names
+ UtilBuffer m_new_ts_name; //Tablespace Names
+ UtilBuffer m_ts; //TablespaceData
+ UtilBuffer m_new_ts; //TablespaceData
+ UtilBuffer m_fd; //FragmentData
+ UtilBuffer m_new_fd; //FragmentData
+ UtilBuffer m_range; //Range Or List Array
+ UtilBuffer m_new_range; //Range Or List Array
NdbDictionary::Object::FragmentType m_fragmentType;
/**
@@ -153,6 +175,8 @@ public:
Uint32 m_hashpointerValue;
Vector<Uint16> m_fragments;
+ Uint64 m_max_rows;
+ Uint32 m_default_no_part_flag;
bool m_logging;
bool m_row_gci;
bool m_row_checksum;
@@ -162,7 +186,6 @@ public:
Uint16 m_keyLenInWords;
Uint16 m_fragmentCount;
- NdbDictionaryImpl * m_dictionary;
NdbIndexImpl * m_index;
NdbColumnImpl * getColumn(unsigned attrId);
NdbColumnImpl * getColumn(const char * name);
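
All the new buffers come in committed/pending pairs (m_fd/m_new_fd and so on): the public setters stage data in the m_new_* member, the getters fall back to the committed member while the staged one is empty, and createOrAlterTable() promotes staged to committed while raising the matching AlterTableReq change bit. A hypothetical alter flow on top of that, assuming the usual Dictionary::alterTable() path:

  NdbDictionary::Dictionary* dict = ndb->getDictionary();
  NdbDictionary::Table tab(*dict->getTable("t1"));    // work on a copy
  Uint16 newRanges[] = { 100, 0, 200, 1 };            // made-up (value, fragment) pairs
  tab.setRangeListData(newRanges, sizeof(newRanges)); // staged in m_new_range only
  if (dict->alterTable(tab) != 0)
    ndbout << dict->getNdbError() << endl;
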
diff --git a/storage/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp
index 87065e754b8..a09f6d7c9c8 100644
--- a/storage/ndb/test/ndbapi/test_event.cpp
+++ b/storage/ndb/test/ndbapi/test_event.cpp
@@ -334,7 +334,7 @@ int runCreateShadowTable(NDBT_Context* ctx, NDBT_Step* step)
table_shadow.setName(buf);
// TODO should be removed
// This should work wo/ next line
- table_shadow.setNodeGroupIds(0, 0);
+ //table_shadow.setNodeGroupIds(0, 0);
GETNDB(step)->getDictionary()->createTable(table_shadow);
if (GETNDB(step)->getDictionary()->getTable(buf))
return NDBT_OK;
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index c60cf782fc8..881217aee2e 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -336,7 +336,7 @@ RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
return false;
debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
- tableImpl->m_ng.clear();
+ tableImpl->m_fd.clear();
tableImpl->m_fragmentType = NdbDictionary::Object::FragAllSmall;
TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
if(table == NULL) {