-rw-r--r--  mysql-test/r/partition_innodb_stmt.result |  48
-rw-r--r--  mysql-test/t/partition_innodb_stmt.test   |  59
-rw-r--r--  sql/ha_partition.cc                        | 110
-rw-r--r--  sql/ha_partition.h                         |  50
-rw-r--r--  sql/handler.cc                             |   2
-rw-r--r--  sql/handler.h                              |   3
6 files changed, 221 insertions, 51 deletions
diff --git a/mysql-test/r/partition_innodb_stmt.result b/mysql-test/r/partition_innodb_stmt.result
new file mode 100644
index 00000000000..2735b0d2193
--- /dev/null
+++ b/mysql-test/r/partition_innodb_stmt.result
@@ -0,0 +1,48 @@
+# connection default
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+CREATE TABLE t1
+(
+id SMALLINT NOT NULL,
+PRIMARY KEY (id)
+) ENGINE=innodb
+PARTITION BY RANGE (id)
+(
+PARTITION p1 VALUES LESS THAN (2),
+PARTITION p2 VALUES LESS THAN (4),
+PARTITION p3 VALUES LESS THAN (10)
+);
+INSERT INTO t1 VALUES (1),(2),(3);
+# Test READ COMMITTED -> REPEATABLE READ
+FLUSH TABLES;
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t1;
+id
+1
+2
+3
+#connection con1
+SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+INSERT INTO t1 VALUES(7);
+COMMIT;
+# connection default
+COMMIT;
+FLUSH TABLES;
+# Test REPEATABLE READ -> READ COMMITTED
+SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t1;
+id
+1
+2
+3
+7
+# connection con1
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+INSERT INTO t1 VALUES(9);
+ERROR HY000: Binary logging not possible. Message: Transaction level 'READ-COMMITTED' in InnoDB is not safe for binlog mode 'STATEMENT'
+COMMIT;
+COMMIT;
+DROP TABLE t1;
diff --git a/mysql-test/t/partition_innodb_stmt.test b/mysql-test/t/partition_innodb_stmt.test
new file mode 100644
index 00000000000..2ed5baee0ac
--- /dev/null
+++ b/mysql-test/t/partition_innodb_stmt.test
@@ -0,0 +1,59 @@
+--source include/have_binlog_format_statement.inc
+--source include/have_innodb.inc
+
+--echo # connection default
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+
+CREATE TABLE t1
+(
+  id SMALLINT NOT NULL,
+  PRIMARY KEY (id)
+) ENGINE=innodb
+PARTITION BY RANGE (id)
+(
+  PARTITION p1 VALUES LESS THAN (2),
+  PARTITION p2 VALUES LESS THAN (4),
+  PARTITION p3 VALUES LESS THAN (10)
+);
+
+INSERT INTO t1 VALUES (1),(2),(3);
+
+--echo # Test READ COMMITTED -> REPEATABLE READ
+FLUSH TABLES;
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+SELECT * FROM t1;
+
+connect (con1, localhost, root,,);
+connection con1;
+
+--echo #connection con1
+SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+INSERT INTO t1 VALUES(7);
+COMMIT;
+
+connection default;
+--echo # connection default
+COMMIT;
+
+FLUSH TABLES;
+
+--echo # Test REPEATABLE READ -> READ COMMITTED
+SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+BEGIN;
+SELECT * FROM t1;
+
+connection con1;
+
+--echo # connection con1
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+BEGIN;
+--error ER_BINLOG_LOGGING_IMPOSSIBLE
+INSERT INTO t1 VALUES(9);
+COMMIT;
+
+disconnect con1;
+connection default;
+COMMIT;
+DROP TABLE t1;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 4fd8a0f67a9..f2e63771467
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -107,7 +107,7 @@ static handler *partition_create_handler(handlerton *hton,
                                          MEM_ROOT *mem_root)
 {
   ha_partition *file= new (mem_root) ha_partition(hton, share);
-  if (file && file->initialise_partition(mem_root))
+  if (file && file->initialize_partition(mem_root))
   {
     delete file;
     file= 0;
@@ -160,8 +160,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF;
 
 ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
   :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE),
-  m_is_sub_partitioned(0), is_clone(FALSE), auto_increment_lock(FALSE),
-  auto_increment_safe_stmt_log_lock(FALSE)
+  m_is_sub_partitioned(0)
 {
   DBUG_ENTER("ha_partition::ha_partition(table)");
   init_handler_variables();
@@ -181,10 +180,8 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
 */
 
 ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
-  :handler(hton, NULL), m_part_info(part_info),
-  m_create_handler(TRUE),
-  m_is_sub_partitioned(m_part_info->is_sub_partitioned()), is_clone(FALSE),
-  auto_increment_lock(FALSE), auto_increment_safe_stmt_log_lock(FALSE)
+  :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE),
+  m_is_sub_partitioned(m_part_info->is_sub_partitioned())
 {
   DBUG_ENTER("ha_partition::ha_partition(part_info)");
   init_handler_variables();
@@ -194,7 +191,7 @@ ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
 
 
 /*
-  Initialise handler object
+  Initialize handler object
 
   SYNOPSIS
     init_handler_variables()
@@ -231,7 +228,7 @@ void ha_partition::init_handler_variables()
   m_innodb= FALSE;
   m_extra_cache= FALSE;
   m_extra_cache_size= 0;
-  m_table_flags= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+  m_handler_status= handler_not_initialized;
   m_low_byte_first= 1;
   m_part_field_array= NULL;
   m_ordered_rec_buffer= NULL;
@@ -241,6 +238,9 @@ void ha_partition::init_handler_variables()
   m_rec0= 0;
   m_curr_key_info[0]= NULL;
   m_curr_key_info[1]= NULL;
+  is_clone= FALSE,
+  auto_increment_lock= FALSE;
+  auto_increment_safe_stmt_log_lock= FALSE;
   /*
     this allows blackhole to work properly
   */
@@ -287,10 +287,10 @@ ha_partition::~ha_partition()
 
 
 /*
-  Initialise partition handler object
+  Initialize partition handler object
 
   SYNOPSIS
-    initialise_partition()
+    initialize_partition()
     mem_root                    Allocate memory through this
 
   RETURN VALUE
@@ -320,8 +320,8 @@ ha_partition::~ha_partition()
       normal storage engine
     The flag HA_FILE_BASED will be set independent of the underlying handlers
   4) Index flags initialisation
-    When knowledge exists on the indexes it is also possible to initialise the
-    index flags. Again the index flags must be initialised by using the under-
+    When knowledge exists on the indexes it is also possible to initialize the
+    index flags. Again the index flags must be initialized by using the under-
     lying handlers since this is storage engine dependent. The flag
     HA_READ_ORDER will be reset for the time being to indicate no ordered
     output is available from partition handler indexes. Later a merge
@@ -331,10 +331,11 @@ ha_partition::~ha_partition()
 */
 
 
-bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
+bool ha_partition::initialize_partition(MEM_ROOT *mem_root)
 {
   handler **file_array, *file;
-  DBUG_ENTER("ha_partition::initialise_partition");
+  ulonglong check_table_flags;
+  DBUG_ENTER("ha_partition::initialize_partition");
 
   if (m_create_handler)
   {
@@ -346,11 +347,9 @@ bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
   else if (!table_share || !table_share->normalized_path.str)
   {
     /*
-      Called with dummy table share (delete, rename and alter table)
-      Don't need to set-up table flags other than
-      HA_FILE_BASED here
+      Called with dummy table share (delete, rename and alter table).
+      Don't need to set-up anything.
     */
-    m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
     DBUG_RETURN(0);
   }
   else if (get_from_handler_file(table_share->normalized_path.str, mem_root))
@@ -362,15 +361,12 @@ bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
     We create all underlying table handlers here. We do it in this special
     method to be able to report allocation errors.
 
-    Set up table_flags, low_byte_first, primary_key_is_clustered and
+    Set up low_byte_first, primary_key_is_clustered and
     has_transactions since they are called often in all kinds of places,
     other parameters are calculated on demand.
-    HA_FILE_BASED is always set for partition handler since we use a
-    special file for handling names of partitions, engine types.
-    HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPLICATE_POS,
-    HA_CAN_INSERT_DELAYED is disabled until further investigated.
+    Verify that all partitions have the same table_flags.
   */
-  m_table_flags= (ulong)m_file[0]->ha_table_flags();
+  check_table_flags= m_file[0]->ha_table_flags();
   m_low_byte_first= m_file[0]->low_byte_first();
   m_pkey_is_clustered= TRUE;
   file_array= m_file;
@@ -385,12 +381,13 @@ bool ha_partition::initialise_partition(MEM_ROOT *mem_root)
     }
     if (!file->primary_key_is_clustered())
       m_pkey_is_clustered= FALSE;
-    m_table_flags&= file->ha_table_flags();
+    if (check_table_flags != file->ha_table_flags())
+    {
+      my_error(ER_MIX_HANDLER_ERROR, MYF(0));
+      DBUG_RETURN(1);
+    }
   } while (*(++file_array));
-  m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
-                    HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED |
-                    HA_PRIMARY_KEY_REQUIRED_FOR_POSITION);
-  m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
+  m_handler_status= handler_initialized;
   DBUG_RETURN(0);
 }
 
@@ -2399,6 +2396,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
   handler **file;
   char name_buff[FN_REFLEN];
   bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE);
+  ulonglong check_table_flags= 0;
   DBUG_ENTER("ha_partition::open");
 
   DBUG_ASSERT(table->s == table_share);
@@ -2438,7 +2436,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     }
   }
 
-  /* Initialise the bitmap we use to determine what partitions are used */
+  /* Initialize the bitmap we use to determine what partitions are used */
   if (!is_clone)
   {
     if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
@@ -2446,8 +2444,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     bitmap_set_all(&(m_part_info->used_partitions));
   }
 
-  /* Recalculate table flags as they may change after open */
-  m_table_flags= m_file[0]->ha_table_flags();
   file= m_file;
   do
   {
@@ -2459,11 +2455,26 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     m_no_locks+= (*file)->lock_count();
     name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
     set_if_bigger(ref_length, ((*file)->ref_length));
-    m_table_flags&= (*file)->ha_table_flags();
+    /*
+      Verify that all partitions have the same set of table flags.
+      Mask all flags that partitioning enables/disables.
+    */
+    if (!check_table_flags)
+    {
+      check_table_flags= (((*file)->ha_table_flags() &
+                           ~(PARTITION_DISABLED_TABLE_FLAGS)) |
+                          (PARTITION_ENABLED_TABLE_FLAGS));
+    }
+    else if (check_table_flags != (((*file)->ha_table_flags() &
+                                    ~(PARTITION_DISABLED_TABLE_FLAGS)) |
+                                   (PARTITION_ENABLED_TABLE_FLAGS)))
+    {
+      DBUG_PRINT("error", ("check_table_flag 0x%x != 0x%x table_flags()",
+                           check_table_flags, (*file)->ha_table_flags()));
+      error= HA_ERR_INITIALIZATION;
+      goto err_handler;
+    }
   } while (*(++file));
-  m_table_flags&= ~(HA_CAN_GEOMETRY | HA_CAN_FULLTEXT | HA_DUPLICATE_POS |
-                    HA_CAN_SQL_HANDLER | HA_CAN_INSERT_DELAYED);
-  m_table_flags|= HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
   key_used_on_scan= m_file[0]->key_used_on_scan;
   implicit_emptied= m_file[0]->implicit_emptied;
   /*
@@ -2478,7 +2489,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
   */
   clear_handler_file();
   /*
-    Initialise priority queue, initialised to reading forward.
+    Initialize priority queue, initialized to reading forward.
   */
   if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS,
                          0, key_rec_cmp, (void*)this)))
@@ -2514,6 +2525,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     to ensure we have correct statistics we call info from open after
     calling open on all individual handlers.
   */
+  m_handler_status= handler_opened;
   info(HA_STATUS_VARIABLE | HA_STATUS_CONST);
 
   DBUG_RETURN(0);
@@ -2584,6 +2596,7 @@ repeat:
     goto repeat;
   }
 
+  m_handler_status= handler_closed;
   DBUG_RETURN(0);
 }
 
@@ -3571,7 +3584,7 @@ int ha_partition::rnd_pos_by_record(uchar *record)
 */
 
 /*
-  Initialise handler before start of index scan
+  Initialize handler before start of index scan
 
   SYNOPSIS
     index_init()
@@ -4456,7 +4469,7 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
     {
       found= TRUE;
       /*
-        Initialise queue without order first, simply insert
+        Initialize queue without order first, simply insert
       */
       queue_element(&m_queue, j++)= (uchar*)queue_buf(i);
     }
@@ -4777,7 +4790,7 @@ int ha_partition::info(uint flag)
      }
    } while (*(++file_array));
    if (stats.records < 2 &&
-       !(m_table_flags & HA_STATS_RECORDS_IS_EXACT))
+       !(m_file[0]->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT))
      stats.records= 2;
    if (stats.records > 0)
      stats.mean_rec_length= (ulong) (stats.data_file_length / stats.records);
@@ -5151,7 +5164,7 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
   5) Parameters only used by MyISAM internally
   --------------------------------------------
   HA_EXTRA_REINIT_CACHE:
-    This call reinitialises the READ CACHE described above if there is one
+    This call reinitializes the READ CACHE described above if there is one
     and otherwise the call is ignored.
 
     We can thus safely call it on all underlying handlers if they are
@@ -5233,7 +5246,6 @@ int ha_partition::extra(enum ha_extra_function operation)
     break;
   case HA_EXTRA_NORMAL:
   case HA_EXTRA_QUICK:
-  case HA_EXTRA_NO_READCHECK:
   case HA_EXTRA_PREPARE_FOR_UPDATE:
   case HA_EXTRA_FORCE_REOPEN:
   case HA_EXTRA_PREPARE_FOR_DROP:
@@ -5243,6 +5255,14 @@ int ha_partition::extra(enum ha_extra_function operation)
     DBUG_RETURN(loop_extra(operation));
     break;
   }
+  case HA_EXTRA_NO_READCHECK:
+  {
+    /*
+      This is only done as a part of ha_open, which is also used in
+      ha_partition::open, so no need to do anything.
+    */
+    break;
+  }
   case HA_EXTRA_CACHE:
   {
     prepare_extra_cache(0);
@@ -6154,7 +6174,7 @@ void ha_partition::release_auto_increment()
 }
 
 /****************************************************************************
                MODULE initialise handler for HANDLER call
-                MODULE initialise handler for HANDLER call
+                MODULE initialize handler for HANDLER call
 ****************************************************************************/
 
 void ha_partition::init_table_handle_for_HANDLER()
@@ -6247,7 +6267,7 @@ int ha_partition::indexes_are_disabled(void)
   -------------------------------------------------------------------------
   Variables for partition share methods. A hash used to track open tables.
   A mutex for the hash table and an init variable to check if hash table
-  is initialised.
+  is initialized.
   There is also a constant ending of the partition handler file name.
 */
 
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 9b2a496507c..1f684b80f6c
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -48,6 +48,13 @@ typedef struct st_ha_data_partition
 } HA_DATA_PARTITION;
 
 #define PARTITION_BYTES_IN_POS 2
+#define PARTITION_ENABLED_TABLE_FLAGS (HA_FILE_BASED | HA_REC_NOT_IN_SEQ)
+#define PARTITION_DISABLED_TABLE_FLAGS (HA_CAN_GEOMETRY | \
+                                        HA_CAN_FULLTEXT | \
+                                        HA_DUPLICATE_POS | \
+                                        HA_CAN_SQL_HANDLER | \
+                                        HA_CAN_INSERT_DELAYED | \
+                                        HA_PRIMARY_KEY_REQUIRED_FOR_POSITION)
 class ha_partition :public handler
 {
 private:
@@ -92,8 +99,15 @@ private:
     for this since the MySQL Server sometimes allocating the handler object
     without freeing them.
   */
-  longlong m_table_flags;
   ulong m_low_byte_first;
+  enum enum_handler_status
+  {
+    handler_not_initialized= 0,
+    handler_initialized,
+    handler_opened,
+    handler_closed
+  };
+  enum_handler_status m_handler_status;
   uint m_reorged_parts;                  // Number of reorganised parts
   uint m_tot_parts;                      // Total number of partitions;
 
@@ -189,7 +203,7 @@ public:
     enable later calls of the methods to retrieve constants from the under-
     lying handlers. Returns false if not successful.
   */
-  bool initialise_partition(MEM_ROOT *mem_root);
+  bool initialize_partition(MEM_ROOT *mem_root);
 
   /*
    -------------------------------------------------------------------------
@@ -594,6 +608,8 @@ public:
    The partition handler will support whatever the underlying handlers
    support except when specifically mentioned below about exceptions to
    this rule.
+   NOTE: This cannot be cached since it can depend on TRANSACTION ISOLATION
+   LEVEL which is dynamic, see bug#39084.
 
    HA_READ_RND_SAME:
    Not currently used. (Means that the handler supports the rnd_same() call)
@@ -718,9 +734,33 @@ public:
    transfer those calls into index_read and other calls in the index scan
    module.
    (NDB)
+
+   HA_PRIMARY_KEY_REQUIRED_FOR_POSITION:
+   Does the storage engine need a PK for position?
+   Used with hidden primary key in InnoDB.
+   Hidden primary keys cannot be supported by partitioning, since the
+   partitioning expressions columns must be a part of the primary key.
+   (InnoDB)
+
+   HA_FILE_BASED is always set for partition handler since we use a
+   special file for handling names of partitions, engine types.
+   HA_REC_NOT_IN_SEQ is always set for partition handler since we cannot
+   guarantee that the records will be returned in sequence.
+   HA_CAN_GEOMETRY, HA_CAN_FULLTEXT, HA_CAN_SQL_HANDLER, HA_DUPLICATE_POS,
+   HA_CAN_INSERT_DELAYED, HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is disabled
+   until further investigated.
  */
-  virtual ulonglong table_flags() const
-  { return m_table_flags; }
+  virtual Table_flags table_flags() const
+  {
+    DBUG_ENTER("ha_partition::table_flags");
+    if (m_handler_status < handler_initialized ||
+        m_handler_status >= handler_closed)
+      DBUG_RETURN(PARTITION_ENABLED_TABLE_FLAGS);
+    else
+      DBUG_RETURN((m_file[0]->ha_table_flags() &
+                   ~(PARTITION_DISABLED_TABLE_FLAGS)) |
+                  (PARTITION_ENABLED_TABLE_FLAGS));
+  }
 
   /*
     This is a bitmap of flags that says how the storage engine
@@ -903,7 +943,7 @@ public:
 
   /*
     -------------------------------------------------------------------------
-    MODULE initialise handler for HANDLER call
+    MODULE initialize handler for HANDLER call
     -------------------------------------------------------------------------
    This method is a special InnoDB method called before a HANDLER query.
    -------------------------------------------------------------------------
diff --git a/sql/handler.cc b/sql/handler.cc
index be77a70e6a7..c32e9c759c3
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -263,7 +263,7 @@ handler *get_ha_partition(partition_info *part_info)
   DBUG_ENTER("get_ha_partition");
   if ((partition= new ha_partition(partition_hton, part_info)))
   {
-    if (partition->initialise_partition(current_thd->mem_root))
+    if (partition->initialize_partition(current_thd->mem_root))
     {
       delete partition;
       partition= 0;
diff --git a/sql/handler.h b/sql/handler.h
index 1b3945d9287..a218de6a1e5
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1198,6 +1198,9 @@ public:
  { return inited == INDEX ? ha_index_end() :
    inited == RND ? ha_rnd_end() : 0; }
 
+  /**
+    The cached_table_flags is set at ha_open and ha_external_lock
+  */
  Table_flags ha_table_flags() const { return cached_table_flags; }
  /**
    These functions represent the public interface to *users* of the
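
For illustration only, the standalone sketch below (not part of the patch) models the flag derivation that the new ha_partition::table_flags() performs: take the first underlying partition's flags, clear what PARTITION_DISABLED_TABLE_FLAGS masks out, and force PARTITION_ENABLED_TABLE_FLAGS on. The bit values, the HA_BINLOG_STMT_CAPABLE-style "dynamic" flag, and the UnderlyingHandler type are invented stand-ins; the real constants and the real dynamic behaviour (InnoDB's flags depending on isolation level vs. binlog format, bug#39084) live in sql/ha_partition.h and the storage engine.

// Standalone sketch, assuming simplified stand-in flag values.
#include <cstdint>
#include <cstdio>

// Stand-ins for handler table-flag bits (values are arbitrary here).
static const uint64_t HA_FILE_BASED           = 1ULL << 0;
static const uint64_t HA_REC_NOT_IN_SEQ       = 1ULL << 1;
static const uint64_t HA_CAN_GEOMETRY         = 1ULL << 2;
static const uint64_t HA_DUPLICATE_POS        = 1ULL << 3;
static const uint64_t HA_BINLOG_STMT_CAPABLE  = 1ULL << 4; // may change at runtime

// Mirrors the spirit of PARTITION_ENABLED/DISABLED_TABLE_FLAGS.
static const uint64_t PARTITION_ENABLED_TABLE_FLAGS  =
    HA_FILE_BASED | HA_REC_NOT_IN_SEQ;
static const uint64_t PARTITION_DISABLED_TABLE_FLAGS =
    HA_CAN_GEOMETRY | HA_DUPLICATE_POS;

// Stand-in for the first underlying partition's handler, whose flags can
// depend on dynamic state (e.g. isolation level vs. binlog format).
struct UnderlyingHandler {
  bool stmt_safe;
  uint64_t ha_table_flags() const {
    uint64_t flags = HA_CAN_GEOMETRY | HA_DUPLICATE_POS;
    if (stmt_safe)
      flags |= HA_BINLOG_STMT_CAPABLE;
    return flags;
  }
};

// The merge rule used by the patched table_flags(): take the underlying
// flags, clear what partitioning cannot support, force what it always needs.
static uint64_t partition_table_flags(const UnderlyingHandler &first_partition) {
  return (first_partition.ha_table_flags() & ~PARTITION_DISABLED_TABLE_FLAGS) |
         PARTITION_ENABLED_TABLE_FLAGS;
}

int main() {
  UnderlyingHandler innodb_like{true};
  std::printf("flags (stmt-safe):   0x%llx\n",
              (unsigned long long) partition_table_flags(innodb_like));
  // Flip the dynamic condition: the derived value changes, which is why the
  // patch recomputes it per call instead of caching it at initialization.
  innodb_like.stmt_safe = false;
  std::printf("flags (stmt-unsafe): 0x%llx\n",
              (unsigned long long) partition_table_flags(innodb_like));
  return 0;
}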