From ccbc24b45a79b6407487550fa19b40752d22fa7f Mon Sep 17 00:00:00 2001 From: "Tatiana A. Nurnberg" Date: Thu, 11 Nov 2010 09:46:49 +0000 Subject: Bug#55436: buffer overflow in debug binary of dbug_buff in Field_new_decimal::store_value There were some misunderstandings about parameters pertaining to buffer-size. Patches fixes the reported off by one and clarifies the documentation. mysql-test/r/type_newdecimal.result: add test mysql-test/t/type_newdecimal.test: add test sql/field.cc: adjust buffer size by one to account for terminator. sql/my_decimal.cc: adjust buffer size by one to account for terminator. clarify needs in comments. sql/my_decimal.h: clarify buffer-size needs to prevent future off-by-one bugs. strings/decimal.c: clarify buffer-size needs and parameters to prevent future off-by-one bugs --- mysql-test/r/type_newdecimal.result | 13 +++++++++++++ mysql-test/t/type_newdecimal.test | 14 ++++++++++++++ sql/field.cc | 6 +++--- sql/my_decimal.cc | 11 ++++++----- sql/my_decimal.h | 3 ++- strings/decimal.c | 5 +++-- 6 files changed, 41 insertions(+), 11 deletions(-) diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 70ee3a56cf3..c301a7dd629 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -1913,4 +1913,17 @@ group by PAY.id + 1; mult v_net_with_discount v_total 1.0000 27.18 27.180000 DROP TABLE currencies, payments, sub_tasks; +# +# Bug#55436: buffer overflow in debug binary of dbug_buff in +# Field_new_decimal::store_value +# +SET SQL_MODE=''; +CREATE TABLE t1(f1 DECIMAL(44,24)) ENGINE=MYISAM; +INSERT INTO t1 SET f1 = -64878E-85; +Warnings: +Note 1265 Data truncated for column 'f1' at row 1 +SELECT f1 FROM t1; +f1 +0.000000000000000000000000 +DROP TABLE IF EXISTS t1; End of 5.1 tests diff --git a/mysql-test/t/type_newdecimal.test b/mysql-test/t/type_newdecimal.test index 2cf7ab8fbdf..31a8808da55 100644 --- a/mysql-test/t/type_newdecimal.test +++ b/mysql-test/t/type_newdecimal.test @@ -1510,5 +1510,19 @@ group by PAY.id + 1; DROP TABLE currencies, payments, sub_tasks; +--echo # +--echo # Bug#55436: buffer overflow in debug binary of dbug_buff in +--echo # Field_new_decimal::store_value +--echo # + +# this threw memory warnings on Windows. Also make sure future changes +# don't change these results, as per usual. 
+SET SQL_MODE=''; +CREATE TABLE t1(f1 DECIMAL(44,24)) ENGINE=MYISAM; +INSERT INTO t1 SET f1 = -64878E-85; +SELECT f1 FROM t1; +DROP TABLE IF EXISTS t1; + + --echo End of 5.1 tests diff --git a/sql/field.cc b/sql/field.cc index c887a5f1c9b..cb23ae4fe9f 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -2583,7 +2583,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value) DBUG_ENTER("Field_new_decimal::store_value"); #ifndef DBUG_OFF { - char dbug_buff[DECIMAL_MAX_STR_LENGTH+1]; + char dbug_buff[DECIMAL_MAX_STR_LENGTH+2]; DBUG_PRINT("enter", ("value: %s", dbug_decimal_as_string(dbug_buff, decimal_value))); } #endif @@ -2598,7 +2598,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value) } #ifndef DBUG_OFF { - char dbug_buff[DECIMAL_MAX_STR_LENGTH+1]; + char dbug_buff[DECIMAL_MAX_STR_LENGTH+2]; DBUG_PRINT("info", ("saving with precision %d scale: %d value %s", (int)precision, (int)dec, dbug_decimal_as_string(dbug_buff, decimal_value))); @@ -2673,7 +2673,7 @@ int Field_new_decimal::store(const char *from, uint length, } #ifndef DBUG_OFF - char dbug_buff[DECIMAL_MAX_STR_LENGTH+1]; + char dbug_buff[DECIMAL_MAX_STR_LENGTH+2]; DBUG_PRINT("enter", ("value: %s", dbug_decimal_as_string(dbug_buff, &decimal_value))); #endif diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc index 3aa01880b83..a38dc341684 100644 --- a/sql/my_decimal.cc +++ b/sql/my_decimal.cc @@ -95,10 +95,11 @@ int my_decimal2string(uint mask, const my_decimal *d, UNSIGNED. Hence the buffer for a ZEROFILLed value is the length the user requested, plus one for a possible decimal point, plus one if the user only wanted decimal places, but we force a leading - zero on them. Because the type is implicitly UNSIGNED, we do not - need to reserve a character for the sign. For all other cases, - fixed_prec will be 0, and my_decimal_string_length() will be called - instead to calculate the required size of the buffer. + zero on them, plus one for the '\0' terminator. Because the type + is implicitly UNSIGNED, we do not need to reserve a character for + the sign. For all other cases, fixed_prec will be 0, and + my_decimal_string_length() will be called instead to calculate the + required size of the buffer. */ int length= (fixed_prec ? (fixed_prec + ((fixed_prec == fixed_dec) ? 
1 : 0) + 1) @@ -275,7 +276,7 @@ print_decimal_buff(const my_decimal *dec, const uchar* ptr, int length) const char *dbug_decimal_as_string(char *buff, const my_decimal *val) { - int length= DECIMAL_MAX_STR_LENGTH; + int length= DECIMAL_MAX_STR_LENGTH + 1; /* minimum size for buff */ if (!val) return "NULL"; (void)decimal2string((decimal_t*) val, buff, &length, 0,0,0); diff --git a/sql/my_decimal.h b/sql/my_decimal.h index 21669e82c44..2c13142bb60 100644 --- a/sql/my_decimal.h +++ b/sql/my_decimal.h @@ -55,7 +55,7 @@ C_MODE_END /** maximum length of string representation (number of maximum decimal - digits + 1 position for sign + 1 position for decimal point) + digits + 1 position for sign + 1 position for decimal point, no terminator) */ #define DECIMAL_MAX_STR_LENGTH (DECIMAL_MAX_POSSIBLE_PRECISION + 2) @@ -212,6 +212,7 @@ inline uint32 my_decimal_precision_to_length(uint precision, uint8 scale, inline int my_decimal_string_length(const my_decimal *d) { + /* length of string representation including terminating '\0' */ return decimal_string_size(d); } diff --git a/strings/decimal.c b/strings/decimal.c index bda296ce832..c91a5d1a7ec 100644 --- a/strings/decimal.c +++ b/strings/decimal.c @@ -320,8 +320,8 @@ int decimal_actual_fraction(decimal_t *from) from - value to convert to - points to buffer where string representation should be stored - *to_len - in: size of to buffer - out: length of the actually written string + *to_len - in: size of to buffer (incl. terminating '\0') + out: length of the actually written string (excl. '\0') fixed_precision - 0 if representation can be variable length and fixed_decimals will not be checked in this case. Put number as with fixed point position with this @@ -338,6 +338,7 @@ int decimal2string(decimal_t *from, char *to, int *to_len, int fixed_precision, int fixed_decimals, char filler) { + /* {intg_len, frac_len} output widths; {intg, frac} places in input */ int len, intg, frac= from->frac, i, intg_len, frac_len, fill; /* number digits before decimal point */ int fixed_intg= (fixed_precision ? -- cgit v1.2.1 From e24398ee42c05f6e0589d2c27cb2ab4593b74ad4 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Mon, 24 Jan 2011 13:41:44 +0100 Subject: Bug#59297: Can't find record in 'tablename' on update inner join Regression introduced in bug#52455. Problem was that the fixed function did not set the last used partition variable, resulting in wrong partition used when storing the position of the newly retrieved row. Fixed by setting the last used partition in ha_partition::index_read_idx_map. 
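[Editorial aside, not part of the committed change: the one-line fix described above records which partition a successful index lookup came from, so that the subsequent "store position of the current row" step refers to the correct partition. The hedged C sketch below shows that pattern in isolation; every name in it is hypothetical and only loosely modelled on ha_partition's m_last_part handling.]

#include <stdio.h>

#define NO_PART ((unsigned) -1)

struct part_handler {
	unsigned n_parts;		/* number of partitions in the table */
	unsigned last_used_part;	/* analogous to ha_partition::m_last_part */
};

/* Pretend to fetch a row by key from one partition; on success, remember
which partition it came from -- the bookkeeping step the fix restores. */
static int index_read_from_part(struct part_handler *h, unsigned part)
{
	if (part >= h->n_parts)
		return -1;			/* no such partition / no match */
	h->last_used_part = part;		/* the essential assignment */
	return 0;
}

/* Storing the row position is only meaningful if last_used_part is set;
otherwise a later positioned lookup would search the wrong partition. */
static void store_position(const struct part_handler *h)
{
	if (h->last_used_part == NO_PART)
		printf("error: position would refer to an unknown partition\n");
	else
		printf("position recorded for partition %u\n", h->last_used_part);
}

int main(void)
{
	struct part_handler h = { 20, NO_PART };

	if (index_read_from_part(&h, 7) == 0)
		store_position(&h);
	return 0;
}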
--- mysql-test/r/partition.result | 38 ++++++++++++++++++++++++++++++++++++++ mysql-test/t/partition.test | 43 +++++++++++++++++++++++++++++++++++++++++++ sql/ha_partition.cc | 1 + 3 files changed, 82 insertions(+) diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index 27ada9d1129..138264fd4e1 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -1,5 +1,43 @@ drop table if exists t1, t2; # +# Bug#59297: Can't find record in 'tablename' on update inner join +# +CREATE TABLE t1 ( +a char(2) NOT NULL, +b char(2) NOT NULL, +c int(10) unsigned NOT NULL, +d varchar(255) DEFAULT NULL, +e varchar(1000) DEFAULT NULL, +PRIMARY KEY (a, b, c), +KEY (a), +KEY (a, b) +) +/*!50100 PARTITION BY KEY (a) +PARTITIONS 20 */; +INSERT INTO t1 (a, b, c, d, e) VALUES +('07', '03', 343, '1', '07_03_343'), +('01', '04', 343, '2', '01_04_343'), +('01', '06', 343, '3', '01_06_343'), +('01', '07', 343, '4', '01_07_343'), +('01', '08', 343, '5', '01_08_343'), +('01', '09', 343, '6', '01_09_343'), +('03', '03', 343, '7', '03_03_343'), +('03', '06', 343, '8', '03_06_343'), +('03', '07', 343, '9', '03_07_343'), +('04', '03', 343, '10', '04_03_343'), +('04', '06', 343, '11', '04_06_343'), +('05', '03', 343, '12', '05_03_343'), +('11', '03', 343, '13', '11_03_343'), +('11', '04', 343, '14', '11_04_343') +; +UPDATE t1 AS A, +(SELECT '03' AS a, '06' AS b, 343 AS c, 'last' AS d) AS B +SET A.e = B.d +WHERE A.a = '03' +AND A.b = '06' +AND A.c = 343; +DROP TABLE t1; +# # Bug#57113: ha_partition::extra(ha_extra_function): # Assertion `m_extra_cache' failed CREATE TABLE t1 diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 0151820cef9..7a0a5558d32 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -14,6 +14,49 @@ drop table if exists t1, t2; --enable_warnings +--echo # +--echo # Bug#59297: Can't find record in 'tablename' on update inner join +--echo # + +CREATE TABLE t1 ( +a char(2) NOT NULL, +b char(2) NOT NULL, +c int(10) unsigned NOT NULL, +d varchar(255) DEFAULT NULL, +e varchar(1000) DEFAULT NULL, +PRIMARY KEY (a, b, c), +KEY (a), +KEY (a, b) +) +/*!50100 PARTITION BY KEY (a) +PARTITIONS 20 */; + +INSERT INTO t1 (a, b, c, d, e) VALUES +('07', '03', 343, '1', '07_03_343'), +('01', '04', 343, '2', '01_04_343'), +('01', '06', 343, '3', '01_06_343'), +('01', '07', 343, '4', '01_07_343'), +('01', '08', 343, '5', '01_08_343'), +('01', '09', 343, '6', '01_09_343'), +('03', '03', 343, '7', '03_03_343'), +('03', '06', 343, '8', '03_06_343'), +('03', '07', 343, '9', '03_07_343'), +('04', '03', 343, '10', '04_03_343'), +('04', '06', 343, '11', '04_06_343'), +('05', '03', 343, '12', '05_03_343'), +('11', '03', 343, '13', '11_03_343'), +('11', '04', 343, '14', '11_04_343') +; + +UPDATE t1 AS A, +(SELECT '03' AS a, '06' AS b, 343 AS c, 'last' AS d) AS B +SET A.e = B.d +WHERE A.a = '03' +AND A.b = '06' +AND A.c = 343; + +DROP TABLE t1; + --echo # --echo # Bug#57113: ha_partition::extra(ha_extra_function): --echo # Assertion `m_extra_cache' failed diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 7bcbd241541..f55c48189fe 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4317,6 +4317,7 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index, break; } } + m_last_part= part; } else { -- cgit v1.2.1 From 89621ad7387fb206023e8767b164ad6750d8e43b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 8 Feb 2011 12:56:23 +0200 Subject: Implement UNIV_BLOB_DEBUG. 
An early version of this caught Bug #55284. This option is known to be broken when tablespaces contain off-page columns after crash recovery. It has only been tested when creating the data files from scratch.

btr_blob_dbg_t: A map from page_no:heap_no:field_no to first_blob_page_no. This map is instantiated for every clustered index in index->blobs. It is protected by index->blobs_mutex.

btr_blob_dbg_msg_issue(): Issue a diagnostic message. Invoked when btr_blob_dbg_msg is set.

btr_blob_dbg_rbt_insert(): Insert a btr_blob_dbg_t into index->blobs.

btr_blob_dbg_rbt_delete(): Remove a btr_blob_dbg_t from index->blobs.

btr_blob_dbg_cmp(): Comparator for btr_blob_dbg_t.

btr_blob_dbg_add_blob(): Add a BLOB reference to the map.

btr_blob_dbg_add_rec(): Add all BLOB references from a record to the map.

btr_blob_dbg_print(): Display the map of BLOB references in an index.

btr_blob_dbg_remove_rec(): Remove all BLOB references of a record from the map.

btr_blob_dbg_is_empty(): Check that no BLOB references exist to or from a page. Disowned references from delete-marked records are tolerated.

btr_blob_dbg_op(): Perform an operation on all BLOB references on a B-tree page.

btr_blob_dbg_add(): Add all BLOB references from a B-tree page to the map.

btr_blob_dbg_remove(): Remove all BLOB references from a B-tree page from the map.

btr_blob_dbg_restore(): Restore the BLOB references after a failed page reorganize.

btr_blob_dbg_set_deleted_flag(): Modify the 'deleted' flag in the BLOB references of a record.

btr_blob_dbg_owner(): Own or disown a BLOB reference.

btr_page_create(), btr_page_free_low(): Assert that no BLOB references exist.

btr_create(): Create index->blobs for clustered indexes.

btr_page_reorganize_low(): Invoke btr_blob_dbg_remove() before copying the records. Invoke btr_blob_dbg_restore() if the operation fails.

btr_page_empty(), btr_lift_page_up(), btr_compress(), btr_discard_page(): Invoke btr_blob_dbg_remove().

btr_cur_del_mark_set_clust_rec(): Invoke btr_blob_dbg_set_deleted_flag(). Other cases of modifying the delete mark are either in the secondary index or during crash recovery, which we do not promise to support.

btr_cur_set_ownership_of_extern_field(): Invoke btr_blob_dbg_owner().

btr_store_big_rec_extern_fields(): Invoke btr_blob_dbg_add_blob().

btr_free_externally_stored_field(): Invoke btr_blob_dbg_assert_empty() on the first BLOB page.

page_cur_insert_rec_low(), page_cur_insert_rec_zip(), page_copy_rec_list_end_to_created_page(): Invoke btr_blob_dbg_add_rec().

page_cur_insert_rec_zip_reorg(), page_copy_rec_list_end(), page_copy_rec_list_start(): After failure, invoke btr_blob_dbg_remove() and btr_blob_dbg_add().

page_cur_delete_rec(): Invoke btr_blob_dbg_remove_rec().

page_delete_rec_list_end(): Invoke btr_blob_dbg_op(btr_blob_dbg_remove_rec).

page_zip_reorganize(): Invoke btr_blob_dbg_remove() before copying the records.

page_zip_copy_recs(): Invoke btr_blob_dbg_add().

row_upd_rec_in_place(): Invoke btr_blob_dbg_rbt_delete() and btr_blob_dbg_rbt_insert().

innobase_start_or_create_for_mysql(): Warn when UNIV_BLOB_DEBUG is enabled.
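[Editorial aside, not part of the committed change: the index->blobs map described above is keyed on the (ref_page_no, ref_heap_no, ref_field_no) triple and maps each key to the first page of the off-page column. The hedged sketch below imitates that keying with plain C and the standard library's qsort()/bsearch() instead of InnoDB's ut0rbt red-black tree; the struct and helper names are invented for illustration, and only the comparison order mirrors btr_blob_dbg_cmp() from the patch.]

#include <stdio.h>
#include <stdlib.h>

struct blob_ref {
	unsigned ref_page_no;	/* page of the referring clustered index record */
	unsigned ref_heap_no;	/* heap number of that record */
	unsigned ref_field_no;	/* off-page column number within the record */
	unsigned blob_page_no;	/* first BLOB page (the mapped value) */
};

/* Same ordering as btr_blob_dbg_cmp() in the patch: only the
(ref_page_no, ref_heap_no, ref_field_no) triple forms the key. */
static int blob_ref_cmp(const void *a, const void *b)
{
	const struct blob_ref *aa = a;
	const struct blob_ref *bb = b;

	if (aa->ref_page_no != bb->ref_page_no)
		return aa->ref_page_no < bb->ref_page_no ? -1 : 1;
	if (aa->ref_heap_no != bb->ref_heap_no)
		return aa->ref_heap_no < bb->ref_heap_no ? -1 : 1;
	if (aa->ref_field_no != bb->ref_field_no)
		return aa->ref_field_no < bb->ref_field_no ? -1 : 1;
	return 0;
}

int main(void)
{
	/* A handful of tracked references; a sorted array plus bsearch()
	stands in for the red-black tree used by the real code. */
	struct blob_ref refs[] = {
		{ 9, 1, 5, 40 },
		{ 3, 2, 4, 17 },
		{ 3, 5, 4, 21 },
	};
	size_t n = sizeof refs / sizeof refs[0];
	struct blob_ref key = { 3, 5, 4, 0 };	/* blob_page_no is not a key field */
	const struct blob_ref *hit;

	qsort(refs, n, sizeof refs[0], blob_ref_cmp);
	hit = bsearch(&key, refs, n, sizeof refs[0], blob_ref_cmp);

	if (hit != NULL)
		printf("BLOB for %u:%u:%u starts at page %u\n",
		       hit->ref_page_no, hit->ref_heap_no,
		       hit->ref_field_no, hit->blob_page_no);
	return 0;
}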
rb://550 approved by Jimmy Yang --- storage/innodb_plugin/btr/btr0btr.c | 571 ++++++++++++++++++++++++++++++ storage/innodb_plugin/btr/btr0cur.c | 44 +++ storage/innodb_plugin/dict/dict0mem.c | 9 + storage/innodb_plugin/include/btr0btr.h | 85 +++++ storage/innodb_plugin/include/btr0types.h | 125 +++++++ storage/innodb_plugin/include/dict0mem.h | 7 + storage/innodb_plugin/include/page0zip.h | 2 +- storage/innodb_plugin/include/univ.i | 2 + storage/innodb_plugin/page/page0cur.c | 10 + storage/innodb_plugin/page/page0page.c | 11 + storage/innodb_plugin/page/page0zip.c | 5 +- storage/innodb_plugin/row/row0upd.c | 35 ++ storage/innodb_plugin/srv/srv0start.c | 6 + 13 files changed, 910 insertions(+), 2 deletions(-) diff --git a/storage/innodb_plugin/btr/btr0btr.c b/storage/innodb_plugin/btr/btr0btr.c index 3d8d6048603..46810c011c4 100644 --- a/storage/innodb_plugin/btr/btr0btr.c +++ b/storage/innodb_plugin/btr/btr0btr.c @@ -42,6 +42,560 @@ Created 6/2/1994 Heikki Tuuri #include "ibuf0ibuf.h" #include "trx0trx.h" +#ifdef UNIV_BLOB_DEBUG +# include "srv0srv.h" +# include "ut0rbt.h" + +/** TRUE when messages about index->blobs modification are enabled. */ +static ibool btr_blob_dbg_msg; + +/** Issue a message about an operation on index->blobs. +@param op operation +@param b the entry being subjected to the operation +@param ctx the context of the operation */ +#define btr_blob_dbg_msg_issue(op, b, ctx) \ + fprintf(stderr, op " %u:%u:%u->%u %s(%u,%u,%u)\n", \ + (b)->ref_page_no, (b)->ref_heap_no, \ + (b)->ref_field_no, (b)->blob_page_no, ctx, \ + (b)->owner, (b)->always_owner, (b)->del) + +/** Insert to index->blobs a reference to an off-page column. +@param index the index tree +@param b the reference +@param ctx context (for logging) */ +UNIV_INTERN +void +btr_blob_dbg_rbt_insert( +/*====================*/ + dict_index_t* index, /*!< in/out: index tree */ + const btr_blob_dbg_t* b, /*!< in: the reference */ + const char* ctx) /*!< in: context (for logging) */ +{ + if (btr_blob_dbg_msg) { + btr_blob_dbg_msg_issue("insert", b, ctx); + } + mutex_enter(&index->blobs_mutex); + rbt_insert(index->blobs, b, b); + mutex_exit(&index->blobs_mutex); +} + +/** Remove from index->blobs a reference to an off-page column. +@param index the index tree +@param b the reference +@param ctx context (for logging) */ +UNIV_INTERN +void +btr_blob_dbg_rbt_delete( +/*====================*/ + dict_index_t* index, /*!< in/out: index tree */ + const btr_blob_dbg_t* b, /*!< in: the reference */ + const char* ctx) /*!< in: context (for logging) */ +{ + if (btr_blob_dbg_msg) { + btr_blob_dbg_msg_issue("delete", b, ctx); + } + mutex_enter(&index->blobs_mutex); + ut_a(rbt_delete(index->blobs, b)); + mutex_exit(&index->blobs_mutex); +} + +/**************************************************************//** +Comparator for items (btr_blob_dbg_t) in index->blobs. +The key in index->blobs is (ref_page_no, ref_heap_no, ref_field_no). +@return negative, 0 or positive if *a<*b, *a=*b, *a>*b */ +static +int +btr_blob_dbg_cmp( +/*=============*/ + const void* a, /*!< in: first btr_blob_dbg_t to compare */ + const void* b) /*!< in: second btr_blob_dbg_t to compare */ +{ + const btr_blob_dbg_t* aa = a; + const btr_blob_dbg_t* bb = b; + + ut_ad(aa != NULL); + ut_ad(bb != NULL); + + if (aa->ref_page_no != bb->ref_page_no) { + return(aa->ref_page_no < bb->ref_page_no ? -1 : 1); + } + if (aa->ref_heap_no != bb->ref_heap_no) { + return(aa->ref_heap_no < bb->ref_heap_no ? 
-1 : 1); + } + if (aa->ref_field_no != bb->ref_field_no) { + return(aa->ref_field_no < bb->ref_field_no ? -1 : 1); + } + return(0); +} + +/**************************************************************//** +Add a reference to an off-page column to the index->blobs map. */ +UNIV_INTERN +void +btr_blob_dbg_add_blob( +/*==================*/ + const rec_t* rec, /*!< in: clustered index record */ + ulint field_no, /*!< in: off-page column number */ + ulint page_no, /*!< in: start page of the column */ + dict_index_t* index, /*!< in/out: index tree */ + const char* ctx) /*!< in: context (for logging) */ +{ + btr_blob_dbg_t b; + const page_t* page = page_align(rec); + + ut_a(index->blobs); + + b.blob_page_no = page_no; + b.ref_page_no = page_get_page_no(page); + b.ref_heap_no = page_rec_get_heap_no(rec); + b.ref_field_no = field_no; + ut_a(b.ref_field_no >= index->n_uniq); + b.always_owner = b.owner = TRUE; + b.del = FALSE; + ut_a(!rec_get_deleted_flag(rec, page_is_comp(page))); + btr_blob_dbg_rbt_insert(index, &b, ctx); +} + +/**************************************************************//** +Add to index->blobs any references to off-page columns from a record. +@return number of references added */ +UNIV_INTERN +ulint +btr_blob_dbg_add_rec( +/*=================*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: offsets */ + const char* ctx) /*!< in: context (for logging) */ +{ + ulint count = 0; + ulint i; + btr_blob_dbg_t b; + ibool del; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + if (!rec_offs_any_extern(offsets)) { + return(0); + } + + b.ref_page_no = page_get_page_no(page_align(rec)); + b.ref_heap_no = page_rec_get_heap_no(rec); + del = (rec_get_deleted_flag(rec, rec_offs_comp(offsets)) != 0); + + for (i = 0; i < rec_offs_n_fields(offsets); i++) { + if (rec_offs_nth_extern(offsets, i)) { + ulint len; + const byte* field_ref = rec_get_nth_field( + rec, offsets, i, &len); + + ut_a(len != UNIV_SQL_NULL); + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; + + if (!memcmp(field_ref, field_ref_zero, + BTR_EXTERN_FIELD_REF_SIZE)) { + /* the column has not been stored yet */ + continue; + } + + b.ref_field_no = i; + b.blob_page_no = mach_read_from_4( + field_ref + BTR_EXTERN_PAGE_NO); + ut_a(b.ref_field_no >= index->n_uniq); + b.always_owner = b.owner + = !(field_ref[BTR_EXTERN_LEN] + & BTR_EXTERN_OWNER_FLAG); + b.del = del; + + btr_blob_dbg_rbt_insert(index, &b, ctx); + count++; + } + } + + return(count); +} + +/**************************************************************//** +Display the references to off-page columns. +This function is to be called from a debugger, +for example when a breakpoint on ut_dbg_assertion_failed is hit. */ +UNIV_INTERN +void +btr_blob_dbg_print( +/*===============*/ + const dict_index_t* index) /*!< in: index tree */ +{ + const ib_rbt_node_t* node; + + if (!index->blobs) { + return; + } + + /* We intentionally do not acquire index->blobs_mutex here. + This function is to be called from a debugger, and the caller + should make sure that the index->blobs_mutex is held. */ + + for (node = rbt_first(index->blobs); + node != NULL; node = rbt_next(index->blobs, node)) { + const btr_blob_dbg_t* b + = rbt_value(btr_blob_dbg_t, node); + fprintf(stderr, "%u:%u:%u->%u%s%s%s\n", + b->ref_page_no, b->ref_heap_no, b->ref_field_no, + b->blob_page_no, + b->owner ? "" : "(disowned)", + b->always_owner ? "" : "(has disowned)", + b->del ? 
"(deleted)" : ""); + } +} + +/**************************************************************//** +Remove from index->blobs any references to off-page columns from a record. +@return number of references removed */ +UNIV_INTERN +ulint +btr_blob_dbg_remove_rec( +/*====================*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: offsets */ + const char* ctx) /*!< in: context (for logging) */ +{ + ulint i; + ulint count = 0; + btr_blob_dbg_t b; + + ut_ad(rec_offs_validate(rec, index, offsets)); + + if (!rec_offs_any_extern(offsets)) { + return(0); + } + + b.ref_page_no = page_get_page_no(page_align(rec)); + b.ref_heap_no = page_rec_get_heap_no(rec); + + for (i = 0; i < rec_offs_n_fields(offsets); i++) { + if (rec_offs_nth_extern(offsets, i)) { + ulint len; + const byte* field_ref = rec_get_nth_field( + rec, offsets, i, &len); + + ut_a(len != UNIV_SQL_NULL); + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; + + b.ref_field_no = i; + b.blob_page_no = mach_read_from_4( + field_ref + BTR_EXTERN_PAGE_NO); + + switch (b.blob_page_no) { + case 0: + /* The column has not been stored yet. + The BLOB pointer must be all zero. + There cannot be a BLOB starting at + page 0, because page 0 is reserved for + the tablespace header. */ + ut_a(!memcmp(field_ref, field_ref_zero, + BTR_EXTERN_FIELD_REF_SIZE)); + /* fall through */ + case FIL_NULL: + /* the column has been freed already */ + continue; + } + + btr_blob_dbg_rbt_delete(index, &b, ctx); + count++; + } + } + + return(count); +} + +/**************************************************************//** +Check that there are no references to off-page columns from or to +the given page. Invoked when freeing or clearing a page. +@return TRUE when no orphan references exist */ +UNIV_INTERN +ibool +btr_blob_dbg_is_empty( +/*==================*/ + dict_index_t* index, /*!< in: index */ + ulint page_no) /*!< in: page number */ +{ + const ib_rbt_node_t* node; + ibool success = TRUE; + + if (!index->blobs) { + return(success); + } + + mutex_enter(&index->blobs_mutex); + + for (node = rbt_first(index->blobs); + node != NULL; node = rbt_next(index->blobs, node)) { + const btr_blob_dbg_t* b + = rbt_value(btr_blob_dbg_t, node); + + if (b->ref_page_no != page_no && b->blob_page_no != page_no) { + continue; + } + + fprintf(stderr, + "InnoDB: orphan BLOB ref%s%s%s %u:%u:%u->%u\n", + b->owner ? "" : "(disowned)", + b->always_owner ? "" : "(has disowned)", + b->del ? "(deleted)" : "", + b->ref_page_no, b->ref_heap_no, b->ref_field_no, + b->blob_page_no); + + if (b->blob_page_no != page_no || b->owner || !b->del) { + success = FALSE; + } + } + + mutex_exit(&index->blobs_mutex); + return(success); +} + +/**************************************************************//** +Count and process all references to off-page columns on a page. 
+@return number of references processed */ +UNIV_INTERN +ulint +btr_blob_dbg_op( +/*============*/ + const page_t* page, /*!< in: B-tree leaf page */ + const rec_t* rec, /*!< in: record to start from + (NULL to process the whole page) */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx, /*!< in: context (for logging) */ + const btr_blob_dbg_op_f op) /*!< in: operation on records */ +{ + ulint count = 0; + mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + rec_offs_init(offsets_); + + ut_a(fil_page_get_type(page) == FIL_PAGE_INDEX); + ut_a(!rec || page_align(rec) == page); + + if (!index->blobs || !page_is_leaf(page) + || !dict_index_is_clust(index)) { + return(0); + } + + if (rec == NULL) { + rec = page_get_infimum_rec(page); + } + + do { + offsets = rec_get_offsets(rec, index, offsets, + ULINT_UNDEFINED, &heap); + count += op(rec, index, offsets, ctx); + rec = page_rec_get_next_const(rec); + } while (!page_rec_is_supremum(rec)); + + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } + + return(count); +} + +/**************************************************************//** +Count and add to index->blobs any references to off-page columns +from records on a page. +@return number of references added */ +UNIV_INTERN +ulint +btr_blob_dbg_add( +/*=============*/ + const page_t* page, /*!< in: rewritten page */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx) /*!< in: context (for logging) */ +{ + btr_blob_dbg_assert_empty(index, page_get_page_no(page)); + + return(btr_blob_dbg_op(page, NULL, index, ctx, btr_blob_dbg_add_rec)); +} + +/**************************************************************//** +Count and remove from index->blobs any references to off-page columns +from records on a page. +Used when reorganizing a page, before copying the records. +@return number of references removed */ +UNIV_INTERN +ulint +btr_blob_dbg_remove( +/*================*/ + const page_t* page, /*!< in: b-tree page */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx) /*!< in: context (for logging) */ +{ + ulint count; + + count = btr_blob_dbg_op(page, NULL, index, ctx, + btr_blob_dbg_remove_rec); + + /* Check that no references exist. */ + btr_blob_dbg_assert_empty(index, page_get_page_no(page)); + + return(count); +} + +/**************************************************************//** +Restore in index->blobs any references to off-page columns +Used when page reorganize fails due to compressed page overflow. */ +UNIV_INTERN +void +btr_blob_dbg_restore( +/*=================*/ + const page_t* npage, /*!< in: page that failed to compress */ + const page_t* page, /*!< in: copy of original page */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx) /*!< in: context (for logging) */ +{ + ulint removed; + ulint added; + + ut_a(page_get_page_no(npage) == page_get_page_no(page)); + ut_a(page_get_space_id(npage) == page_get_space_id(page)); + + removed = btr_blob_dbg_remove(npage, index, ctx); + added = btr_blob_dbg_add(page, index, ctx); + ut_a(added == removed); +} + +/**************************************************************//** +Modify the 'deleted' flag of a record. 
*/ +UNIV_INTERN +void +btr_blob_dbg_set_deleted_flag( +/*==========================*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ + ibool del) /*!< in: TRUE=deleted, FALSE=exists */ +{ + const ib_rbt_node_t* node; + btr_blob_dbg_t b; + btr_blob_dbg_t* c; + ulint i; + + ut_ad(rec_offs_validate(rec, index, offsets)); + ut_a(dict_index_is_clust(index)); + ut_a(del == !!del);/* must be FALSE==0 or TRUE==1 */ + + if (!rec_offs_any_extern(offsets) || !index->blobs) { + + return; + } + + b.ref_page_no = page_get_page_no(page_align(rec)); + b.ref_heap_no = page_rec_get_heap_no(rec); + + for (i = 0; i < rec_offs_n_fields(offsets); i++) { + if (rec_offs_nth_extern(offsets, i)) { + ulint len; + const byte* field_ref = rec_get_nth_field( + rec, offsets, i, &len); + + ut_a(len != UNIV_SQL_NULL); + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; + + b.ref_field_no = i; + b.blob_page_no = mach_read_from_4( + field_ref + BTR_EXTERN_PAGE_NO); + + switch (b.blob_page_no) { + case 0: + ut_a(memcmp(field_ref, field_ref_zero, + BTR_EXTERN_FIELD_REF_SIZE)); + /* page number 0 is for the + page allocation bitmap */ + case FIL_NULL: + /* the column has been freed already */ + ut_error; + } + + mutex_enter(&index->blobs_mutex); + node = rbt_lookup(index->blobs, &b); + ut_a(node); + + c = rbt_value(btr_blob_dbg_t, node); + /* The flag should be modified. */ + c->del = del; + if (btr_blob_dbg_msg) { + b = *c; + mutex_exit(&index->blobs_mutex); + btr_blob_dbg_msg_issue("del_mk", &b, ""); + } else { + mutex_exit(&index->blobs_mutex); + } + } + } +} + +/**************************************************************//** +Change the ownership of an off-page column. */ +UNIV_INTERN +void +btr_blob_dbg_owner( +/*===============*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ + ulint i, /*!< in: ith field in rec */ + ibool own) /*!< in: TRUE=owned, FALSE=disowned */ +{ + const ib_rbt_node_t* node; + btr_blob_dbg_t b; + const byte* field_ref; + ulint len; + + ut_ad(rec_offs_validate(rec, index, offsets)); + ut_a(rec_offs_nth_extern(offsets, i)); + + field_ref = rec_get_nth_field(rec, offsets, i, &len); + ut_a(len != UNIV_SQL_NULL); + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; + + b.ref_page_no = page_get_page_no(page_align(rec)); + b.ref_heap_no = page_rec_get_heap_no(rec); + b.ref_field_no = i; + b.owner = !(field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG); + b.blob_page_no = mach_read_from_4(field_ref + BTR_EXTERN_PAGE_NO); + + ut_a(b.owner == own); + + mutex_enter(&index->blobs_mutex); + node = rbt_lookup(index->blobs, &b); + /* row_ins_clust_index_entry_by_modify() invokes + btr_cur_unmark_extern_fields() also for the newly inserted + references, which are all zero bytes until the columns are stored. + The node lookup must fail if and only if that is the case. */ + ut_a(!memcmp(field_ref, field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE) + == !node); + + if (node) { + btr_blob_dbg_t* c = rbt_value(btr_blob_dbg_t, node); + /* Some code sets ownership from TRUE to TRUE. + We do not allow changing ownership from FALSE to FALSE. 
*/ + ut_a(own || c->owner); + + c->owner = own; + if (!own) { + c->always_owner = FALSE; + } + } + + mutex_exit(&index->blobs_mutex); +} +#endif /* UNIV_BLOB_DEBUG */ + /* Latching strategy of the InnoDB B-tree -------------------------------------- @@ -296,6 +850,7 @@ btr_page_create( page_t* page = buf_block_get_frame(block); ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + btr_blob_dbg_assert_empty(index, buf_block_get_page_no(block)); if (UNIV_LIKELY_NULL(page_zip)) { page_create_zip(block, index, level, mtr); @@ -489,6 +1044,7 @@ btr_page_free_low( modify clock */ buf_block_modify_clock_inc(block); + btr_blob_dbg_assert_empty(index, buf_block_get_page_no(block)); if (dict_index_is_ibuf(index)) { @@ -773,6 +1329,13 @@ btr_create( block = buf_page_get(space, zip_size, page_no, RW_X_LATCH, mtr); } else { +#ifdef UNIV_BLOB_DEBUG + if ((type & DICT_CLUSTERED) && !index->blobs) { + mutex_create(&index->blobs_mutex, SYNC_ANY_LATCH); + index->blobs = rbt_create(sizeof(btr_blob_dbg_t), + btr_blob_dbg_cmp); + } +#endif /* UNIV_BLOB_DEBUG */ block = fseg_create(space, 0, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); } @@ -996,6 +1559,7 @@ btr_page_reorganize_low( block->check_index_page_at_flush = TRUE; #endif /* !UNIV_HOTBACKUP */ + btr_blob_dbg_remove(page, index, "btr_page_reorganize"); /* Recreate the page: note that global data on page (possible segment headers, next page-field, etc.) is preserved intact */ @@ -1024,6 +1588,8 @@ btr_page_reorganize_low( (!page_zip_compress(page_zip, page, index, NULL))) { /* Restore the old page and exit. */ + btr_blob_dbg_restore(page, temp_page, index, + "btr_page_reorganize_compress_fail"); #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG /* Check that the bytes that we skip are identical. */ @@ -1157,6 +1723,7 @@ btr_page_empty( #endif /* UNIV_ZIP_DEBUG */ btr_search_drop_page_hash_index(block); + btr_blob_dbg_remove(page, index, "btr_page_empty"); /* Recreate the page: note that global data on page (possible segment headers, next page-field, etc.) is preserved intact */ @@ -2497,6 +3064,7 @@ btr_lift_page_up( index); } + btr_blob_dbg_remove(page, index, "btr_lift_page_up"); lock_update_copy_and_discard(father_block, block); /* Go upward to root page, decrementing levels by one. 
*/ @@ -2758,6 +3326,7 @@ err_exit: lock_update_merge_right(merge_block, orig_succ, block); } + btr_blob_dbg_remove(page, index, "btr_compress"); mem_heap_free(heap); if (!dict_index_is_clust(index) && page_is_leaf(merge_page)) { @@ -2988,6 +3557,8 @@ btr_discard_page( block); } + btr_blob_dbg_remove(page, index, "btr_discard_page"); + /* Free the file page */ btr_page_free(index, block, mtr); diff --git a/storage/innodb_plugin/btr/btr0cur.c b/storage/innodb_plugin/btr/btr0cur.c index 704cc606a5f..86d77c79e7b 100644 --- a/storage/innodb_plugin/btr/btr0cur.c +++ b/storage/innodb_plugin/btr/btr0cur.c @@ -2572,6 +2572,7 @@ btr_cur_del_mark_set_clust_rec( page_zip = buf_block_get_page_zip(block); + btr_blob_dbg_set_deleted_flag(rec, index, offsets, val); btr_rec_set_deleted_flag(rec, page_zip, val); trx = thr_get_trx(thr); @@ -3595,6 +3596,8 @@ btr_cur_set_ownership_of_extern_field( } else { mach_write_to_1(data + local_len + BTR_EXTERN_LEN, byte_val); } + + btr_blob_dbg_owner(rec, index, offsets, i, val); } /*******************************************************************//** @@ -4094,6 +4097,11 @@ btr_store_big_rec_extern_fields_func( } if (prev_page_no == FIL_NULL) { + btr_blob_dbg_add_blob( + rec, big_rec_vec->fields[i] + .field_no, page_no, index, + "store"); + mach_write_to_4(field_ref + BTR_EXTERN_SPACE_ID, space_id); @@ -4169,6 +4177,11 @@ next_zip_page: MLOG_4BYTES, &mtr); if (prev_page_no == FIL_NULL) { + btr_blob_dbg_add_blob( + rec, big_rec_vec->fields[i] + .field_no, page_no, index, + "store"); + mlog_write_ulint(field_ref + BTR_EXTERN_SPACE_ID, space_id, @@ -4337,6 +4350,37 @@ btr_free_externally_stored_field( rec_zip_size = 0; } +#ifdef UNIV_BLOB_DEBUG + if (!(field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG) + && !((field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_INHERITED_FLAG) + && (rb_ctx == RB_NORMAL || rb_ctx == RB_RECOVERY))) { + /* This off-page column will be freed. + Check that no references remain. */ + + btr_blob_dbg_t b; + + b.blob_page_no = mach_read_from_4( + field_ref + BTR_EXTERN_PAGE_NO); + + if (rec) { + /* Remove the reference from the record to the + BLOB. If the BLOB were not freed, the + reference would be removed when the record is + removed. Freeing the BLOB will overwrite the + BTR_EXTERN_PAGE_NO in the field_ref of the + record with FIL_NULL, which would make the + btr_blob_dbg information inconsistent with the + record. 
*/ + b.ref_page_no = page_get_page_no(page_align(rec)); + b.ref_heap_no = page_rec_get_heap_no(rec); + b.ref_field_no = i; + btr_blob_dbg_rbt_delete(index, &b, "free"); + } + + btr_blob_dbg_assert_empty(index, b.blob_page_no); + } +#endif /* UNIV_BLOB_DEBUG */ + for (;;) { #ifdef UNIV_SYNC_DEBUG buf_block_t* rec_block; diff --git a/storage/innodb_plugin/dict/dict0mem.c b/storage/innodb_plugin/dict/dict0mem.c index 3287247029f..aef815dd2f6 100644 --- a/storage/innodb_plugin/dict/dict0mem.c +++ b/storage/innodb_plugin/dict/dict0mem.c @@ -36,6 +36,9 @@ Created 1/8/1996 Heikki Tuuri #ifndef UNIV_HOTBACKUP # include "lock0lock.h" #endif /* !UNIV_HOTBACKUP */ +#ifdef UNIV_BLOB_DEBUG +# include "ut0rbt.h" +#endif /* UNIV_BLOB_DEBUG */ #define DICT_HEAP_SIZE 100 /*!< initial memory heap size when creating a table or index object */ @@ -316,6 +319,12 @@ dict_mem_index_free( { ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); +#ifdef UNIV_BLOB_DEBUG + if (index->blobs) { + mutex_free(&index->blobs_mutex); + rbt_free(index->blobs); + } +#endif /* UNIV_BLOB_DEBUG */ mem_heap_free(index->heap); } diff --git a/storage/innodb_plugin/include/btr0btr.h b/storage/innodb_plugin/include/btr0btr.h index dde3a0bab69..5aa02694e0e 100644 --- a/storage/innodb_plugin/include/btr0btr.h +++ b/storage/innodb_plugin/include/btr0btr.h @@ -81,6 +81,91 @@ UNIQUE definition on secondary indexes when we decide if we can use the insert buffer to speed up inserts */ #define BTR_IGNORE_SEC_UNIQUE 2048 +#ifdef UNIV_BLOB_DEBUG +# include "ut0rbt.h" +/** An index->blobs entry for keeping track of off-page column references */ +struct btr_blob_dbg_struct +{ + unsigned blob_page_no:32; /*!< first BLOB page number */ + unsigned ref_page_no:32; /*!< referring page number */ + unsigned ref_heap_no:16; /*!< referring heap number */ + unsigned ref_field_no:10; /*!< referring field number */ + unsigned owner:1; /*!< TRUE if BLOB owner */ + unsigned always_owner:1; /*!< TRUE if always + has been the BLOB owner; + reset to TRUE on B-tree + page splits and merges */ + unsigned del:1; /*!< TRUE if currently + delete-marked */ +}; + +/**************************************************************//** +Add a reference to an off-page column to the index->blobs map. */ +UNIV_INTERN +void +btr_blob_dbg_add_blob( +/*==================*/ + const rec_t* rec, /*!< in: clustered index record */ + ulint field_no, /*!< in: number of off-page column */ + ulint page_no, /*!< in: start page of the column */ + dict_index_t* index, /*!< in/out: index tree */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); +/**************************************************************//** +Display the references to off-page columns. +This function is to be called from a debugger, +for example when a breakpoint on ut_dbg_assertion_failed is hit. */ +UNIV_INTERN +void +btr_blob_dbg_print( +/*===============*/ + const dict_index_t* index) /*!< in: index tree */ + __attribute__((nonnull)); +/**************************************************************//** +Check that there are no references to off-page columns from or to +the given page. Invoked when freeing or clearing a page. +@return TRUE when no orphan references exist */ +UNIV_INTERN +ibool +btr_blob_dbg_is_empty( +/*==================*/ + dict_index_t* index, /*!< in: index */ + ulint page_no) /*!< in: page number */ + __attribute__((nonnull, warn_unused_result)); + +/**************************************************************//** +Modify the 'deleted' flag of a record. 
*/ +UNIV_INTERN +void +btr_blob_dbg_set_deleted_flag( +/*==========================*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ + ibool del) /*!< in: TRUE=deleted, FALSE=exists */ + __attribute__((nonnull)); +/**************************************************************//** +Change the ownership of an off-page column. */ +UNIV_INTERN +void +btr_blob_dbg_owner( +/*===============*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ + ulint i, /*!< in: ith field in rec */ + ibool own) /*!< in: TRUE=owned, FALSE=disowned */ + __attribute__((nonnull)); +/** Assert that there are no BLOB references to or from the given page. */ +# define btr_blob_dbg_assert_empty(index, page_no) \ + ut_a(btr_blob_dbg_is_empty(index, page_no)) +#else /* UNIV_BLOB_DEBUG */ +# define btr_blob_dbg_add_blob(rec, field_no, page, index, ctx) ((void) 0) +# define btr_blob_dbg_set_deleted_flag(rec, index, offsets, del)((void) 0) +# define btr_blob_dbg_owner(rec, index, offsets, i, val) ((void) 0) +# define btr_blob_dbg_assert_empty(index, page_no) ((void) 0) +#endif /* UNIV_BLOB_DEBUG */ + /**************************************************************//** Gets the root node of a tree and x-latches it. @return root page, x-latched */ diff --git a/storage/innodb_plugin/include/btr0types.h b/storage/innodb_plugin/include/btr0types.h index ef4a6b04b34..07c06fb18d7 100644 --- a/storage/innodb_plugin/include/btr0types.h +++ b/storage/innodb_plugin/include/btr0types.h @@ -38,6 +38,131 @@ typedef struct btr_cur_struct btr_cur_t; /** B-tree search information for the adaptive hash index */ typedef struct btr_search_struct btr_search_t; +#ifdef UNIV_BLOB_DEBUG +# include "buf0types.h" +/** An index->blobs entry for keeping track of off-page column references */ +typedef struct btr_blob_dbg_struct btr_blob_dbg_t; + +/** Insert to index->blobs a reference to an off-page column. +@param index the index tree +@param b the reference +@param ctx context (for logging) */ +UNIV_INTERN +void +btr_blob_dbg_rbt_insert( +/*====================*/ + dict_index_t* index, /*!< in/out: index tree */ + const btr_blob_dbg_t* b, /*!< in: the reference */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); + +/** Remove from index->blobs a reference to an off-page column. +@param index the index tree +@param b the reference +@param ctx context (for logging) */ +UNIV_INTERN +void +btr_blob_dbg_rbt_delete( +/*====================*/ + dict_index_t* index, /*!< in/out: index tree */ + const btr_blob_dbg_t* b, /*!< in: the reference */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); + +/**************************************************************//** +Add to index->blobs any references to off-page columns from a record. +@return number of references added */ +UNIV_INTERN +ulint +btr_blob_dbg_add_rec( +/*=================*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: offsets */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); +/**************************************************************//** +Remove from index->blobs any references to off-page columns from a record. 
+@return number of references removed */ +UNIV_INTERN +ulint +btr_blob_dbg_remove_rec( +/*====================*/ + const rec_t* rec, /*!< in: record */ + dict_index_t* index, /*!< in/out: index */ + const ulint* offsets,/*!< in: offsets */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); +/**************************************************************//** +Count and add to index->blobs any references to off-page columns +from records on a page. +@return number of references added */ +UNIV_INTERN +ulint +btr_blob_dbg_add( +/*=============*/ + const page_t* page, /*!< in: rewritten page */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); +/**************************************************************//** +Count and remove from index->blobs any references to off-page columns +from records on a page. +Used when reorganizing a page, before copying the records. +@return number of references removed */ +UNIV_INTERN +ulint +btr_blob_dbg_remove( +/*================*/ + const page_t* page, /*!< in: b-tree page */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); +/**************************************************************//** +Restore in index->blobs any references to off-page columns +Used when page reorganize fails due to compressed page overflow. */ +UNIV_INTERN +void +btr_blob_dbg_restore( +/*=================*/ + const page_t* npage, /*!< in: page that failed to compress */ + const page_t* page, /*!< in: copy of original page */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx) /*!< in: context (for logging) */ + __attribute__((nonnull)); + +/** Operation that processes the BLOB references of an index record +@param[in] rec record on index page +@param[in/out] index the index tree of the record +@param[in] offsets rec_get_offsets(rec,index) +@param[in] ctx context (for logging) +@return number of BLOB references processed */ +typedef ulint (*btr_blob_dbg_op_f) +(const rec_t* rec,dict_index_t* index,const ulint* offsets,const char* ctx); + +/**************************************************************//** +Count and process all references to off-page columns on a page. +@return number of references processed */ +UNIV_INTERN +ulint +btr_blob_dbg_op( +/*============*/ + const page_t* page, /*!< in: B-tree leaf page */ + const rec_t* rec, /*!< in: record to start from + (NULL to process the whole page) */ + dict_index_t* index, /*!< in/out: index */ + const char* ctx, /*!< in: context (for logging) */ + const btr_blob_dbg_op_f op) /*!< in: operation on records */ + __attribute__((nonnull(1,3,4,5))); +#else /* UNIV_BLOB_DEBUG */ +# define btr_blob_dbg_add_rec(rec, index, offsets, ctx) ((void) 0) +# define btr_blob_dbg_add(page, index, ctx) ((void) 0) +# define btr_blob_dbg_remove_rec(rec, index, offsets, ctx) ((void) 0) +# define btr_blob_dbg_remove(page, index, ctx) ((void) 0) +# define btr_blob_dbg_restore(npage, page, index, ctx) ((void) 0) +# define btr_blob_dbg_op(page, rec, index, ctx, op) ((void) 0) +#endif /* UNIV_BLOB_DEBUG */ + /** The size of a reference to data stored on a different page. The reference is stored at the end of the prefix of the field in the index record. 
*/ diff --git a/storage/innodb_plugin/include/dict0mem.h b/storage/innodb_plugin/include/dict0mem.h index 09a068ccb93..bd32a239cfd 100644 --- a/storage/innodb_plugin/include/dict0mem.h +++ b/storage/innodb_plugin/include/dict0mem.h @@ -340,6 +340,13 @@ struct dict_index_struct{ index, or 0 if the index existed when InnoDB was started up */ #endif /* !UNIV_HOTBACKUP */ +#ifdef UNIV_BLOB_DEBUG + mutex_t blobs_mutex; + /*!< mutex protecting blobs */ + void* blobs; /*!< map of (page_no,heap_no,field_no) + to first_blob_page_no; protected by + blobs_mutex; @see btr_blob_dbg_t */ +#endif /* UNIV_BLOB_DEBUG */ #ifdef UNIV_DEBUG ulint magic_n;/*!< magic number */ /** Value of dict_index_struct::magic_n */ diff --git a/storage/innodb_plugin/include/page0zip.h b/storage/innodb_plugin/include/page0zip.h index 574809e5227..00c1d0516e6 100644 --- a/storage/innodb_plugin/include/page0zip.h +++ b/storage/innodb_plugin/include/page0zip.h @@ -420,7 +420,7 @@ page_zip_copy_recs( const page_t* src, /*!< in: page */ dict_index_t* index, /*!< in: index of the B-tree */ mtr_t* mtr) /*!< in: mini-transaction */ - __attribute__((nonnull(1,2,3,4))); + __attribute__((nonnull)); #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** diff --git a/storage/innodb_plugin/include/univ.i b/storage/innodb_plugin/include/univ.i index 690bfd5d6a9..6dd38df3782 100644 --- a/storage/innodb_plugin/include/univ.i +++ b/storage/innodb_plugin/include/univ.i @@ -194,6 +194,8 @@ this will break redo log file compatibility, but it may be useful when debugging redo log application problems. */ #define UNIV_MEM_DEBUG /* detect memory leaks etc */ #define UNIV_IBUF_DEBUG /* debug the insert buffer */ +#define UNIV_BLOB_DEBUG /* track BLOB ownership; +assumes that no BLOBs survive server restart */ #define UNIV_IBUF_COUNT_DEBUG /* debug the insert buffer; this limits the database to IBUF_COUNT_N_SPACES and IBUF_COUNT_N_PAGES, and the insert buffer must be empty when the database is started */ diff --git a/storage/innodb_plugin/page/page0cur.c b/storage/innodb_plugin/page/page0cur.c index f10f16a7dd9..936762b986a 100644 --- a/storage/innodb_plugin/page/page0cur.c +++ b/storage/innodb_plugin/page/page0cur.c @@ -1149,6 +1149,8 @@ use_heap: current_rec, index, mtr); } + btr_blob_dbg_add_rec(insert_rec, index, offsets, "insert"); + return(insert_rec); } @@ -1195,10 +1197,12 @@ page_cur_insert_rec_zip_reorg( } /* Out of space: restore the page */ + btr_blob_dbg_remove(page, index, "insert_zip_fail"); if (!page_zip_decompress(page_zip, page, FALSE)) { ut_error; /* Memory corrupted? */ } ut_ad(page_validate(page, index)); + btr_blob_dbg_add(page, index, "insert_zip_fail"); return(NULL); } @@ -1490,6 +1494,8 @@ use_heap: page_zip_write_rec(page_zip, insert_rec, index, offsets, 1); + btr_blob_dbg_add_rec(insert_rec, index, offsets, "insert_zip_ok"); + /* 9. Write log record of the insert */ if (UNIV_LIKELY(mtr != NULL)) { page_cur_insert_rec_write_log(insert_rec, rec_size, @@ -1697,6 +1703,9 @@ page_copy_rec_list_end_to_created_page( heap_top += rec_size; + rec_offs_make_valid(insert_rec, index, offsets); + btr_blob_dbg_add_rec(insert_rec, index, offsets, "copy_end"); + page_cur_insert_rec_write_log(insert_rec, rec_size, prev_rec, index, mtr); prev_rec = insert_rec; @@ -1944,6 +1953,7 @@ page_cur_delete_rec( page_dir_slot_set_n_owned(cur_dir_slot, page_zip, cur_n_owned - 1); /* 6. 
Free the memory occupied by the record */ + btr_blob_dbg_remove_rec(current_rec, index, offsets, "delete"); page_mem_free(page, page_zip, current_rec, index, offsets); /* 7. Now we have decremented the number of owned records of the slot. diff --git a/storage/innodb_plugin/page/page0page.c b/storage/innodb_plugin/page/page0page.c index 10008f9ac25..6cae03e8829 100644 --- a/storage/innodb_plugin/page/page0page.c +++ b/storage/innodb_plugin/page/page0page.c @@ -685,12 +685,16 @@ page_copy_rec_list_end( if (UNIV_UNLIKELY (!page_zip_reorganize(new_block, index, mtr))) { + btr_blob_dbg_remove(new_page, index, + "copy_end_reorg_fail"); if (UNIV_UNLIKELY (!page_zip_decompress(new_page_zip, new_page, FALSE))) { ut_error; } ut_ad(page_validate(new_page, index)); + btr_blob_dbg_add(new_page, index, + "copy_end_reorg_fail"); return(NULL); } else { /* The page was reorganized: @@ -803,12 +807,16 @@ page_copy_rec_list_start( if (UNIV_UNLIKELY (!page_zip_reorganize(new_block, index, mtr))) { + btr_blob_dbg_remove(new_page, index, + "copy_start_reorg_fail"); if (UNIV_UNLIKELY (!page_zip_decompress(new_page_zip, new_page, FALSE))) { ut_error; } ut_ad(page_validate(new_page, index)); + btr_blob_dbg_add(new_page, index, + "copy_start_reorg_fail"); return(NULL); } else { /* The page was reorganized: @@ -1080,6 +1088,9 @@ page_delete_rec_list_end( /* Remove the record chain segment from the record chain */ page_rec_set_next(prev_rec, page_get_supremum_rec(page)); + btr_blob_dbg_op(page, rec, index, "delete_end", + btr_blob_dbg_remove_rec); + /* Catenate the deleted chain segment to the page free list */ page_rec_set_next(last_rec, page_header_get_ptr(page, PAGE_FREE)); diff --git a/storage/innodb_plugin/page/page0zip.c b/storage/innodb_plugin/page/page0zip.c index bb9b0995c72..a1dd4177ba8 100644 --- a/storage/innodb_plugin/page/page0zip.c +++ b/storage/innodb_plugin/page/page0zip.c @@ -4451,6 +4451,8 @@ page_zip_reorganize( /* Copy the old page to temporary space */ buf_frame_copy(temp_page, page); + btr_blob_dbg_remove(page, index, "zip_reorg"); + /* Recreate the page: note that global data on page (possible segment headers, next page-field, etc.) 
is preserved intact */ @@ -4509,7 +4511,7 @@ page_zip_copy_recs( mtr_t* mtr) /*!< in: mini-transaction */ { ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, (page_t*) src, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, src, MTR_MEMO_PAGE_X_FIX)); ut_ad(!dict_index_is_ibuf(index)); #ifdef UNIV_ZIP_DEBUG /* The B-tree operations that call this function may set @@ -4579,6 +4581,7 @@ page_zip_copy_recs( #ifdef UNIV_ZIP_DEBUG ut_a(page_zip_validate(page_zip, page)); #endif /* UNIV_ZIP_DEBUG */ + btr_blob_dbg_add(page, index, "page_zip_copy_recs"); page_zip_compress_write_log(page_zip, page, index, mtr); } diff --git a/storage/innodb_plugin/row/row0upd.c b/storage/innodb_plugin/row/row0upd.c index 9ded3d68018..3a6de4b94a7 100644 --- a/storage/innodb_plugin/row/row0upd.c +++ b/storage/innodb_plugin/row/row0upd.c @@ -498,14 +498,49 @@ row_upd_rec_in_place( n_fields = upd_get_n_fields(update); for (i = 0; i < n_fields; i++) { +#ifdef UNIV_BLOB_DEBUG + btr_blob_dbg_t b; + const byte* field_ref = NULL; +#endif /* UNIV_BLOB_DEBUG */ + upd_field = upd_get_nth_field(update, i); new_val = &(upd_field->new_val); ut_ad(!dfield_is_ext(new_val) == !rec_offs_nth_extern(offsets, upd_field->field_no)); +#ifdef UNIV_BLOB_DEBUG + if (dfield_is_ext(new_val)) { + ulint len; + field_ref = rec_get_nth_field(rec, offsets, i, &len); + ut_a(len != UNIV_SQL_NULL); + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; + + b.ref_page_no = page_get_page_no(page_align(rec)); + b.ref_heap_no = page_rec_get_heap_no(rec); + b.ref_field_no = i; + b.blob_page_no = mach_read_from_4( + field_ref + BTR_EXTERN_PAGE_NO); + ut_a(b.ref_field_no >= index->n_uniq); + btr_blob_dbg_rbt_delete(index, &b, "upd_in_place"); + } +#endif /* UNIV_BLOB_DEBUG */ rec_set_nth_field(rec, offsets, upd_field->field_no, dfield_get_data(new_val), dfield_get_len(new_val)); + +#ifdef UNIV_BLOB_DEBUG + if (dfield_is_ext(new_val)) { + b.blob_page_no = mach_read_from_4( + field_ref + BTR_EXTERN_PAGE_NO); + b.always_owner = b.owner = !(field_ref[BTR_EXTERN_LEN] + & BTR_EXTERN_OWNER_FLAG); + b.del = rec_get_deleted_flag( + rec, rec_offs_comp(offsets)); + + btr_blob_dbg_rbt_insert(index, &b, "upd_in_place"); + } +#endif /* UNIV_BLOB_DEBUG */ } if (UNIV_LIKELY_NULL(page_zip)) { diff --git a/storage/innodb_plugin/srv/srv0start.c b/storage/innodb_plugin/srv/srv0start.c index 73f8f319704..f8b5049ca65 100644 --- a/storage/innodb_plugin/srv/srv0start.c +++ b/storage/innodb_plugin/srv/srv0start.c @@ -1061,6 +1061,12 @@ innobase_start_or_create_for_mysql(void) ); #endif +#ifdef UNIV_BLOB_DEBUG + fprintf(stderr, + "InnoDB: !!!!!!!! UNIV_BLOB_DEBUG switched on !!!!!!!!!\n" + "InnoDB: Server restart may fail with UNIV_BLOB_DEBUG\n"); +#endif /* UNIV_BLOB_DEBUG */ + #ifdef UNIV_SYNC_DEBUG fprintf(stderr, "InnoDB: !!!!!!!! 
UNIV_SYNC_DEBUG switched on !!!!!!!!!\n"); -- cgit v1.2.1 From 0e28a072e362d2185d8a55ba4feda7df0d06ae8b Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Tue, 8 Feb 2011 17:36:25 +0200 Subject: Bug #59815: Missing License information with enterprise GPL packages on behalf of Kent: Include the README into the binary packages --- scripts/make_win_bin_dist | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/make_win_bin_dist b/scripts/make_win_bin_dist index c1d01a0342d..7859f42ca29 100755 --- a/scripts/make_win_bin_dist +++ b/scripts/make_win_bin_dist @@ -198,6 +198,7 @@ cp Docs/INSTALL-BINARY $DESTDIR/Docs/ cp Docs/manual.chm $DESTDIR/Docs/ || /bin/true cp ChangeLog $DESTDIR/Docs/ || /bin/true cp support-files/my-*.ini $DESTDIR/ +cp README $DESTDIR/ if [ -f COPYING ] ; then cp COPYING $DESTDIR/ -- cgit v1.2.1 From f5dab8c83b562882ec345a6cb97a7e3649fa0d80 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 9 Feb 2011 19:02:38 +0100 Subject: Set version number for mysql-5.1.52sp1 release --- configure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.in b/configure.in index c1672557bd1..28ec04a4c42 100644 --- a/configure.in +++ b/configure.in @@ -12,7 +12,7 @@ dnl dnl When changing the major version number please also check the switch dnl statement in mysqlbinlog::check_master_version(). You may also need dnl to update version.c in ndb. -AC_INIT([MySQL Server], [5.1.52], [], [mysql]) +AC_INIT([MySQL Server], [5.1.52sp1], [], [mysql]) AC_CONFIG_SRCDIR([sql/mysqld.cc]) AC_CANONICAL_SYSTEM -- cgit v1.2.1 From d4549347efc8daf659f11c78bd9d945d9e0079b8 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:03:17 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 1810.3987.43 > revision-id: alexander.nozdrin@oracle.com-20101122132749-tl6m39zsfgvzi2mf > parent: alexander.nozdrin@oracle.com-20101122132550-hcu608iuq5un9k0q > committer: Alexander Nozdrin > branch nick: mysql-5.0-bugteam > timestamp: Mon 2010-11-22 16:27:49 +0300 > message: > Patch for Bug#58340 (Remove Server GPL EXCEPTIONS-CLIENT file). --- EXCEPTIONS-CLIENT | 119 ------------------------------------------------------ 1 file changed, 119 deletions(-) delete mode 100644 EXCEPTIONS-CLIENT diff --git a/EXCEPTIONS-CLIENT b/EXCEPTIONS-CLIENT deleted file mode 100644 index c570ff7ba24..00000000000 --- a/EXCEPTIONS-CLIENT +++ /dev/null @@ -1,119 +0,0 @@ -MySQL FLOSS License Exception - -The MySQL AB Exception for Free/Libre and Open Source -Software-only Applications Using MySQL Client Libraries (the -"FLOSS Exception"). - -Version 0.6, 7 March 2007 - -Exception Intent - -We want specified Free/Libre and Open Source Software (``FLOSS'') -applications to be able to use specified GPL-licensed MySQL client -libraries (the ``Program'') despite the fact that not all FLOSS -licenses are compatible with version 2 of the GNU General Public -License (the ``GPL''). - -Legal Terms and Conditions - -As a special exception to the terms and conditions of version 2.0 -of the GPL: - - 1. You are free to distribute a Derivative Work that is formed - entirely from the Program and one or more works (each, a - "FLOSS Work") licensed under one or more of the licenses - listed below in section 1, as long as: - a. 
You obey the GPL in all respects for the Program and the - Derivative Work, except for identifiable sections of the - Derivative Work which are not derived from the Program, - and which can reasonably be considered independent and - separate works in themselves, - b. all identifiable sections of the Derivative Work which - are not derived from the Program, and which can - reasonably be considered independent and separate works - in themselves, - i. are distributed subject to one of the FLOSS licenses - listed below, and - ii. the object code or executable form of those sections - are accompanied by the complete corresponding - machine-readable source code for those sections on - the same medium and under the same FLOSS license as - the corresponding object code or executable forms of - those sections, and - c. any works which are aggregated with the Program or with a - Derivative Work on a volume of a storage or distribution - medium in accordance with the GPL, can reasonably be - considered independent and separate works in themselves - which are not derivatives of either the Program, a - Derivative Work or a FLOSS Work. - If the above conditions are not met, then the Program may only - be copied, modified, distributed or used under the terms and - conditions of the GPL or another valid licensing option from - MySQL AB. - - 2. FLOSS License List - -License name Version(s)/Copyright Date -Academic Free License 2.0 -Apache Software License 1.0/1.1/2.0 -Apple Public Source License 2.0 -Artistic license From Perl 5.8.0 -BSD license "July 22 1999" -Common Development and Distribution License (CDDL) 1.0 -Common Public License 1.0 -Eclipse Public License 1.0 -GNU Library or "Lesser" General Public License (LGPL) 2.0/2.1 -Jabber Open Source License 1.0 -MIT license (As listed in file MIT-License.txt) --- -Mozilla Public License (MPL) 1.0/1.1 -Open Software License 2.0 -OpenSSL license (with original SSLeay license) "2003" ("1998") -PHP License 3.0 -Python license (CNRI Python License) --- -Python Software Foundation License 2.1.1 -Sleepycat License "1999" -University of Illinois/NCSA Open Source License --- -W3C License "2001" -X11 License "2001" -Zlib/libpng License --- -Zope Public License 2.0 - - Due to the many variants of some of the above licenses, we - require that any version follow the 2003 version of the Free - Software Foundation's Free Software Definition - (http://www.gnu.org/philosophy/free-sw.html) or version 1.9 of - the Open Source Definition by the Open Source Initiative - (http://www.opensource.org/docs/definition.php). - - 3. Definitions - - a. Terms used, but not defined, herein shall have the - meaning provided in the GPL. - b. Derivative Work means a derivative work under copyright - law. - - 4. Applicability: This FLOSS Exception applies to all Programs - that contain a notice placed by MySQL AB saying that the - Program may be distributed under the terms of this FLOSS - Exception. If you create or distribute a work which is a - Derivative Work of both the Program and any other work - licensed under the GPL, then this FLOSS Exception is not - available for that work; thus, you must remove the FLOSS - Exception notice from that work and comply with the GPL in all - respects, including by retaining all GPL notices. You may - choose to redistribute a copy of the Program exclusively under - the terms of the GPL by removing the FLOSS Exception notice - from that copy of the Program, provided that the copy has - never been modified by you or any third party. - -Appendix A. 
Qualified Libraries and Packages - -The following is a non-exhaustive list of libraries and packages -which are covered by the FLOSS License Exception. Please note that -this appendix is provided merely as an additional service to -specific FLOSS projects wishing to simplify licensing information -for their users. Compliance with one of the licenses noted under -the "FLOSS license list" section remains a prerequisite. - -Package Name Qualifying License and Version -Apache Portable Runtime (APR) Apache Software License 2.0 -- cgit v1.2.1 From 34812d0f099732d24d717e9a8a6ef13c4fe89abc Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:08:15 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 1810.3987.44 > revision-id: alexander.nozdrin@oracle.com-20101124095339-oatpqq684jpbbso2 > parent: alexander.nozdrin@oracle.com-20101122132749-tl6m39zsfgvzi2mf > committer: Alexander Nozdrin > branch nick: mysql-5.0-bugteam > timestamp: Wed 2010-11-24 12:53:39 +0300 > message: > A follow-up for Bug#58340 (Remove Server GPL EXCEPTIONS-CLIENT file) -- remove all > EXCEPTIONS-CLIENT from all the places. --- Makefile.am | 2 +- libmysql/Makefile.am | 3 +-- libmysql/Makefile.shared | 3 +-- libmysql/conf_to_src.c | 3 +-- libmysql/dll.c | 3 +-- libmysql/errmsg.c | 3 +-- libmysql/get_password.c | 3 +-- libmysql/libmysql.c | 3 +-- libmysql/manager.c | 3 +-- libmysql_r/Makefile.am | 3 +-- scripts/make_binary_distribution.sh | 2 +- scripts/make_win_bin_dist | 4 ++-- 12 files changed, 13 insertions(+), 22 deletions(-) diff --git a/Makefile.am b/Makefile.am index 4ce753ad8aa..eb44ca7bfd8 100644 --- a/Makefile.am +++ b/Makefile.am @@ -19,7 +19,7 @@ AUTOMAKE_OPTIONS = foreign # These are built from source in the Docs directory EXTRA_DIST = INSTALL-SOURCE INSTALL-WIN-SOURCE \ - README COPYING EXCEPTIONS-CLIENT CMakeLists.txt + README COPYING CMakeLists.txt SUBDIRS = . include @docs_dirs@ @zlib_dir@ \ @readline_topdir@ sql-common scripts \ diff --git a/libmysql/Makefile.am b/libmysql/Makefile.am index f67abfd8ac6..cb6b70b84d2 100644 --- a/libmysql/Makefile.am +++ b/libmysql/Makefile.am @@ -5,8 +5,7 @@ # published by the Free Software Foundation. # # There are special exceptions to the terms and conditions of the GPL as it -# is applied to this software. View the full text of the exception in file -# EXCEPTIONS-CLIENT in the directory of this software distribution. +# is applied to this software. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql/Makefile.shared b/libmysql/Makefile.shared index 39c1975888a..a27949eb7ca 100644 --- a/libmysql/Makefile.shared +++ b/libmysql/Makefile.shared @@ -5,8 +5,7 @@ # published by the Free Software Foundation. # # There are special exceptions to the terms and conditions of the GPL as it -# is applied to this software. View the full text of the exception in file -# EXCEPTIONS-CLIENT in the directory of this software distribution. +# is applied to this software. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql/conf_to_src.c b/libmysql/conf_to_src.c index 785e3cad4c1..f39a2e1856f 100644 --- a/libmysql/conf_to_src.c +++ b/libmysql/conf_to_src.c @@ -5,8 +5,7 @@ the Free Software Foundation. 
There are special exceptions to the terms and conditions of the GPL as it - is applied to this software. View the full text of the exception in file - EXCEPTIONS-CLIENT in the directory of this software distribution. + is applied to this software. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql/dll.c b/libmysql/dll.c index 8fcf41c792c..8654c035c63 100644 --- a/libmysql/dll.c +++ b/libmysql/dll.c @@ -5,8 +5,7 @@ the Free Software Foundation. There are special exceptions to the terms and conditions of the GPL as it - is applied to this software. View the full text of the exception in file - EXCEPTIONS-CLIENT in the directory of this software distribution. + is applied to this software. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c index 95ee6862aa8..163632127c3 100644 --- a/libmysql/errmsg.c +++ b/libmysql/errmsg.c @@ -5,8 +5,7 @@ the Free Software Foundation. There are special exceptions to the terms and conditions of the GPL as it - is applied to this software. View the full text of the exception in file - EXCEPTIONS-CLIENT in the directory of this software distribution. + is applied to this software. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql/get_password.c b/libmysql/get_password.c index cbe5fce6949..63d4e68541f 100644 --- a/libmysql/get_password.c +++ b/libmysql/get_password.c @@ -5,8 +5,7 @@ the Free Software Foundation. There are special exceptions to the terms and conditions of the GPL as it - is applied to this software. View the full text of the exception in file - EXCEPTIONS-CLIENT in the directory of this software distribution. + is applied to this software. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index 8c612b6894e..4ecd976da3b 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -5,8 +5,7 @@ the Free Software Foundation. There are special exceptions to the terms and conditions of the GPL as it - is applied to this software. View the full text of the exception in file - EXCEPTIONS-CLIENT in the directory of this software distribution. + is applied to this software. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql/manager.c b/libmysql/manager.c index 53ffffa55c0..ef28f1bd323 100644 --- a/libmysql/manager.c +++ b/libmysql/manager.c @@ -5,8 +5,7 @@ the Free Software Foundation. There are special exceptions to the terms and conditions of the GPL as it - is applied to this software. View the full text of the exception in file - EXCEPTIONS-CLIENT in the directory of this software distribution. + is applied to this software. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/libmysql_r/Makefile.am b/libmysql_r/Makefile.am index 4a37dcfdec9..d4f0bea590b 100644 --- a/libmysql_r/Makefile.am +++ b/libmysql_r/Makefile.am @@ -5,8 +5,7 @@ # published by the Free Software Foundation. # # There are special exceptions to the terms and conditions of the GPL as it -# is applied to this software. 
View the full text of the exception in file -# EXCEPTIONS-CLIENT in the directory of this software distribution. +# is applied to this software. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 79ad585b7d5..cd12e1ecbc5 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -381,7 +381,7 @@ copyfileto() copyfileto $BASE/docs ChangeLog Docs/mysql.info copyfileto $BASE COPYING COPYING.LIB README Docs/INSTALL-BINARY \ - EXCEPTIONS-CLIENT LICENSE.mysql + LICENSE.mysql # Non platform-specific bin dir files: BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \ diff --git a/scripts/make_win_bin_dist b/scripts/make_win_bin_dist index 22970c95ddd..ac500dded8f 100755 --- a/scripts/make_win_bin_dist +++ b/scripts/make_win_bin_dist @@ -187,8 +187,8 @@ cp ChangeLog $DESTDIR/Docs/ || /bin/true cp support-files/my-*.ini $DESTDIR/ if [ -f COPYING ] ; then - cp COPYING EXCEPTIONS-CLIENT $DESTDIR/ - cp COPYING $DESTDIR/Docs/ + cp COPYING $DESTDIR/ + cp COPYING $DESTDIR/Docs/ fi # ---------------------------------------------------------------------- -- cgit v1.2.1 From 17234d60885d7afc7025a2ecfa0e567095e1603c Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:10:07 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3351.14.280 > revision-id: inaam.rana@oracle.com-20101026205418-aq835zpvxdskdf4c > parent: jimmy.yang@oracle.com-20101025055121-j319j363abfgdh6n > committer: Inaam Rana > branch nick: mysql-5.1-innodb > timestamp: Tue 2010-10-26 16:54:18 -0400 > message: > Bug #57611 ibdata file and continuous growing undo logs > rb://498 > > Fix handling of update_undo_logs at trx commit. Previously, when > rseg->update_undo_list grows beyond 500 the update_undo_logs were > marked with state TRX_UNDO_TO_FREE which should have been > TRX_UNDO_TO_PURGE. > > Approved by: Sunny Bains --- storage/innobase/trx/trx0undo.c | 18 ++++-------------- storage/innodb_plugin/trx/trx0undo.c | 18 ++++-------------- 2 files changed, 8 insertions(+), 28 deletions(-) diff --git a/storage/innobase/trx/trx0undo.c b/storage/innobase/trx/trx0undo.c index 4547ee9ea64..329565943c8 100644 --- a/storage/innobase/trx/trx0undo.c +++ b/storage/innobase/trx/trx0undo.c @@ -1752,21 +1752,11 @@ trx_undo_set_state_at_finish( if (undo->size == 1 && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE) - < TRX_UNDO_PAGE_REUSE_LIMIT) { + < TRX_UNDO_PAGE_REUSE_LIMIT + && UT_LIST_GET_LEN(rseg->update_undo_list) < 500 + && UT_LIST_GET_LEN(rseg->insert_undo_list) < 500) { - /* This is a heuristic to avoid the problem of all UNDO - slots ending up in one of the UNDO lists. Previously if - the server crashed with all the slots in one of the lists, - transactions that required the slots of a different type - would fail for lack of slots. 
*/ - - if (UT_LIST_GET_LEN(rseg->update_undo_list) < 500 - && UT_LIST_GET_LEN(rseg->insert_undo_list) < 500) { - - state = TRX_UNDO_CACHED; - } else { - state = TRX_UNDO_TO_FREE; - } + state = TRX_UNDO_CACHED; } else if (undo->type == TRX_UNDO_INSERT) { diff --git a/storage/innodb_plugin/trx/trx0undo.c b/storage/innodb_plugin/trx/trx0undo.c index c8a4b15e48b..76e88948e41 100644 --- a/storage/innodb_plugin/trx/trx0undo.c +++ b/storage/innodb_plugin/trx/trx0undo.c @@ -1823,21 +1823,11 @@ trx_undo_set_state_at_finish( if (undo->size == 1 && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE) - < TRX_UNDO_PAGE_REUSE_LIMIT) { + < TRX_UNDO_PAGE_REUSE_LIMIT + && UT_LIST_GET_LEN(rseg->update_undo_list) < 500 + && UT_LIST_GET_LEN(rseg->insert_undo_list) < 500) { - /* This is a heuristic to avoid the problem of all UNDO - slots ending up in one of the UNDO lists. Previously if - the server crashed with all the slots in one of the lists, - transactions that required the slots of a different type - would fail for lack of slots. */ - - if (UT_LIST_GET_LEN(rseg->update_undo_list) < 500 - && UT_LIST_GET_LEN(rseg->insert_undo_list) < 500) { - - state = TRX_UNDO_CACHED; - } else { - state = TRX_UNDO_TO_FREE; - } + state = TRX_UNDO_CACHED; } else if (undo->type == TRX_UNDO_INSERT) { -- cgit v1.2.1 From aee322264fd86dd503939cfcff906e00d73bad1c Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:13:20 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3452.13.4 [merge] > revision-id: mmakela@bk-internal.mysql.com-20101011192851-u3bdt7erjkrgn90t > parent: marko.makela@oracle.com-20101011081800-sby6kmb8n1mnryfq > parent: jimmy.yang@oracle.com-20101011123613-guz1qgdktywmel1g > committer: Marko Makela > branch nick: mysql-5.1-security > timestamp: Mon 2010-10-11 21:28:51 +0200 > message: > Merge Bug #57345, Bug #56982, Bug#53307 test from mysql-5.1-innodb > ------------------------------------------------------------ > Use --include-merges or -n0 to see merged revisions. --- mysql-test/t/disabled.def | 1 - storage/innodb_plugin/handler/ha_innodb.cc | 6 +++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index cede26f555a..bb931fb7b14 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -11,6 +11,5 @@ ############################################################################## kill : Bug#37780 2008-12-03 HHunger need some changes to be robust enough for pushbuild. 
query_cache_28249 : Bug#43861 2009-03-25 main.query_cache_28249 fails sporadically -partition_innodb_plugin : Bug#53307 2010-04-30 VasilDimov valgrind warnings main.mysqlhotcopy_myisam : bug#54129 2010-06-04 Horst main.mysqlhotcopy_archive: bug#54129 2010-06-04 Horst diff --git a/storage/innodb_plugin/handler/ha_innodb.cc b/storage/innodb_plugin/handler/ha_innodb.cc index 990d99b9cf4..350eb8ae026 100644 --- a/storage/innodb_plugin/handler/ha_innodb.cc +++ b/storage/innodb_plugin/handler/ha_innodb.cc @@ -9409,7 +9409,11 @@ ha_innobase::innobase_peek_autoinc(void) auto_inc = dict_table_autoinc_read(innodb_table); - ut_a(auto_inc > 0); + if (auto_inc == 0) { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB: AUTOINC next value generation " + "is disabled for '%s'\n", innodb_table->name); + } dict_table_autoinc_unlock(innodb_table); -- cgit v1.2.1 From 9b656643810e858eea13fb0633d834580754ee46 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:15:36 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3452.13.54 > revision-id: oystein.grovlen@oracle.com-20110112093715-tc076voaxwblqk8v > parent: georgi.kodinov@oracle.com-20110110130833-1c9q21mr7zoq07vg > committer: Oystein Grovlen > branch nick: mysql-5.1-security > timestamp: Wed 2011-01-12 10:37:15 +0100 > message: > Bug#59211: Select Returns Different Value for min(year) Function > > get_year_value() contains code to convert 2-digits year to > 4-digits. The fix for Bug#49910 added a check on the size of > the underlying field so that this conversion is not done for > YEAR(4) values. (Since otherwise one would convert invalid > YEAR(4) values to valid ones.) > > The existing check does not work when Item_cache is used, since > it is not detected when the cache is based on a Field. The > reported change in behavior is due to Bug#58030 which added > extra cached items in min/max computations. > > The elegant solution would be to implement > Item_cache::real_item() to return the underlying Item. > However, some side effects are observed (change in explain > output) that indicates that such a change is not straight- > forward, and definitely not appropriate for an MRU. > > Instead, a Item_cache::field() method has been added in order > to get access to the underlying field. (This field() method > eliminates the need for Item_cache::eq_def() used in > test_if_ref(), but in order to limit the scope of this fix, > that code has been left as is.) 
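
A minimal, self-contained C++ sketch of the idea behind this fix follows. It is not the server code: SourceField, Value, comes_from_year4() and year_for_comparison() are illustrative stand-ins for Field, Item, the new Item_cache::field() accessor and get_year_value(), and the 00-69 / 70-99 expansion shown is the documented two-digit YEAR convention rather than the exact arithmetic in item_cmpfunc.cc. What it demonstrates is the core point of the message above: the YEAR(4) check must also look through a cache wrapper, otherwise a cached 0000 read from a YEAR(4) column would be "expanded" and MIN()/MAX() would return the wrong value.

// Standalone illustration only -- all names below are hypothetical, not
// MySQL internals. Requires C++17 for std::optional.
#include <cassert>
#include <optional>

struct SourceField {           // stand-in for Field: describes the column a value came from
  bool is_year_type;           // MYSQL_TYPE_YEAR in the real server
  unsigned display_length;     // 2 or 4 for YEAR columns
};

struct Value {
  long v;                                  // raw year value being compared
  std::optional<SourceField> direct_field; // set when the item is a plain field item
  std::optional<SourceField> cached_field; // set when the item is a cache over a field
};

// Mirrors the fixed check: resolve the underlying field either directly or
// through the cache wrapper before deciding whether the value is a YEAR(4).
static bool comes_from_year4(const Value& val) {
  const SourceField* f = nullptr;
  if (val.direct_field)       f = &*val.direct_field;
  else if (val.cached_field)  f = &*val.cached_field;   // the part Bug#59211 adds
  return f && f->is_year_type && f->display_length == 4;
}

// Expand a 2-digit year into the 2000-2069 / 1970-1999 ranges, but only when
// the value does not already come from a 4-digit YEAR column (expanding a
// stored 0000 from YEAR(4) would corrupt it).
static long year_for_comparison(const Value& val) {
  long value = val.v;
  if (!comes_from_year4(val)) {
    if (value < 70)        value += 2000;
    else if (value <= 99)  value += 1900;
  }
  return value;
}

int main() {
  // A cached value originating from a YEAR(4) column stays as-is,
  // so MIN(c1) over (1901, 2155, 0000) can still report 0.
  Value cached_year4{0, std::nullopt, SourceField{true, 4}};
  assert(year_for_comparison(cached_year4) == 0);

  // A bare two-digit value with no YEAR(4) source is still expanded.
  Value literal_two_digit{69, std::nullopt, std::nullopt};
  assert(year_for_comparison(literal_two_digit) == 2069);
  return 0;
}

Before the fix, only the direct_field branch existed, so the cache introduced by the Bug#58030 min/max change hid the YEAR(4) origin and the zero value was wrongly expanded.
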
--- mysql-test/r/type_year.result | 14 ++++++++++++++ mysql-test/t/type_year.test | 10 ++++++++++ sql/item.h | 13 ++++++++++--- sql/item_cmpfunc.cc | 9 ++++++--- 4 files changed, 40 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/type_year.result b/mysql-test/r/type_year.result index 8948214f565..2dc491c6166 100644 --- a/mysql-test/r/type_year.result +++ b/mysql-test/r/type_year.result @@ -341,4 +341,18 @@ ta_y s tb_y s 2001 2001 2001 2001 DROP TABLE t1; # +# Bug #59211: Select Returns Different Value for min(year) Function +# +CREATE TABLE t1(c1 YEAR(4)); +INSERT INTO t1 VALUES (1901),(2155),(0000); +SELECT * FROM t1; +c1 +1901 +2155 +0000 +SELECT COUNT(*) AS total_rows, MIN(c1) AS min_value, MAX(c1) FROM t1; +total_rows min_value MAX(c1) +3 0 2155 +DROP TABLE t1; +# End of 5.1 tests diff --git a/mysql-test/t/type_year.test b/mysql-test/t/type_year.test index d8da4ccc82c..1a9e66478e1 100644 --- a/mysql-test/t/type_year.test +++ b/mysql-test/t/type_year.test @@ -149,6 +149,16 @@ SELECT ta.y AS ta_y, ta.s, tb.y AS tb_y, tb.s FROM t1 ta, t1 tb HAVING ta_y = tb DROP TABLE t1; +--echo # +--echo # Bug #59211: Select Returns Different Value for min(year) Function +--echo # + +CREATE TABLE t1(c1 YEAR(4)); +INSERT INTO t1 VALUES (1901),(2155),(0000); +SELECT * FROM t1; +SELECT COUNT(*) AS total_rows, MIN(c1) AS min_value, MAX(c1) FROM t1; +DROP TABLE t1; + --echo # --echo End of 5.1 tests diff --git a/sql/item.h b/sql/item.h index 57abb43010e..fd85fc780af 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2960,11 +2960,10 @@ class Item_cache: public Item_basic_constant protected: Item *example; table_map used_table_map; - /* - Field that this object will get value from. This is set/used by + /** + Field that this object will get value from. This is used by index-based subquery engines to detect and remove the equality injected by IN->EXISTS transformation. - For all other uses of Item_cache, cached_field doesn't matter. */ Field *cached_field; enum enum_field_types cached_field_type; @@ -3021,6 +3020,14 @@ public: { return this == item; } + + /** + If this item caches a field value, return pointer to underlying field. + + @return Pointer to field, or NULL if this is not a cache for a field value. + */ + Field* field() { return cached_field; } + virtual void store(Item *item); virtual bool cache_value()= 0; }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 6987dd9e053..8b8ee4a0054 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1196,9 +1196,12 @@ get_year_value(THD *thd, Item ***item_arg, Item **cache_arg, value of 2000. 
*/ Item *real_item= item->real_item(); - if (!(real_item->type() == Item::FIELD_ITEM && - ((Item_field *)real_item)->field->type() == MYSQL_TYPE_YEAR && - ((Item_field *)real_item)->field->field_length == 4)) + Field *field= NULL; + if (real_item->type() == Item::FIELD_ITEM) + field= ((Item_field *)real_item)->field; + else if (real_item->type() == Item::CACHE_ITEM) + field= ((Item_cache *)real_item)->field(); + if (!(field && field->type() == MYSQL_TYPE_YEAR && field->field_length == 4)) { if (value < 70) value+= 100; -- cgit v1.2.1 From f31cc126b8faf277529ec327baa1d0d3b8e8d049 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:21:06 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3452.17.1 > revision-id: mattias.jonsson@oracle.com-20101005125751-ds92svhhb13ds8ri > parent: sergey.glukhov@sun.com-20101004085126-ia6veky4w6j87i43 > committer: Mattias Jonsson > branch nick: b55091-51-security > timestamp: Tue 2010-10-05 14:57:51 +0200 > message: > Bug#55091: Server crashes on ADD PARTITION after a failed attempt > > In case of failure in ALTER ... PARTITION under LOCK TABLE > the server could crash, due to it had modified the locked > table object, which was not reverted in case of failure, > resulting in a bad table definition used after the failed > command. > > Solved by always closing the LOCKED TABLE, even in case > of error. > > Note: this is a 5.1-only fix, bug#56172 fixed it in 5.5+ --- mysql-test/r/partition_innodb_plugin.result | 73 +++++++++++++++++++++++++++++ mysql-test/t/partition_innodb_plugin.test | 66 ++++++++++++++++++++++++++ sql/sql_partition.cc | 21 ++++++++- 3 files changed, 159 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/partition_innodb_plugin.result b/mysql-test/r/partition_innodb_plugin.result index dd91eee316a..f6b5ce84338 100644 --- a/mysql-test/r/partition_innodb_plugin.result +++ b/mysql-test/r/partition_innodb_plugin.result @@ -1,3 +1,76 @@ +call mtr.add_suppression("nnoDB: Error: table `test`.`t1` .* Partition.* InnoDB internal"); +# +# Bug#55091: Server crashes on ADD PARTITION after a failed attempt +# +SET @old_innodb_file_format_check = @@global.innodb_file_format_check; +SET @old_innodb_file_format = @@global.innodb_file_format; +SET @old_innodb_file_per_table = @@global.innodb_file_per_table; +SET @old_innodb_strict_mode = @@global.innodb_strict_mode; +SET @@global.innodb_file_format = Barracuda, +@@global.innodb_file_per_table = ON, +@@global.innodb_strict_mode = ON; +# Connection con1 +CREATE TABLE t1 (id INT NOT NULL +PRIMARY KEY, +user_num CHAR(10) +) ENGINE = InnoDB +KEY_BLOCK_SIZE=4 +PARTITION BY HASH(id) PARTITIONS 1; +t1#P#p0.ibd +t1.frm +t1.par +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `user_num` char(10) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=4 +/*!50100 PARTITION BY HASH (id) +PARTITIONS 1 */ +SET GLOBAL innodb_file_per_table = OFF; +# Connection con2 +LOCK TABLE t1 WRITE; +# ALTER fails because COMPRESSED/KEY_BLOCK_SIZE +# are incompatible with innodb_file_per_table = OFF; +ALTER TABLE t1 ADD PARTITION PARTITIONS 1; +ERROR HY000: Got error 1478 from storage engine +t1#P#p0.ibd +t1.frm +t1.par +# This SET is not needed to reproduce the bug, +# it is here just to make the test case more realistic +SET innodb_strict_mode = OFF; +ALTER TABLE t1 ADD PARTITION PARTITIONS 2; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires 
innodb_file_per_table. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4. +t1.frm +t1.par +ALTER TABLE t1 REBUILD PARTITION p0; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4. +UNLOCK TABLES; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `user_num` char(10) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=4 +/*!50100 PARTITION BY HASH (id) +PARTITIONS 3 */ +DROP TABLE t1; +# Connection default +SET @@global.innodb_strict_mode = @old_innodb_strict_mode; +SET @@global.innodb_file_format = @old_innodb_file_format; +SET @@global.innodb_file_per_table = @old_innodb_file_per_table; +SET @@global.innodb_file_format_check = @old_innodb_file_format_check; SET NAMES utf8; CREATE TABLE `t``\""e` (a INT, PRIMARY KEY (a)) ENGINE=InnoDB diff --git a/mysql-test/t/partition_innodb_plugin.test b/mysql-test/t/partition_innodb_plugin.test index eeb990c0d81..114adf67180 100644 --- a/mysql-test/t/partition_innodb_plugin.test +++ b/mysql-test/t/partition_innodb_plugin.test @@ -1,5 +1,71 @@ --source include/have_partition.inc --source include/have_innodb_plugin.inc +# Remove the line below when bug#53307 is solved. +--source include/not_valgrind.inc + +let $MYSQLD_DATADIR= `SELECT @@datadir`; + +call mtr.add_suppression("nnoDB: Error: table `test`.`t1` .* Partition.* InnoDB internal"); +--echo # +--echo # Bug#55091: Server crashes on ADD PARTITION after a failed attempt +--echo # +SET @old_innodb_file_format_check = @@global.innodb_file_format_check; +SET @old_innodb_file_format = @@global.innodb_file_format; +SET @old_innodb_file_per_table = @@global.innodb_file_per_table; +SET @old_innodb_strict_mode = @@global.innodb_strict_mode; +SET @@global.innodb_file_format = Barracuda, +@@global.innodb_file_per_table = ON, +@@global.innodb_strict_mode = ON; + +--echo # Connection con1 +--connect(con1,localhost,root,,) + +CREATE TABLE t1 (id INT NOT NULL +PRIMARY KEY, +user_num CHAR(10) +) ENGINE = InnoDB +KEY_BLOCK_SIZE=4 +PARTITION BY HASH(id) PARTITIONS 1; + +--list_files $MYSQLD_DATADIR/test +SHOW CREATE TABLE t1; + +SET GLOBAL innodb_file_per_table = OFF; + +--disconnect con1 +--connect(con2,localhost,root,,) +--echo # Connection con2 + +LOCK TABLE t1 WRITE; + +--echo # ALTER fails because COMPRESSED/KEY_BLOCK_SIZE +--echo # are incompatible with innodb_file_per_table = OFF; + +--error ER_GET_ERRNO +ALTER TABLE t1 ADD PARTITION PARTITIONS 1; + +--list_files $MYSQLD_DATADIR/test +--echo # This SET is not needed to reproduce the bug, +--echo # it is here just to make the test case more realistic +SET innodb_strict_mode = OFF; + +ALTER TABLE t1 ADD PARTITION PARTITIONS 2; +--list_files $MYSQLD_DATADIR/test + +# really bug#56172 +ALTER TABLE t1 REBUILD PARTITION p0; + +UNLOCK TABLES; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--disconnect con2 +--connection default +--echo # Connection default +SET @@global.innodb_strict_mode = @old_innodb_strict_mode; +SET @@global.innodb_file_format = @old_innodb_file_format; +SET @@global.innodb_file_per_table = @old_innodb_file_per_table; +SET @@global.innodb_file_format_check = @old_innodb_file_format_check; # # Bug#32430 - show engine innodb status causes errors diff --git 
a/sql/sql_partition.cc b/sql/sql_partition.cc index 76caa2b0c8d..7b0c47865d8 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -5934,6 +5934,12 @@ static void alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt) int err; if (lpt->thd->locked_tables) { + /* + Close the table if open, to remove/destroy the already altered + table->part_info object, so that it is not reused. + */ + if (lpt->table->db_stat) + abort_and_upgrade_lock_and_close_table(lpt); /* When we have the table locked, it is necessary to reopen the table since all table objects were closed and removed as part of the @@ -6436,7 +6442,20 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, table, table_list, FALSE, NULL, written_bin_log)); err: - close_thread_tables(thd); + if (thd->locked_tables) + { + /* + table->part_info was altered in prep_alter_part_table and must be + destroyed and recreated, since otherwise it will be reused, since + we are under LOCK TABLE. + */ + alter_partition_lock_handling(lpt); + } + else + { + /* Force the table to be closed to avoid reuse of the table->part_info */ + close_thread_tables(thd); + } DBUG_RETURN(TRUE); } #endif -- cgit v1.2.1 From 6b9865571454b6def2a6c9a22be7f76964c58d82 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:22:26 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3496.1.39 > revision-id: jon.hauglid@oracle.com-20101019081957-zf2ohl7wgnhvnnb0 > parent: tor.didriksen@oracle.com-20101019070648-jhi9ezq3b5qk4wcm > committer: Jon Olav Hauglid > branch nick: mysql-5.1-bugteam-bug57274 > timestamp: Tue 2010-10-19 10:19:57 +0200 > message: > Bug #57274 SET GLOBAL debug crashes on Solaris in embedded server mode > (variables_debug fails) > > The problem was that "SET GLOBAL debug" could cause a crash on Solaris. > The crash happened if the server failed to open the trace file given in > the "SET GLOBAL debug" statement. This caused an error message to be > printed to stderr containing the process name. However, printing to > stderr crashed the server since the pointer to the process name had > not been initialized. > > This patch fixes the problem by initializing the process name > properly when doing "SET GLOBAL debug". > > No test case added as this bug was repeatable with existing test > coverage in variables_debug.test. --- dbug/dbug.c | 1 + 1 file changed, 1 insertion(+) diff --git a/dbug/dbug.c b/dbug/dbug.c index 76723fa8767..7372f1b2c0c 100644 --- a/dbug/dbug.c +++ b/dbug/dbug.c @@ -744,6 +744,7 @@ void _db_set_init_(const char *control) CODE_STATE tmp_cs; bzero((uchar*) &tmp_cs, sizeof(tmp_cs)); tmp_cs.stack= &init_settings; + tmp_cs.process= db_process ? 
db_process : "dbug"; DbugParse(&tmp_cs, control); } -- cgit v1.2.1 From 829f6b85e118c1c9e94a86154c5fe21a690e30a6 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:26:44 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3504 > revision-id: svoj@oracle.com-20101111100317-3bjzbj6c2ihfzb9t > parent: dmitry.shulga@oracle.com-20101111045251-jl1spfh3xjti1sll > committer: Sergey Vojtovich > branch nick: mysql-5.1-bugteam-bug58079 > timestamp: Thu 2010-11-11 13:03:17 +0300 > message: > BUG#58079 - Remove the IBM DB2 storage engine --- mysql-test/suite/ibmdb2i/include/have_i54.inc | 20 - mysql-test/suite/ibmdb2i/include/have_i61.inc | 20 - mysql-test/suite/ibmdb2i/include/have_ibmdb2i.inc | 6 - .../suite/ibmdb2i/r/ibmdb2i_bug_44020.result | 11 - .../suite/ibmdb2i/r/ibmdb2i_bug_44025.result | 4 - .../suite/ibmdb2i/r/ibmdb2i_bug_44232.result | 4 - .../suite/ibmdb2i/r/ibmdb2i_bug_44610.result | 18 - .../suite/ibmdb2i/r/ibmdb2i_bug_45196.result | 33 - .../suite/ibmdb2i/r/ibmdb2i_bug_45793.result | 7 - .../suite/ibmdb2i/r/ibmdb2i_bug_45983.result | 20 - .../suite/ibmdb2i/r/ibmdb2i_bug_49329.result | 9 - .../suite/ibmdb2i/r/ibmdb2i_collations.result | 1204 ------- mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44020.test | 9 - mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44025.test | 9 - mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44232.test | 8 - mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44610.test | 28 - mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45196.test | 26 - mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45793.test | 11 - mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45983.test | 47 - mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_49329.test | 10 - mysql-test/suite/ibmdb2i/t/ibmdb2i_collations.test | 44 - storage/ibmdb2i/CMakeLists.txt | 25 - storage/ibmdb2i/Makefile.am | 54 - storage/ibmdb2i/db2i_blobCollection.cc | 107 - storage/ibmdb2i/db2i_blobCollection.h | 151 - storage/ibmdb2i/db2i_charsetSupport.cc | 826 ----- storage/ibmdb2i/db2i_charsetSupport.h | 65 - storage/ibmdb2i/db2i_collationSupport.cc | 355 --- storage/ibmdb2i/db2i_collationSupport.h | 48 - storage/ibmdb2i/db2i_constraints.cc | 672 ---- storage/ibmdb2i/db2i_conversion.cc | 1459 --------- storage/ibmdb2i/db2i_errors.cc | 297 -- storage/ibmdb2i/db2i_errors.h | 93 - storage/ibmdb2i/db2i_file.cc | 556 ---- storage/ibmdb2i/db2i_file.h | 445 --- storage/ibmdb2i/db2i_global.h | 138 - storage/ibmdb2i/db2i_iconv.h | 51 - storage/ibmdb2i/db2i_ileBridge.cc | 1342 -------- storage/ibmdb2i/db2i_ileBridge.h | 499 --- storage/ibmdb2i/db2i_ioBuffers.cc | 332 -- storage/ibmdb2i/db2i_ioBuffers.h | 416 --- storage/ibmdb2i/db2i_misc.h | 129 - storage/ibmdb2i/db2i_myconv.cc | 1498 --------- storage/ibmdb2i/db2i_myconv.h | 3201 ------------------- storage/ibmdb2i/db2i_rir.cc | 686 ---- storage/ibmdb2i/db2i_safeString.h | 98 - storage/ibmdb2i/db2i_sqlStatementStream.cc | 86 - storage/ibmdb2i/db2i_sqlStatementStream.h | 151 - storage/ibmdb2i/db2i_validatedPointer.h | 162 - storage/ibmdb2i/ha_ibmdb2i.cc | 3359 -------------------- storage/ibmdb2i/ha_ibmdb2i.h | 822 ----- storage/ibmdb2i/plug.in | 12 - 52 files changed, 19683 deletions(-) delete mode 100755 mysql-test/suite/ibmdb2i/include/have_i54.inc delete mode 100644 mysql-test/suite/ibmdb2i/include/have_i61.inc delete mode 100644 mysql-test/suite/ibmdb2i/include/have_ibmdb2i.inc delete mode 100644 mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44020.result delete mode 100644 mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44025.result delete mode 100755 
mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44232.result delete mode 100755 mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44610.result delete mode 100644 mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45196.result delete mode 100644 mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45793.result delete mode 100644 mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45983.result delete mode 100644 mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_49329.result delete mode 100644 mysql-test/suite/ibmdb2i/r/ibmdb2i_collations.result delete mode 100644 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44020.test delete mode 100644 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44025.test delete mode 100755 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44232.test delete mode 100755 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44610.test delete mode 100644 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45196.test delete mode 100644 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45793.test delete mode 100644 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45983.test delete mode 100644 mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_49329.test delete mode 100644 mysql-test/suite/ibmdb2i/t/ibmdb2i_collations.test delete mode 100644 storage/ibmdb2i/CMakeLists.txt delete mode 100644 storage/ibmdb2i/Makefile.am delete mode 100644 storage/ibmdb2i/db2i_blobCollection.cc delete mode 100644 storage/ibmdb2i/db2i_blobCollection.h delete mode 100644 storage/ibmdb2i/db2i_charsetSupport.cc delete mode 100644 storage/ibmdb2i/db2i_charsetSupport.h delete mode 100644 storage/ibmdb2i/db2i_collationSupport.cc delete mode 100644 storage/ibmdb2i/db2i_collationSupport.h delete mode 100644 storage/ibmdb2i/db2i_constraints.cc delete mode 100644 storage/ibmdb2i/db2i_conversion.cc delete mode 100644 storage/ibmdb2i/db2i_errors.cc delete mode 100644 storage/ibmdb2i/db2i_errors.h delete mode 100644 storage/ibmdb2i/db2i_file.cc delete mode 100644 storage/ibmdb2i/db2i_file.h delete mode 100644 storage/ibmdb2i/db2i_global.h delete mode 100644 storage/ibmdb2i/db2i_iconv.h delete mode 100644 storage/ibmdb2i/db2i_ileBridge.cc delete mode 100644 storage/ibmdb2i/db2i_ileBridge.h delete mode 100644 storage/ibmdb2i/db2i_ioBuffers.cc delete mode 100644 storage/ibmdb2i/db2i_ioBuffers.h delete mode 100644 storage/ibmdb2i/db2i_misc.h delete mode 100644 storage/ibmdb2i/db2i_myconv.cc delete mode 100644 storage/ibmdb2i/db2i_myconv.h delete mode 100644 storage/ibmdb2i/db2i_rir.cc delete mode 100644 storage/ibmdb2i/db2i_safeString.h delete mode 100644 storage/ibmdb2i/db2i_sqlStatementStream.cc delete mode 100644 storage/ibmdb2i/db2i_sqlStatementStream.h delete mode 100644 storage/ibmdb2i/db2i_validatedPointer.h delete mode 100644 storage/ibmdb2i/ha_ibmdb2i.cc delete mode 100644 storage/ibmdb2i/ha_ibmdb2i.h delete mode 100644 storage/ibmdb2i/plug.in diff --git a/mysql-test/suite/ibmdb2i/include/have_i54.inc b/mysql-test/suite/ibmdb2i/include/have_i54.inc deleted file mode 100755 index 7054e196153..00000000000 --- a/mysql-test/suite/ibmdb2i/include/have_i54.inc +++ /dev/null @@ -1,20 +0,0 @@ -# Check for IBM i 6.1 or later ---disable_query_log -system uname -rv > $MYSQLTEST_VARDIR/tmp/version; ---disable_warnings -drop table if exists uname_vr; ---enable_warnings -create temporary table uname_vr (r int, v int); ---disable_warnings -eval LOAD DATA INFILE "$MYSQLTEST_VARDIR/tmp/version" into table uname_vr fields terminated by ' '; ---enable_warnings -let $ok = `select count(*) from uname_vr where v = 5 and r = 4`; -drop table uname_vr; -remove_file $MYSQLTEST_VARDIR/tmp/version; ---enable_query_log -if (!$ok) -{ - skip "Need IBM i 5.4 or later"; -} - - diff --git 
a/mysql-test/suite/ibmdb2i/include/have_i61.inc b/mysql-test/suite/ibmdb2i/include/have_i61.inc deleted file mode 100644 index 84b9a17c1d8..00000000000 --- a/mysql-test/suite/ibmdb2i/include/have_i61.inc +++ /dev/null @@ -1,20 +0,0 @@ -# Check for IBM i 6.1 or later ---disable_query_log -system uname -rv > $MYSQLTEST_VARDIR/tmp/version; ---disable_warnings -drop table if exists uname_vr; ---enable_warnings -create temporary table uname_vr (r int, v int); ---disable_warnings -eval LOAD DATA INFILE "$MYSQLTEST_VARDIR/tmp/version" into table uname_vr fields terminated by ' '; ---enable_warnings -let $ok = `select count(*) from uname_vr where v > 5`; -drop table uname_vr; -remove_file $MYSQLTEST_VARDIR/tmp/version; ---enable_query_log -if (!$ok) -{ - skip "Need IBM i 6.1 or later"; -} - - diff --git a/mysql-test/suite/ibmdb2i/include/have_ibmdb2i.inc b/mysql-test/suite/ibmdb2i/include/have_ibmdb2i.inc deleted file mode 100644 index f3ef0b4f1ac..00000000000 --- a/mysql-test/suite/ibmdb2i/include/have_ibmdb2i.inc +++ /dev/null @@ -1,6 +0,0 @@ -if (!`SELECT count(*) FROM information_schema.engines WHERE - (support = 'YES' OR support = 'DEFAULT') AND - engine = 'ibmdb2i'`) -{ - skip Need ibmdb2i engine; -} diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44020.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44020.result deleted file mode 100644 index ddf92db6bca..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44020.result +++ /dev/null @@ -1,11 +0,0 @@ -create schema `A12345_@$#`; -create table `A12345_@$#`.t1 (i int) engine=ibmdb2i; -show create table `A12345_@$#`.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `i` int(11) DEFAULT NULL -) ENGINE=IBMDB2I DEFAULT CHARSET=latin1 -select * from `A12345_@$#`.t1; -i -drop table `A12345_@$#`.t1; -drop schema `A12345_@$#`; diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44025.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44025.result deleted file mode 100644 index 10a3070fcc4..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44025.result +++ /dev/null @@ -1,4 +0,0 @@ -create table t1 (c char(10) collate utf8_swedish_ci, index(c)) engine=ibmdb2i; -drop table t1; -create table t1 (c char(10) collate ucs2_swedish_ci, index(c)) engine=ibmdb2i; -drop table t1; diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44232.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44232.result deleted file mode 100755 index 8276b401073..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44232.result +++ /dev/null @@ -1,4 +0,0 @@ -create table t1 (c char(1) character set armscii8) engine=ibmdb2i; -ERROR HY000: Can't create table 'test.t1' (errno: 2504) -create table t1 (c char(1) character set eucjpms ) engine=ibmdb2i; -ERROR HY000: Can't create table 'test.t1' (errno: 2504) diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44610.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44610.result deleted file mode 100755 index 311e800e1b0..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_44610.result +++ /dev/null @@ -1,18 +0,0 @@ -create table ABC (i int) engine=ibmdb2i; -drop table ABC; -create table `1234567890ABC` (i int) engine=ibmdb2i; -drop table `1234567890ABC`; -create table `!@#$%` (i int) engine=ibmdb2i; -drop table `!@#$%`; -create table `ABCD#########` (i int) engine=ibmdb2i; -drop table `ABCD#########`; -create table `_` (i int) engine=ibmdb2i; -drop table `_`; -create table `abc##def` (i int) engine=ibmdb2i; -drop table `abc##def`; -set names utf8; -create table Ä° (s1 int) engine=ibmdb2i; -drop table Ä°; -create table Ä°Ä° 
(s1 int) engine=ibmdb2i; -drop table Ä°Ä°; -set names latin1; diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45196.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45196.result deleted file mode 100644 index 916e1d93ee5..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45196.result +++ /dev/null @@ -1,33 +0,0 @@ -drop table if exists t1; -create table t1 (c char(10), index(c)) collate ucs2_czech_ci engine=ibmdb2i; -insert into t1 values ("ch"),("h"),("i"); -select * from t1 order by c; -c -h -ch -i -drop table t1; -create table t1 (c char(10), index(c)) collate utf8_czech_ci engine=ibmdb2i; -insert into t1 values ("ch"),("h"),("i"); -select * from t1 order by c; -c -h -ch -i -drop table t1; -create table t1 (c char(10), index(c)) collate ucs2_danish_ci engine=ibmdb2i; -insert into t1 values("abc"),("abcd"),("aaaa"); -select c from t1 order by c; -c -abc -abcd -aaaa -drop table t1; -create table t1 (c char(10), index(c)) collate utf8_danish_ci engine=ibmdb2i; -insert into t1 values("abc"),("abcd"),("aaaa"); -select c from t1 order by c; -c -abc -abcd -aaaa -drop table t1; diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45793.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45793.result deleted file mode 100644 index 2392b746877..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45793.result +++ /dev/null @@ -1,7 +0,0 @@ -drop table if exists t1; -create table t1 (c char(10), index(c)) charset macce engine=ibmdb2i; -insert into t1 values ("test"); -select * from t1 order by c; -c -test -drop table t1; diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45983.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45983.result deleted file mode 100644 index b9f4dcfc656..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_45983.result +++ /dev/null @@ -1,20 +0,0 @@ -set ibmdb2i_create_index_option=1; -drop schema if exists test1; -create schema test1; -use test1; -CREATE TABLE t1 (f int primary key, index(f)) engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (f char(10) collate utf8_bin primary key, index(f)) engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (f char(10) collate latin1_swedish_ci primary key, index(f)) engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (f char(10) collate latin1_swedish_ci primary key, i int, index i(i,f)) engine=ibmdb2i; -drop table t1; -create table fd (SQSSEQ CHAR(10)) engine=ibmdb2i; -select * from fd; -SQSSEQ -*HEX -*HEX -*HEX -*HEX -drop table fd; diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_49329.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_49329.result deleted file mode 100644 index d5bfc2579fd..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_bug_49329.result +++ /dev/null @@ -1,9 +0,0 @@ -create table ABC (i int) engine=ibmdb2i; -insert into ABC values(1); -create table abc (i int) engine=ibmdb2i; -insert into abc values (2); -select * from ABC; -i -1 -drop table ABC; -drop table abc; diff --git a/mysql-test/suite/ibmdb2i/r/ibmdb2i_collations.result b/mysql-test/suite/ibmdb2i/r/ibmdb2i_collations.result deleted file mode 100644 index 4f7d71cab2d..00000000000 --- a/mysql-test/suite/ibmdb2i/r/ibmdb2i_collations.result +++ /dev/null @@ -1,1204 +0,0 @@ -drop table if exists t1, ffd, fd; -CREATE TABLE t1 (armscii8_bin integer, c char(10), v varchar(20), index(c), index(v)) collate armscii8_bin engine=ibmdb2i; -CREATE TABLE t1 (armscii8_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate armscii8_general_ci engine=ibmdb2i; -CREATE TABLE t1 (ascii_bin integer, c char(10), v varchar(20), index(c), index(v)) 
collate ascii_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (ascii_bin char(10) primary key) collate ascii_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ascii_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ascii_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (ascii_general_ci char(10) primary key) collate ascii_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (big5_bin integer, c char(10), v varchar(20), index(c), index(v)) collate big5_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (big5_bin char(10) primary key) collate big5_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (big5_chinese_ci integer, c char(10), v varchar(20), index(c), index(v)) collate big5_chinese_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (big5_chinese_ci char(10) primary key) collate big5_chinese_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1250_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp1250_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1250_bin char(10) primary key) collate cp1250_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 
(cp1250_croatian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1250_croatian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1250_croatian_ci char(10) primary key) collate cp1250_croatian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1250_czech_cs integer, c char(10), v varchar(20), index(c), index(v)) collate cp1250_czech_cs engine=ibmdb2i; -CREATE TABLE t1 (cp1250_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1250_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1250_general_ci char(10) primary key) collate cp1250_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1250_polish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1250_polish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1250_polish_ci char(10) primary key) collate cp1250_polish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1251_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp1251_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1251_bin char(10) primary key) collate cp1251_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1251_bulgarian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1251_bulgarian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where 
v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1251_bulgarian_ci char(10) primary key) collate cp1251_bulgarian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1251_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1251_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1251_general_ci char(10) primary key) collate cp1251_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1251_general_cs integer, c char(10), v varchar(20), index(c), index(v)) collate cp1251_general_cs engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1251_general_cs char(10) primary key) collate cp1251_general_cs engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1251_ukrainian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1251_ukrainian_ci engine=ibmdb2i; -CREATE TABLE t1 (cp1256_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp1256_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1256_bin char(10) primary key) collate cp1256_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1256_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1256_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp1256_general_ci char(10) primary key) collate cp1256_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp1257_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp1257_bin engine=ibmdb2i; -CREATE TABLE t1 (cp1257_general_ci integer, c char(10), v varchar(20), index(c), 
index(v)) collate cp1257_general_ci engine=ibmdb2i; -CREATE TABLE t1 (cp1257_lithuanian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp1257_lithuanian_ci engine=ibmdb2i; -CREATE TABLE t1 (cp850_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp850_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp850_bin char(10) primary key) collate cp850_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp850_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp850_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp850_general_ci char(10) primary key) collate cp850_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp852_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp852_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp852_bin char(10) primary key) collate cp852_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp852_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp852_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (cp852_general_ci char(10) primary key) collate cp852_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp866_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp866_bin engine=ibmdb2i; -CREATE TABLE t1 (cp866_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp866_general_ci engine=ibmdb2i; -CREATE TABLE t1 (cp932_bin integer, c char(10), v varchar(20), index(c), index(v)) collate cp932_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", 
"def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (cp932_bin char(10) primary key) collate cp932_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (cp932_japanese_ci integer, c char(10), v varchar(20), index(c), index(v)) collate cp932_japanese_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (cp932_japanese_ci char(10) primary key) collate cp932_japanese_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (dec8_bin integer, c char(10), v varchar(20), index(c), index(v)) collate dec8_bin engine=ibmdb2i; -CREATE TABLE t1 (dec8_swedish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate dec8_swedish_ci engine=ibmdb2i; -CREATE TABLE t1 (eucjpms_bin integer, c char(10), v varchar(20), index(c), index(v)) collate eucjpms_bin engine=ibmdb2i; -CREATE TABLE t1 (eucjpms_japanese_ci integer, c char(10), v varchar(20), index(c), index(v)) collate eucjpms_japanese_ci engine=ibmdb2i; -CREATE TABLE t1 (euckr_bin integer, c char(10), v varchar(20), index(c), index(v)) collate euckr_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (euckr_bin char(10) primary key) collate euckr_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (euckr_korean_ci integer, c char(10), v varchar(20), index(c), index(v)) collate euckr_korean_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (euckr_korean_ci char(10) primary key) collate euckr_korean_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (gb2312_bin integer, c char(10), v varchar(20), index(c), index(v)) collate gb2312_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; 
-id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (gb2312_bin char(10) primary key) collate gb2312_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (gb2312_chinese_ci integer, c char(10), v varchar(20), index(c), index(v)) collate gb2312_chinese_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (gb2312_chinese_ci char(10) primary key) collate gb2312_chinese_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (gbk_bin integer, c char(10), v varchar(20), index(c), index(v)) collate gbk_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (gbk_bin char(10) primary key) collate gbk_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (gbk_chinese_ci integer, c char(10), v varchar(20), index(c), index(v)) collate gbk_chinese_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (gbk_chinese_ci char(10) primary key) collate gbk_chinese_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (geostd8_bin integer, c char(10), v varchar(20), index(c), index(v)) collate geostd8_bin engine=ibmdb2i; -CREATE TABLE t1 (geostd8_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate geostd8_general_ci engine=ibmdb2i; -CREATE TABLE t1 (greek_bin integer, c char(10), v varchar(20), index(c), index(v)) collate greek_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (greek_bin char(10) primary key) collate greek_bin engine=ibmdb2i; -drop 
table t1; -CREATE TABLE t1 (greek_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate greek_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (greek_general_ci char(10) primary key) collate greek_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (hebrew_bin integer, c char(10), v varchar(20), index(c), index(v)) collate hebrew_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (hebrew_bin char(10) primary key) collate hebrew_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (hebrew_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate hebrew_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (hebrew_general_ci char(10) primary key) collate hebrew_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (hp8_bin integer, c char(10), v varchar(20), index(c), index(v)) collate hp8_bin engine=ibmdb2i; -CREATE TABLE t1 (hp8_english_ci integer, c char(10), v varchar(20), index(c), index(v)) collate hp8_english_ci engine=ibmdb2i; -CREATE TABLE t1 (keybcs2_bin integer, c char(10), v varchar(20), index(c), index(v)) collate keybcs2_bin engine=ibmdb2i; -CREATE TABLE t1 (keybcs2_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate keybcs2_general_ci engine=ibmdb2i; -CREATE TABLE t1 (koi8r_bin integer, c char(10), v varchar(20), index(c), index(v)) collate koi8r_bin engine=ibmdb2i; -CREATE TABLE t1 (koi8r_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate koi8r_general_ci engine=ibmdb2i; -CREATE TABLE t1 (koi8u_bin integer, c char(10), v varchar(20), index(c), index(v)) collate koi8u_bin engine=ibmdb2i; -CREATE TABLE t1 (koi8u_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate koi8u_general_ci engine=ibmdb2i; -CREATE TABLE t1 (latin1_bin integer, c char(10), v varchar(20), index(c), index(v)) collate latin1_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type 
possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin1_bin char(10) primary key) collate latin1_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin1_danish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin1_danish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin1_danish_ci char(10) primary key) collate latin1_danish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin1_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin1_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin1_general_ci char(10) primary key) collate latin1_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin1_general_cs integer, c char(10), v varchar(20), index(c), index(v)) collate latin1_general_cs engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin1_general_cs char(10) primary key) collate latin1_general_cs engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin1_german1_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin1_german1_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin1_german1_ci char(10) primary key) collate latin1_german1_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin1_german2_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin1_german2_ci engine=ibmdb2i; -CREATE TABLE t1 (latin1_spanish_ci integer, c char(10), v 
varchar(20), index(c), index(v)) collate latin1_spanish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin1_spanish_ci char(10) primary key) collate latin1_spanish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin1_swedish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin1_swedish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin1_swedish_ci char(10) primary key) collate latin1_swedish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin2_bin integer, c char(10), v varchar(20), index(c), index(v)) collate latin2_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin2_bin char(10) primary key) collate latin2_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin2_croatian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin2_croatian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin2_croatian_ci char(10) primary key) collate latin2_croatian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin2_czech_cs integer, c char(10), v varchar(20), index(c), index(v)) collate latin2_czech_cs engine=ibmdb2i; -CREATE TABLE t1 (latin2_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin2_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type 
possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin2_general_ci char(10) primary key) collate latin2_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin2_hungarian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin2_hungarian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin2_hungarian_ci char(10) primary key) collate latin2_hungarian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin5_bin integer, c char(10), v varchar(20), index(c), index(v)) collate latin5_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin5_bin char(10) primary key) collate latin5_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin5_turkish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin5_turkish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (latin5_turkish_ci char(10) primary key) collate latin5_turkish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (latin7_bin integer, c char(10), v varchar(20), index(c), index(v)) collate latin7_bin engine=ibmdb2i; -CREATE TABLE t1 (latin7_estonian_cs integer, c char(10), v varchar(20), index(c), index(v)) collate latin7_estonian_cs engine=ibmdb2i; -CREATE TABLE t1 (latin7_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate latin7_general_ci engine=ibmdb2i; -CREATE TABLE t1 (latin7_general_cs integer, c char(10), v varchar(20), index(c), index(v)) collate latin7_general_cs engine=ibmdb2i; -CREATE TABLE t1 (macce_bin integer, c char(10), v varchar(20), index(c), index(v)) collate macce_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where 
-drop table t1; -create table t1 (macce_bin char(10) primary key) collate macce_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (macce_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate macce_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (macce_general_ci char(10) primary key) collate macce_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (macroman_bin integer, c char(10), v varchar(20), index(c), index(v)) collate macroman_bin engine=ibmdb2i; -CREATE TABLE t1 (macroman_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate macroman_general_ci engine=ibmdb2i; -CREATE TABLE t1 (sjis_bin integer, c char(10), v varchar(20), index(c), index(v)) collate sjis_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (sjis_bin char(10) primary key) collate sjis_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (sjis_japanese_ci integer, c char(10), v varchar(20), index(c), index(v)) collate sjis_japanese_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (sjis_japanese_ci char(10) primary key) collate sjis_japanese_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (swe7_bin integer, c char(10), v varchar(20), index(c), index(v)) collate swe7_bin engine=ibmdb2i; -CREATE TABLE t1 (swe7_swedish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate swe7_swedish_ci engine=ibmdb2i; -CREATE TABLE t1 (tis620_bin integer, c char(10), v varchar(20), index(c), index(v)) collate tis620_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (tis620_bin char(10) primary key) collate tis620_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 
(tis620_thai_ci integer, c char(10), v varchar(20), index(c), index(v)) collate tis620_thai_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 11 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 23 NULL 6 Using where -drop table t1; -create table t1 (tis620_thai_ci char(10) primary key) collate tis620_thai_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_bin integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_bin char(10) primary key) collate ucs2_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_czech_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_czech_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_czech_ci char(10) primary key) collate ucs2_czech_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_danish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_danish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_danish_ci char(10) primary key) collate ucs2_danish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_esperanto_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_esperanto_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_esperanto_ci 
char(10) primary key) collate ucs2_esperanto_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_estonian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_estonian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_estonian_ci char(10) primary key) collate ucs2_estonian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_general_ci char(10) primary key) collate ucs2_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_hungarian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_hungarian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_hungarian_ci char(10) primary key) collate ucs2_hungarian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_icelandic_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_icelandic_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_icelandic_ci char(10) primary key) collate ucs2_icelandic_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_latvian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_latvian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id 
select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_latvian_ci char(10) primary key) collate ucs2_latvian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_lithuanian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_lithuanian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_lithuanian_ci char(10) primary key) collate ucs2_lithuanian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_persian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_persian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_persian_ci char(10) primary key) collate ucs2_persian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_polish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_polish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_polish_ci char(10) primary key) collate ucs2_polish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_romanian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_romanian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_romanian_ci char(10) primary key) collate ucs2_romanian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_roman_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_roman_ci engine=ibmdb2i; -CREATE TABLE t1 (ucs2_slovak_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_slovak_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert 
into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_slovak_ci char(10) primary key) collate ucs2_slovak_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_slovenian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_slovenian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_slovenian_ci char(10) primary key) collate ucs2_slovenian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_spanish2_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_spanish2_ci engine=ibmdb2i; -CREATE TABLE t1 (ucs2_spanish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_spanish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_spanish_ci char(10) primary key) collate ucs2_spanish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_swedish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_swedish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_swedish_ci char(10) primary key) collate ucs2_swedish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_turkish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_turkish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_turkish_ci char(10) primary key) collate 
ucs2_turkish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ucs2_unicode_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ucs2_unicode_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 21 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 43 NULL 6 Using where -drop table t1; -create table t1 (ucs2_unicode_ci char(10) primary key) collate ucs2_unicode_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ujis_bin integer, c char(10), v varchar(20), index(c), index(v)) collate ujis_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (ujis_bin char(10) primary key) collate ujis_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (ujis_japanese_ci integer, c char(10), v varchar(20), index(c), index(v)) collate ujis_japanese_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (ujis_japanese_ci char(10) primary key) collate ujis_japanese_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_bin integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_bin engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_bin char(10) primary key) collate utf8_bin engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_czech_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_czech_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop 
table t1; -create table t1 (utf8_czech_ci char(10) primary key) collate utf8_czech_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_danish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_danish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_danish_ci char(10) primary key) collate utf8_danish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_esperanto_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_esperanto_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_esperanto_ci char(10) primary key) collate utf8_esperanto_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_estonian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_estonian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_estonian_ci char(10) primary key) collate utf8_estonian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_general_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_general_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_general_ci char(10) primary key) collate utf8_general_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_hungarian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_hungarian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) 
where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_hungarian_ci char(10) primary key) collate utf8_hungarian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_icelandic_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_icelandic_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_icelandic_ci char(10) primary key) collate utf8_icelandic_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_latvian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_latvian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_latvian_ci char(10) primary key) collate utf8_latvian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_lithuanian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_lithuanian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_lithuanian_ci char(10) primary key) collate utf8_lithuanian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_persian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_persian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_persian_ci char(10) primary key) collate utf8_persian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_polish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_polish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id 
select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_polish_ci char(10) primary key) collate utf8_polish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_romanian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_romanian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_romanian_ci char(10) primary key) collate utf8_romanian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_roman_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_roman_ci engine=ibmdb2i; -CREATE TABLE t1 (utf8_slovak_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_slovak_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_slovak_ci char(10) primary key) collate utf8_slovak_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_slovenian_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_slovenian_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_slovenian_ci char(10) primary key) collate utf8_slovenian_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_spanish2_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_spanish2_ci engine=ibmdb2i; -CREATE TABLE t1 (utf8_spanish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_spanish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_spanish_ci 
char(10) primary key) collate utf8_spanish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_swedish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_swedish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_swedish_ci char(10) primary key) collate utf8_swedish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_turkish_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_turkish_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_turkish_ci char(10) primary key) collate utf8_turkish_ci engine=ibmdb2i; -drop table t1; -CREATE TABLE t1 (utf8_unicode_ci integer, c char(10), v varchar(20), index(c), index(v)) collate utf8_unicode_ci engine=ibmdb2i; -insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); -insert into t1 select * from t1; -explain select c,v from t1 force index(c) where c like "ab%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range c c 31 NULL 6 Using where -explain select c,v from t1 force index(v) where v like "de%"; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range v v 63 NULL 6 Using where -drop table t1; -create table t1 (utf8_unicode_ci char(10) primary key) collate utf8_unicode_ci engine=ibmdb2i; -drop table t1; -create table ffd (WHCHD1 CHAR(20), WHCSID decimal(5,0)) engine=ibmdb2i; -create table fd (SQSSEQ CHAR(10)) engine=ibmdb2i; -create temporary table intermed (row integer key auto_increment, cs char(30), ccsid integer); -insert into intermed (cs, ccsid) select * from ffd; -create temporary table intermed2 (row integer key auto_increment, srtseq char(10)); -insert into intermed2 (srtseq) select * from fd; -select ccsid, cs, srtseq from intermed inner join intermed2 on intermed.row = intermed2.row; -ccsid cs srtseq -500 "ascii_bin" QBLA101F4U -500 "ascii_general_ci" QALA101F4S -1200 "big5_bin" QBCHT04B0U -1200 "big5_chinese_ci" QACHT04B0S -1153 "cp1250_bin" QELA20481U -1153 "cp1250_croatian_ci" QALA20481S -1153 "cp1250_general_ci" QCLA20481S -1153 "cp1250_polish_ci" QDLA20481S -1025 "cp1251_bin" QCCYR0401U -1025 "cp1251_bulgarian_ci QACYR0401S -1025 "cp1251_general_ci" QBCYR0401S -1025 "cp1251_general_cs" QBCYR0401U -420 "cp1256_bin" QBARA01A4U -420 "cp1256_general_ci" QAARA01A4S -500 "cp850_bin" QDLA101F4U -500 "cp850_general_ci" QCLA101F4S -870 "cp852_bin" QBLA20366U -870 "cp852_general_ci" QALA20366S -1200 "cp932_bin" QBJPN04B0U -1200 "cp932_japanese_ci" QAJPN04B0S -1200 "euckr_bin" QBKOR04B0U -1200 "euckr_korean_ci" QAKOR04B0S -1200 
"gb2312_bin" QBCHS04B0U -1200 "gb2312_chinese_ci" QACHS04B0S -1200 "gbk_bin" QDCHS04B0U -1200 "gbk_chinese_ci" QCCHS04B0S -875 "greek_bin" QBELL036BU -875 "greek_general_ci" QAELL036BS -424 "hebrew_bin" QBHEB01A8U -424 "hebrew_general_ci" QAHEB01A8S -1148 "latin1_bin" QFLA1047CU -1148 "latin1_danish_ci" QALA1047CS -1148 "latin1_general_ci" QBLA1047CS -1148 "latin1_general_cs" QBLA1047CU -1148 "latin1_german1_ci" QCLA1047CS -1148 "latin1_spanish_ci" QDLA1047CS -1148 "latin1_swedish_ci" QELA1047CS -870 "latin2_bin" QGLA20366U -870 "latin2_croatian_ci" QCLA20366S -870 "latin2_general_ci" QELA20366S -870 "latin2_hungarian_ci QFLA20366S -1026 "latin5_bin" QBTRK0402U -1026 "latin5_turkish_ci" QATRK0402S -870 "macce_bin" QILA20366U -870 "macce_general_ci" QHLA20366S -1200 "sjis_bin" QDJPN04B0U -1200 "sjis_japanese_ci" QCJPN04B0S -838 "tis620_bin" QBTHA0346U -838 "tis620_thai_ci" QATHA0346S -13488 "ucs2_bin" *HEX -13488 "ucs2_czech_ci" I34ACS_CZ -13488 "ucs2_danish_ci" I34ADA_DK -13488 "ucs2_esperanto_ci" I34AEO -13488 "ucs2_estonian_ci" I34AET -13488 "ucs2_general_ci" QAUCS04B0S -13488 "ucs2_hungarian_ci" I34AHU -13488 "ucs2_icelandic_ci" I34AIS -13488 "ucs2_latvian_ci" I34ALV -13488 "ucs2_lithuanian_ci" I34ALT -13488 "ucs2_persian_ci" I34AFA -13488 "ucs2_polish_ci" I34APL -13488 "ucs2_romanian_ci" I34ARO -13488 "ucs2_slovak_ci" I34ASK -13488 "ucs2_slovenian_ci" I34ASL -13488 "ucs2_spanish_ci" I34AES -13488 "ucs2_swedish_ci" I34ASW -13488 "ucs2_turkish_ci" I34ATR -13488 "ucs2_unicode_ci" I34AEN -1200 "ujis_bin" QFJPN04B0U -1200 "ujis_japanese_ci" QEJPN04B0S -1208 "utf8_bin" *HEX -1208 "utf8_czech_ci" I34ACS_CZ -1208 "utf8_danish_ci" I34ADA_DK -1208 "utf8_esperanto_ci" I34AEO -1208 "utf8_estonian_ci" I34AET -1200 "utf8_general_ci" QAUCS04B0S -1208 "utf8_hungarian_ci" I34AHU -1208 "utf8_icelandic_ci" I34AIS -1208 "utf8_latvian_ci" I34ALV -1208 "utf8_lithuanian_ci" I34ALT -1208 "utf8_persian_ci" I34AFA -1208 "utf8_polish_ci" I34APL -1208 "utf8_romanian_ci" I34ARO -1208 "utf8_slovak_ci" I34ASK -1208 "utf8_slovenian_ci" I34ASL -1208 "utf8_spanish_ci" I34AES -1208 "utf8_swedish_ci" I34ASW -1208 "utf8_turkish_ci" I34ATR -1208 "utf8_unicode_ci" I34AEN -drop table ffd, fd; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44020.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44020.test deleted file mode 100644 index 09a7c75cfc0..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44020.test +++ /dev/null @@ -1,9 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; -source include/have_case_sensitive_file_system.inc; - -create schema `A12345_@$#`; -create table `A12345_@$#`.t1 (i int) engine=ibmdb2i; -show create table `A12345_@$#`.t1; -select * from `A12345_@$#`.t1; -drop table `A12345_@$#`.t1; -drop schema `A12345_@$#`; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44025.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44025.test deleted file mode 100644 index 9b033a2298f..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44025.test +++ /dev/null @@ -1,9 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; -source suite/ibmdb2i/include/have_i61.inc; - - -create table t1 (c char(10) collate utf8_swedish_ci, index(c)) engine=ibmdb2i; -drop table t1; - -create table t1 (c char(10) collate ucs2_swedish_ci, index(c)) engine=ibmdb2i; -drop table t1; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44232.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44232.test deleted file mode 100755 index ea29b5abcd4..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44232.test +++ /dev/null @@ 
-1,8 +0,0 @@ ---source suite/ibmdb2i/include/have_ibmdb2i.inc ---source suite/ibmdb2i/include/have_i54.inc - ---error 1005 -create table t1 (c char(1) character set armscii8) engine=ibmdb2i; - ---error 1005 -create table t1 (c char(1) character set eucjpms ) engine=ibmdb2i; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44610.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44610.test deleted file mode 100755 index da69b5d9148..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_44610.test +++ /dev/null @@ -1,28 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; - -# Test RCDFMT generation for a variety of kinds of table names -create table ABC (i int) engine=ibmdb2i; -drop table ABC; - -create table `1234567890ABC` (i int) engine=ibmdb2i; -drop table `1234567890ABC`; - -create table `!@#$%` (i int) engine=ibmdb2i; -drop table `!@#$%`; - -create table `ABCD#########` (i int) engine=ibmdb2i; -drop table `ABCD#########`; - -create table `_` (i int) engine=ibmdb2i; -drop table `_`; - -create table `abc##def` (i int) engine=ibmdb2i; -drop table `abc##def`; - -set names utf8; -create table İ (s1 int) engine=ibmdb2i; -drop table İ; - -create table İİ (s1 int) engine=ibmdb2i; -drop table İİ; -set names latin1; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45196.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45196.test deleted file mode 100644 index 17b1d658975..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45196.test +++ /dev/null @@ -1,26 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; -source suite/ibmdb2i/include/have_i61.inc; - ---disable_warnings -drop table if exists t1; ---enable_warnings - -create table t1 (c char(10), index(c)) collate ucs2_czech_ci engine=ibmdb2i; -insert into t1 values ("ch"),("h"),("i"); -select * from t1 order by c; -drop table t1; - -create table t1 (c char(10), index(c)) collate utf8_czech_ci engine=ibmdb2i; -insert into t1 values ("ch"),("h"),("i"); -select * from t1 order by c; -drop table t1; - -create table t1 (c char(10), index(c)) collate ucs2_danish_ci engine=ibmdb2i; -insert into t1 values("abc"),("abcd"),("aaaa"); -select c from t1 order by c; -drop table t1; - -create table t1 (c char(10), index(c)) collate utf8_danish_ci engine=ibmdb2i; -insert into t1 values("abc"),("abcd"),("aaaa"); -select c from t1 order by c; -drop table t1; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45793.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45793.test deleted file mode 100644 index 93fb78ff421..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45793.test +++ /dev/null @@ -1,11 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; -source suite/ibmdb2i/include/have_i61.inc; - ---disable_warnings -drop table if exists t1; ---enable_warnings - -create table t1 (c char(10), index(c)) charset macce engine=ibmdb2i; -insert into t1 values ("test"); -select * from t1 order by c; -drop table t1; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45983.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45983.test deleted file mode 100644 index 695d8e90ada..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_45983.test +++ /dev/null @@ -1,47 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; - -# Confirm that ibmdb2i_create_index_option causes additional *HEX sorted indexes to be created for all non-binary keys.
- -set ibmdb2i_create_index_option=1; ---disable_warnings -drop schema if exists test1; -create schema test1; -use test1; ---enable_warnings - ---disable_abort_on_error ---error 0,255 -exec system "DLTF QGPL/FDOUT" > /dev/null; ---enable_abort_on_error - -#No additional index because no string fields in key -CREATE TABLE t1 (f int primary key, index(f)) engine=ibmdb2i; ---error 255 -exec system "DSPFD FILE(\"test1\"/PRIM0001) TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; ---error 255 -exec system "DSPFD FILE(\"test1\"/\"f___H_t1\") TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; -drop table t1; - -#No additional index because binary sorting -CREATE TABLE t1 (f char(10) collate utf8_bin primary key, index(f)) engine=ibmdb2i; ---error 255 -exec system "DSPFD FILE(\"test1\"/PRIM0001) TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; ---error 255 -exec system "DSPFD FILE(\"test1\"/\"f___H_t1\") TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; -drop table t1; - -CREATE TABLE t1 (f char(10) collate latin1_swedish_ci primary key, index(f)) engine=ibmdb2i; -exec system "DSPFD FILE(\"test1\"/PRIM0001) TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; -exec system "DSPFD FILE(\"test1\"/\"f___H_t1\") TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; -drop table t1; - -CREATE TABLE t1 (f char(10) collate latin1_swedish_ci primary key, i int, index i(i,f)) engine=ibmdb2i; -exec system "DSPFD FILE(\"test1\"/PRIM0001) TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; -exec system "DSPFD FILE(\"test1\"/\"i___H_t1\") TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; -drop table t1; - - -create table fd (SQSSEQ CHAR(10)) engine=ibmdb2i; -system system "CPYF FROMFILE(QGPL/FDOUT) TOFILE(\"test1\"/\"fd\") mbropt(*replace) fmtopt(*drop *map)" > /dev/null; -select * from fd; -drop table fd; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_49329.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_49329.test deleted file mode 100644 index 615df284d8f..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_bug_49329.test +++ /dev/null @@ -1,10 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; -source include/have_case_sensitive_file_system.inc; - -create table ABC (i int) engine=ibmdb2i; -insert into ABC values(1); -create table abc (i int) engine=ibmdb2i; -insert into abc values (2); -select * from ABC; -drop table ABC; -drop table abc; diff --git a/mysql-test/suite/ibmdb2i/t/ibmdb2i_collations.test b/mysql-test/suite/ibmdb2i/t/ibmdb2i_collations.test deleted file mode 100644 index 899f330d360..00000000000 --- a/mysql-test/suite/ibmdb2i/t/ibmdb2i_collations.test +++ /dev/null @@ -1,44 +0,0 @@ -source suite/ibmdb2i/include/have_ibmdb2i.inc; -source suite/ibmdb2i/include/have_i61.inc; ---disable_warnings -drop table if exists t1, ffd, fd; ---enable_warnings - ---disable_abort_on_error ---error 0,255 -exec system "DLTF QGPL/FFDOUT" > /dev/null; ---error 0,255 -exec system "DLTF QGPL/FDOUT" > /dev/null; ---enable_abort_on_error -let $count= query_get_value(select count(*) from information_schema.COLLATIONS where COLLATION_NAME <> "binary", count(*),1); - -while ($count) -{ - let $collation = query_get_value(select COLLATION_NAME from information_schema.COLLATIONS where COLLATION_NAME <> "binary" order by COLLATION_NAME desc, COLLATION_NAME, $count); - error 
0,1005,2504,2028; - eval CREATE TABLE t1 ($collation integer, c char(10), v varchar(20), index(c), index(v)) collate $collation engine=ibmdb2i; - if (!$mysql_errno) - { - insert into t1 (c,v) values ("abc","def"),("abcd", "def"),("abcde","defg"),("aaaa","bbbb"); - insert into t1 select * from t1; - explain select c,v from t1 force index(c) where c like "ab%"; - explain select c,v from t1 force index(v) where v like "de%"; - drop table t1; - eval create table t1 ($collation char(10) primary key) collate $collation engine=ibmdb2i; - system system "DSPFFD FILE(\"test\"/\"t1\") OUTPUT(*OUTFILE) OUTFILE(QGPL/FFDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; - system system "DSPFD FILE(\"test\"/\"t1\") TYPE(*SEQ) OUTPUT(*OUTFILE) OUTFILE(QGPL/FDOUT) OUTMBR(*FIRST *ADD)" > /dev/null; - drop table t1; - } - dec $count; -} - -create table ffd (WHCHD1 CHAR(20), WHCSID decimal(5,0)) engine=ibmdb2i; -system system "CPYF FROMFILE(QGPL/FFDOUT) TOFILE(\"test\"/\"ffd\") mbropt(*replace) fmtopt(*drop *map)" > /dev/null; -create table fd (SQSSEQ CHAR(10)) engine=ibmdb2i; -system system "CPYF FROMFILE(QGPL/FDOUT) TOFILE(\"test\"/\"fd\") mbropt(*replace) fmtopt(*drop *map)" > /dev/null; -create temporary table intermed (row integer key auto_increment, cs char(30), ccsid integer); -insert into intermed (cs, ccsid) select * from ffd; -create temporary table intermed2 (row integer key auto_increment, srtseq char(10)); -insert into intermed2 (srtseq) select * from fd; -select ccsid, cs, srtseq from intermed inner join intermed2 on intermed.row = intermed2.row; -drop table ffd, fd; diff --git a/storage/ibmdb2i/CMakeLists.txt b/storage/ibmdb2i/CMakeLists.txt deleted file mode 100644 index 11cc4300569..00000000000 --- a/storage/ibmdb2i/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2006 MySQL AB -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") - -INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql - ${CMAKE_SOURCE_DIR}/regex - ${CMAKE_SOURCE_DIR}/extra/yassl/include) -ADD_LIBRARY(ibmdb2i ha_ibmdb2i.cc db2i_ileBridge.cc db2i_conversion.cc - db2i_blobCollection.cc db2i_file.cc db2i_charsetSupport.cc - db2i_collationSupport.cc db2i_errors.cc db2i_constraints.cc - db2i_rir.cc db2i_sqlStatementStream.cc db2i_ioBuffers.cc db2i_myconv.cc) diff --git a/storage/ibmdb2i/Makefile.am b/storage/ibmdb2i/Makefile.am deleted file mode 100644 index b9602e392e0..00000000000 --- a/storage/ibmdb2i/Makefile.am +++ /dev/null @@ -1,54 +0,0 @@ -# -# Copyright (c) 2007, 2008, IBM Corporation. -# All rights reserved. 
-# -# - -#called from the top level Makefile - -MYSQLDATAdir = $(localstatedir) -MYSQLSHAREdir = $(pkgdatadir) -MYSQLBASEdir= $(prefix) -MYSQLLIBdir= $(pkglibdir) -pkgplugindir = $(pkglibdir)/plugin -INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include \ - -I$(top_srcdir)/regex \ - -I$(top_srcdir)/sql \ - -I$(srcdir) \ - -I$ /afs/rchland.ibm.com/lande/shadow/dev2000/osxpf/v5r4m0f.xpf/cur/cmvc/base.pgm/my.xpf/apis \ - -I$ /afs/rchland.ibm.com/lande/shadow/dev2000/osxpf/v5r4m0.xpf/bld/cmvc/base.pgm/lg.xpf \ - -I$ /afs/rchland.ibm.com/lande/shadow/dev2000/osxpf/v5r4m0.xpf/bld/cmvc/base.pgm/tq.xpf -WRAPLIBS= - -LDADD = - -DEFS = @DEFS@ - -noinst_HEADERS = ha_ibmdb2i.h db2i_collationSupport.h db2i_file.h \ - db2i_ioBuffers.h db2i_blobCollection.h \ - db2i_global.h db2i_misc.h db2i_charsetSupport.h db2i_errors.h \ - db2i_iconv.h db2i_myconv.h db2i_safeString.h db2i_sqlStatementStream.h \ - db2i_ileBridge.h db2i_validatedPointer.h - -EXTRA_LTLIBRARIES = ha_ibmdb2i.la -pkgplugin_LTLIBRARIES = @plugin_ibmdb2i_shared_target@ -ha_ibmdb2i_la_LIBADD = -liconv -ha_ibmdb2i_la_LDFLAGS = -module -rpath $(MYSQLLIBdir) -ha_ibmdb2i_la_CXXFLAGS= $(AM_CXXFLAGS) -DMYSQL_DYNAMIC_PLUGIN -ha_ibmdb2i_la_CFLAGS = $(AM_CFLAGS) -DMYSQL_DYNAMIC_PLUGIN -ha_ibmdb2i_la_SOURCES = ha_ibmdb2i.cc db2i_ileBridge.cc db2i_conversion.cc \ - db2i_blobCollection.cc db2i_file.cc db2i_charsetSupport.cc \ - db2i_collationSupport.cc db2i_errors.cc db2i_constraints.cc \ - db2i_rir.cc db2i_sqlStatementStream.cc db2i_ioBuffers.cc \ - db2i_myconv.cc - -EXTRA_LIBRARIES = libibmdb2i.a -noinst_LIBRARIES = @plugin_ibmdb2i_static_target@ -libibmdb2i_a_CXXFLAGS = $(AM_CXXFLAGS) -libibmdb2i_a_CFLAGS = $(AM_CFLAGS) -libibmdb2i_a_SOURCES= $(ha_ibmdb2i_la_SOURCES) - - -EXTRA_DIST = CMakeLists.txt plug.in -# Don't update the files from bitkeeper -%::SCCS/s.% diff --git a/storage/ibmdb2i/db2i_blobCollection.cc b/storage/ibmdb2i/db2i_blobCollection.cc deleted file mode 100644 index 17101c9c0a4..00000000000 --- a/storage/ibmdb2i/db2i_blobCollection.cc +++ /dev/null @@ -1,107 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#include "db2i_blobCollection.h" - -/** - Return the size to use when allocating space for blob reads. - - @param fieldIndex The field to allocate for - @param[out] shouldProtect Indicates whether storage protection should be - applied to the space, because the size returned is - smaller than the maximum possible size. -*/ - -uint32 -BlobCollection::getSizeToAllocate(int fieldIndex, bool& shouldProtect) -{ - Field* field = table->getMySQLTable()->field[fieldIndex]; - uint fieldLength = field->max_display_length(); - - if (fieldLength <= MAX_FULL_ALLOCATE_BLOB_LENGTH) - { - shouldProtect = false; - return fieldLength; - } - - shouldProtect = true; - - uint curMaxSize = table->getBlobFieldActualSize(fieldIndex); - - uint defaultAllocSize = min(defaultAllocation, fieldLength); - - return max(defaultAllocSize, curMaxSize); - -} - -void -BlobCollection::generateBuffer(int fieldIndex) -{ - DBUG_ASSERT(table->db2Field(fieldIndex).isBlob()); - - bool protect; - buffers[table->getBlobIdFromField(fieldIndex)].Malloc(getSizeToAllocate(fieldIndex, protect), protect); - - return; -} - -/** - Realloc the read buffer associated with a blob field. - - This is used when the previous allocation for a blob field is found to be - too small (this is discovered when QMY_READ trips over the protected boundary - page). - - @param fieldIndex The field to be reallocated - @param size The size of buffer to allocate for this field. -*/ - -ValidatedPointer& -BlobCollection::reallocBuffer(int fieldIndex, size_t size) -{ - ProtectedBuffer& buf = buffers[table->getBlobIdFromField(fieldIndex)]; - if (size <= buf.allocLen()) - return buf.ptr(); - - table->updateBlobFieldActualSize(fieldIndex, size); - - DBUG_PRINT("BlobCollection::reallocBuffer",("PERF: reallocing %d to %d: ", fieldIndex, size)); - - bool protect; - buf.Free(); - buf.Malloc(getSizeToAllocate(fieldIndex, protect), protect); - return buf.ptr(); -} diff --git a/storage/ibmdb2i/db2i_blobCollection.h b/storage/ibmdb2i/db2i_blobCollection.h deleted file mode 100644 index 6a60394555f..00000000000 --- a/storage/ibmdb2i/db2i_blobCollection.h +++ /dev/null @@ -1,151 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. 
- (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#ifndef DB2I_BLOBCOLLECTION_H -#define DB2I_BLOBCOLLECTION_H - -#include "db2i_global.h" -#include "db2i_file.h" - -/** - @class ProtectedBuffer - @brief Implements memory management for (optionally) protected buffers. - - Buffers created with the protection option will have a guard page set on the - page following requested allocation size. The side effect is that the actual - allocation is up to 2*4096-1 bytes larger than the size requested by the - using code. -*/ - -class ProtectedBuffer -{ -public: - ProtectedBuffer() : protectBuf(false) - {;} - - void Malloc(size_t size, bool protect = false) - { - protectBuf = protect; - bufptr.alloc(size + (protectBuf ? 0x1fff : 0x0)); - if ((void*)bufptr != NULL) - { - len = size; - if (protectBuf) - mprotect(protectedPage(), 0x1000, PROT_NONE); -#ifndef DBUG_OFF - // Prevents a problem with DBUG_PRINT over-reading in recent versions of - // MySQL - *((char*)protectedPage()-1) = 0; -#endif - } - } - - void Free() - { - if ((void*)bufptr != NULL) - { - if (protectBuf) - mprotect(protectedPage(), 0x1000, PROT_READ | PROT_WRITE); - bufptr.dealloc(); - } - } - - ~ProtectedBuffer() - { - Free(); - } - - ValidatedPointer& ptr() {return bufptr;} - bool isProtected() const {return protectBuf;} - size_t allocLen() const {return len;} -private: - void* protectedPage() - { - return (void*)(((address64_t)(void*)bufptr + len + 0x1000) & ~0xfff); - } - - ValidatedPointer bufptr; - size_t len; - bool protectBuf; - -}; - - -/** - @class BlobCollection - @brief Manages memory allocation for reading blobs associated with a table. - - Allocations are done on-demand and are protected with a guard page if less - than the max possible size is allocated. 
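// --- Editorial sketch, not part of the original patch ----------------------
// A minimal, standalone illustration of the guard-page idea described above:
// map the requested size rounded up to a whole page, then revoke access to
// the page that follows, so any overrun faults immediately instead of
// silently corrupting memory. Assumes POSIX mmap/mprotect; the GuardedBuffer
// name is hypothetical and unrelated to the engine's ProtectedBuffer or
// ValidatedPointer classes.
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstring>

struct GuardedBuffer
{
  char*  base   = nullptr;  // start of the usable region
  size_t usable = 0;        // bytes the caller asked for
  size_t mapped = 0;        // total mapping, including the guard page

  bool alloc(size_t size)
  {
    const size_t page    = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    const size_t rounded = ((size + page - 1) / page) * page;
    mapped = rounded + page;                       // one extra page as guard
    void* p = mmap(nullptr, mapped, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
      return false;
    base   = static_cast<char*>(p);
    usable = size;
    // The trailing page becomes inaccessible: a read or write past the
    // rounded allocation raises SIGSEGV right at the point of overrun.
    return mprotect(base + rounded, page, PROT_NONE) == 0;
  }

  void release()
  {
    if (base)
      munmap(base, mapped);
    base = nullptr;
  }
};

int main()
{
  GuardedBuffer b;
  if (!b.alloc(1000))
    return 1;
  memset(b.base, 0, b.usable);  // within the allocation: fine
  // b.base[4096] = 1;          // on a 4 KiB-page system this hits the guard page
  b.release();
  return 0;
}
// ---------------------------------------------------------------------------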
-*/ -class BlobCollection -{ - public: - BlobCollection(db2i_table* db2Table, uint32 defaultAllocSize) : - defaultAllocation(defaultAllocSize), table(db2Table) - { - buffers = new ProtectedBuffer[table->getBlobCount()]; - } - - ~BlobCollection() - { - delete[] buffers; - } - - ValidatedPointer& getBufferPtr(int fieldIndex) - { - int blobIndex = table->getBlobIdFromField(fieldIndex); - if ((char*)buffers[blobIndex].ptr() == NULL) - generateBuffer(fieldIndex); - - return buffers[blobIndex].ptr(); - } - - ValidatedPointer& reallocBuffer(int fieldIndex, size_t size); - - - private: - - uint32 getSizeToAllocate(int fieldIndex, bool& shouldProtect); - void generateBuffer(int fieldIndex); - - db2i_table* table; // The table being read - ProtectedBuffer* buffers; // The buffers - uint32 defaultAllocation; - /* The default size to use when first allocating a buffer */ -}; - -#endif diff --git a/storage/ibmdb2i/db2i_charsetSupport.cc b/storage/ibmdb2i/db2i_charsetSupport.cc deleted file mode 100644 index 83bf1b9448b..00000000000 --- a/storage/ibmdb2i/db2i_charsetSupport.cc +++ /dev/null @@ -1,826 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - - -#include "db2i_charsetSupport.h" -#include "as400_types.h" -#include "as400_protos.h" -#include "db2i_ileBridge.h" -#include "qlgusr.h" -#include "db2i_errors.h" - - -/* - The following arrays define a mapping between IANA-style text descriptors and - IBM i CCSID text descriptors. The mapping is a 1-to-1 correlation between - corresponding array slots. 
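// --- Editorial sketch, not part of the original patch ----------------------
// The 1-to-1 mapping described above amounts to "same index, other array":
// find the slot whose IANA-style name matches, return the CCSID stored at
// that slot. The three sample entries are taken from the full tables that
// follow; the ianaToCcsid function name is hypothetical.
#include <cstdio>
#include <cstring>

static const char* const kIanaName[] = { "ascii", "latin1", "utf8" };
static const char* const kCcsid[]    = { "367",   "923",    "1208" };
static const int kEntries = sizeof(kIanaName) / sizeof(kIanaName[0]);

// Returns the CCSID string for an IANA-style charset name, or nullptr if the
// name has no entry in the mapping tables.
static const char* ianaToCcsid(const char* name)
{
  for (int i = 0; i < kEntries; ++i)
    if (strcmp(kIanaName[i], name) == 0)
      return kCcsid[i];
  return nullptr;
}

int main()
{
  printf("utf8 -> CCSID %s\n", ianaToCcsid("utf8"));  // prints 1208
  return 0;
}
// ---------------------------------------------------------------------------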
-*/ -#define MAX_IANASTRING 23 -static const char ianaStringType[MAX_IANASTRING][10] = -{ - {"ascii"}, - {"Big5"}, //big5 - {"cp1250"}, - {"cp1251"}, - {"cp1256"}, - {"cp850"}, - {"cp852"}, - {"cp866"}, - {"IBM943"}, //cp932 - {"EUC-KR"}, //euckr - {"IBM1381"}, //gb2312 - {"IBM1386"}, //gbk - {"greek"}, - {"hebrew"}, - {"latin1"}, - {"latin2"}, - {"latin5"}, - {"macce"}, - {"tis620"}, - {"Shift_JIS"}, //sjis - {"ucs2"}, - {"EUC-JP"}, //ujis - {"utf8"} -}; -static const char ccsidType[MAX_IANASTRING][6] = -{ - {"367"}, //ascii - {"950"}, //big5 - {"1250"}, //cp1250 - {"1251"}, //cp1251 - {"1256"}, //cp1256 - {"850"}, //cp850 - {"852"}, //cp852 - {"866"}, //cp866 - {"943"}, //cp932 - {"970"}, //euckr - {"1381"}, //gb2312 - {"1386"}, //gbk - {"813"}, //greek - {"916"}, //hebrew - {"923"}, //latin1 - {"912"}, //latin2 - {"920"}, //latin5 - {"1282"}, //macce - {"874"}, //tis620 - {"943"}, //sjis - {"13488"},//ucs2 - {"5050"}, //ujis - {"1208"} //utf8 -}; - -static _ILEpointer *QlgCvtTextDescToDesc_sym; - -/* We keep a cache of the mapping for text descriptions obtained via - QlgTextDescToDesc. The following structures implement this cache. */ -static HASH textDescMapHash; -static MEM_ROOT textDescMapMemroot; -static pthread_mutex_t textDescMapHashMutex; -struct TextDescMap -{ - struct HashKey - { - int32 inType; - int32 outType; - char inDesc[Qlg_MaxDescSize]; - } hashKey; - char outDesc[Qlg_MaxDescSize]; -}; - -/* We keep a cache of the mapping for open iconv descriptors. The following - structures implement this cache. */ -static HASH iconvMapHash; -static MEM_ROOT iconvMapMemroot; -static pthread_mutex_t iconvMapHashMutex; -struct IconvMap -{ - struct HashKey - { - uint32 direction; // These are uint32s to avoid garbage data in the key from compiler padding - uint32 db2CCSID; - const CHARSET_INFO* myCharset; - } hashKey; - iconv_t iconvDesc; -}; - - -/** - Initialize the static structures used by this module. - - This must only be called once per plugin instantiation. - - @return 0 if successful. Failure otherwise -*/ -int32 initCharsetSupport() -{ - DBUG_ENTER("initCharsetSupport"); - - int actmark = _ILELOAD("QSYS/QLGUSR", ILELOAD_LIBOBJ); - if ( actmark == -1 ) - { - DBUG_PRINT("initCharsetSupport", ("conversion srvpgm activation failed")); - DBUG_RETURN(1); - } - - QlgCvtTextDescToDesc_sym = (ILEpointer*)malloc_aligned(sizeof(ILEpointer)); - if (_ILESYM(QlgCvtTextDescToDesc_sym, actmark, "QlgCvtTextDescToDesc") == -1) - { - DBUG_PRINT("initCharsetSupport", - ("resolve of QlgCvtTextDescToDesc failed")); - DBUG_RETURN(errno); - } - - VOID(pthread_mutex_init(&textDescMapHashMutex,MY_MUTEX_INIT_FAST)); - hash_init(&textDescMapHash, &my_charset_bin, 10, offsetof(TextDescMap, hashKey), sizeof(TextDescMap::hashKey), 0, 0, HASH_UNIQUE); - - VOID(pthread_mutex_init(&iconvMapHashMutex,MY_MUTEX_INIT_FAST)); - hash_init(&iconvMapHash, &my_charset_bin, 10, offsetof(IconvMap, hashKey), sizeof(IconvMap::hashKey), 0, 0, HASH_UNIQUE); - - init_alloc_root(&textDescMapMemroot, 2048, 0); - init_alloc_root(&iconvMapMemroot, 256, 0); - - initMyconv(); - - DBUG_RETURN(0); -} - -/** - Cleanup the static structures used by this module. - - This must only be called once per plugin instantiation and only if - initCharsetSupport() was successful. 
-*/ -void doneCharsetSupport() -{ - cleanupMyconv(); - - free_root(&textDescMapMemroot, 0); - free_root(&iconvMapMemroot, 0); - - pthread_mutex_destroy(&textDescMapHashMutex); - hash_free(&textDescMapHash); - pthread_mutex_destroy(&iconvMapHashMutex); - hash_free(&iconvMapHash); - free_aligned(QlgCvtTextDescToDesc_sym); -} - - -/** - Convert a text description from one type to another. - - This function is just a wrapper for the IBM i QlgTextDescToDesc function plus - some overrides for conversions that the API does not handle correctly and - support for caching the computed conversion. - - @param inType The type of descriptor pointed to by "in". - @param outType The type of descriptor requested for "out". - @param in The descriptor to be convereted. - @param[out] out The equivalent descriptor - @param hashKey The hash key to be used for caching the conversion result. - - @return 0 if successful. Failure otherwise -*/ -static int32 getNewTextDesc(const int32 inType, - const int32 outType, - const char* in, - char* out, - const TextDescMap::HashKey* hashKey) -{ - DBUG_ENTER("db2i_charsetSupport::getNewTextDesc"); - const arg_type_t signature[] = { ARG_INT32, ARG_INT32, ARG_MEMPTR, ARG_INT32, ARG_MEMPTR, ARG_INT32, ARG_INT32, ARG_END }; - struct ArgList - { - ILEarglist_base base; - int32 CRDIInType; - int32 CRDIOutType; - ILEpointer CRDIDesc; - int32 CRDIDescSize; - ILEpointer CRDODesc; - int32 CRDODescSize; - int32 CTDCCSID; - } *arguments; - - if ((inType == Qlg_TypeIANA) && (outType == Qlg_TypeAix41)) - { - // Override non-standard charsets - if (unlikely(strcmp("IBM1381", in) == 0)) - { - strcpy(out, "IBM-1381"); - DBUG_RETURN(0); - } - } - else if ((inType == Qlg_TypeAS400CCSID) && (outType == Qlg_TypeAix41)) - { - // Override non-standard charsets - if (strcmp("1148", in) == 0) - { - strcpy(out, "IBM-1148"); - DBUG_RETURN(0); - } - else if (unlikely(strcmp("1153", in) == 0)) - { - strcpy(out, "IBM-1153"); - DBUG_RETURN(0); - } - } - - char argBuf[sizeof(ArgList)+15]; - arguments = (ArgList*)roundToQuadWordBdy(argBuf); - - arguments->CRDIInType = inType; - arguments->CRDIOutType = outType; - arguments->CRDIDesc.s.addr = (address64_t) in; - arguments->CRDIDescSize = Qlg_MaxDescSize; - arguments->CRDODesc.s.addr = (address64_t) out; - arguments->CRDODescSize = Qlg_MaxDescSize; - arguments->CTDCCSID = 819; - _ILECALL(QlgCvtTextDescToDesc_sym, - &arguments->base, - signature, - RESULT_INT32); - if (unlikely(arguments->base.result.s_int32.r_int32 < 0)) - { - if (arguments->base.result.s_int32.r_int32 == Qlg_InDescriptorNotFound) - { - DBUG_RETURN(DB2I_ERR_UNSUPP_CHARSET); - } - else - { - getErrTxt(DB2I_ERR_ILECALL,"QlgCvtTextDescToDesc",arguments->base.result.s_int32.r_int32); - DBUG_RETURN(DB2I_ERR_ILECALL); - } - } - - // Store the conversion information into a cache entry - TextDescMap* mapping = (TextDescMap*)alloc_root(&textDescMapMemroot, sizeof(TextDescMap)); - if (unlikely(!mapping)) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - memcpy(&(mapping->hashKey), hashKey, sizeof(hashKey)); - strcpy(mapping->outDesc, out); - pthread_mutex_lock(&textDescMapHashMutex); - my_hash_insert(&textDescMapHash, (const uchar*)mapping); - pthread_mutex_unlock(&textDescMapHashMutex); - - DBUG_RETURN(0); -} - - -/** - Convert a text description from one type to another. - - This function takes a text description in one representation and converts - it into another representation. 
Although the OS provides some facilities for - doing this, the support is not complete, nor does MySQL always use standard - identifiers. Therefore, there are a lot of hardcoded overrides required. - There is probably some room for optimization here, but this should not be - called frequently under most circumstances. - - @param inType The type of descriptor pointed to by "in". - @param outType The type of descriptor requested for "out". - @param in The descriptor to be convereted. - @param[out] out The equivalent descriptor - - @return 0 if successful. Failure otherwise -*/ -static int32 convertTextDesc(const int32 inType, const int32 outType, const char* inDesc, char* outDesc) -{ - DBUG_ENTER("db2i_charsetSupport::convertTextDesc"); - const char* inDescOverride; - - if (inType == Qlg_TypeIANA) - { - // Override non-standard charsets - if (strcmp("big5", inDesc) == 0) - inDescOverride = "Big5"; - else if (strcmp("cp932", inDesc) == 0) - inDescOverride = "IBM943"; - else if (strcmp("euckr", inDesc) == 0) - inDescOverride = "EUC-KR"; - else if (strcmp("gb2312", inDesc) == 0) - inDescOverride = "IBM1381"; - else if (strcmp("gbk", inDesc) == 0) - inDescOverride = "IBM1386"; - else if (strcmp("sjis", inDesc) == 0) - inDescOverride = "Shift_JIS"; - else if (strcmp("ujis", inDesc) == 0) - inDescOverride = "EUC-JP"; - else - inDescOverride = inDesc; - - // Hardcode non-standard charsets - if (outType == Qlg_TypeAix41) - { - if (strcmp("Big5", inDescOverride) == 0) - { - strcpy(outDesc,"big5"); - DBUG_RETURN(0); - } - else if (strcmp("IBM1386", inDescOverride) == 0) - { - strcpy(outDesc,"GBK"); - DBUG_RETURN(0); - } - else if (strcmp("Shift_JIS", inDescOverride) == 0 || - strcmp("IBM943", inDescOverride) == 0) - { - strcpy(outDesc,"IBM-943"); - DBUG_RETURN(0); - } - else if (strcmp("tis620", inDescOverride) == 0) - { - strcpy(outDesc,"TIS-620"); - DBUG_RETURN(0); - } - else if (strcmp("ucs2", inDescOverride) == 0) - { - strcpy(outDesc,"UCS-2"); - DBUG_RETURN(0); - } - else if (strcmp("cp1250", inDescOverride) == 0) - { - strcpy(outDesc,"IBM-1250"); - DBUG_RETURN(0); - } - else if (strcmp("cp1251", inDescOverride) == 0) - { - strcpy(outDesc,"IBM-1251"); - DBUG_RETURN(0); - } - else if (strcmp("cp1256", inDescOverride) == 0) - { - strcpy(outDesc,"IBM-1256"); - DBUG_RETURN(0); - } - else if (strcmp("macce", inDescOverride) == 0) - { - strcpy(outDesc,"IBM-1282"); - DBUG_RETURN(0); - } - } - else if (outType == Qlg_TypeAS400CCSID) - { - // See if we can fast path the convert - for (int loopCnt = 0; loopCnt < MAX_IANASTRING; ++loopCnt) - { - if (strcmp((char*)ianaStringType[loopCnt],inDescOverride) == 0) - { - strcpy(outDesc,ccsidType[loopCnt]); - DBUG_RETURN(0); - } - } - } - } - else - inDescOverride = inDesc; - - // We call getNewTextDesc for all other conversions and cache the result. - TextDescMap *mapping; - TextDescMap::HashKey hashKey; - hashKey.inType= inType; - hashKey.outType= outType; - uint32 len = strlen(inDescOverride); - memcpy(hashKey.inDesc, inDescOverride, len); - memset(hashKey.inDesc+len, 0, sizeof(hashKey.inDesc) - len); - - if (!(mapping=(TextDescMap *) hash_search(&textDescMapHash, - (const uchar*)&hashKey, - sizeof(hashKey)))) - { - DBUG_RETURN(getNewTextDesc(inType, outType, inDescOverride, outDesc, &hashKey)); - } - else - { - strcpy(outDesc, mapping->outDesc); - } - DBUG_RETURN(0); -} - - -/** - Convert an IANA character set name into a DB2 for i CCSID value. 
- - @param parmIANADesc An IANA character set name - @param[out] db2Ccsid The equivalent CCSID value - - @return 0 if successful. Failure otherwise -*/ -int32 convertIANAToDb2Ccsid(const char* parmIANADesc, uint16* db2Ccsid) -{ - int32 rc; - uint16 aixCcsid; - char aixCcsidString[Qlg_MaxDescSize]; - int aixEncodingScheme; - int db2EncodingScheme; - rc = convertTextDesc(Qlg_TypeIANA, Qlg_TypeAS400CCSID, parmIANADesc, aixCcsidString); - if (unlikely(rc)) - { - if (rc == DB2I_ERR_UNSUPP_CHARSET) - getErrTxt(DB2I_ERR_UNSUPP_CHARSET, parmIANADesc); - - return rc; - } - aixCcsid = atoi(aixCcsidString); - rc = getEncodingScheme(aixCcsid, aixEncodingScheme); - if (rc != 0) - return rc; - switch(aixEncodingScheme) { // Select on encoding scheme - case 0x1100: // EDCDIC SBCS - case 0x2100: // ASCII SBCS - case 0x4100: // AIX SBCS - case 0x4105: // MS Windows - case 0x5100: // ISO 7 bit ASCII - db2EncodingScheme = 0x1100; - break; - case 0x1200: // EDCDIC DBCS - case 0x2200: // ASCII DBCS - db2EncodingScheme = 0x1200; - break; - case 0x1301: // EDCDIC Mixed - case 0x2300: // ASCII Mixed - case 0x4403: // EUC (ISO 2022) - db2EncodingScheme = 0x1301; - break; - case 0x7200: // UCS2 - db2EncodingScheme = 0x7200; - break; - case 0x7807: // UTF-8 - db2EncodingScheme = 0x7807; - break; - case 0x7500: // UTF-32 - db2EncodingScheme = 0x7500; - break; - default: // Unknown - { - getErrTxt(DB2I_ERR_UNKNOWN_ENCODING,aixEncodingScheme); - return DB2I_ERR_UNKNOWN_ENCODING; - } - break; - } - if (aixEncodingScheme == db2EncodingScheme) - { - *db2Ccsid = aixCcsid; - } - else - { - rc = getAssociatedCCSID(aixCcsid, db2EncodingScheme, db2Ccsid); // EDCDIC SBCS - if (rc != 0) - return rc; - } - - return 0; -} - - -/** - Obtain the encoding scheme of a CCSID. - - @param inCcsid An IBM i CCSID - @param[out] outEncodingScheme The associated encoding scheme - - @return 0 if successful. Failure otherwise -*/ -int32 getEncodingScheme(const uint16 inCcsid, int32& outEncodingScheme) -{ - DBUG_ENTER("db2i_charsetSupport::getEncodingScheme"); - - static bool ptrInited = FALSE; - static char ptrSpace[sizeof(ILEpointer) + 15]; - static ILEpointer* ptrToPtr = (ILEpointer*)roundToQuadWordBdy(ptrSpace); - int rc; - - if (!ptrInited) - { - rc = _RSLOBJ2(ptrToPtr, RSLOBJ_TS_PGM, "QTQGESP", "QSYS"); - - if (rc) - { - getErrTxt(DB2I_ERR_RESOLVE_OBJ,"QTQGESP","QSYS","*PGM",errno); - DBUG_RETURN(DB2I_ERR_RESOLVE_OBJ); - } - ptrInited = TRUE; - } - - DBUG_ASSERT(inCcsid != 0); - - int GESPCCSID = inCcsid; - int GESPLen = 32; - int GESPNbrVal = 0; - int32 GESPES; - int GESPCSCPL[32]; - int GESPFB[3]; - void* ILEArgv[7]; - ILEArgv[0] = &GESPCCSID; - ILEArgv[1] = &GESPLen; - ILEArgv[2] = &GESPNbrVal; - ILEArgv[3] = &GESPES; - ILEArgv[4] = &GESPCSCPL; - ILEArgv[5] = &GESPFB; - ILEArgv[6] = NULL; - - rc = _PGMCALL(ptrToPtr, (void**)&ILEArgv, 0); - - if (rc) - { - getErrTxt(DB2I_ERR_PGMCALL,"QTQGESP","QSYS",rc); - DBUG_RETURN(DB2I_ERR_PGMCALL); - } - if (GESPFB[0] != 0 || - GESPFB[1] != 0 || - GESPFB[2] != 0) - { - getErrTxt(DB2I_ERR_QTQGESP,GESPFB[0],GESPFB[1],GESPFB[2]); - DBUG_RETURN(DB2I_ERR_QTQGESP); - } - outEncodingScheme = GESPES; - - DBUG_RETURN(0); -} - - -/** - Get the best fit equivalent CCSID. (Wrapper for QTQGRDC API) - - @param inCcsid An IBM i CCSID - @param inEncodingScheme The encoding scheme - @param[out] outCcsid The equivalent CCSID - - @return 0 if successful. 
Failure otherwise -*/ -int32 getAssociatedCCSID(const uint16 inCcsid, const int inEncodingScheme, uint16* outCcsid) -{ - DBUG_ENTER("db2i_charsetSupport::getAssociatedCCSID"); - static bool ptrInited = FALSE; - static char ptrSpace[sizeof(ILEpointer) + 15]; - static ILEpointer* ptrToPtr = (ILEpointer*)roundToQuadWordBdy(ptrSpace); - int rc; - - // Override non-standard charsets - if ((inCcsid == 923) && (inEncodingScheme == 0x1100)) - { - *outCcsid = 1148; - DBUG_RETURN(0); - } - else if ((inCcsid == 1250) && (inEncodingScheme == 0x1100)) - { - *outCcsid = 1153; - DBUG_RETURN(0); - } - - if (!ptrInited) - { - rc = _RSLOBJ2(ptrToPtr, RSLOBJ_TS_PGM, "QTQGRDC", "QSYS"); - - if (rc) - { - getErrTxt(DB2I_ERR_RESOLVE_OBJ,"QTQGRDC","QSYS","*PGM",errno); - DBUG_RETURN(DB2I_ERR_RESOLVE_OBJ); - } - ptrInited = TRUE; - } - - int GRDCCCSID = inCcsid; - int GRDCES = inEncodingScheme; - int GRDCSel = 0; - int GRDCAssCCSID; - int GRDCFB[3]; - void* ILEArgv[7]; - ILEArgv[0] = &GRDCCCSID; - ILEArgv[1] = &GRDCES; - ILEArgv[2] = &GRDCSel; - ILEArgv[3] = &GRDCAssCCSID; - ILEArgv[4] = &GRDCFB; - ILEArgv[5] = NULL; - - rc = _PGMCALL(ptrToPtr, (void**)&ILEArgv, 0); - - if (rc) - { - getErrTxt(DB2I_ERR_PGMCALL,"QTQGRDC","QSYS",rc); - DBUG_RETURN(DB2I_ERR_PGMCALL); - } - if (GRDCFB[0] != 0 || - GRDCFB[1] != 0 || - GRDCFB[2] != 0) - { - getErrTxt(DB2I_ERR_QTQGRDC,GRDCFB[0],GRDCFB[1],GRDCFB[2]); - DBUG_RETURN(DB2I_ERR_QTQGRDC); - } - - *outCcsid = GRDCAssCCSID; - - DBUG_RETURN(0); -} - -/** - Open an iconv conversion between a MySQL charset and the respective IBM i CCSID - - @param direction The direction of the conversion - @param mysqlCSName Name of the MySQL character set - @param db2CCSID The IBM i CCSID - @param hashKey The key to use for inserting the opened conversion into the cache - @param[out] newConversion The iconv descriptor - - @return 0 if successful. Failure otherwise -*/ -static int32 openNewConversion(enum_conversionDirection direction, - const char* mysqlCSName, - uint16 db2CCSID, - IconvMap::HashKey* hashKey, - iconv_t& newConversion) -{ - DBUG_ENTER("db2i_charsetSupport::openNewConversion"); - - char mysqlAix41Desc[Qlg_MaxDescSize]; - char db2Aix41Desc[Qlg_MaxDescSize]; - char db2CcsidString[6] = ""; - int32 rc; - - /* - First we have to convert the MySQL IANA-like name and the DB2 CCSID into - there equivalent iconv descriptions. - */ - rc = convertTextDesc(Qlg_TypeIANA, Qlg_TypeAix41, mysqlCSName, mysqlAix41Desc); - if (unlikely(rc)) - { - if (rc == DB2I_ERR_UNSUPP_CHARSET) - getErrTxt(DB2I_ERR_UNSUPP_CHARSET, mysqlCSName); - - DBUG_RETURN(rc); - } - CHARSET_INFO *cs= &my_charset_bin; - (uint)(cs->cset->long10_to_str)(cs,db2CcsidString,sizeof(db2CcsidString), 10, db2CCSID); - rc = convertTextDesc(Qlg_TypeAS400CCSID, Qlg_TypeAix41, db2CcsidString, db2Aix41Desc); - if (unlikely(rc)) - { - if (rc == DB2I_ERR_UNSUPP_CHARSET) - getErrTxt(DB2I_ERR_UNSUPP_CHARSET, mysqlCSName); - - DBUG_RETURN(rc); - } - - /* Call iconv to open the conversion. */ - if (direction == toDB2) - { - newConversion = iconv_open(db2Aix41Desc, mysqlAix41Desc); - } - else - { - newConversion = iconv_open(mysqlAix41Desc, db2Aix41Desc); - } - - if (unlikely(newConversion == (iconv_t) -1)) - { - getErrTxt(DB2I_ERR_UNSUPP_CHARSET, mysqlCSName); - DBUG_RETURN(DB2I_ERR_UNSUPP_CHARSET); - } - - /* Insert the new conversion into the cache. 
*/ - IconvMap* mapping = (IconvMap*)alloc_root(&iconvMapMemroot, sizeof(IconvMap)); - if (!mapping) - { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(IconvMap)); - DBUG_RETURN( HA_ERR_OUT_OF_MEM); - } - memcpy(&(mapping->hashKey), hashKey, sizeof(mapping->hashKey)); - mapping->iconvDesc = newConversion; - pthread_mutex_lock(&iconvMapHashMutex); - my_hash_insert(&iconvMapHash, (const uchar*)mapping); - pthread_mutex_unlock(&iconvMapHashMutex); - - DBUG_RETURN(0); -} - - -/** - Open an iconv conversion between a MySQL charset and the respective IBM i CCSID - - @param direction The direction of the conversion - @param cs The MySQL character set - @param db2CCSID The IBM i CCSID - @param[out] newConversion The iconv descriptor - - @return 0 if successful. Failure otherwise -*/ -int32 getConversion(enum_conversionDirection direction, const CHARSET_INFO* cs, uint16 db2CCSID, iconv_t& conversion) -{ - DBUG_ENTER("db2i_charsetSupport::getConversion"); - - int32 rc; - - /* Build the hash key */ - IconvMap::HashKey hashKey; - hashKey.direction= direction; - hashKey.myCharset= cs; - hashKey.db2CCSID= db2CCSID; - - /* Look for the conversion in the cache and add it if it is not there. */ - IconvMap *mapping; - if (!(mapping= (IconvMap *) hash_search(&iconvMapHash, - (const uchar*)&hashKey, - sizeof(hashKey)))) - { - DBUG_PRINT("getConversion", ("Hash miss for direction=%d, cs=%s, ccsid=%d", direction, cs->name, db2CCSID)); - rc= openNewConversion(direction, cs->csname, db2CCSID, &hashKey, conversion); - if (rc) - DBUG_RETURN(rc); - } - else - { - conversion= mapping->iconvDesc; - } - - DBUG_RETURN(0); -} - -/** - Fast-path conversion from ASCII to EBCDIC for use in converting - identifiers to be sent to the QMY APIs. - - @param input ASCII data - @param[out] ouput EBCDIC data - @param ilen Size of input buffer and output buffer -*/ -int convToEbcdic(const char* input, char* output, size_t ilen) -{ - static bool inited = FALSE; - static iconv_t ic; - - if (ilen == 0) - return 0; - - if (!inited) - { - ic = iconv_open( "IBM-037", "ISO8859-1" ); - inited = TRUE; - } - size_t substitutedChars; - size_t olen = ilen; - if (iconv( ic, (char**)&input, &ilen, &output, &olen, &substitutedChars ) == -1) - return errno; - - return 0; -} - - -/** - Fast-path conversion from EBCDIC to ASCII for use in converting - data received from the QMY APIs. - - @param input EBCDIC data - @param[out] ouput ASCII data - @param ilen Size of input buffer and output buffer -*/ -int convFromEbcdic(const char* input, char* output, size_t ilen) -{ - static bool inited = FALSE; - static iconv_t ic; - - if (ilen == 0) - return 0; - - if (!inited) - { - ic = iconv_open("ISO8859-1", "IBM-037"); - inited = TRUE; - } - - size_t substitutedChars; - size_t olen = ilen; - if (iconv( ic, (char**)&input, &ilen, &output, &olen, &substitutedChars) == -1) - return errno; - - return 0; -} diff --git a/storage/ibmdb2i/db2i_charsetSupport.h b/storage/ibmdb2i/db2i_charsetSupport.h deleted file mode 100644 index 77051e1e0db..00000000000 --- a/storage/ibmdb2i/db2i_charsetSupport.h +++ /dev/null @@ -1,65 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. 
- (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#ifndef DB2I_CHARSETSUPPORT_H -#define DB2I_CHARSETSUPPORT_H - -#include "db2i_global.h" -#include "mysql_priv.h" -#include -#include "db2i_iconv.h" - -/** - @enum enum_conversionDirection - - Conversion directions for getConversion() -*/ -enum enum_conversionDirection -{ - toMySQL, - toDB2 -}; - -int initCharsetSupport(); -void doneCharsetSupport(); -int32 convertIANAToDb2Ccsid(const char* parmIANADesc, uint16* db2Ccsid); -int32 getEncodingScheme(const uint16 inCcsid, int32& outEncodingScheme); -int32 getAssociatedCCSID(const uint16 inCcsid, const int inEncodingScheme, uint16* outCcsid); -int convToEbcdic(const char* input, char* output, size_t ilen); -int convFromEbcdic(const char* input, char* output, size_t ilen); -int32 getConversion(enum_conversionDirection direction, const CHARSET_INFO* cs, uint16 db2CCSID, iconv_t& conversion); - -#endif diff --git a/storage/ibmdb2i/db2i_collationSupport.cc b/storage/ibmdb2i/db2i_collationSupport.cc deleted file mode 100644 index 65a17fd2452..00000000000 --- a/storage/ibmdb2i/db2i_collationSupport.cc +++ /dev/null @@ -1,355 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. 
- (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#include "db2i_collationSupport.h" -#include "db2i_errors.h" - - -/* - The following arrays define a mapping between MySQL collation names and - corresponding IBM i sort sequences. The mapping is a 1-to-1 correlation - between corresponding array slots but is incomplete without case-sensitivity - markers dynamically added to the mySqlSortSequence names. -*/ -#define MAX_COLLATION 87 -static const char* mySQLCollation[MAX_COLLATION] = -{ - {"ascii_general"}, - {"ascii"}, - {"big5_chinese"}, - {"big5"}, - {"cp1250_croatian"}, - {"cp1250_general"}, - {"cp1250_polish"}, - {"cp1250"}, - {"cp1251_bulgarian"}, - {"cp1251_general"}, - {"cp1251"}, - {"cp1256_general"}, - {"cp1256"}, - {"cp850_general"}, - {"cp850"}, - {"cp852_general"}, - {"cp852"}, - {"cp932_japanese"}, - {"cp932"}, - {"euckr_korean"}, - {"euckr"}, - {"gb2312_chinese"}, - {"gb2312"}, - {"gbk_chinese"}, - {"gbk"}, - {"greek_general"}, - {"greek"}, - {"hebrew_general"}, - {"hebrew"}, - {"latin1_danish"}, - {"latin1_general"}, - {"latin1_german1"}, - {"latin1_spanish"}, - {"latin1_swedish"}, - {"latin1"}, - {"latin2_croatian"}, - {"latin2_general"}, - {"latin2_hungarian"}, - {"latin2"}, - {"latin5_turkish"}, - {"latin5"}, - {"macce_general"}, - {"macce"}, - {"sjis_japanese"}, - {"sjis"}, - {"tis620_thai"}, - {"tis620"}, - {"ucs2_czech"}, - {"ucs2_danish"}, - {"ucs2_esperanto"}, - {"ucs2_estonian"}, - {"ucs2_general"}, - {"ucs2_hungarian"}, - {"ucs2_icelandic"}, - {"ucs2_latvian"}, - {"ucs2_lithuanian"}, - {"ucs2_persian"}, - {"ucs2_polish"}, - {"ucs2_romanian"}, - {"ucs2_slovak"}, - {"ucs2_slovenian"}, - {"ucs2_spanish"}, - {"ucs2_swedish"}, - {"ucs2_turkish"}, - {"ucs2_unicode"}, - {"ucs2"}, - {"ujis_japanese"}, - {"ujis"}, - {"utf8_czech"}, - {"utf8_danish"}, - {"utf8_esperanto"}, - {"utf8_estonian"}, - {"utf8_general"}, - {"utf8_hungarian"}, - {"utf8_icelandic"}, - {"utf8_latvian"}, - {"utf8_lithuanian"}, - {"utf8_persian"}, - {"utf8_polish"}, - {"utf8_romanian"}, - {"utf8_slovak"}, - {"utf8_slovenian"}, - {"utf8_spanish"}, - {"utf8_swedish"}, - {"utf8_turkish"}, - {"utf8_unicode"}, - {"utf8"} -}; - - -static const char* mySqlSortSequence[MAX_COLLATION] = -{ - {"QALA101F4"}, - {"QBLA101F4"}, - {"QACHT04B0"}, - {"QBCHT04B0"}, - {"QALA20481"}, - {"QCLA20481"}, - {"QDLA20481"}, - {"QELA20481"}, - {"QACYR0401"}, - {"QBCYR0401"}, - {"QCCYR0401"}, - {"QAARA01A4"}, - {"QBARA01A4"}, - {"QCLA101F4"}, - {"QDLA101F4"}, - {"QALA20366"}, - {"QBLA20366"}, - {"QAJPN04B0"}, - {"QBJPN04B0"}, - {"QAKOR04B0"}, - {"QBKOR04B0"}, - {"QACHS04B0"}, - {"QBCHS04B0"}, - {"QCCHS04B0"}, - {"QDCHS04B0"}, - {"QAELL036B"}, - {"QBELL036B"}, - {"QAHEB01A8"}, - 
{"QBHEB01A8"}, - {"QALA1047C"}, - {"QBLA1047C"}, - {"QCLA1047C"}, - {"QDLA1047C"}, - {"QELA1047C"}, - {"QFLA1047C"}, - {"QCLA20366"}, - {"QELA20366"}, - {"QFLA20366"}, - {"QGLA20366"}, - {"QATRK0402"}, - {"QBTRK0402"}, - {"QHLA20366"}, - {"QILA20366"}, - {"QCJPN04B0"}, - {"QDJPN04B0"}, - {"QATHA0346"}, - {"QBTHA0346"}, - {"ACS_CZ"}, - {"ADA_DK"}, - {"AEO"}, - {"AET"}, - {"QAUCS04B0"}, - {"AHU"}, - {"AIS"}, - {"ALV"}, - {"ALT"}, - {"AFA"}, - {"APL"}, - {"ARO"}, - {"ASK"}, - {"ASL"}, - {"AES"}, - {"ASW"}, - {"ATR"}, - {"AEN"}, - {"*HEX"}, - {"QEJPN04B0"}, - {"QFJPN04B0"}, - {"ACS_CZ"}, - {"ADA_DK"}, - {"AEO"}, - {"AET"}, - {"QAUCS04B0"}, - {"AHU"}, - {"AIS"}, - {"ALV"}, - {"ALT"}, - {"AFA"}, - {"APL"}, - {"ARO"}, - {"ASK"}, - {"ASL"}, - {"AES"}, - {"ASW"}, - {"ATR"}, - {"AEN"}, - {"*HEX"} -}; - - -/** - Get the IBM i sort sequence that corresponds to the given MySQL collation. - - @param fieldCharSet The collated character set - @param[out] rtnSortSequence The corresponding sort sequence - - @return 0 if successful. Failure otherwise -*/ -static int32 getAssociatedSortSequence(const CHARSET_INFO *fieldCharSet, const char** rtnSortSequence) -{ - DBUG_ENTER("ha_ibmdb2i::getAssociatedSortSequence"); - - if (strcmp(fieldCharSet->csname,"binary") != 0) - { - int collationSearchLen = strlen(fieldCharSet->name); - if (fieldCharSet->state & MY_CS_BINSORT) - collationSearchLen -= 4; - else - collationSearchLen -= 3; - - uint16 loopCnt = 0; - for (loopCnt; loopCnt < MAX_COLLATION; ++loopCnt) - { - if ((strlen(mySQLCollation[loopCnt]) == collationSearchLen) && - (strncmp((char*)mySQLCollation[loopCnt], fieldCharSet->name, collationSearchLen) == 0)) - break; - } - if (loopCnt == MAX_COLLATION) // Did not find associated sort sequence - { - getErrTxt(DB2I_ERR_SRTSEQ); - DBUG_RETURN(DB2I_ERR_SRTSEQ); - } - *rtnSortSequence = mySqlSortSequence[loopCnt]; - } - - DBUG_RETURN(0); -} - - -/** - Update sort sequence information for a key. - - This function accumulates information about a key as it is called for each - field composing the key. The caller should invoke the function for each field - and (with the exception of the charset parm) preserve the values for the - parms across invocations, until a particular key has been evaluated. Once - the last field in the key has been evaluated, the fileSortSequence and - fileSortSequenceLibrary parms will contain the correct information for - creating the corresponding DB2 key. - - @param charset The character set under consideration - @param[in, out] fileSortSequenceType The type of the current key's sort seq - @param[in, out] fileSortSequence The IBM i identifier for the DB2 sort sequence - that corresponds - - @return 0 if successful. 
Failure otherwise -*/ -int32 updateAssociatedSortSequence(const CHARSET_INFO* charset, - char* fileSortSequenceType, - char* fileSortSequence, - char* fileSortSequenceLibrary) -{ - DBUG_ENTER("ha_ibmdb2i::updateAssociatedSortSequence"); - DBUG_ASSERT(charset); - if (strcmp(charset->csname,"binary") != 0) - { - char newSortSequence[11] = ""; - char newSortSequenceType = ' '; - const char* foundSortSequence; - int rc = getAssociatedSortSequence(charset, &foundSortSequence); - if (rc) DBUG_RETURN (rc); - switch(foundSortSequence[0]) - { - case '*': // Binary - strcat(newSortSequence,foundSortSequence); - newSortSequenceType = 'B'; - break; - case 'Q': // Non-ICU sort sequence - strcat(newSortSequence,foundSortSequence); - if ((charset->state & MY_CS_BINSORT) != 0) - { - strcat(newSortSequence,"U"); - } - else if ((charset->state & MY_CS_CSSORT) != 0) - { - strcat(newSortSequence,"U"); - } - else - { - strcat(newSortSequence,"S"); - } - newSortSequenceType = 'N'; - break; - default: // ICU sort sequence - { - if ((charset->state & MY_CS_CSSORT) == 0) - { - if (osVersion.v >= 6) - strcat(newSortSequence,"I34"); // ICU 3.4 - else - strcat(newSortSequence,"I26"); // ICU 2.6.1 - } - strcat(newSortSequence,foundSortSequence); - newSortSequenceType = 'I'; - } - break; - } - if (*fileSortSequenceType == ' ') // If no sort sequence has been set yet - { - // Set associated sort sequence - strcpy(fileSortSequence,newSortSequence); - strcpy(fileSortSequenceLibrary,"QSYS"); - *fileSortSequenceType = newSortSequenceType; - } - else if (strcmp(fileSortSequence,newSortSequence) != 0) - { - // Only one sort sequence/collation is supported for each DB2 index. - getErrTxt(DB2I_ERR_MIXED_COLLATIONS); - DBUG_RETURN(DB2I_ERR_MIXED_COLLATIONS); - } - } - - DBUG_RETURN(0); -} diff --git a/storage/ibmdb2i/db2i_collationSupport.h b/storage/ibmdb2i/db2i_collationSupport.h deleted file mode 100644 index b2ce09de1ea..00000000000 --- a/storage/ibmdb2i/db2i_collationSupport.h +++ /dev/null @@ -1,48 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#ifndef DB2I_COLLATIONSUPPORT_H -#define DB2I_COLLATIONSUPPORT_H - -#include "db2i_global.h" -#include "mysql_priv.h" - -int32 updateAssociatedSortSequence(const CHARSET_INFO* charset, - char* fileSortSequenceType, - char* fileSortSequence, - char* fileSortSequenceLibrary); - -#endif diff --git a/storage/ibmdb2i/db2i_constraints.cc b/storage/ibmdb2i/db2i_constraints.cc deleted file mode 100644 index 1fde0dd3d14..00000000000 --- a/storage/ibmdb2i/db2i_constraints.cc +++ /dev/null @@ -1,672 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - - -#include "ha_ibmdb2i.h" -#include "db2i_safeString.h" - -// This function is called when building the CREATE TABLE information for -// foreign key constraints. It converts a constraint, table, schema, or -// field name from EBCDIC to ASCII. If the DB2 name is quoted, it removes -// those quotes. It then adds the appropriate quotes for a MySQL identifier. 
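// --- Editorial sketch, not part of the original patch ----------------------
// The helper below re-quotes a DB2 name for MySQL SHOW CREATE TABLE output.
// The underlying rule it relies on is sketched here: wrap the identifier in
// the quote character and double any occurrence of that character inside the
// name. quoteIdentifier is a hypothetical stand-in, not the server's
// get_quote_char_for_identifier machinery.
#include <cstdio>
#include <string>

static std::string quoteIdentifier(const std::string& name, char quote = '`')
{
  std::string out(1, quote);
  for (char c : name)
  {
    out += c;
    if (c == quote)   // embedded quote characters are doubled
      out += c;
  }
  out += quote;
  return out;
}

int main()
{
  // A table named ab`c is emitted into CREATE TABLE text as `ab``c`.
  printf("%s\n", quoteIdentifier("ab`c").c_str());
  return 0;
}
// ---------------------------------------------------------------------------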
- -static void convNameForCreateInfo(THD *thd, SafeString& info, char* fromName, int len) -{ - int quote; - char cquote; // Quote character - char convName[MAX_DB2_FILENAME_LENGTH]; // Converted name - - memset(convName, 0, sizeof(convName)); - convFromEbcdic(fromName, convName, len); - quote = get_quote_char_for_identifier(thd, convName, len); - cquote = (char) quote; - if (quote != EOF) - info.strcat(cquote); - if (convName[0] == '"') // If DB2 name was quoted, remove quotes - { - if (strstr(convName, "\"\"")) - stripExtraQuotes(convName+1, len-1); - info.strncat((char*)(convName+1), len-2); - } - else // DB2 name was not quoted - info.strncat(convName, len); - if (quote != EOF) - info.strcat(cquote); -} - -/** - Evaluate the parse tree to build foreign key constraint clauses - - @parm lex The parse tree - @parm appendHere The DB2 string to receive the constraint clauses - @parm path The path to the table under consideration - @parm fields Pointer to the table's list of field pointers - @parm[in, out] fileSortSequenceType The sort sequence type associated with the table - @parm[in, out] fileSortSequence The sort sequence associated with the table - @parm[in, out] fileSortSequenceLibrary The sort sequence library associated with the table - - @return 0 if successful; HA_ERR_CANNOT_ADD_FOREIGN otherwise -*/ -int ha_ibmdb2i::buildDB2ConstraintString(LEX* lex, - String& appendHere, - const char* path, - Field** fields, - char* fileSortSequenceType, - char* fileSortSequence, - char* fileSortSequenceLibrary) -{ - List_iterator keyIter(lex->alter_info.key_list); - char colName[MAX_DB2_COLNAME_LENGTH+1]; - - Key* curKey; - - while (curKey = keyIter++) - { - if (curKey->type == Key::FOREIGN_KEY) - { - appendHere.append(STRING_WITH_LEN(", ")); - - Foreign_key* fk = (Foreign_key*)curKey; - - char db2LibName[MAX_DB2_SCHEMANAME_LENGTH+1]; - if (fk->name) - { - char db2FKName[MAX_DB2_FILENAME_LENGTH+1]; - appendHere.append(STRING_WITH_LEN("CONSTRAINT ")); - if (fk->ref_table->db.str) - { - convertMySQLNameToDB2Name(fk->ref_table->db.str, db2LibName, sizeof(db2LibName)); - } - else - { - db2i_table::getDB2LibNameFromPath(path, db2LibName); - } - if (lower_case_table_names == 1) - my_casedn_str(files_charset_info, db2LibName); - appendHere.append(db2LibName); - - appendHere.append('.'); - - convertMySQLNameToDB2Name(fk->name, db2FKName, sizeof(db2FKName)); - appendHere.append(db2FKName); - } - - appendHere.append(STRING_WITH_LEN(" FOREIGN KEY (")); - - bool firstTime = true; - - List_iterator column(fk->columns); - Key_part_spec* curColumn; - - while (curColumn = column++) - { - if (!firstTime) - { - appendHere.append(','); - } - firstTime = false; - - convertMySQLNameToDB2Name(curColumn->field_name, colName, sizeof(colName)); - appendHere.append(colName); - - // DB2 requires that the sort sequence on the child table match the parent table's - // sort sequence. We ensure that happens by updating the sort sequence according - // to the constrained fields. 
- Field** field = fields; - do - { - if (strcmp((*field)->field_name, curColumn->field_name) == 0) - { - int rc = updateAssociatedSortSequence((*field)->charset(), - fileSortSequenceType, - fileSortSequence, - fileSortSequenceLibrary); - - if (unlikely(rc)) return rc; - } - } while (*(++field)); - } - - firstTime = true; - - appendHere.append(STRING_WITH_LEN(") REFERENCES ")); - - if (fk->ref_table->db.str) - { - convertMySQLNameToDB2Name(fk->ref_table->db.str, db2LibName, sizeof(db2LibName)); - } - else - { - db2i_table::getDB2LibNameFromPath(path, db2LibName); - } - if (lower_case_table_names == 1) - my_casedn_str(files_charset_info, db2LibName); - appendHere.append(db2LibName); - appendHere.append('.'); - - char db2FileName[MAX_DB2_FILENAME_LENGTH+1]; - convertMySQLNameToDB2Name(fk->ref_table->table.str, db2FileName, sizeof(db2FileName)); - if (lower_case_table_names) - my_casedn_str(files_charset_info, db2FileName); - appendHere.append(db2FileName); - - - if (!fk->ref_columns.is_empty()) - { - List_iterator ref(fk->ref_columns); - Key_part_spec* curRef; - appendHere.append(STRING_WITH_LEN(" (")); - - - while (curRef = ref++) - { - if (!firstTime) - { - appendHere.append(','); - } - firstTime = false; - - convertMySQLNameToDB2Name(curRef->field_name, colName, sizeof(colName)); - appendHere.append(colName); - } - - appendHere.append(STRING_WITH_LEN(") ")); - } - - if (fk->delete_opt != Foreign_key::FK_OPTION_UNDEF) - { - appendHere.append(STRING_WITH_LEN("ON DELETE ")); - switch (fk->delete_opt) - { - case Foreign_key::FK_OPTION_RESTRICT: - appendHere.append(STRING_WITH_LEN("RESTRICT ")); break; - case Foreign_key::FK_OPTION_CASCADE: - appendHere.append(STRING_WITH_LEN("CASCADE ")); break; - case Foreign_key::FK_OPTION_SET_NULL: - appendHere.append(STRING_WITH_LEN("SET NULL ")); break; - case Foreign_key::FK_OPTION_NO_ACTION: - appendHere.append(STRING_WITH_LEN("NO ACTION ")); break; - case Foreign_key::FK_OPTION_DEFAULT: - appendHere.append(STRING_WITH_LEN("SET DEFAULT ")); break; - default: - return HA_ERR_CANNOT_ADD_FOREIGN; break; - } - } - - if (fk->update_opt != Foreign_key::FK_OPTION_UNDEF) - { - appendHere.append(STRING_WITH_LEN("ON UPDATE ")); - switch (fk->update_opt) - { - case Foreign_key::FK_OPTION_RESTRICT: - appendHere.append(STRING_WITH_LEN("RESTRICT ")); break; - case Foreign_key::FK_OPTION_NO_ACTION: - appendHere.append(STRING_WITH_LEN("NO ACTION ")); break; - default: - return HA_ERR_CANNOT_ADD_FOREIGN; break; - } - } - - } - - } - - return 0; -} - - -/*********************************************************************** -Get the foreign key information in the form of a character string so -that it can be inserted into a CREATE TABLE statement. This is used by -the SHOW CREATE TABLE statement. The string will later be freed by the -free_foreign_key_create_info() method. 
-************************************************************************/ - -char* ha_ibmdb2i::get_foreign_key_create_info(void) -{ - DBUG_ENTER("ha_ibmdb2i::get_foreign_key_create_info"); - int rc = 0; - char* infoBuffer = NULL; // Pointer to string returned to MySQL - uint32 constraintSpaceLength;// Length of space passed to DB2 - ValidatedPointer constraintSpace; // Space pointer passed to DB2 - uint32 neededLen; // Length returned from DB2 - uint32 cstCnt; // Number of foreign key constraints from DB2 - uint32 fld; // - constraint_hdr* cstHdr; // Pointer to constraint header structure - FK_constraint* FKCstDef; // Pointer to constraint definition structure - cst_name* fieldName; // Pointer to field name structure - char* tempPtr; // Temp pointer for traversing constraint space - char convName[128]; - - /* Allocate space to retrieve the DB2 constraint information. */ - - if (!(share = get_share(table_share->path.str, table))) - DBUG_RETURN(NULL); - - constraintSpaceLength = 5000; // Try allocating 5000 bytes and see if enough. - - initBridge(); - - constraintSpace.alloc(constraintSpaceLength); - rc = bridge()->expectErrors(QMY_ERR_NEED_MORE_SPACE) - ->constraints(db2Table->dataFile()->getMasterDefnHandle(), - constraintSpace, - constraintSpaceLength, - &neededLen, - &cstCnt); - - if (unlikely(rc == QMY_ERR_NEED_MORE_SPACE)) - { - constraintSpaceLength = neededLen; // Get length of space that's needed - constraintSpace.realloc(constraintSpaceLength); - rc = bridge()->expectErrors(QMY_ERR_NEED_MORE_SPACE) - ->constraints(db2Table->dataFile()->getMasterDefnHandle(), - constraintSpace, - constraintSpaceLength, - &neededLen, - &cstCnt); - } - - /* If constraint information was returned by DB2, build a text string */ - /* to return to MySQL. */ - - if ((rc == 0) && (cstCnt > 0)) - { - THD* thd = ha_thd(); - infoBuffer = (char*) my_malloc(MAX_FOREIGN_LEN + 1, MYF(MY_WME)); - if (infoBuffer == NULL) - { - free_share(share); - DBUG_RETURN(NULL); - } - - SafeString info(infoBuffer, MAX_FOREIGN_LEN + 1); - - /* Loop through the DB2 constraints and build a text string for each foreign */ - /* key constraint that is found. */ - - tempPtr = constraintSpace; - cstHdr = (constraint_hdr_t*)(void*)constraintSpace; // Address first constraint definition - for (int i = 0; i < cstCnt && !info.overflowed(); ++i) - { - if (cstHdr->CstType[0] == QMY_CST_FK) // If this is a foreign key constraint - { - tempPtr = (char*)(tempPtr + cstHdr->CstDefOff); - FKCstDef = (FK_constraint_t*)tempPtr; - - /* Process the constraint name. */ - - info.strncat(STRING_WITH_LEN(",\n CONSTRAINT ")); - convNameForCreateInfo(thd, info, - FKCstDef->CstName.Name, FKCstDef->CstName.Len); - - /* Process the names of the foreign keys. */ - - info.strncat(STRING_WITH_LEN(" FOREIGN KEY (")); - tempPtr = (char*)(tempPtr + FKCstDef->KeyColOff); - fieldName= (cst_name_t*)tempPtr; - for (fld = 0; fld < FKCstDef->KeyCnt; ++fld) - { - convNameForCreateInfo(thd, info, fieldName->Name, fieldName->Len); - if ((fld + 1) < FKCstDef->KeyCnt) - { - info.strncat(STRING_WITH_LEN(", ")); - fieldName = fieldName + 1; - } - } - - /* Process the schema-name and name of the referenced table. */ - - info.strncat(STRING_WITH_LEN(") REFERENCES ")); - convNameForCreateInfo(thd, info, - FKCstDef->RefSchema.Name, FKCstDef->RefSchema.Len); - info.strcat('.'); - convNameForCreateInfo(thd, info, - FKCstDef->RefTable.Name, FKCstDef->RefTable.Len); - info.strncat(STRING_WITH_LEN(" (")); - - /* Process the names of the referenced keys. 
*/ - - tempPtr = (char*)FKCstDef; - tempPtr = (char*)(tempPtr + FKCstDef->RefColOff); - fieldName= (cst_name_t*)tempPtr; - for (fld = 0; fld < FKCstDef->RefCnt; ++fld) - { - convNameForCreateInfo(thd, info, fieldName->Name, fieldName->Len); - if ((fld + 1) < FKCstDef->RefCnt) - { - info.strncat(STRING_WITH_LEN(", ")); - fieldName = fieldName + 1; - } - } - - /* Process the ON UPDATE and ON DELETE rules. */ - - info.strncat(STRING_WITH_LEN(") ON UPDATE ")); - switch(FKCstDef->UpdMethod) - { - case QMY_NOACTION: info.strncat(STRING_WITH_LEN("NO ACTION")); break; - case QMY_RESTRICT: info.strncat(STRING_WITH_LEN("RESTRICT")); break; - default: break; - } - info.strncat(STRING_WITH_LEN(" ON DELETE ")); - switch(FKCstDef->DltMethod) - { - case QMY_CASCADE: info.strncat(STRING_WITH_LEN("CASCADE")); break; - case QMY_SETDFT: info.strncat(STRING_WITH_LEN("SET DEFAULT")); break; - case QMY_SETNULL: info.strncat(STRING_WITH_LEN("SET NULL")); break; - case QMY_NOACTION: info.strncat(STRING_WITH_LEN("NO ACTION")); break; - case QMY_RESTRICT: info.strncat(STRING_WITH_LEN("RESTRICT")); break; - default: break; - } - } - - /* Address the next constraint, if any. */ - - if ((i+1) < cstCnt) - { - tempPtr = (char*)cstHdr + cstHdr->CstLen; - cstHdr = (constraint_hdr_t*)(tempPtr); - } - } - } - - /* Cleanup and return */ - free_share(share); - - DBUG_RETURN(infoBuffer); -} - -/*********************************************************************** -Free the foreign key create info (for a table) that was acquired by the -get_foreign_key_create_info() method. -***********************************************************************/ - -void ha_ibmdb2i::free_foreign_key_create_info(char* info) -{ - DBUG_ENTER("ha_ibmdb2i::free_foreign_key_create_info"); - - if (info) - { - my_free(info, MYF(0)); - } - DBUG_VOID_RETURN; -} - -/*********************************************************************** -This method returns to MySQL a list, with one entry in the list describing -each foreign key constraint. -***********************************************************************/ - -int ha_ibmdb2i::get_foreign_key_list(THD *thd, List *f_key_list) -{ - DBUG_ENTER("ha_ibmdb2i::get_foreign_key_list"); - int rc = 0; - uint32 constraintSpaceLength; // Length of space passed to DB2 - ValidatedPointer constraintSpace; // Space pointer passed to DB2 - uint16 rtnCode; // Return code from DB2 - uint32 neededLen; // Bytes needed to contain DB2 constraint info - uint32 cstCnt; // Number of constraints returned by DB2 - uint32 fld; - constraint_hdr* cstHdr; // Pointer to a cst header structure - FK_constraint* FKCstDef; // Pointer to definition of foreign key constraint - cst_name* fieldName; // Pointer to field name structure - const char *method; - ulong methodLen; - char* tempPtr; // Temp pointer for traversing constraint space - char convName[128]; - - if (!(share = get_share(table_share->path.str, table))) - DBUG_RETURN(0); - - // Allocate space to retrieve the DB2 constraint information. - constraintSpaceLength = 5000; // Try allocating 5000 bytes and see if enough. 
- - constraintSpace.alloc(constraintSpaceLength); - rc = bridge()->expectErrors(QMY_ERR_NEED_MORE_SPACE) - ->constraints(db2Table->dataFile()->getMasterDefnHandle(), - constraintSpace, - constraintSpaceLength, - &neededLen, - &cstCnt); - - if (unlikely(rc == QMY_ERR_NEED_MORE_SPACE)) - { - constraintSpaceLength = neededLen; // Get length of space that's needed - constraintSpace.realloc(constraintSpaceLength); - rc = bridge()->expectErrors(QMY_ERR_NEED_MORE_SPACE) - ->constraints(db2Table->dataFile()->getMasterDefnHandle(), - constraintSpace, - constraintSpaceLength, - &neededLen, - &cstCnt); - } - - /* If constraint information was returned by DB2, build a text string */ - /* to return to MySQL. */ - if ((rc == 0) && (cstCnt > 0)) - { - tempPtr = constraintSpace; - cstHdr = (constraint_hdr_t*)(void*)constraintSpace; // Address first constraint definition - for (int i = 0; i < cstCnt; ++i) - { - if (cstHdr->CstType[0] == QMY_CST_FK) // If this is a foreign key constraint - { - FOREIGN_KEY_INFO f_key_info; - LEX_STRING *name= 0; - tempPtr = (char*)(tempPtr + cstHdr->CstDefOff); - FKCstDef = (FK_constraint_t*)tempPtr; - - /* Process the constraint name. */ - - convFromEbcdic(FKCstDef->CstName.Name, convName,FKCstDef->CstName.Len); - if (convName[0] == '"') // If quoted, exclude quotes. - f_key_info.forein_id = thd_make_lex_string(thd, 0, - convName + 1, (uint) (FKCstDef->CstName.Len - 2), 1); - else // Not quoted - f_key_info.forein_id = thd_make_lex_string(thd, 0, - convName, (uint) FKCstDef->CstName.Len, 1); - - /* Process the names of the foreign keys. */ - - - tempPtr = (char*)(tempPtr + FKCstDef->KeyColOff); - fieldName = (cst_name_t*)tempPtr; - for (fld = 0; fld < FKCstDef->KeyCnt; ++fld) - { - convFromEbcdic(fieldName->Name, convName, fieldName->Len); - if (convName[0] == '"') // If quoted, exclude quotes. - name = thd_make_lex_string(thd, name, - convName + 1, (uint) (fieldName->Len - 2), 1); - else - name = thd_make_lex_string(thd, name, convName, (uint) fieldName->Len, 1); - f_key_info.foreign_fields.push_back(name); - if ((fld + 1) < FKCstDef->KeyCnt) - fieldName = fieldName + 1; - } - - /* Process the schema and name of the referenced table. */ - - convFromEbcdic(FKCstDef->RefSchema.Name, convName, FKCstDef->RefSchema.Len); - if (convName[0] == '"') // If quoted, exclude quotes. - f_key_info.referenced_db = thd_make_lex_string(thd, 0, - convName + 1, (uint) (FKCstDef->RefSchema.Len -2), 1); - else - f_key_info.referenced_db = thd_make_lex_string(thd, 0, - convName, (uint) FKCstDef->RefSchema.Len, 1); - convFromEbcdic(FKCstDef->RefTable.Name, convName, FKCstDef->RefTable.Len); - if (convName[0] == '"') // If quoted, exclude quotes. - f_key_info.referenced_table = thd_make_lex_string(thd, 0, - convName +1, (uint) (FKCstDef->RefTable.Len -2), 1); - else - f_key_info.referenced_table = thd_make_lex_string(thd, 0, - convName, (uint) FKCstDef->RefTable.Len, 1); - - /* Process the names of the referenced keys. */ - - tempPtr = (char*)FKCstDef; - tempPtr = (char*)(tempPtr + FKCstDef->RefColOff); - fieldName= (cst_name_t*)tempPtr; - for (fld = 0; fld < FKCstDef->RefCnt; ++fld) - { - convFromEbcdic(fieldName->Name, convName, fieldName->Len); - if (convName[0] == '"') // If quoted, exclude quotes. 
- name = thd_make_lex_string(thd, name, - convName + 1, (uint) (fieldName->Len -2), 1); - else - name = thd_make_lex_string(thd, name, convName, (uint) fieldName->Len, 1); - f_key_info.referenced_fields.push_back(name); - if ((fld + 1) < FKCstDef->RefCnt) - fieldName = fieldName + 1; - } - - /* Process the ON UPDATE and ON DELETE rules. */ - - switch(FKCstDef->UpdMethod) - { - case QMY_NOACTION: - { - method = "NO ACTION"; - methodLen=9; - } - break; - case QMY_RESTRICT: - { - method = "RESTRICT"; - methodLen = 8; - } - break; - default: break; - } - f_key_info.update_method = thd_make_lex_string( - thd, f_key_info.update_method, method, methodLen, 1); - switch(FKCstDef->DltMethod) - { - case QMY_CASCADE: - { - method = "CASCADE"; - methodLen = 7; - } - break; - case QMY_SETDFT: - { - method = "SET DEFAULT"; - methodLen = 11; - } - break; - case QMY_SETNULL: - { - method = "SET NULL"; - methodLen = 8; - } - break; - case QMY_NOACTION: - { - method = "NO ACTION"; - methodLen = 9; - } - break; - case QMY_RESTRICT: - { - method = "RESTRICT"; - methodLen = 8; - } - break; - default: break; - } - f_key_info.delete_method = thd_make_lex_string( - thd, f_key_info.delete_method, method, methodLen, 1); - f_key_info.referenced_key_name= thd_make_lex_string(thd, 0, (char *)"", 1, 1); - FOREIGN_KEY_INFO *pf_key_info = (FOREIGN_KEY_INFO *) - thd_memdup(thd, &f_key_info, sizeof(FOREIGN_KEY_INFO)); - f_key_list->push_back(pf_key_info); - } - - /* Address the next constraint, if any. */ - - if ((i+1) < cstCnt) - { - tempPtr = (char*)cstHdr + cstHdr->CstLen; - cstHdr = (constraint_hdr_t*)(tempPtr); - } - } - } - - /* Cleanup and return. */ - - free_share(share); - DBUG_RETURN(0); -} - -/*********************************************************************** -Checks if the table is referenced by a foreign key. 
-Returns: 0 if not referenced (or error occurs), - > 0 if is referenced -***********************************************************************/ - -uint ha_ibmdb2i::referenced_by_foreign_key(void) -{ - DBUG_ENTER("ha_ibmdb2i::referenced_by_foreign_key"); - - int rc = 0; - FILE_HANDLE queryFile = 0; - uint32 resultRowLen; - uint32 count = 0; - - const char* libName = db2Table->getDB2LibName(db2i_table::ASCII_SQL); - const char* fileName = db2Table->getDB2TableName(db2i_table::ASCII_SQL); - - String query(128); - query.append(STRING_WITH_LEN(" SELECT COUNT(*) FROM SYSIBM.SQLFOREIGNKEYS WHERE PKTABLE_SCHEM = '")); - query.append(libName+1, strlen(libName)-2); // parent library name - query.append(STRING_WITH_LEN("' AND PKTABLE_NAME = '")); - query.append(fileName+1, strlen(fileName)-2); // parent file name - query.append(STRING_WITH_LEN("'")); - - SqlStatementStream sqlStream(query); - - rc = bridge()->prepOpen(sqlStream.getPtrToData(), - &queryFile, - &resultRowLen); - if (rc == 0) - { - IOReadBuffer rowBuffer(1, resultRowLen); - rc = bridge()->read(queryFile, rowBuffer.ptr(), QMY_READ_ONLY, QMY_NONE, QMY_FIRST); - if (!rc) count = *((uint32*)rowBuffer.getRowN(0)); - bridge()->deallocateFile(queryFile); - } - DBUG_RETURN(count); -} diff --git a/storage/ibmdb2i/db2i_conversion.cc b/storage/ibmdb2i/db2i_conversion.cc deleted file mode 100644 index 9a85eb01c9b..00000000000 --- a/storage/ibmdb2i/db2i_conversion.cc +++ /dev/null @@ -1,1459 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. 
-*/ - - - -#include "db2i_ileBridge.h" -#include "mysql_priv.h" -#include "db2i_charsetSupport.h" -#include "ctype.h" -#include "ha_ibmdb2i.h" -#include "db2i_errors.h" -#include "wchar.h" - -const char ZERO_DATETIME_VALUE[] = "0000-00-00 00:00:00"; -const char ZERO_DATETIME_VALUE_SUBST[] = "0001-01-01 00:00:00"; -const char ZERO_DATE_VALUE[] = "0000-00-00"; -const char ZERO_DATE_VALUE_SUBST[] = "0001-01-01"; - - -/** - Put a BCD digit into a BCD string. - - @param[out] bcdString The BCD string to be modified - @param pos The position within the string to be updated. - @param val The value to be assigned into the string at pos. -*/ -static inline void bcdAssign(char* bcdString, uint pos, uint val) -{ - bcdString[pos/2] |= val << ((pos % 2) ? 0 : 4); -} - -/** - Read a BCD digit from a BCD string. - - @param[out] bcdString The BCD string to be read - @param pos The position within the string to be read. - - @return bcdGet The value of the BCD digit at pos. -*/ -static inline uint bcdGet(const char* bcdString, uint pos) -{ - return (bcdString[pos/2] >> ((pos % 2) ? 0 : 4)) & 0xf; -} - -/** - In-place convert a number in ASCII represenation to EBCDIC representation. - - @param string The string of ASCII characters - @param len The length of string -*/ -static inline void convertNumericToEbcdicFast(char* string, int len) -{ - for (int i = 0; i < len; ++i, ++string) - { - switch(*string) - { - case '-': - *string = 0x60; break; - case ':': - *string = 0x7A; break; - case '.': - *string = 0x4B; break; - default: - DBUG_ASSERT(isdigit(*string)); - *string += 0xF0 - '0'; - break; - } - } -} - - -/** - atoi()-like function for a 4-character EBCDIC string. - - @param string The EBCDIC string - @return a4toi_ebcdic The decimal value of the EBCDIC string -*/ -static inline uint16 a4toi_ebcdic(const uchar* string) -{ - return ((string[0]-0xF0) * 1000 + - (string[1]-0xF0) * 100 + - (string[2]-0xF0) * 10 + - (string[3]-0xF0)); -}; - - -/** - atoi()-like function for a 4-character EBCDIC string. - - @param string The EBCDIC string - @return a4toi_ebcdic The decimal value of the EBCDIC string -*/ -static inline uint8 a2toi_ebcdic(const uchar* string) -{ - return ((string[0]-0xF0) * 10 + - (string[1]-0xF0)); -}; - -/** - Perform character conversion for textual field data. 
-*/ -int ha_ibmdb2i::convertFieldChars(enum_conversionDirection direction, - uint16 fieldID, - const char* input, - char* output, - size_t ilen, - size_t olen, - size_t* outDataLen, - bool tacitErrors, - size_t* substChars) -{ - DBUG_PRINT("ha_ibmdb2i::convertFieldChars",("Direction: %d; length = %d", direction, ilen)); - - if (unlikely(ilen == 0)) - { - if (outDataLen) *outDataLen = 0; - return (0); - } - - iconv_t& conversion = db2Table->getConversionDefinition(direction, fieldID); - - if (unlikely(conversion == (iconv_t)(-1))) - { - return (DB2I_ERR_UNSUPP_CHARSET); - } - - size_t initOLen= olen; - size_t substitutedChars = 0; - int rc = iconv(conversion, (char**)&input, &ilen, &output, &olen, &substitutedChars ); - if (outDataLen) *outDataLen = initOLen - olen; - if (substChars) *substChars = substitutedChars; - if (unlikely(rc < 0)) - { - int er = errno; - if (er == EILSEQ) - { - if (!tacitErrors) getErrTxt(DB2I_ERR_ILL_CHAR, table->field[fieldID]->field_name); - return (DB2I_ERR_ILL_CHAR); - } - else - { - if (!tacitErrors) getErrTxt(DB2I_ERR_ICONV,er); - return (DB2I_ERR_ICONV); - } - } - if (unlikely(substitutedChars) && (!tacitErrors)) - { - warning(ha_thd(), DB2I_ERR_SUB_CHARS, table->field[fieldID]->field_name); - } - - return (0); -} - -/** - Append the appropriate default value clause onto a CREATE TABLE definition - - This was inspired by get_field_default_value in sql/sql_show.cc. - - @param field The field whose value is to be obtained - @param statement The string to receive the DEFAULT clause - @param quoteIt Does the data type require single quotes around the value? - @param ccsid The ccsid of the field value (if a string type); 0 if no conversion needed -*/ -static void get_field_default_value(Field *field, - String &statement, - bool quoteIt, - uint32 ccsid, - bool substituteZeroDates) -{ - if ((field->type() != FIELD_TYPE_BLOB && - !(field->flags & NO_DEFAULT_VALUE_FLAG) && - field->unireg_check != Field::NEXT_NUMBER)) - { - my_ptrdiff_t old_ptr= (my_ptrdiff_t) (field->table->s->default_values - field->table->record[0]); - field->move_field_offset(old_ptr); - - String defaultClause(64); - defaultClause.length(0); - defaultClause.append(" DEFAULT "); - if (!field->is_null()) - { - my_bitmap_map *old_map = tmp_use_all_columns(field->table, field->table->read_set); - char tmp[MAX_FIELD_WIDTH]; - - if (field->real_type() == MYSQL_TYPE_ENUM || - field->real_type() == MYSQL_TYPE_SET) - { - CHARSET_INFO *cs= &my_charset_bin; - uint len = (uint)(cs->cset->longlong10_to_str)(cs,tmp,sizeof(tmp), 10, field->val_int()); - tmp[len]=0; - defaultClause.append(tmp); - } - else - { - String type(tmp, sizeof(tmp), field->charset()); - field->val_str(&type); - if (type.length()) - { - if (field->type() == MYSQL_TYPE_DATE && - memcmp(type.ptr(), STRING_WITH_LEN(ZERO_DATE_VALUE)) == 0) - { - if (substituteZeroDates) - type.set(STRING_WITH_LEN(ZERO_DATE_VALUE_SUBST), field->charset()); - else - { - warning(current_thd, DB2I_ERR_WARN_COL_ATTRS, field->field_name); - return; - } - } - else if ((field->type() == MYSQL_TYPE_DATETIME || - field->type() == MYSQL_TYPE_TIMESTAMP) && - memcmp(type.ptr(), STRING_WITH_LEN(ZERO_DATETIME_VALUE)) == 0) - { - if (substituteZeroDates) - type.set(STRING_WITH_LEN(ZERO_DATETIME_VALUE_SUBST), field->charset()); - else - { - warning(current_thd, DB2I_ERR_WARN_COL_ATTRS, field->field_name); - return; - } - } - - - if (field->type() != MYSQL_TYPE_STRING && - field->type() != MYSQL_TYPE_VARCHAR && - field->type() != MYSQL_TYPE_BLOB && - field->type() != 
MYSQL_TYPE_BIT) - { - if (quoteIt) - defaultClause.append('\''); - defaultClause.append(type); - if (quoteIt) - defaultClause.append('\''); - } - else - { - int length; - char* out; - - // If a ccsid is specified, we need to make sure that the DEFAULT - // string is converted to that encoding. - if (ccsid != 0) - { - iconv_t iconvD; - if (getConversion(toDB2, field->charset(), ccsid, iconvD)) - { - warning(current_thd, DB2I_ERR_WARN_COL_ATTRS, field->field_name); - return; - } - - size_t ilen = type.length(); - size_t olen = 6 * ilen; - size_t origOlen = olen; - const char* in = type.ptr(); - const char* tempIn = in; - out = (char*)my_malloc(olen, MYF(MY_WME)); - char* tempOut = out; - size_t substitutedChars; - - if (iconv(iconvD, (char**)&tempIn, &ilen, &tempOut, &olen, &substitutedChars) < 0) - { - warning(current_thd, DB2I_ERR_WARN_COL_ATTRS, field->field_name); - my_free(out, MYF(0)); - return; - } - // Now we process the converted string to represent it as - // hexadecimal values. - - length = origOlen - olen; - } - else - { - length = type.length(); - out = (char*)my_malloc(length*2, MYF(MY_WME)); - memcpy(out, (char*)type.ptr(), length); - } - - if (length > 16370) - { - warning(current_thd, DB2I_ERR_WARN_COL_ATTRS, field->field_name); - my_free(out, MYF(0)); - return; - } - - if (ccsid == 1200) - defaultClause.append("ux'"); - else if (ccsid == 13488) - defaultClause.append("gx'"); - else if (field->charset() == &my_charset_bin) - defaultClause.append("binary(x'"); - else - defaultClause.append("x'"); - - const char hexMap[] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; - for (int c = length-1; c >= 0; --c) - { - out[c*2+1] = hexMap[out[c] & 0xF]; - out[c*2] = hexMap[out[c] >> 4]; - } - - defaultClause.append(out, length*2); - defaultClause.append('\''); - if (field->charset() == &my_charset_bin) - defaultClause.append(")"); - - my_free(out, MYF(0)); - } - } - else - defaultClause.length(0); - } - tmp_restore_column_map(field->table->read_set, old_map); - } - else if (field->maybe_null()) - defaultClause.append(STRING_WITH_LEN("NULL")); - - if (old_ptr) - field->move_field_offset(-old_ptr); - - statement.append(defaultClause); - } -} - - - - -/** - Convert a MySQL field definition into its corresponding DB2 type. - - The result will be appended to mapping as a DB2 SQL phrase. 
- - @param field The MySQL field to be evaluated - @param[out] mapping The receiver for the DB2 SQL syntax - @param timeFormat The format to be used for mapping the TIME type -*/ -int ha_ibmdb2i::getFieldTypeMapping(Field* field, - String& mapping, - enum_TimeFormat timeFormat, - enum_BlobMapping blobMapping, - enum_ZeroDate zeroDateHandling, - bool propagateDefaults, - enum_YearFormat yearFormat) -{ - char stringBuildBuffer[257]; - uint32 fieldLength; - bool defaultNeedsQuotes = false; - uint16 db2Ccsid = 0; - - CHARSET_INFO* fieldCharSet = field->charset(); - switch (field->type()) - { - case MYSQL_TYPE_NEWDECIMAL: - { - uint precision= ((Field_new_decimal*)field)->precision; - uint scale= field->decimals(); - - if (precision <= MAX_DEC_PRECISION) - { - sprintf(stringBuildBuffer,"DECIMAL(%d, %d)",precision,scale); - } - else - { - if (scale > precision - MAX_DEC_PRECISION) - { - scale = scale - (precision - MAX_DEC_PRECISION); - precision = MAX_DEC_PRECISION; - sprintf(stringBuildBuffer,"DECIMAL(%d, %d)",precision,scale); - } - else - { - return HA_ERR_UNSUPPORTED; - } - warning(ha_thd(), DB2I_ERR_PRECISION); - } - - mapping.append(stringBuildBuffer); - } - break; - case MYSQL_TYPE_TINY: - mapping.append(STRING_WITH_LEN("SMALLINT")); - break; - case MYSQL_TYPE_SHORT: - if (((Field_num*)field)->unsigned_flag) - mapping.append(STRING_WITH_LEN("INT")); - else - mapping.append(STRING_WITH_LEN("SMALLINT")); - break; - case MYSQL_TYPE_LONG: - if (((Field_num*)field)->unsigned_flag) - mapping.append(STRING_WITH_LEN("BIGINT")); - else - mapping.append(STRING_WITH_LEN("INT")); - break; - case MYSQL_TYPE_FLOAT: - mapping.append(STRING_WITH_LEN("REAL")); - break; - case MYSQL_TYPE_DOUBLE: - mapping.append(STRING_WITH_LEN("DOUBLE")); - break; - case MYSQL_TYPE_LONGLONG: - if (((Field_num*)field)->unsigned_flag) - mapping.append(STRING_WITH_LEN("DECIMAL(20,0)")); - else - mapping.append(STRING_WITH_LEN("BIGINT")); - break; - case MYSQL_TYPE_INT24: - mapping.append(STRING_WITH_LEN("INTEGER")); - break; - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_NEWDATE: - mapping.append(STRING_WITH_LEN("DATE")); - defaultNeedsQuotes = true; - break; - case MYSQL_TYPE_TIME: - if (timeFormat == TIME_OF_DAY) - { - mapping.append(STRING_WITH_LEN("TIME")); - defaultNeedsQuotes = true; - } - else - mapping.append(STRING_WITH_LEN("INTEGER")); - break; - case MYSQL_TYPE_DATETIME: - mapping.append(STRING_WITH_LEN("TIMESTAMP")); - defaultNeedsQuotes = true; - break; - case MYSQL_TYPE_TIMESTAMP: - mapping.append(STRING_WITH_LEN("TIMESTAMP")); - - if (table_share->timestamp_field == field && propagateDefaults) - { - switch (((Field_timestamp*)field)->get_auto_set_type()) - { - case TIMESTAMP_NO_AUTO_SET: - break; - case TIMESTAMP_AUTO_SET_ON_INSERT: - mapping.append(STRING_WITH_LEN(" DEFAULT CURRENT_TIMESTAMP")); - break; - case TIMESTAMP_AUTO_SET_ON_UPDATE: - if (osVersion.v >= 6 && - !field->is_null()) - { - mapping.append(STRING_WITH_LEN(" GENERATED BY DEFAULT FOR EACH ROW ON UPDATE AS ROW CHANGE TIMESTAMP")); - warning(ha_thd(), DB2I_ERR_WARN_COL_ATTRS, field->field_name); - } - else - warning(ha_thd(), DB2I_ERR_WARN_COL_ATTRS, field->field_name); - break; - case TIMESTAMP_AUTO_SET_ON_BOTH: - if (osVersion.v >= 6 && - !field->is_null()) - mapping.append(STRING_WITH_LEN(" GENERATED BY DEFAULT FOR EACH ROW ON UPDATE AS ROW CHANGE TIMESTAMP")); - else - { - mapping.append(STRING_WITH_LEN(" DEFAULT CURRENT_TIMESTAMP")); - warning(ha_thd(), DB2I_ERR_WARN_COL_ATTRS, field->field_name); - } - break; - } - } - else - 
defaultNeedsQuotes = true; - break; - case MYSQL_TYPE_YEAR: - if (yearFormat == CHAR4) - { - mapping.append(STRING_WITH_LEN("CHAR(4) CCSID 1208")); - defaultNeedsQuotes = true; - } - else - { - mapping.append(STRING_WITH_LEN("SMALLINT")); - defaultNeedsQuotes = false; - } - break; - case MYSQL_TYPE_BIT: - sprintf(stringBuildBuffer, "BINARY(%d)", (field->max_display_length() / 8) + 1); - mapping.append(stringBuildBuffer); - break; - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_VARCHAR: - case MYSQL_TYPE_STRING: - { - if (field->real_type() == MYSQL_TYPE_ENUM || - field->real_type() == MYSQL_TYPE_SET) - { - mapping.append(STRING_WITH_LEN("BIGINT")); - } - else - { - defaultNeedsQuotes = true; - - fieldLength = field->max_display_length(); // Get field byte length - - if (fieldCharSet == &my_charset_bin) - { - if (field->type() == MYSQL_TYPE_STRING) - { - sprintf(stringBuildBuffer, "BINARY(%d)", max(fieldLength, 1)); - } - else - { - if (fieldLength <= MAX_VARCHAR_LENGTH) - { - sprintf(stringBuildBuffer, "VARBINARY(%d)", max(fieldLength, 1)); - } - else if (blobMapping == AS_VARCHAR && - (field->flags & PART_KEY_FLAG)) - { - sprintf(stringBuildBuffer, "LONG VARBINARY "); - } - else - { - fieldLength = min(MAX_BLOB_LENGTH, fieldLength); - sprintf(stringBuildBuffer, "BLOB(%d)", max(fieldLength, 1)); - } - } - mapping.append(stringBuildBuffer); - } - else - { - if (field->type() == MYSQL_TYPE_STRING) - { - if (fieldLength > MAX_CHAR_LENGTH) - return 1; - if (fieldCharSet->mbmaxlen > 1) - { - if (memcmp(fieldCharSet->name, "ucs2_", sizeof("ucs2_")-1) == 0 ) // UCS2 - { - sprintf(stringBuildBuffer, "GRAPHIC(%d)", max(fieldLength / fieldCharSet->mbmaxlen, 1)); // Number of characters - db2Ccsid = 13488; - } - else if (memcmp(fieldCharSet->name, "utf8_", sizeof("utf8_")-1) == 0 && - strcmp(fieldCharSet->name, "utf8_general_ci") != 0) - { - sprintf(stringBuildBuffer, "CHAR(%d)", max(fieldLength, 1)); // Number of bytes - db2Ccsid = 1208; - } - else - { - sprintf(stringBuildBuffer, "GRAPHIC(%d)", max(fieldLength / fieldCharSet->mbmaxlen, 1)); // Number of characters - db2Ccsid = 1200; - } - } - else - { - sprintf(stringBuildBuffer, "CHAR(%d)", max(fieldLength, 1)); - } - mapping.append(stringBuildBuffer); - } - else - { - if (fieldLength <= MAX_VARCHAR_LENGTH) - { - if (fieldCharSet->mbmaxlen > 1) - { - if (memcmp(fieldCharSet->name, "ucs2_", sizeof("ucs2_")-1) == 0 ) // UCS2 - { - sprintf(stringBuildBuffer, "VARGRAPHIC(%d)", max(fieldLength / fieldCharSet->mbmaxlen, 1)); // Number of characters - db2Ccsid = 13488; - } - else if (memcmp(fieldCharSet->name, "utf8_", sizeof("utf8_")-1) == 0 && - strcmp(fieldCharSet->name, "utf8_general_ci") != 0) - { - sprintf(stringBuildBuffer, "VARCHAR(%d)", max(fieldLength, 1)); // Number of bytes - db2Ccsid = 1208; - } - else - { - sprintf(stringBuildBuffer, "VARGRAPHIC(%d)", max(fieldLength / fieldCharSet->mbmaxlen, 1)); // Number of characters - db2Ccsid = 1200; - } - } - else - { - sprintf(stringBuildBuffer, "VARCHAR(%d)", max(fieldLength, 1)); - } - } - else if (blobMapping == AS_VARCHAR && - (field->flags & PART_KEY_FLAG)) - { - if (fieldCharSet->mbmaxlen > 1) - { - if (memcmp(fieldCharSet->name, "ucs2_", sizeof("ucs2_")-1) == 0 ) // UCS2 - { - sprintf(stringBuildBuffer, "LONG VARGRAPHIC "); - db2Ccsid = 13488; - } - else if (memcmp(fieldCharSet->name, "utf8_", sizeof("utf8_")-1) == 0 && - strcmp(fieldCharSet->name, "utf8_general_ci") != 0) - { - sprintf(stringBuildBuffer, "LONG VARCHAR "); - db2Ccsid = 1208; - } - else - { - sprintf(stringBuildBuffer, "LONG 
VARGRAPHIC "); - db2Ccsid = 1200; - } - } - else - { - sprintf(stringBuildBuffer, "LONG VARCHAR "); - } - } - else - { - fieldLength = min(MAX_BLOB_LENGTH, fieldLength); - - if (fieldCharSet->mbmaxlen > 1) - { - if (memcmp(fieldCharSet->name, "ucs2_", sizeof("ucs2_")-1) == 0 ) // UCS2 - { - sprintf(stringBuildBuffer, "DBCLOB(%d)", max(fieldLength / fieldCharSet->mbmaxlen, 1)); // Number of characters - db2Ccsid = 13488; - } - else if (memcmp(fieldCharSet->name, "utf8_", sizeof("utf8_")-1) == 0 && - strcmp(fieldCharSet->name, "utf8_general_ci") != 0) - { - sprintf(stringBuildBuffer, "CLOB(%d)", max(fieldLength, 1)); // Number of bytes - db2Ccsid = 1208; - } - else - { - sprintf(stringBuildBuffer, "DBCLOB(%d)", max(fieldLength / fieldCharSet->mbmaxlen, 1)); // Number of characters - db2Ccsid = 1200; - } - } - else - { - sprintf(stringBuildBuffer, "CLOB(%d)", max(fieldLength, 1)); // Number of characters - } - } - - mapping.append(stringBuildBuffer); - } - if (db2Ccsid == 0) // If not overriding CCSID - { - int32 rtnCode = convertIANAToDb2Ccsid(fieldCharSet->csname, &db2Ccsid); - if (rtnCode) - return rtnCode; - } - - if (db2Ccsid != 1208 && - db2Ccsid != 13488) - { - // Check whether there is a character conversion available. - iconv_t temp; - int32 rc = getConversion(toDB2, fieldCharSet, db2Ccsid, temp); - if (unlikely(rc)) - return rc; - } - - sprintf(stringBuildBuffer, " CCSID %d ", db2Ccsid); - mapping.append(stringBuildBuffer); - } - } - } - break; - - } - - if (propagateDefaults) - get_field_default_value(field, - mapping, - defaultNeedsQuotes, - db2Ccsid, - (zeroDateHandling==SUBSTITUTE_0001_01_01)); - - return 0; -} - - -/** - Convert MySQL field data into the equivalent DB2 format - - @param field The MySQL field to be converted - @param db2Field The corresponding DB2 field definition - @param db2Buf The buffer to receive the converted data - @param data NULL if field points to the correct data; otherwise, - the data to be converted (for use with keys) -*/ -int32 ha_ibmdb2i::convertMySQLtoDB2(Field* field, const DB2Field& db2Field, char* db2Buf, const uchar* data) -{ - enum_field_types fieldType = field->type(); - switch (fieldType) - { - case MYSQL_TYPE_NEWDECIMAL: - { - uint precision= ((Field_new_decimal*)field)->precision; - uint scale= field->decimals(); - uint db2Precision = min(precision, MAX_DEC_PRECISION); - uint truncationAmount = precision - db2Precision; - - if (scale >= truncationAmount) - { - String tempString(precision+2); - - if (data == NULL) - { - field->val_str((String*)&tempString, (String*)(NULL)); - } - else - { - field->val_str(&tempString, data); - } - const char* temp = tempString.ptr(); - char packed[32]; - memset(&packed, 0, sizeof(packed)); - - int bcdPos = db2Precision - (db2Precision % 2 ? 1 : 0); - bcdAssign(packed, bcdPos+1, (temp[0] == '-' ? 0xD : 0xF)); - - int strPos=tempString.length() - 1 - truncationAmount; - - for (;strPos >= 0 && bcdPos >= 0; strPos--) - { - if (my_isdigit(&my_charset_latin1, temp[strPos])) - { - bcdAssign(packed, bcdPos, temp[strPos]-'0'); - --bcdPos; - } - } - memcpy(db2Buf, &packed, (db2Precision/2)+1); - } - - } - break; - case MYSQL_TYPE_TINY: - { - int16 temp = (data == NULL ? field->val_int() : field->val_int(data)); - memcpy(db2Buf , &temp, sizeof(temp)); - } - break; - case MYSQL_TYPE_SHORT: - { - if (((Field_num*)field)->unsigned_flag) - { - memset(db2Buf, 0, 2); - memcpy(db2Buf+2, (data == NULL ? field->ptr : data), 2); - } - else - { - memcpy(db2Buf, (data == NULL ? 
field->ptr : data), 2); - } - } - break; - case MYSQL_TYPE_LONG: - { - if (((Field_num*)field)->unsigned_flag) - { - memset(db2Buf, 0, 4); - memcpy(db2Buf+4, (data == NULL ? field->ptr : data), 4); - } - else - { - memcpy(db2Buf, (data == NULL ? field->ptr : data), 4); - } - } - break; - case MYSQL_TYPE_FLOAT: - { - memcpy(db2Buf, (data == NULL ? field->ptr : data), 4); - } - break; - case MYSQL_TYPE_DOUBLE: - { - memcpy(db2Buf, (data == NULL ? field->ptr : data), 8); - } - break; - case MYSQL_TYPE_TIMESTAMP: - case MYSQL_TYPE_DATETIME: - { - String tempString(27); - if (data == NULL) - { - field->val_str(&tempString, &tempString); - } - else - { - field->val_str(&tempString, data); - } - memset(db2Buf, '0', 26); - memcpy(db2Buf, tempString.ptr(), tempString.length()); - if (strncmp(db2Buf,ZERO_DATETIME_VALUE,strlen(ZERO_DATETIME_VALUE)) == 0) - { - if (cachedZeroDateOption == SUBSTITUTE_0001_01_01) - memcpy(db2Buf, ZERO_DATETIME_VALUE_SUBST, sizeof(ZERO_DATETIME_VALUE_SUBST)); - else - { - getErrTxt(DB2I_ERR_INVALID_COL_VALUE, field->field_name); - return(DB2I_ERR_INVALID_COL_VALUE); - } - } - (db2Buf)[10] = '-'; - (db2Buf)[13] = (db2Buf)[16] = (db2Buf)[19] = '.'; - - convertNumericToEbcdicFast(db2Buf, 26); - } - break; - case MYSQL_TYPE_LONGLONG: - { - if (((Field_num*)field)->unsigned_flag) - { - char temp[23]; - String tempString(temp, sizeof(temp), &my_charset_latin1); - - if (data == NULL) - { - field->val_str((String*)&tempString, (String*)(NULL)); - } - else - { - field->val_str(&tempString, data); - } - char packed[11]; - memset(packed, 0, sizeof(packed)); - bcdAssign(packed, 21, (temp[0] == '-' ? 0xD : 0xF)); - int strPos=tempString.length()-1; - int bcdPos=20; - - for (;strPos >= 0; strPos--) - { - if (my_isdigit(&my_charset_latin1, temp[strPos])) - { - bcdAssign(packed, bcdPos, temp[strPos]-'0'); - --bcdPos; - } - } - memcpy(db2Buf, &packed, 11); - } - else - { - *(uint64*)db2Buf = *(uint64*)(data == NULL ? field->ptr : data); - } - } - break; - case MYSQL_TYPE_INT24: - { - int32 temp= (data == NULL ? field->val_int() : field->val_int(data)); - memcpy(db2Buf , &temp, sizeof(temp)); - } - break; - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_NEWDATE: - { - String tempString(11); - if (data == NULL) - { - field->val_str(&tempString, (String*)NULL); - } - else - { - field->val_str(&tempString, data); - } - memcpy(db2Buf, tempString.ptr(), 10); - if (strncmp(db2Buf,ZERO_DATE_VALUE,strlen(ZERO_DATE_VALUE)) == 0) - { - if (cachedZeroDateOption == SUBSTITUTE_0001_01_01) - memcpy(db2Buf, ZERO_DATE_VALUE_SUBST, sizeof(ZERO_DATE_VALUE_SUBST)); - else - { - getErrTxt(DB2I_ERR_INVALID_COL_VALUE,field->field_name); - return(DB2I_ERR_INVALID_COL_VALUE); - } - } - - convertNumericToEbcdicFast(db2Buf,10); - } - break; - case MYSQL_TYPE_TIME: - { - if (db2Field.getType() == QMY_TIME) - { - String tempString(10); - if (data == NULL) - { - field->val_str(&tempString, (String*)NULL); - } - else - { - field->val_str(&tempString, data); - } - memcpy(db2Buf, tempString.ptr(), 8); - (db2Buf)[2]=(db2Buf)[5] = '.'; - - convertNumericToEbcdicFast(db2Buf, 8); - } - else - { - int32 temp = sint3korr(data == NULL ? field->ptr : data); - memcpy(db2Buf, &temp, sizeof(temp)); - } - } - break; - case MYSQL_TYPE_YEAR: - { - String tempString(5); - if (db2Field.getType() == QMY_CHAR) - { - if (data == NULL) - { - field->val_str(&tempString, (String*)NULL); - } - else - { - field->val_str(&tempString, data); - } - memcpy(db2Buf, tempString.ptr(), 4); - } - else - { - uint8 temp = *(uint8*)(data == NULL ? 
field->ptr : data); - *(uint16*)(db2Buf) = (temp ? temp + 1900 : 0); - } - } - break; - case MYSQL_TYPE_BIT: - { - int bytesToCopy = db2Field.getByteLengthInRecord(); - - if (data == NULL) - { - uint64 temp = field->val_int(); - memcpy(db2Buf, - ((char*)&temp) + (sizeof(temp) - bytesToCopy), - bytesToCopy); - } - else - { - memcpy(db2Buf, - data, - bytesToCopy); - } - } - break; - case MYSQL_TYPE_VARCHAR: - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_BLOB: - { - if (field->real_type() == MYSQL_TYPE_ENUM || - field->real_type() == MYSQL_TYPE_SET) - { - int64 temp= (data == NULL ? field->val_int() : field->val_int(data)); - *(int64*)db2Buf = temp; - } - else - { - const uchar* dataToStore; - uint32 bytesToStore; - uint32 bytesToPad = 0; - CHARSET_INFO* fieldCharSet = field->charset(); - uint32 maxDisplayLength = field->max_display_length(); - switch (fieldType) - { - case MYSQL_TYPE_STRING: - { - bytesToStore = maxDisplayLength; - if (data == NULL) - dataToStore = field->ptr; - else - dataToStore = data; - } - break; - case MYSQL_TYPE_VARCHAR: - { - - if (data == NULL) - { - bytesToStore = field->data_length(); - dataToStore = field->ptr + ((Field_varstring*)field)->length_bytes; - } - else - { - // Key lens are stored little-endian - bytesToStore = *(uint8*)data + ((*(uint8*)(data+1)) << 8); - dataToStore = data + 2; - } - bytesToPad = maxDisplayLength - bytesToStore; - } - break; - case MYSQL_TYPE_BLOB: - { - if (data == NULL) - { - bytesToStore = ((Field_blob*)field)->get_length(); - bytesToPad = maxDisplayLength - bytesToStore; - ((Field_blob*)field)->get_ptr((uchar**)&dataToStore); - } - else - { - // Key lens are stored little-endian - bytesToStore = *(uint8*)data + ((*(uint8*)(data+1)) << 8); - dataToStore = data + 2; - } - } - break; - } - - int32 rc; - uint16 db2FieldType = db2Field.getType(); - switch(db2FieldType) - { - case QMY_CHAR: - if (maxDisplayLength == 0) - bytesToPad = 1; - case QMY_VARCHAR: - if (db2FieldType == QMY_VARCHAR) - { - db2Buf += sizeof(uint16); - bytesToPad = 0; - } - - if (bytesToStore > db2Field.getDataLengthInRecord()) - { - bytesToStore = db2Field.getDataLengthInRecord(); - field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); - } - - if (fieldCharSet == &my_charset_bin) // If binary - { - if (bytesToStore) - memcpy(db2Buf, dataToStore, bytesToStore); - if (bytesToPad) - memset(db2Buf + bytesToStore, 0x00, bytesToPad); - } - else if (db2Field.getCCSID() == 1208) // utf8 - { - if (bytesToStore) - memcpy(db2Buf, dataToStore, bytesToStore); - if (bytesToPad) - memset(db2Buf + bytesToStore, ' ', bytesToPad); - } - else // single-byte ASCII to EBCDIC - { - DBUG_ASSERT(fieldCharSet->mbmaxlen == 1); - if (bytesToStore) - { - rc = convertFieldChars(toDB2, field->field_index, (char*)dataToStore, db2Buf, bytesToStore, bytesToStore, NULL); - if (rc) - return rc; - } - if (bytesToPad) - memset(db2Buf + bytesToStore, 0x40, bytesToPad); - } - - if (db2FieldType == QMY_VARCHAR) - *(uint16*)(db2Buf - sizeof(uint16)) = bytesToStore; - break; - case QMY_VARGRAPHIC: - db2Buf += sizeof(uint16); - bytesToPad = 0; - case QMY_GRAPHIC: - if (maxDisplayLength == 0 && db2FieldType == QMY_GRAPHIC) - bytesToPad = 2; - - if (db2Field.getCCSID() == 13488) - { - if (bytesToStore) - memcpy(db2Buf, dataToStore, bytesToStore); - if (bytesToPad) - memset16((db2Buf + bytesToStore), 0x0020, bytesToPad/2); - } - else - { - size_t db2BytesToStore; - size_t maxDb2BytesToStore; - - if (maxDisplayLength == 0 && db2FieldType == QMY_GRAPHIC) - maxDb2BytesToStore = 2; - else - 
maxDb2BytesToStore = min(((bytesToStore * 2) / fieldCharSet->mbminlen), - ((maxDisplayLength * 2) / fieldCharSet->mbmaxlen)); - - if (bytesToStore == 0) - db2BytesToStore = 0; - else - { - rc = convertFieldChars(toDB2, field->field_index, (char*)dataToStore, db2Buf, bytesToStore, maxDb2BytesToStore, &db2BytesToStore); - if (rc) - return rc; - bytesToStore = db2BytesToStore; - } - if (db2BytesToStore < maxDb2BytesToStore) // If need to pad - memset16((db2Buf + db2BytesToStore), 0x0020, (maxDb2BytesToStore - db2BytesToStore)/2); - } - - if (db2FieldType == QMY_VARGRAPHIC) - *(uint16*)(db2Buf-sizeof(uint16)) = bytesToStore/2; - break; - case QMY_BLOBCLOB: - case QMY_DBCLOB: - { - DBUG_ASSERT(data == NULL); - DB2LobField* lobField = (DB2LobField*)(db2Buf + db2Field.calcBlobPad()); - - if ((fieldCharSet == &my_charset_bin) || // binary or - (db2Field.getCCSID()==13488) || - (db2Field.getCCSID()==1208)) // binary UTF8 - { - } - else - { - char* temp; - int32 rc; - size_t db2BytesToStore; - if (fieldCharSet->mbmaxlen == 1) // single-byte ASCII to EBCDIC - { - temp = getCharacterConversionBuffer(field->field_index, bytesToStore); - rc = convertFieldChars(toDB2, field->field_index, (char*)dataToStore,temp,bytesToStore, bytesToStore, NULL); - if (rc) - return (rc); - } - else // Else Far East, special UTF8 or non-special UTF8/UCS2 - { - size_t maxDb2BytesToStore; - maxDb2BytesToStore = min(((bytesToStore * 2) / fieldCharSet->mbminlen), - ((maxDisplayLength * 2) / fieldCharSet->mbmaxlen)); - temp = getCharacterConversionBuffer(field->field_index, maxDb2BytesToStore); - rc = convertFieldChars(toDB2, field->field_index, (char*)dataToStore,temp,bytesToStore, maxDb2BytesToStore, &db2BytesToStore); - if (rc) - return (rc); - bytesToStore = db2BytesToStore; - } - dataToStore = (uchar*)temp; - } - - uint16 blobID = db2Table->getBlobIdFromField(field->field_index); - if (blobWriteBuffers[blobID] != (char*)dataToStore) - blobWriteBuffers[blobID].reassign((char*)dataToStore); - if ((void*)blobWriteBuffers[blobID]) - lobField->dataHandle = (ILEMemHandle)blobWriteBuffers[blobID]; - else - lobField->dataHandle = 0; - lobField->length = bytesToStore / (db2FieldType == QMY_DBCLOB ? 2 : 1); - } - break; - } - } - } - break; - default: - DBUG_ASSERT(0); - break; - } - - return (ha_thd()->is_error()); -} - - -/** - Convert DB2 field data into the equivalent MySQL format - - @param db2Field The DB2 field definition - @param field The MySQL field to receive the converted data - @param buf The DB2 data to be converted -*/ -int32 ha_ibmdb2i::convertDB2toMySQL(const DB2Field& db2Field, Field* field, const char* buf) -{ - int32 storeRC = 0; // Result of the field->store() operation - - const char* bufPtr = buf + db2Field.getBufferOffset(); - - switch (field->type()) - { - case MYSQL_TYPE_NEWDECIMAL: - { - uint precision= ((Field_new_decimal*)field)->precision; - uint scale= field->decimals(); - uint db2Precision = min(precision, MAX_DEC_PRECISION); - uint decimalPlace = precision-scale+1; - char temp[80]; - - if (precision <= MAX_DEC_PRECISION || - scale > precision - MAX_DEC_PRECISION) - { - uint numNibbles = db2Precision + (db2Precision % 2 ? 0 : 1); - - temp[0] = (bcdGet(bufPtr, numNibbles) == 0xD ? '-' : ' '); - int strPos=1; - int bcdPos=(db2Precision % 2 ? 
0 : 1); - - for (;bcdPos < numNibbles; bcdPos++, strPos++) - { - if (strPos == decimalPlace) - { - temp[strPos] = '.'; - strPos++; - } - - temp[strPos] = bcdGet(bufPtr, bcdPos) + '0'; - } - - temp[strPos] = 0; - - storeRC = field->store(temp, strPos, &my_charset_latin1); - } - } - break; - case MYSQL_TYPE_TINY: - { - storeRC = field->store(*(int16*)bufPtr, ((Field_num*)field)->unsigned_flag); - } - break; - case MYSQL_TYPE_SHORT: - { - if (((Field_num*)field)->unsigned_flag) - { - storeRC = field->store(*(int32*)bufPtr, TRUE); - } - else - { - storeRC = field->store(*(int16*)bufPtr, FALSE); - } - } - break; - case MYSQL_TYPE_LONG: - { - if (((Field_num*)field)->unsigned_flag) - { - storeRC = field->store(*(int64*)bufPtr, TRUE); - } - else - { - storeRC = field->store(*(int32*)bufPtr, FALSE); - } - } - break; - case MYSQL_TYPE_FLOAT: - { - storeRC = field->store(*(float*)bufPtr); - } - break; - case MYSQL_TYPE_DOUBLE: - { - storeRC = field->store(*(double*)bufPtr); - } - break; - case MYSQL_TYPE_LONGLONG: - { - char temp[23]; - if (((Field_num*)field)->unsigned_flag) - { - temp[0] = (bcdGet(bufPtr, 21) == 0xD ? '-' : ' '); - int strPos=1; - int bcdPos=0; - - for (;bcdPos <= 20; bcdPos++, strPos++) - { - temp[strPos] = bcdGet(bufPtr, bcdPos) + '0'; - } - - temp[strPos] = 0; - - storeRC = field->store(temp, strPos, &my_charset_latin1); - } - else - { - storeRC = field->store(*(int64*)bufPtr, FALSE); - } - } - break; - case MYSQL_TYPE_INT24: - { - storeRC = field->store(*(int32*)bufPtr, ((Field_num*)field)->unsigned_flag); - } - break; - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_NEWDATE: - { - longlong value= a4toi_ebcdic((uchar*)bufPtr) * 10000 + - a2toi_ebcdic((uchar*)bufPtr+5) * 100 + - a2toi_ebcdic((uchar*)bufPtr+8); - - if (cachedZeroDateOption == SUBSTITUTE_0001_01_01 && - value == (10000 + 100 + 1)) - value = 0; - - storeRC = field->store(value); - } - break; - case MYSQL_TYPE_TIME: - { - if (db2Field.getType() == QMY_TIME) - { - longlong value= a2toi_ebcdic((uchar*)bufPtr) * 10000 + - a2toi_ebcdic((uchar*)bufPtr+3) * 100 + - a2toi_ebcdic((uchar*)bufPtr+6); - - storeRC = field->store(value); - } - else - storeRC = field->store(*((int32*)bufPtr)); - } - break; - case MYSQL_TYPE_TIMESTAMP: - case MYSQL_TYPE_DATETIME: - { - longlong value= (a4toi_ebcdic((uchar*)bufPtr) * 10000 + - a2toi_ebcdic((uchar*)bufPtr+5) * 100 + - a2toi_ebcdic((uchar*)bufPtr+8)) * 1000000LL + - (a2toi_ebcdic((uchar*)bufPtr+11) * 10000 + - a2toi_ebcdic((uchar*)bufPtr+14) * 100 + - a2toi_ebcdic((uchar*)bufPtr+17)); - - if (cachedZeroDateOption == SUBSTITUTE_0001_01_01 && - value == (10000 + 100 + 1) * 1000000LL) - value = 0; - - storeRC = field->store(value); - } - break; - case MYSQL_TYPE_YEAR: - { - if (db2Field.getType() == QMY_CHAR) - { - storeRC = field->store(bufPtr, 4, &my_charset_bin); - } - else - { - storeRC = field->store(*((uint16*)bufPtr)); - } - } - break; - case MYSQL_TYPE_BIT: - { - uint64 temp= 0; - int bytesToCopy= db2Field.getByteLengthInRecord(); - memcpy(((char*)&temp) + (sizeof(temp) - bytesToCopy), bufPtr, bytesToCopy); - storeRC = field->store(temp, TRUE); - } - break; - case MYSQL_TYPE_VARCHAR: - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_BLOB: - { - if (field->real_type() == MYSQL_TYPE_ENUM || - field->real_type() == MYSQL_TYPE_SET) - { - storeRC = field->store(*(int64*)bufPtr); - } - else - { - - const char* dataToStore = NULL; - uint32 bytesToStore = 0; - CHARSET_INFO* fieldCharSet = field->charset(); - switch(db2Field.getType()) - { - case QMY_CHAR: - case QMY_GRAPHIC: - { - bytesToStore = 
db2Field.getByteLengthInRecord(); - if (bytesToStore == 0) - bytesToStore = 1; - dataToStore = bufPtr; - } - break; - case QMY_VARCHAR: - { - bytesToStore = *(uint16*)bufPtr; - dataToStore = bufPtr+sizeof(uint16); - } - break; - case QMY_VARGRAPHIC: - { - /* For VARGRAPHIC, convert the number of double-byte characters - to the number of bytes. */ - bytesToStore = (*(uint16*)bufPtr)*2; - dataToStore = bufPtr+sizeof(uint16); - } - break; - case QMY_DBCLOB: - case QMY_BLOBCLOB: - { - DB2LobField* lobField = (DB2LobField* )(bufPtr + db2Field.calcBlobPad()); - bytesToStore = lobField->length * (db2Field.getType() == QMY_DBCLOB ? 2 : 1); - dataToStore = (char*)blobReadBuffers->getBufferPtr(field->field_index); - } - break; - - } - - if ((fieldCharSet != &my_charset_bin) && // not binary & - (db2Field.getCCSID() != 13488) && // not UCS2 & - (db2Field.getCCSID() != 1208)) - { - char* temp; - size_t db2BytesToStore; - int rc; - if (fieldCharSet->mbmaxlen > 1) - { - size_t maxDb2BytesToStore = ((bytesToStore / 2) * fieldCharSet->mbmaxlen); // Worst case for number of bytes - temp = getCharacterConversionBuffer(field->field_index, maxDb2BytesToStore); - rc = convertFieldChars(toMySQL, field->field_index, dataToStore, temp, bytesToStore, maxDb2BytesToStore, &db2BytesToStore); - bytesToStore = db2BytesToStore; - } - else // single-byte ASCII to EBCDIC - { - temp = getCharacterConversionBuffer(field->field_index, bytesToStore); - rc = convertFieldChars(toMySQL, field->field_index, dataToStore, temp, bytesToStore, bytesToStore, NULL); - } - if (rc) - return (rc); - dataToStore = temp; - } - - if ((field)->flags & BLOB_FLAG) - ((Field_blob*)(field))->set_ptr(bytesToStore, (uchar*)dataToStore); - else - storeRC = field->store(dataToStore, bytesToStore, &my_charset_bin); - } - } - break; - default: - DBUG_ASSERT(0); - break; - - } - - if (storeRC) - { - invalidDataFound = true; - } - - return 0; -} diff --git a/storage/ibmdb2i/db2i_errors.cc b/storage/ibmdb2i/db2i_errors.cc deleted file mode 100644 index dd50e40e61b..00000000000 --- a/storage/ibmdb2i/db2i_errors.cc +++ /dev/null @@ -1,297 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#include "db2i_errors.h" -#include "db2i_ileBridge.h" -#include "db2i_charsetSupport.h" -#include "mysql_priv.h" -#include "stdarg.h" - -#define MAX_MSGSTRING 109 - -/* - The following strings are associated with errors that can be produced - within the storage engine proper. -*/ -static const char* engineErrors[MAX_MSGSTRING] = -{ - {""}, - {"Error opening codeset conversion from %.64s to %.64s (errno = %d)"}, - {"Invalid %-.10s name '%-.128s'"}, - {"Unsupported move from '%-.128s' to '%-.128s' on RENAME TABLE statement"}, - {"The %-.64s character set is not supported."}, - {"Auto_increment is not allowed for a partitioned table"}, - {"Character set conversion error due to unknown encoding scheme %d"}, - {""}, - {"Table '%-.128s' was not found by the storage engine"}, - {"Could not resolve to %-.128s in library %-.10s type %-.10s (errno = %d)"}, - {"Error on _PGMCALL for program %-.10s in library %-.10s (error = %d)"}, - {"Error on _ILECALL for API '%.128s' (error = %d)"}, - {"Error in iconv() function during character set conversion (errno = %d)"}, - {"Error from Get Encoding Scheme (QTQGESP) API: %d, %d, %d"}, - {"Error from Get Related Default CCSID (QTQGRDC) API: %d, %d, %d"}, - {"Data out of range for column '%.192s'"}, - {"Schema name '%.128s' exceeds maximum length of %d characters"}, - {"Multiple collations not supported in a single index or constraint"}, - {"Sort sequence was not found"}, - {"One or more characters in column %.128s were substituted during conversion"}, - {"A decimal column exceeded the maximum precision. Data may be truncated."}, - {"Some data returned by DB2 for table %s could not be converted for MySQL"}, - {""}, - {"Column %.128s contains characters that cannot be converted"}, - {"An invalid name was specified for ibmdb2i_rdb_name."}, - {"A duplicate key was encountered for index '%.128s'"}, - {"A table with the same name exists but has incompatible column definitions."}, - {"The created table was discovered as an existing DB2 object."}, - {"Some attribute(s) defined for column '%.128s' may not be honored by accesses from DB2."}, -}; - -/* - The following strings are associated with errors that can be returned - by the operating system via the QMY_* APIs. Most are very uncommon and - indicate a bug somewhere. 
-*/ -static const char* systemErrors[MAX_MSGSTRING] = -{ - {"Thread ID is too long"}, - {"Error creating a SPACE memory object"}, - {"Error creating a FILE memory object"}, - {"Error creating a SPACE synchronization token"}, - {"Error creating a FILE synchronization token"}, - {"See message %-.7s in joblog for job %-.6s/%-.10s/%-.10s."}, - {"Error unlocking a synchronization token when closing a connection"}, - {"Invalid action specified for an 'object lock' request"}, - {"Invalid action specified for a savepoint request"}, - {"Partial keys are not supported with an ICU sort sequence"}, - {"Error retrieving an ICU sort key"}, - {"Error converting single-byte sort sequence to UCS-2"}, - {"An unsupported collation was specified"}, - {"Validation failed for referenced table of foreign key constraint"}, - {"Error extracting table for constraint information"}, - {"Error extracting referenced table for constraint information"}, - {"Invalid action specified for a 'commitment control' request"}, - {"Invalid commitment control isolation level specified on 'open' request"}, - {"Invalid file handle"}, - {" "}, - {"Invalid option specified for returning data on 'read' request"}, - {"Invalid orientation specified for 'read' request"}, - {"Invalid option type specified for 'read' request"}, - {"Invalid isolation level for starting commitment control"}, - {"Error unlocking a synchronization token in module QMYALC"}, - {"Length of space for returned format is not long enough"}, - {"SQL XA transactions are currently unsupported by this interface"}, - {"The associated QSQSRVR job was killed or ended unexpectedly."}, - {"Error unlocking a synchronization token in module QMYSEI"}, - {"Error unlocking a synchronization token in module QMYSPO"}, - {"Error converting input CCSID from short form to long form"}, - {" "}, - {"Error getting associated CCSID for CCSID conversion"}, - {"Error converting a string from one CCSID to another"}, - {"Error unlocking a synchronization token"}, - {"Error destroying a synchronization token"}, - {"Error locking a synchronization token"}, - {"Error recreating a synchronization token"}, - {"A space handle was not specified for a constraint request"}, - {"An SQL cursor was specified for a delete request"}, - {" "}, - {"Error on delete request because current UFCB for connection is not open"}, - {"An SQL cursor was specified for an object initialization request"}, - {"An SQL cursor was specified for an object override request"}, - {"A space handle was not specified for an object override request"}, - {"An SQL cursor was specified for an information request"}, - {"An SQL cursor was specified for an object lock request"}, - {"An SQL cursor was specified for an optimize request"}, - {"A data handle was not specified for a read request"}, - {"A row number handle was not specified for a read request"}, - {"A key handle was not specified for a read request"}, - {"An SQL cursor was specified for an row estimation request"}, - {"A space handle was not specified for a row estimation request"}, - {"An SQL cursor was specified for a release record request"}, - {"A statement handle was not specified for an 'execute immediate' request"}, - {"A statement handle was not specified for a 'prepare open' request"}, - {"An SQL cursor was specified for an update request"}, - {"The UFCB was not open for read"}, - {"Error on update request because current UFCB for connection is not open"}, - {"A data handle was not specified for an update request"}, - {"An SQL cursor was specified for a write request"}, 
- {"A data handle was not specified for a write request"}, - {"An unknown function was specified on a process request"}, - {"A share definition was not specified for an 'allocate share' request"}, - {"A share handle was not specified for an 'allocate share' request"}, - {"A use count handle was not specified for an 'allocate share' request"}, - {"A 'records per key' handle was not specified for an information request"}, - {"Error resolving LOB addresss"}, - {"Length of a LOB space is too small"}, - {"An unknown function was specified for a server request"}, - {"Object authorization failed. See message %-.7s in joblog for job %-.6s/%-.10s/%-.10s. for more information."}, - {" "}, - {"Error locking mutex on server"}, - {"Error unlocking mutex on server"}, - {"Error checking for RDB name in RDB Directory"}, - {"Error creating mutex on server"}, - {"A table with that name already exists"}, - {" "}, - {"Error unlocking mutex"}, - {"Error connecting to server job"}, - {"Error connecting to server job"}, - {" "}, - {"Function check occurred while registering parameter spaces. See joblog."}, - {" "}, - {" "}, - {"End of block"}, - {"The file has changed and might not be compatible with the MySQL table definition"}, - {"Error giving pipe to server job"}, - {"There are open object locks when attempting to deallocate"}, - {"There is no open lock"}, - {" "}, - {" "}, - {"The maximum value for the auto_increment data type was exceeded"}, - {"Error occurred closing the pipe "}, - {"Error occurred taking a descriptor for the pipe"}, - {"Error writing to pipe "}, - {"Server was interrupted "}, - {"No pipe descriptor exists for reuse "}, - {"Error occurred during an SQL prepare statement "}, - {"Error occurred during an SQL open "}, - {" "}, - {" "}, - {" "}, - {" "}, - {" "}, - {" "}, - {"An unspecified error was returned from the system."}, - {" "} -}; - -/** - This function builds the text string for an error code, and substitutes - a variable number of replacement variables into the string. -*/ -void getErrTxt(int errCode, ...) -{ - va_list args; - va_start(args,errCode); - char* buffer = db2i_ileBridge::getBridgeForThread()->getErrorStorage(); - const char* msg; - - if (errCode >= QMY_ERR_MIN && errCode <= QMY_ERR_SQ_OPEN) - msg = systemErrors[errCode - QMY_ERR_MIN]; - else - { - DBUG_ASSERT(errCode >= DB2I_FIRST_ERR && errCode <= DB2I_LAST_ERR); - msg = engineErrors[errCode - DB2I_FIRST_ERR]; - } - - (void) my_vsnprintf (buffer, MYSQL_ERRMSG_SIZE, msg, args); - va_end(args); - fprintf(stderr,"ibmdb2i error %d: %s\n",errCode,buffer); - DBUG_PRINT("error", ("ibmdb2i error %d: %s",errCode,buffer)); -} - -static inline void trimSpace(char* str) -{ - char* end = strchr(str, ' '); - if (end) *end = 0; -} - - -/** - Generate the error text specific to an API error returned by a QMY_* API. - - @parm errCode The error value - @parm errInfo The structure containing the message and job identifiers. 
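/*
  A minimal, self-contained sketch of the lookup-and-format pattern used by
  getErrTxt() above: the error code is turned into an index into a message
  table, and the printf-style template is filled in from the varargs.  The
  demo* names, the 512-byte buffer, and plain vsnprintf are illustrative
  stand-ins for the engine's my_vsnprintf and MYSQL_ERRMSG_SIZE.
*/
#include <stdarg.h>
#include <stdio.h>

static const char* const demoEngineErrors[] =
{
  "",                                                                 /* 2500 */
  "Error opening codeset conversion from %.64s to %.64s (errno = %d)" /* 2501 */
};

static void demoGetErrTxt(int errCode, ...)
{
  const int DEMO_FIRST_ERR = 2500;          /* mirrors DB2I_FIRST_ERR        */
  char buffer[512];
  va_list args;
  va_start(args, errCode);
  vsnprintf(buffer, sizeof(buffer),
            demoEngineErrors[errCode - DEMO_FIRST_ERR], args);
  va_end(args);
  fprintf(stderr, "ibmdb2i error %d: %s\n", errCode, buffer);
}

/* demoGetErrTxt(2501, "UTF-8", "IBM-037", 22) would print:
   ibmdb2i error 2501: Error opening codeset conversion from UTF-8 to IBM-037 (errno = 22) */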
-*/ -void reportSystemAPIError(int errCode, const Qmy_Error_output *errInfo) -{ - if (errCode >= QMY_ERR_MIN && errCode <= QMY_ERR_SQ_OPEN) - { - switch(errCode) - { - case QMY_ERR_MSGID: - case QMY_ERR_NOT_AUTH: - { - DBUG_ASSERT(errInfo); - char jMsg[8]; // Error message ID - char jName[11]; // Job name - char jUser[11]; // Job user - char jNbr[7]; // Job number - memset(jMsg, 0, sizeof(jMsg)); - memset(jName, 0, sizeof(jMsg)); - memset(jUser, 0, sizeof(jMsg)); - memset(jMsg, 0, sizeof(jMsg)); - - convFromEbcdic(errInfo->MsgId,jMsg,sizeof(jMsg)-1); - convFromEbcdic(errInfo->JobName,jName,sizeof(jName)-1); - trimSpace(jName); - convFromEbcdic(errInfo->JobUser,jUser,sizeof(jUser)-1); - trimSpace(jUser); - convFromEbcdic(errInfo->JobNbr,jNbr,sizeof(jNbr)-1); - getErrTxt(errCode,jMsg,jNbr,jUser,jName); - } - break; - case QMY_ERR_RTNFMT: - { - getErrTxt(QMY_ERR_LVLID_MISMATCH); - } - break; - default: - getErrTxt(errCode); - break; - } - } -} - - -/** - Generate a warning for the specified error. -*/ -void warning(THD *thd, int errCode, ...) -{ - va_list args; - va_start(args,errCode); - char buffer[MYSQL_ERRMSG_SIZE]; - const char* msg; - - DBUG_ASSERT(errCode >= DB2I_FIRST_ERR && errCode <= DB2I_LAST_ERR); - msg = engineErrors[errCode - DB2I_FIRST_ERR]; - - (void) my_vsnprintf (buffer, MYSQL_ERRMSG_SIZE, msg, args); - va_end(args); - DBUG_PRINT("warning", ("ibmdb2i warning %d: %s",errCode,buffer)); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, errCode, buffer); -} - - diff --git a/storage/ibmdb2i/db2i_errors.h b/storage/ibmdb2i/db2i_errors.h deleted file mode 100644 index b6dd314ef50..00000000000 --- a/storage/ibmdb2i/db2i_errors.h +++ /dev/null @@ -1,93 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. 
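/*
  For illustration of reportSystemAPIError() above: for a QMY_ERR_MSGID
  return code it extracts the message ID and the qualified job name from the
  EBCDIC error output and passes them to getErrTxt(), producing a message of
  the form

    See message SQL0350 in joblog for job 123456/QUSER/QSQSRVR.

  SQL0350 and the number/user/name job format appear elsewhere in these
  sources; the specific job identifiers here are placeholders.
*/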
-*/ - - -#ifndef DB2I_ERRORS_H -#define DB2I_ERRORS_H - -#include "qmyse.h" -class THD; - -/** - @enum DB2I_errors - - @brief These are the errors that can be returned by the storage engine proper - and that are specific to the engine. Refer to db2i_errors.cc for text - descriptions of the errors. -*/ - -enum DB2I_errors -{ - DB2I_FIRST_ERR = 2500, - DB2I_ERR_ICONV_OPEN, - DB2I_ERR_INVALID_NAME, - DB2I_ERR_RENAME_MOVE, - DB2I_ERR_UNSUPP_CHARSET, - DB2I_ERR_PART_AUTOINC, - DB2I_ERR_UNKNOWN_ENCODING, - DB2I_ERR_RESERVED, - DB2I_ERR_TABLE_NOT_FOUND, - DB2I_ERR_RESOLVE_OBJ, - DB2I_ERR_PGMCALL, - DB2I_ERR_ILECALL, - DB2I_ERR_ICONV, - DB2I_ERR_QTQGESP, - DB2I_ERR_QTQGRDC, - DB2I_ERR_INVALID_COL_VALUE, - DB2I_ERR_TOO_LONG_SCHEMA, - DB2I_ERR_MIXED_COLLATIONS, - DB2I_ERR_SRTSEQ, - DB2I_ERR_SUB_CHARS, - DB2I_ERR_PRECISION, - DB2I_ERR_INVALID_DATA, - DB2I_ERR_RESERVED2, - DB2I_ERR_ILL_CHAR, - DB2I_ERR_BAD_RDB_NAME, - DB2I_ERR_UNKNOWN_IDX, - DB2I_ERR_DISCOVERY_MISMATCH, - DB2I_ERR_WARN_CREATE_DISCOVER, - DB2I_ERR_WARN_COL_ATTRS, - DB2I_LAST_ERR = DB2I_ERR_WARN_COL_ATTRS -}; - -void getErrTxt(int errcode, ...); -void reportSystemAPIError(int errCode, const Qmy_Error_output *errInfo); -void warning(THD *thd, int errCode, ...); - -const char* DB2I_SQL0350 = "\xE2\xD8\xD3\xF0\xF3\xF5\xF0"; // SQL0350 in EBCDIC -const char* DB2I_CPF503A = "\xC3\xD7\xC6\xF5\xF0\xF3\xC1"; // CPF503A in EBCDIC -const char* DB2I_SQL0538 = "\xE2\xD8\xD3\xF0\xF5\xF3\xF8"; // SQL0538 in EBCDIC - -#endif diff --git a/storage/ibmdb2i/db2i_file.cc b/storage/ibmdb2i/db2i_file.cc deleted file mode 100644 index a16aa927527..00000000000 --- a/storage/ibmdb2i/db2i_file.cc +++ /dev/null @@ -1,556 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. 
-*/ - - - -#include "db2i_file.h" -#include "db2i_charsetSupport.h" -#include "db2i_collationSupport.h" -#include "db2i_misc.h" -#include "db2i_errors.h" -#include "my_dir.h" - -db2i_table::db2i_table(const TABLE_SHARE* myTable, const char* path) : - mysqlTable(myTable), - db2StartId(0), - blobFieldCount(0), - blobFields(NULL), - blobFieldActualSizes(NULL), - logicalFiles(NULL), - physicalFile(NULL), - db2TableNameSQLAscii(NULL), - db2LibNameSQLAscii(NULL) -{ - char asciiLibName[MAX_DB2_SCHEMANAME_LENGTH + 1]; - getDB2LibNameFromPath(path, asciiLibName, ASCII_NATIVE); - - char asciiFileName[MAX_DB2_FILENAME_LENGTH + 1]; - getDB2FileNameFromPath(path, asciiFileName, ASCII_NATIVE); - - size_t libNameLen = strlen(asciiLibName); - size_t fileNameLen = strlen(asciiFileName); - - db2LibNameEbcdic=(char *) - my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), - &db2LibNameEbcdic, libNameLen+1, - &db2LibNameAscii, libNameLen+1, - &db2LibNameSQLAscii, libNameLen*2 + 1, - &db2TableNameEbcdic, fileNameLen+1, - &db2TableNameAscii, fileNameLen+1, - &db2TableNameSQLAscii, fileNameLen*2 + 1, - NullS); - - if (likely(db2LibNameEbcdic)) - { - memcpy(db2LibNameAscii, asciiLibName, libNameLen); - convertNativeToSQLName(db2LibNameAscii, db2LibNameSQLAscii); - convToEbcdic(db2LibNameAscii, db2LibNameEbcdic, libNameLen); - memcpy(db2TableNameAscii, asciiFileName, fileNameLen); - convertNativeToSQLName(db2TableNameAscii, db2TableNameSQLAscii); - convToEbcdic(db2TableNameAscii, db2TableNameEbcdic, fileNameLen); - } - - conversionDefinitions[toMySQL] = NULL; - conversionDefinitions[toDB2] = NULL; - - isTemporaryTable = (strstr(mysqlTable->path.str, mysql_tmpdir) == mysqlTable->path.str); -} - - -int32 db2i_table::initDB2Objects(const char* path) -{ - uint fileObjects = 1 + mysqlTable->keys; - ValidatedPointer fileDefnSpace(sizeof(ShrDef) * fileObjects); - - physicalFile = new db2i_file(this); - physicalFile->fillILEDefn(&fileDefnSpace[0], true); - - logicalFileCount = mysqlTable->keys; - if (logicalFileCount > 0) - { - logicalFiles = new db2i_file*[logicalFileCount]; - for (int k = 0; k < logicalFileCount; k++) - { - logicalFiles[k] = new db2i_file(this, k); - logicalFiles[k]->fillILEDefn(&fileDefnSpace[k+1], false); - } - } - - ValidatedPointer fileDefnHandles(sizeof(FILE_HANDLE) * fileObjects); - size_t formatSpaceLen = sizeof(format_hdr_t) + mysqlTable->fields * sizeof(DB2Field); - formatSpace.alloc(formatSpaceLen); - - int rc = db2i_ileBridge::getBridgeForThread()-> - expectErrors(QMY_ERR_RTNFMT)-> - allocateFileDefn(fileDefnSpace, - fileDefnHandles, - fileObjects, - db2LibNameEbcdic, - strlen(db2LibNameEbcdic), - formatSpace, - formatSpaceLen); - - if (rc) - { - // We have to handle a format space error as a special case of a FID - // mismatch. We should only get the space error if columns have been added - // to the DB2 table without MySQL's knowledge, which is effectively a - // FID problem. 
- if (rc == QMY_ERR_RTNFMT) - { - rc = QMY_ERR_LVLID_MISMATCH; - getErrTxt(rc); - } - return rc; - } - - convFromEbcdic(((format_hdr_t*)formatSpace)->FilLvlId, fileLevelID, sizeof(fileLevelID)); - - if (!doFileIDsMatch(path)) - { - getErrTxt(QMY_ERR_LVLID_MISMATCH); - return QMY_ERR_LVLID_MISMATCH; - } - - physicalFile->setMasterDefnHandle(fileDefnHandles[0]); - for (int k = 0; k < mysqlTable->keys; k++) - { - logicalFiles[k]->setMasterDefnHandle(fileDefnHandles[k+1]); - } - - db2StartId = (uint64)(((format_hdr_t*)formatSpace)->StartIdVal); - db2Fields = (DB2Field*)((char*)(void*)formatSpace + ((format_hdr_t*)formatSpace)->ColDefOff); - - uint fields = mysqlTable->fields; - for (int i = 0; i < fields; ++i) - { - if (db2Field(i).isBlob()) - { - blobFieldCount++; - } - } - - if (blobFieldCount) - { - blobFieldActualSizes = (uint*)my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), - &blobFieldActualSizes, blobFieldCount * sizeof(uint), - &blobFields, blobFieldCount * sizeof(uint16), - NullS); - - int b = 0; - for (int i = 0; i < fields; ++i) - { - if (db2Field(i).isBlob()) - { - blobFields[b++] = i; - } - } - } - - my_multi_malloc(MYF(MY_WME), - &conversionDefinitions[toMySQL], fields * sizeof(iconv_t), - &conversionDefinitions[toDB2], fields * sizeof(iconv_t), - NullS); - for (int i = 0; i < fields; ++i) - { - conversionDefinitions[toMySQL][i] = (iconv_t)(-1); - conversionDefinitions[toDB2][i] = (iconv_t)(-1); - } - - return 0; -} - -int db2i_table::fastInitForCreate(const char* path) -{ - ValidatedPointer fileDefnSpace(sizeof(ShrDef)); - - physicalFile = new db2i_file(this); - physicalFile->fillILEDefn(fileDefnSpace, true); - - ValidatedPointer fileDefnHandles(sizeof(FILE_HANDLE)); - - size_t formatSpaceLen = sizeof(format_hdr_t) + - mysqlTable->fields * sizeof(DB2Field); - formatSpace.alloc(formatSpaceLen); - - int rc = db2i_ileBridge::getBridgeForThread()->allocateFileDefn(fileDefnSpace, - fileDefnHandles, - 1, - db2LibNameEbcdic, - strlen(db2LibNameEbcdic), - formatSpace, - formatSpaceLen); - - if (rc) - return rc; - - convFromEbcdic(((format_hdr_t*)formatSpace)->FilLvlId, fileLevelID, sizeof(fileLevelID)); - doFileIDsMatch(path); - - return 0; -} - -bool db2i_table::doFileIDsMatch(const char* path) -{ - char name_buff[FN_REFLEN]; - - fn_format(name_buff, path, "", FID_EXT, (MY_REPLACE_EXT | MY_UNPACK_FILENAME)); - - File fd = my_open(name_buff, O_RDONLY, MYF(0)); - - if (fd == -1) - { - if (errno == ENOENT) - { - fd = my_create(name_buff, 0, O_WRONLY, MYF(MY_WME)); - - if (fd == -1) - { - // TODO: Report errno here - return false; - } - my_write(fd, (uchar*)fileLevelID, sizeof(fileLevelID), MYF(MY_WME)); - my_close(fd, MYF(0)); - return true; - } - else - { - // TODO: Report errno here - return false; - } - } - - char diskFID[sizeof(fileLevelID)]; - - bool match = false; - - if (my_read(fd, (uchar*)diskFID, sizeof(diskFID), MYF(MY_WME)) == sizeof(diskFID) && - (memcmp(diskFID, fileLevelID, sizeof(diskFID)) == 0)) - match = true; - - my_close(fd, MYF(0)); - - return match; -} - -void db2i_table::deleteAssocFiles(const char* name) -{ - char name_buff[FN_REFLEN]; - fn_format(name_buff, name, "", FID_EXT, (MY_REPLACE_EXT | MY_UNPACK_FILENAME)); - my_delete(name_buff, MYF(0)); -} - -void db2i_table::renameAssocFiles(const char* from, const char* to) -{ - rename_file_ext(from, to, FID_EXT); -} - - -db2i_table::~db2i_table() -{ - if (blobFieldActualSizes) - my_free(blobFieldActualSizes, MYF(0)); - - if (conversionDefinitions[toMySQL]) - my_free(conversionDefinitions[toMySQL], MYF(0)); - - if 
(logicalFiles) - { - for (int k = 0; k < logicalFileCount; ++k) - { - delete logicalFiles[k]; - } - - delete[] logicalFiles; - } - delete physicalFile; - - my_free(db2LibNameEbcdic, 0); -} - -void db2i_table::getDB2QualifiedName(char* to) -{ - strcat(to, getDB2LibName(ASCII_SQL)); - strcat(to, "."); - strcat(to, getDB2TableName(ASCII_SQL)); -} - - -void db2i_table::getDB2QualifiedNameFromPath(const char* path, char* to) -{ - getDB2LibNameFromPath(path, to); - strcat(to, "."); - getDB2FileNameFromPath(path, strend(to)); -} - - -size_t db2i_table::smartFilenameToTableName(const char *in, char* out, size_t outlen) -{ - if (strchr(in, '@') == NULL) - { - return filename_to_tablename(in, out, outlen); - } - - char* test = (char*) my_malloc(outlen, MYF(MY_WME)); - - filename_to_tablename(in, test, outlen); - - char* cur = test; - - while (*cur) - { - if ((*cur <= 0x20) || (*cur >= 0x80)) - { - strncpy(out, in, outlen); - my_free(test, MYF(0)); - return min(outlen, strlen(out)); - } - ++cur; - } - - strncpy(out, test, outlen); - my_free(test, MYF(0)); - return min(outlen, strlen(out)); -} - -void db2i_table::filenameToTablename(const char* in, char* out, size_t outlen) -{ - if (strchr(in, '#') == NULL) - { - smartFilenameToTableName(in, out, outlen); - return; - } - - char* temp = (char*)sql_alloc(outlen); - - const char* part1, *part2, *part3, *part4; - part1 = in; - part2 = strstr(part1, "#P#"); - if (part2); - { - part3 = part2 + 3; - part4 = strchr(part3, '#'); - if (!part4) - part4 = strend(in); - } - - memcpy(temp, part1, min(outlen, part2 - part1)); - temp[min(outlen-1, part2-part1)] = 0; - - int32 accumLen = smartFilenameToTableName(temp, out, outlen); - - if (part2 && (accumLen + 4 < outlen)) - { - strcat(out, "#P#"); - accumLen += 4; - - memset(temp, 0, min(outlen, part2-part1)); - memcpy(temp, part3, min(outlen, part4-part3)); - temp[min(outlen-1, part4-part3)] = 0; - - accumLen += smartFilenameToTableName(temp, strend(out), outlen-accumLen); - - if (part4 && (accumLen + (strend(in) - part4 + 1) < outlen)) - { - strcat(out, part4); - } - } -} - -void db2i_table::getDB2LibNameFromPath(const char* path, char* lib, NameFormatFlags format) -{ - if (strstr(path, mysql_tmpdir) == path) - { - strcpy(lib, DB2I_TEMP_TABLE_SCHEMA); - } - else - { - const char* c = strend(path) - 1; - while (c > path && *c != '\\' && *c != '/') - --c; - - if (c != path) - { - const char* dbEnd = c; - do { - --c; - } while (c >= path && *c != '\\' && *c != '/'); - - if (c >= path) - { - const char* dbStart = c+1; - char fileName[FN_REFLEN]; - memcpy(fileName, dbStart, dbEnd - dbStart); - fileName[dbEnd-dbStart] = 0; - - char dbName[MAX_DB2_SCHEMANAME_LENGTH+1]; - filenameToTablename(fileName, dbName , sizeof(dbName)); - - convertMySQLNameToDB2Name(dbName, lib, sizeof(dbName), true, (format==ASCII_SQL) ); - } - else - DBUG_ASSERT(0); // This should never happen! 
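/*
  A compact sketch of the .FID side-file check implemented by
  doFileIDsMatch() above: the 13-byte DB2 file level ID is remembered in a
  small companion file on first use and compared on later opens.  Plain stdio
  stands in for my_open/my_read/my_write, and the ENOENT-vs-other-error
  distinction of the original is omitted; the demo name is illustrative only.
*/
#include <stdio.h>
#include <string.h>

static bool demoFileIDsMatch(const char* fidPath, const char (&levelID)[13])
{
  FILE* f = fopen(fidPath, "rb");
  if (!f)                                /* no .FID yet: record the current ID */
  {
    f = fopen(fidPath, "wb");
    if (!f)
      return false;
    fwrite(levelID, 1, sizeof(levelID), f);
    fclose(f);
    return true;
  }
  char diskFID[sizeof(levelID)];
  bool match = (fread(diskFID, 1, sizeof(diskFID), f) == sizeof(diskFID) &&
                memcmp(diskFID, levelID, sizeof(diskFID)) == 0);
  fclose(f);
  return match;
}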
- } - } -} - -void db2i_table::getDB2FileNameFromPath(const char* path, char* file, NameFormatFlags format) -{ - const char* fileEnd = strend(path); - const char* c = fileEnd; - while (c > path && *c != '\\' && *c != '/') - --c; - - if (c != path) - { - const char* fileStart = c+1; - char fileName[FN_REFLEN]; - memcpy(fileName, fileStart, fileEnd - fileStart); - fileName[fileEnd - fileStart] = 0; - char db2Name[MAX_DB2_FILENAME_LENGTH+1]; - filenameToTablename(fileName, db2Name, sizeof(db2Name)); - convertMySQLNameToDB2Name(db2Name, file, sizeof(db2Name), true, (format==ASCII_SQL) ); - } -} - -// Generates the DB2 index name when given the MySQL index and table names. -int32 db2i_table::appendQualifiedIndexFileName(const char* indexName, - const char* tableName, - String& to, - NameFormatFlags format, - enum_DB2I_INDEX_TYPE type) -{ - char generatedName[MAX_DB2_FILENAME_LENGTH+1]; - strncpy(generatedName, indexName, DB2I_INDEX_NAME_LENGTH_TO_PRESERVE); - generatedName[DB2I_INDEX_NAME_LENGTH_TO_PRESERVE] = 0; - char* endOfGeneratedName; - - if (type == typeDefault) - { - strcat(generatedName, DB2I_DEFAULT_INDEX_NAME_DELIMITER); - endOfGeneratedName = strend(generatedName); - } - else if (type != typeNone) - { - strcat(generatedName, DB2I_ADDL_INDEX_NAME_DELIMITER); - endOfGeneratedName = strend(generatedName); - *(endOfGeneratedName-2) = char(type); - } - - uint lenWithoutFile = endOfGeneratedName - generatedName; - - char strippedTableName[MAX_DB2_FILENAME_LENGTH+1]; - if (format == ASCII_SQL) - { - strcpy(strippedTableName, tableName); - stripExtraQuotes(strippedTableName+1, sizeof(strippedTableName)); - tableName = strippedTableName; - } - - if (strlen(tableName) > (MAX_DB2_FILENAME_LENGTH-lenWithoutFile)) - return -1; - - strncat(generatedName, - tableName+1, - min(strlen(tableName), (MAX_DB2_FILENAME_LENGTH-lenWithoutFile))-2 ); - - char finalName[MAX_DB2_FILENAME_LENGTH+1]; - convertMySQLNameToDB2Name(generatedName, finalName, sizeof(finalName), true, (format==ASCII_SQL)); - to.append(finalName); - - return 0; -} - - -void db2i_table::findConversionDefinition(enum_conversionDirection direction, uint16 fieldID) -{ - getConversion(direction, - mysqlTable->field[fieldID]->charset(), - db2Field(fieldID).getCCSID(), - conversionDefinitions[direction][fieldID]); -} - - -db2i_file::db2i_file(db2i_table* table) : db2Table(table) -{ - commonCtorInit(); - - DBUG_ASSERT(table->getMySQLTable()->table_name.length <= MAX_DB2_FILENAME_LENGTH-2); - - db2FileName = (char*)table->getDB2TableName(db2i_table::EBCDIC_NATIVE); -} - -db2i_file::db2i_file(db2i_table* table, int index) : db2Table(table) -{ - commonCtorInit(); - - if ((index == table->getMySQLTable()->primary_key) && !table->isTemporary()) - { - db2FileName = (char*)table->getDB2TableName(db2i_table::EBCDIC_NATIVE); - } - else - { - // Generate the index name (in index___table form); quote and EBCDICize it. 
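/*
  Rough example of what appendQualifiedIndexFileName() above produces (index
  and table names here are made-up): an index named idx1 on a table whose DB2
  name is "MYTAB" yields a generated name of approximately idx1___MYTAB,
  which is then quoted and, in this constructor, converted to EBCDIC below.
  The exact result also depends on the 110-character truncation and
  quote-stripping rules in that function.
*/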
- String qualifiedPath; - qualifiedPath.length(0); - - const char* asciiFileName = table->getDB2TableName(db2i_table::ASCII_NATIVE); - - db2i_table::appendQualifiedIndexFileName(table->getMySQLTable()->key_info[index].name, - asciiFileName, - qualifiedPath, - db2i_table::ASCII_NATIVE, - typeDefault); - - db2FileName = (char*)my_malloc(qualifiedPath.length()+1, MYF(MY_WME | MY_ZEROFILL)); - convToEbcdic(qualifiedPath.ptr(), db2FileName, qualifiedPath.length()); - } -} - -void db2i_file::commonCtorInit() -{ - masterDefn = 0; - memset(&formats, 0, maxRowFormats*sizeof(RowFormat)); -} - - -void db2i_file::fillILEDefn(ShrDef* defn, bool readInArrivalSeq) -{ - defn->ObjNamLen = strlen(db2FileName); - DBUG_ASSERT(defn->ObjNamLen <= sizeof(defn->ObjNam)); - memcpy(defn->ObjNam, db2FileName, defn->ObjNamLen); - defn->ArrSeq[0] = (readInArrivalSeq ? QMY_YES : QMY_NO); -} - diff --git a/storage/ibmdb2i/db2i_file.h b/storage/ibmdb2i/db2i_file.h deleted file mode 100644 index 7b63b18c315..00000000000 --- a/storage/ibmdb2i/db2i_file.h +++ /dev/null @@ -1,445 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#ifndef DB2I_FILE_H -#define DB2I_FILE_H - -#include "db2i_global.h" -#include "db2i_ileBridge.h" -#include "db2i_validatedPointer.h" -#include "db2i_iconv.h" -#include "db2i_charsetSupport.h" - -const char FID_EXT[] = ".FID"; - -class db2i_file; - -#pragma pack(1) -struct DB2LobField -{ - char reserved1; - uint32 length; - char reserved2[4]; - uint32 ordinal; - ILEMemHandle dataHandle; - char reserved3[8]; -}; -#pragma pack(pop) - -class DB2Field -{ - public: - uint16 getType() const { return *(uint16*)(&definition.ColType); } - uint16 getByteLengthInRecord() const { return definition.ColLen; } - uint16 getDataLengthInRecord() const - { - return (getType() == QMY_VARCHAR || getType() == QMY_VARGRAPHIC ? 
definition.ColLen - 2 : definition.ColLen); - } - uint16 getCCSID() const { return *(uint16*)(&definition.ColCCSID); } - bool isBlob() const - { - uint16 type = getType(); - return (type == QMY_BLOBCLOB || type == QMY_DBCLOB); - } - uint16 getBufferOffset() const { return definition.ColBufOff; } - uint16 calcBlobPad() const - { - DBUG_ASSERT(isBlob()); - return getByteLengthInRecord() - sizeof (DB2LobField); - } - DB2LobField* asBlobField(char* buf) const - { - DBUG_ASSERT(isBlob()); - return (DB2LobField*)(buf + getBufferOffset() + calcBlobPad()); - } - private: - col_def_t definition; -}; - - -/** - @class db2i_table - - @details - This class describes the logical SQL table provided by DB2. - It stores "table-scoped" information such as the name of the - DB2 schema, BLOB descriptions, and the corresponding MySQL table definition. - Only one instance exists per SQL table. -*/ -class db2i_table -{ - public: - enum NameFormatFlags - { - ASCII_SQL, - ASCII_NATIVE, - EBCDIC_NATIVE - }; - - db2i_table(const TABLE_SHARE* myTable, const char* path = NULL); - - ~db2i_table(); - - int32 initDB2Objects(const char* path); - - const TABLE_SHARE* getMySQLTable() const - { - return mysqlTable; - } - - uint64 getStartId() const - { - return db2StartId; - } - - void updateStartId(uint64 newStartId) - { - db2StartId = newStartId; - } - - bool hasBlobs() const - { - return (blobFieldCount > 0); - } - - uint16 getBlobCount() const - { - return blobFieldCount; - } - - uint getBlobFieldActualSize(uint fieldIndex) const - { - return blobFieldActualSizes[getBlobIdFromField(fieldIndex)]; - } - - void updateBlobFieldActualSize(uint fieldIndex, uint32 newSize) - { - // It's OK that this isn't threadsafe, since this is just an advisory - // value. If a race condition causes the lesser of two values to be stored, - // that's OK. 
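/*
  Sketch of how a VARCHAR value is pulled out of a DB2 row buffer, following
  DB2Field::getDataLengthInRecord() above and the QMY_VARCHAR handling in the
  handler code earlier in this patch: the column's bytes in the record start
  with a 2-byte length prefix, and the character data follows it.  The helper
  name and parameters below are illustrative only.
*/
#include <stdint.h>
#include <string.h>

static const char* demoReadVarchar(const char* rowBuf, uint16_t bufferOffset,
                                    uint16_t* lengthOut)
{
  const char* fieldPtr = rowBuf + bufferOffset;   /* cf. DB2Field::getBufferOffset() */
  uint16_t len;
  memcpy(&len, fieldPtr, sizeof(len));            /* length prefix, native byte order */
  *lengthOut = len;
  return fieldPtr + sizeof(len);                  /* data begins after the prefix */
}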
- uint16 blobID = getBlobIdFromField(fieldIndex); - DBUG_ASSERT(blobID < blobFieldCount); - - if (blobFieldActualSizes[blobID] < newSize) - { - blobFieldActualSizes[blobID] = newSize; - } - } - - - - const char* getDB2LibName(NameFormatFlags format = EBCDIC_NATIVE) - { - switch (format) - { - case EBCDIC_NATIVE: - return db2LibNameEbcdic; break; - case ASCII_NATIVE: - return db2LibNameAscii; break; - case ASCII_SQL: - return db2LibNameSQLAscii; break; - default: - DBUG_ASSERT(0); - } - return NULL; - } - - const char* getDB2TableName(NameFormatFlags format = EBCDIC_NATIVE) const - { - switch (format) - { - case EBCDIC_NATIVE: - return db2TableNameEbcdic; break; - case ASCII_NATIVE: - return db2TableNameAscii; break; - case ASCII_SQL: - return db2TableNameAscii; break; - break; - default: - DBUG_ASSERT(0); - } - return NULL; - } - - DB2Field& db2Field(int fieldID) const { return db2Fields[fieldID]; } - DB2Field& db2Field(const Field* field) const { return db2Field(field->field_index); } - - void processFormatSpace(); - - void* getFormatSpace(size_t& spaceNeeded) - { - DBUG_ASSERT(formatSpace == NULL); - spaceNeeded = sizeof(format_hdr_t) + mysqlTable->fields * sizeof(DB2Field); - formatSpace.alloc(spaceNeeded); - return (void*)formatSpace; - } - - bool isTemporary() const - { - return isTemporaryTable; - } - - void getDB2QualifiedName(char* to); - static void getDB2LibNameFromPath(const char* path, char* lib, NameFormatFlags format=ASCII_SQL); - static void getDB2FileNameFromPath(const char* path, char* file, NameFormatFlags format=ASCII_SQL); - static void getDB2QualifiedNameFromPath(const char* path, char* to); - static int32 appendQualifiedIndexFileName(const char* indexName, - const char* tableName, - String& to, - NameFormatFlags format=ASCII_SQL, - enum_DB2I_INDEX_TYPE type=typeDefault); - - uint16 getBlobIdFromField(uint16 fieldID) const - { - for (int i = 0; i < blobFieldCount; ++i) - { - if (blobFields[i] == fieldID) - return i; - } - DBUG_ASSERT(0); - return 0; - } - - iconv_t& getConversionDefinition(enum_conversionDirection direction, - uint16 fieldID) - { - if (conversionDefinitions[direction][fieldID] == (iconv_t)(-1)) - findConversionDefinition(direction, fieldID); - - return conversionDefinitions[direction][fieldID]; - } - - const db2i_file* dataFile() const - { - return physicalFile; - } - - const db2i_file* indexFile(uint idx) const - { - return logicalFiles[idx]; - } - - const char* getFileLevelID() const - { - return fileLevelID; - } - - static void deleteAssocFiles(const char* name); - static void renameAssocFiles(const char* from, const char* to); - - int fastInitForCreate(const char* path); - int initDiscoveredTable(const char* path); - - uint16* blobFields; - -private: - - void findConversionDefinition(enum_conversionDirection direction, uint16 fieldID); - static void filenameToTablename(const char* in, char* out, size_t outlen); - static size_t smartFilenameToTableName(const char *in, char* out, size_t outlen); - void convertNativeToSQLName(const char* input, - char* output) - { - - output[0] = input[0]; - - uint o = 1; - uint i = 1; - do - { - output[o++] = input[i]; - if (input[i] == '"' && input[i+1]) - output[o++] = '"'; - } while (input[++i]); - - output[o] = 0; // This isn't the most user-friendly way to handle overflows, - // but at least its safe. 
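/*
  Worked example for convertNativeToSQLName() above: a native, already quoted
  name such as  "A"B"  becomes  "A""B"  in SQL form, i.e. every embedded
  double quote after the opening one (other than a trailing quote) is
  doubled.  This is why the db2i_table constructor sizes the
  db2LibNameSQLAscii/db2TableNameSQLAscii buffers at twice the name length
  plus one.
*/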
- } - - bool doFileIDsMatch(const char* path); - - ValidatedPointer formatSpace; - DB2Field* db2Fields; - uint64 db2StartId; // Starting value for identity column - uint16 blobFieldCount; // Count of LOB fields in the DB2 table - uint* blobFieldActualSizes; // Array of LOB field lengths (actual vs. allocated). - // This is updated as LOBs are read and will contain - // the length of the longest known LOB in that field. - iconv_t* conversionDefinitions[2]; - - const TABLE_SHARE* mysqlTable; - uint16 logicalFileCount; - char* db2LibNameEbcdic; // Quoted and in EBCDIC - char* db2LibNameAscii; - char* db2TableNameEbcdic; - char* db2TableNameAscii; - char* db2TableNameSQLAscii; - char* db2LibNameSQLAscii; - - db2i_file* physicalFile; - db2i_file** logicalFiles; - - bool isTemporaryTable; - char fileLevelID[13]; -}; - -/** - @class db2i_file - - @details This class describes a file object underlaying a particular SQL - table. Both "physical files" (data) and "logical files" (indices) are - described by this class. Only one instance of the class exists per DB2 file - object. The single instance is responsible for de/allocating the multiple - handles used by the handlers. -*/ -class db2i_file -{ - -public: - struct RowFormat - { - uint16 readRowLen; - uint16 readRowNullOffset; - uint16 writeRowLen; - uint16 writeRowNullOffset; - char inited; - }; - -public: - - // Construct an instance for a physical file. - db2i_file(db2i_table* table); - - // Construct an instance for a logical file. - db2i_file(db2i_table* table, int index); - - ~db2i_file() - { - if (masterDefn) - db2i_ileBridge::getBridgeForThread()->deallocateFile(masterDefn); - - if (db2FileName != (char*)db2Table->getDB2TableName(db2i_table::EBCDIC_NATIVE)) - my_free(db2FileName, MYF(0)); - } - - // This is roughly equivalent to an "open". It tells ILE to allocate a descriptor - // for the file. The associated handle is returned to the caller. - int allocateNewInstance(FILE_HANDLE* newHandle, ILEMemHandle inuseSpace) const - { - int rc; - - rc = db2i_ileBridge::getBridgeForThread()->allocateFileInstance(masterDefn, - inuseSpace, - newHandle); - - if (rc) *newHandle = 0; - - return rc; - } - - // This obtains the row layout associated with a particular access intent for - // an open instance of the file. 
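/*
  Typical call sequence implied by the db2i_file declarations here (a sketch
  only: error handling is elided, file, inuseSpace and commitLevel are
  assumed to be in scope, and the QMY_* intent constants are assumed to come
  from qmyse.h):

    FILE_HANDLE handle;
    const db2i_file::RowFormat* fmt;
    if (!file->allocateNewInstance(&handle, inuseSpace) &&
        !file->obtainRowFormat(handle, QMY_READ_ONLY, commitLevel, &fmt))
    {
      // row buffers can now be sized from fmt->readRowLen and
      // fmt->readRowNullOffset
    }
*/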
- int obtainRowFormat(FILE_HANDLE instanceHandle, - char intent, - char commitLevel, - const RowFormat** activeFormat) const - { - DBUG_ENTER("db2i_file::obtainRowFormat"); - RowFormat* rowFormat; - - if (intent == QMY_UPDATABLE) - rowFormat = &(formats[readWrite]); - else if (intent == QMY_READ_ONLY) - rowFormat = &(formats[readOnly]); - - if (unlikely(!rowFormat->inited)) - { - int rc = db2i_ileBridge::getBridgeForThread()-> - initFileForIO(instanceHandle, - intent, - commitLevel, - &(rowFormat->writeRowLen), - &(rowFormat->writeRowNullOffset), - &(rowFormat->readRowLen), - &(rowFormat->readRowNullOffset)); - if (rc) DBUG_RETURN(rc); - rowFormat->inited = 1; - } - - *activeFormat = rowFormat; - DBUG_RETURN(0); - } - - const char* getDB2FileName() const - { - return db2FileName; - } - - void fillILEDefn(ShrDef* defn, bool readInArrivalSeq); - - void setMasterDefnHandle(FILE_HANDLE handle) - { - masterDefn = handle; - } - - FILE_HANDLE getMasterDefnHandle() const - { - return masterDefn; - } - -private: - enum RowFormats - { - readOnly = 0, - readWrite, - maxRowFormats - }; - - mutable RowFormat formats[maxRowFormats]; - - void commonCtorInit(); - - char* db2FileName; // Quoted and in EBCDIC - - db2i_table* db2Table; // The logical SQL table contained by this file. - - bool db2CanSort; - - FILE_HANDLE masterDefn; -}; - - -#endif diff --git a/storage/ibmdb2i/db2i_global.h b/storage/ibmdb2i/db2i_global.h deleted file mode 100644 index d201fbd8124..00000000000 --- a/storage/ibmdb2i/db2i_global.h +++ /dev/null @@ -1,138 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. 
-*/ - - -#ifndef DB2I_GLOBAL_H -#define DB2I_GLOBAL_H - -#define MYSQL_SERVER 1 - -#include "my_global.h" -#include "my_sys.h" - -const uint MAX_DB2_KEY_PARTS=120; -const int MAX_DB2_V5R4_LIBNAME_LENGTH = 10; -const int MAX_DB2_V6R1_LIBNAME_LENGTH = 30; -const int MAX_DB2_SCHEMANAME_LENGTH=258; -const int MAX_DB2_FILENAME_LENGTH=258; -const int MAX_DB2_COLNAME_LENGTH=128; -const int MAX_DB2_SAVEPOINTNAME_LENGTH=128; -const int MAX_DB2_QUALIFIEDNAME_LENGTH=MAX_DB2_V6R1_LIBNAME_LENGTH + 1 + MAX_DB2_FILENAME_LENGTH; -const uint32 MAX_CHAR_LENGTH = 32765; -const uint32 MAX_VARCHAR_LENGTH = 32739; -const uint32 MAX_DEC_PRECISION = 63; -const uint32 MAX_BLOB_LENGTH = 2147483646; -const uint32 MAX_BINARY_LENGTH = MAX_CHAR_LENGTH; -const uint32 MAX_VARBINARY_LENGTH = MAX_VARCHAR_LENGTH; -const uint32 MAX_FULL_ALLOCATE_BLOB_LENGTH = 65536; -const uint32 MAX_FOREIGN_LEN = 64000; -const char* DB2I_TEMP_TABLE_SCHEMA = "QTEMP"; -const char DB2I_ADDL_INDEX_NAME_DELIMITER[5] = {'_','_','_','_','_'}; -const char DB2I_DEFAULT_INDEX_NAME_DELIMITER[3] = {'_','_','_'}; -const int DB2I_INDEX_NAME_LENGTH_TO_PRESERVE = 110; - -enum enum_DB2I_INDEX_TYPE -{ - typeNone = 0, - typeDefault = 'D', - typeHex = 'H', - typeAscii = 'A' -}; - -void* roundToQuadWordBdy(void* ptr) -{ - return (void*)(((uint64)(ptr)+0xf) & ~0xf); -} - -typedef uint64_t ILEMemHandle; - -struct OSVersion -{ - uint8 v; - uint8 r; -}; -extern OSVersion osVersion; - - -/** - Allocate 16-byte aligned space using the MySQL heap allocator - - @details Many of the spaces used by the QMY_* APIS are required to be - aligned on 16 byte boundaries. The standard system malloc will do this - alignment by default. However, in order to use the heap debug and tracking - features of the mysql allocator, we chose to implement an aligning wrapper - around my_malloc. Essentially, we overallocate the storage space, find the - first aligned address in the space, store a pointer to the true malloc - allocation in the bytes immediately preceding the aligned address, and return - the aligned address to the caller. - - @parm size The size of heap storage needed - - @return A 16-byte aligned pointer to the storage requested. -*/ -void* malloc_aligned(size_t size) -{ - char* p; - char* base; - base = (char*)my_malloc(size + sizeof(void*) + 15, MYF(MY_WME)); - if (likely(base)) - { - p = (char*)roundToQuadWordBdy(base + sizeof(void*)); - char** p2 = (char**)(p - sizeof(void*)); - *p2 = base; - } - else - p = NULL; - - return p; -} - -/** - Free a 16-byte aligned space alloced by malloc_aligned - - @details We know that a pointer to the true malloced storage immediately - precedes the aligned address, so we pull that out and call my_free(). - - @parm p A 16-byte aligned pointer generated by malloc_aligned -*/ -void free_aligned(void* p) -{ - if (likely(p)) - { - my_free(*(char**)((char*)p-sizeof(void*)), MYF(0)); - } -} - -#endif diff --git a/storage/ibmdb2i/db2i_iconv.h b/storage/ibmdb2i/db2i_iconv.h deleted file mode 100644 index 9fc6e4ed636..00000000000 --- a/storage/ibmdb2i/db2i_iconv.h +++ /dev/null @@ -1,51 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. 
- (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - -/** - @file - - @brief Used to redefine iconv symbols to the optimized "myconv" ones -*/ - -#ifndef DB2I_ICONV_H -#define DB2I_ICONV_H - -#include "db2i_myconv.h" -#define iconv_open(A, B) myconv_open(A, B, CONVERTER_DMAP) -#define iconv_close myconv_close -#define iconv myconv_dmap -#define iconv_t myconv_t - -#endif diff --git a/storage/ibmdb2i/db2i_ileBridge.cc b/storage/ibmdb2i/db2i_ileBridge.cc deleted file mode 100644 index 68ae2c2bb72..00000000000 --- a/storage/ibmdb2i/db2i_ileBridge.cc +++ /dev/null @@ -1,1342 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - - -#include "db2i_ileBridge.h" -#include "my_dbug.h" -#include "db2i_global.h" -#include "db2i_charsetSupport.h" -#include "db2i_errors.h" - - -// static class member data -ILEpointer* db2i_ileBridge::functionSymbols; -db2i_ileBridge* db2i_ileBridge::globalBridge; -#ifndef DBUG_OFF -uint32 db2i_ileBridge::registeredPtrs; -#endif - -pthread_key(IleParms*, THR_ILEPARMS); - -static void ileParmsDtor(void* parmsToFree) -{ - if (parmsToFree) - { - free_aligned(parmsToFree); - DBUG_PRINT("db2i_ileBridge", ("Freeing space for parms")); - } -} - - -/** - Convert a timestamp in ILE time format into a unix time_t -*/ -static inline time_t convertILEtime(const ILE_time_t& input) -{ - tm temp; - - temp.tm_sec = input.Second; - temp.tm_min = input.Minute; - temp.tm_hour = input.Hour; - temp.tm_mday = input.Day; - temp.tm_mon = input.Month-1; - temp.tm_year = input.Year - 1900; - temp.tm_isdst = -1; - - return mktime(&temp); -} - -/** - Allocate and intialize a new bridge structure -*/ -db2i_ileBridge* db2i_ileBridge::createNewBridge(CONNECTION_HANDLE connID) -{ - DBUG_PRINT("db2i_ileBridge::createNewBridge",("Building new bridge...")); - db2i_ileBridge* newBridge = (db2i_ileBridge*)my_malloc(sizeof(db2i_ileBridge), MYF(MY_WME)); - - if (unlikely(newBridge == NULL)) - return NULL; - - newBridge->stmtTxActive = false; - newBridge->connErrText = NULL; - newBridge->pendingLockedHandles.head = NULL; - newBridge->cachedConnectionID = connID; - - return newBridge; -} - - -void db2i_ileBridge::destroyBridge(db2i_ileBridge* bridge) -{ - bridge->freeErrorStorage(); - my_free(bridge, MYF(0)); -} - - -void db2i_ileBridge::destroyBridgeForThread(const THD* thd) -{ - void* thdData = *thd_ha_data(thd, ibmdb2i_hton); - if (thdData != NULL) - { - destroyBridge((db2i_ileBridge*)thdData); - } -} - - -void db2i_ileBridge::registerPtr(const void* ptr, ILEMemHandle* receiver) -{ - static const arg_type_t ileSignature[] = { ARG_MEMPTR, ARG_END }; - - if (unlikely(ptr == NULL)) - { - *receiver = 0; - return; - } - - struct ArgList - { - ILEarglist_base base; - ILEpointer ptr; - } *arguments; - - char argBuf[sizeof(ArgList)+15]; - arguments = (ArgList*)roundToQuadWordBdy(argBuf); - - arguments->ptr.s.addr = (address64_t)(ptr); - - _ILECALL(&functionSymbols[funcRegisterSpace], - &arguments->base, - ileSignature, - RESULT_INT64); - -#ifndef DBUG_OFF - uint32 truncHandle = arguments->base.result.r_uint64; - DBUG_PRINT("db2i_ileBridge::registerPtr",("Register 0x%p with handle %d", ptr, truncHandle)); - getBridgeForThread()->registeredPtrs++; -#endif - - *receiver = arguments->base.result.r_uint64; - return; -} - -void db2i_ileBridge::unregisterPtr(ILEMemHandle handle) -{ - static const arg_type_t ileSignature[] = { ARG_UINT64, ARG_END }; - - if (unlikely(handle == NULL)) - return; - - struct ArgList - { - ILEarglist_base base; - uint64 handle; - } *arguments; - - char argBuf[sizeof(ArgList)+15]; - arguments = (ArgList*)roundToQuadWordBdy(argBuf); - - arguments->handle = (uint64)(handle); - - 
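/*
  The _ILECALL argument lists above are built in a stack buffer that is
  over-allocated by 15 bytes and then rounded up to the next 16-byte
  boundary, matching roundToQuadWordBdy() from db2i_global.h earlier in this
  patch.  A self-contained illustration of that rounding (demo names only):
*/
#include <stdint.h>

static void* demoRoundTo16(void* ptr)
{
  return (void*)(((uintptr_t)ptr + 0xf) & ~(uintptr_t)0xf);
}

/*
  char raw[sizeof(SomeArgList) + 15];
  SomeArgList* args = (SomeArgList*)demoRoundTo16(raw);
*/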
_ILECALL(&functionSymbols[funcUnregisterSpace], - &arguments->base, - ileSignature, - RESULT_VOID); - -#ifndef DBUG_OFF - DBUG_PRINT("db2i_ileBridge::unregisterPtr",("Unregister handle %d", (uint32)handle)); - getBridgeForThread()->registeredPtrs--; -#endif -} - - - -/** - Initialize the bridge component - - @details Resolves srvpgm and function names of the APIs. If this fails, - the approrpiate operating system support (PTFs) is probably not installed. - - WARNING: - Must be called before any other functions in this class are used!!!! - Can only be called by a single thread! -*/ -int db2i_ileBridge::setup() -{ - static const char funcNames[db2i_ileBridge::funcListEnd][32] = - { - {"QmyRegisterParameterSpaces"}, - {"QmyRegisterSpace"}, - {"QmyUnregisterSpace"}, - {"QmyProcessRequest"} - }; - - DBUG_ENTER("db2i_ileBridge::setup"); - - int actmark = _ILELOAD("QSYS/QMYSE", ILELOAD_LIBOBJ); - if ( actmark == -1 ) - { - DBUG_PRINT("db2i_ileBridge::setup", ("srvpgm activation failed")); - DBUG_RETURN(1); - } - - functionSymbols = (ILEpointer*)malloc_aligned(sizeof(ILEpointer) * db2i_ileBridge::funcListEnd); - - for (int i = 0; i < db2i_ileBridge::funcListEnd; i++) - { - if (_ILESYM(&functionSymbols[i], actmark, funcNames[i]) == -1) - { - DBUG_PRINT("db2i_ileBridge::setup", - ("resolve of %s failed", funcNames[i])); - DBUG_RETURN(errno); - } - } - - pthread_key_create(&THR_ILEPARMS, &ileParmsDtor); - -#ifndef DBUG_OFF - registeredPtrs = 0; -#endif - - globalBridge = createNewBridge(0); - - DBUG_RETURN(0); -} - -/** - Cleanup any resources before shutting down plug-in -*/ -void db2i_ileBridge::takedown() -{ - if (globalBridge) - destroyBridge(globalBridge); - free_aligned(functionSymbols); -} - -/** - Call off to QmyProcessRequest to perform the API that the caller prepared -*/ -inline int32 db2i_ileBridge::doIt() -{ - static const arg_type_t ileSignature[] = {ARG_END}; - - struct ArgList - { - ILEarglist_base base; - } *arguments; - - char argBuf[sizeof(ArgList)+15]; - arguments = (ArgList*)roundToQuadWordBdy(argBuf); - - _ILECALL(&functionSymbols[funcProcessRequest], - &arguments->base, - ileSignature, - RESULT_INT32); - - return translateErrorCode(arguments->base.result.s_int32.r_int32); -} - -/** - Call off to QmyProcessRequest to perform the API that the caller prepared and - log any errors that may occur. -*/ -inline int32 db2i_ileBridge::doItWithLog() -{ - int32 rc = doIt(); - - if (unlikely(rc)) - { - // Only report errors that we weren't expecting - if (rc != tacitErrors[0] && - rc != tacitErrors[1] && - rc != QMY_ERR_END_OF_BLOCK) - reportSystemAPIError(rc, (Qmy_Error_output_t*)parms()->outParms); - } - memset(tacitErrors, 0, sizeof(tacitErrors)); - - return rc; -} - - -/** - Interface to QMY_ALLOCATE_SHARE API - - See QMY_ALLOCATE_SHARE documentation for more information about - parameters and return codes. 
-*/ -int32 db2i_ileBridge::allocateFileDefn(ILEMemHandle definitionSpace, - ILEMemHandle handleSpace, - uint16 fileCount, - const char* schemaName, - uint16 schemaNameLength, - ILEMemHandle formatSpace, - uint32 formatSpaceLen) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - - IleParms* parmBlock = parms(); - Qmy_MAOS0100 *input = (Qmy_MAOS0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_ALLOCATE_SHARE; - input->ShrDefSpcHnd = definitionSpace; - input->ShrHndSpcHnd = handleSpace; - input->ShrDefCnt = fileCount; - input->FmtSpcHnd = formatSpace; - input->FmtSpcLen = formatSpaceLen; - - if (schemaNameLength > sizeof(input->SchNam)) - { - // This should never happen! - DBUG_ASSERT(0); - return HA_ERR_GENERIC; - } - - memcpy(input->SchNam, schemaName, schemaNameLength); - input->SchNamLen = schemaNameLength; - - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - return rc; -} - - -/** - Interface to QMY_ALLOCATE_INSTANCE API - - See QMY_ALLOCATE_INSTANCE documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::allocateFileInstance(FILE_HANDLE defnHandle, - ILEMemHandle inuseSpace, - FILE_HANDLE* instance) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - - IleParms* parmBlock = parms(); - Qmy_MAOI0100 *input = (Qmy_MAOI0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_ALLOCATE_INSTANCE; - input->ShrHnd = defnHandle; - input->CnnHnd = cachedConnectionID; - input->UseSpcHnd = inuseSpace; - - int32 rc = doItWithLog(); - - if (likely(rc == 0)) - { - Qmy_MAOI0100_output* output = (Qmy_MAOI0100_output*)parmBlock->outParms; - DBUG_ASSERT(instance); - *instance = output->ObjHnd; - } - - return rc; -} - - -/** - Interface to QMY_DEALLOCATE_OBJECT API - - See QMY_DEALLOCATE_OBJECT documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::deallocateFile(FILE_HANDLE rfileHandle, - bool postDropTable) -{ - IleParms* parmBlock = parms(); - Qmy_MDLC0100 *input = (Qmy_MDLC0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_DEALLOCATE_OBJECT; - input->ObjHnd = rfileHandle; - input->ObjDrp[0] = (postDropTable ? QMY_YES : QMY_NO); - - DBUG_PRINT("db2i_ileBridge::deallocateFile", ("Deallocating %d", (uint32)rfileHandle)); - - int32 rc = doItWithLog(); - - return rc; -} - - -/** - Interface to QMY_OBJECT_INITIALIZATION API - - See QMY_OBJECT_INITIALIZATION documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::initFileForIO(FILE_HANDLE rfileHandle, - char accessIntent, - char commitLevel, - uint16* inRecSize, - uint16* inRecNullOffset, - uint16* outRecSize, - uint16* outRecNullOffset) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MOIX0100 *input = (Qmy_MOIX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_OBJECT_INITIALIZATION; - input->CmtLvl[0] = commitLevel; - input->Intent[0] = accessIntent; - input->ObjHnd = rfileHandle; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - if (likely(rc == 0)) - { - Qmy_MOIX0100_output* output = (Qmy_MOIX0100_output*)parmBlock->outParms; - *inRecSize = output->InNxtRowOff; - *inRecNullOffset = output->InNullMapOff; - *outRecSize = output->OutNxtRowOff; - *outRecNullOffset = output->OutNullMapOff; - } - - return rc; -} - - -/** - Interface to QMY_READ_ROWS API for reading a row with a specific RRN. 
- - See QMY_READ_ROWS documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::readByRRN(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - uint32 inRRN, - char accessIntent, - char commitLevel) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MRDX0100 *input = (Qmy_MRDX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_READ_ROWS; - input->CmtLvl[0] = commitLevel; - input->ObjHnd = rfileHandle; - input->Intent[0] = accessIntent; - input->OutSpcHnd = (uint64)buf; - input->RelRowNbr = inRRN; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - if (rc == QMY_ERR_END_OF_BLOCK) - { - rc = 0; - DBUG_PRINT("db2i_ileBridge::readByRRN", ("End of block signalled")); - } - - return rc; -} - - -/** - Interface to QMY_WRITE_ROWS API. - - See QMY_WRITE_ROWS documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::writeRows(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - char commitLevel, - int64* outIdVal, - bool* outIdGen, - uint32* dupKeyRRN, - char** dupKeyName, - uint32* dupKeyNameLen, - uint32* outIdIncrement) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MWRT0100 *input = (Qmy_MWRT0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_WRITE_ROWS; - input->CmtLvl[0] = commitLevel; - - input->ObjHnd = rfileHandle; - input->InSpcHnd = (uint64_t) buf; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - Qmy_MWRT0100_output_t* output = (Qmy_MWRT0100_output_t*)parmBlock->outParms; - if (likely(rc == 0 || rc == HA_ERR_FOUND_DUPP_KEY)) - { - DBUG_ASSERT(dupKeyRRN && dupKeyName && dupKeyNameLen && outIdGen && outIdIncrement && outIdVal); - *dupKeyRRN = output->DupRRN; - *dupKeyName = (char*)parmBlock->outParms + output->DupObjNamOff; - *dupKeyNameLen = output->DupObjNamLen; - *outIdGen = (output->NewIdGen[0] == QMY_YES ? TRUE : FALSE); - if (*outIdGen == TRUE) - { - *outIdIncrement = output->IdIncrement; - *outIdVal = output->NewIdVal; - } - } - - return rc; - -} - -/** - Interface to QMY_EXECUTE_IMMEDIATE API. - - See QMY_EXECUTE_IMMEDIATE documentation for more information about - parameters and return codes. -*/ -uint32 db2i_ileBridge::execSQL(const char* statement, - uint32 statementCount, - uint8 commitLevel, - bool autoCreateSchema, - bool dropSchema, - bool noCommit, - FILE_HANDLE fileHandle) - -{ - IleParms* parmBlock = parms(); - Qmy_MSEI0100 *input = (Qmy_MSEI0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_EXECUTE_IMMEDIATE; - - registerPtr(statement, &input->StmtsSpcHnd); - - input->NbrStmts = statementCount; - *(uint16*)(&input->StmtCCSID) = 850; - input->AutoCrtSchema[0] = (autoCreateSchema == TRUE ? QMY_YES : QMY_NO); - input->DropSchema[0] = (dropSchema == TRUE ? QMY_YES : QMY_NO); - input->CmtLvl[0] = commitLevel; - if ((commitLevel == QMY_NONE && statementCount == 1) || noCommit) - { - input->CmtBefore[0] = QMY_NO; - input->CmtAfter[0] = QMY_NO; - } - else - { - input->CmtBefore[0] = QMY_YES; - input->CmtAfter[0] = QMY_YES; - } - input->CnnHnd = current_thd->thread_id; - input->ObjHnd = fileHandle; - - int32 rc = doItWithLog(); - - unregisterPtr(input->StmtsSpcHnd); - - return rc; -} - -/** - Interface to QMY_PREPARE_OPEN_CURSOR API. - - See QMY_PREPARE_OPEN_CURSOR documentation for more information about - parameters and return codes. 
-*/ -int32 db2i_ileBridge::prepOpen(const char* statement, - FILE_HANDLE* rfileHandle, - uint32* recLength) -{ - IleParms* parmBlock = parms(); - Qmy_MSPO0100 *input = (Qmy_MSPO0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_PREPARE_OPEN_CURSOR; - - registerPtr(statement, &input->StmtsSpcHnd ); - *(uint16*)(&input->StmtCCSID) = 850; - input->CnnHnd = current_thd->thread_id; - - int32 rc = doItWithLog(); - - if (likely(rc == 0)) - { - Qmy_MSPO0100_output* output = (Qmy_MSPO0100_output*)parmBlock->outParms; - *rfileHandle = output->ObjHnd; - *recLength = max(output->InNxtRowOff, output->OutNxtRowOff); - } - - - unregisterPtr(input->StmtsSpcHnd); - - return rc; -} - - -/** - Interface to QMY_DELETE_ROW API. - - See QMY_DELETE_ROW documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::deleteRow(FILE_HANDLE rfileHandle, - uint32 rrn) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MDLT0100 *input = (Qmy_MDLT0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_DELETE_ROW; - input->ObjHnd = rfileHandle; - input->RelRowNbr = rrn; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - return rc; -} - - -/** - Interface to QMY_UPDATE_ROW API. - - See QMY_UPDATE_ROW documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::updateRow(FILE_HANDLE rfileHandle, - uint32 rrn, - ILEMemHandle buf, - uint32* dupKeyRRN, - char** dupKeyName, - uint32* dupKeyNameLen) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MUPD0100 *input = (Qmy_MUPD0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_UPDATE_ROW; - input->ObjHnd = rfileHandle; - input->InSpcHnd = (uint64)buf; - input->RelRowNbr = rrn; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - if (rc == HA_ERR_FOUND_DUPP_KEY) - { - Qmy_MUPD0100_output* output = (Qmy_MUPD0100_output*)parmBlock->outParms; - DBUG_ASSERT(dupKeyRRN && dupKeyName && dupKeyNameLen); - *dupKeyRRN = output->DupRRN; - *dupKeyName = (char*)parmBlock->outParms + output->DupObjNamOff; - *dupKeyNameLen = output->DupObjNamLen; - } - - return rc; -} - -/** - Interface to QMY_DESCRIBE_RANGE API. - - See QMY_DESCRIBE_RANGE documentation for more information about - parameters and return codes. 
-*/ -int32 db2i_ileBridge::recordsInRange(FILE_HANDLE defnHandle, - ILEMemHandle inSpc, - uint32 inKeyCnt, - uint32 inLiteralCnt, - uint32 inBoundsOff, - uint32 inLitDefOff, - uint32 inLiteralsOff, - uint32 inCutoff, - uint32 inSpcLen, - uint16 inEndByte, - uint64* outRecCnt, - uint16* outRtnCode) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - - IleParms* parmBlock = parms(); - Qmy_MDRG0100 *input = (Qmy_MDRG0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_DESCRIBE_RANGE; - input->ShrHnd = defnHandle; - input->SpcHnd = (uint64)inSpc; - input->KeyCnt = inKeyCnt; - input->LiteralCnt = inLiteralCnt; - input->BoundsOff = inBoundsOff; - input->LitDefOff = inLitDefOff; - input->LiteralsOff = inLiteralsOff; - input->Cutoff = inCutoff; - input->SpcLen = inSpcLen; - input->EndByte = inEndByte; - input->CnnHnd = cachedConnectionID; - - int rc = doItWithLog(); - - if (likely(rc == 0)) - { - Qmy_MDRG0100_output* output = (Qmy_MDRG0100_output*)parmBlock->outParms; - DBUG_ASSERT(outRecCnt && outRtnCode); - *outRecCnt = output->RecCnt; - *outRtnCode = output->RtnCode; - } - - return rc; -} - - -/** - Interface to QMY_RELEASE_ROW API. - - See QMY_RELEASE_ROW documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::rrlslck(FILE_HANDLE rfileHandle, char accessIntent) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - - IleParms* parmBlock = parms(); - Qmy_MRRX0100 *input = (Qmy_MRRX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_RELEASE_ROW; - - input->ObjHnd = rfileHandle; - input->CnnHnd = cachedConnectionID; - input->Intent[0] = accessIntent; - - int32 rc = doItWithLog(); - - return rc; -} - -/** - Interface to QMY_LOCK_OBJECT API. - - See QMY_LOCK_OBJECT documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::lockObj(FILE_HANDLE defnHandle, - uint64 lockVal, - char lockAction, - char lockType, - char lockTimeout) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MOLX0100 *input = (Qmy_MOLX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_LOCK_OBJECT; - input->ShrHnd = defnHandle; - input->LckTimeoutVal = lockVal; - input->Action[0] = lockAction; - input->LckTyp[0] = lockType; - input->LckTimeout[0] = lockTimeout; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - return rc; -} - -/** - Interface to QMY_DESCRIBE_CONSTRAINTS API. - - See QMY_DESCRIBE_CONSTRAINTS documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::constraints(FILE_HANDLE defnHandle, - ILEMemHandle inSpc, - uint32 inSpcLen, - uint32* outLen, - uint32* outCnt) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MDCT0100 *input = (Qmy_MDCT0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_DESCRIBE_CONSTRAINTS; - input->ShrHnd = defnHandle; - input->CstSpcHnd = (uint64)inSpc; - input->CstSpcLen = inSpcLen; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - if (likely(rc == 0)) - { - Qmy_MDCT0100_output* output = (Qmy_MDCT0100_output*)parmBlock->outParms; - DBUG_ASSERT(outLen && outCnt); - *outLen = output->NeededLen; - *outCnt = output->CstCnt; - } - - return rc; -} - - -/** - Interface to QMY_REORGANIZE_TABLE API. - - See QMY_REORGANIZE_TABLE documentation for more information about - parameters and return codes. 
-*/ -int32 db2i_ileBridge::optimizeTable(FILE_HANDLE defnHandle) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MRGX0100 *input = (Qmy_MRGX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_REORGANIZE_TABLE; - input->ShrHnd = defnHandle; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - return rc; -} - - -/** - Interface to QMY_PROCESS_COMMITMENT_CONTROL API. - - See QMY_PROCESS_COMMITMENT_CONTROL documentation for more information about - parameters and return codes. -*/ -int32 db2i_ileBridge::commitmentControl(uint8 function) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MCCX0100 *input = (Qmy_MCCX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_PROCESS_COMMITMENT_CONTROL; - input->Function[0] = function; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - return rc; -} - - -/** - Interface to QMY_PROCESS_SAVEPOINT API. - - See QMY_PROCESS_SAVEPOINT documentation for more information about parameters and - return codes. -*/ -int32 db2i_ileBridge::savepoint(uint8 function, - const char* savepointName) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - DBUG_PRINT("db2i_ileBridge::savepoint",("%d %s", (uint32)function, savepointName)); - - IleParms* parmBlock = parms(); - Qmy_MSPX0100 *input = (Qmy_MSPX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - char* savPtNam = (char*)(input+1); - - input->Format = QMY_PROCESS_SAVEPOINT; - - if (strlen(savepointName) > MAX_DB2_SAVEPOINTNAME_LENGTH) - { - DBUG_ASSERT(0); - return HA_ERR_GENERIC; - } - strcpy(savPtNam, savepointName); - - input->Function[0] = function; - input->SavPtNamOff = savPtNam - (char*)(input); - input->SavPtNamLen = strlen(savepointName); - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - return rc; -} - -static ILEMemHandle traceSpcHandle; -/** - Do initialization for the QMY_* APIs. - - @parm aspName The name of the relational database to use for all - connections. - - @return 0 if successful; error otherwise -*/ -int32 db2i_ileBridge::initILE(const char* aspName, - uint16* traceCtlPtr) -{ - // We forego the typical thread-based parms space because MySQL doesn't - // allow us to clean it up before checking for memory leaks. As a result - // we get a complaint about leaked memory on server shutdown. - int32 rc; - char inParms[db2i_ileBridge_MAX_INPARM_SIZE]; - char outParms[db2i_ileBridge_MAX_OUTPARM_SIZE]; - if (rc = registerParmSpace(inParms, outParms)) - { - reportSystemAPIError(rc, NULL); - return rc; - } - - registerPtr(traceCtlPtr, &traceSpcHandle); - - struct ParmBlock - { - Qmy_MINI0100 parms; - } *parmBlock = (ParmBlock*)inParms; - - memset(inParms, 0, sizeof(ParmBlock)); - - parmBlock->parms.Format = QMY_INITIALIZATION; - - char paddedName[18]; - if (strlen(aspName) > sizeof(paddedName)) - { - getErrTxt(DB2I_ERR_BAD_RDB_NAME); - return DB2I_ERR_BAD_RDB_NAME; - } - - memset(paddedName, ' ', sizeof(paddedName)); - memcpy(paddedName, aspName, strlen(aspName)); - convToEbcdic(paddedName, parmBlock->parms.RDBName, strlen(paddedName)); - - parmBlock->parms.RDBNamLen = strlen(paddedName); - parmBlock->parms.TrcSpcHnd = traceSpcHandle; - - rc = doIt(); - - if (rc) - { - reportSystemAPIError(rc, (Qmy_Error_output_t*)outParms); - } - - return rc; -} - -/** - Signal to the QMY_ APIs to perform any cleanup they need to do. 
-*/ -int32 db2i_ileBridge::exitILE() -{ - IleParms* parmBlock = parms(); - Qmy_MCLN0100 *input = (Qmy_MCLN0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_CLEANUP; - - int32 rc = doIt(); - - if (rc) - { - reportSystemAPIError(rc, (Qmy_Error_output_t*)parmBlock->outParms); - } - - unregisterPtr(traceSpcHandle); - - DBUG_PRINT("db2i_ileBridge::exitILE", ("Registered ptrs remaining: %d", registeredPtrs)); -#ifndef DBUG_OFF - if (registeredPtrs != 0) - printf("Oh no! IBMDB2I left some pointers registered. Count was %d.\n", registeredPtrs); -#endif - - // This is needed to prevent SAFE_MALLOC from complaining at process termination. - my_pthread_setspecific_ptr(THR_ILEPARMS, NULL); - free_aligned(parmBlock); - - return rc; - -} - - -/** - Designate the specified addresses as parameter passing buffers. - - @parm in Input to the API will go here; format is defined by the individual API - @parm out Output from the API will be; format is defined by the individual API - - @return 0 if success; error otherwise -*/ -int db2i_ileBridge::registerParmSpace(char* in, char* out) -{ - static const arg_type_t ileSignature[] = { ARG_MEMPTR, ARG_MEMPTR, ARG_END }; - - struct ArgList - { - ILEarglist_base base; - ILEpointer input; - ILEpointer output; - } *arguments; - - char argBuf[sizeof(ArgList)+15]; - arguments = (ArgList*)roundToQuadWordBdy(argBuf); - - arguments->input.s.addr = (address64_t)(in); - arguments->output.s.addr = (address64_t)(out); - - _ILECALL(&functionSymbols[funcRegisterParameterSpaces], - &arguments->base, - ileSignature, - RESULT_INT32); - - return arguments->base.result.s_int32.r_int32; -} - - -/** - Interface to QMY_OBJECT_OVERRIDE API. - - See QMY_OBJECT_OVERRIDE documentation for more information about parameters and - return codes. -*/ -int32 db2i_ileBridge::objectOverride(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - uint32 recordWidth) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MOOX0100 *input = (Qmy_MOOX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_OBJECT_OVERRIDE; - input->ObjHnd = rfileHandle; - input->OutSpcHnd = (uint64)buf; - input->NxtRowOff = recordWidth; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - return rc; -} - -/** - Interface to QMY_DESCRIBE_OBJECT API for obtaining table stats. - - See QMY_DESCRIBE_OBJECT documentation for more information about parameters and - return codes. 
-*/ -int32 db2i_ileBridge::retrieveTableInfo(FILE_HANDLE defnHandle, - uint16 dataRequested, - ha_statistics& stats, - ILEMemHandle inSpc) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MDSO0100 *input = (Qmy_MDSO0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_DESCRIBE_OBJECT; - input->ShrHnd = defnHandle; - input->CnnHnd = cachedConnectionID; - - if (dataRequested & objLength) - input->RtnObjLen[0] = QMY_YES; - if (dataRequested & rowCount) - input->RtnRowCnt[0] = QMY_YES; - if (dataRequested & deletedRowCount) - input->RtnDltRowCnt[0] = QMY_YES; - if (dataRequested & rowsPerKey) - { - input->RowKeyHnd = (uint64)inSpc; - input->RtnRowKey[0] = QMY_YES; - } - if (dataRequested & meanRowLen) - input->RtnMeanRowLen[0] = QMY_YES; - if (dataRequested & lastModTime) - input->RtnModTim[0] = QMY_YES; - if (dataRequested & createTime) - input->RtnCrtTim[0] = QMY_YES; - if (dataRequested & ioCount) - input->RtnEstIoCnt[0] = QMY_YES; - - int32 rc = doItWithLog(); - - if (likely(rc == 0)) - { - Qmy_MDSO0100_output* output = (Qmy_MDSO0100_output*)parmBlock->outParms; - if (dataRequested & objLength) - stats.data_file_length = output->ObjLen; - if (dataRequested & rowCount) - stats.records= output->RowCnt; - if (dataRequested & deletedRowCount) - stats.deleted = output->DltRowCnt; - if (dataRequested & meanRowLen) - stats.mean_rec_length = output->MeanRowLen; - if (dataRequested & lastModTime) - stats.update_time = convertILEtime(output->ModTim); - if (dataRequested & createTime) - stats.create_time = convertILEtime(output->CrtTim); - if (dataRequested & ioCount) - stats.data_file_length = output->EstIoCnt; - } - - return rc; -} - -/** - Interface to QMY_DESCRIBE_OBJECT API for finding index size. - - See QMY_DESCRIBE_OBJECT documentation for more information about parameters and - return codes. -*/ -int32 db2i_ileBridge::retrieveIndexInfo(FILE_HANDLE defnHandle, - uint64* outPageCnt) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MDSO0100 *input = (Qmy_MDSO0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_DESCRIBE_OBJECT; - input->ShrHnd = defnHandle; - input->CnnHnd = cachedConnectionID; - input->RtnPageCnt[0] = QMY_YES; - - int32 rc = doItWithLog(); - - if (likely(rc == 0)) - { - Qmy_MDSO0100_output* output = (Qmy_MDSO0100_output*)parmBlock->outParms; - *outPageCnt = output->PageCnt; - } - - return rc; -} - - -/** - Interface to QMY_CLOSE_CONNECTION API - - See QMY_CLOSE_CONNECTION documentation for more information about parameters and - return codes. -*/ -int32 db2i_ileBridge::closeConnection(CONNECTION_HANDLE conn) -{ - IleParms* parmBlock = parms(); - Qmy_MCCN0100 *input = (Qmy_MCCN0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_CLOSE_CONNECTION; - input->CnnHnd = conn; - - int32 rc = doItWithLog(); - - return rc; -} - - -/** - Interface to QMY_INTERRUPT API - - See QMY_INTERRUPT documentation for more information about parameters and - return codes. 
-*/ -int32 db2i_ileBridge::readInterrupt(FILE_HANDLE fileHandle) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MINT0100 *input = (Qmy_MINT0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_INTERRUPT; - input->CnnHnd = cachedConnectionID; - input->ObjHnd = fileHandle; - - int32 rc = doItWithLog(); - - if (rc == QMY_ERR_END_OF_BLOCK) - { - rc = 0; - DBUG_PRINT("db2i_ileBridge::readInterrupt", ("End of block signalled")); - } - - return rc; -} - -/** - Interface to QMY_READ_ROWS API - - See QMY_READ_ROWS documentation for more information about parameters and - return codes. -*/ -int32 db2i_ileBridge::read(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - char accessIntent, - char commitLevel, - char orientation, - bool asyncRead, - ILEMemHandle rrn, - ILEMemHandle key, - uint32 keylen, - uint16 keyParts, - int pipeFD) -{ - DBUG_ASSERT(cachedStateIsCoherent()); - IleParms* parmBlock = parms(); - Qmy_MRDX0100 *input = (Qmy_MRDX0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_READ_ROWS; - input->CmtLvl[0] = commitLevel; - - input->ObjHnd = rfileHandle; - input->Intent[0] = accessIntent; - input->OutSpcHnd = (uint64)buf; - input->OutRRNSpcHnd = (uint64)rrn; - input->RtnData[0] = QMY_RETURN_DATA; - - if (key) - { - input->KeySpcHnd = (uint64)key; - input->KeyColsLen = keylen; - input->KeyColsNbr = keyParts; - } - - input->Async[0] = (asyncRead ? QMY_YES : QMY_NO); - input->PipeDesc = pipeFD; - input->Orientation[0] = orientation; - input->CnnHnd = cachedConnectionID; - - int32 rc = doItWithLog(); - - // QMY_ERR_END_OF_BLOCK is informational only, so we ignore it. - if (rc == QMY_ERR_END_OF_BLOCK) - { - rc = 0; - DBUG_PRINT("db2i_ileBridge::read", ("End of block signalled")); - } - - return rc; -} - - -/** - Interface to QMY_QUIESCE_OBJECT API - - See QMY_QUIESCE_OBJECT documentation for more information about parameters and - return codes. 
-*/ -int32 db2i_ileBridge::quiesceFileInstance(FILE_HANDLE rfileHandle) -{ - IleParms* parmBlock = parms(); - Qmy_MQSC0100 *input = (Qmy_MQSC0100*)&(parmBlock->inParms); - memset(input, 0, sizeof(*input)); - - input->Format = QMY_QUIESCE_OBJECT; - input->ObjHnd = rfileHandle; - - int32 rc = doItWithLog(); - -#ifndef DBUG_OFF - if (unlikely(rc)) - { - DBUG_ASSERT(0); - } -#endif - - return rc; -} - -void db2i_ileBridge::PreservedHandleList::add(const char* newname, FILE_HANDLE newhandle, IBMDB2I_SHARE* share) -{ - NameHandlePair *newPair = (NameHandlePair*)my_malloc(sizeof(NameHandlePair), MYF(MY_WME)); - - newPair->next = head; - head = newPair; - - strcpy(newPair->name, newname); - newPair->handle = newhandle; - newPair->share = share; - DBUG_PRINT("db2i_ileBridge", ("Added handle %d for %s", uint32(newhandle), newname)); -} - - -FILE_HANDLE db2i_ileBridge::PreservedHandleList::findAndRemove(const char* fileName, IBMDB2I_SHARE** share) -{ - NameHandlePair* current = head; - NameHandlePair* prev = NULL; - - while (current) - { - NameHandlePair* next = current->next; - if (strcmp(fileName, current->name) == 0) - { - FILE_HANDLE tmp = current->handle; - *share = current->share; - if (prev) - prev->next = next; - if (current == head) - head = next; - my_free(current, MYF(0)); - DBUG_PRINT("db2i_ileBridge", ("Found handle %d for %s", uint32(tmp), fileName)); - return tmp; - } - prev = current; - current = next; - } - - return 0; -} - - -IleParms* db2i_ileBridge::initParmsForThread() -{ - - IleParms* p = (IleParms*)malloc_aligned(sizeof(IleParms)); - DBUG_ASSERT((uint64)(&(p->outParms))% 16 == 0); // Guarantee that outParms are aligned correctly - - if (likely(p)) - { - int32 rc = registerParmSpace((p->inParms), (p->outParms)); - if (likely(rc == 0)) - { - my_pthread_setspecific_ptr(THR_ILEPARMS, p); - DBUG_PRINT("db2i_ileBridge", ("Inited space for parms")); - return p; - } - else - reportSystemAPIError(rc, NULL); - } - - return NULL; -} - diff --git a/storage/ibmdb2i/db2i_ileBridge.h b/storage/ibmdb2i/db2i_ileBridge.h deleted file mode 100644 index 10b9820d983..00000000000 --- a/storage/ibmdb2i/db2i_ileBridge.h +++ /dev/null @@ -1,499 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#ifndef DB2I_ILEBRIDGE_H -#define DB2I_ILEBRIDGE_H - -#include "db2i_global.h" -#include "mysql_priv.h" -#include "as400_types.h" -#include "as400_protos.h" -#include "qmyse.h" -#include "db2i_errors.h" - -typedef uint64_t FILE_HANDLE; -typedef my_thread_id CONNECTION_HANDLE; -const char SAVEPOINT_NAME[] = {0xD4,0xE2,0xD7,0xC9,0xD5,0xE3,0xC5,0xD9,0xD5,0x0}; -const uint32 TACIT_ERRORS_SIZE=2; - -enum db2i_InfoRequestSpec -{ - objLength = 1, - rowCount = 2, - deletedRowCount = 4, - rowsPerKey = 8, - meanRowLen = 16, - lastModTime = 32, - createTime = 64, - ioCount = 128 -}; - -extern handlerton *ibmdb2i_hton; -struct IBMDB2I_SHARE; - -const uint32 db2i_ileBridge_MAX_INPARM_SIZE = 512; -const uint32 db2i_ileBridge_MAX_OUTPARM_SIZE = 512; - -extern pthread_key(IleParms*, THR_ILEPARMS); -struct IleParms -{ - char inParms[db2i_ileBridge_MAX_INPARM_SIZE]; - char outParms[db2i_ileBridge_MAX_OUTPARM_SIZE]; -}; - -/** - @class db2i_ileBridge - - Implements a connection-based interface to the QMY_* APIs - - @details Each client connection that touches an IBMDB2I table has a "bridge" - associated with it. This bridge is constructed on first use and provides a - more C-like interface to the APIs. As well, it is reponsible for tracking - connection scoped information such as statement transaction state and error - message text. The bridge is destroyed when the connection ends. -*/ -class db2i_ileBridge -{ - enum ileFuncs - { - funcRegisterParameterSpaces, - funcRegisterSpace, - funcUnregisterSpace, - funcProcessRequest, - funcListEnd - }; - - static db2i_ileBridge* globalBridge; -public: - - - static int setup(); - static void takedown(); - - /** - Obtain a pointer to the bridge for the current connection. - - If a MySQL client connection is on the stack, we get the associated brideg. - Otherwise, we use the globalBridge. - */ - static db2i_ileBridge* getBridgeForThread() - { - THD* thd = current_thd; - if (likely(thd)) - return getBridgeForThread(thd); - - return globalBridge; - } - - /** - Obtain a pointer to the bridge for the specified connection. - - If a bridge exists already, we return it immediately. Otherwise, prepare - a new bridge for the connection. 
- */ - static db2i_ileBridge* getBridgeForThread(const THD* thd) - { - void* thdData = *thd_ha_data(thd, ibmdb2i_hton); - if (likely(thdData != NULL)) - return (db2i_ileBridge*)(thdData); - - db2i_ileBridge* newBridge = createNewBridge(thd->thread_id); - *thd_ha_data(thd, ibmdb2i_hton) = (void*)newBridge; - return newBridge; - } - - static void destroyBridgeForThread(const THD* thd); - static void registerPtr(const void* ptr, ILEMemHandle* receiver); - static void unregisterPtr(ILEMemHandle handle); - int32 allocateFileDefn(ILEMemHandle definitionSpace, - ILEMemHandle handleSpace, - uint16 fileCount, - const char* schemaName, - uint16 schemaNameLength, - ILEMemHandle formatSpace, - uint32 formatSpaceLen); - int32 allocateFileInstance(FILE_HANDLE defnHandle, - ILEMemHandle inuseSpace, - FILE_HANDLE* instance); - int32 deallocateFile(FILE_HANDLE fileHandle, - bool postDropTable=FALSE); - int32 read(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - char accessIntent, - char commitLevel, - char orientation, - bool asyncRead = FALSE, - ILEMemHandle rrn = 0, - ILEMemHandle key = 0, - uint32 keylen = 0, - uint16 keyParts = 0, - int pipeFD = -1); - int32 readByRRN(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - uint32 inRRN, - char accessIntent, - char commitLevel); - int32 writeRows(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - char commitLevel, - int64* outIdVal, - bool* outIdGen, - uint32* dupKeyRRN, - char** dupKeyName, - uint32* dupKeyNameLen, - uint32* outIdIncrement); - uint32 execSQL(const char* statement, - uint32 statementCount, - uint8 commitLevel, - bool autoCreateSchema = FALSE, - bool dropSchema = FALSE, - bool noCommit = FALSE, - FILE_HANDLE fileHandle = 0); - int32 prepOpen(const char* statement, - FILE_HANDLE* rfileHandle, - uint32* recLength); - int32 deleteRow(FILE_HANDLE rfileHandle, - uint32 rrn); - int32 updateRow(FILE_HANDLE rfileHandle, - uint32 rrn, - ILEMemHandle buf, - uint32* dupKeyRRN, - char** dupKeyName, - uint32* dupKeyNameLen); - int32 commitmentControl(uint8 function); - int32 savepoint(uint8 function, - const char* savepointName); - int32 recordsInRange(FILE_HANDLE rfileHandle, - ILEMemHandle inSpc, - uint32 inKeyCnt, - uint32 inLiteralCnt, - uint32 inBoundsOff, - uint32 inLitDefOff, - uint32 inLiteralsOff, - uint32 inCutoff, - uint32 inSpcLen, - uint16 inEndByte, - uint64* outRecCnt, - uint16* outRtnCode); - int32 rrlslck(FILE_HANDLE rfileHandle, - char accessIntent); - int32 lockObj(FILE_HANDLE rfileHandle, - uint64 inTimeoutVal, - char inAction, - char inLockType, - char inTimeout); - int32 constraints(FILE_HANDLE rfileHandle, - ILEMemHandle inSpc, - uint32 inSpcLen, - uint32* outLen, - uint32* outCnt); - int32 optimizeTable(FILE_HANDLE rfileHandle); - static int32 initILE(const char* aspName, - uint16* traceCtlPtr); - int32 initFileForIO(FILE_HANDLE rfileHandle, - char accessIntent, - char commitLevel, - uint16* inRecSize, - uint16* inRecNullOffset, - uint16* outRecSize, - uint16* outRecNullOffset); - int32 readInterrupt(FILE_HANDLE fileHandle); - static int32 exitILE(); - - int32 objectOverride(FILE_HANDLE rfileHandle, - ILEMemHandle buf, - uint32 recordWidth = 0); - - int32 retrieveTableInfo(FILE_HANDLE rfileHandle, - uint16 dataRequested, - ha_statistics& stats, - ILEMemHandle inSpc = NULL); - - int32 retrieveIndexInfo(FILE_HANDLE rfileHandle, - uint64* outPageCnt); - - int32 closeConnection(CONNECTION_HANDLE conn); - int32 quiesceFileInstance(FILE_HANDLE rfileHandle); - - /** - Mark the beginning of a "statement transaction" - - @detail MySQL "statement 
transactions" (see sql/handler.cc) are implemented - as DB2 savepoints having a predefined name. - - @return 0 if successful; error otherwise - */ - uint32 beginStmtTx() - { - DBUG_ENTER("db2i_ileBridge::beginStmtTx"); - if (stmtTxActive) - DBUG_RETURN(0); - - stmtTxActive = true; - - DBUG_RETURN(savepoint(QMY_SET_SAVEPOINT, SAVEPOINT_NAME)); - } - - /** - Commit a "statement transaction" - - @return 0 if successful; error otherwise - */ - uint32 commitStmtTx() - { - DBUG_ENTER("db2i_ileBridge::commitStmtTx"); - DBUG_ASSERT(stmtTxActive); - stmtTxActive = false; - DBUG_RETURN(savepoint(QMY_RELEASE_SAVEPOINT, SAVEPOINT_NAME)); - } - - /** - Roll back a "statement transaction" - - @return 0 if successful; error otherwise - */ - uint32 rollbackStmtTx() - { - DBUG_ENTER("db2i_ileBridge::rollbackStmtTx"); - DBUG_ASSERT(stmtTxActive); - stmtTxActive = false; - DBUG_RETURN(savepoint(QMY_ROLLBACK_SAVEPOINT, SAVEPOINT_NAME)); - } - - - /** - Provide storage for generating error messages. - - This storage must persist until the error message is retrieved from the - handler instance. It is for this reason that we associate it with the bridge. - - @return Pointer to heap storage of MYSQL_ERRMSG_SIZE bytes - */ - char* getErrorStorage() - { - if (!connErrText) - { - connErrText = (char*)my_malloc(MYSQL_ERRMSG_SIZE, MYF(MY_WME)); - if (connErrText) connErrText[0] = 0; - } - - return connErrText; - } - - /** - Free storage for generating error messages. - */ - void freeErrorStorage() - { - if (likely(connErrText)) - { - my_free(connErrText, MYF(0)); - connErrText = NULL; - } - } - - - /** - Store a file handle for later retrieval. - - If deallocateFile encounters a lock when trying to perform its operation, - the file remains allocated but must be deallocated later. This function - provides a way for the connection to "remember" that this deallocation is - still needed. - - @param newname The name of the file to be added - @param newhandle The handle associated with newname - - */ - void preserveHandle(const char* newname, FILE_HANDLE newhandle, IBMDB2I_SHARE* share) - { - pendingLockedHandles.add(newname, newhandle, share); - } - - /** - Retrieve a file handle stored by preserveHandle(). - - @param name The name of the file to be retrieved. - - @return The handle associated with name - */ - FILE_HANDLE findAndRemovePreservedHandle(const char* name, IBMDB2I_SHARE** share) - { - FILE_HANDLE hdl = pendingLockedHandles.findAndRemove(name, share); - return hdl; - } - - /** - Indicate which error messages should be suppressed on the next API call - - These functions are useful for ensuring that the provided error numbers - are returned if a failure occurs but do not cause a spurious error message - to be returned. - - @return A pointer to this instance - */ - db2i_ileBridge* expectErrors(int32 er1) - { - tacitErrors[0]=er1; - return this; - } - - db2i_ileBridge* expectErrors(int32 er1, int32 er2) - { - tacitErrors[0]=er1; - tacitErrors[1]=er2; - return this; - } - - /** - Obtain the IBM i system message that accompanied the last API failure. - - @return A pointer to the 7 character message ID. 
- */ - static const char* getErrorMsgID() - { - return ((Qmy_Error_output_t*)parms()->outParms)->MsgId; - } - - /** - Convert an API error code into the equivalent MySQL error code (if any) - - @param rc The QMYSE API error code - - @return If an equivalent exists, the MySQL error code; else rc - */ - static int32 translateErrorCode(int32 rc) - { - if (likely(rc == 0)) - return 0; - - switch (rc) - { - case QMY_ERR_KEY_NOT_FOUND: - return HA_ERR_KEY_NOT_FOUND; - case QMY_ERR_DUP_KEY: - return HA_ERR_FOUND_DUPP_KEY; - case QMY_ERR_END_OF_FILE: - return HA_ERR_END_OF_FILE; - case QMY_ERR_LOCK_TIMEOUT: - return HA_ERR_LOCK_WAIT_TIMEOUT; - case QMY_ERR_CST_VIOLATION: - return HA_ERR_NO_REFERENCED_ROW; - case QMY_ERR_TABLE_NOT_FOUND: - return HA_ERR_NO_SUCH_TABLE; - case QMY_ERR_NON_UNIQUE_KEY: - return ER_DUP_ENTRY; - case QMY_ERR_MSGID: - { - if (memcmp(getErrorMsgID(), DB2I_CPF503A, 7) == 0) - return HA_ERR_ROW_IS_REFERENCED; - if (memcmp(getErrorMsgID(), DB2I_SQL0538, 7) == 0) - return HA_ERR_CANNOT_ADD_FOREIGN; - } - } - return rc; - } - -private: - - static db2i_ileBridge* createNewBridge(CONNECTION_HANDLE connID); - static void destroyBridge(db2i_ileBridge* bridge); - static int registerParmSpace(char* in, char* out); - static int32 doIt(); - int32 doItWithLog(); - - static _ILEpointer *functionSymbols; ///< Array of ILE function pointers - CONNECTION_HANDLE cachedConnectionID; ///< The associated connection - bool stmtTxActive; ///< Inside statement transaction - char *connErrText; ///< Storage for error message - int32 tacitErrors[TACIT_ERRORS_SIZE]; ///< List of errors to be suppressed - - static IleParms* initParmsForThread(); - - /** - Get space for passing parameters to the QMY_* APIs - - @details A fixed-length parameter passing space is associated with each - pthread. This space is allocated and registered by initParmsForThread() - the first time a pthread works with a bridge. The space is cached away - and remains available until the pthread ends. It became necessary to - disassociate the parameter space from the bridge in order to support - future enhancements to MySQL that sever the one-to-one relationship between - pthreads and user connections. The QMY_* APIs scope a registered parameter - space to the thread that executes the register operation. 
- */ - static IleParms* parms() - { - IleParms* p = my_pthread_getspecific_ptr(IleParms*, THR_ILEPARMS); - if (likely(p)) - return p; - - return initParmsForThread(); - } - - class PreservedHandleList - { - friend db2i_ileBridge* db2i_ileBridge::createNewBridge(CONNECTION_HANDLE); - public: - void add(const char* newname, FILE_HANDLE newhandle, IBMDB2I_SHARE* share); - FILE_HANDLE findAndRemove(const char* fileName, IBMDB2I_SHARE** share); - - private: - struct NameHandlePair - { - char name[FN_REFLEN]; - FILE_HANDLE handle; - IBMDB2I_SHARE* share; - NameHandlePair* next; - }* head; - } pendingLockedHandles; - - -#ifndef DBUG_OFF - bool cachedStateIsCoherent() - { - return (current_thd->thread_id == cachedConnectionID); - } - - friend void db2i_ileBridge::unregisterPtr(ILEMemHandle); - friend void db2i_ileBridge::registerPtr(const void*, ILEMemHandle*); - static uint32 registeredPtrs; -#endif -}; - - - -#endif diff --git a/storage/ibmdb2i/db2i_ioBuffers.cc b/storage/ibmdb2i/db2i_ioBuffers.cc deleted file mode 100644 index 9525a6e34b5..00000000000 --- a/storage/ibmdb2i/db2i_ioBuffers.cc +++ /dev/null @@ -1,332 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#include "db2i_ioBuffers.h" - -/** - Request another block of rows - - Request the next set of rows from DB2. This must only be called after - newReadRequest(). - - @param orientation The direction to use when reading through the table. 
-*/ -void IOAsyncReadBuffer::loadNewRows(char orientation) -{ - rewind(); - maxRows() = rowsToBlock; - - DBUG_PRINT("db2i_ioBuffers::loadNewRows", ("Requesting %d rows, async = %d", rowsToBlock, readIsAsync)); - - rc = getBridge()->expectErrors(QMY_ERR_END_OF_BLOCK, QMY_ERR_LOB_SPACE_TOO_SMALL) - ->read(file, - ptr(), - accessIntent, - commitLevel, - orientation, - readIsAsync, - rrnList, - 0, - 0, - 0); - - DBUG_PRINT("db2i_ioBuffers::loadNewRows", ("recordsRead: %d, rc: %d", (uint32)rowCount(), rc)); - - - *releaseRowNeeded = true; - - if (rc == QMY_ERR_END_OF_BLOCK) - { - // This is really just an informational error, so we ignore it. - rc = 0; - DBUG_PRINT("db2i_ioBuffers::loadNewRows", ("End of block signalled")); - } - else if (rc == QMY_ERR_END_OF_FILE) - { - // If we reach EOF or end-of-key, DB2 guarantees that no rows will be locked. - rc = HA_ERR_END_OF_FILE; - *releaseRowNeeded = false; - } - else if (rc == QMY_ERR_KEY_NOT_FOUND) - { - rc = HA_ERR_KEY_NOT_FOUND; - *releaseRowNeeded = false; - } - - if (rc) closePipe(); -} - - -/** - Empty the message pipe to prepare for another read. -*/ -void IOAsyncReadBuffer::drainPipe() -{ - DBUG_ASSERT(pipeState == PendingFullBufferMsg); - PipeRpy_t msg[32]; - int bytes; - PipeRpy_t* lastMsg; - while ((bytes = read(msgPipe, msg, sizeof(msg))) > 0) - { - DBUG_PRINT("db2i_ioBuffers::drainPipe",("Pipe returned %d bytes", bytes)); - lastMsg = &msg[bytes / (sizeof(msg[0]))-1]; - if (lastMsg->CumRowCnt == maxRows() || - lastMsg->RtnCod != 0) - { - pipeState = ConsumedFullBufferMsg; - break; - } - - } - DBUG_PRINT("db2i_ioBuffers::drainPipe",("rc = %d, rows = %d, max = %d", lastMsg->RtnCod, lastMsg->CumRowCnt, (uint32)maxRows())); -} - - -/** - Poll the message pipe for async read messages - - Only valid in async - - @param orientation The direction to use when reading through the table. -*/ -void IOAsyncReadBuffer::pollNextRow(char orientation) -{ - DBUG_ASSERT(readIsAsync); - - // Handle the case in which the buffer is full. - if (rowCount() == maxRows()) - { - // If we haven't read to the end, exit here. - if (readCursor < rowCount()) - return; - - if (pipeState == PendingFullBufferMsg) - drainPipe(); - if (pipeState == ConsumedFullBufferMsg) - loadNewRows(orientation); - } - - if (!rc) - { - PipeRpy_t* lastMsg = NULL; - while (true) - { - PipeRpy_t msg[32]; - int bytes = read(msgPipe, msg, sizeof(msg)); - DBUG_PRINT("db2i_ioBuffers::pollNextRow",("Pipe returned %d bytes", bytes)); - - if (unlikely(bytes < 0)) - { - DBUG_PRINT("db2i_ioBuffers::pollNextRow", ("Error")); - rc = errno; - break; - } - else if (bytes == 0) - break; - - DBUG_ASSERT(bytes % sizeof(msg[0]) == 0); - lastMsg = &msg[bytes / (sizeof(msg[0]))-1]; - - if (lastMsg->RtnCod || (lastMsg->CumRowCnt == usedRows())) - { - rc = lastMsg->RtnCod; - break; - } - } - - *releaseRowNeeded = true; - - if (rc == QMY_ERR_END_OF_BLOCK) - rc = 0; - else if (rc == QMY_ERR_END_OF_FILE) - { - // If we reach EOF or end-of-key, DB2 guarantees that no rows will be locked. 
- rc = HA_ERR_END_OF_FILE; - *releaseRowNeeded = false; - } - else if (rc == QMY_ERR_KEY_NOT_FOUND) - { - rc = HA_ERR_KEY_NOT_FOUND; - *releaseRowNeeded = false; - } - - if (lastMsg) - DBUG_PRINT("db2i_ioBuffers::pollNextRow", ("Good data: rc=%d; rows=%d; usedRows=%d", lastMsg->RtnCod, lastMsg->CumRowCnt, (uint32)usedRows())); - if (lastMsg && likely(!rc)) - { - if (lastMsg->CumRowCnt < maxRows()) - pipeState = PendingFullBufferMsg; - else - pipeState = ConsumedFullBufferMsg; - - DBUG_ASSERT(lastMsg->CumRowCnt <= usedRows()); - - } - DBUG_ASSERT(rowCount() <= getRowCapacity()); - } - DBUG_PRINT("db2i_ioBuffers::pollNextRow", ("filledRows: %d, rc: %d", rowCount(), rc)); - if (rc) closePipe(); -} - - -/** - Prepare for the destruction of the row buffer storage. -*/ -void IOAsyncReadBuffer::prepForFree() -{ - interruptRead(); - rewind(); - IORowBuffer::prepForFree(); -} - - -/** - Initialize the newly allocated storage. - - @param sizeChanged Indicates whether the storage capacity is being changed. -*/ -void IOAsyncReadBuffer::initAfterAllocate(bool sizeChanged) -{ - rewind(); - - if (sizeChanged || ((void*)rrnList == NULL)) - rrnList.realloc(getRowCapacity() * sizeof(uint32)); -} - - -/** - Send an initial read request - - @param infile The file (table/index) being read from - @param orientation The orientation to use for this read request - @param rowsToBuffer The number of rows to request each time - @param useAsync Whether reads should be performed asynchronously. - @param key The key to use (if any) - @param keyLength The length of key (if any) - @param keyParts The number of columns in the key (if any) - -*/ -void IOAsyncReadBuffer::newReadRequest(FILE_HANDLE infile, - char orientation, - uint32 rowsToBuffer, - bool useAsync, - ILEMemHandle key, - int keyLength, - int keyParts) -{ - DBUG_ENTER("db2i_ioBuffers::newReadRequest"); - DBUG_ASSERT(rowsToBuffer <= getRowCapacity()); -#ifndef DBUG_OFF - if (readCursor < rowCount()) - DBUG_PRINT("PERF:",("Wasting %d buffered rows!\n", rowCount() - readCursor)); -#endif - - int fildes[2]; - int ileDescriptor = QMY_REUSE; - - interruptRead(); - - if (likely(useAsync)) - { - if (rowsToBuffer == 1) - { - // Async provides little or no benefit for single row reads, so we turn it off - DBUG_PRINT("db2i_ioBuffers::newReadRequest", ("Disabling async")); - useAsync = false; - } - else - { - rc = pipe(fildes); - if (rc) DBUG_VOID_RETURN; - - // Translate the pipe write descriptor into the equivalent ILE descriptor - rc = fstatx(fildes[1], (struct stat*)&ileDescriptor, sizeof(ileDescriptor), STX_XPFFD_PASE); - if (rc) - { - close(fildes[0]); - close(fildes[1]); - DBUG_VOID_RETURN; - } - pipeState = Untouched; - msgPipe = fildes[0]; - - DBUG_PRINT("db2i_ioBuffers::newReadRequest", ("Opened pipe %d", fildes[0])); - } - } - - file = infile; - readIsAsync = useAsync; - rowsToBlock = rowsToBuffer; - - rewind(); - maxRows() = 1; - rc = getBridge()->expectErrors(QMY_ERR_END_OF_BLOCK, QMY_ERR_LOB_SPACE_TOO_SMALL) - ->read(file, - ptr(), - accessIntent, - commitLevel, - orientation, - useAsync, - rrnList, - key, - keyLength, - keyParts, - ileDescriptor); - - // Having shared the pipe with ILE, we relinquish our claim on the write end - // of the pipe. - if (useAsync) - close(fildes[1]); - - // If we reach EOF or end-of-key, DB2 guarantees that no rows will be locked. 
- if (rc == QMY_ERR_END_OF_FILE) - { - rc = HA_ERR_END_OF_FILE; - *releaseRowNeeded = false; - } - else if (rc == QMY_ERR_KEY_NOT_FOUND) - { - if (rowCount()) - rc = HA_ERR_END_OF_FILE; - else - rc = HA_ERR_KEY_NOT_FOUND; - *releaseRowNeeded = false; - } - else - *releaseRowNeeded = true; - - DBUG_VOID_RETURN; -} diff --git a/storage/ibmdb2i/db2i_ioBuffers.h b/storage/ibmdb2i/db2i_ioBuffers.h deleted file mode 100644 index 350d854f055..00000000000 --- a/storage/ibmdb2i/db2i_ioBuffers.h +++ /dev/null @@ -1,416 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -/** - @file db2i_ioBuffers.h - - @brief Buffer classes used for interacting with QMYSE read/write buffers. - -*/ - - -#include "db2i_validatedPointer.h" -#include "mysql_priv.h" -#include -#include -#include - -// Needed for compilers which do not include fstatx in standard headers. -extern "C" int fstatx(int, struct stat *, int, int); - -/** - Basic row buffer - - Provides the basic structure and methods needed for communicating - with QMYSE I/O APIs. - - @details All QMYSE I/O apis use a buffer that is structured as two integer - row counts (max and used) and storage for some number of rows. The row counts - are both input and output for the API, and their usage depends on the - particular API invoked. This class encapsulates that buffer definition. -*/ -class IORowBuffer -{ - public: - IORowBuffer() : allocSize(0), rowLength(0) {;} - ~IORowBuffer() { freeBuf(); } - ValidatedPointer& ptr() { return data; } - - /** - Sets up the buffer to hold the size indicated. 
- - @param rowLen length of the rows that will be stored in this buffer - @param nullMapOffset position of null map within each row - @param size buffer size requested - */ - void allocBuf(uint32 rowLen, uint16 nullMapOffset, uint32 size) - { - nullOffset = nullMapOffset; - uint32 newSize = size + sizeof(BufferHdr_t); - // If the internal structure of the row is changing, we need to - // remember this and notify the subclasses via initAfterAllocate(); - bool formatChanged = ((size/rowLen) != rowCapacity); - - if (newSize > allocSize) - { - this->freeBuf(); - data.alloc(newSize); - if (likely((void*)data)) - allocSize = newSize; - } - - if (likely((void*)data)) - { - DBUG_ASSERT((uint64)(void*)data % 16 == 0); - rowLength = rowLen; - rowCapacity = size / rowLength; - initAfterAllocate(formatChanged); - } - else - { - allocSize = 0; - rowCapacity = 0; - } - - DBUG_PRINT("db2i_ioBuffers::allocBuf",("rowCapacity = %d", rowCapacity)); - } - - void zeroBuf() - { - memset(data, 0, allocSize); - } - - void freeBuf() - { - if (likely(allocSize)) - { - prepForFree(); - DBUG_PRINT("IORowBuffer::freeBuf",("Freeing 0x%p", (char*)data)); - data.dealloc(); - } - } - - char* getRowN(uint32 n) - { - if (unlikely(n >= getRowCapacity())) - return NULL; - return (char*)data + sizeof(BufferHdr_t) + (rowLength * n); - }; - - uint32 getRowCapacity() const {return rowCapacity;} - uint32 getRowNullOffset() const {return nullOffset;} - uint32 getRowLength() const {return rowLength;} - - protected: - /** - Called prior to freeing buffer storage so that subclasses can do - any required cleanup - */ - virtual void prepForFree() - { - allocSize = 0; - rowCapacity = 0; - } - - /** - Called after buffer storage so that subclasses can do any required setup. - */ - virtual void initAfterAllocate(bool sizeChanged) { return;} - - ValidatedPointer data; - uint32 allocSize; - uint32 rowCapacity; - uint32 rowLength; - uint16 nullOffset; - uint32& usedRows() const { return ((BufferHdr_t*)(char*)data)->UsedRowCnt; } - uint32& maxRows() const {return ((BufferHdr_t*)(char*)data)->MaxRowCnt; } -}; - - -/** - Write buffer - - Implements methods for inserting data into a row buffer for use with the - QMY_WRITE and QMY_UPDATE APIs. - - @details The max row count defines how many rows are in the buffer. The used - row count is updated by QMYSE to indicate how many rows have been - successfully written. -*/ -class IOWriteBuffer : public IORowBuffer -{ - public: - bool endOfBuffer() const {return (maxRows() == getRowCapacity());} - - char* addRow() - { - return getRowN(maxRows()++); - } - - void resetAfterWrite() - { - maxRows() = 0; - } - - void deleteRow() - { - --maxRows(); - } - - uint32 rowCount() const {return maxRows();} - - uint32 rowsWritten() const {return usedRows()-1;} - - private: - void initAfterAllocate(bool sizeChanged) {maxRows() = 0; usedRows() = 0;} -}; - - -/** - Read buffer - - Implements methods for reading data from and managing a row buffer for use - with the QMY_READ APIs. This is primarily for use with metainformation queries. -*/ -class IOReadBuffer : public IORowBuffer -{ - public: - - IOReadBuffer() {;} - IOReadBuffer(uint32 rows, uint32 rowLength) - { - allocBuf(rows, 0, rows * rowLength); - maxRows() = rows; - } - - uint32 rowCount() {return usedRows();} - void setRowsToProcess(uint32 rows) { maxRows() = rows; } -}; - - -/** - Read buffer - - Implements methods for reading data from and managing a row buffer for use - with the QMY_READ APIs. - - @details This class supports both sync and async read modes. 
The max row - count defines the number of rows that are requested to be read. The used row - count defines how many rows have been read. Sync mode is reasonably - straightforward, but async mode has a complex system of communicating with - QMYSE that is optimized for low latency. In async mode, the used row count is - updated continuously by QMYSE as rows are read. At the same time, messages are - sent to the associated pipe indicating that a row has been read. As long as - the internal read cursor lags behind the used row count, the pipe is never - consulted. But if the internal read cursor "catches up to" the used row count, - then we block on the pipe until we find a message indicating that a new row - has been read or that an error has occurred. -*/ -class IOAsyncReadBuffer : public IOReadBuffer -{ - public: - IOAsyncReadBuffer() : - file(0), readIsAsync(false), msgPipe(QMY_REUSE), bridge(NULL) - { - } - - ~IOAsyncReadBuffer() - { - interruptRead(); - rrnList.dealloc(); - } - - - /** - Signal read operation complete - - Indicates that the storage engine requires no more data from the table. - Must be called between calls to newReadRequest(). - */ - void endRead() - { -#ifndef DBUG_OFF - if (readCursor < rowCount()) - DBUG_PRINT("PERF:",("Wasting %d buffered rows!\n", rowCount() - readCursor)); -#endif - interruptRead(); - - file = 0; - bridge = NULL; - } - - /** - Update data that may change on each read operation - */ - void update(char newAccessIntent, - bool* newReleaseRowNeeded, - char commitLvl) - { - accessIntent = newAccessIntent; - releaseRowNeeded = newReleaseRowNeeded; - commitLevel = commitLvl; - } - - /** - Read the next row in the table. - - Return a pointer to the next row in the table, where "next" is defined - by the orientation. - - @param orientaton - @param[out] rrn The relative record number of the row returned. Not reliable - if NULL is returned by this function. - - @return Pointer to the row. Null if no more rows are available or an error - occurred. - */ - char* readNextRow(char orientation, uint32& rrn) - { - DBUG_PRINT("db2i_ioBuffers::readNextRow", ("readCursor: %d, filledRows: %d, rc: %d", readCursor, rowCount(), rc)); - - while (readCursor >= rowCount() && !rc) - { - if (!readIsAsync) - loadNewRows(orientation); - else - pollNextRow(orientation); - } - - if (readCursor >= rowCount()) - return NULL; - - rrn = rrnList[readCursor]; - return getRowN(readCursor++); - } - - /** - Retrieve the return code generated by the last operation. - - @return The return code, translated to the appropriate HA_ERR_* - value if possible. - */ - int32 lastrc() - { - return db2i_ileBridge::translateErrorCode(rc); - } - - void rewind() - { - readCursor = 0; - rc = 0; - usedRows() = 0; - } - - bool reachedEOD() { return EOD; } - - void newReadRequest(FILE_HANDLE infile, - char orientation, - uint32 rowsToBuffer, - bool useAsync, - ILEMemHandle key, - int keyLength, - int keyParts); - - private: - - /** - End any running async read operation. - */ - void interruptRead() - { - closePipe(); - if (file && readIsAsync && (rc == 0) && (rowCount() < getRowCapacity())) - { - DBUG_PRINT("IOReadBuffer::interruptRead", ("PERF: Interrupting %d", (uint32)file)); - getBridge()->readInterrupt(file); - } - } - - void closePipe() - { - if (msgPipe != QMY_REUSE) - { - DBUG_PRINT("db2i_ioBuffers::closePipe", ("Closing pipe %d", msgPipe)); - close(msgPipe); - msgPipe = QMY_REUSE; - } - } - - /** - Get a pointer to the active ILE bridge. 
- - Getting the bridge pointer is (relatively) expensive, so we cache - it off for each operation. - */ - db2i_ileBridge* getBridge() - { - if (unlikely(bridge == NULL)) - { - bridge = db2i_ileBridge::getBridgeForThread(); - } - return bridge; - } - - void drainPipe(); - void pollNextRow(char orientation); - void prepForFree(); - void initAfterAllocate(bool sizeChanged); - void loadNewRows(char orientation); - - - uint32 readCursor; // Read position within buffer - int32 rc; // Last return code received - ValidatedPointer rrnList; // Receiver for list of rrns - char accessIntent; // The access intent for this read - char commitLevel; // What isolation level should be used - char EOD; // Whether end-of-data was hit - char readIsAsync; // Are reads to be done asynchronously? - bool* releaseRowNeeded; - /* Does the caller need to release the current row when finished reading */ - FILE_HANDLE file; // The file to be read - int msgPipe; - /* The read descriptor of the pipe used to pass messages during async reads */ - db2i_ileBridge* bridge; // Cached pointer to bridge - uint32 rowsToBlock; // Number of rows to request - enum - { - ConsumedFullBufferMsg, - PendingFullBufferMsg, - Untouched - } pipeState; - /* The state of the async read message pipe */ -}; - diff --git a/storage/ibmdb2i/db2i_misc.h b/storage/ibmdb2i/db2i_misc.h deleted file mode 100644 index f0b527aaad0..00000000000 --- a/storage/ibmdb2i/db2i_misc.h +++ /dev/null @@ -1,129 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. 
-*/ - -#ifndef DB2I_MISC_H -#define DB2I_MISC_H - -/** - Undelimit quote-delimited DB2 names in-place -*/ -void stripExtraQuotes(char* name, uint maxLen) -{ - char* oldName = (char*)sql_strdup(name); - uint i = 0; - uint j = 0; - do - { - name[j] = oldName[i]; - if (oldName[i] == '"' && oldName[i+1] == '"') - ++i; - } while (++j < maxLen && oldName[++i]); - - if (j == maxLen) - --j; - name[j] = 0; -} - -/** - Convert a MySQL identifier name into a DB2 compatible format - - @parm input The MySQL name - @parm output The DB2 name - @parm outlen The amount of space allocated for output - @parm delimit Should delimiting quotes be placed around the converted name? - @parm delimitQuotes Should quotes in the MySQL be delimited with additional quotes? - - @return FALSE if output was too small and name was truncated; TRUE otherwise -*/ -bool convertMySQLNameToDB2Name(const char* input, - char* output, - size_t outlen, - bool delimit = true, - bool delimitQuotes = true) -{ - uint o = 0; - if (delimit) - output[o++] = '"'; - - uint i = 0; - do - { - output[o] = input[i]; - if (delimitQuotes && input[i] == '"') - output[++o] = '"'; - } while (++o < outlen-2 && input[++i]); - - if (delimit) - output[o++] = '"'; - output[min(o, outlen-1)] = 0; // This isn't the most user-friendly way to handle overflows, - // but at least its safe. - return (o <= outlen-1); -} - -bool isOrdinaryIdentifier(const char* s) -{ - while (*s) - { - if (my_isupper(system_charset_info, *s) || - my_isdigit(system_charset_info, *s) || - (*s == '_') || - (*s == '@') || - (*s == '$') || - (*s == '#') || - (*s == '"')) - ++s; - else - return false; - } - return true; -} - -/** - Fill memory with a 16-bit word. - - @param p Pointer to space to fill. - @param v Value to fill - @param l Length of space (in 16-bit words) -*/ -void memset16(void* p, uint16 v, size_t l) -{ - uint16* p2=(uint16*)p; - while (l--) - { - *(p2++) = v; - } -} - -#endif diff --git a/storage/ibmdb2i/db2i_myconv.cc b/storage/ibmdb2i/db2i_myconv.cc deleted file mode 100644 index 7be6e1236cd..00000000000 --- a/storage/ibmdb2i/db2i_myconv.cc +++ /dev/null @@ -1,1498 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - -/** - @file - - @brief A direct map optimization of iconv and related functions - This was show to significantly reduce character conversion cost - for short strings when compared to calling iconv system code. -*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "db2i_myconv.h" -#include "db2i_global.h" - -int32_t myconvDebug=0; - -static char szGetTimeString[20]; -static char * GetTimeString(time_t now) -{ - struct tm * tm; - - now = time(&now); - tm = (struct tm *) localtime(&now); - sprintf(szGetTimeString, "%04d/%02d/%02d %02d:%02d:%02d", - tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); - - return szGetTimeString; -} - -static MEM_ROOT dmapMemRoot; - -void initMyconv() -{ - init_alloc_root(&dmapMemRoot, 0x200, 0); -} - -void cleanupMyconv() -{ - free_root(&dmapMemRoot,0); -} - - -#ifdef DEBUG -/* type: */ -#define STDOUT_WITH_TIME -1 /* to stdout with time */ -#define STDERR_WITH_TIME -2 /* to stderr with time */ -#define STDOUT_WO_TIME 1 /* : to stdout */ -#define STDERR_WO_TIME 2 /* : to stderr */ - - -static void MyPrintf(long type, - char * fmt, ...) -{ - char StdoutFN[256]; - va_list ap; - char * p; - time_t now; - FILE * fd=stderr; - - if (type < 0) - { - now = time(&now); - fprintf(fd, "%s ", GetTimeString(now)); - } - va_start(ap, fmt); - vfprintf(fd, fmt, ap); - va_end(ap); -} -#endif - - - - -#define MAX_CONVERTER 128 - -mycstoccsid(const char* pname) -{ - if (strcmp(pname, "UTF-16")==0) - return 1200; - else if (strcmp(pname, "big5")==0) - return 950; - else - return cstoccsid(pname); -} -#define cstoccsid mycstoccsid - -static struct __myconv_rec myconv_rec [MAX_CONVERTER]; -static struct __dmap_rec dmap_rec [MAX_CONVERTER]; - -static int dmap_open(const char * to, - const char * from, - const int32_t idx) -{ - if (myconvIsSBCS(from) && myconvIsSBCS(to)) { - dmap_rec[idx].codingSchema = DMAP_S2S; - if ((dmap_rec[idx].dmapS2S = (uchar *) alloc_root(&dmapMemRoot, 0x100)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_S2S, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapS2S, 0x00, 0x100); - myconv_rec[idx].allocatedSize=0x100; - - { - char dmapSrc[0x100]; - iconv_t cd; - int32_t i; - size_t inBytesLeft=0x100; - size_t outBytesLeft=0x100; - size_t len; - char * inBuf=dmapSrc; - char * outBuf=(char *) dmap_rec[idx].dmapS2S; - - if ((cd = iconv_open(to, from)) == (iconv_t) -1) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed with iconv_open(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - return -1; - } - - inBytesLeft = 0x100; - for (i = 0; i < inBytesLeft; ++i) - dmapSrc[i]=i; - - do { - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d: iconv() returns %d, errno 
= %d in %s at %d\n", - to, from, idx, DMAP_S2S, len, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WITH_TIME, - "inBytesLeft = %d, inBuf - dmapSrc = %d\n", inBytesLeft, inBuf-dmapSrc); - MyPrintf(STDERR_WITH_TIME, - "outBytesLeft = %d, outBuf - dmapS2S = %d\n", outBytesLeft, outBuf-(char *) dmap_rec[idx].dmapS2S); - } - if ((inBytesLeft == 86 || inBytesLeft == 64 || inBytesLeft == 1) && - memcmp(from, "IBM-1256", 9) == 0 && - memcmp(to, "IBM-420", 8) == 0) { - /* Known problem for IBM-1256_IBM-420 */ - --inBytesLeft; - ++inBuf; - *outBuf=0x00; - ++outBuf; - --outBytesLeft; - continue; - } else if ((inBytesLeft == 173 || inBytesLeft == 172 || - inBytesLeft == 74 || inBytesLeft == 73 || - inBytesLeft == 52 || inBytesLeft == 50 || - inBytesLeft == 31 || inBytesLeft == 20 || - inBytesLeft == 6) && - memcmp(to, "IBM-1256", 9) == 0 && - memcmp(from, "IBM-420", 8) == 0) { - /* Known problem for IBM-420_IBM-1256 */ - --inBytesLeft; - ++inBuf; - *outBuf=0x00; - ++outBuf; - --outBytesLeft; - continue; - } else if ((128 >= inBytesLeft) && - memcmp(to, "IBM-037", 8) == 0 && - memcmp(from, "IBM-367", 8) == 0) { - /* Known problem for IBM-367_IBM-037 */ - --inBytesLeft; - ++inBuf; - *outBuf=0x00; - ++outBuf; - --outBytesLeft; - continue; - } else if (((1 <= inBytesLeft && inBytesLeft <= 4) || (97 <= inBytesLeft && inBytesLeft <= 128)) && - memcmp(to, "IBM-838", 8) == 0 && - memcmp(from, "TIS-620", 8) == 0) { - /* Known problem for TIS-620_IBM-838 */ - --inBytesLeft; - ++inBuf; - *outBuf=0x00; - ++outBuf; - --outBytesLeft; - continue; - } - iconv_close(cd); - return -1; -#else - /* Tolerant to undefined conversions for any converter */ - --inBytesLeft; - ++inBuf; - *outBuf=0x00; - ++outBuf; - --outBytesLeft; - continue; -#endif - } - } while (inBytesLeft > 0); - - if (myconvIsISO(to)) - myconv_rec[idx].subS=0x1A; - else if (myconvIsASCII(to)) - myconv_rec[idx].subS=0x7F; - else if (myconvIsEBCDIC(to)) - myconv_rec[idx].subS=0x3F; - - if (myconvIsISO(from)) - myconv_rec[idx].srcSubS=0x1A; - else if (myconvIsASCII(from)) - myconv_rec[idx].srcSubS=0x7F; - else if (myconvIsEBCDIC(from)) - myconv_rec[idx].srcSubS=0x3F; - - iconv_close(cd); - } - } else if (((myconvIsSBCS(from) && myconvIsUnicode2(to)) && (dmap_rec[idx].codingSchema = DMAP_S2U)) || - ((myconvIsSBCS(from) && myconvIsUTF8(to)) && (dmap_rec[idx].codingSchema = DMAP_S28))) { - int i; - - /* single byte mapping */ - if ((dmap_rec[idx].dmapD12U = (UniChar *) alloc_root(&dmapMemRoot, 0x100 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_S2U, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapD12U, 0x00, 0x100 * 2); - myconv_rec[idx].allocatedSize=0x100 * 2; - - - { - char dmapSrc[2]; - iconv_t cd; - int32_t i; - size_t inBytesLeft; - size_t outBytesLeft; - size_t len; - char * inBuf; - char * outBuf; - char SS=0x1A; -#ifdef support_surrogate - if ((cd = iconv_open("UTF-16", from)) == (iconv_t) -1) { -#else - if ((cd = iconv_open("UCS-2", from)) == (iconv_t) -1) { -#endif -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed with iconv_open(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - return -1; - } - - for (i = 0; i < 0x100; ++i) { - dmapSrc[0]=i; - inBuf=dmapSrc; - inBytesLeft=1; - outBuf=(char *) &(dmap_rec[idx].dmapD12U[i]); - outBytesLeft=2; - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if ((errno == EILSEQ || 
errno == EINVAL) && - inBytesLeft == 1 && - outBytesLeft == 2) { - continue; - } else { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(cd,%02x,%d,%02x%02x,%d), errno = %d in %s at %d\n", - to, from, idx, dmapSrc[0], 1, - (&dmap_rec[idx].dmapD12U[i])[0],(&dmap_rec[idx].dmapD12U[i])[1], 2, - errno, __FILE__,__LINE__); - MyPrintf(STDERR_WITH_TIME, - "inBytesLeft=%d, outBytesLeft=%d, %02x%02x\n", - inBytesLeft, outBytesLeft, - (&dmap_rec[idx].dmapD12U[i])[0],(&dmap_rec[idx].dmapD12U[i])[1]); - } -#endif - iconv_close(cd); - return -1; - } - dmap_rec[idx].dmapD12U[i]=0x0000; - } - if (dmap_rec[idx].dmapE02U[i] == 0x001A && /* pick the first one */ - myconv_rec[idx].srcSubS == 0x00) { - myconv_rec[idx].srcSubS=i; - } - } - iconv_close(cd); - } - myconv_rec[idx].subS=0x1A; - myconv_rec[idx].subD=0xFFFD; - - - } else if (((myconvIsUCS2(from) && myconvIsSBCS(to)) && (dmap_rec[idx].codingSchema = DMAP_U2S)) || - ((myconvIsUTF16(from) && myconvIsSBCS(to)) && (dmap_rec[idx].codingSchema = DMAP_T2S)) || - ((myconvIsUTF8(from) && myconvIsSBCS(to)) && (dmap_rec[idx].codingSchema = DMAP_82S))) { - /* UTF-16 -> SBCS, the direct map a bit of waste of space, - * binary search may be reasonable alternative - */ - if ((dmap_rec[idx].dmapU2S = (uchar *) alloc_root(&dmapMemRoot, 0x10000 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_U2S, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapU2S, 0x00, 0x10000); - myconv_rec[idx].allocatedSize=(0x10000 * 2); - - { - iconv_t cd; - int32_t i; - -#ifdef support_surrogate - if ((cd = iconv_open(to, "UTF-16")) == (iconv_t) -1) { -#else - if ((cd = iconv_open(to, "UCS-2")) == (iconv_t) -1) { -#endif -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed with iconv_open(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - return -1; - } - - for (i = 0; i < 0x100; ++i) { - UniChar dmapSrc[0x100]; - int32_t j; - for (j = 0; j < 0x100; ++j) { - dmapSrc[j]=i * 0x100 + j; - } - char * inBuf=(char *) dmapSrc; - char * outBuf=(char *) &(dmap_rec[idx].dmapU2S[i*0x100]); - size_t inBytesLeft=sizeof(dmapSrc); - size_t outBytesLeft=0x100; - size_t len; - - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if (inBytesLeft == 0 && outBytesLeft == 0) { /* a number of substitution returns */ - continue; - } -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - from, to, idx, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WITH_TIME, - "iconv() retuns %d, errno=%d, InBytesLeft=%d, OutBytesLeft=%d\n", - len, errno, inBytesLeft, outBytesLeft, __FILE__,__LINE__); - } -#endif - iconv_close(cd); - return -1; - } - } - iconv_close(cd); - - myconv_rec[idx].subS = dmap_rec[idx].dmapU2S[0x1A]; - myconv_rec[idx].subD = dmap_rec[idx].dmapU2S[0xFFFD]; - myconv_rec[idx].srcSubS = 0x1A; - myconv_rec[idx].srcSubD = 0xFFFD; - } - - - - } else if (((myconvIsDBCS(from) && myconvIsUnicode2(to)) && (dmap_rec[idx].codingSchema = DMAP_D2U)) || - ((myconvIsDBCS(from) && myconvIsUTF8(to)) && (dmap_rec[idx].codingSchema = DMAP_D28))) { - int i; - /* single byte mapping */ - if ((dmap_rec[idx].dmapD12U = (UniChar *) alloc_root(&dmapMemRoot, 0x100 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - 
"dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_D2U, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapD12U, 0x00, 0x100 * 2); - - /* double byte mapping, assume 7 bit ASCII is not use as the first byte of DBCS. */ - if ((dmap_rec[idx].dmapD22U = (UniChar *) alloc_root(&dmapMemRoot, 0x8000 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_D2U, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapD22U, 0x00, 0x8000 * 2); - - myconv_rec[idx].allocatedSize=(0x100 + 0x8000) * 2; - - - { - char dmapSrc[2]; - iconv_t cd; - int32_t i; - size_t inBytesLeft; - size_t outBytesLeft; - size_t len; - char * inBuf; - char * outBuf; - char SS=0x1A; - -#ifdef support_surrogate - if ((cd = iconv_open("UTF-16", from)) == (iconv_t) -1) { -#else - if ((cd = iconv_open("UCS-2", from)) == (iconv_t) -1) { -#endif -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed with iconv_open(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - return -1; - } - - for (i = 0; i < 0x100; ++i) { - dmapSrc[0]=i; - inBuf=dmapSrc; - inBytesLeft=1; - outBuf=(char *) (&dmap_rec[idx].dmapD12U[i]); - outBytesLeft=2; - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if ((errno == EILSEQ || errno == EINVAL) && - inBytesLeft == 1 && - outBytesLeft == 2) { - continue; - } else { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(cd,%02x,%d,%02x%02x,%d), errno = %d in %s at %d\n", - to, from, idx, dmapSrc[0], 1, - (&dmap_rec[idx].dmapD12U[i])[0],(&dmap_rec[idx].dmapD12U[i])[1], 2, - errno, __FILE__,__LINE__); - MyPrintf(STDERR_WITH_TIME, - "inBytesLeft=%d, outBytesLeft=%d, %02x%02x\n", - inBytesLeft, outBytesLeft, - (&dmap_rec[idx].dmapD12U[i])[0],(&dmap_rec[idx].dmapD12U[i])[1]); - } -#endif - iconv_close(cd); - return -1; - } - dmap_rec[idx].dmapD12U[i]=0x0000; - } - if (dmap_rec[idx].dmapD12U[i] == 0x001A && /* pick the first one */ - myconv_rec[idx].srcSubS == 0x00) { - myconv_rec[idx].srcSubS=i; - } - } - - - for (i = 0x80; i < 0x100; ++i) { - int j; - if (dmap_rec[idx].dmapD12U[i] != 0x0000) - continue; - for (j = 0x01; j < 0x100; ++j) { - dmapSrc[0]=i; - dmapSrc[1]=j; - int offset = i-0x80; - offset<<=8; - offset+=j; - - inBuf=dmapSrc; - inBytesLeft=2; - outBuf=(char *) &(dmap_rec[idx].dmapD22U[offset]); - outBytesLeft=2; - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if (inBytesLeft == 2 && outBytesLeft == 2 && (errno == EILSEQ || errno == EINVAL)) { - ; /* invalid DBCS character, dmapDD2U[offset] remains 0x0000 */ - } else { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(cd,%p,2,%p,2), errno = %d in %s at %d\n", - to, from, idx, - dmapSrc, &(dmap_rec[idx].dmapD22U[offset]), - errno, __FILE__,__LINE__); - MyPrintf(STDERR_WO_TIME, - "iconv(cd,0x%02x%02x,2,0x%04x,2) returns %d, inBytesLeft=%d, outBytesLeft=%d\n", - dmapSrc[0], dmapSrc[1], - dmap_rec[idx].dmapD22U[offset], - len, inBytesLeft, outBytesLeft); - } -#endif - iconv_close(cd); - return -1; - } - } else { -#ifdef TRACE_DMAP - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), rc=%d, errno=%d in %s at %d\n", - to, from, idx, len, errno, 
__FILE__,__LINE__); - MyPrintf(STDERR_WITH_TIME, - "%04X: src=%04X%04X, inBuf=0x%02X%02X, inBytesLeft=%d, outBuf=%02X%02X%02X, outBytesLeft=%d\n", - i, dmapSrc[0], dmapSrc[1], inBuf[0], inBuf[1], - inBytesLeft, outBuf[-2], outBuf[-1], outBuf[0], outBytesLeft); - MyPrintf(STDERR_WITH_TIME, - "&dmapSrc=%p, inBuf=%p, %p, outBuf=%p\n", - dmapSrc, inBuf, dmap_rec[idx].dmapU2M3 + (i - 0x80) * 2, outBuf); - } -#endif - } - } - if (dmap_rec[idx].dmapD12U[i] == 0xFFFD) { /* pick the last one */ - myconv_rec[idx].srcSubD=i* 0x100 + j; - } - } - iconv_close(cd); - } - - myconv_rec[idx].subS=0x1A; - myconv_rec[idx].subD=0xFFFD; - myconv_rec[idx].srcSubD=0xFCFC; - - - } else if (((myconvIsUCS2(from) && myconvIsDBCS(to)) && (dmap_rec[idx].codingSchema = DMAP_U2D)) || - ((myconvIsUTF16(from) && myconvIsDBCS(to)) && (dmap_rec[idx].codingSchema = DMAP_T2D)) || - ((myconvIsUTF8(from) && myconvIsDBCS(to)) && (dmap_rec[idx].codingSchema = DMAP_82D))) { - /* UTF-16 -> DBCS single/double byte */ - /* A single table will cover all characters, assuming no second byte is 0x00. */ - if ((dmap_rec[idx].dmapU2D = (uchar *) alloc_root(&dmapMemRoot, 0x10000 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_U2D, errno, __FILE__,__LINE__); -#endif - return -1; - } - - memset(dmap_rec[idx].dmapU2D, 0x00, 0x10000 * 2); - myconv_rec[idx].allocatedSize=(0x10000 * 2); - - { - UniChar dmapSrc[1]; - iconv_t cd; - int32_t i; - size_t inBytesLeft; - size_t outBytesLeft; - size_t len; - char * inBuf; - char * outBuf; - -#ifdef support_surrogate - if ((cd = iconv_open(to, "UTF-16")) == (iconv_t) -1) { -#else - if ((cd = iconv_open(to, "UCS-2")) == (iconv_t) -1) { -#endif -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed with iconv_open(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - return -1; - } - - /* easy implementation, convert 1 Unicode character at one time. */ - /* If the open performance is an issue, convert a chunk such as 128 chracters. */ - /* if the converted length is not the same as the original, convert one by one. 
*/ - (dmap_rec[idx].dmapU2D)[0x0000]=0x00; - for (i = 1; i < 0x10000; ++i) { - dmapSrc[0]=i; - inBuf=(char *) dmapSrc; - inBytesLeft=2; - outBuf=(char *) &((dmap_rec[idx].dmapU2D)[2*i]); - outBytesLeft=2; - do { - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if (len == 1 && inBytesLeft == 0 && outBytesLeft == 1 && (dmap_rec[idx].dmapU2D)[2*i] == 0x1A) { - /* UCS-2_TIS-620:0x0080 => 0x1A, converted to SBCS replacement character */ - (dmap_rec[idx].dmapU2D)[2*i+1]=0x00; - break; - } else if (len == 1 && inBytesLeft == 0 && outBytesLeft == 0) { - break; - } - if (errno == EILSEQ || errno == EINVAL) { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WO_TIME, - "iconv(cd,%04x,2,%02x%02x,2) returns inBytesLeft=%d, outBytesLeft=%d\n", - dmapSrc[0], - (dmap_rec[idx].dmapU2D)[2*i], (dmap_rec[idx].dmapU2D)[2*i+1], - inBytesLeft, outBytesLeft); - if (outBuf - (char *) dmap_rec[idx].dmapU2M2 > 1) - MyPrintf(STDERR_WO_TIME, "outBuf[-2..2]=%02X%02X%02X%02X%02X\n", outBuf[-2],outBuf[-1],outBuf[0],outBuf[1],outBuf[2]); - else - MyPrintf(STDERR_WO_TIME, "outBuf[0..2]=%02X%02X%02X\n", outBuf[0],outBuf[1],outBuf[2]); - } -#endif - inBuf+=2; - inBytesLeft-=2; - memcpy(outBuf, (char *) &(myconv_rec[idx].subD), 2); - outBuf+=2; - outBytesLeft-=2; - } else { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "[%d] dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - i, to, from, idx, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WO_TIME, - "iconv(cd,%04x,2,%02x%02x,2) returns %d inBytesLeft=%d, outBytesLeft=%d\n", - dmapSrc[0], - (dmap_rec[idx].dmapU2D)[2*i], - (dmap_rec[idx].dmapU2D)[2*i+1], - len, inBytesLeft,outBytesLeft); - if (i == 1) { - MyPrintf(STDERR_WO_TIME, - " inBuf [-1..2]=%02x%02x%02x%02x\n", - inBuf[-1],inBuf[0],inBuf[1],inBuf[2]); - MyPrintf(STDERR_WO_TIME, - " outBuf [-1..2]=%02x%02x%02x%02x\n", - outBuf[-1],outBuf[0],outBuf[1],outBuf[2]); - } else { - MyPrintf(STDERR_WO_TIME, - " inBuf [-2..2]=%02x%02x%02x%02x%02x\n", - inBuf[-2],inBuf[-1],inBuf[0],inBuf[1],inBuf[2]); - MyPrintf(STDERR_WO_TIME, - " outBuf [-2..2]=%02x%02x%02x%02x%02x\n", - outBuf[-2],outBuf[-1],outBuf[0],outBuf[1],outBuf[2]); - } -#endif - iconv_close(cd); - return -1; - } - if (len == 0 && inBytesLeft == 0 && outBytesLeft == 1) { /* converted to SBCS */ - (dmap_rec[idx].dmapU2D)[2*i+1]=0x00; - break; - } - } - } while (inBytesLeft > 0); - } - iconv_close(cd); - myconv_rec[idx].subS = dmap_rec[idx].dmapU2D[2*0x1A]; - myconv_rec[idx].subD = dmap_rec[idx].dmapU2D[2*0xFFFD] * 0x100 - + dmap_rec[idx].dmapU2D[2*0xFFFD+1]; - myconv_rec[idx].srcSubS = 0x1A; - myconv_rec[idx].srcSubD = 0xFFFD; - } - - - } else if (((myconvIsEUC(from) && myconvIsUnicode2(to)) && (dmap_rec[idx].codingSchema = DMAP_E2U)) || - ((myconvIsEUC(from) && myconvIsUTF8(to)) && (dmap_rec[idx].codingSchema = DMAP_E28))) { - int i; - /* S0: 0x00 - 0x7F */ - if ((dmap_rec[idx].dmapE02U = (UniChar *) alloc_root(&dmapMemRoot, 0x100 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_E2U, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapE02U, 0x00, 0x100 * 2); - - /* S1: 0xA0 - 0xFF, 0xA0 - 0xFF */ - if ((dmap_rec[idx].dmapE12U = (UniChar *) alloc_root(&dmapMemRoot, 0x60 * 0x60 * 2)) == NULL) { -#ifdef DEBUG - 
MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_E2U, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapE12U, 0x00, 0x60 * 0x60 * 2); - - /* SS2: 0x8E + 0xA0 - 0xFF, 0xA0 - 0xFF */ - if ((dmap_rec[idx].dmapE22U = (UniChar *) alloc_root(&dmapMemRoot, 0x60 * 0x61 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_E2U, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapE22U, 0x00, 0x60 * 0x61 * 2); - - /* SS3: 0x8F + 0xA0 - 0xFF, 0xA0 - 0xFF */ - if ((dmap_rec[idx].dmapE32U = (UniChar *) alloc_root(&dmapMemRoot, 0x60 * 0x61 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_E2U, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapE32U, 0x00, 0x60 * 0x61 * 2); - - myconv_rec[idx].allocatedSize=(0x100 + 0x60 * 0x60 + 0x60 * 0x61* 2) * 2; - - - { - char dmapSrc[0x60 * 0x60 * 3]; - iconv_t cd; - int32_t i; - size_t inBytesLeft; - size_t outBytesLeft; - size_t len; - char * inBuf; - char * outBuf; - char SS=0x8E; - -#ifdef support_surrogate - if ((cd = iconv_open("UTF-16", from)) == (iconv_t) -1) { -#else - if ((cd = iconv_open("UCS-2", from)) == (iconv_t) -1) { -#endif -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed with iconv_open(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - return -1; - } - - for (i = 0; i < 0x100; ++i) { - dmapSrc[0]=i; - inBuf=dmapSrc; - inBytesLeft=1; - outBuf=(char *) (&dmap_rec[idx].dmapE02U[i]); - outBytesLeft=2; - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); - } -#endif - dmap_rec[idx].dmapE02U[i]=0x0000; - } - if (dmap_rec[idx].dmapE02U[i] == 0x001A && /* pick the first one */ - myconv_rec[idx].srcSubS == 0x00) { - myconv_rec[idx].srcSubS=i; - } - } - - - inBuf=dmapSrc; - for (i = 0; i < 0x60; ++i) { - int j; - for (j = 0; j < 0x60; ++j) { - *inBuf=i+0xA0; - ++inBuf; - *inBuf=j+0xA0; - ++inBuf; - } - } - inBuf=dmapSrc; - inBytesLeft=0x60 * 0x60 * 2; - outBuf=(char *) dmap_rec[idx].dmapE12U; - outBytesLeft=0x60 * 0x60 * 2; - do { - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if (errno == EILSEQ) { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WO_TIME, "inBytesLeft=%d, outBytesLeft=%d\n", inBytesLeft, outBytesLeft); - if (inBuf - dmapSrc > 1 && inBuf - dmapSrc <= sizeof(dmapSrc) - 2) - MyPrintf(STDERR_WO_TIME, "inBuf[-2..2]=%02X%02X%02X%02X%02X\n", inBuf[-2],inBuf[-1],inBuf[0],inBuf[1],inBuf[2]); - else - MyPrintf(STDERR_WO_TIME, "inBuf[0..2]=%02X%02X%02X\n", inBuf[0],inBuf[1],inBuf[2]); - if (outBuf - (char *) dmap_rec[idx].dmapE12U > 1) - MyPrintf(STDERR_WO_TIME, "outBuf[-2..2]=%02X%02X%02X%02X%02X\n", outBuf[-2],outBuf[-1],outBuf[0],outBuf[1],outBuf[2]); - else - MyPrintf(STDERR_WO_TIME, "outBuf[0..2]=%02X%02X%02X\n", outBuf[0],outBuf[1],outBuf[2]); - } -#endif - inBuf+=2; - inBytesLeft-=2; - 
outBuf[0]=0x00; - outBuf[1]=0x00; - outBuf+=2; - outBytesLeft-=2; - } else { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - iconv_close(cd); - return -1; - } - } - } while (inBytesLeft > 0); - - /* SS2: 0x8E + 1 or 2 bytes */ - /* SS3: 0x8E + 1 or 2 bytes */ - while (SS != 0x00) { - int32_t numSuccess=0; - for (i = 0; i < 0x60; ++i) { - inBuf=dmapSrc; - inBuf[0]=SS; - inBuf[1]=i+0xA0; - inBytesLeft=2; - if (SS == 0x8E) - outBuf=(char *) &(dmap_rec[idx].dmapE22U[i]); - else - outBuf=(char *) &(dmap_rec[idx].dmapE32U[i]); - outBytesLeft=2; - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if (SS == 0x8E) - dmap_rec[idx].dmapE22U[i]=0x0000; - else - dmap_rec[idx].dmapE32U[i]=0x0000; - } else { - ++numSuccess; - } - } - if (numSuccess == 0) { /* SS2 is 2 bytes */ - inBuf=dmapSrc; - for (i = 0; i < 0x60; ++i) { - int j; - for (j = 0; j < 0x60; ++j) { - *inBuf=SS; - ++inBuf; - *inBuf=i+0xA0; - ++inBuf; - *inBuf=j+0xA0; - ++inBuf; - } - } - inBuf=dmapSrc; - inBytesLeft=0x60 * 0x60 * 3; - if (SS == 0x8E) - outBuf=(char *) &(dmap_rec[idx].dmapE22U[0x60]); - else - outBuf=(char *) &(dmap_rec[idx].dmapE32U[0x60]); - outBytesLeft=0x60 * 0x60 * 2; - do { - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "%02X:dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - SS, to, from, idx, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WO_TIME, "inBytesLeft=%d, outBytesLeft=%d\n", inBytesLeft, outBytesLeft); - if (inBuf - dmapSrc > 1 && inBuf - dmapSrc <= sizeof(dmapSrc) - 2) - MyPrintf(STDERR_WO_TIME, "inBuf[-2..2]=%02X%02X%02X%02X%02X\n", inBuf[-2],inBuf[-1],inBuf[0],inBuf[1],inBuf[2]); - else - MyPrintf(STDERR_WO_TIME, "inBuf[0..2]=%02X%02X%02X\n", inBuf[0],inBuf[1],inBuf[2]); - } -#endif - if (errno == EILSEQ || errno == EINVAL) { - inBuf+=3; - inBytesLeft-=3; - outBuf[0]=0x00; - outBuf[1]=0x00; - outBuf+=2; - outBytesLeft-=2; - } else { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "%02X:dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - SS, to, from, idx, errno, __FILE__,__LINE__); -#endif - iconv_close(cd); - return -1; - } - } - } while (inBytesLeft > 0); - } - if (SS == 0x8E) - SS=0x8F; - else - SS = 0x00; - } - iconv_close(cd); - - myconv_rec[idx].subS=0x1A; - myconv_rec[idx].subD=0xFFFD; - for (i = 0; i < 0x80; ++i) { - if (dmap_rec[idx].dmapE02U[i] == 0x001A) { - myconv_rec[idx].srcSubS=i; /* pick the first one */ - break; - } - } - - for (i = 0; i < 0x60 * 0x60; ++i) { - if (dmap_rec[idx].dmapE12U[i] == 0xFFFD) { - uchar byte1=i / 0x60; - uchar byte2=i % 0x60; - myconv_rec[idx].srcSubD=(byte1 + 0xA0) * 0x100 + (byte2 + 0xA0); /* pick the last one */ - } - } - - } - - } else if (((myconvIsUCS2(from) && myconvIsEUC(to)) && (dmap_rec[idx].codingSchema = DMAP_U2E)) || - ((myconvIsUTF16(from) && myconvIsEUC(to)) && (dmap_rec[idx].codingSchema = DMAP_T2E)) || - ((myconvIsUTF8(from) && myconvIsEUC(to)) && (dmap_rec[idx].codingSchema = DMAP_82E))) { - /* S0: 0x00 - 0xFF */ - if ((dmap_rec[idx].dmapU2S = (uchar *) alloc_root(&dmapMemRoot, 0x100)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_U2E, errno, __FILE__,__LINE__); -#endif - return -1; - } - 
memset(dmap_rec[idx].dmapU2S, 0x00, 0x100); - - /* U0080 - UFFFF -> S1: 0xA0 - 0xFF, 0xA0 - 0xFF */ - if ((dmap_rec[idx].dmapU2M2 = (uchar *) alloc_root(&dmapMemRoot, 0xFF80 * 2)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_U2E, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapU2M2, 0x00, 0xFF80 * 2); - - /* U0080 - UFFFF -> SS2: 0x8E + 0xA0 - 0xFF, 0xA0 - 0xFF - * SS3: 0x8F + 0xA0 - 0xFF, 0xA0 - 0xFF */ - if ((dmap_rec[idx].dmapU2M3 = (uchar *) alloc_root(&dmapMemRoot, 0xFF80 * 3)) == NULL) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d), CS=%d failed with malloc(), errno = %d in %s at %d\n", - to, from, idx, DMAP_U2E, errno, __FILE__,__LINE__); -#endif - return -1; - } - memset(dmap_rec[idx].dmapU2M3, 0x00, 0xFF80 * 3); - myconv_rec[idx].allocatedSize=(0x100 + 0xFF80 * 2 + 0xFF80 * 3); - - { - UniChar dmapSrc[0x80]; - iconv_t cd; - int32_t i; - size_t inBytesLeft; - size_t outBytesLeft; - size_t len; - char * inBuf; - char * outBuf; - -#ifdef support_surrogate - if ((cd = iconv_open(to, "UTF-16")) == (iconv_t) -1) { -#else - if ((cd = iconv_open(to, "UCS-2")) == (iconv_t) -1) { -#endif -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed with iconv_open(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - return -1; - } - - for (i = 0; i < 0x80; ++i) - dmapSrc[i]=i; - inBuf=(char *) dmapSrc; - inBytesLeft=0x80 * 2; - outBuf=(char *) dmap_rec[idx].dmapU2S; - outBytesLeft=0x80; - do { - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { -#ifdef DEBUG - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); -#endif - iconv_close(cd); - return -1; - } - } while (inBytesLeft > 0); - - myconv_rec[idx].srcSubS = 0x1A; - myconv_rec[idx].srcSubD = 0xFFFD; - myconv_rec[idx].subS = dmap_rec[idx].dmapU2S[0x1A]; - - outBuf=(char *) &(myconv_rec[idx].subD); - dmapSrc[0]=0xFFFD; - inBuf=(char *) dmapSrc; - inBytesLeft=2; - outBytesLeft=2; - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), rc=%d, errno=%d in %s at %d\n", - to, from, idx, len, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WO_TIME, "iconv(0x1A,1,%p,1) returns outBuf=%p, outBytesLeft=%d\n", - dmapSrc, outBuf, outBytesLeft); - } -#endif - if (outBytesLeft == 0) { - /* UCS-2_IBM-eucKR returns error. - myconv(iconv) rc=1, error=0, InBytesLeft=0, OutBytesLeft=18 - myconv(iconvRev) rc=-1, error=116, InBytesLeft=2, OutBytesLeft=20 - iconv: 0xFFFD => 0xAFFE => 0x rc=1,-1 sub=0,0 - */ - ; - } else { - iconv_close(cd); - return -1; - } - } - - for (i = 0x80; i < 0xFFFF; ++i) { - uchar eucBuf[3]; - dmapSrc[0]=i; - inBuf=(char *) dmapSrc; - inBytesLeft=2; - outBuf=(char *) eucBuf; - outBytesLeft=sizeof(eucBuf); - errno=0; - if ((len = iconv(cd, &inBuf, &inBytesLeft, &outBuf, &outBytesLeft)) != (size_t) 0) { - if (len == 1 && errno == 0 && inBytesLeft == 0 && outBytesLeft == 1) { /* substitution occurred. 
*/ continue; - } - - if (errno == EILSEQ) { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), errno = %d in %s at %d\n", - to, from, idx, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WO_TIME, "inBytesLeft=%d, outBytesLeft=%d\n", inBytesLeft, outBytesLeft); - if (inBuf - (char *) dmapSrc > 1 && inBuf - (char *) dmapSrc <= sizeof(dmapSrc) - 2) - MyPrintf(STDERR_WO_TIME, "inBuf[-2..2]=%02X%02X%02X%02X%02X\n", inBuf[-2],inBuf[-1],inBuf[0],inBuf[1],inBuf[2]); - else - MyPrintf(STDERR_WO_TIME, "inBuf[0..2]=%02X%02X%02X\n", inBuf[0],inBuf[1],inBuf[2]); - if (outBuf - (char *) dmap_rec[idx].dmapU2M2 > 1) - MyPrintf(STDERR_WO_TIME, "outBuf[-2..2]=%02X%02X%02X%02X%02X\n", outBuf[-2],outBuf[-1],outBuf[0],outBuf[1],outBuf[2]); - else - MyPrintf(STDERR_WO_TIME, "outBuf[0..2]=%02X%02X%02X\n", outBuf[0],outBuf[1],outBuf[2]); - } -#endif - inBuf+=2; - inBytesLeft-=2; - memcpy(outBuf, (char *) &(myconv_rec[idx].subD), 2); - outBuf+=2; - outBytesLeft-=2; - } else { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), rc = %d, errno = %d in %s at %d\n", - to, from, idx, len, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WITH_TIME, - "%04X: src=%04X%04X, inBuf=0x%02X%02X, inBytesLeft=%d, outBuf[-2..0]=%02X%02X%02X, outBytesLeft=%d\n", - i, dmapSrc[0], dmapSrc[1], inBuf[0], inBuf[1], - inBytesLeft, outBuf[-2], outBuf[-1], outBuf[0], outBytesLeft); - MyPrintf(STDERR_WITH_TIME, - "&dmapSrc=%p, inBuf=%p, dmapU2M2 + %d = %p, outBuf=%p\n", - dmapSrc, inBuf, (i - 0x80) * 2, dmap_rec[idx].dmapU2M2 + (i - 0x80) * 2, outBuf); - } -#endif - iconv_close(cd); - return -1; - } - } - if (sizeof(eucBuf) - outBytesLeft == 1) { - if (i < 0x100) { - (dmap_rec[idx].dmapU2S)[i]=eucBuf[0]; - } else { - dmap_rec[idx].dmapU2M2[(i - 0x80) * 2] = eucBuf[0]; - dmap_rec[idx].dmapU2M2[(i - 0x80) * 2 + 1] = 0x00; - } - } else if (sizeof(eucBuf) - outBytesLeft == 2) { /* 2 bytes */ - dmap_rec[idx].dmapU2M2[(i - 0x80) * 2] = eucBuf[0]; - dmap_rec[idx].dmapU2M2[(i - 0x80) * 2 + 1] = eucBuf[1]; - } else if (sizeof(eucBuf) - outBytesLeft == 3) { /* 3 byte SS2/SS3 */ - dmap_rec[idx].dmapU2M3[(i - 0x80) * 3] = eucBuf[0]; - dmap_rec[idx].dmapU2M3[(i - 0x80) * 3 + 1] = eucBuf[1]; - dmap_rec[idx].dmapU2M3[(i - 0x80) * 3 + 2] = eucBuf[2]; - } else { -#ifdef DEBUG - if (myconvDebug) { - MyPrintf(STDERR_WITH_TIME, - "dmap_open(%s,%s,%d) failed to initialize with iconv(), rc=%d, errno=%d in %s at %d\n", - to, from, idx, len, errno, __FILE__,__LINE__); - MyPrintf(STDERR_WITH_TIME, - "%04X: src=%04X%04X, inBuf=0x%02X%02X, inBytesLeft=%d, outBuf=%02X%02X%02X, outBytesLeft=%d\n", - i, dmapSrc[0], dmapSrc[1], inBuf[0], inBuf[1], - inBytesLeft, outBuf[-2], outBuf[-1], outBuf[0], outBytesLeft); - MyPrintf(STDERR_WITH_TIME, - "&dmapSrc=%p, inBuf=%p, %p, outBuf=%p\n", - dmapSrc, inBuf, dmap_rec[idx].dmapU2M3 + (i - 0x80) * 2, outBuf); - } -#endif - return -1; - } - - } - iconv_close(cd); - } - - } else if (myconvIsUTF16(from) && myconvIsUTF8(to)) { - dmap_rec[idx].codingSchema = DMAP_T28; - - } else if (myconvIsUCS2(from) && myconvIsUTF8(to)) { - dmap_rec[idx].codingSchema = DMAP_U28; - - } else if (myconvIsUTF8(from) && myconvIsUnicode2(to)) { - dmap_rec[idx].codingSchema = DMAP_82U; - - } else if (myconvIsUnicode2(from) && myconvIsUnicode2(to)) { - dmap_rec[idx].codingSchema = DMAP_U2U; - - } else { - - return -1; - } - myconv_rec[idx].cnv_dmap=&(dmap_rec[idx]); - return 0; -} - - - -static int bins_open(const char * to, 
- const char * from, - const int32_t idx) -{ - return -1; -} - - - -static int32_t dmap_close(const int32_t idx) -{ - if (dmap_rec[idx].codingSchema == DMAP_S2S) { - if (dmap_rec[idx].dmapS2S != NULL) { - dmap_rec[idx].dmapS2S=NULL; - } - } else if (dmap_rec[idx].codingSchema = DMAP_E2U) { - if (dmap_rec[idx].dmapE02U != NULL) { - dmap_rec[idx].dmapE02U=NULL; - } - if (dmap_rec[idx].dmapE12U != NULL) { - dmap_rec[idx].dmapE12U=NULL; - } - if (dmap_rec[idx].dmapE22U != NULL) { - dmap_rec[idx].dmapE22U=NULL; - } - if (dmap_rec[idx].dmapE32U != NULL) { - dmap_rec[idx].dmapE32U=NULL; - } - } - - return 0; -} - - -static int32_t bins_close(const int32_t idx) -{ - return 0; -} - - -myconv_t myconv_open(const char * toCode, - const char * fromCode, - int32_t converter) -{ - int32 i; - for (i = 0; i < MAX_CONVERTER; ++i) { - if (myconv_rec[i].converterType == 0) - break; - } - if (i >= MAX_CONVERTER) - return ((myconv_t) -1); - - myconv_rec[i].converterType = converter; - myconv_rec[i].index=i; - myconv_rec[i].fromCcsid=cstoccsid(fromCode); - if (myconv_rec[i].fromCcsid == 0 && memcmp(fromCode, "big5",5) == 0) - myconv_rec[i].fromCcsid=950; - myconv_rec[i].toCcsid=cstoccsid(toCode); - if (myconv_rec[i].toCcsid == 0 && memcmp(toCode, "big5",5) == 0) - myconv_rec[i].toCcsid=950; - strncpy(myconv_rec[i].from, fromCode, sizeof(myconv_rec[i].from)-1); - strncpy(myconv_rec[i].to, toCode, sizeof(myconv_rec[i].to)-1); - - if (converter == CONVERTER_ICONV) { - if ((myconv_rec[i].cnv_iconv=iconv_open(toCode, fromCode)) == (iconv_t) -1) { - return ((myconv_t) -1); - } - myconv_rec[i].allocatedSize = -1; - myconv_rec[i].srcSubS=myconvGetSubS(fromCode); - myconv_rec[i].srcSubD=myconvGetSubD(fromCode); - myconv_rec[i].subS=myconvGetSubS(toCode); - myconv_rec[i].subD=myconvGetSubD(toCode); - return &(myconv_rec[i]); - } else if (converter == CONVERTER_DMAP && - dmap_open(toCode, fromCode, i) != -1) { - return &(myconv_rec[i]); - } - return ((myconv_t) -1); -} - - - -int32_t myconv_close(myconv_t cd) -{ - int32_t ret=0; - - if (cd->converterType == CONVERTER_ICONV) { - ret=iconv_close(cd->cnv_iconv); - } else if (cd->converterType == CONVERTER_DMAP) { - ret=dmap_close(cd->index); - } - memset(&(myconv_rec[cd->index]), 0x00, sizeof(myconv_rec[cd->index])); - return ret; -} - - - - -/* reference: http://www-306.ibm.com/software/globalization/other/es.jsp */ -/* systemCL would be expensive, and myconvIsXXXXX is called frequently. 
- need to cache entries */ -#define MAX_CCSID 256 -static int ccsidList [MAX_CCSID]; -static int esList [MAX_CCSID]; -int32 getEncodingScheme(const uint16 inCcsid, int32& outEncodingScheme); -EXTERN int myconvGetES(CCSID ccsid) -{ - /* call QtqValidateCCSID in ILE to get encoding schema */ - /* return QtqValidateCCSID(ccsid); */ - int i; - for (i = 0; i < MAX_CCSID; ++i) { - if (ccsidList[i] == ccsid) - return esList[i]; - if (ccsidList[i] == 0x00) - break; - } - - if (i >= MAX_CCSID) { - i=MAX_CCSID-1; - } - - { - ccsidList[i]=ccsid; - getEncodingScheme(ccsid, esList[i]); -#ifdef DEBUG_PASE - if (myconvDebug) { - fprintf(stderr, "CCSID=%d, ES=0x%04X\n", ccsid, esList[i]); - } -#endif - return esList[i]; - } - return 0; -} - - -EXTERN int myconvIsEBCDIC(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x1100 || - es == 0x1200 || - es == 0x6100 || - es == 0x6200 || - es == 0x1301 ) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsISO(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x4100 || - es == 0x4105 || - es == 0x4155 || - es == 0x5100 || - es == 0x5150 || - es == 0x5200 || - es == 0x5404 || - es == 0x5409 || - es == 0x540A || - es == 0x5700) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsASCII(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x2100 || - es == 0x3100 || - es == 0x8100 || - es == 0x2200 || - es == 0x3200 || - es == 0x9200 || - es == 0x2300 || - es == 0x2305 || - es == 0x3300 || - es == 0x2900 || - es == 0x2A00) { - return TRUE; - } else if (memcmp(pName, "big5", 5) == 0) { - return TRUE; - } - return FALSE; -} - - - -EXTERN int myconvIsUCS2(const char * pName) -{ - if (cstoccsid(pName) == 13488) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsUTF16(const char * pName) -{ - if (cstoccsid(pName) == 1200) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsUnicode2(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x7200 || - es == 0x720B || - es == 0x720F) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsUTF8(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x7807) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsUnicode(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x7200 || - es == 0x720B || - es == 0x720F || - es == 0x7807) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsEUC(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x4403) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsDBCS(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x1200 || - es == 0x2200 || - es == 0x2300 || - es == 0x2305 || - es == 0x2A00 || - es == 0x3200 || - es == 0x3300 || - es == 0x5200 || - es == 0x6200 || - es == 0x9200) { - return TRUE; - } else if (memcmp(pName, "big5", 5) == 0) { - return TRUE; - } - return FALSE; -} - - -EXTERN int myconvIsSBCS(const char * pName) -{ - int es = myconvGetES(cstoccsid(pName)); - if (es == 0x1100 || - es == 0x2100 || - es == 0x3100 || - es == 0x4100 || - es == 0x4105 || - es == 0x5100 || - es == 0x5150 || - es == 0x6100 || - es == 0x8100) { - return TRUE; - } - return FALSE; -} - - - -EXTERN char myconvGetSubS(const char * code) -{ - if (myconvIsEBCDIC(code)) { - return 0x3F; - } else if (myconvIsASCII(code)) { - return 0x1A; - } else if (myconvIsISO(code)) { - return 0x1A; - } else if (myconvIsEUC(code)) { - return 
0x1A; - } else if (myconvIsUCS2(code)) { - return 0x00; - } else if (myconvIsUTF8(code)) { - return 0x1A; - } - return 0x00; -} - - -EXTERN UniChar myconvGetSubD(const char * code) -{ - if (myconvIsEBCDIC(code)) { - return 0xFDFD; - } else if (myconvIsASCII(code)) { - return 0xFCFC; - } else if (myconvIsISO(code)) { - return 0x00; - } else if (myconvIsEUC(code)) { - return 0x00; - } else if (myconvIsUCS2(code)) { - return 0xFFFD; - } else if (myconvIsUTF8(code)) { - return 0x00; - } - return 0x00; -} - diff --git a/storage/ibmdb2i/db2i_myconv.h b/storage/ibmdb2i/db2i_myconv.h deleted file mode 100644 index 98032748148..00000000000 --- a/storage/ibmdb2i/db2i_myconv.h +++ /dev/null @@ -1,3201 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - -/** - @file - - @brief A direct map optimization of iconv and related functions - This was show to significantly reduce character conversion cost - for short strings when compared to calling iconv system code. -*/ - -#ifndef DB2I_MYCONV_H -#define DB2I_MYCONV_H - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef TRUE -#define TRUE 1 -#endif - -#ifndef FALSE -#define FALSE 0 -#endif - -#ifdef __cplusplus -#define INTERN inline -#define EXTERN extern "C" -#else -#define INTERN static -#define EXTERN extern -#endif - - -/* ANSI integer data types */ -#if defined(__OS400_TGTVRM__) -/* for DTAMDL(*P128), datamodel(P128): int/long/pointer=4/4/16 */ -/* LLP64:4/4/8 is used for teraspace ?? 
*/ -typedef short int16_t; -typedef unsigned short uint16_t; -typedef int int32_t; -typedef unsigned int uint32_t; -typedef long long int64_t; -typedef unsigned long long uint64_t; -#elif defined(PASE) -/* PASE uses IPL32: int/long/pointer=4/4/4 + long long */ -#elif defined(__64BIT__) -/* AIX 64 bit uses LP64: int/long/pointer=4/8/8 */ -#endif - -#define CONVERTER_ICONV 1 -#define CONVERTER_DMAP 2 - -#define DMAP_S2S 10 -#define DMAP_S2U 20 -#define DMAP_D2U 30 -#define DMAP_E2U 40 -#define DMAP_U2S 120 -#define DMAP_T2S 125 -#define DMAP_U2D 130 -#define DMAP_T2D 135 -#define DMAP_U2E 140 -#define DMAP_T2E 145 -#define DMAP_S28 220 -#define DMAP_D28 230 -#define DMAP_E28 240 -#define DMAP_82S 310 -#define DMAP_82D 320 -#define DMAP_82E 330 -#define DMAP_U28 410 -#define DMAP_82U 420 -#define DMAP_T28 425 -#define DMAP_U2U 510 - - -typedef struct __dmap_rec *dmap_t; - -struct __dmap_rec -{ - uint32_t codingSchema; - unsigned char * dmapS2S; /* SBCS -> SBCS */ - /* The following conversion needs be followed by conversion from UCS-2/UTF-16 to UTF-8 */ - UniChar * dmapD12U; /* DBCS(non-EUC) -> UCS-2/UTF-16 */ - UniChar * dmapD22U; /* DBCS(non-EUC) -> UCS-2/UTF-16 */ - UniChar * dmapE02U; /* EUC/SS0 -> UCS-2/UTF-16 */ - UniChar * dmapE12U; /* EUC/SS1 -> UCS-2/UTF-16 */ - UniChar * dmapE22U; /* EUC/0x8E + SS2 -> UCS-2/UTF-16 */ - UniChar * dmapE32U; /* EUC/0x8F + SS3 -> UCS-2/UTF-16 */ - uchar * dmapU2D; /* UCS-2 -> DBCS */ - uchar * dmapU2S; /* UCS-2 -> EUC SS0 */ - uchar * dmapU2M2; /* UCS-2 -> EUC SS1 */ - uchar * dmapU2M3; /* UCS-2 -> EUC SS2/SS3 */ - /* All of these pointers/tables are not used at the same time. - * You may be able save some space if you consolidate them. - */ - uchar * dmapS28; /* SBCS -> UTF-8 */ - uchar * dmapD28; /* DBCS -> UTF-8 */ -}; - -typedef struct __myconv_rec *myconv_t; -struct __myconv_rec -{ - uint32_t converterType; - uint32_t index; /* for close */ - union { - iconv_t cnv_iconv; - dmap_t cnv_dmap; - }; - int32_t allocatedSize; - int32_t fromCcsid; - int32_t toCcsid; - UniChar subD; /* DBCS substitution char */ - char subS; /* SBCS substitution char */ - UniChar srcSubD; /* DBCS substitution char of src codepage */ - char srcSubS; /* SBCS substitution char of src codepage */ - char from [41+1]; /* codepage name is up to 41 bytes */ - char to [41+1]; /* codepage name is up to 41 bytes */ -#ifdef __64BIT__ - char reserved[10]; /* align 128 */ -#else - char reserved[14]; /* align 128 */ -#endif -}; - - -EXTERN int32_t myconvDebug; - - - -EXTERN int myconvGetES(CCSID); -EXTERN int myconvIsEBCDIC(const char *); -EXTERN int myconvIsASCII(const char *); -EXTERN int myconvIsUnicode(const char *); /* UTF-8, UTF-16, or UCS-2 */ -EXTERN int myconvIsUnicode2(const char *); /* 2 byte Unicode */ -EXTERN int myconvIsUCS2(const char *); -EXTERN int myconvIsUTF16(const char *); -EXTERN int myconvIsUTF8(const char *); -EXTERN int myconvIsEUC(const char *); -EXTERN int myconvIsISO(const char *); -EXTERN int myconvIsSBCS(const char *); -EXTERN int myconvIsDBCS(const char *); -EXTERN char myconvGetSubS(const char *); -EXTERN UniChar myconvGetSubD(const char *); - - -EXTERN myconv_t myconv_open(const char*, const char*, int32_t); -EXTERN int myconv_close(myconv_t); - -INTERN size_t myconv_iconv(myconv_t cd , - char** inBuf, - size_t* inBytesLeft, - char** outBuf, - size_t* outBytesLeft, - size_t* numSub) -{ - return iconv(cd->cnv_iconv, inBuf, inBytesLeft, outBuf, outBytesLeft); -} - -INTERN size_t myconv_dmap(myconv_t cd, - char** inBuf, - size_t* inBytesLeft, - char** outBuf, 
- size_t* outBytesLeft, - size_t* numSub) -{ - if (cd->cnv_dmap->codingSchema == DMAP_S2S) { - register unsigned char * dmapS2S=cd->cnv_dmap->dmapS2S; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register size_t numS=0; - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - } else { - *pOut=dmapS2S[*pIn]; - if (*pOut == 0x00) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(*inBytesLeft-inLen); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - if (*pOut == subS) { - if ((*pOut=dmapS2S[*pIn]) == subS) { - if (*pIn != cd->srcSubS) - ++numS; - } - } - } - ++pIn; - --inLen; - ++pOut; - } - *outBytesLeft-=(*inBytesLeft-inLen); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_E2U) { - /* use uchar * instead of UniChar to avoid memcpy */ - register uchar * dmapE02U=(uchar *) (cd->cnv_dmap->dmapE02U); - register uchar * dmapE12U=(uchar *) (cd->cnv_dmap->dmapE12U); - register uchar * dmapE22U=(uchar *) (cd->cnv_dmap->dmapE22U); - register uchar * dmapE32U=(uchar *) (cd->cnv_dmap->dmapE32U); - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register int offset; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register size_t numS=0; - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - ++pOut; - *pOut=0x00; - ++pOut; - ++pIn; - --inLen; - } else { - if (*pIn == 0x8E) { /* SS2 */ - if (inLen < 2) { - if (cd->fromCcsid == 33722 || /* IBM-eucJP */ - cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - ++pIn; - if (*pIn < 0xA0) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn - 0xA0); - offset<<=1; - if (dmapE22U[offset] == 0x00 && - dmapE22U[offset+1] == 0x00) { /* 2 bytes */ - if (inLen < 3) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn - 0xA0) * 0x60 + 0x60; - ++pIn; - if (*pIn < 0xA0) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - offset+=(*pIn - 0xA0); - offset<<=1; - if (dmapE22U[offset] == 0x00 && - dmapE22U[offset+1] == 0x00) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - *pOut=dmapE22U[offset]; - ++pOut; - *pOut=dmapE22U[offset+1]; - ++pOut; - if (dmapE22U[offset] == 0xFF && - dmapE22U[offset+1] == 0xFD) { - if (pIn[-2] * 0x100 + pIn[-1] != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=3; - } else { /* 1 bytes */ - *pOut=dmapE22U[offset]; - ++pOut; - *pOut=dmapE22U[offset+1]; - ++pOut; - ++pIn; - inLen-=2; - } - } else if (*pIn == 0x8F) { /* SS3 */ - if (inLen < 2) { - if (cd->fromCcsid 
== 33722) /* IBM-eucJP */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - ++pIn; - if (*pIn < 0xA0) { - if (cd->fromCcsid == 970 || /* IBM-eucKR */ - cd->fromCcsid == 964 || /* IBM-eucTW */ - cd->fromCcsid == 1383 || /* IBM-eucCN */ - (cd->fromCcsid == 33722 && 3 <= inLen)) /* IBM-eucJP */ - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn - 0xA0); - offset<<=1; - if (dmapE32U[offset] == 0x00 && - dmapE32U[offset+1] == 0x00) { /* 0x8F + 2 bytes */ - if (inLen < 3) { - if (cd->fromCcsid == 33722) - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn - 0xA0) * 0x60 + 0x60; - ++pIn; - if (*pIn < 0xA0) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - offset+=(*pIn - 0xA0); - offset<<=1; - if (dmapE32U[offset] == 0x00 && - dmapE32U[offset+1] == 0x00) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - *pOut=dmapE32U[offset]; - ++pOut; - *pOut=dmapE32U[offset+1]; - ++pOut; - if (dmapE32U[offset] == 0xFF && - dmapE32U[offset+1] == 0xFD) { - if (pIn[-2] * 0x100 + pIn[-1] != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=3; - } else { /* 0x8F + 1 bytes */ - *pOut=dmapE32U[offset]; - ++pOut; - *pOut=dmapE32U[offset+1]; - ++pOut; - ++pIn; - inLen-=2; - } - - } else { - offset=*pIn; - offset<<=1; - if (dmapE02U[offset] == 0x00 && - dmapE02U[offset+1] == 0x00) { /* SS1 */ - if (inLen < 2) { - if ((cd->fromCcsid == 33722 && (*pIn == 0xA0 || (0xA9 <= *pIn && *pIn <= 0xAF) || *pIn == 0xFF)) || - (cd->fromCcsid == 970 && (*pIn == 0xA0 || *pIn == 0xAD || *pIn == 0xAE || *pIn == 0xAF || *pIn == 0xFF)) || - (cd->fromCcsid == 964 && (*pIn == 0xA0 || (0xAA <= *pIn && *pIn <= 0xC1) || *pIn == 0xC3 || *pIn == 0xFE || *pIn == 0xFF)) || - (cd->fromCcsid == 1383 && (*pIn == 0xA0 || *pIn == 0xFF))) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - if (*pIn < 0xA0) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - offset=(*pIn - 0xA0) * 0x60; - ++pIn; - if (*pIn < 0xA0) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset+=(*pIn - 0xA0); - offset<<=1; - if (dmapE12U[offset] == 0x00 && - dmapE12U[offset+1] == 0x00) { /* undefined mapping */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - *pOut=dmapE12U[offset]; - ++pOut; - *pOut=dmapE12U[offset+1]; - ++pOut; - if (dmapE12U[offset] == 0xFF && - dmapE12U[offset+1] == 0xFD) { - if (pIn[-1] * 0x100 + pIn[0] != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=2; - } else { - *pOut=dmapE02U[offset]; - ++pOut; - *pOut=dmapE02U[offset+1]; - ++pOut; - if (dmapE02U[offset] == 0x00 && - dmapE02U[offset+1] == 0x1A) { - if (*pIn != cd->srcSubS) - ++numS; - } - ++pIn; - --inLen; - } - } - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - 
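
For context on the branch above: the direct-map converters built by dmap_open() and consumed by myconv_dmap() pay the iconv cost once, at open time, and then translate by plain table lookup per character, which is what makes short-string conversion cheap. The following is a minimal, simplified sketch of that idea for the easiest stateless single-byte (SBCS-to-SBCS) case only; the names buildSbcsMap and sbcsConvert are hypothetical and are not part of the removed files, and unlike the real code (which converts all 256 source bytes in one iconv call and tracks substitution characters) it converts one byte at a time and simply leaves unmappable bytes as 0x00.

    #include <iconv.h>
    #include <cstring>
    #include <cstddef>

    // Build a 256-entry translation table once, using iconv for each source byte.
    // Bytes that iconv cannot map (or that need more than one output byte) stay 0x00.
    static bool buildSbcsMap(const char* to, const char* from, unsigned char map[256])
    {
      iconv_t cd = iconv_open(to, from);
      if (cd == (iconv_t) -1)
        return false;
      memset(map, 0x00, 256);
      for (int i = 1; i < 256; ++i)
      {
        char src = (char) i;
        char dst = 0;
        char* in  = &src;
        char* out = &dst;
        size_t inLeft = 1, outLeft = 1;
        if (iconv(cd, &in, &inLeft, &out, &outLeft) != (size_t) -1)
          map[i] = (unsigned char) dst;
      }
      iconv_close(cd);
      return true;
    }

    // Convert a buffer with one table lookup per byte; 0x00 input maps to 0x00.
    static void sbcsConvert(const unsigned char map[256],
                            const char* in, char* out, size_t len)
    {
      for (size_t i = 0; i < len; ++i)
        out[i] = in[i] ? (char) map[(unsigned char) in[i]] : 0;
    }

The multi-byte branches in the real code (EUC, DBCS, UCS-2/UTF-16) follow the same pattern, just with larger tables indexed by the lead/trail bytes and with explicit EILSEQ/EINVAL handling, as seen above and below.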
- } else if (cd->cnv_dmap->codingSchema == DMAP_E28) { - /* use uchar * instead of UniChar to avoid memcpy */ - register uchar * dmapE02U=(uchar *) (cd->cnv_dmap->dmapE02U); - register uchar * dmapE12U=(uchar *) (cd->cnv_dmap->dmapE12U); - register uchar * dmapE22U=(uchar *) (cd->cnv_dmap->dmapE22U); - register uchar * dmapE32U=(uchar *) (cd->cnv_dmap->dmapE32U); - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register int offset; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register size_t numS=0; - register UniChar in; /* copy part of U28 */ - register UniChar ucs2; - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - ++pOut; - ++pIn; - --inLen; - } else { - if (*pIn == 0x8E) { /* SS2 */ - if (inLen < 2) { - if (cd->fromCcsid == 33722 || /* IBM-eucJP */ - cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - ++pIn; - if (*pIn < 0xA0) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn - 0xA0); - offset<<=1; - if (dmapE22U[offset] == 0x00 && - dmapE22U[offset+1] == 0x00) { /* 2 bytes */ - if (inLen < 3) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn - 0xA0) * 0x60 + 0x60; - ++pIn; - if (*pIn < 0xA0) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - offset+=(*pIn - 0xA0); - offset<<=1; - if (dmapE22U[offset] == 0x00 && - dmapE22U[offset+1] == 0x00) { - if (cd->fromCcsid == 964) /* IBM-eucTW */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - in=dmapE22U[offset]; - in<<=8; - in+=dmapE22U[offset+1]; - if (dmapE22U[offset] == 0xFF && - dmapE22U[offset+1] == 0xFD) { - if (pIn[-2] * 0x100 + pIn[-1] != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=3; - } else { /* 1 bytes */ - in=dmapE22U[offset]; - in<<=8; - in+=dmapE22U[offset+1]; - ++pIn; - inLen-=2; - } - } else if (*pIn == 0x8F) { /* SS3 */ - if (inLen < 2) { - if (cd->fromCcsid == 33722) /* IBM-eucJP */ - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - ++pIn; - if (*pIn < 0xA0) { - if (cd->fromCcsid == 970 || /* IBM-eucKR */ - cd->fromCcsid == 964 || /* IBM-eucTW */ - cd->fromCcsid == 1383 || /* IBM-eucCN */ - (cd->fromCcsid == 33722 && 3 <= inLen)) /* IBM-eucJP */ - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn - 0xA0); - offset<<=1; - if (dmapE32U[offset] == 0x00 && - dmapE32U[offset+1] == 0x00) { /* 0x8F + 2 bytes */ - if (inLen < 3) { - if (cd->fromCcsid == 33722) - errno=EINVAL; /* 22 */ - else - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset=(*pIn 
- 0xA0) * 0x60 + 0x60; - ++pIn; - if (*pIn < 0xA0) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - offset+=(*pIn - 0xA0); - offset<<=1; - if (dmapE32U[offset] == 0x00 && - dmapE32U[offset+1] == 0x00) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - return -1; - } - in=dmapE32U[offset]; - in<<=8; - in+=dmapE32U[offset+1]; - if (dmapE32U[offset] == 0xFF && - dmapE32U[offset+1] == 0xFD) { - if (pIn[-2] * 0x100 + pIn[-1] != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=3; - } else { /* 0x8F + 1 bytes */ - in=dmapE32U[offset]; - in<<=8; - in+=dmapE32U[offset+1]; - ++pIn; - inLen-=2; - } - - } else { - offset=*pIn; - offset<<=1; - if (dmapE02U[offset] == 0x00 && - dmapE02U[offset+1] == 0x00) { /* SS1 */ - if (inLen < 2) { - if ((cd->fromCcsid == 33722 && (*pIn == 0xA0 || (0xA9 <= *pIn && *pIn <= 0xAF) || *pIn == 0xFF)) || - (cd->fromCcsid == 970 && (*pIn == 0xA0 || *pIn == 0xAD || *pIn == 0xAE || *pIn == 0xAF || *pIn == 0xFF)) || - (cd->fromCcsid == 964 && (*pIn == 0xA0 || (0xAA <= *pIn && *pIn <= 0xC1) || *pIn == 0xC3 || *pIn == 0xFE || *pIn == 0xFF)) || - (cd->fromCcsid == 1383 && (*pIn == 0xA0 || *pIn == 0xFF))) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - if (*pIn < 0xA0) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - offset=(*pIn - 0xA0) * 0x60; - ++pIn; - if (*pIn < 0xA0) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - offset+=(*pIn - 0xA0); - offset<<=1; - if (dmapE12U[offset] == 0x00 && - dmapE12U[offset+1] == 0x00) { /* undefined mapping */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - in=dmapE12U[offset]; - in<<=8; - in+=dmapE12U[offset+1]; - if (dmapE12U[offset] == 0xFF && - dmapE12U[offset+1] == 0xFD) { - if (pIn[-1] * 0x100 + pIn[0] != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=2; - } else { - in=dmapE02U[offset]; - in<<=8; - in+=dmapE02U[offset+1]; - if (dmapE02U[offset] == 0x00 && - dmapE02U[offset+1] == 0x1A) { - if (*pIn != cd->srcSubS) - ++numS; - } - ++pIn; - --inLen; - } - } - ucs2=in; - if ((in & 0xFF80) == 0x0000) { /* U28: in & 0b1111111110000000 == 0x0000 */ - *pOut=in; - ++pOut; - } else if ((in & 0xF800) == 0x0000) { /* in & 0b1111100000000000 == 0x0000 */ - register uchar byte; - in>>=6; - in&=0x001F; /* 0b0000000000011111 */ - in|=0x00C0; /* 0b0000000011000000 */ - *pOut=in; - ++pOut; - byte=ucs2; /* dmapD12U[offset+1]; */ - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } else if ((in & 0xFC00) == 0xD800) { - *pOut=0xEF; - ++pOut; - *pOut=0xBF; - ++pOut; - *pOut=0xBD; - ++pOut; - } else { - register uchar byte; - register uchar work; - byte=(ucs2>>8); /* dmapD12U[offset]; */ - byte>>=4; - byte|=0xE0; /* 0b11100000; */ - *pOut=byte; - ++pOut; - - byte=(ucs2>>8); /* dmapD12U[offset]; */ - byte<<=2; - work=ucs2; /* dmapD12U[offset+1]; */ - work>>=6; - byte|=work; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - - byte=ucs2; /* dmapD12U[offset+1]; */ - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } - /* end of U28 */ - } - } - 
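For reference, the "U28" step that this DMAP_E28 loop (and the later DMAP_S28/DMAP_D28 branches) repeats inline expands one UCS-2 unit into 1, 2 or 3 UTF-8 bytes, replacing a lone high surrogate with U+FFFD (EF BF BD). A minimal standalone restatement of that step, assuming a 16-bit input and at least 3 bytes of room in the output buffer; the helper name utf8_from_ucs2 is made up for illustration and does not appear in this code:

/* Illustrative sketch only: mirrors the "U28" branch tests used above. */
static int utf8_from_ucs2(unsigned short in, unsigned char *out)
{
  if ((in & 0xFF80) == 0x0000) {                         /* 1-byte form: 0xxxxxxx */
    out[0] = (unsigned char) in;
    return 1;
  }
  if ((in & 0xF800) == 0x0000) {                         /* 2-byte form: 110yyyyy 10xxxxxx */
    out[0] = (unsigned char) (0xC0 | (in >> 6));
    out[1] = (unsigned char) (0x80 | (in & 0x3F));
    return 2;
  }
  if ((in & 0xFC00) == 0xD800) {                         /* lone high surrogate: emit U+FFFD, as above */
    out[0] = 0xEF; out[1] = 0xBF; out[2] = 0xBD;
    return 3;
  }
  out[0] = (unsigned char) (0xE0 | (in >> 12));          /* 3-byte form: 1110zzzz */
  out[1] = (unsigned char) (0x80 | ((in >> 6) & 0x3F));  /* 10yyyyyy */
  out[2] = (unsigned char) (0x80 | (in & 0x3F));         /* 10xxxxxx */
  return 3;
}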
*outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_U2E) { - register uchar * dmapU2S=cd->cnv_dmap->dmapU2S; - register uchar * dmapU2M2=cd->cnv_dmap->dmapU2M2 - 0x80 * 2; - register uchar * dmapU2M3=cd->cnv_dmap->dmapU2M3 - 0x80 * 3; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register char * pSubD=(char *) &(cd->subD); - register size_t numS=0; - register size_t rc=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else if (in < 0x100 && dmapU2S[in] != 0x0000) { - if ((*pOut=dmapU2S[in]) == subS) { - if (in != cd->srcSubS) - ++numS; - } - ++pOut; - } else { - in<<=1; - if (dmapU2M2[in] == 0x00) { /* not found in dmapU2M2 */ - in*=1.5; - if (dmapU2M3[in] == 0x00) { /* not found in dmapU2M3*/ - *pOut=pSubD[0]; - ++pOut; - *pOut=pSubD[1]; - ++pOut; - ++numS; - ++rc; - } else { - *pOut=dmapU2M3[in]; - ++pOut; - *pOut=dmapU2M3[1+in]; - ++pOut; - *pOut=dmapU2M3[2+in]; - ++pOut; - } - } else { - *pOut=dmapU2M2[in]; - ++pOut; - if (dmapU2M2[1+in] == 0x00) { - if (*pOut == subS) { - in>>=1; - if (in != cd->srcSubS) - ++numS; - } - } else { - *pOut=dmapU2M2[1+in]; - ++pOut; - if (memcmp(pOut-2, pSubD, 2) == 0) { - in>>=1; - if (in != cd->srcSubD) { - ++numS; - ++rc; - } - } - } - } - } - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return rc; /* compatibility to iconv() */ - - } else if (cd->cnv_dmap->codingSchema == DMAP_T2E) { - register uchar * dmapU2S=cd->cnv_dmap->dmapU2S; - register uchar * dmapU2M2=cd->cnv_dmap->dmapU2M2 - 0x80 * 2; - register uchar * dmapU2M3=cd->cnv_dmap->dmapU2M3 - 0x80 * 3; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register char * pSubD=(char *) &(cd->subD); - register size_t numS=0; - register size_t rc=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen-1; - *outBuf=pOut; - *inBuf=pIn; - ++numS; - *numSub+=numS; - return 0; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else if (0xD800 <= in && in <= 0xDBFF) { /* first byte of surrogate */ - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-2; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn+2; - ++numS; - *numSub+=numS; - return -1; - - } else if (0xDC00 <= in && in <= 0xDFFF) { /* second byte of surrogate */ - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - ++numS; - *numSub+=numS; - return -1; - - } else if (in < 0x100 && dmapU2S[in] != 0x0000) { - if ((*pOut=dmapU2S[in]) == subS) { - if (in != cd->srcSubS) - ++numS; - } - ++pOut; - } else { - in<<=1; - if (dmapU2M2[in] == 0x00) { /* not found in dmapU2M2 */ - in*=1.5; - if (dmapU2M3[in] == 0x00) { /* not found in dmapU2M3*/ - *pOut=pSubD[0]; - ++pOut; - *pOut=pSubD[1]; - ++pOut; - 
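For reference, the table lookup shared by the DMAP_U2E, DMAP_T2E and DMAP_82E branches works as follows: dmapU2S covers single-byte results for code points below 0x100, dmapU2M2 stores two output bytes per code point, and dmapU2M3 stores three, which is why the index is first doubled (in <<= 1) and then scaled by 1.5 (i.e. code point * 3) before probing the second table. A sketch of that lookup as a standalone helper; the function and parameter names are made up for illustration, and the tables are assumed to be pre-biased exactly like dmapU2M2/dmapU2M3 above:

/* Illustrative sketch only: returns the number of bytes written, 0 = unmapped. */
static int dmap_lookup_mb(const unsigned char *u2m2,   /* 2 bytes per code point, biased like dmapU2M2 */
                          const unsigned char *u2m3,   /* 3 bytes per code point, biased like dmapU2M3 */
                          unsigned short ucs2,
                          unsigned char *out)
{
  size_t i2 = (size_t) ucs2 * 2;          /* the loop's  in <<= 1              */
  size_t i3 = i2 + i2 / 2;                /* the loop's  in *= 1.5, i.e. ucs2*3 */

  if (u2m2[i2] != 0x00) {                 /* found in the 2-byte table */
    out[0] = u2m2[i2];
    if (u2m2[i2 + 1] == 0x00)
      return 1;                           /* single-byte result */
    out[1] = u2m2[i2 + 1];
    return 2;
  }
  if (u2m3[i3] != 0x00) {                 /* found in the 3-byte table */
    out[0] = u2m3[i3];
    out[1] = u2m3[i3 + 1];
    out[2] = u2m3[i3 + 2];
    return 3;
  }
  return 0;                               /* unmapped: the loops above emit cd->subD and count a substitution */
}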
++numS; - ++rc; - } else { - *pOut=dmapU2M3[in]; - ++pOut; - *pOut=dmapU2M3[1+in]; - ++pOut; - *pOut=dmapU2M3[2+in]; - ++pOut; - } - } else { - *pOut=dmapU2M2[in]; - ++pOut; - if (dmapU2M2[1+in] == 0x00) { - if (*pOut == subS) { - in>>=1; - if (in != cd->srcSubS) - ++numS; - } - } else { - *pOut=dmapU2M2[1+in]; - ++pOut; - if (memcmp(pOut-2, pSubD, 2) == 0) { - in>>=1; - if (in != cd->srcSubD) { - ++numS; - ++rc; - } - } - } - } - } - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_82E) { - register uchar * dmapU2S=cd->cnv_dmap->dmapU2S; - register uchar * dmapU2M2=cd->cnv_dmap->dmapU2M2 - 0x80 * 2; - register uchar * dmapU2M3=cd->cnv_dmap->dmapU2M3 - 0x80 * 3; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register char * pSubD=(char *) &(cd->subD); - register size_t numS=0; - register size_t rc=0; - while (0 < inLen) { - register uint32_t in; - uint32_t in2; - if (pLastOutBuf < pOut) - break; - /* convert from UTF-8 to UCS-2 */ - if (*pIn == 0x00) { - in=0x0000; - ++pIn; - --inLen; - } else { /* 82U: */ - register uchar byte1=*pIn; - if ((byte1 & 0x80) == 0x00) { /* if (byte1 & 0b10000000 == 0b00000000) { */ - /* 1 bytes sequence: 0xxxxxxx => 00000000 0xxxxxxx*/ - in=byte1; - ++pIn; - --inLen; - } else if ((byte1 & 0xE0) == 0xC0) { /* (byte1 & 0b11100000 == 0b11000000) { */ - if (inLen < 2) { - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - if (byte1 == 0xC0 || byte1 == 0xC1) { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - /* 2 bytes sequence: - 110yyyyy 10xxxxxx => 00000yyy yyxxxxxx */ - register uchar byte2; - ++pIn; - byte2=*pIn; - if ((byte2 & 0xC0) == 0x80) { /* byte2 & 0b11000000 == 0b10000000) { */ - register uchar work=byte1; - work<<=6; - byte2&=0x3F; /* 0b00111111; */ - byte2|=work; - - byte1&=0x1F; /* 0b00011111; */ - byte1>>=2; - in=byte1; - in<<=8; - in+=byte2; - inLen-=2; - ++pIn; - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - *numSub+=numS; - return -1; - } - } else if ((byte1 & 0xF0) == 0xE0) { /* byte1 & 0b11110000 == 0b11100000 */ - /* 3 bytes sequence: - 1110zzzz 10yyyyyy 10xxxxxx => zzzzyyyy yyxxxxxx */ - register uchar byte2; - register uchar byte3; - if (inLen < 3) { - if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - if ((byte2 & 0xC0) != 0x80 || - (byte3 & 0xC0) != 0x80 || - (byte1 == 0xE0 && byte2 < 0xA0)) { /* invalid sequence, only 0xA0-0xBF allowed after 0xE0 */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - *numSub+=numS; - return -1; - } - { - register uchar work=byte2; - work<<=6; - byte3&=0x3F; /* 0b00111111; */ - byte3|=work; - - byte2&=0x3F; /* 0b00111111; */ - byte2>>=2; - - byte1<<=4; - in=byte1 | byte2;; - in<<=8; - in+=byte3; - inLen-=3; - ++pIn; - } - } else if ((0xF0 <= 
byte1 && byte1 <= 0xF4)) { /* (bytes1 & 11111000) == 0x1110000 */ - /* 4 bytes sequence - 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx => 110110ww wwzzzzyy 110111yy yyxxxxxx - where uuuuu = wwww + 1 */ - register uchar byte2; - register uchar byte3; - register uchar byte4; - if (inLen < 4) { - if ((inLen >= 2 && (pIn[1] & 0xC0) != 0x80) || - (inLen >= 3 && (pIn[2] & 0xC0) != 0x80) || - (cd->toCcsid == 13488) ) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - ++pIn; - byte4=*pIn; - if ((byte2 & 0xC0) == 0x80 && /* byte2 & 0b11000000 == 0b10000000 */ - (byte3 & 0xC0) == 0x80 && /* byte3 & 0b11000000 == 0b10000000 */ - (byte4 & 0xC0) == 0x80) { /* byte4 & 0b11000000 == 0b10000000 */ - register uchar work=byte2; - if (byte1 == 0xF0 && byte2 < 0x90) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - /* iconv() returns 0 for 0xF4908080 and convert to 0x00 - } else if (byte1 == 0xF4 && byte2 > 0x8F) { - errno=EINVAL; - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - */ - } - - work&=0x30; /* 0b00110000; */ - work>>=4; - byte1&=0x07; /* 0b00000111; */ - byte1<<=2; - byte1+=work; /* uuuuu */ - --byte1; /* wwww */ - - work=byte1 & 0x0F; - work>>=2; - work+=0xD8; /* 0b11011011; */ - in=work; - in<<=8; - - byte1<<=6; - byte2<<=2; - byte2&=0x3C; /* 0b00111100; */ - work=byte3; - work>>=4; - work&=0x03; /* 0b00000011; */ - work|=byte1; - work|=byte2; - in+=work; - - work=byte3; - work>>=2; - work&=0x03; /* 0b00000011; */ - work|=0xDC; /* 0b110111xx; */ - in2=work; - in2<<=8; - - byte3<<=6; - byte4&=0x3F; /* 0b00111111; */ - byte4|=byte3; - in2+=byte4; - inLen-=4; - ++pIn; -#ifdef match_with_GBK - if ((0xD800 == in && in2 < 0xDC80) || - (0xD840 == in && in2 < 0xDC80) || - (0xD880 == in && in2 < 0xDC80) || - (0xD8C0 == in && in2 < 0xDC80) || - (0xD900 == in && in2 < 0xDC80) || - (0xD940 == in && in2 < 0xDC80) || - (0xD980 == in && in2 < 0xDC80) || - (0xD9C0 == in && in2 < 0xDC80) || - (0xDA00 == in && in2 < 0xDC80) || - (0xDA40 == in && in2 < 0xDC80) || - (0xDA80 == in && in2 < 0xDC80) || - (0xDAC0 == in && in2 < 0xDC80) || - (0xDB00 == in && in2 < 0xDC80) || - (0xDB40 == in && in2 < 0xDC80) || - (0xDB80 == in && in2 < 0xDC80) || - (0xDBC0 == in && in2 < 0xDC80)) { -#else - if ((0xD800 <= in && in <= 0xDBFF) && - (0xDC00 <= in2 && in2 <= 0xDFFF)) { -#endif - *pOut=subS; - ++pOut; - ++numS; - continue; - } - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - } - } else if (0xF5 <= byte1 && byte1 <= 0xFF) { /* minic iconv() behavior */ - if (inLen < 4 || - (inLen >= 4 && byte1 == 0xF8 && pIn[1] < 0x90) || - pIn[1] < 0x80 || 0xBF < pIn[1] || - pIn[2] < 0x80 || 0xBF < pIn[2] || - pIn[3] < 0x80 || 0xBF < pIn[3] ) { - if (inLen == 1) - errno=EINVAL; /* 22 */ - else if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else if (inLen == 3 && ((pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else if (inLen >= 4 && (byte1 == 0xF8 || (pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80 || (pIn[3] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - - *outBytesLeft-=(pOut-*outBuf); - 
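For reference, the inline UTF-8 decode carried out by this DMAP_82E branch (and repeated in the other DMAP_82x branches) for 1-, 2- and 3-byte sequences can be restated as a helper under the same validity rules used above: no 0xC0/0xC1 lead bytes, continuation bytes must match 10xxxxxx, and 0xE0 must be followed by 0xA0..0xBF. The EINVAL/EILSEQ distinctions and the 4-byte case (which the loop turns into a surrogate pair) are left out for brevity; the helper name is made up for illustration:

/* Illustrative sketch only: returns bytes consumed, or -1 for an invalid sequence. */
static int utf8_to_ucs2(const unsigned char *p, int len, unsigned short *out)
{
  if (len < 1)
    return -1;
  if (p[0] < 0x80) {                                    /* 1-byte form */
    *out = p[0];
    return 1;
  }
  if ((p[0] & 0xE0) == 0xC0) {                          /* 2-byte form */
    if (len < 2 || p[0] < 0xC2 || (p[1] & 0xC0) != 0x80)
      return -1;                                        /* rejects 0xC0/0xC1 and bad continuations */
    *out = (unsigned short) (((p[0] & 0x1F) << 6) | (p[1] & 0x3F));
    return 2;
  }
  if ((p[0] & 0xF0) == 0xE0) {                          /* 3-byte form */
    if (len < 3 || (p[1] & 0xC0) != 0x80 || (p[2] & 0xC0) != 0x80 ||
        (p[0] == 0xE0 && p[1] < 0xA0))                  /* only 0xA0-0xBF allowed after 0xE0 */
      return -1;
    *out = (unsigned short) (((p[0] & 0x0F) << 12) |
                             ((p[1] & 0x3F) << 6)  |
                             (p[2] & 0x3F));
    return 3;
  }
  return -1;                                            /* 4-byte and invalid lead bytes not handled here */
}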
*inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } else if ((pIn[1] == 0x80 || pIn[1] == 0x90 || pIn[1] == 0xA0 || pIn[1] == 0xB0) && - pIn[2] < 0x82) { - *pOut=subS; /* Though returns replacement character, which iconv() does not return. */ - ++pOut; - ++numS; - pIn+=4; - inLen-=4; - continue; - } else { - *pOut=pSubD[0]; /* Though returns replacement character, which iconv() does not return. */ - ++pOut; - *pOut=pSubD[1]; - ++pOut; - ++numS; - pIn+=4; - inLen-=4; - continue; - /* iconv() returns 0 with strange 1 byte converted values */ - } - - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - } - /* end of UTF-8 to UCS-2 */ - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else if (in < 0x100 && dmapU2S[in] != 0x0000) { - if ((*pOut=dmapU2S[in]) == subS) { - if (in != cd->srcSubS) - ++numS; - } - ++pOut; - } else { - in<<=1; - if (dmapU2M2[in] == 0x00) { /* not found in dmapU2M2 */ - in*=1.5; - if (dmapU2M3[in] == 0x00) { /* not found in dmapU2M3*/ - *pOut=pSubD[0]; - ++pOut; - *pOut=pSubD[1]; - ++pOut; - ++numS; - ++rc; - } else { - *pOut=dmapU2M3[in]; - ++pOut; - *pOut=dmapU2M3[1+in]; - ++pOut; - *pOut=dmapU2M3[2+in]; - ++pOut; - } - } else { - *pOut=dmapU2M2[in]; - ++pOut; - if (dmapU2M2[1+in] == 0x00) { - if (*pOut == subS) { - in>>=1; - if (in != cd->srcSubS) - ++numS; - } - } else { - *pOut=dmapU2M2[1+in]; - ++pOut; - if (memcmp(pOut-2, pSubD, 2) == 0) { - in>>=1; - if (in != cd->srcSubD) { - ++numS; - ++rc; - } - } - } - } - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_S2U) { - /* use uchar * instead of UniChar to avoid memcpy */ - register uchar * dmapD12U=(uchar *) (cd->cnv_dmap->dmapD12U); - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register int offset; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register size_t numS=0; - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - ++pOut; - *pOut=0x00; - ++pOut; - ++pIn; - --inLen; - } else { - offset=*pIn; - offset<<=1; - *pOut=dmapD12U[offset]; - ++pOut; - *pOut=dmapD12U[offset+1]; - ++pOut; - if (dmapD12U[offset] == 0x00) { - if (dmapD12U[offset+1] == 0x1A) { - if (*pIn != cd->srcSubS) - ++numS; - } else if (dmapD12U[offset+1] == 0x00) { - pOut-=2; - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - } - ++pIn; - --inLen; - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_S28) { - /* use uchar * instead of UniChar to avoid memcpy */ - register uchar * dmapD12U=(uchar *) (cd->cnv_dmap->dmapD12U); - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register int offset; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register size_t numS=0; - register UniChar in; /* copy part of U28 */ - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - ++pOut; - ++pIn; - --inLen; - } else { - offset=*pIn; - offset<<=1; - in=dmapD12U[offset]; - in<<=8; - in+=dmapD12U[offset+1]; - if ((in & 0xFF80) == 0x0000) { /* U28: in & 0b1111111110000000 == 0x0000 */ - if (in == 
0x000) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - *pOut=in; - ++pOut; - } else if ((in & 0xF800) == 0x0000) { /* in & 0b1111100000000000 == 0x0000 */ - register uchar byte; - in>>=6; - in&=0x001F; /* 0b0000000000011111 */ - in|=0x00C0; /* 0b0000000011000000 */ - *pOut=in; - ++pOut; - byte=dmapD12U[offset+1]; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } else if ((in & 0xFC00) == 0xD800) { /* There should not be no surrogate character in SBCS. */ - *pOut=0xEF; - ++pOut; - *pOut=0xBF; - ++pOut; - *pOut=0xBD; - ++pOut; - } else { - register uchar byte; - register uchar work; - byte=dmapD12U[offset]; - byte>>=4; - byte|=0xE0; /* 0b11100000; */ - *pOut=byte; - ++pOut; - - byte=dmapD12U[offset]; - byte<<=2; - work=dmapD12U[offset+1]; - work>>=6; - byte|=work; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - - byte=dmapD12U[offset+1]; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } - /* end of U28 */ - if (dmapD12U[offset] == 0x00) { - if (dmapD12U[offset+1] == 0x1A) { - if (*pIn != cd->srcSubS) - ++numS; - } - } - ++pIn; - --inLen; - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_U2S) { - register uchar * dmapU2S=cd->cnv_dmap->dmapU2S; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - - *inBytesLeft=inLen; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - } else { - if ((*pOut=dmapU2S[in]) == 0x00) { - *pOut=subS; - ++numS; - errno=EINVAL; /* 22 */ - } else if (*pOut == subS) { - if (in != cd->srcSubS) - ++numS; - } - } - ++pOut; - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return numS; - - } else if (cd->cnv_dmap->codingSchema == DMAP_T2S) { - register uchar * dmapU2S=cd->cnv_dmap->dmapU2S; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - ++numS; - *numSub+=numS; - return 0; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - - } else if (0xD800 <= in && in <= 0xDFFF) { /* 0xD800-0xDFFF, surrogate first and second values */ - if (0xDC00 <= in ) { - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return -1; - - } else if (inLen < 4) { - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-2; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn+2; - return -1; - - } else { - register uint32_t in2; - in2=pIn[2]; - in2<<=8; - in2+=pIn[3]; - if (0xDC00 <= in2 && in2 <= 0xDFFF) { /* second surrogate character 
=0xDC00 - 0xDFFF*/ - *pOut=subS; - ++numS; - pIn+=4; - } else { - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - } - } else { - if ((*pOut=dmapU2S[in]) == 0x00) { - *pOut=subS; - ++numS; - errno=EINVAL; /* 22 */ - } else if (*pOut == subS) { - if (in != cd->srcSubS) - ++numS; - } - } - ++pOut; - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_82S) { - register uchar * dmapU2S=cd->cnv_dmap->dmapU2S; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - uint32_t in2; /* The second surrogate value */ - if (pLastOutBuf < pOut) - break; - /* convert from UTF-8 to UCS-2 */ - if (*pIn == 0x00) { - in=0x0000; - ++pIn; - --inLen; - } else { /* 82U: */ - register uchar byte1=*pIn; - if ((byte1 & 0x80) == 0x00) { /* if (byte1 & 0b10000000 == 0b00000000) { */ - /* 1 bytes sequence: 0xxxxxxx => 00000000 0xxxxxxx*/ - in=byte1; - ++pIn; - --inLen; - } else if ((byte1 & 0xE0) == 0xC0) { /* (byte1 & 0b11100000 == 0b11000000) { */ - if (inLen < 2) { - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - if (byte1 == 0xC0 || byte1 == 0xC1) { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - /* 2 bytes sequence: - 110yyyyy 10xxxxxx => 00000yyy yyxxxxxx */ - register uchar byte2; - ++pIn; - byte2=*pIn; - if ((byte2 & 0xC0) == 0x80) { /* byte2 & 0b11000000 == 0b10000000) { */ - register uchar work=byte1; - work<<=6; - byte2&=0x3F; /* 0b00111111; */ - byte2|=work; - - byte1&=0x1F; /* 0b00011111; */ - byte1>>=2; - in=byte1; - in<<=8; - in+=byte2; - inLen-=2; - ++pIn; - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - *numSub+=numS; - return -1; - } - } else if ((byte1 & 0xF0) == 0xE0) { /* byte1 & 0b11110000 == 0b11100000 */ - /* 3 bytes sequence: - 1110zzzz 10yyyyyy 10xxxxxx => zzzzyyyy yyxxxxxx */ - register uchar byte2; - register uchar byte3; - if (inLen < 3) { - if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - if ((byte2 & 0xC0) != 0x80 || - (byte3 & 0xC0) != 0x80 || - (byte1 == 0xE0 && byte2 < 0xA0)) { /* invalid sequence, only 0xA0-0xBF allowed after 0xE0 */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - *numSub+=numS; - return -1; - } - { - register uchar work=byte2; - work<<=6; - byte3&=0x3F; /* 0b00111111; */ - byte3|=work; - - byte2&=0x3F; /* 0b00111111; */ - byte2>>=2; - - byte1<<=4; - in=byte1 | byte2;; - in<<=8; - in+=byte3; - inLen-=3; - ++pIn; - } - } else if ((0xF0 <= byte1 && byte1 <= 0xF4) || /* (bytes1 & 11111000) == 0x1110000 */ - ((byte1&=0xF7) && 0xF0 <= byte1 && byte1 <= 0xF4)) { /* minic iconv() behavior */ - /* 4 bytes sequence - 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx => 110110ww 
wwzzzzyy 110111yy yyxxxxxx - where uuuuu = wwww + 1 */ - register uchar byte2; - register uchar byte3; - register uchar byte4; - if (inLen < 4) { - if ((inLen >= 2 && (pIn[1] & 0xC0) != 0x80) || - (inLen >= 3 && (pIn[2] & 0xC0) != 0x80) || - (cd->toCcsid == 13488) ) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - ++pIn; - byte4=*pIn; - if ((byte2 & 0xC0) == 0x80 && /* byte2 & 0b11000000 == 0b10000000 */ - (byte3 & 0xC0) == 0x80 && /* byte3 & 0b11000000 == 0b10000000 */ - (byte4 & 0xC0) == 0x80) { /* byte4 & 0b11000000 == 0b10000000 */ - register uchar work=byte2; - if (byte1 == 0xF0 && byte2 < 0x90) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - /* iconv() returns 0 for 0xF4908080 and convert to 0x00 - } else if (byte1 == 0xF4 && byte2 > 0x8F) { - errno=EINVAL; - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - */ - } - - work&=0x30; /* 0b00110000; */ - work>>=4; - byte1&=0x07; /* 0b00000111; */ - byte1<<=2; - byte1+=work; /* uuuuu */ - --byte1; /* wwww */ - - work=byte1 & 0x0F; - work>>=2; - work+=0xD8; /* 0b11011011; */ - in=work; - in<<=8; - - byte1<<=6; - byte2<<=2; - byte2&=0x3C; /* 0b00111100; */ - work=byte3; - work>>=4; - work&=0x03; /* 0b00000011; */ - work|=byte1; - work|=byte2; - in+=work; - - work=byte3; - work>>=2; - work&=0x03; /* 0b00000011; */ - work|=0xDC; /* 0b110111xx; */ - in2=work; - in2<<=8; - - byte3<<=6; - byte4&=0x3F; /* 0b00111111; */ - byte4|=byte3; - in2+=byte4; - inLen-=4; - ++pIn; - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - } - } else if ((byte1 & 0xF0) == 0xF0) { /* minic iconv() behavior */ - if (inLen < 4 || - pIn[1] < 0x80 || 0xBF < pIn[1] || - pIn[2] < 0x80 || 0xBF < pIn[2] || - pIn[3] < 0x80 || 0xBF < pIn[3] ) { - if (inLen == 1) - errno=EINVAL; /* 22 */ - else if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else if (inLen == 3 && ((pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else if (inLen >= 4 && ((pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80 || (pIn[3] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } else { - *pOut=subS; /* Though returns replacement character, which iconv() does not return. */ - ++pOut; - ++numS; - pIn+=4; - inLen-=4; - /* UTF-8_IBM-850 0xF0908080 : converted value does not match, iconv=0x00, dmap=0x7F - UTF-8_IBM-850 0xF0908081 : converted value does not match, iconv=0x01, dmap=0x7F - UTF-8_IBM-850 0xF0908082 : converted value does not match, iconv=0x02, dmap=0x7F - UTF-8_IBM-850 0xF0908083 : converted value does not match, iconv=0x03, dmap=0x7F - .... - UTF-8_IBM-850 0xF09081BE : converted value does not match, iconv=0x7E, dmap=0x7F - UTF-8_IBM-850 0xF09081BF : converted value does not match, iconv=0x1C, dmap=0x7F - UTF-8_IBM-850 0xF09082A0 : converted value does not match, iconv=0xFF, dmap=0x7F - UTF-8_IBM-850 0xF09082A1 : converted value does not match, iconv=0xAD, dmap=0x7F - .... 
- */ - continue; - /* iconv() returns 0 with strange 1 byte converted values */ - } - - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - } - /* end of UTF-8 to UCS-2 */ - if (in == 0x0000) { - *pOut=0x00; - } else { - if ((*pOut=dmapU2S[in]) == 0x00) { - *pOut=subS; - ++numS; - errno=EINVAL; /* 22 */ - } else if (*pOut == subS) { - if (in != cd->srcSubS) { - ++numS; - } - } - } - ++pOut; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_D2U) { - /* use uchar * instead of UniChar to avoid memcpy */ - register uchar * dmapD12U=(uchar *) (cd->cnv_dmap->dmapD12U); - register uchar * dmapD22U=(uchar *) (cd->cnv_dmap->dmapD22U); - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register int offset; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register size_t numS=0; - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - ++pOut; - *pOut=0x00; - ++pOut; - ++pIn; - --inLen; - } else { - offset=*pIn; - offset<<=1; - if (dmapD12U[offset] == 0x00 && - dmapD12U[offset+1] == 0x00) { /* DBCS */ - if (inLen < 2) { - if (*pIn == 0x80 || *pIn == 0xFF || - (cd->fromCcsid == 943 && (*pIn == 0x85 || *pIn == 0x86 || *pIn == 0xA0 || *pIn == 0xEB || *pIn == 0xEC || *pIn == 0xEF || *pIn == 0xFD || *pIn == 0xFE)) || - (cd->fromCcsid == 932 && (*pIn == 0x85 || *pIn == 0x86 || *pIn == 0x87 || *pIn == 0xEB || *pIn == 0xEC || *pIn == 0xED || *pIn == 0xEE || *pIn == 0xEF)) || - (cd->fromCcsid == 1381 && ((0x85 <= *pIn && *pIn <= 0x8B) || (0xAA <= *pIn && *pIn <= 0xAF) || (0xF8 <= *pIn && *pIn <= 0xFE)))) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - offset-=0x100; - ++pIn; - offset<<=8; - offset+=(*pIn * 2); - if (dmapD22U[offset] == 0x00 && - dmapD22U[offset+1] == 0x00) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - *pOut=dmapD22U[offset]; - ++pOut; - *pOut=dmapD22U[offset+1]; - ++pOut; - if (dmapD22U[offset] == 0xFF && - dmapD22U[offset+1] == 0xFD) { - if (pIn[-1] * 0x100 + pIn[0] != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=2; - } else { /* SBCS */ - *pOut=dmapD12U[offset]; - ++pOut; - *pOut=dmapD12U[offset+1]; - ++pOut; - if (dmapD12U[offset] == 0x00 && - dmapD12U[offset+1] == 0x1A) { - if (*pIn != cd->srcSubS) - ++numS; - } - ++pIn; - --inLen; - } - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_D28) { - /* use uchar * instead of UniChar to avoid memcpy */ - register uchar * dmapD12U=(uchar *) (cd->cnv_dmap->dmapD12U); - register uchar * dmapD22U=(uchar *) (cd->cnv_dmap->dmapD22U); - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register int offset; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register size_t numS=0; - register UniChar in; /* copy part of U28 */ - register UniChar ucs2; - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - ++pOut; - ++pIn; - --inLen; - } else { - offset=*pIn; - offset<<=1; - if (dmapD12U[offset] == 0x00 
&& - dmapD12U[offset+1] == 0x00) { /* DBCS */ - if (inLen < 2) { - if (*pIn == 0x80 || *pIn == 0xFF || - (cd->fromCcsid == 943 && (*pIn == 0x85 || *pIn == 0x86 || *pIn == 0xA0 || *pIn == 0xEB || *pIn == 0xEC || *pIn == 0xEF || *pIn == 0xFD || *pIn == 0xFE)) || - (cd->fromCcsid == 932 && (*pIn == 0x85 || *pIn == 0x86 || *pIn == 0x87 || *pIn == 0xEB || *pIn == 0xEC || *pIn == 0xED || *pIn == 0xEE || *pIn == 0xEF)) || - (cd->fromCcsid == 1381 && ((0x85 <= *pIn && *pIn <= 0x8B) || (0xAA <= *pIn && *pIn <= 0xAF) || (0xF8 <= *pIn && *pIn <= 0xFE)))) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - offset-=0x100; - ++pIn; - offset<<=8; - offset+=(*pIn * 2); - if (dmapD22U[offset] == 0x00 && - dmapD22U[offset+1] == 0x00) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - return -1; - } - in=dmapD22U[offset]; - in<<=8; - in+=dmapD22U[offset+1]; - ucs2=in; - if (dmapD22U[offset] == 0xFF && - dmapD22U[offset+1] == 0xFD) { - if (in != cd->srcSubD) - ++numS; - } - ++pIn; - inLen-=2; - } else { /* SBCS */ - in=dmapD12U[offset]; - in<<=8; - in+=dmapD12U[offset+1]; - ucs2=in; - if (dmapD12U[offset] == 0x00 && - dmapD12U[offset+1] == 0x1A) { - if (in != cd->srcSubS) - ++numS; - } - ++pIn; - --inLen; - } - if ((in & 0xFF80) == 0x0000) { /* U28: in & 0b1111111110000000 == 0x0000 */ - *pOut=in; - ++pOut; - } else if ((in & 0xF800) == 0x0000) { /* in & 0b1111100000000000 == 0x0000 */ - register uchar byte; - in>>=6; - in&=0x001F; /* 0b0000000000011111 */ - in|=0x00C0; /* 0b0000000011000000 */ - *pOut=in; - ++pOut; - byte=ucs2; /* dmapD12U[offset+1]; */ - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } else if ((in & 0xFC00) == 0xD800) { /* There should not be no surrogate character in SBCS. 
*/ - *pOut=0xEF; - ++pOut; - *pOut=0xBF; - ++pOut; - *pOut=0xBD; - ++pOut; - } else { - register uchar byte; - register uchar work; - byte=(ucs2>>8); /* dmapD12U[offset]; */ - byte>>=4; - byte|=0xE0; /* 0b11100000; */ - *pOut=byte; - ++pOut; - - byte=(ucs2>>8); /* dmapD12U[offset]; */ - byte<<=2; - work=ucs2; /* dmapD12U[offset+1]; */ - work>>=6; - byte|=work; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - - byte=ucs2; /* dmapD12U[offset+1]; */ - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } - /* end of U28 */ - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_U2D) { - register uchar * dmapU2D=cd->cnv_dmap->dmapU2D; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register char * pSubD=(char *) &(cd->subD); - register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - - *inBytesLeft=inLen; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else { - in<<=1; - *pOut=dmapU2D[in]; - ++pOut; - if (dmapU2D[in+1] == 0x00) { /* SBCS */ - if (*pOut == subS) { - if (in != cd->srcSubS) - ++numS; - } - } else { - *pOut=dmapU2D[in+1]; - ++pOut; - if (dmapU2D[in] == pSubD[0] && - dmapU2D[in+1] == pSubD[1]) { - in>>=1; - if (in != cd->srcSubD) - ++numS; - } - } - } - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return numS; /* to minic iconv() behavior */ - - } else if (cd->cnv_dmap->codingSchema == DMAP_T2D) { - register uchar * dmapU2D=cd->cnv_dmap->dmapU2D; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register char * pSubD=(char *) &(cd->subD); - register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - ++numS; - *numSub+=numS; - return 0; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else if (0xD800 <= in && in <= 0xDBFF) { /* first byte of surrogate */ - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-2; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn+2; - ++numS; - *numSub+=numS; - return -1; - - } else if (0xDC00 <= in && in <= 0xDFFF) { /* second byte of surrogate */ - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - ++numS; - *numSub+=numS; - return -1; - - } else { - in<<=1; - *pOut=dmapU2D[in]; - ++pOut; - if (dmapU2D[in+1] == 0x00) { /* SBCS */ - if (*pOut == subS) { - if (in != cd->srcSubS) - ++numS; - } - } else { - *pOut=dmapU2D[in+1]; - ++pOut; - if (dmapU2D[in] == pSubD[0] && - dmapU2D[in+1] == pSubD[1]) { - in>>=1; - if (in != cd->srcSubD) - ++numS; - } - } - } - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; /* to minic iconv() 
behavior */ - - } else if (cd->cnv_dmap->codingSchema == DMAP_82D) { - register uchar * dmapU2D=cd->cnv_dmap->dmapU2D; - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register char subS=cd->subS; - register char * pSubD=(char *) &(cd->subD); - register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - uint32_t in2; - if (pLastOutBuf < pOut) - break; - /* convert from UTF-8 to UCS-2 */ - if (*pIn == 0x00) { - in=0x0000; - ++pIn; - --inLen; - } else { /* 82U: */ - register uchar byte1=*pIn; - if ((byte1 & 0x80) == 0x00) { /* if (byte1 & 0b10000000 == 0b00000000) { */ - /* 1 bytes sequence: 0xxxxxxx => 00000000 0xxxxxxx*/ - in=byte1; - ++pIn; - --inLen; - } else if ((byte1 & 0xE0) == 0xC0) { /* (byte1 & 0b11100000 == 0b11000000) { */ - if (inLen < 2) { - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - if (byte1 == 0xC0 || byte1 == 0xC1) { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - /* 2 bytes sequence: - 110yyyyy 10xxxxxx => 00000yyy yyxxxxxx */ - register uchar byte2; - ++pIn; - byte2=*pIn; - if ((byte2 & 0xC0) == 0x80) { /* byte2 & 0b11000000 == 0b10000000) { */ - register uchar work=byte1; - work<<=6; - byte2&=0x3F; /* 0b00111111; */ - byte2|=work; - - byte1&=0x1F; /* 0b00011111; */ - byte1>>=2; - in=byte1; - in<<=8; - in+=byte2; - inLen-=2; - ++pIn; - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - *numSub+=numS; - return -1; - } - } else if ((byte1 & 0xF0) == 0xE0) { /* byte1 & 0b11110000 == 0b11100000 */ - /* 3 bytes sequence: - 1110zzzz 10yyyyyy 10xxxxxx => zzzzyyyy yyxxxxxx */ - register uchar byte2; - register uchar byte3; - if (inLen < 3) { - if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - if ((byte2 & 0xC0) != 0x80 || - (byte3 & 0xC0) != 0x80 || - (byte1 == 0xE0 && byte2 < 0xA0)) { /* invalid sequence, only 0xA0-0xBF allowed after 0xE0 */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - *numSub+=numS; - return -1; - } - { - register uchar work=byte2; - work<<=6; - byte3&=0x3F; /* 0b00111111; */ - byte3|=work; - - byte2&=0x3F; /* 0b00111111; */ - byte2>>=2; - - byte1<<=4; - in=byte1 | byte2;; - in<<=8; - in+=byte3; - inLen-=3; - ++pIn; - } - } else if ((0xF0 <= byte1 && byte1 <= 0xF4)) { /* (bytes1 & 11111000) == 0x1110000 */ - /* 4 bytes sequence - 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx => 110110ww wwzzzzyy 110111yy yyxxxxxx - where uuuuu = wwww + 1 */ - register uchar byte2; - register uchar byte3; - register uchar byte4; - if (inLen < 4) { - if ((inLen >= 2 && (pIn[1] & 0xC0) != 0x80) || - (inLen >= 3 && (pIn[2] & 0xC0) != 0x80) || - (cd->toCcsid == 13488) ) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - ++pIn; - byte4=*pIn; - if ((byte2 & 0xC0) == 0x80 && /* byte2 & 0b11000000 == 
0b10000000 */ - (byte3 & 0xC0) == 0x80 && /* byte3 & 0b11000000 == 0b10000000 */ - (byte4 & 0xC0) == 0x80) { /* byte4 & 0b11000000 == 0b10000000 */ - register uchar work=byte2; - if (byte1 == 0xF0 && byte2 < 0x90) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - /* iconv() returns 0 for 0xF4908080 and convert to 0x00 - } else if (byte1 == 0xF4 && byte2 > 0x8F) { - errno=EINVAL; - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - */ - } - - work&=0x30; /* 0b00110000; */ - work>>=4; - byte1&=0x07; /* 0b00000111; */ - byte1<<=2; - byte1+=work; /* uuuuu */ - --byte1; /* wwww */ - - work=byte1 & 0x0F; - work>>=2; - work+=0xD8; /* 0b11011011; */ - in=work; - in<<=8; - - byte1<<=6; - byte2<<=2; - byte2&=0x3C; /* 0b00111100; */ - work=byte3; - work>>=4; - work&=0x03; /* 0b00000011; */ - work|=byte1; - work|=byte2; - in+=work; - - work=byte3; - work>>=2; - work&=0x03; /* 0b00000011; */ - work|=0xDC; /* 0b110111xx; */ - in2=work; - in2<<=8; - - byte3<<=6; - byte4&=0x3F; /* 0b00111111; */ - byte4|=byte3; - in2+=byte4; - inLen-=4; - ++pIn; -#ifdef match_with_GBK - if ((0xD800 == in && in2 < 0xDC80) || - (0xD840 == in && in2 < 0xDC80) || - (0xD880 == in && in2 < 0xDC80) || - (0xD8C0 == in && in2 < 0xDC80) || - (0xD900 == in && in2 < 0xDC80) || - (0xD940 == in && in2 < 0xDC80) || - (0xD980 == in && in2 < 0xDC80) || - (0xD9C0 == in && in2 < 0xDC80) || - (0xDA00 == in && in2 < 0xDC80) || - (0xDA40 == in && in2 < 0xDC80) || - (0xDA80 == in && in2 < 0xDC80) || - (0xDAC0 == in && in2 < 0xDC80) || - (0xDB00 == in && in2 < 0xDC80) || - (0xDB40 == in && in2 < 0xDC80) || - (0xDB80 == in && in2 < 0xDC80) || - (0xDBC0 == in && in2 < 0xDC80)) { -#else - if ((0xD800 <= in && in <= 0xDBFF) && - (0xDC00 <= in2 && in2 <= 0xDFFF)) { -#endif - *pOut=subS; - ++pOut; - ++numS; - continue; - } - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - } - } else if (0xF5 <= byte1 && byte1 <= 0xFF) { /* minic iconv() behavior */ - if (inLen < 4 || - (inLen >= 4 && byte1 == 0xF8 && pIn[1] < 0x90) || - pIn[1] < 0x80 || 0xBF < pIn[1] || - pIn[2] < 0x80 || 0xBF < pIn[2] || - pIn[3] < 0x80 || 0xBF < pIn[3] ) { - if (inLen == 1) - errno=EINVAL; /* 22 */ - else if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else if (inLen == 3 && ((pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else if (inLen >= 4 && (byte1 == 0xF8 || (pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80 || (pIn[3] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } else if ((pIn[1] == 0x80 || pIn[1] == 0x90 || pIn[1] == 0xA0 || pIn[1] == 0xB0) && - pIn[2] < 0x82) { - *pOut=subS; /* Though returns replacement character, which iconv() does not return. */ - ++pOut; - ++numS; - pIn+=4; - inLen-=4; - continue; - } else { - *pOut=pSubD[0]; /* Though returns replacement character, which iconv() does not return. 
*/ - ++pOut; - *pOut=pSubD[1]; - ++pOut; - ++numS; - pIn+=4; - inLen-=4; - continue; - /* iconv() returns 0 with strange 1 byte converted values */ - } - - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - } - /* end of UTF-8 to UCS-2 */ - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else { - in<<=1; - *pOut=dmapU2D[in]; - ++pOut; - if (dmapU2D[in+1] == 0x00) { /* SBCS */ - if (dmapU2D[in] == subS) { - in>>=1; - if (in != cd->srcSubS) - ++numS; - } - } else { - *pOut=dmapU2D[in+1]; - ++pOut; - if (dmapU2D[in] == pSubD[0] && - dmapU2D[in+1] == pSubD[1]) { - in>>=1; - if (in != cd->srcSubD) - ++numS; - } - } - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_82U) { - /* See http://unicode.org/versions/corrigendum1.html */ - /* convert from UTF-8 to UTF-16 can cover all conversion from UTF-8 to UCS-2 */ - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - register size_t numS=0; - while (0 < inLen) { - if (pLastOutBuf < pOut) - break; - if (*pIn == 0x00) { - *pOut=0x00; - ++pOut; - *pOut=0x00; - ++pOut; - ++pIn; - --inLen; - } else { /* 82U: */ - register uchar byte1=*pIn; - if ((byte1 & 0x80) == 0x00) { /* if (byte1 & 0b10000000 == 0b00000000) { */ - /* 1 bytes sequence: 0xxxxxxx => 00000000 0xxxxxxx*/ - *pOut=0x00; - ++pOut; - *pOut=byte1; - ++pOut; - ++pIn; - --inLen; - } else if ((byte1 & 0xE0) == 0xC0) { /* (byte1 & 0b11100000 == 0b11000000) { */ - if (inLen < 2) { - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - if (byte1 == 0xC0 || byte1 == 0xC1) { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - /* 2 bytes sequence: - 110yyyyy 10xxxxxx => 00000yyy yyxxxxxx */ - register uchar byte2; - ++pIn; - byte2=*pIn; - if ((byte2 & 0xC0) == 0x80) { /* byte2 & 0b11000000 == 0b10000000) { */ - register uchar work=byte1; - work<<=6; - byte2&=0x3F; /* 0b00111111; */ - byte2|=work; - - byte1&=0x1F; /* 0b00011111; */ - byte1>>=2; - *pOut=byte1; - ++pOut; - *pOut=byte2; - ++pOut; - inLen-=2; - ++pIn; - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-1; - *numSub+=numS; - return -1; - } - } else if ((byte1 & 0xF0) == 0xE0) { /* byte1 & 0b11110000 == 0b11100000 */ - /* 3 bytes sequence: - 1110zzzz 10yyyyyy 10xxxxxx => zzzzyyyy yyxxxxxx */ - register uchar byte2; - register uchar byte3; - if (inLen < 3) { - if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - if ((byte2 & 0xC0) != 0x80 || - (byte3 & 0xC0) != 0x80 || - (byte1 == 0xE0 && byte2 < 0xA0)) { /* invalid sequence, only 0xA0-0xBF allowed after 0xE0 */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-2; - *numSub+=numS; - return -1; - } - { - register uchar work=byte2; - work<<=6; - byte3&=0x3F; /* 0b00111111; */ - 
byte3|=work; - - byte2&=0x3F; /* 0b00111111; */ - byte2>>=2; - - byte1<<=4; - *pOut=byte1 | byte2;; - ++pOut; - *pOut=byte3; - ++pOut; - inLen-=3; - ++pIn; - } - } else if ((0xF0 <= byte1 && byte1 <= 0xF4) || /* (bytes1 & 11111000) == 0x1110000 */ - ((byte1&=0xF7) && 0xF0 <= byte1 && byte1 <= 0xF4)) { /* minic iconv() behavior */ - /* 4 bytes sequence - 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx => 110110ww wwzzzzyy 110111yy yyxxxxxx - where uuuuu = wwww + 1 */ - register uchar byte2; - register uchar byte3; - register uchar byte4; - if (inLen < 4 || cd->toCcsid == 13488) { - if ((inLen >= 2 && (pIn[1] & 0xC0) != 0x80) || - (inLen >= 3 && (pIn[2] & 0xC0) != 0x80) || - (cd->toCcsid == 13488) ) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - ++pIn; - byte2=*pIn; - ++pIn; - byte3=*pIn; - ++pIn; - byte4=*pIn; - if ((byte2 & 0xC0) == 0x80 && /* byte2 & 0b11000000 == 0b10000000 */ - (byte3 & 0xC0) == 0x80 && /* byte3 & 0b11000000 == 0b10000000 */ - (byte4 & 0xC0) == 0x80) { /* byte4 & 0b11000000 == 0b10000000 */ - register uchar work=byte2; - if (byte1 == 0xF0 && byte2 < 0x90) { - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - } else if (byte1 == 0xF4 && byte2 > 0x8F) { - errno=EINVAL; /* 22 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - } - - work&=0x30; /* 0b00110000; */ - work>>=4; - byte1&=0x07; /* 0b00000111; */ - byte1<<=2; - byte1+=work; /* uuuuu */ - --byte1; /* wwww */ - - work=byte1 & 0x0F; - work>>=2; - work+=0xD8; /* 0b11011011; */ - *pOut=work; - ++pOut; - - byte1<<=6; - byte2<<=2; - byte2&=0x3C; /* 0b00111100; */ - work=byte3; - work>>=4; - work&=0x03; /* 0b00000011; */ - work|=byte1; - work|=byte2; - *pOut=work; - ++pOut; - - work=byte3; - work>>=2; - work&=0x03; /* 0b00000011; */ - work|=0xDC; /* 0b110111xx; */ - *pOut=work; - ++pOut; - - byte3<<=6; - byte4&=0x3F; /* 0b00111111; */ - byte4|=byte3; - *pOut=byte4; - ++pOut; - inLen-=4; - ++pIn; - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn-3; - *numSub+=numS; - return -1; - } - } else if ((byte1 & 0xF0) == 0xF0) { - if (cd->toCcsid == 13488) { - errno=EILSEQ; /* 116 */ - } else { - if (inLen == 1) - errno=EINVAL; /* 22 */ - else if (inLen == 2 && (pIn[1] & 0xC0) != 0x80) - errno=EILSEQ; /* 116 */ - else if (inLen == 3 && ((pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else if (inLen >= 4 && ((pIn[1] & 0xC0) != 0x80 || (pIn[2] & 0xC0) != 0x80 || (pIn[3] & 0xC0) != 0x80)) - errno=EILSEQ; /* 116 */ - else - errno=EINVAL; /* 22 */ - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - - } else { /* invalid sequence */ - errno=EILSEQ; /* 116 */ - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return -1; - } - } - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - *numSub+=numS; - return 0; - } else if (cd->cnv_dmap->codingSchema == DMAP_U28) { - /* See http://unicode.org/versions/corrigendum1.html */ - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - // 
register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return -1; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else if ((in & 0xFF80) == 0x0000) { /* U28: in & 0b1111111110000000 == 0x0000 */ - *pOut=in; - ++pOut; - } else if ((in & 0xF800) == 0x0000) { /* in & 0b1111100000000000 == 0x0000 */ - register uchar byte; - in>>=6; - in&=0x001F; /* 0b0000000000011111 */ - in|=0x00C0; /* 0b0000000011000000 */ - *pOut=in; - ++pOut; - byte=pIn[1]; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } else { - register uchar byte; - register uchar work; - byte=pIn[0]; - byte>>=4; - byte|=0xE0; /* 0b11100000; */ - *pOut=byte; - ++pOut; - - byte=pIn[0]; - byte<<=2; - work=pIn[1]; - work>>=6; - byte|=work; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - - byte=pIn[1]; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - // *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_T28) { /* UTF-16_UTF-8 */ - /* See http://unicode.org/versions/corrigendum1.html */ - register int inLen=*inBytesLeft; - register char * pOut=*outBuf; - register char * pIn=*inBuf; - register char * pLastOutBuf = *outBuf + *outBytesLeft - 1; - // register size_t numS=0; - while (0 < inLen) { - register uint32_t in; - if (inLen == 1) { - errno=EINVAL; /* 22 */ - *inBytesLeft=0; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return 0; - } - if (pLastOutBuf < pOut) - break; - in=pIn[0]; - in<<=8; - in+=pIn[1]; - if (in == 0x0000) { - *pOut=0x00; - ++pOut; - } else if ((in & 0xFF80) == 0x0000) { /* U28: in & 0b1111111110000000 == 0x0000 */ - *pOut=in; - ++pOut; - } else if ((in & 0xF800) == 0x0000) { /* in & 0b1111100000000000 == 0x0000 */ - register uchar byte; - in>>=6; - in&=0x001F; /* 0b0000000000011111 */ - in|=0x00C0; /* 0b0000000011000000 */ - *pOut=in; - ++pOut; - byte=pIn[1]; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } else if ((in & 0xFC00) == 0xD800) { /* in & 0b1111110000000000 == 0b1101100000000000, first surrogate character */ - if (0xDC00 <= in ) { - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return -1; - - } else if (inLen < 4) { - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-2; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn+2; - return -1; - - } else if ((pIn[2] & 0xFC) != 0xDC) { /* pIn[2] & 0b11111100 == 0b11011100, second surrogate character */ - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-2; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn+2; - return -1; - - } else { - register uchar byte; - register uchar work; - in>>=6; - in&=0x000F; /* 0b0000000000001111 */ - byte=in; /* wwww */ - ++byte; /* uuuuu */ - work=byte; /* save uuuuu */ - byte>>=2; - byte|=0xF0; /* 0b11110000; */ - *pOut=byte; - ++pOut; - - byte=work; - byte&=0x03; /* 0b00000011; */ - byte<<=4; - byte|=0x80; /* 0b10000000; */ - work=pIn[1]; - work&=0x3C; /* 0b00111100; */ - work>>=2; - byte|=work; - *pOut=byte; - ++pOut; - - byte=pIn[1]; - byte&=0x03; /* 0b00000011; */ - byte<<=4; - byte|=0x80; /* 0b10000000; */ - 
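For reference, the surrogate-pair handling that DMAP_T28 implements with byte shuffling (the "uuuuu = wwww + 1" comment above) is equivalent to recombining the pair into a code point in 0x10000..0x10FFFF and emitting the standard 4-byte UTF-8 form. A sketch using plain code-point arithmetic; the helper name is made up for illustration, and hi/lo are assumed to already be a valid high/low surrogate pair:

/* Illustrative sketch only: hi in 0xD800..0xDBFF, lo in 0xDC00..0xDFFF, out has 4 bytes. */
static int utf8_from_surrogates(unsigned short hi, unsigned short lo,
                                unsigned char *out)
{
  /* recover the full code point: 0x10000 + 10 bits from hi + 10 bits from lo */
  unsigned long cp = 0x10000UL + (((unsigned long) (hi - 0xD800)) << 10)
                               + (lo - 0xDC00);

  out[0] = (unsigned char) (0xF0 | (cp >> 18));          /* 11110uuu */
  out[1] = (unsigned char) (0x80 | ((cp >> 12) & 0x3F)); /* 10uuzzzz */
  out[2] = (unsigned char) (0x80 | ((cp >> 6) & 0x3F));  /* 10yyyyyy */
  out[3] = (unsigned char) (0x80 | (cp & 0x3F));         /* 10xxxxxx */
  return 4;
}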
work=pIn[2]; - work&=0x03; /* 0b00000011; */ - work<<=2; - byte|=work; - work=pIn[3]; - work>>=6; - byte|=work; - *pOut=byte; - ++pOut; - - byte=pIn[3]; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - pIn+=2; - inLen-=2; - } - } else if ((in & 0xFC00) == 0xDC00) { /* in & 0b11111100 == 0b11011100, second surrogate character */ - errno=EINVAL; /* 22 */ - *inBytesLeft=inLen-1; - *outBytesLeft-=(pOut-*outBuf); - *outBuf=pOut; - *inBuf=pIn; - return -1; - - } else { - register uchar byte; - register uchar work; - byte=pIn[0]; - byte>>=4; - byte|=0xE0; /* 0b11100000; */ - *pOut=byte; - ++pOut; - - byte=pIn[0]; - byte<<=2; - work=pIn[1]; - work>>=6; - byte|=work; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - - byte=pIn[1]; - byte&=0x3F; /* 0b00111111; */ - byte|=0x80; /* 0b10000000; */ - *pOut=byte; - ++pOut; - } - pIn+=2; - inLen-=2; - } - *outBytesLeft-=(pOut-*outBuf); - *inBytesLeft=inLen; - *outBuf=pOut; - *inBuf=pIn; - // *numSub+=numS; - return 0; - - } else if (cd->cnv_dmap->codingSchema == DMAP_U2U) { /* UTF-16_UCS-2 */ - register int inLen=*inBytesLeft; - register int outLen=*outBytesLeft; - if (inLen <= outLen) { - memcpy(*outBuf, *inBuf, inLen); - (*outBytesLeft)-=inLen; - (*inBuf)+=inLen; - (*outBuf)+=inLen; - *inBytesLeft=0; - return 0; - } - memcpy(*outBuf, *inBuf, outLen); - (*outBytesLeft)=0; - (*inBuf)+=outLen; - (*outBuf)+=outLen; - *inBytesLeft-=outLen; - return (*inBytesLeft); - - } else { - return -1; - } - return 0; -} - - -#ifdef DEBUG -inline size_t myconv(myconv_t cd , - char** inBuf, - size_t* inBytesLeft, - char** outBuf, - size_t* outBytesLeft, - size_t* numSub) -{ - if (cd->converterType == CONVERTER_ICONV) { - return myconv_iconv(cd,inBuf,inBytesLeft,outBuf,outBytesLeft,numSub); - } else if (cd->converterType == CONVERTER_DMAP) { - return myconv_dmap(cd,inBuf,inBytesLeft,outBuf,outBytesLeft,numSub); - } - return -1; -} - -inline char * converterName(int32_t type) -{ - if (type == CONVERTER_ICONV) - return "iconv"; - else if (type == CONVERTER_DMAP) - return "dmap"; - - return "?????"; -} -#else -#define myconv(a,b,c,d,e,f) \ -(((a)->converterType == CONVERTER_ICONV)? myconv_iconv((a),(b),(c),(d),(e),(f)): (((a)->converterType == CONVERTER_DMAP)? myconv_dmap((a),(b),(c),(d),(e),(f)): -1)) - - -#define converterName(a) \ -(((a) == CONVERTER_ICONV)? "iconv": ((a) == CONVERTER_DMAP)? "dmap": "?????") -#endif - -void initMyconv(); -void cleanupMyconv(); - -#endif diff --git a/storage/ibmdb2i/db2i_rir.cc b/storage/ibmdb2i/db2i_rir.cc deleted file mode 100644 index 091c4d98383..00000000000 --- a/storage/ibmdb2i/db2i_rir.cc +++ /dev/null @@ -1,686 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. 
- (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#include "ha_ibmdb2i.h" - -/* Helper function for records_in_range. - Input: Bitmap of used key parts. - Output: Number of used key parts. */ - -static inline int getKeyCntFromMap(key_part_map keypart_map) -{ - int cnt = 0; - while (keypart_map) - { - keypart_map = keypart_map >> 1; - cnt++; - } - return (cnt); -} - -/** - @brief - Given a starting key and an ending key, estimate the number of rows that - will exist between the two keys. - - INPUT - inx Index to use - min_key Min key. Is NULL if no min range - max_key Max key. Is NULL if no max range - - NOTES - min_key.flag can have one of the following values: - HA_READ_KEY_EXACT Include the key in the range - HA_READ_AFTER_KEY Don't include key in range - - max_key.flag can have one of the following values: - HA_READ_BEFORE_KEY Don't include key in range - HA_READ_AFTER_KEY Include all 'end_key' values in the range - - RETURN - HA_POS_ERROR Error or the storage engine cannot estimate the number of rows - 1 There are no matching keys in the given range - n > 0 There are approximately n rows in the range -*/ -ha_rows ha_ibmdb2i::records_in_range(uint inx, - key_range *min_key, - key_range *max_key) -{ - DBUG_ENTER("ha_ibmdb2i::records_in_range"); - int rc = 0; // Return code - ha_rows rows = 0; // Row count returned to caller of this method - uint32 spcLen; // Length of space passed to DB2 - uint32 keyCnt; // Number of fields in the key composite - uint32 literalCnt = 0; // Number of literals - uint32 boundsOff; // Offset from beginning of space to range bounds - uint32 litDefOff; // Offset from beginning of space to literal definitions - uint32 literalsOff; // Offset from beginning of space to literal values - uint32 cutoff = 0; // Early exit cutoff (currently not used) - uint64 recCnt; // Row count from DB2 - uint16 rtnCode; // Return code from DB2 - Bounds* boundsPtr; // Pointer to a pair of range bounds - Bound* boundPtr; // Pointer to a single (high or low) range bound - LitDef* litDefPtr; // Pointer to a literal definition - char* literalsPtr; // Pointer to the start of all literal values - char* literalPtr; // Pointer to the start of this literal value - char* tempPtr; // Temporary pointer - char* tempMinPtr; // Temporary pointer into min_key - int minKeyCnt = 0; // Number of fields in the min_key composite - int maxKeyCnt = 0; // Number of fields in the max_key composite - size_t tempLen = 0; // Temporary length - uint16 DB2FieldWidth = 0; // DB2 field width - uint32 workFieldLen = 0; // Length of workarea needed for CCSID conversions - bool overrideInclusion; // Indicator for inclusion/exclusion - char* endOfLiteralPtr; // Pointer 
to the end of this literal - char* endOfMinPtr; // Pointer to end of min_key - uint16 endByte = 0; // End byte of char or graphic literal (padding not included) - bool reuseLiteral; // Indicator that hi and lo bounds use same literal - char* minPtr = NULL; // Work pointer for traversing min_key - char* maxPtr = NULL; // Work pointer for traversing max_key - /* - Handle the special case of 'x < null' anywhere in the key range. There are - no values less than null, but return 1 so that MySQL does not assume - the empty set for the query. - */ - if (min_key != NULL && max_key != NULL && - min_key->flag == HA_READ_AFTER_KEY && max_key->flag == HA_READ_BEFORE_KEY && - min_key->length == max_key->length && - (memcmp((uchar*)min_key->key,(uchar*)max_key->key,min_key->length)==0)) - { - DBUG_PRINT("ha_ibmdb2i::records_in_range",("Estimate 1 row for key %d; special case: < null", inx)); - DBUG_RETURN((ha_rows) 1 ); - } - /* - Determine the number of fields in the key composite. - */ - - if (min_key) - { - minKeyCnt = getKeyCntFromMap(min_key->keypart_map); - minPtr = (char*)min_key->key; - } - if (max_key) - { - maxKeyCnt = getKeyCntFromMap(max_key->keypart_map); - maxPtr = (char*)max_key->key; - } - keyCnt = maxKeyCnt >= minKeyCnt ? maxKeyCnt : minKeyCnt; - - /* - Handle the special case where MySQL does not pass either a min or max - key range. In this case, set the key count to 1 (knowing that there - is at least one key field) to flow through and create one bounds structure. - When both the min and max key ranges are nil, the bounds structure will - specify positive and negative infinity and DB2 will estimate the total - number of rows. */ - - if (keyCnt == 0) - keyCnt = 1; - - /* - Allocate the space needed to pass range information to DB2. The - space must be large enough to store the following: - - one pair of bounds (high and low) per field in the key composite - - one literal definition per literal value - - the literal values - - work area for literal CCSID conversions - Since we don't know yet how many of these structures are needed, - allocate enough space for the maximum that we will possibly need. - The workarea for the literal conversion must be big enough to hold the - largest of the DB2 key fields. - */ - KEY& curKey = table->key_info[inx]; - - for (int i = 0; i < keyCnt; i++) - { - DB2FieldWidth = - db2Table->db2Field(curKey.key_part[i].field->field_index).getByteLengthInRecord(); - if (DB2FieldWidth > workFieldLen) - workFieldLen = DB2FieldWidth; // Get length of largest DB2 field - tempLen = tempLen + DB2FieldWidth; // Tally the DB2 field lengths - } - spcLen = (sizeof(Bounds)*keyCnt) + (sizeof(LitDef)*keyCnt*2) + (tempLen*2) + workFieldLen; - - ValidatedPointer spcPtr(spcLen); // Pointer to space passed to DB2 - memset(spcPtr, 0, spcLen); // Clear the allocated space - /* - Set addressability to the various sections of the DB2 interface space. 
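A worked sketch of the sizing arithmetic just described, using two hypothetical key fields of 4 and 20 bytes; the Bounds and LitDef sizes below are made-up placeholders (the real layouts are defined by QMYSE), and the function is purely illustrative:

#include <cstdio>

struct Bounds { char pad[80]; };   // placeholder size only
struct LitDef { char pad[16]; };   // placeholder size only

int main()
{
  const unsigned keyCnt = 2;
  const unsigned fieldWidth[2] = {4, 20};   // hypothetical DB2 key field widths

  size_t tempLen = 0, workFieldLen = 0;
  for (unsigned i = 0; i < keyCnt; i++)
  {
    if (fieldWidth[i] > workFieldLen)
      workFieldLen = fieldWidth[i];         // widest field sizes the conversion work area
    tempLen += fieldWidth[i];               // running total of the DB2 key field widths
  }

  // One pair of bounds per key part, two literal definitions per key part
  // (low and high), room for two copies of the literal values, plus the
  // CCSID-conversion work area.
  size_t spcLen = sizeof(Bounds) * keyCnt +
                  sizeof(LitDef) * keyCnt * 2 +
                  tempLen * 2 +
                  workFieldLen;

  printf("spcLen = %zu\n", spcLen);         // 160 + 64 + 48 + 20 = 292
  return 0;
}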
- */ - boundsOff = 0; // Range bounds are at the start of the space - litDefOff = sizeof(Bounds) * keyCnt; // Literal defs follow all the range bounds - literalsOff = litDefOff + (sizeof(LitDef) * keyCnt * 2); // Literal values are last - boundsPtr = (Bounds_t*)(void*)spcPtr; // Address first bounds structure - tempPtr = (char*)((char*)spcPtr + litDefOff); - litDefPtr = (LitDef_t*)tempPtr; // Address first literal definition - tempPtr = (char*)((char*)spcPtr + literalsOff); - literalsPtr = (char*)tempPtr; // Address start of literal values - literalPtr = literalsPtr; // Address first literal value - /* - For each key part, build the low (min) and high (max) DB2 range bounds. - If literals are specified in the MySQL range, build DB2 literal - definitions and store the literal values for access by DB2. - - If no value is specified for a key part, assume infinity. Negative - infinity will cause processing to start at the first index entry. - Positive infinity will cause processing to end at the last index entry. - When infinity is specified in a bound, inclusion/exclusion and position - are ignored, and there is no literal definition or literal value for - the bound. - - If the keypart value is null, the null indicator is set in the range - bound and the other fields in the bound are ignored. When the bound is - null, only index entries with the null value will be included in the - estimate. If one bound is null, both bounds must be null. When the bound - is not null, the data offset and length must be set, and the literal - value stored for access by DB2. - */ - for (int partsInUse = 0; partsInUse < keyCnt; ++partsInUse) - { - Field *field= curKey.key_part[partsInUse].field; - overrideInclusion = false; - reuseLiteral = false; - endOfLiteralPtr = NULL; - /* - Build the low bound for the key range. - */ - if ((partsInUse + 1) > minKeyCnt) // if no min_key info for this part - boundsPtr->LoBound.Infinity[0] = QMY_NEG_INFINITY; // select...where 3 between x and y - else - { - if ((curKey.key_part[partsInUse].null_bit) && (char*)minPtr[0]) - { // min_key is null - if (max_key == NULL || - ((partsInUse + 1) > maxKeyCnt)) // select...where x='ab' and y=null and z != 'c' - boundsPtr->LoBound.Infinity[0] = QMY_NEG_INFINITY; // select...where x not null or - // select...where x > null - else // max_key is not null - { - if (min_key->flag == HA_READ_KEY_EXACT) - boundsPtr->LoBound.IsNull[0] = QMY_YES; // select...where x is null - else - { - if ((char*)maxPtr[0]) - boundsPtr->LoBound.IsNull[0] = QMY_YES; // select...where a = null and b < 5 (max-before) - // select...where a='a' and b is null and c !='a' (max-after) - else - boundsPtr->LoBound.Infinity[0] = QMY_NEG_INFINITY; // select...where x < y - } - } // end min_key is null - } - else // min_key is not null - { - if (literalCnt) litDefPtr = litDefPtr + 1; - literalCnt = literalCnt + 1; - boundsPtr->LoBound.Position = literalCnt; - /* - Determine inclusion or exclusion. - */ - if (min_key->flag == HA_READ_KEY_EXACT || //select...where a like 'this%' - - /* An example for the following conditions is 'select...where a = 5 and b > null'. 
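To keep the three mutually exclusive low-bound encodings described above straight (negative infinity, null, or a stored literal), here is a compact stand-alone sketch; the structure layout, the QMY_* values, and the function name are simplified stand-ins rather than the real QMYSE definitions:

#include <cstdio>

const char QMY_NEG_INFINITY = 'I';   // stand-in value
const char QMY_YES          = 'Y';   // stand-in value

struct LoBound { char Infinity[1]; char IsNull[1]; unsigned Position; };

// No key value -> negative infinity (start at the first index entry);
// a NULL key value -> null bound (only NULL entries are counted);
// otherwise -> point the bound at the next stored literal.
void encodeLowBound(LoBound& b, bool haveKeyValue, bool keyIsNull, unsigned& literalCnt)
{
  if (!haveKeyValue)
    b.Infinity[0] = QMY_NEG_INFINITY;
  else if (keyIsNull)
    b.IsNull[0] = QMY_YES;
  else
    b.Position = ++literalCnt;
}

int main()
{
  unsigned literalCnt = 0;
  LoBound b = {{0}, {0}, 0};
  encodeLowBound(b, true, false, literalCnt);
  printf("low bound uses literal #%u\n", b.Position);
  return 0;
}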
*/ - - (max_key && - (memcmp((uchar*)minPtr,(uchar*)maxPtr, - curKey.key_part[partsInUse].store_length)==0))) - - { - if ((min_key->flag != HA_READ_KEY_EXACT) || - (max_key && - (memcmp((uchar*)minPtr,(uchar*)maxPtr, - curKey.key_part[partsInUse].store_length)==0))) - overrideInclusion = true; // Need inclusion for both min and max - } - else - boundsPtr->LoBound.Embodiment[0] = QMY_EXCLUSION; - litDefPtr->FieldNbr = field->field_index + 1; - DB2Field& db2Field = db2Table->db2Field(field->field_index); - litDefPtr->DataType = db2Field.getType(); - /* - Convert the literal to DB2 format - */ - if ((field->type() != MYSQL_TYPE_BIT) && // Don't do conversion on BIT data - (field->charset() != &my_charset_bin) && // Don't do conversion on BINARY data - (litDefPtr->DataType == QMY_CHAR || - litDefPtr->DataType == QMY_VARCHAR || - litDefPtr->DataType == QMY_GRAPHIC || - litDefPtr->DataType == QMY_VARGRAPHIC)) - { - // Most of the code is required by the considerable wrangling needed - // to prepare partial keys for use by DB2 - // 1. UTF8 (CCSID 1208) data can be copied across unmodified if it is - // utf8_bin. Otherwise, we need to convert the min and max - // characters into the min and max characters employed - // by the DB2 sort sequence. This is complicated by the fact that - // the character widths are not always equal. - // 2. Likewise, UCS2 (CCSID 13488) data can be copied across unmodified - // if it is ucs2_bin or ucs2_general_ci. Otherwise, we need to - // convert the min and max characters into the min and max characters - // employed by the DB2 sort sequence. - // 3. All other data will use standard iconv conversions. If an - // unconvertible character is encountered, we assume it is the min - // char and fill the remainder of the DB2 key with 0s. This may not - // always be accurate, but it is probably sufficient for range - // estimations. - const char* keyData = minPtr+((curKey.key_part[partsInUse].null_bit)? 
1 : 0); - char* db2Data = literalPtr; - uint16 outLen = db2Field.getByteLengthInRecord(); - uint16 inLen; - if (litDefPtr->DataType == QMY_VARCHAR || - litDefPtr->DataType == QMY_VARGRAPHIC) - { - inLen = *(uint8*)keyData + ((*(uint8*)(keyData+1)) << 8); - keyData += 2; - outLen -= sizeof(uint16); - db2Data += sizeof(uint16); - } - else - { - inLen = field->max_display_length(); - } - - size_t convertedBytes = 0; - if (db2Field.getCCSID() == 1208) - { - DBUG_ASSERT(inLen <= outLen); - if (strcmp(field->charset()->name, "utf8_bin")) - { - const char* end = keyData+inLen; - const char* curKey = keyData; - char* curDB2 = db2Data; - uint32 min = field->charset()->min_sort_char; - while ((curKey < end) && (curDB2 < db2Data+outLen-3)) - { - my_wc_t temp; - int len = field->charset()->cset->mb_wc(field->charset(), - &temp, - (const uchar*)curKey, - (const uchar*)end); - if (temp != min) - { - DBUG_ASSERT(len <= 3); - switch (len) - { - case 3: *(curDB2+2) = *(curKey+2); - case 2: *(curDB2+1) = *(curKey+1); - case 1: *(curDB2) = *(curKey); - } - curDB2 += len; - } - else - { - *(curDB2++) = 0xEF; - *(curDB2++) = 0xBF; - *(curDB2++) = 0xBF; - } - curKey += len; - } - convertedBytes = curDB2 - db2Data; - } - else - { - memcpy(db2Data, keyData, inLen); - convertedBytes = inLen; - } - rc = 0; - } - else if (db2Field.getCCSID() == 13488) - { - DBUG_ASSERT(inLen <= outLen); - if (strcmp(field->charset()->name, "ucs2_bin") && - strcmp(field->charset()->name, "ucs2_general_ci")) - { - const char* end = keyData+inLen; - const uint16* curKey = (uint16*)keyData; - uint16* curDB2 = (uint16*)db2Data; - uint16 min = field->charset()->min_sort_char; - while (curKey < (uint16*)end) - { - if (*curKey != min) - *curDB2 = *curKey; - else - *curDB2 = 0xFFFF; - ++curKey; - ++curDB2; - } - } - else - { - memcpy(db2Data, keyData, inLen); - } - convertedBytes = inLen; - rc = 0; - } - else - { - rc = convertFieldChars(toDB2, - field->field_index, - keyData, - db2Data, - inLen, - outLen, - &convertedBytes, - true); - - if (rc == DB2I_ERR_ILL_CHAR) - { - // If an illegal character is encountered, we fill the remainder - // of the key with 0x00. This was implemented as a corollary to - // Bug#45012, though it should probably remain even after that - // bug is fixed. - memset(db2Data+convertedBytes, 0x00, outLen-convertedBytes); - convertedBytes = outLen; - rc = 0; - } - } - - if (!rc && - (litDefPtr->DataType == QMY_VARGRAPHIC || - litDefPtr->DataType == QMY_VARCHAR)) - { - *(uint16*)(db2Data-sizeof(uint16)) = - convertedBytes / (litDefPtr->DataType == QMY_VARGRAPHIC ? 2 : 1); - } - - } - else // Non-character fields - { - rc = convertMySQLtoDB2(field, - db2Field, - literalPtr, - (uchar*)minPtr+((curKey.key_part[partsInUse].null_bit)? 1 : 0)); - } - - if (rc != 0) break; - litDefPtr->Offset = (uint32_t)(literalPtr - literalsPtr); - litDefPtr->Length = db2Field.getByteLengthInRecord(); - literalPtr = literalPtr + litDefPtr->Length; // Bump pointer for next literal - } - /* If there is a max_key value for this field, and if the max_key value is - the same as the min_key value, then the low bound literal can be reused - for the high bound literal. This eliminates the overhead of copying and - converting the same value twice. */ - if (max_key && ((partsInUse + 1) <= maxKeyCnt) && - (memcmp((uchar*)minPtr,(uchar*)maxPtr, - curKey.key_part[partsInUse].store_length)==0 || endOfLiteralPtr)) - reuseLiteral = true; - minPtr += curKey.key_part[partsInUse].store_length; - } - /* - Build the high bound for the key range. 
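The inLen computation above (and the matching one in the high-bound code that follows) decodes the 2-byte, low-byte-first length prefix placed in front of VARCHAR/VARGRAPHIC values in the key image. A minimal stand-alone illustration, with made-up bytes:

#include <cstdio>
#include <cstdint>

int main()
{
  // Key image for a VARCHAR key part: 2-byte little-endian length, then data.
  const unsigned char keyData[] = {0x05, 0x00, 'h', 'e', 'l', 'l', 'o'};

  // Same arithmetic as above: low byte plus high byte shifted left by 8.
  uint16_t inLen = keyData[0] + (keyData[1] << 8);

  printf("declared length = %u, first data byte = '%c'\n", (unsigned)inLen, keyData[2]);
  return 0;
}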
- */ - if (max_key == NULL || ((partsInUse + 1) > maxKeyCnt)) - boundsPtr->HiBound.Infinity[0] = QMY_POS_INFINITY; - else - { - if ((curKey.key_part[partsInUse].null_bit) && (char*)maxPtr[0]) - { - if (min_key == NULL) - boundsPtr->HiBound.Infinity[0] = QMY_POS_INFINITY; - else - boundsPtr->HiBound.IsNull[0] = QMY_YES; // select...where x is null - } - else // max_key field is not null - { - if (boundsPtr->LoBound.IsNull[0] == QMY_YES) // select where x < 10 or x is null - { - rc = HA_POS_ERROR; - break; - } - if (!reuseLiteral) - { - if (literalCnt) - litDefPtr = litDefPtr + 1; - literalCnt = literalCnt + 1; - litDefPtr->FieldNbr = field->field_index + 1; - DB2Field& db2Field = db2Table->db2Field(field->field_index); - litDefPtr->DataType = db2Field.getType(); - /* - Convert the literal to DB2 format - */ - if ((field->type() != MYSQL_TYPE_BIT) && // Don't do conversion on BIT data - (field->charset() != &my_charset_bin) && // Don't do conversion on BINARY data - (litDefPtr->DataType == QMY_CHAR || - litDefPtr->DataType == QMY_VARCHAR || - litDefPtr->DataType == QMY_GRAPHIC || - litDefPtr->DataType == QMY_VARGRAPHIC)) - { - // We need to handle char fields in a special way in order to account - // for partial keys. Refer to the note above for a description of the - // basic design. - char* keyData = maxPtr+((curKey.key_part[partsInUse].null_bit)? 1 : 0); - char* db2Data = literalPtr; - uint16 outLen = db2Field.getByteLengthInRecord(); - uint16 inLen; - if (litDefPtr->DataType == QMY_VARCHAR || - litDefPtr->DataType == QMY_VARGRAPHIC) - { - inLen = *(uint8*)keyData + ((*(uint8*)(keyData+1)) << 8); - keyData += 2; - outLen -= sizeof(uint16); - db2Data += sizeof(uint16); - } - else - { - inLen = field->max_display_length(); - } - - size_t convertedBytes; - if (db2Field.getCCSID() == 1208) - { - if (strcmp(field->charset()->name, "utf8_bin")) - { - const char* end = keyData+inLen; - const char* curKey = keyData; - char* curDB2 = db2Data; - uint32 max = field->charset()->max_sort_char; - while (curKey < end && (curDB2 < db2Data+outLen-3)) - { - my_wc_t temp; - int len = field->charset()->cset->mb_wc(field->charset(), &temp, (const uchar*)curKey, (const uchar*)end); - if (temp != max) - { - DBUG_ASSERT(len <= 3); - switch (len) - { - case 3: *(curDB2+2) = *(curKey+2); - case 2: *(curDB2+1) = *(curKey+1); - case 1: *(curDB2) = *(curKey); - } - curDB2 += len; - } - else - { - *(curDB2++) = 0xE4; - *(curDB2++) = 0xB6; - *(curDB2++) = 0xBF; - } - curKey += len; - } - convertedBytes = curDB2 - db2Data; - } - else - { - DBUG_ASSERT(inLen <= outLen); - memcpy(db2Data, keyData, inLen); - convertedBytes = inLen; - } - rc = 0; - } - else if (db2Field.getCCSID() == 13488) - { - if (strcmp(field->charset()->name, "ucs2_bin") && - strcmp(field->charset()->name, "ucs2_general_ci")) - { - char* end = keyData+inLen; - uint16* curKey = (uint16*)keyData; - uint16* curDB2 = (uint16*)db2Data; - uint16 max = field->charset()->max_sort_char; - while (curKey < (uint16*)end) - { - if (*curKey != max) - *curDB2 = *curKey; - else - *curDB2 = 0x4DBF; - ++curKey; - ++curDB2; - } - } - else - { - memcpy(db2Data, keyData, outLen); - } - rc = 0; - } - else - { - size_t substituteChars = 0; - rc = convertFieldChars(toDB2, - field->field_index, - keyData, - db2Data, - inLen, - outLen, - &convertedBytes, - true, - &substituteChars); - - if (rc == DB2I_ERR_ILL_CHAR) - { - // If an illegal character is encountered, we fill the remainder - // of the key with 0xFF. 
This was implemented to work around - // Bug#45012, though it should probably remain even after that - // bug is fixed. - memset(db2Data+convertedBytes, 0xFF, outLen-convertedBytes); - rc = 0; - } - else if ((substituteChars && - (litDefPtr->DataType == QMY_VARCHAR || - litDefPtr->DataType == QMY_CHAR)) || - strcmp(field->charset()->name, "cp1251_bulgarian_ci") == 0) - { - // When iconv translates the max_sort_char with a substitute - // character, we have no way to know whether this affects - // the sort order of the key. Therefore, to be safe, when - // we know that substitute characters have been used in a - // single-byte string, we traverse the translated key - // in reverse, replacing substitue characters with 0xFF, which - // always sorts with the greatest weight in DB2 sort sequences. - // cp1251_bulgarian_ci is also handled this way because the - // max_sort_char is a control character which does not sort - // equivalently in DB2. - DBUG_ASSERT(inLen == outLen); - char* tmpKey = keyData + inLen - 1; - char* tmpDB2 = db2Data + outLen - 1; - while (*tmpKey == field->charset()->max_sort_char && - *tmpDB2 != 0xFF) - { - *tmpDB2 = 0xFF; - --tmpKey; - --tmpDB2; - } - } - } - - if (!rc && - (litDefPtr->DataType == QMY_VARGRAPHIC || - litDefPtr->DataType == QMY_VARCHAR)) - { - *(uint16*)(db2Data-sizeof(uint16)) = - outLen / (litDefPtr->DataType == QMY_VARGRAPHIC ? 2 : 1); - } - } - else - { - rc = convertMySQLtoDB2(field, - db2Field, - literalPtr, - (uchar*)maxPtr+((curKey.key_part[partsInUse].null_bit)? 1 : 0)); - } - if (rc != 0) break; - litDefPtr->Offset = (uint32_t)(literalPtr - literalsPtr); - litDefPtr->Length = db2Field.getByteLengthInRecord(); - literalPtr = literalPtr + litDefPtr->Length; // Bump pointer for next literal - } - boundsPtr->HiBound.Position = literalCnt; - if (max_key->flag == HA_READ_BEFORE_KEY && !overrideInclusion) - boundsPtr->HiBound.Embodiment[0] = QMY_EXCLUSION; - } - maxPtr += curKey.key_part[partsInUse].store_length; - } - /* - Bump to the next field in the key composite. - */ - - if ((partsInUse+1) < keyCnt) - boundsPtr = boundsPtr + 1; - } - - /* - Call DB2 to estimate the number of rows in the key range. - */ - if (rc == 0) - { - rc = db2i_ileBridge::getBridgeForThread()->recordsInRange((indexHandles[inx] ? indexHandles[inx] : db2Table->indexFile(inx)->getMasterDefnHandle()), - spcPtr, - keyCnt, - literalCnt, - boundsOff, - litDefOff, - literalsOff, - cutoff, - (uint32_t)(literalPtr - (char*)spcPtr), - endByte, - &recCnt, - &rtnCode); - } - /* - Set the row count and return. - Beware that if this method returns a zero row count, MySQL assumes the - result set for the query is zero; never return a zero row count. - */ - if ((rc == 0) && (rtnCode == QMY_SUCCESS || rtnCode == QMY_EARLY_EXIT)) - { - rows = recCnt ? (ha_rows)recCnt : 1; - } - - rows = (rows > 0 ? 
rows : HA_POS_ERROR); - - setIndexReadEstimate(inx, rows); - - DBUG_PRINT("ha_ibmdb2i::recordsInRange",("Estimate %d rows for key %d", uint32(rows), inx)); - - DBUG_RETURN(rows); -} diff --git a/storage/ibmdb2i/db2i_safeString.h b/storage/ibmdb2i/db2i_safeString.h deleted file mode 100644 index e353316c8fc..00000000000 --- a/storage/ibmdb2i/db2i_safeString.h +++ /dev/null @@ -1,98 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - - -#ifndef DB2I_SAFESTRING_H -#define DB2I_SAFESTRING_H - - -#include -#include - -/** - @class SafeString - - This class was designed to provide safe, but lightweight, concatenation - operations C strings inside pre-allocated buffers. 
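A short usage sketch for the class defined below (the buffer size, text, and function name are hypothetical): the caller supplies the buffer, appends silently stop at capacity, and overflowed() reports whether the buffer was filled to the point where output may have been truncated.

#include <cstdio>
#include "db2i_safeString.h"   // assumes the header below is available

void buildMessage()
{
  char buf[32];
  SafeString msg(buf, sizeof(buf));

  msg.strcat("key: ");        // append a C string
  msg.strcat('x');            // append a single character
  msg.strncat("abcdef", 3);   // append only the first 3 bytes

  if (msg.overflowed())
    printf("output was truncated\n");
  else
    printf("%s\n", msg.ptr());
}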
-*/ -class SafeString -{ -public: - SafeString(char* buffer, size_t size) : - allocSize(size), curPos(0), buf(buffer) - { - DBUG_ASSERT(size > 0); - buf[allocSize - 1] = 0xFF; // Set an overflow indicator - } - - char* ptr() { return buf; } - operator char*() { return buf; } - - SafeString& strcat(const char* str) - { - return this->strncat(str, strlen(str)); - } - - SafeString& strcat(char one) - { - if (curPos < allocSize - 2) - { - buf[curPos++] = one; - } - buf[curPos] = 0; - - return *this; - } - - SafeString& strncat(const char* str, size_t len) - { - uint64 amountToCopy = min((allocSize-1) - curPos, len); - memcpy(buf + curPos, str, amountToCopy); - curPos += amountToCopy; - buf[curPos] = 0; - return *this; - } - - bool overflowed() const { return (buf[allocSize - 1] == 0);} - -private: - char* buf; - uint64 curPos; - size_t allocSize; -}; - - -#endif diff --git a/storage/ibmdb2i/db2i_sqlStatementStream.cc b/storage/ibmdb2i/db2i_sqlStatementStream.cc deleted file mode 100644 index 92a8b03fd00..00000000000 --- a/storage/ibmdb2i/db2i_sqlStatementStream.cc +++ /dev/null @@ -1,86 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -#include "db2i_sqlStatementStream.h" -#include "as400_types.h" - -/** - Add a statement to the statement stream, allocating additional memory as needed. 
- - @parm stmt The statement text - @parm length The length of the statement text - @parm fileSortSequence The DB2 sort sequence identifier, in EBCDIC - @parm fileSortSequenceLibrary The DB2 sort sequence library, in EBCDIC - - @return Reference to this object -*/ -SqlStatementStream& SqlStatementStream::addStatementInternal(const char* stmt, - uint32 length, - const char* fileSortSequence, - const char* fileSortSequenceLibrary) -{ - uint32 storageNeeded = length + sizeof(StmtHdr_t); - storageNeeded = (storageNeeded + 3) & ~3; // We have to be 4-byte aligned. - if (storageNeeded > storageRemaining()) - { - // We overallocate new storage to reduce number of times reallocation is - // needed. - int newSize = curSize + 2 * storageNeeded; - DBUG_PRINT("SqlStatementStream::addStatementInternal", - ("PERF: Had to realloc! Old size=%d. New size=%d", curSize, newSize)); - char* old_space = block; - char* new_space = (char*)getNewSpace(newSize); - memcpy(new_space, old_space, curSize); - ptr = new_space + (ptr - old_space); - curSize = newSize; - } - - DBUG_ASSERT((address64_t)ptr % 4 == 0); - - memcpy(((StmtHdr_t*)ptr)->SrtSeqNam, - fileSortSequence, - sizeof(((StmtHdr_t*)ptr)->SrtSeqNam)); - memcpy(((StmtHdr_t*)ptr)->SrtSeqSch, - fileSortSequenceLibrary, - sizeof(((StmtHdr_t*)ptr)->SrtSeqSch)); - ((StmtHdr_t*)ptr)->Length = length; - memcpy(ptr + sizeof(StmtHdr_t), stmt, length); - - ptr += storageNeeded; - ++statements; - - return *this; -} diff --git a/storage/ibmdb2i/db2i_sqlStatementStream.h b/storage/ibmdb2i/db2i_sqlStatementStream.h deleted file mode 100644 index 11db41a6c5d..00000000000 --- a/storage/ibmdb2i/db2i_sqlStatementStream.h +++ /dev/null @@ -1,151 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. 
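For reference, the (storageNeeded + 3) & ~3 expression in addStatementInternal above rounds each statement's size up to the next multiple of four, since every statement header must be 4-byte aligned. A minimal stand-alone check with arbitrary sample sizes (the helper name is illustrative):

#include <cstdio>

// Round n up to the next multiple of 4, as done for each appended statement.
static unsigned roundUpTo4(unsigned n) { return (n + 3) & ~3u; }

int main()
{
  const unsigned samples[] = {1, 4, 5, 18};
  for (unsigned i = 0; i < 4; i++)
    printf("%u -> %u\n", samples[i], roundUpTo4(samples[i]));   // 4, 4, 8, 20
  return 0;
}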
-*/ - - -#ifndef DB2I_SQLSTATEMENTSTREAM_H -#define DB2I_SQLSTATEMENTSTREAM_H - -#include "db2i_charsetSupport.h" -#include "qmyse.h" - -/** - @class SqlStatementStream - - This class handles building the stream of SQL statements expected by the - QMY_EXECUTE_IMMEDIATE and QMY_PREPARE_OPEN_CURSOR APIs. - Memory allocation is handled internally. -*/ -class SqlStatementStream -{ - public: - /** - ctor to be used when multiple strings may be appended. - */ - SqlStatementStream(uint32 firstStringSize) : statements(0) - { - curSize = firstStringSize + sizeof(StmtHdr_t); - curSize = (curSize + 3) & ~3; - ptr = (char*) getNewSpace(curSize); - if (ptr == NULL) - curSize = 0; - } - - /** - ctor to be used when only a single statement will be executed. - */ - SqlStatementStream(const String& statement) : statements(0), block(NULL), curSize(0), ptr(0) - { - addStatement(statement); - } - - /** - ctor to be used when only a single statement will be executed. - */ - SqlStatementStream(const char* statement) : statements(0), block(NULL), curSize(0), ptr(0) - { - addStatement(statement); - } - - /** - Append an SQL statement, specifiying the DB2 sort sequence under which - the statement should be executed. This is important for CREATE TABLE - and CREATE INDEX statements. - */ - SqlStatementStream& addStatement(const String& append, const char* fileSortSequence, const char* fileSortSequenceLibrary) - { - char sortSeqEbcdic[10]; - char sortSeqLibEbcdic[10]; - - DBUG_ASSERT(strlen(fileSortSequence) <= 10 && - strlen(fileSortSequenceLibrary) <= 10); - memset(sortSeqEbcdic, 0x40, 10); - memset(sortSeqLibEbcdic, 0x40, 10); - convToEbcdic(fileSortSequence, sortSeqEbcdic, strlen(fileSortSequence)); - convToEbcdic(fileSortSequenceLibrary, sortSeqLibEbcdic, strlen(fileSortSequenceLibrary)); - - return addStatementInternal(append.ptr(), append.length(), sortSeqEbcdic, sortSeqLibEbcdic); - } - - /** - Append an SQL statement using default (*HEX) sort sequence. - */ - SqlStatementStream& addStatement(const String& append) - { - const char splatHEX[] = {0x5C, 0xC8, 0xC5, 0xE7, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40}; // *HEX - const char blanks[] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40}; // - - return addStatementInternal(append.ptr(), append.length(), splatHEX, blanks); - } - - /** - Append an SQL statement using default (*HEX) sort sequence. - */ - SqlStatementStream& addStatement(const char* stmt) - { - const char splatHEX[] = {0x5C, 0xC8, 0xC5, 0xE7, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40}; // *HEX - const char blanks[] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40}; // - - return addStatementInternal(stmt, strlen(stmt), splatHEX, blanks); - } - - char* getPtrToData() const { return block; } - uint32 getStatementCount() const { return statements; } - private: - SqlStatementStream& addStatementInternal(const char* stmt, - uint32 length, - const char* fileSortSequence, - const char* fileSortSequenceLibrary); - - uint32 storageRemaining() const - { - return (block == NULL ? 0 : curSize - (ptr - block)); - } - - char* getNewSpace(size_t size) - { - allocBase = (char*)sql_alloc(size + 15); - block = (char*)roundToQuadWordBdy(allocBase); - return block; - } - - uint32 curSize; // The size of the usable memory. - char* allocBase; // The allocated memory (with padding for aligment) - char* block; // The usable memory chunck (aligned for ILE) - char* ptr; // The current position within block. - uint32 statements; // The number of statements that have been appended. 
-}; - -#endif - diff --git a/storage/ibmdb2i/db2i_validatedPointer.h b/storage/ibmdb2i/db2i_validatedPointer.h deleted file mode 100644 index c4e31d1f11b..00000000000 --- a/storage/ibmdb2i/db2i_validatedPointer.h +++ /dev/null @@ -1,162 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - -#ifndef DB2I_VALIDATEDPOINTER_H -#define DB2I_VALIDATEDPOINTER_H - -#include "db2i_ileBridge.h" - -/** - @class ValidatedPointer - @brief Encapsulates a pointer registered for usage by the QMYSE APIs - - @details As a performance optimization, to prevent pointer validation each - time a particular pointer is thunked across to ILE, QMYSE allows us to - "register" a pointer such that it is validated once and then subsequently - referenced on QMYSE APIs by means of a handle value. This class should be - used to manage memory allocation/registration/unregistration of these - pointers. Using the alloc function guarantees that the resulting storage is - 16-byte aligned, a requirement for many pointers passed to QMYSE. 
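A hypothetical usage sketch for the template described above (the size parameter and function name are examples; assumes the db2i headers are available): the storage is allocated 16-byte aligned, registered once, usable as an ordinary pointer, convertible to an ILEMemHandle where an API wants the handle, and unregistered and freed by the destructor.

#include <cstddef>
#include <cstring>
#include "db2i_validatedPointer.h"   // assumes the header below is available

void sendSpaceToDB2(size_t spcLen)
{
  ValidatedPointer<char> spcPtr(spcLen);   // allocate aligned storage and register it
  memset(spcPtr, 0, spcLen);               // implicit conversion to char* / void*

  ILEMemHandle handle = spcPtr;            // implicit conversion to the registered handle
  (void)handle;                            // ...pass the handle to a QMYSE API here
}                                          // destructor unregisters and frees the storage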
-*/ -template -class ValidatedPointer -{ -public: - ValidatedPointer() : address(NULL), handle(NULL) {;} - - ValidatedPointer(size_t size) - { - alloc(size); - } - - ValidatedPointer(T* ptr) - { - assign(ptr); - } - - operator T*() - { - return address; - }; - - operator T*() const - { - return address; - }; - - operator void*() - { - return address; - }; - - operator ILEMemHandle() - { - return handle; - } - - void alloc(size_t size) - { - address = (T*)malloc_aligned(size); - if (address) - db2i_ileBridge::registerPtr(address, &handle); - mallocedHere = 1; - } - - void assign(T* ptr) - { - address = ptr; - db2i_ileBridge::registerPtr((void*)ptr, &handle); - mallocedHere = 0; - } - - void realloc(size_t size) - { - dealloc(); - alloc(size); - } - - void reassign(T* ptr) - { - dealloc(); - assign(ptr); - } - - void dealloc() - { - if (address) - { - db2i_ileBridge::unregisterPtr(handle); - - if (mallocedHere) - free_aligned((void*)address); - } - address = NULL; - handle = 0; - } - - ~ValidatedPointer() - { - dealloc(); - } - -private: - // Disable copy ctor and assignment operator, as these would break - // the registration guarantees provided by the class. - ValidatedPointer& operator= (const ValidatedPointer newVal); - ValidatedPointer(ValidatedPointer& newCopy); - - ILEMemHandle handle; - T* address; - char mallocedHere; -}; - - -/** - @class ValidatedObject - @brief This class allows users to instantiate and register a particular - object in a single step. -*/ -template -class ValidatedObject : public ValidatedPointer -{ - public: - ValidatedObject() : ValidatedPointer(&value) {;} - - T& operator= (const T newVal) { value = newVal; return value; } - - private: - T value; -}; -#endif diff --git a/storage/ibmdb2i/ha_ibmdb2i.cc b/storage/ibmdb2i/ha_ibmdb2i.cc deleted file mode 100644 index 39096be7848..00000000000 --- a/storage/ibmdb2i/ha_ibmdb2i.cc +++ /dev/null @@ -1,3359 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - - -/** - @file ha_ibmdb2i.cc - - @brief - The ha_ibmdb2i storage engine provides an interface from MySQL to IBM DB2 for i. - -*/ - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - -#include "ha_ibmdb2i.h" -#include "mysql_priv.h" -#include -#include "db2i_ileBridge.h" -#include "db2i_charsetSupport.h" -#include -#include "db2i_safeString.h" - -static const char __NOT_NULL_VALUE_EBCDIC = 0xF0; // '0' -static const char __NULL_VALUE_EBCDIC = 0xF1; // '1' -static const char __DEFAULT_VALUE_EBCDIC = 0xC4; // 'D' -static const char BlankASPName[19] = " "; -static const int DEFAULT_MAX_ROWS_TO_BUFFER = 4096; - -static const char SAVEPOINT_PREFIX[] = {0xD4, 0xE8, 0xE2, 0xD7}; // MYSP (in EBCDIC) - -OSVersion osVersion; - - -// ================================================================ -// ================================================================ -// System variables -static char* ibmdb2i_rdb_name; -static MYSQL_SYSVAR_STR(rdb_name, ibmdb2i_rdb_name, - PLUGIN_VAR_MEMALLOC | PLUGIN_VAR_READONLY, - "The name of the RDB to use", - NULL, - NULL, - BlankASPName); - -static MYSQL_THDVAR_BOOL(transaction_unsafe, - 0, - "Disable support for commitment control", - NULL, - NULL, - FALSE); - -static MYSQL_THDVAR_UINT(lob_alloc_size, - 0, - "Baseline allocation for lob read buffer", - NULL, - NULL, - 2*1024*1024, - 64*1024, - 128*1024*1024, - 1); - -static MYSQL_THDVAR_UINT(max_read_buffer_size, - 0, - "Maximum size of buffers used for read-ahead.", - NULL, - NULL, - 1*1024*1024, - 32*1024, - 16*1024*1024, - 1); - -static MYSQL_THDVAR_UINT(max_write_buffer_size, - 0, - "Maximum size of buffers used for bulk writes.", - NULL, - NULL, - 8*1024*1024, - 32*1024, - 64*1024*1024, - 1); - -static MYSQL_THDVAR_BOOL(compat_opt_time_as_duration, - 0, - "Control how new TIME columns should be defined in DB2. 0=time-of-day (default), 1=duration.", - NULL, - NULL, - FALSE); - -static MYSQL_THDVAR_UINT(compat_opt_year_as_int, - 0, - "Control how new YEAR columns should be defined in DB2. 0=CHAR(4) (default), 1=SMALLINT.", - NULL, - NULL, - 0, - 0, - 1, - 1); - -static MYSQL_THDVAR_UINT(compat_opt_blob_cols, - 0, - "Control how new TEXT and BLOB columns should be defined in DB2. 0=CLOB/BLOB (default), 1=VARCHAR/VARBINARY", - NULL, - NULL, - 0, - 0, - 1, - 1); - -static MYSQL_THDVAR_UINT(compat_opt_allow_zero_date_vals, - 0, - "Allow substitute values to be used when storing a column with a 0000-00-00 date component. 0=No substitution (default), 1=Substitute '0001-01-01'", - NULL, - NULL, - 0, - 0, - 1, - 1); - -static MYSQL_THDVAR_BOOL(propagate_default_col_vals, - 0, - "Should DEFAULT column values be propagated to the DB2 table definition.", - NULL, - NULL, - TRUE); - -static my_bool ibmdb2i_assume_exclusive_use; -static MYSQL_SYSVAR_BOOL(assume_exclusive_use, ibmdb2i_assume_exclusive_use, - 0, - "Can MySQL assume that this process is the only one modifying the DB2 tables. 
", - NULL, - NULL, - FALSE); - -static MYSQL_THDVAR_BOOL(async_enabled, - 0, - "Should reads be done asynchronously when possible", - NULL, - NULL, - TRUE); - -static MYSQL_THDVAR_UINT(create_index_option, - 0, - "Control whether additional indexes are created. 0=No (default), 1=Create additional *HEX-based index", - NULL, - NULL, - 0, - 0, - 1, - 1); - -/* static MYSQL_THDVAR_UINT(discovery_mode, - 0, - "Unsupported", - NULL, - NULL, - 0, - 0, - 1, - 1); */ - -static uint32 ibmdb2i_system_trace; -static MYSQL_SYSVAR_UINT(system_trace_level, ibmdb2i_system_trace, - 0, - "Set system tracing level", - NULL, - NULL, - 0, - 0, - 63, - 1); - - -inline uint8 ha_ibmdb2i::getCommitLevel(THD* thd) -{ - if (!THDVAR(thd, transaction_unsafe)) - { - switch (thd_tx_isolation(thd)) - { - case ISO_READ_UNCOMMITTED: - return (accessIntent == QMY_READ_ONLY ? QMY_READ_UNCOMMITTED : QMY_REPEATABLE_READ); - case ISO_READ_COMMITTED: - return (accessIntent == QMY_READ_ONLY ? QMY_READ_COMMITTED : QMY_REPEATABLE_READ); - case ISO_REPEATABLE_READ: - return QMY_REPEATABLE_READ; - case ISO_SERIALIZABLE: - return QMY_SERIALIZABLE; - } - } - - return QMY_NONE; -} - -inline uint8 ha_ibmdb2i::getCommitLevel() -{ - return getCommitLevel(ha_thd()); -} - -//===================================================================== - -static handler *ibmdb2i_create_handler(handlerton *hton, - TABLE_SHARE *table, - MEM_ROOT *mem_root); -static void ibmdb2i_drop_database(handlerton *hton, char* path); -static int ibmdb2i_savepoint_set(handlerton *hton, THD* thd, void *sv); -static int ibmdb2i_savepoint_rollback(handlerton *hton, THD* thd, void *sv); -static int ibmdb2i_savepoint_release(handlerton *hton, THD* thd, void *sv); -static uint ibmdb2i_alter_table_flags(uint flags); - -handlerton *ibmdb2i_hton; -static bool was_ILE_inited; - -/* Tracks the number of open tables */ -static HASH ibmdb2i_open_tables; - -/* Mutex used to synchronize initialization of the hash */ -static pthread_mutex_t ibmdb2i_mutex; - - -/** - Create hash key for tracking open tables. 
-*/ - -static uchar* ibmdb2i_get_key(IBMDB2I_SHARE *share,size_t *length, - bool not_used __attribute__((unused))) -{ - *length=share->table_name_length; - return (uchar*) share->table_name; -} - - -int ibmdb2i_close_connection(handlerton* hton, THD *thd) -{ - DBUG_PRINT("ha_ibmdb2i::close_connection", ("Closing %d", thd->thread_id)); - db2i_ileBridge::getBridgeForThread(thd)->closeConnection(thd->thread_id); - db2i_ileBridge::destroyBridgeForThread(thd); - - return 0; -} - - -static int ibmdb2i_init_func(void *p) -{ - DBUG_ENTER("ibmdb2i_init_func"); - - utsname tempName; - uname(&tempName); - osVersion.v = atoi(tempName.version); - osVersion.r = atoi(tempName.release); - - was_ILE_inited = false; - ibmdb2i_hton= (handlerton *)p; - VOID(pthread_mutex_init(&ibmdb2i_mutex,MY_MUTEX_INIT_FAST)); - (void) hash_init(&ibmdb2i_open_tables,table_alias_charset,32,0,0, - (hash_get_key) ibmdb2i_get_key,0,0); - - ibmdb2i_hton->state= SHOW_OPTION_YES; - ibmdb2i_hton->create= ibmdb2i_create_handler; - ibmdb2i_hton->drop_database= ibmdb2i_drop_database; - ibmdb2i_hton->commit= ha_ibmdb2i::doCommit; - ibmdb2i_hton->rollback= ha_ibmdb2i::doRollback; - ibmdb2i_hton->savepoint_offset= 0; - ibmdb2i_hton->savepoint_set= ibmdb2i_savepoint_set; - ibmdb2i_hton->savepoint_rollback= ibmdb2i_savepoint_rollback; - ibmdb2i_hton->savepoint_release= ibmdb2i_savepoint_release; - ibmdb2i_hton->alter_table_flags=ibmdb2i_alter_table_flags; - ibmdb2i_hton->close_connection=ibmdb2i_close_connection; - - int rc; - - rc = initCharsetSupport(); - - if (!rc) - rc = db2i_ileBridge::setup(); - - if (!rc) - { - int nameLen = strlen(ibmdb2i_rdb_name); - for (int i = 0; i < nameLen; ++i) - { - ibmdb2i_rdb_name[i] = my_toupper(system_charset_info, (uchar)ibmdb2i_rdb_name[i]); - } - - rc = db2i_ileBridge::initILE(ibmdb2i_rdb_name, (uint16*)(((char*)&ibmdb2i_system_trace)+2)); - if (rc == 0) - { - was_ILE_inited = true; - } - } - - DBUG_RETURN(rc); -} - - -static int ibmdb2i_done_func(void *p) -{ - int error= 0; - DBUG_ENTER("ibmdb2i_done_func"); - - if (ibmdb2i_open_tables.records) - error= 1; - - if (was_ILE_inited) - db2i_ileBridge::exitILE(); - - db2i_ileBridge::takedown(); - - doneCharsetSupport(); - - hash_free(&ibmdb2i_open_tables); - pthread_mutex_destroy(&ibmdb2i_mutex); - - DBUG_RETURN(0); -} - - -IBMDB2I_SHARE *ha_ibmdb2i::get_share(const char *table_name, TABLE *table) -{ - IBMDB2I_SHARE *share; - uint length; - char *tmp_name; - - pthread_mutex_lock(&ibmdb2i_mutex); - length=(uint) strlen(table_name); - - if (!(share=(IBMDB2I_SHARE*) hash_search(&ibmdb2i_open_tables, - (uchar*)table_name, - length))) - { - if (!(share=(IBMDB2I_SHARE *) - my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), - &share, sizeof(*share), - &tmp_name, length+1, - NullS))) - { - pthread_mutex_unlock(&ibmdb2i_mutex); - return NULL; - } - - share->use_count=0; - share->table_name_length=length; - share->table_name=tmp_name; - strmov(share->table_name,table_name); - if (my_hash_insert(&ibmdb2i_open_tables, (uchar*) share)) - goto error; - thr_lock_init(&share->lock); - pthread_mutexattr_t mutexattr = MY_MUTEX_INIT_FAST; - pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init(&share->mutex, &mutexattr); - - share->db2Table = new db2i_table(table->s, table_name); - int32 rc = share->db2Table->initDB2Objects(table_name); - - if (rc) - { - delete share->db2Table; - hash_delete(&ibmdb2i_open_tables, (uchar*) share); - thr_lock_delete(&share->lock); - my_errno = rc; - goto error; - } - - memset(&share->cachedStats, 0, 
sizeof(share->cachedStats)); - } - share->use_count++; - pthread_mutex_unlock(&ibmdb2i_mutex); - - db2Table = share->db2Table; - - return share; - -error: - pthread_mutex_destroy(&share->mutex); - my_free((uchar*) share, MYF(0)); - pthread_mutex_unlock(&ibmdb2i_mutex); - - return NULL; -} - - - -int ha_ibmdb2i::free_share(IBMDB2I_SHARE *share) -{ - pthread_mutex_lock(&ibmdb2i_mutex); - if (!--share->use_count) - { - delete share->db2Table; - db2Table = NULL; - - hash_delete(&ibmdb2i_open_tables, (uchar*) share); - thr_lock_delete(&share->lock); - pthread_mutex_destroy(&share->mutex); - my_free(share, MYF(0)); - pthread_mutex_unlock(&ibmdb2i_mutex); - return 1; - } - pthread_mutex_unlock(&ibmdb2i_mutex); - - return 0; -} - -static handler* ibmdb2i_create_handler(handlerton *hton, - TABLE_SHARE *table, - MEM_ROOT *mem_root) -{ - return new (mem_root) ha_ibmdb2i(hton, table); -} - -static void ibmdb2i_drop_database(handlerton *hton, char* path) -{ - DBUG_ENTER("ha_ibmdb2i::ibmdb2i_drop_database"); - int rc = 0; - char queryBuffer[200]; - String query(queryBuffer, sizeof(queryBuffer), system_charset_info); - query.length(0); - query.append(STRING_WITH_LEN(" DROP SCHEMA \"")); - query.append(path+2, strchr(path+2, '/')-(path+2)); - query.append('"'); - - SqlStatementStream sqlStream(query); - - rc = db2i_ileBridge::getBridgeForThread()->execSQL(sqlStream.getPtrToData(), - sqlStream.getStatementCount(), - QMY_NONE, - FALSE, - TRUE); - DBUG_VOID_RETURN; -} - -inline static void genSavepointName(const void* sv, char* out) -{ - *(uint32*)out = *(uint32*)SAVEPOINT_PREFIX; - DBUG_ASSERT(sizeof(SAVEPOINT_PREFIX) == 4); - out += sizeof(SAVEPOINT_PREFIX); - - longlong2str((longlong)sv, out, 10); - while (*out) - { - out += 0xF0; - ++out; - } -} - - -/********************************************************************* -Sets a transaction savepoint. */ -static int ibmdb2i_savepoint_set(handlerton* hton, THD* thd, void* sv) -{ - DBUG_ENTER("ibmdb2i_savepoint_set"); - int rc = 0; - if (!THDVAR(thd ,transaction_unsafe)) - { - char name[64]; - genSavepointName(sv, name); - DBUG_PRINT("ibmdb2i_savepoint_set",("Setting %s", name)); - rc = ha_ibmdb2i::doSavepointSet(thd, name); - } - DBUG_RETURN(rc); -} - - -/********************************************************************* -Rollback a savepoint. */ -static int ibmdb2i_savepoint_rollback(handlerton* hton, THD* thd, void* sv) -{ - DBUG_ENTER("ibmdb2i_savepoint_rollback"); - int rc = 0; - if (!THDVAR(thd,transaction_unsafe)) - { - char name[64]; - genSavepointName(sv, name); - DBUG_PRINT("ibmdb2i_savepoint_rollback",("Rolling back %s", name)); - rc = ha_ibmdb2i::doSavepointRollback(thd, name); - } - DBUG_RETURN(rc); -} - - -/********************************************************************* -Release a savepoint. */ -static int ibmdb2i_savepoint_release(handlerton* hton, THD* thd, void* sv) -{ - DBUG_ENTER("ibmdb2i_savepoint_release"); - int rc = 0; - if (!THDVAR(thd,transaction_unsafe)) - { - char name[64]; - genSavepointName(sv, name); - DBUG_PRINT("ibmdb2i_savepoint_release",("Releasing %s", name)); - rc = ha_ibmdb2i::doSavepointRelease(thd, name); - } - DBUG_RETURN(rc); -} - -/* Thse flags allow for the online add and drop of an index via the CREATE INDEX, - DROP INDEX, and ALTER TABLE statements. These flags indicate that MySQL is not - required to lock the table before calling the storage engine to add or drop the - index(s). 
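genSavepointName earlier in this file builds the DB2 savepoint name from the EBCDIC "MYSP" prefix followed by the savepoint address rendered as digits. The following is a hedged stand-alone sketch of that intent only, not the original implementation; it relies on the fact that ASCII digits occupy 0x30-0x39 while EBCDIC digits occupy 0xF0-0xF9, and the function name and buffer size are illustrative.

#include <cstdio>
#include <cstring>
#include <cstdint>

// Sketch only: EBCDIC "MYSP" prefix followed by the savepoint address rendered
// as EBCDIC digits (adding 0xC0 to an ASCII digit yields the EBCDIC digit).
static void sketchSavepointName(const void* sv, char* out /* at least 32 bytes */)
{
  static const unsigned char MYSP_EBCDIC[4] = {0xD4, 0xE8, 0xE2, 0xD7};
  memcpy(out, MYSP_EBCDIC, sizeof(MYSP_EBCDIC));

  char* p = out + sizeof(MYSP_EBCDIC);
  sprintf(p, "%llu", (unsigned long long)(uintptr_t)sv);   // address as ASCII digits

  for (; *p; ++p)
    *p = (char)((unsigned char)*p + 0xC0);                 // ASCII digit -> EBCDIC digit
}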
*/ -static uint ibmdb2i_alter_table_flags(uint flags) -{ - return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX | - HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX | - HA_ONLINE_ADD_PK_INDEX | HA_ONLINE_DROP_PK_INDEX); -} - -ha_ibmdb2i::ha_ibmdb2i(handlerton *hton, TABLE_SHARE *table_arg) - :share(NULL), handler(hton, table_arg), - activeHandle(0), dataHandle(0), - activeReadBuf(NULL), activeWriteBuf(NULL), - blobReadBuffers(NULL), accessIntent(QMY_UPDATABLE), currentRRN(0), - releaseRowNeeded(FALSE), - indexReadSizeEstimates(NULL), - outstanding_start_bulk_insert(false), - last_rnd_init_rc(0), - last_index_init_rc(0), - last_start_bulk_insert_rc(0), - autoIncLockAcquired(false), - got_auto_inc_values(false), - next_identity_value(0), - indexHandles(0), - returnDupKeysImmediately(false), - onDupUpdate(false), - blobWriteBuffers(NULL), - forceSingleRowRead(false) - { - activeReferences = 0; - ref_length = sizeof(currentRRN); - if (table_share && table_share->keys > 0) - { - indexHandles = (FILE_HANDLE*)my_malloc(table_share->keys * sizeof(FILE_HANDLE), MYF(MY_WME | MY_ZEROFILL)); - } - clear_alloc_root(&conversionBufferMemroot); - } - - -ha_ibmdb2i::~ha_ibmdb2i() -{ - DBUG_ASSERT(activeReferences == 0 || outstanding_start_bulk_insert); - - if (indexHandles) - my_free(indexHandles, MYF(0)); - if (indexReadSizeEstimates) - my_free(indexReadSizeEstimates, MYF(0)); - - cleanupBuffers(); -} - - -static const char *ha_ibmdb2i_exts[] = { - FID_EXT, - NullS -}; - -const char **ha_ibmdb2i::bas_ext() const -{ - return ha_ibmdb2i_exts; -} - - -int ha_ibmdb2i::open(const char *name, int mode, uint test_if_locked) -{ - DBUG_ENTER("ha_ibmdb2i::open"); - - initBridge(); - - dataHandle = bridge()->findAndRemovePreservedHandle(name, &share); - - if (share) - db2Table = share->db2Table; - - if (!share && (!(share = get_share(name, table)))) - DBUG_RETURN(my_errno); - thr_lock_data_init(&share->lock,&lock,NULL); - - info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE); - - - DBUG_RETURN(0); -} - - - - -int ha_ibmdb2i::close(void) -{ - DBUG_ENTER("ha_ibmdb2i::close"); - int32 rc = 0; - bool preserveShare = false; - - db2i_ileBridge* bridge = db2i_ileBridge::getBridgeForThread(); - - if (dataHandle) - { - if (bridge->expectErrors(QMY_ERR_PEND_LOCKS)->deallocateFile(dataHandle, FALSE) == QMY_ERR_PEND_LOCKS) - { - bridge->preserveHandle(share->table_name, dataHandle, share); - preserveShare = true; - } - dataHandle = 0; - } - - for (int idx = 0; idx < table_share->keys; ++idx) - { - if (indexHandles[idx] != 0) - { - bridge->deallocateFile(indexHandles[idx], FALSE); - } - } - - cleanupBuffers(); - - if (!preserveShare) - { - if (free_share(share)) - share = NULL; - } - - DBUG_RETURN(rc); -} - - - -int ha_ibmdb2i::write_row(uchar * buf) -{ - - DBUG_ENTER("ha_ibmdb2i::write_row"); - - if (last_start_bulk_insert_rc) - DBUG_RETURN( last_start_bulk_insert_rc ); - - ha_statistic_increment(&SSV::ha_write_count); - int rc = 0; - - bool fileHandleNeedsRelease = false; - - if (!activeHandle) - { - rc = useDataFile(); - if (rc) DBUG_RETURN(rc); - fileHandleNeedsRelease = true; - } - - if (!outstanding_start_bulk_insert) - rc = prepWriteBuffer(1, getFileForActiveHandle()); - - if (!rc) - { - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) - table->timestamp_field->set_time(); - - char* writeBuffer = activeWriteBuf->addRow(); - rc = prepareRowForWrite(writeBuffer, - writeBuffer+activeWriteBuf->getRowNullOffset(), - true); - if (rc == 0) - { - // If we are doing block inserts, if the MI is 
supposed to generate an auto_increment - // (i.e. identity column) value for this record, and if this is not the first record in - // the block, then store the value (that the MI will generate for the identity column) - // into the MySQL write buffer. We can predetermine the value because the file is locked. - - if ((autoIncLockAcquired) && (default_identity_value) && (got_auto_inc_values)) - { - if (unlikely((next_identity_value - 1) == - maxValueForField(table->next_number_field))) - { - rc = QMY_ERR_MAXVALUE; - } - else - { - rc = table->next_number_field->store((longlong) next_identity_value, TRUE); - next_identity_value = next_identity_value + incrementByValue; - } - } - // If the buffer is full, or if we locked the file and this is the first or last row - // of a blocked insert, then flush the buffer. - if (!rc && (activeWriteBuf->endOfBuffer()) || - ((autoIncLockAcquired) && - ((!got_auto_inc_values))) || - (returnDupKeysImmediately)) - rc = flushWrite(activeHandle, buf); - } - else - activeWriteBuf->deleteRow(); - } - - if (fileHandleNeedsRelease) - releaseActiveHandle(); - - DBUG_RETURN(rc); -} - -/** - @brief - Helper function used by write_row and update_row to prepare the MySQL - row for insertion into DB2. -*/ -int ha_ibmdb2i::prepareRowForWrite(char* data, char* nulls, bool honorIdentCols) -{ - int rc = 0; - - // set null map all to non nulls - memset(nulls,__NOT_NULL_VALUE_EBCDIC, table->s->fields); - default_identity_value = FALSE; - - ulong sql_mode = ha_thd()->variables.sql_mode; - - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - for (Field **field = table->field; *field && !rc; ++field) - { - int fieldIndex = (*field)->field_index; - if ((*field)->Field::is_null()) - { - nulls[fieldIndex] = __NULL_VALUE_EBCDIC; - } - if (honorIdentCols && ((*field)->flags & AUTO_INCREMENT_FLAG) && - *field == table->next_number_field) -// && ((!autoIncLockAcquired) || (!got_auto_inc_values))) - { - if (sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) - { - if (!table->auto_increment_field_not_null) - { - nulls[fieldIndex] = __DEFAULT_VALUE_EBCDIC; - default_identity_value = TRUE; - } - } - else if ((*field)->val_int() == 0) - { - nulls[fieldIndex] = __DEFAULT_VALUE_EBCDIC; - default_identity_value = TRUE; - } - } - - DB2Field& db2Field = db2Table->db2Field(fieldIndex); - if (nulls[fieldIndex] == __NOT_NULL_VALUE_EBCDIC || - db2Field.isBlob()) - { - rc = convertMySQLtoDB2(*field, db2Field, data + db2Field.getBufferOffset()); - } - } - - if (!rc && db2Table->hasBlobs()) - rc = db2i_ileBridge::getBridgeForThread()->objectOverride(activeHandle, - activeWriteBuf->ptr()); - - dbug_tmp_restore_column_map(table->read_set, old_map); - - return rc; -} - - - -int ha_ibmdb2i::update_row(const uchar * old_data, uchar * new_data) -{ - DBUG_ENTER("ha_ibmdb2i::update_row"); - ha_statistic_increment(&SSV::ha_update_count); - int rc; - - bool fileHandleNeedsRelease = false; - - if (!activeHandle) - { - rc = useFileByHandle(QMY_UPDATABLE, rrnAssocHandle); - if (rc) DBUG_RETURN(rc); - fileHandleNeedsRelease = true; - } - - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - - char* writeBuf = activeWriteBuf->addRow(); - rc = prepareRowForWrite(writeBuf, - writeBuf+activeWriteBuf->getRowNullOffset(), - onDupUpdate); - - char* lastDupKeyNamePtr = NULL; - uint32 lastDupKeyNameLen = 0; - - if (!rc) - { - rc = db2i_ileBridge::getBridgeForThread()->updateRow(activeHandle, - currentRRN, - activeWriteBuf->ptr(), - &lastDupKeyRRN, - 
&lastDupKeyNamePtr, - &lastDupKeyNameLen); - } - - if (lastDupKeyNameLen) - { - lastDupKeyID = getKeyFromName(lastDupKeyNamePtr, lastDupKeyNameLen); - rrnAssocHandle = activeHandle; - } - - if (fileHandleNeedsRelease) - releaseActiveHandle(); - - activeWriteBuf->resetAfterWrite(); - - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::delete_row(const uchar * buf) -{ - DBUG_ENTER("ha_ibmdb2i::delete_row"); - ha_statistic_increment(&SSV::ha_delete_count); - - bool needReleaseFile = false; - int rc = 0; - - if (!activeHandle) // In some circumstances, MySQL comes here after - { // closing the active handle. We need to re-open. - rc = useFileByHandle(QMY_UPDATABLE, rrnAssocHandle); - needReleaseFile = true; - } - - if (likely(!rc)) - { - rc = db2i_ileBridge::getBridgeForThread()->deleteRow(activeHandle, - currentRRN); - invalidateCachedStats(); - if (needReleaseFile) - releaseActiveHandle(); - } - - DBUG_RETURN(rc); -} - - - -int ha_ibmdb2i::index_init(uint idx, bool sorted) -{ - DBUG_ENTER("ha_ibmdb2i::index_init"); - - int& rc = last_index_init_rc; - rc = 0; - - invalidDataFound=false; - tweakReadSet(); - - active_index=idx; - - rc = useIndexFile(idx); - - if (!rc) - { -// THD* thd = ha_thd(); -// if (accessIntent == QMY_UPDATABLE && -// thd_tx_isolation(thd) == ISO_REPEATABLE_READ && -// !THDVAR(thd, transaction_unsafe)) -// { -// readAccessIntent = QMY_READ_ONLY; -// } -// else -// { - readAccessIntent = accessIntent; -// } - - if (!rc && accessIntent != QMY_READ_ONLY) - rc = prepWriteBuffer(1, db2Table->indexFile(idx)); - - if (rc) - releaseIndexFile(idx); - } - - rrnAssocHandle= 0; - - DBUG_RETURN(rc); -} - - - -int ha_ibmdb2i::index_read(uchar * buf, const uchar * key, - uint key_len, - enum ha_rkey_function find_flag) -{ - DBUG_ENTER("ha_ibmdb2i::index_read"); - - if (unlikely(last_index_init_rc)) DBUG_RETURN(last_index_init_rc); - - int rc; - - ha_rows estimatedRows = getIndexReadEstimate(active_index); - rc = prepReadBuffer(estimatedRows, db2Table->indexFile(active_index), readAccessIntent); - if (unlikely(rc)) DBUG_RETURN(rc); - - DBUG_ASSERT(activeReadBuf); - - keyBuf.allocBuf(activeReadBuf->getRowLength(), - activeReadBuf->getRowNullOffset(), - activeReadBuf->getRowLength()); - keyBuf.zeroBuf(); - - char* db2KeyBufPtr = keyBuf.ptr(); - char* nullKeyMap = db2KeyBufPtr + activeReadBuf->getRowNullOffset(); - - const uchar* keyBegin = key; - int partsInUse; - - KEY& curKey = table->key_info[active_index]; - - for (partsInUse = 0; partsInUse < curKey.key_parts, key - keyBegin < key_len; ++partsInUse) - { - Field* field = curKey.key_part[partsInUse].field; - if ((curKey.key_part[partsInUse].null_bit) && - (char*)key[0]) - { - if (field->flags & AUTO_INCREMENT_FLAG) - { - table->status = STATUS_NOT_FOUND; - DBUG_RETURN(HA_ERR_END_OF_FILE); - } - else - { - nullKeyMap[partsInUse] = __NULL_VALUE_EBCDIC; - } - } - else - { - nullKeyMap[partsInUse] = __NOT_NULL_VALUE_EBCDIC; - convertMySQLtoDB2(field, - db2Table->db2Field(field->field_index), - db2KeyBufPtr, - (uchar*)key+((curKey.key_part[partsInUse].null_bit)? 
1 : 0) ); // + (curKey.key_parts+7) / 8); - } - - db2KeyBufPtr += db2Table->db2Field(field->field_index).getByteLengthInRecord(); - key += curKey.key_part[partsInUse].store_length; - } - - keyLen = db2KeyBufPtr - (char*)keyBuf.ptr(); - - DBUG_PRINT("ha_ibmdb2i::index_read", ("find_flag: %d", find_flag)); - - char readDirection = QMY_NEXT; - - switch (find_flag) - { - case HA_READ_AFTER_KEY: - doInitialRead(QMY_AFTER_EQUAL, estimatedRows, - keyBuf.ptr(), keyLen, partsInUse); - break; - case HA_READ_BEFORE_KEY: - doInitialRead(QMY_BEFORE_EQUAL, estimatedRows, - keyBuf.ptr(), keyLen, partsInUse); - break; - case HA_READ_KEY_OR_NEXT: - doInitialRead(QMY_AFTER_OR_EQUAL, estimatedRows, - keyBuf.ptr(), keyLen, partsInUse); - break; - case HA_READ_KEY_OR_PREV: - DBUG_ASSERT(0); // This function is unused - doInitialRead(QMY_BEFORE_OR_EQUAL, estimatedRows, - keyBuf.ptr(), keyLen, partsInUse); - break; - case HA_READ_PREFIX_LAST_OR_PREV: - doInitialRead(QMY_LAST_PREVIOUS, estimatedRows, - keyBuf.ptr(), keyLen, partsInUse); - readDirection = QMY_PREVIOUS; - break; - case HA_READ_PREFIX_LAST: - doInitialRead(QMY_PREFIX_LAST, estimatedRows, - keyBuf.ptr(), keyLen, partsInUse); - readDirection = QMY_PREVIOUS; - break; - case HA_READ_KEY_EXACT: - doInitialRead(QMY_EQUAL, estimatedRows, keyBuf.ptr(), keyLen, partsInUse); - break; - default: - DBUG_ASSERT(0); - return HA_ERR_GENERIC; - break; - } - - ha_statistic_increment(&SSV::ha_read_key_count); - rc = readFromBuffer(buf, readDirection); - - table->status= (rc ? STATUS_NOT_FOUND: 0); - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::index_next(uchar * buf) -{ - DBUG_ENTER("ha_ibmdb2i::index_next"); - ha_statistic_increment(&SSV::ha_read_next_count); - - int rc = readFromBuffer(buf, QMY_NEXT); - - table->status= (rc ? STATUS_NOT_FOUND: 0); - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::index_next_same(uchar *buf, const uchar *key, uint keylen) -{ - DBUG_ENTER("ha_ibmdb2i::index_next_same"); - ha_statistic_increment(&SSV::ha_read_next_count); - - int rc = readFromBuffer(buf, QMY_NEXT_EQUAL); - - if (rc == HA_ERR_KEY_NOT_FOUND) - { - rc = HA_ERR_END_OF_FILE; - } - - table->status= (rc ? STATUS_NOT_FOUND: 0); - DBUG_RETURN(rc); -} - -int ha_ibmdb2i::index_read_last(uchar * buf, const uchar * key, uint key_len) -{ - DBUG_ENTER("ha_ibmdb2i::index_read_last"); - DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST)); -} - - - -int ha_ibmdb2i::index_prev(uchar * buf) -{ - DBUG_ENTER("ha_ibmdb2i::index_prev"); - ha_statistic_increment(&SSV::ha_read_prev_count); - - int rc = readFromBuffer(buf, QMY_PREVIOUS); - - table->status= (rc ? STATUS_NOT_FOUND: 0); - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::index_first(uchar * buf) -{ - DBUG_ENTER("ha_ibmdb2i::index_first"); - - if (unlikely(last_index_init_rc)) DBUG_RETURN(last_index_init_rc); - - int rc = prepReadBuffer(DEFAULT_MAX_ROWS_TO_BUFFER, - db2Table->indexFile(active_index), - readAccessIntent); - - if (rc == 0) - { - doInitialRead(QMY_FIRST, DEFAULT_MAX_ROWS_TO_BUFFER); - ha_statistic_increment(&SSV::ha_read_first_count); - rc = readFromBuffer(buf, QMY_NEXT); - } - - table->status= (rc ? 
STATUS_NOT_FOUND: 0); - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::index_last(uchar * buf) -{ - DBUG_ENTER("ha_ibmdb2i::index_last"); - - if (unlikely(last_index_init_rc)) DBUG_RETURN(last_index_init_rc); - - int rc = prepReadBuffer(DEFAULT_MAX_ROWS_TO_BUFFER, - db2Table->indexFile(active_index), - readAccessIntent); - - if (rc == 0) - { - doInitialRead(QMY_LAST, DEFAULT_MAX_ROWS_TO_BUFFER); - ha_statistic_increment(&SSV::ha_read_last_count); - rc = readFromBuffer(buf, QMY_PREVIOUS); - } - - table->status= (rc ? STATUS_NOT_FOUND: 0); - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::rnd_init(bool scan) -{ - DBUG_ENTER("ha_ibmdb2i::rnd_init"); - - int& rc = last_rnd_init_rc; - rc = 0; - - tweakReadSet(); - invalidDataFound=false; - - uint32 rowsToBlockOnRead; - - if (!scan) - { - rowsToBlockOnRead = 1; - } - else - { - rowsToBlockOnRead = DEFAULT_MAX_ROWS_TO_BUFFER; - } - - rc = useDataFile(); - - if (!rc) - { -// THD* thd = ha_thd(); -// if (accessIntent == QMY_UPDATABLE && -// thd_tx_isolation(thd) == ISO_REPEATABLE_READ && -// !THDVAR(thd, transaction_unsafe)) -// { -// readAccessIntent = QMY_READ_ONLY; -// } -// else -// { - readAccessIntent = accessIntent; -// } - - rc = prepReadBuffer(rowsToBlockOnRead, db2Table->dataFile(), readAccessIntent); - - if (!rc && accessIntent != QMY_READ_ONLY) - rc = prepWriteBuffer(1, db2Table->dataFile()); - - if (!rc && scan) - doInitialRead(QMY_FIRST, rowsToBlockOnRead); - - if (rc) - releaseDataFile(); - } - - rrnAssocHandle= 0; - - DBUG_RETURN(0); // MySQL sometimes does not check the return code, causing - // an assert in ha_rnd_end later on if we return a non-zero - // value here. -} - -int ha_ibmdb2i::rnd_end() -{ - DBUG_ENTER("ha_ibmdb2i::rnd_end"); - - warnIfInvalidData(); - if (likely(activeReadBuf)) - activeReadBuf->endRead(); - if (last_rnd_init_rc == 0) - releaseActiveHandle(); - last_rnd_init_rc = 0; - DBUG_RETURN(0); -} - - -int32 ha_ibmdb2i::mungeDB2row(uchar* record, const char* dataPtr, const char* nullMapPtr, bool skipLOBs) -{ - DBUG_ASSERT(dataPtr); - - my_bitmap_map *old_write_map= dbug_tmp_use_all_columns(table, table->write_set); - my_bitmap_map *old_read_map; - - if (unlikely(readAllColumns)) - old_read_map = tmp_use_all_columns(table, table->read_set); - - resetCharacterConversionBuffers(); - - my_ptrdiff_t old_ptr= (my_ptrdiff_t) (record - table->record[0]); - int fieldIndex = 0; - for (Field **field = table->field; *field; ++field, ++fieldIndex) - { - if (unlikely(old_ptr)) - (*field)->move_field_offset(old_ptr); - if (nullMapPtr[fieldIndex] == __NULL_VALUE_EBCDIC || - (!bitmap_is_set(table->read_set, fieldIndex)) || - (skipLOBs && db2Table->db2Field(fieldIndex).isBlob())) - { - (*field)->set_null(); - } - else - { - (*field)->set_notnull(); - convertDB2toMySQL(db2Table->db2Field(fieldIndex), *field, dataPtr); - } - if (unlikely(old_ptr)) - (*field)->move_field_offset(-old_ptr); - - } - - if (unlikely(readAllColumns)) - tmp_restore_column_map(table->read_set, old_read_map); - dbug_tmp_restore_column_map(table->write_set, old_write_map); - - return 0; -} - - -int ha_ibmdb2i::rnd_next(uchar *buf) -{ - DBUG_ENTER("ha_ibmdb2i::rnd_next"); - - if (unlikely(last_rnd_init_rc)) DBUG_RETURN(last_rnd_init_rc); - ha_statistic_increment(&SSV::ha_read_rnd_next_count); - - int rc; - - rc = readFromBuffer(buf, QMY_NEXT); - - table->status= (rc ? 
STATUS_NOT_FOUND: 0); - DBUG_RETURN(rc); -} - - -void ha_ibmdb2i::position(const uchar *record) -{ - DBUG_ENTER("ha_ibmdb2i::position"); - my_store_ptr(ref, ref_length, currentRRN); - DBUG_VOID_RETURN; -} - - -int ha_ibmdb2i::rnd_pos(uchar * buf, uchar *pos) -{ - DBUG_ENTER("ha_ibmdb2i::rnd_pos"); - if (unlikely(last_rnd_init_rc)) DBUG_RETURN( last_rnd_init_rc); - ha_statistic_increment(&SSV::ha_read_rnd_count); - - currentRRN = my_get_ptr(pos, ref_length); - - tweakReadSet(); - - int rc = 0; - - if (rrnAssocHandle && - (activeHandle != rrnAssocHandle)) - { - if (activeHandle) releaseActiveHandle(); - rc = useFileByHandle(QMY_UPDATABLE, rrnAssocHandle); - } - - if (likely(rc == 0)) - { - rc = prepReadBuffer(1, getFileForActiveHandle(), accessIntent); - - if (likely(rc == 0) && accessIntent == QMY_UPDATABLE) - rc = prepWriteBuffer(1, getFileForActiveHandle()); - - if (likely(rc == 0)) - { - rc = db2i_ileBridge::getBridgeForThread()->readByRRN(activeHandle, - activeReadBuf->ptr(), - currentRRN, - accessIntent, - getCommitLevel()); - - if (likely(rc == 0)) - { - rrnAssocHandle = activeHandle; - const char* readBuf = activeReadBuf->getRowN(0); - rc = mungeDB2row(buf, readBuf, readBuf + activeReadBuf->getRowNullOffset(), false); - releaseRowNeeded = TRUE; - } - } - } - - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::info(uint flag) -{ - DBUG_ENTER("ha_ibmdb2i::info"); - - uint16 infoRequested = 0; - ValidatedPointer rowKeySpcPtr; // Space pointer passed to DB2 - uint32 rowKeySpcLen; // Length of space passed to DB2 - THD* thd = ha_thd(); - int command = thd_sql_command(thd); - - if (flag & HA_STATUS_AUTO) - stats.auto_increment_value = (ulonglong) 0; - - if (flag & HA_STATUS_ERRKEY) - { - errkey = lastDupKeyID; - my_store_ptr(dup_ref, ref_length, lastDupKeyRRN); - } - - if (flag & HA_STATUS_TIME) - { - if ((flag & HA_STATUS_NO_LOCK) && - ibmdb2i_assume_exclusive_use && - share && - (share->cachedStats.isInited(lastModTime))) - stats.update_time = share->cachedStats.getUpdateTime(); - else - infoRequested |= lastModTime; - } - - if (flag & HA_STATUS_CONST) - { - stats.block_size=4096; - infoRequested |= createTime; - - if (table->s->keys) - { - infoRequested |= rowsPerKey; - rowKeySpcLen = (table->s->keys) * MAX_DB2_KEY_PARTS * sizeof(uint64); - rowKeySpcPtr.alloc(rowKeySpcLen); - memset(rowKeySpcPtr, 0, rowKeySpcLen); // Clear the allocated space - } - } - - if (flag & HA_STATUS_VARIABLE) - { - if ((flag & HA_STATUS_NO_LOCK) && - (command != SQLCOM_SHOW_TABLE_STATUS) && - ibmdb2i_assume_exclusive_use && - share && - (share->cachedStats.isInited(rowCount | deletedRowCount | meanRowLen | ioCount)) && - (share->cachedStats.getRowCount() >= 2)) - { - stats.records = share->cachedStats.getRowCount(); - stats.deleted = share->cachedStats.getDelRowCount(); - stats.mean_rec_length = share->cachedStats.getMeanLength(); - stats.data_file_length = share->cachedStats.getAugmentedDataLength(); - } - else - { - infoRequested |= rowCount | deletedRowCount | meanRowLen; - if (command == SQLCOM_SHOW_TABLE_STATUS) - infoRequested |= objLength; - else - infoRequested |= ioCount; - } - } - - int rc = 0; - - if (infoRequested) - { - DBUG_PRINT("ha_ibmdb2i::info",("Retrieving fresh stats %d", flag)); - - initBridge(thd); - rc = bridge()->retrieveTableInfo((dataHandle ? 
dataHandle : db2Table->dataFile()->getMasterDefnHandle()), - infoRequested, - stats, - rowKeySpcPtr); - - if (!rc) - { - if ((flag & HA_STATUS_VARIABLE) && - (command != SQLCOM_SHOW_TABLE_STATUS)) - stats.data_file_length = stats.data_file_length * IO_SIZE; - - if ((ibmdb2i_assume_exclusive_use) && - (share) && - (command != SQLCOM_SHOW_TABLE_STATUS)) - { - if (flag & HA_STATUS_VARIABLE) - { - share->cachedStats.cacheRowCount(stats.records); - share->cachedStats.cacheDelRowCount(stats.deleted); - share->cachedStats.cacheMeanLength(stats.mean_rec_length); - share->cachedStats.cacheAugmentedDataLength(stats.data_file_length); - } - - if (flag & HA_STATUS_TIME) - { - share->cachedStats.cacheUpdateTime(stats.update_time); - } - } - - if (flag & HA_STATUS_CONST) - { - ulong i; // Loop counter for indexes - ulong j; // Loop counter for key parts - RowKey* rowKeyPtr; // Pointer to 'number of unique rows' array for this index - - rowKeyPtr = (RowKey_t*)(void*)rowKeySpcPtr; // Address first array of DB2 row counts - for (i = 0; i < table->s->keys; i++) // Do for each index, including primary - { - for (j = 0; j < table->key_info[i].key_parts; j++) - { - table->key_info[i].rec_per_key[j]= rowKeyPtr->RowKeyArray[j]; - } - rowKeyPtr = rowKeyPtr + 1; // Address next array of DB2 row counts - } - } - } - else if (rc == HA_ERR_LOCK_WAIT_TIMEOUT && share) - { - // If we couldn't retrieve the info because the object was locked, - // we'll do our best by returning the most recently cached data. - if ((infoRequested & rowCount) && - share->cachedStats.isInited(rowCount)) - stats.records = share->cachedStats.getRowCount(); - if ((infoRequested & deletedRowCount) && - share->cachedStats.isInited(deletedRowCount)) - stats.deleted = share->cachedStats.getDelRowCount(); - if ((infoRequested & meanRowLen) && - share->cachedStats.isInited(meanRowLen)) - stats.mean_rec_length = share->cachedStats.getMeanLength(); - if ((infoRequested & lastModTime) && - share->cachedStats.isInited(lastModTime)) - stats.update_time = share->cachedStats.getUpdateTime(); - - rc = 0; - } - } - - DBUG_RETURN(rc); -} - - -ha_rows ha_ibmdb2i::records() -{ - DBUG_ENTER("ha_ibmdb2i::records"); - int rc; - rc = bridge()->retrieveTableInfo((dataHandle ? dataHandle : db2Table->dataFile()->getMasterDefnHandle()), - rowCount, - stats); - - if (unlikely(rc)) - { - if (rc == HA_ERR_LOCK_WAIT_TIMEOUT && - share && - (share->cachedStats.isInited(rowCount))) - DBUG_RETURN(share->cachedStats.getRowCount()); - else - DBUG_RETURN(HA_POS_ERROR); - } - else if (share) - { - share->cachedStats.cacheRowCount(stats.records); - } - - DBUG_RETURN(stats.records); -} - - -int ha_ibmdb2i::extra(enum ha_extra_function operation) -{ - DBUG_ENTER("ha_ibmdb2i::extra"); - - switch(operation) - { - // Can these first five flags be replaced by attending to HA_EXTRA_WRITE_CACHE? - case HA_EXTRA_NO_IGNORE_DUP_KEY: - case HA_EXTRA_WRITE_CANNOT_REPLACE: - { - returnDupKeysImmediately = false; - onDupUpdate = false; - } - break; - case HA_EXTRA_INSERT_WITH_UPDATE: - { - returnDupKeysImmediately = true; - onDupUpdate = true; - } - break; - case HA_EXTRA_IGNORE_DUP_KEY: - case HA_EXTRA_WRITE_CAN_REPLACE: - returnDupKeysImmediately = true; - break; - case HA_EXTRA_FLUSH_CACHE: - if (outstanding_start_bulk_insert) - finishBulkInsert(); - break; - } - - - DBUG_RETURN(0); -} - -/** - @brief - The DB2 storage engine will ignore a MySQL generated value and will generate - a new value in SLIC. 
We arbitrarily set first_value to 1, and set the - interval to infinity for better performance on multi-row inserts. -*/ -void ha_ibmdb2i::get_auto_increment(ulonglong offset, ulonglong increment, - ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values) -{ - DBUG_ENTER("ha_ibmdb2i::get_auto_increment"); - *first_value= 1; - *nb_reserved_values= ULONGLONG_MAX; -} - - - -void ha_ibmdb2i::update_create_info(HA_CREATE_INFO *create_info) -{ - DBUG_ENTER("ha_ibmdb2i::update_create_info"); - - if ((!(create_info->used_fields & HA_CREATE_USED_AUTO)) && - (table->found_next_number_field != NULL)) - { - initBridge(); - - create_info->auto_increment_value= 1; - - ha_rows rowCount = records(); - - if (rowCount == 0) - { - create_info->auto_increment_value = db2Table->getStartId(); - DBUG_VOID_RETURN; - } - else if (rowCount == HA_POS_ERROR) - { - DBUG_VOID_RETURN; - } - - getNextIdVal(&create_info->auto_increment_value); - } - DBUG_VOID_RETURN; -} - - -int ha_ibmdb2i::getNextIdVal(ulonglong *value) -{ - DBUG_ENTER("ha_ibmdb2i::getNextIdVal"); - - char queryBuffer[MAX_DB2_COLNAME_LENGTH + MAX_DB2_QUALIFIEDNAME_LENGTH + 64]; - strcpy(queryBuffer, " SELECT CAST(MAX( "); - convertMySQLNameToDB2Name(table->found_next_number_field->field_name, - strend(queryBuffer), - MAX_DB2_COLNAME_LENGTH+1); - strcat(queryBuffer, ") AS BIGINT) FROM "); - db2Table->getDB2QualifiedName(strend(queryBuffer)); - DBUG_ASSERT(strlen(queryBuffer) < sizeof(queryBuffer)); - - SqlStatementStream sqlStream(queryBuffer); - DBUG_PRINT("ha_ibmdb2i::getNextIdVal", ("Sent to DB2: %s",queryBuffer)); - - int rc = 0; - FILE_HANDLE fileHandle2; - uint32 db2RowDataLen2; - rc = bridge()->prepOpen(sqlStream.getPtrToData(), - &fileHandle2, - &db2RowDataLen2); - if (likely(rc == 0)) - { - IOReadBuffer rowBuffer(1, db2RowDataLen2); - rc = bridge()->read(fileHandle2, - rowBuffer.ptr(), - QMY_READ_ONLY, - QMY_NONE, - QMY_FIRST); - - if (likely(rc == 0)) - { - /* This check is here for the case where the table is not empty, - but the auto_increment starting value has been changed since - the last record was written. */ - - longlong maxIdVal = *(longlong*)(rowBuffer.getRowN(0)); - if ((maxIdVal + 1) > db2Table->getStartId()) - *value = maxIdVal + 1; - else - *value = db2Table->getStartId(); - } - - bridge()->deallocateFile(fileHandle2); - } - DBUG_RETURN(rc); -} - - -/* - Updates index cardinalities. -*/ -int ha_ibmdb2i::analyze(THD* thd, HA_CHECK_OPT *check_opt) -{ - DBUG_ENTER("ha_ibmdb2i::analyze"); - info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE); - DBUG_RETURN(0); -} - -int ha_ibmdb2i::optimize(THD* thd, HA_CHECK_OPT *check_opt) -{ - DBUG_ENTER("ha_ibmdb2i::optimize"); - - initBridge(thd); - - if (unlikely(records() == 0)) - DBUG_RETURN(0); // DB2 doesn't like to reorganize a table with no data. - - quiesceAllFileHandles(); - - int32 rc = bridge()->optimizeTable(db2Table->dataFile()->getMasterDefnHandle()); - info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE); - - DBUG_RETURN(rc); -} - - -/** - @brief - Determines if an ALTER TABLE is allowed to switch the storage engine - for this table. If the table has a foreign key or is referenced by a - foreign key, then it cannot be switched. 
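For illustration, a minimal standalone sketch of how a COUNT(*) probe against SYSIBM.SQLFOREIGNKEYS can be composed from delimited schema and table names, which is the kind of check the method below performs before permitting an engine switch. The helper names (stripDelimiters, buildFkCountQuery) and the sample identifiers are placeholders, and real identifier escaping is more involved than shown here:

    #include <iostream>
    #include <string>

    // Turn a delimited identifier such as "MYLIB" into MYLIB.
    static std::string stripDelimiters(const std::string& name)
    {
      if (name.size() >= 2 && name.front() == '"' && name.back() == '"')
        return name.substr(1, name.size() - 2);
      return name;
    }

    static std::string buildFkCountQuery(const std::string& schema,
                                         const std::string& table)
    {
      const std::string s = stripDelimiters(schema);
      const std::string t = stripDelimiters(table);
      std::string q = "SELECT COUNT(*) FROM SYSIBM.SQLFOREIGNKEYS WHERE ";
      q += "((PKTABLE_SCHEM = '" + s + "' AND PKTABLE_NAME = '" + t + "') OR ";
      q += "(FKTABLE_SCHEM = '" + s + "' AND FKTABLE_NAME = '" + t + "'))";
      return q;
    }

    int main()
    {
      // A count of zero would mean no foreign-key relationships exist,
      // so switching the storage engine could be allowed.
      std::cout << buildFkCountQuery("\"MYLIB\"", "\"MYTABLE\"") << '\n';
      return 0;
    }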
-*/ -bool ha_ibmdb2i::can_switch_engines(void) -/*=================================*/ -{ - DBUG_ENTER("ha_ibmdb2i::can_switch_engines"); - - int rc = 0; - FILE_HANDLE queryFile = 0; - uint32 resultRowLen; - uint count = 0; - bool can_switch = FALSE; // 1 if changing storage engine is allowed - - const char* libName = db2Table->getDB2LibName(db2i_table::ASCII_SQL); - const char* fileName = db2Table->getDB2TableName(db2i_table::ASCII_SQL); - - String query(256); - query.append(STRING_WITH_LEN(" SELECT COUNT(*) FROM SYSIBM.SQLFOREIGNKEYS WHERE ((PKTABLE_SCHEM = '")); - query.append(libName+1, strlen(libName)-2); // Remove quotes from parent schema name - query.append(STRING_WITH_LEN("' AND PKTABLE_NAME = '")); - query.append(fileName+1,strlen(fileName)-2); // Remove quotes from file name - query.append(STRING_WITH_LEN("') OR (FKTABLE_SCHEM = '")); - query.append(libName+1,strlen(libName)-2); // Remove quotes from child schema - query.append(STRING_WITH_LEN("' AND FKTABLE_NAME = '")); - query.append(fileName+1,strlen(fileName)-2); // Remove quotes from child name - query.append(STRING_WITH_LEN("'))")); - - SqlStatementStream sqlStream(query); - - rc = bridge()->prepOpen(sqlStream.getPtrToData(), - &queryFile, - &resultRowLen); - if (rc == 0) - { - IOReadBuffer rowBuffer(1, resultRowLen); - - rc = bridge()->read(queryFile, - rowBuffer.ptr(), - QMY_READ_ONLY, - QMY_NONE, - QMY_FIRST); - if (!rc) - { - count = *(uint*)(rowBuffer.getRowN(0)); - if (count == 0) - can_switch = TRUE; - } - - bridge()->deallocateFile(queryFile); - } - DBUG_RETURN(can_switch); -} - - - -bool ha_ibmdb2i::check_if_incompatible_data(HA_CREATE_INFO *info, - uint table_changes) -{ - DBUG_ENTER("ha_ibmdb2i::check_if_incompatible_data"); - uint i; - /* Check that auto_increment value and field definitions were - not changed. */ - if ((info->used_fields & HA_CREATE_USED_AUTO && - info->auto_increment_value != 0) || - table_changes != IS_EQUAL_YES) - DBUG_RETURN(COMPATIBLE_DATA_NO); - /* Check if any fields were renamed. 
*/ - for (i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; - if (field->flags & FIELD_IS_RENAMED) - { - DBUG_PRINT("info", ("Field has been renamed, copy table")); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - } - DBUG_RETURN(COMPATIBLE_DATA_YES); -} - -int ha_ibmdb2i::reset_auto_increment(ulonglong value) - { - DBUG_ENTER("ha_ibmdb2i::reset_auto_increment"); - - int rc = 0; - - quiesceAllFileHandles(); - - const char* libName = db2Table->getDB2LibName(db2i_table::ASCII_SQL); - const char* fileName = db2Table->getDB2TableName(db2i_table::ASCII_SQL); - - String query(512); - query.append(STRING_WITH_LEN(" ALTER TABLE ")); - query.append(libName); - query.append('.'); - query.append(fileName); - query.append(STRING_WITH_LEN(" ALTER COLUMN ")); - char colName[MAX_DB2_COLNAME_LENGTH+1]; - convertMySQLNameToDB2Name(table->found_next_number_field->field_name, - colName, - sizeof(colName)); - query.append(colName); - - char restart_value[22]; - CHARSET_INFO *cs= &my_charset_bin; - uint len = (uint)(cs->cset->longlong10_to_str)(cs,restart_value,sizeof(restart_value), 10, value); - restart_value[len] = 0; - - query.append(STRING_WITH_LEN(" RESTART WITH ")); - query.append(restart_value); - - SqlStatementStream sqlStream(query); - DBUG_PRINT("ha_ibmdb2i::reset_auto_increment", ("Sent to DB2: %s",query.c_ptr())); - - rc = db2i_ileBridge::getBridgeForThread()->execSQL(sqlStream.getPtrToData(), - sqlStream.getStatementCount(), - QMY_NONE, //getCommitLevel(), - FALSE, - FALSE, - TRUE, //FALSE, - dataHandle); - if (rc == 0) - db2Table->updateStartId(value); - - DBUG_RETURN(rc); -} - - -/** - @brief - This function receives an error code that was previously set by the handler. - It returns to MySQL the error string associated with that error. -*/ -bool ha_ibmdb2i::get_error_message(int error, String *buf) -{ - DBUG_ENTER("ha_ibmdb2i::get_error_message"); - if ((error >= DB2I_FIRST_ERR && error <= DB2I_LAST_ERR) || - (error >= QMY_ERR_MIN && error <= QMY_ERR_MAX)) - { - db2i_ileBridge* bridge = db2i_ileBridge::getBridgeForThread(ha_thd()); - char* errMsg = bridge->getErrorStorage(); - buf->copy(errMsg, strlen(errMsg),system_charset_info); - bridge->freeErrorStorage(); - } - DBUG_RETURN(FALSE); -} - - -int ha_ibmdb2i::delete_all_rows() -{ - DBUG_ENTER("ha_ibmdb2i::delete_all_rows"); - int rc = 0; - char queryBuffer[MAX_DB2_QUALIFIEDNAME_LENGTH + 64]; - strcpy(queryBuffer, " DELETE FROM "); - db2Table->getDB2QualifiedName(strend(queryBuffer)); - DBUG_ASSERT(strlen(queryBuffer) < sizeof(queryBuffer)); - - SqlStatementStream sqlStream(queryBuffer); - DBUG_PRINT("ha_ibmdb2i::delete_all_rows", ("Sent to DB2: %s",queryBuffer)); - rc = bridge()->execSQL(sqlStream.getPtrToData(), - sqlStream.getStatementCount(), - getCommitLevel(), - false, - false, - true, - dataHandle); - - /* If this method was called on behalf of a TRUNCATE TABLE statement, and if */ - /* the table has an auto_increment field, then reset the starting value for */ - /* the auto_increment field to 1. 
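As a rough standalone illustration of the statement that reset_auto_increment() above sends to DB2, the snippet below composes an ALTER TABLE ... RESTART WITH string. The helper name and the quoted sample identifiers are assumptions made for the example, and std::to_string stands in for longlong10_to_str:

    #include <iostream>
    #include <string>

    // Compose the DDL used to restart an identity column at a given value.
    static std::string buildRestartStatement(const std::string& qualifiedTable,
                                             const std::string& column,
                                             unsigned long long startValue)
    {
      return "ALTER TABLE " + qualifiedTable +
             " ALTER COLUMN " + column +
             " RESTART WITH " + std::to_string(startValue);
    }

    int main()
    {
      // After TRUNCATE TABLE the identity column is restarted at 1.
      std::cout << buildRestartStatement("\"MYLIB\".\"MYTABLE\"", "\"ID\"", 1ULL)
                << '\n';
      return 0;
    }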
- */ - if (rc == 0 && thd_sql_command(ha_thd()) == SQLCOM_TRUNCATE && - table->found_next_number_field ) - rc = reset_auto_increment(1); - - invalidateCachedStats(); - - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::external_lock(THD *thd, int lock_type) -{ - int rc = 0; - - DBUG_ENTER("ha_ibmdb2i::external_lock"); - DBUG_PRINT("ha_ibmdb2i::external_lock",("Lock type: %d", lock_type)); - - if (lock_type == F_RDLCK) - accessIntent = QMY_READ_ONLY; - else if (lock_type == F_WRLCK) - accessIntent = QMY_UPDATABLE; - - initBridge(thd); - int command = thd_sql_command(thd); - - if (!THDVAR(thd,transaction_unsafe)) - { - if (lock_type != F_UNLCK) - { - if (autoCommitIsOn(thd) == QMY_YES) - { - trans_register_ha(thd, FALSE, ibmdb2i_hton); - } - else - { - trans_register_ha(thd, TRUE, ibmdb2i_hton); - if (likely(command != SQLCOM_CREATE_TABLE)) - { - trans_register_ha(thd, FALSE, ibmdb2i_hton); - bridge()->beginStmtTx(); - } - } - } - } - - if (command == SQLCOM_LOCK_TABLES || - command == SQLCOM_ALTER_TABLE || - command == SQLCOM_UNLOCK_TABLES || - (accessIntent == QMY_UPDATABLE && - (command == SQLCOM_UPDATE || - command == SQLCOM_UPDATE_MULTI || - command == SQLCOM_DELETE || - command == SQLCOM_DELETE_MULTI || - command == SQLCOM_REPLACE || - command == SQLCOM_REPLACE_SELECT) && - getCommitLevel(thd) == QMY_NONE)) - { - char action; - char type; - if (lock_type == F_UNLCK) - { - action = QMY_UNLOCK; - type = accessIntent == QMY_READ_ONLY ? QMY_LSRD : QMY_LENR; - } - else - { - action = QMY_LOCK; - type = lock_type == F_RDLCK ? QMY_LSRD : QMY_LENR; - } - - DBUG_PRINT("ha_ibmdb2i::external_lock",("%socking table", action==QMY_LOCK ? "L" : "Unl")); - - if (!dataHandle) - rc = db2Table->dataFile()->allocateNewInstance(&dataHandle, curConnection); - - rc = bridge()->lockObj(dataHandle, - 0, - action, - type, - (command == SQLCOM_LOCK_TABLES ? QMY_NO : QMY_YES)); - - } - - // Cache this away so we don't have to access it on each row operation - cachedZeroDateOption = (enum_ZeroDate)THDVAR(thd, compat_opt_allow_zero_date_vals); - - DBUG_RETURN(rc); -} - - -THR_LOCK_DATA **ha_ibmdb2i::store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) - { - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && - lock_type <= TL_WRITE) && !(thd->in_lock_tables && thd_sql_command(thd) == SQLCOM_LOCK_TABLES)) - lock_type= TL_WRITE_ALLOW_WRITE; - lock.type=lock_type; - } - *to++= &lock; - return to; -} - - -int ha_ibmdb2i::delete_table(const char *name) -{ - DBUG_ENTER("ha_ibmdb2i::delete_table"); - THD* thd = ha_thd(); - db2i_ileBridge* bridge = db2i_ileBridge::getBridgeForThread(thd); - - char db2Name[MAX_DB2_QUALIFIEDNAME_LENGTH]; - db2i_table::getDB2QualifiedNameFromPath(name, db2Name); - - String query(128); - query.append(STRING_WITH_LEN(" DROP TABLE ")); - query.append(db2Name); - - if (thd_sql_command(thd) == SQLCOM_DROP_TABLE && - thd->lex->drop_mode == DROP_RESTRICT) - query.append(STRING_WITH_LEN(" RESTRICT ")); - DBUG_PRINT("ha_ibmdb2i::delete_table", ("Sent to DB2: %s",query.c_ptr())); - - SqlStatementStream sqlStream(query); - - db2i_table::getDB2LibNameFromPath(name, db2Name); - bool isTemporary = (strcmp(db2Name, DB2I_TEMP_TABLE_SCHEMA) == 0 ? TRUE : FALSE); - - int rc = bridge->execSQL(sqlStream.getPtrToData(), - sqlStream.getStatementCount(), - (isTemporary ? 
QMY_NONE : getCommitLevel(thd)), - FALSE, - FALSE, - isTemporary); - - if (rc == HA_ERR_NO_SUCH_TABLE) - { - warning(thd, DB2I_ERR_TABLE_NOT_FOUND, name); - rc = 0; - } - - if (rc == 0) - { - db2i_table::deleteAssocFiles(name); - } - - FILE_HANDLE savedHandle = bridge->findAndRemovePreservedHandle(name, &share); - while (savedHandle) - { - bridge->deallocateFile(savedHandle, TRUE); - DBUG_ASSERT(share); - if (free_share(share)) - share = NULL; - savedHandle = bridge->findAndRemovePreservedHandle(name, &share); - } - - my_errno = rc; - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::rename_table(const char * from, const char * to) -{ - DBUG_ENTER("ha_ibmdb2i::rename_table "); - - char db2FromFileName[MAX_DB2_FILENAME_LENGTH + 1]; - char db2ToFileName[MAX_DB2_FILENAME_LENGTH+1]; - char db2FromLibName[MAX_DB2_SCHEMANAME_LENGTH+1]; - char db2ToLibName[MAX_DB2_SCHEMANAME_LENGTH+1]; - - db2i_table::getDB2LibNameFromPath(from, db2FromLibName); - db2i_table::getDB2LibNameFromPath(to, db2ToLibName); - - if (strcmp(db2FromLibName, db2ToLibName) != 0 ) - { - getErrTxt(DB2I_ERR_RENAME_MOVE,from,to); - DBUG_RETURN(DB2I_ERR_RENAME_MOVE); - } - - db2i_table::getDB2FileNameFromPath(from, db2FromFileName, db2i_table::ASCII_NATIVE); - db2i_table::getDB2FileNameFromPath(to, db2ToFileName); - - char escapedFromFileName[2 * MAX_DB2_FILENAME_LENGTH + 1]; - - uint o = 0; - uint i = 1; - do - { - escapedFromFileName[o++] = db2FromFileName[i]; - if (db2FromFileName[i] == '+') - escapedFromFileName[o++] = '+'; - } while (db2FromFileName[++i]); - escapedFromFileName[o-1] = 0; - - - int rc = 0; - - char queryBuffer[sizeof(db2FromLibName) + 2 * sizeof(db2FromFileName) + 256]; - SafeString selectQuery(queryBuffer, sizeof(queryBuffer)); - selectQuery.strncat(STRING_WITH_LEN("SELECT CAST(INDEX_NAME AS VARCHAR(128) CCSID 1208) FROM QSYS2.SYSINDEXES WHERE INDEX_NAME LIKE '%+_+_+_%")); - selectQuery.strcat(escapedFromFileName); - selectQuery.strncat(STRING_WITH_LEN("' ESCAPE '+' AND TABLE_NAME='")); - selectQuery.strncat(db2FromFileName+1, strlen(db2FromFileName)-2); - selectQuery.strncat(STRING_WITH_LEN("' AND TABLE_SCHEMA='")); - selectQuery.strncat(db2FromLibName+1, strlen(db2FromLibName)-2); - selectQuery.strcat('\''); - DBUG_ASSERT(!selectQuery.overflowed()); - - SqlStatementStream indexQuery(selectQuery.ptr()); - - FILE_HANDLE queryFile = 0; - uint32 resultRowLen; - - initBridge(); - rc = bridge()->prepOpen(indexQuery.getPtrToData(), - &queryFile, - &resultRowLen); - - if (unlikely(rc)) - DBUG_RETURN(rc); - - IOReadBuffer rowBuffer(1, resultRowLen); - - int tableNameLen = strlen(db2FromFileName) - 2; - - SqlStatementStream renameQuery(64); - String query; - while (rc == 0) - { - query.length(0); - - rc = bridge()->read(queryFile, - rowBuffer.ptr(), - QMY_READ_ONLY, - QMY_NONE, - QMY_NEXT); - - if (!rc) - { - const char* rowData = rowBuffer.getRowN(0); - char indexFileName[MAX_DB2_FILENAME_LENGTH]; - memset(indexFileName, 0, sizeof(indexFileName)); - - uint16 fileNameLen = *(uint16*)(rowData); - strncpy(indexFileName, rowData + sizeof(uint16), fileNameLen); - - int bytesToRetain = fileNameLen - tableNameLen; - if (bytesToRetain <= 0) - /* We can't handle index names in which the MySQL index name and - the table name together are longer than the max index name. 
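The split performed just below can be pictured with a small standalone helper: the generated DB2 index file name is assumed to end with the table name, so keeping the leading fileNameLen - tableNameLen bytes recovers the MySQL index prefix. The sample names and the separator shown are illustrative only:

    #include <iostream>
    #include <string>

    // Keep fileNameLen - tableNameLen leading bytes of the index file name;
    // the remainder is the table-name suffix added when the index was created.
    static std::string indexPrefix(const std::string& indexFileName,
                                   const std::string& tableName)
    {
      if (indexFileName.size() <= tableName.size())
        return std::string();   // too short: the name would be rejected as invalid
      return indexFileName.substr(0, indexFileName.size() - tableName.size());
    }

    int main()
    {
      std::cout << indexPrefix("IDX_NAME___MYTABLE", "MYTABLE") << '\n';  // IDX_NAME___
      return 0;
    }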
*/ - { - getErrTxt(DB2I_ERR_INVALID_NAME,"index","*generated*"); - DBUG_RETURN(DB2I_ERR_INVALID_NAME); - } - char indexName[MAX_DB2_FILENAME_LENGTH]; - memset(indexName, 0, sizeof(indexName)); - - strncpy(indexName, - indexFileName, - bytesToRetain); - - char db2IndexName[MAX_DB2_FILENAME_LENGTH+1]; - - convertMySQLNameToDB2Name(indexFileName, db2IndexName, sizeof(db2IndexName)); - - query.append(STRING_WITH_LEN("RENAME INDEX ")); - query.append(db2FromLibName); - query.append('.'); - query.append(db2IndexName); - query.append(STRING_WITH_LEN(" TO ")); - if (db2i_table::appendQualifiedIndexFileName(indexName, db2ToFileName, query, db2i_table::ASCII_SQL, typeNone) == -1) - { - getErrTxt(DB2I_ERR_INVALID_NAME,"index","*generated*"); - DBUG_RETURN(DB2I_ERR_INVALID_NAME ); - } - renameQuery.addStatement(query); - DBUG_PRINT("ha_ibmdb2i::rename_table", ("Sent to DB2: %s",query.c_ptr_safe())); - } - } - - - if (queryFile) - bridge()->deallocateFile(queryFile); - - if (rc != HA_ERR_END_OF_FILE) - DBUG_RETURN(rc); - - char db2Name[MAX_DB2_QUALIFIEDNAME_LENGTH]; - - /* Rename the table */ - query.length(0); - query.append(STRING_WITH_LEN(" RENAME TABLE ")); - db2i_table::getDB2QualifiedNameFromPath(from, db2Name); - query.append(db2Name); - query.append(STRING_WITH_LEN(" TO ")); - query.append(db2ToFileName); - DBUG_PRINT("ha_ibmdb2i::rename_table", ("Sent to DB2: %s",query.c_ptr_safe())); - renameQuery.addStatement(query); - rc = bridge()->execSQL(renameQuery.getPtrToData(), - renameQuery.getStatementCount(), - getCommitLevel()); - - if (!rc) - db2i_table::renameAssocFiles(from, to); - - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::create(const char *name, TABLE *table_arg, - HA_CREATE_INFO *create_info) -{ - DBUG_ENTER("ha_ibmdb2i::create"); - - int rc; - char fileSortSequence[11] = "*HEX"; - char fileSortSequenceLibrary[11] = ""; - char fileSortSequenceType = ' '; - char libName[MAX_DB2_SCHEMANAME_LENGTH+1]; - char fileName[MAX_DB2_FILENAME_LENGTH+1]; - char colName[MAX_DB2_COLNAME_LENGTH+1]; - bool isTemporary; - ulong auto_inc_value; - - db2i_table::getDB2LibNameFromPath(name, libName); - db2i_table::getDB2FileNameFromPath(name, fileName); - - if (osVersion.v < 6) - { - if (strlen(libName) > - MAX_DB2_V5R4_LIBNAME_LENGTH + (isOrdinaryIdentifier(libName) ? 
2 : 0)) - { - getErrTxt(DB2I_ERR_TOO_LONG_SCHEMA,libName, MAX_DB2_V5R4_LIBNAME_LENGTH); - DBUG_RETURN(DB2I_ERR_TOO_LONG_SCHEMA); - } - } - else if (strlen(libName) > MAX_DB2_V6R1_LIBNAME_LENGTH) - { - getErrTxt(DB2I_ERR_TOO_LONG_SCHEMA,libName, MAX_DB2_V6R1_LIBNAME_LENGTH); - DBUG_RETURN(DB2I_ERR_TOO_LONG_SCHEMA); - } - - String query(256); - - if (strcmp(libName, DB2I_TEMP_TABLE_SCHEMA)) - { - query.append(STRING_WITH_LEN("CREATE TABLE ")); - query.append(libName); - query.append('.'); - query.append(fileName); - isTemporary = FALSE; - } - else - { - query.append(STRING_WITH_LEN("DECLARE GLOBAL TEMPORARY TABLE ")); - query.append(fileName); - isTemporary = TRUE; - } - query.append(STRING_WITH_LEN(" (")); - - THD* thd = ha_thd(); - enum_TimeFormat timeFormat = (enum_TimeFormat)(THDVAR(thd, compat_opt_time_as_duration)); - enum_YearFormat yearFormat = (enum_YearFormat)(THDVAR(thd, compat_opt_year_as_int)); - enum_BlobMapping blobMapping = (enum_BlobMapping)(THDVAR(thd, compat_opt_blob_cols)); - enum_ZeroDate zeroDate = (enum_ZeroDate)(THDVAR(thd, compat_opt_allow_zero_date_vals)); - bool propagateDefaults = THDVAR(thd, propagate_default_col_vals); - - Field **field; - for (field= table_arg->field; *field; field++) - { - if ( field != table_arg->field ) // Not the first one - query.append(STRING_WITH_LEN(" , ")); - - if (!convertMySQLNameToDB2Name((*field)->field_name, colName, sizeof(colName))) - { - getErrTxt(DB2I_ERR_INVALID_NAME,"field",(*field)->field_name); - DBUG_RETURN(DB2I_ERR_INVALID_NAME ); - } - - query.append(colName); - query.append(' '); - - if (rc = getFieldTypeMapping(*field, - query, - timeFormat, - blobMapping, - zeroDate, - propagateDefaults, - yearFormat)) - DBUG_RETURN(rc); - - if ( (*field)->flags & NOT_NULL_FLAG ) - { - query.append(STRING_WITH_LEN(" NOT NULL ")); - } - if ( (*field)->flags & AUTO_INCREMENT_FLAG ) - { -#ifdef WITH_PARTITION_STORAGE_ENGINE - if (table_arg->part_info) - { - getErrTxt(DB2I_ERR_PART_AUTOINC); - DBUG_RETURN(DB2I_ERR_PART_AUTOINC); - } -#endif - query.append(STRING_WITH_LEN(" GENERATED BY DEFAULT AS IDENTITY ") ); - if (create_info->auto_increment_value != 0) - { - /* Query was ALTER TABLE...AUTO_INCREMENT = x; or - CREATE TABLE ...AUTO_INCREMENT = x; Set the starting - value for the auto_increment column. 
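The identity clause assembled just below can be condensed into a standalone sketch like the following. The helper name is hypothetical, std::to_string replaces longlong10_to_str, and a maxValue of 0 is taken to mean that no MAXVALUE clause is added:

    #include <iostream>
    #include <string>

    // Build the identity clause appended for an AUTO_INCREMENT column when an
    // explicit starting value was requested.
    static std::string identityClause(unsigned long long startWith,
                                      unsigned long long maxValue)
    {
      std::string clause = " GENERATED BY DEFAULT AS IDENTITY (START WITH " +
                           std::to_string(startWith);
      if (maxValue)
        clause += " MAXVALUE " + std::to_string(maxValue);
      clause += ") ";
      return clause;
    }

    int main()
    {
      std::cout << identityClause(100, 65535) << '\n';
      return 0;
    }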
*/ - char stringValue[22]; - CHARSET_INFO *cs= &my_charset_bin; - uint len = (uint)(cs->cset->longlong10_to_str)(cs,stringValue,sizeof(stringValue), 10, create_info->auto_increment_value); - stringValue[len] = 0; - query.append(STRING_WITH_LEN(" (START WITH ")); - query.append(stringValue); - - uint64 maxValue=maxValueForField(*field); - - if (maxValue) - { - len = (uint)(cs->cset->longlong10_to_str)(cs,stringValue,sizeof(stringValue), 10, maxValue); - stringValue[len] = 0; - query.append(STRING_WITH_LEN(" MAXVALUE ")); - query.append(stringValue); - } - - query.append(STRING_WITH_LEN(") ")); - } - - } - } - - String fieldDefinition(128); - - if (table_arg->s->primary_key != MAX_KEY && !isTemporary) - { - query.append(STRING_WITH_LEN(", PRIMARY KEY ")); - rc = buildIndexFieldList(fieldDefinition, - table_arg->key_info[table_arg->s->primary_key], - true, - &fileSortSequenceType, - fileSortSequence, - fileSortSequenceLibrary); - if (rc) DBUG_RETURN(rc); - query.append(fieldDefinition); - } - - rc = buildDB2ConstraintString(thd->lex, - query, - name, - table_arg->field, - &fileSortSequenceType, - fileSortSequence, - fileSortSequenceLibrary); - if (rc) DBUG_RETURN (rc); - - query.append(STRING_WITH_LEN(" ) ")); - - if (isTemporary) - query.append(STRING_WITH_LEN(" ON COMMIT PRESERVE ROWS ")); - - if (create_info->alias) - generateAndAppendRCDFMT(create_info->alias, query); - else if (((TABLE_LIST*)(thd->lex->select_lex.table_list.first))->table_name) - generateAndAppendRCDFMT((char*)((TABLE_LIST*)(thd->lex->select_lex.table_list.first))->table_name, query); - - DBUG_PRINT("ha_ibmdb2i::create", ("Sent to DB2: %s",query.c_ptr())); - SqlStatementStream sqlStream(query.length()); - sqlStream.addStatement(query,fileSortSequence,fileSortSequenceLibrary); - - if (table_arg->s->primary_key != MAX_KEY && - !isTemporary && - (THDVAR(thd, create_index_option)==1) && - (fileSortSequenceType != 'B') && - (fileSortSequenceType != ' ')) - { - rc = generateShadowIndex(sqlStream, - table_arg->key_info[table_arg->s->primary_key], - libName, - fileName, - fieldDefinition); - if (rc) DBUG_RETURN(rc); - } - for (uint i = 0; i < table_arg->s->keys; ++i) - { - if (i != table_arg->s->primary_key || isTemporary) - { - rc = buildCreateIndexStatement(sqlStream, - table_arg->key_info[i], - false, - libName, - fileName); - if (rc) DBUG_RETURN (rc); - } - } - - bool noCommit = isTemporary || ((!autoCommitIsOn(thd)) && (thd_sql_command(thd) == SQLCOM_ALTER_TABLE)); - - initBridge(); - -// if (THDVAR(thd, discovery_mode) == 1) -// bridge()->expectErrors(QMY_ERR_TABLE_EXISTS); - - rc = bridge()->execSQL(sqlStream.getPtrToData(), - sqlStream.getStatementCount(), - (isTemporary ? QMY_NONE : getCommitLevel(thd)), - TRUE, - FALSE, - noCommit ); - - if (unlikely(rc == QMY_ERR_MSGID) && - memcmp(bridge()->getErrorMsgID(), DB2I_SQL0350, 7) == 0) - { - my_error(ER_BLOB_USED_AS_KEY, MYF(0), "*unknown*"); - rc = ER_BLOB_USED_AS_KEY; - } -/* else if (unlikely(rc == QMY_ERR_TABLE_EXISTS) && - THDVAR(thd, discovery_mode) == 1) - { - db2i_table* temp = new db2i_table(table_arg->s, name); - int32 rc = temp->fastInitForCreate(name); - delete temp; - - if (!rc) - warning(thd, DB2I_ERR_WARN_CREATE_DISCOVER); - - DBUG_RETURN(rc); - } -*/ - - if (!rc && !isTemporary) - { - db2i_table* temp = new db2i_table(table_arg->s, name); - rc = temp->fastInitForCreate(name); - delete temp; - if (rc) - delete_table(name); - } - - DBUG_RETURN(rc); -} - - -/** - @brief - Add an index on-line to a table. 
This method is called on behalf of - a CREATE INDEX or ALTER TABLE statement. - It is implemented via a composed DDL statement passed to DB2. -*/ -int ha_ibmdb2i::add_index(TABLE *table_arg, - KEY *key_info, - uint num_of_keys) -{ - DBUG_ENTER("ha_ibmdb2i::add_index"); - - int rc; - SqlStatementStream sqlStream(256); - const char* libName = db2Table->getDB2LibName(db2i_table::ASCII_SQL); - const char* fileName = db2Table->getDB2TableName(db2i_table::ASCII_SQL); - - quiesceAllFileHandles(); - - uint primaryKey = MAX_KEY; - if (table_arg->s->primary_key >= MAX_KEY && !db2Table->isTemporary()) - { - for (int i = 0; i < num_of_keys; ++i) - { - if (strcmp(key_info[i].name, "PRIMARY") == 0) - { - primaryKey = i; - break; - } - else if (primaryKey == MAX_KEY && - key_info[i].flags & HA_NOSAME) - { - primaryKey = i; - for (int j=0 ; j < key_info[i].key_parts ;j++) - { - uint fieldnr= key_info[i].key_part[j].fieldnr; - if (table_arg->s->field[fieldnr]->null_ptr || - table_arg->s->field[fieldnr]->key_length() != - key_info[i].key_part[j].length) - { - primaryKey = MAX_KEY; - break; - } - } - } - } - } - - - for (int i = 0; i < num_of_keys; ++i) - { - KEY& curKey= key_info[i]; - rc = buildCreateIndexStatement(sqlStream, - curKey, - (i == primaryKey), - libName, - fileName); - if (rc) DBUG_RETURN (rc); - } - - rc = bridge()->execSQL(sqlStream.getPtrToData(), - sqlStream.getStatementCount(), - getCommitLevel(), - FALSE, - FALSE, - FALSE, - dataHandle); - - /* Handle the case where a unique index is being created but an error occurs - because the file contains duplicate key values. */ - if (rc == ER_DUP_ENTRY) - print_keydup_error(MAX_KEY,ER(ER_DUP_ENTRY_WITH_KEY_NAME)); - - DBUG_RETURN(rc); -} - -/** - @brief - Drop an index on-line from a table. This method is called on behalf of - a DROP INDEX or ALTER TABLE statement. - It is implemented via a composed DDL statement passed to DB2. 
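The composed-DDL approach described here (also used by add_index() above) boils down to collecting several statements and shipping them in one call. A minimal standalone sketch, in which StatementBatch is only a stand-in for SqlStatementStream and execute() merely prints instead of calling execSQL():

    #include <iostream>
    #include <string>
    #include <vector>

    // Collect several DDL statements and run them as one batch.
    struct StatementBatch
    {
      std::vector<std::string> statements;

      void add(const std::string& sql) { statements.push_back(sql); }

      void execute() const           // placeholder for a single execSQL() call
      {
        for (const std::string& sql : statements)
          std::cout << "exec: " << sql << '\n';
      }
    };

    int main()
    {
      StatementBatch batch;
      batch.add("CREATE UNIQUE WHERE NOT NULL INDEX MYLIB.T1__K1 ON MYLIB.T1 ( C1 )");
      batch.add("DROP INDEX MYLIB.T1__K2");
      batch.execute();               // all statements go out together
      return 0;
    }

The payoff is a single call for the whole group of index statements rather than one round trip per index.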
-*/ -int ha_ibmdb2i::prepare_drop_index(TABLE *table_arg, - uint *key_num, uint num_of_keys) -{ - DBUG_ENTER("ha_ibmdb2i::prepare_drop_index"); - int rc; - int i = 0; - String query(64); - SqlStatementStream sqlStream(64 * num_of_keys); - SqlStatementStream shadowStream(64 * num_of_keys); - - quiesceAllFileHandles(); - - const char* libName = db2Table->getDB2LibName(db2i_table::ASCII_SQL); - const char* fileName = db2Table->getDB2TableName(db2i_table::ASCII_SQL); - - while (i < num_of_keys) - { - query.length(0); - DBUG_PRINT("info", ("ha_ibmdb2i::prepare_drop_index %u", key_num[i])); - KEY& curKey= table_arg->key_info[key_num[i]]; - if (key_num[i] == table->s->primary_key && !db2Table->isTemporary()) - { - query.append(STRING_WITH_LEN("ALTER TABLE ")); - query.append(libName); - query.append(STRING_WITH_LEN(".")); - query.append(fileName); - query.append(STRING_WITH_LEN(" DROP PRIMARY KEY")); - } - else - { - query.append(STRING_WITH_LEN("DROP INDEX ")); - query.append(libName); - query.append(STRING_WITH_LEN(".")); - db2i_table::appendQualifiedIndexFileName(curKey.name, fileName, query); - } - DBUG_PRINT("ha_ibmdb2i::prepare_drop_index", ("Sent to DB2: %s",query.c_ptr_safe())); - sqlStream.addStatement(query); - - query.length(0); - query.append(STRING_WITH_LEN("DROP INDEX ")); - query.append(libName); - query.append(STRING_WITH_LEN(".")); - db2i_table::appendQualifiedIndexFileName(curKey.name, fileName, query, db2i_table::ASCII_SQL, typeHex); - - DBUG_PRINT("ha_ibmdb2i::prepare_drop_index", ("Sent to DB2: %s",query.c_ptr_safe())); - shadowStream.addStatement(query); - - ++i; - } - - rc = bridge()->execSQL(sqlStream.getPtrToData(), - sqlStream.getStatementCount(), - getCommitLevel(), - FALSE, - FALSE, - FALSE, - dataHandle); - - if (rc == 0) - bridge()->execSQL(shadowStream.getPtrToData(), - shadowStream.getStatementCount(), - getCommitLevel()); - - DBUG_RETURN(rc); -} - - -void -ha_ibmdb2i::unlock_row() -{ - DBUG_ENTER("ha_ibmdb2i::unlock_row"); - DBUG_VOID_RETURN; -} - -int -ha_ibmdb2i::index_end() -{ - DBUG_ENTER("ha_ibmdb2i::index_end"); - warnIfInvalidData(); - last_index_init_rc = 0; - if (likely(activeReadBuf)) - activeReadBuf->endRead(); - if (likely(!last_index_init_rc)) - releaseIndexFile(active_index); - active_index= MAX_KEY; - DBUG_RETURN (0); -} - -int ha_ibmdb2i::doCommit(handlerton *hton, THD *thd, bool all) -{ - if (!THDVAR(thd, transaction_unsafe)) - { - if (all || autoCommitIsOn(thd)) - { - DBUG_PRINT("ha_ibmdb2i::doCommit",("Committing all")); - return (db2i_ileBridge::getBridgeForThread(thd)->commitmentControl(QMY_COMMIT)); - } - else - { - DBUG_PRINT("ha_ibmdb2i::doCommit",("Committing stmt")); - return (db2i_ileBridge::getBridgeForThread(thd)->commitStmtTx()); - } - } - - return (0); -} - - -int ha_ibmdb2i::doRollback(handlerton *hton, THD *thd, bool all) -{ - if (!THDVAR(thd,transaction_unsafe)) - { - if (all || autoCommitIsOn(thd)) - { - DBUG_PRINT("ha_ibmdb2i::doRollback",("Rolling back all")); - return ( db2i_ileBridge::getBridgeForThread(thd)->commitmentControl(QMY_ROLLBACK)); - } - else - { - DBUG_PRINT("ha_ibmdb2i::doRollback",("Rolling back stmt")); - return (db2i_ileBridge::getBridgeForThread(thd)->rollbackStmtTx()); - } - } - return (0); -} - - -void ha_ibmdb2i::start_bulk_insert(ha_rows rows) -{ - DBUG_ENTER("ha_ibmdb2i::start_bulk_insert"); - DBUG_PRINT("ha_ibmdb2i::start_bulk_insert",("Rows hinted %d", rows)); - int rc; - THD* thd = ha_thd(); - int command = thd_sql_command(thd); - - if (db2Table->hasBlobs() || - (command == SQLCOM_REPLACE || command 
== SQLCOM_REPLACE_SELECT)) - rows = 1; - else if (rows == 0) - rows = DEFAULT_MAX_ROWS_TO_BUFFER; // Shoot the moon - - // If we're doing a multi-row insert, binlogging is active, and the table has an - // auto_increment column, then we'll attempt to lock the file while we perform a 'fast path' blocked - // insert. If we can't get the lock, then we'll do a row-by-row 'slow path' insert instead. The reason is - // because the MI generates the auto_increment (identity value), and if we can't lock the file, - // then we can't predetermine what that value will be for insertion into the MySQL write buffer. - - if ((rows > 1) && // Multi-row insert - (thd->options & OPTION_BIN_LOG) && // Binlogging is on - (table->found_next_number_field)) // Table has an auto_increment column - { - if (!dataHandle) - rc = db2Table->dataFile()->allocateNewInstance(&dataHandle, curConnection); - - rc = bridge()->lockObj(dataHandle, 1, QMY_LOCK, QMY_LEAR, QMY_YES); - if (rc==0) // Got the lock - { - autoIncLockAcquired = TRUE; - got_auto_inc_values = FALSE; - } - else // Didn't get the lock - rows = 1; // No problem, but don't block inserts - } - - if (activeHandle == 0) - { - last_start_bulk_insert_rc = useDataFile(); - if (last_start_bulk_insert_rc == 0) - last_start_bulk_insert_rc = prepWriteBuffer(rows, db2Table->dataFile()); - } - - if (last_start_bulk_insert_rc == 0) - outstanding_start_bulk_insert = true; - else - { - if (autoIncLockAcquired == TRUE) - { - bridge()->lockObj(dataHandle, 0, QMY_UNLOCK, QMY_LEAR, QMY_YES); - autoIncLockAcquired = FALSE; - } - } - - DBUG_VOID_RETURN; -} - - -int ha_ibmdb2i::end_bulk_insert() -{ - DBUG_ENTER("ha_ibmdb2i::end_bulk_insert"); - int rc = 0; - - if (outstanding_start_bulk_insert) - { - rc = finishBulkInsert(); - } - - my_errno = rc; - - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::prepReadBuffer(ha_rows rowsToRead, const db2i_file* file, char intent) -{ - DBUG_ENTER("ha_ibmdb2i::prepReadBuffer"); - DBUG_ASSERT(rowsToRead > 0); - - THD* thd = ha_thd(); - char cmtLvl = getCommitLevel(thd); - - const db2i_file::RowFormat* format; - int rc = file->obtainRowFormat(activeHandle, intent, cmtLvl, &format); - - if (unlikely(rc)) DBUG_RETURN(rc); - - if (lobFieldsRequested()) - { - forceSingleRowRead = true; - rowsToRead = 1; - } - - rowsToRead = min(stats.records+1,min(rowsToRead, DEFAULT_MAX_ROWS_TO_BUFFER)); - - uint bufSize = min((format->readRowLen * rowsToRead), THDVAR(thd, max_read_buffer_size)); - multiRowReadBuf.allocBuf(format->readRowLen, format->readRowNullOffset, bufSize); - activeReadBuf = &multiRowReadBuf; - - if (db2Table->hasBlobs()) - { - if (!blobReadBuffers) - blobReadBuffers = new BlobCollection(db2Table, THDVAR(thd, lob_alloc_size)); - rc = prepareReadBufferForLobs(); - if (rc) DBUG_RETURN(rc); - } - -// if (accessIntent == QMY_UPDATABLE && -// thd_tx_isolation(thd) == ISO_REPEATABLE_READ && -// !THDVAR(thd, transaction_unsafe)) -// activeReadBuf->update(QMY_READ_ONLY, &releaseRowNeeded, QMY_REPEATABLE_READ); -// else - activeReadBuf->update(intent, &releaseRowNeeded, cmtLvl); - - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::prepWriteBuffer(ha_rows rowsToWrite, const db2i_file* file) -{ - DBUG_ENTER("ha_ibmdb2i::prepWriteBuffer"); - DBUG_ASSERT(accessIntent == QMY_UPDATABLE && rowsToWrite > 0); - - const db2i_file::RowFormat* format; - int rc = file->obtainRowFormat(activeHandle, - QMY_UPDATABLE, - getCommitLevel(ha_thd()), - &format); - - if (unlikely(rc)) DBUG_RETURN(rc); - - rowsToWrite = min(rowsToWrite, DEFAULT_MAX_ROWS_TO_BUFFER); - - uint bufSize = 
min((format->writeRowLen * rowsToWrite), THDVAR(ha_thd(), max_write_buffer_size)); - multiRowWriteBuf.allocBuf(format->writeRowLen, format->writeRowNullOffset, bufSize); - activeWriteBuf = &multiRowWriteBuf; - - if (!blobWriteBuffers && db2Table->hasBlobs()) - { - blobWriteBuffers = new ValidatedPointer[db2Table->getBlobCount()]; - } - DBUG_RETURN(rc); -} - - -int ha_ibmdb2i::flushWrite(FILE_HANDLE fileHandle, uchar* buf ) -{ - DBUG_ENTER("ha_ibmdb2i::flushWrite"); - int rc; - int64 generatedIdValue = 0; - bool IdValueWasGenerated = FALSE; - char* lastDupKeyNamePtr = NULL; - uint32 lastDupKeyNameLen = 0; - int loopCnt = 0; - bool retry_dup = FALSE; - - while (loopCnt == 0 || retry_dup == TRUE) - { - rc = bridge()->writeRows(fileHandle, - activeWriteBuf->ptr(), - getCommitLevel(), - &generatedIdValue, - &IdValueWasGenerated, - &lastDupKeyRRN, - &lastDupKeyNamePtr, - &lastDupKeyNameLen, - &incrementByValue); - loopCnt++; - retry_dup = FALSE; - invalidateCachedStats(); - if (lastDupKeyNameLen) - { - rrnAssocHandle = fileHandle; - - int command = thd_sql_command(ha_thd()); - - if (command == SQLCOM_REPLACE || - command == SQLCOM_REPLACE_SELECT) - lastDupKeyID = 0; - else - { - lastDupKeyID = getKeyFromName(lastDupKeyNamePtr, lastDupKeyNameLen); - - if (likely(lastDupKeyID != MAX_KEY)) - { - uint16 failedRow = activeWriteBuf->rowsWritten()+1; - - if (buf && (failedRow != activeWriteBuf->rowCount())) - { - const char* badRow = activeWriteBuf->getRowN(failedRow-1); - bool savedReadAllColumns = readAllColumns; - readAllColumns = true; - mungeDB2row(buf, - badRow, - badRow + activeWriteBuf->getRowNullOffset(), - true); - readAllColumns = savedReadAllColumns; - - if (table->found_next_number_field) - { - table->next_number_field->store(next_identity_value - (incrementByValue * (activeWriteBuf->rowCount() - (failedRow - 1)))); - } - } - - if (default_identity_value && // Table has ID colm and generating a value - (!autoIncLockAcquired || !got_auto_inc_values) && - // Writing first or only row in block - loopCnt == 1 && // Didn't already retry - lastDupKeyID == table->s->next_number_index) // Autoinc column is in failed index - { - if (alterStartWith() == 0) // Reset next Identity value to max+1 - retry_dup = TRUE; // Rtry the write operation - } - } - else - { - char unknownIndex[MAX_DB2_FILENAME_LENGTH+1]; - convFromEbcdic(lastDupKeyNamePtr, unknownIndex, min(lastDupKeyNameLen, MAX_DB2_FILENAME_LENGTH)); - unknownIndex[min(lastDupKeyNameLen, MAX_DB2_FILENAME_LENGTH)] = 0; - getErrTxt(DB2I_ERR_UNKNOWN_IDX, unknownIndex); - } - } - } - } - - if ((rc == 0 || rc == HA_ERR_FOUND_DUPP_KEY) - && default_identity_value && IdValueWasGenerated && - (!autoIncLockAcquired || !got_auto_inc_values)) - { - /* Save the generated identity value for the MySQL last_insert_id() function. */ - insert_id_for_cur_row = generatedIdValue; - - /* Store the value into MySQL's buf for row-based replication - or for an 'on duplicate key update' clause. 
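The arithmetic behind the identity handling here and in write_row() is simple enough to show in isolation: once the first generated value and the column increment are known, and the file is locked so no other job can interleave, the value for each later row in the block is just an offset from the first. A standalone sketch with made-up numbers:

    #include <iostream>

    int main()
    {
      const long long firstGenerated = 1000;  // value generated for the first row
      const long long incrementBy    = 1;     // identity increment of the column
      const int       rowsInBlock    = 4;

      for (int row = 0; row < rowsInBlock; ++row)
      {
        const long long predicted = firstGenerated + row * incrementBy;
        std::cout << "row " << row << " -> identity " << predicted << '\n';
      }
      // The first generated value is also what last_insert_id() should report.
      return 0;
    }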
*/ - table->next_number_field->store((longlong) generatedIdValue, TRUE); - if (autoIncLockAcquired) - { - got_auto_inc_values = TRUE; - next_identity_value = generatedIdValue + incrementByValue; - } - } - else - { - if (!autoIncLockAcquired) // Don't overlay value for first row of a block - insert_id_for_cur_row = 0; - } - - - activeWriteBuf->resetAfterWrite(); - DBUG_RETURN(rc); -} - -int ha_ibmdb2i::alterStartWith() -{ - DBUG_ENTER("ha_ibmdb2i::alterStartWith"); - int rc = 0; - ulonglong nextIdVal; - if (!dataHandle) - rc = db2Table->dataFile()->allocateNewInstance(&dataHandle, curConnection); - if (!rc) {rc = bridge()->lockObj(dataHandle, 1, QMY_LOCK, QMY_LENR, QMY_YES);} - if (!rc) - { - rc = getNextIdVal(&nextIdVal); - if (!rc) {rc = reset_auto_increment(nextIdVal);} - bridge()->lockObj(dataHandle, 0, QMY_UNLOCK, QMY_LENR, QMY_YES); - } - DBUG_RETURN(rc); -} - -bool ha_ibmdb2i::lobFieldsRequested() -{ - if (!db2Table->hasBlobs()) - { - DBUG_PRINT("ha_ibmdb2i::lobFieldsRequested",("No LOBs")); - return (false); - } - - if (readAllColumns) - { - DBUG_PRINT("ha_ibmdb2i::lobFieldsRequested",("All cols requested")); - return (true); - } - - for (int i = 0; i < db2Table->getBlobCount(); ++i) - { - if (bitmap_is_set(table->read_set, db2Table->blobFields[i])) - { - DBUG_PRINT("ha_ibmdb2i::lobFieldsRequested",("LOB requested")); - return (true); - } - } - - DBUG_PRINT("ha_ibmdb2i::lobFieldsRequested",("No LOBs requested")); - return (false); -} - - -int ha_ibmdb2i::prepareReadBufferForLobs() -{ - DBUG_ENTER("ha_ibmdb2i::prepareReadBufferForLobs"); - DBUG_ASSERT(db2Table->hasBlobs()); - - uint32 activeLobFields = 0; - DB2LobField* lobField; - uint16 blobCount = db2Table->getBlobCount(); - - char* readBuf = activeReadBuf->getRowN(0); - - for (int i = 0; i < blobCount; ++i) - { - int fieldID = db2Table->blobFields[i]; - DB2Field& db2Field = db2Table->db2Field(fieldID); - lobField = db2Field.asBlobField(readBuf); - if (readAllColumns || - bitmap_is_set(table->read_set, fieldID)) - { - lobField->dataHandle = (ILEMemHandle)blobReadBuffers->getBufferPtr(fieldID); - activeLobFields++; - } - else - { - lobField->dataHandle = NULL; - } - } - - if (activeLobFields == 0) - { - for (int i = 0; i < blobCount; ++i) - { - DB2Field& db2Field = db2Table->db2Field(db2Table->blobFields[i]); - uint16 offset = db2Field.getBufferOffset() + db2Field.calcBlobPad(); - - for (int r = 1; r < activeReadBuf->getRowCapacity(); ++r) - { - lobField = (DB2LobField*)(activeReadBuf->getRowN(r) + offset); - lobField->dataHandle = NULL; - } - } - } - - activeReadBuf->setRowsToProcess((activeLobFields ? 
1 : activeReadBuf->getRowCapacity())); - int rc = bridge()->objectOverride(activeHandle, - activeReadBuf->ptr(), - activeReadBuf->getRowLength()); - DBUG_RETURN(rc); -} - - -uint32 ha_ibmdb2i::adjustLobBuffersForRead() -{ - DBUG_ENTER("ha_ibmdb2i::adjustLobBuffersForRead"); - - char* readBuf = activeReadBuf->getRowN(0); - - for (int i = 0; i < db2Table->getBlobCount(); ++i) - { - DB2Field& db2Field = db2Table->db2Field(db2Table->blobFields[i]); - DB2LobField* lobField = db2Field.asBlobField(readBuf); - if (readAllColumns || - bitmap_is_set(table->read_set, db2Table->blobFields[i])) - { - lobField->dataHandle = (ILEMemHandle)blobReadBuffers->reallocBuffer(db2Table->blobFields[i], lobField->length); - - if (lobField->dataHandle == NULL) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - } - else - { - lobField->dataHandle = 0; - } - } - - int32 rc = bridge()->objectOverride(activeHandle, - activeReadBuf->ptr()); - DBUG_RETURN(rc); -} - - - -int ha_ibmdb2i::reset() -{ - DBUG_ENTER("ha_ibmdb2i::reset"); - - if (outstanding_start_bulk_insert) - { - finishBulkInsert(); - } - - if (activeHandle != 0) - { - releaseActiveHandle(); - } - - cleanupBuffers(); - - db2i_ileBridge::getBridgeForThread(ha_thd())->freeErrorStorage(); - - last_rnd_init_rc = last_index_init_rc = last_start_bulk_insert_rc = 0; - - returnDupKeysImmediately = false; - onDupUpdate = false; - forceSingleRowRead = false; - -#ifndef DBUG_OFF - cachedBridge=NULL; -#endif - - DBUG_RETURN(0); -} - - -int32 ha_ibmdb2i::buildCreateIndexStatement(SqlStatementStream& sqlStream, - KEY& key, - bool isPrimary, - const char* db2LibName, - const char* db2FileName) -{ - DBUG_ENTER("ha_ibmdb2i::buildCreateIndexStatement"); - - char fileSortSequence[11] = "*HEX"; - char fileSortSequenceLibrary[11] = ""; - char fileSortSequenceType = ' '; - String query(256); - query.length(0); - int rc = 0; - - if (isPrimary) - { - query.append(STRING_WITH_LEN("ALTER TABLE ")); - query.append(db2LibName); - query.append('.'); - query.append(db2FileName); - query.append(STRING_WITH_LEN(" ADD PRIMARY KEY ")); - } - else - { - query.append(STRING_WITH_LEN("CREATE")); - - if (key.flags & HA_NOSAME) - query.append(STRING_WITH_LEN(" UNIQUE WHERE NOT NULL")); - - query.append(STRING_WITH_LEN(" INDEX ")); - - query.append(db2LibName); - query.append('.'); - if (db2i_table::appendQualifiedIndexFileName(key.name, db2FileName, query)) - { - getErrTxt(DB2I_ERR_INVALID_NAME,"index","*generated*"); - DBUG_RETURN(DB2I_ERR_INVALID_NAME ); - } - - query.append(STRING_WITH_LEN(" ON ")); - - query.append(db2LibName); - query.append('.'); - query.append(db2FileName); - } - - String fieldDefinition(128); - rc = buildIndexFieldList(fieldDefinition, - key, - isPrimary, - &fileSortSequenceType, - fileSortSequence, - fileSortSequenceLibrary); - - if (rc) DBUG_RETURN(rc); - - query.append(fieldDefinition); - - if ((THDVAR(ha_thd(), create_index_option)==1) && - (fileSortSequenceType != 'B') && - (fileSortSequenceType != ' ')) - { - rc = generateShadowIndex(sqlStream, - key, - db2LibName, - db2FileName, - fieldDefinition); - if (rc) DBUG_RETURN(rc); - } - - DBUG_PRINT("ha_ibmdb2i::buildCreateIndexStatement", ("Sent to DB2: %s",query.c_ptr_safe())); - sqlStream.addStatement(query,fileSortSequence,fileSortSequenceLibrary); - - DBUG_RETURN(0); -} - -/** - Generate the SQL syntax for the list of fields to be assigned to the - specified key. The corresponding sort sequence is also calculated. 
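The field list produced here has the shape " ( COL1 , COL2 ) "; a standalone sketch of that formatting, assuming the column names have already been converted to their DB2 forms:

    #include <iostream>
    #include <string>
    #include <vector>

    // Produce a field list of the form " ( COL1 , COL2 ) ".
    static std::string fieldList(const std::vector<std::string>& columns)
    {
      std::string out = " ( ";
      for (std::size_t i = 0; i < columns.size(); ++i)
      {
        if (i != 0)
          out += " , ";
        out += columns[i];
      }
      out += " ) ";
      return out;
    }

    int main()
    {
      std::cout << fieldList({"\"COL1\"", "\"COL2\""}) << '\n';
      return 0;
    }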
- - @param[out] appendHere The string to receive the generated SQL - @param key The key to evaluate - @param isPrimary True if this is being generated on behalf of the primary key - @param[out] fileSortSequenceType The type of the associated sort sequence - @param[out] fileSortSequence The name of the associated sort sequence - @param[out] fileSortSequenceLibrary The library of the associated sort sequence - - @return 0 if successful; error value otherwise -*/ -int32 ha_ibmdb2i::buildIndexFieldList(String& appendHere, - const KEY& key, - bool isPrimary, - char* fileSortSequenceType, - char* fileSortSequence, - char* fileSortSequenceLibrary) -{ - DBUG_ENTER("ha_ibmdb2i::buildIndexFieldList"); - appendHere.append(STRING_WITH_LEN(" ( ")); - for (int j = 0; j < key.key_parts; ++j) - { - char colName[MAX_DB2_COLNAME_LENGTH+1]; - if (j != 0) - { - appendHere.append(STRING_WITH_LEN(" , ")); - } - - KEY_PART_INFO& kpi = key.key_part[j]; - Field* field = kpi.field; - - convertMySQLNameToDB2Name(field->field_name, - colName, - sizeof(colName)); - appendHere.append(colName); - - int32 rc; - rc = updateAssociatedSortSequence(field->charset(), - fileSortSequenceType, - fileSortSequence, - fileSortSequenceLibrary); - if (rc) DBUG_RETURN (rc); - } - - appendHere.append(STRING_WITH_LEN(" ) ")); - - DBUG_RETURN(0); -} - - -/** - Generate an SQL statement that defines a *HEX sorted index to implement - the ibmdb2i_create_index. - - @param[out] stream The stream to append the generated statement to - @param key The key to evaluate - @param[out] libName The library containg the table - @param[out] fileName The DB2-compatible name of the table - @param[out] fieldDefinition The list of the fields in the index, in SQL syntax - - @return 0 if successful; error value otherwise -*/ -int32 ha_ibmdb2i::generateShadowIndex(SqlStatementStream& stream, - const KEY& key, - const char* libName, - const char* fileName, - const String& fieldDefinition) -{ - String shadowQuery(256); - shadowQuery.length(0); - shadowQuery.append(STRING_WITH_LEN("CREATE INDEX ")); - shadowQuery.append(libName); - shadowQuery.append('.'); - if (db2i_table::appendQualifiedIndexFileName(key.name, fileName, shadowQuery, db2i_table::ASCII_SQL, typeHex)) - { - getErrTxt(DB2I_ERR_INVALID_NAME,"index","*generated*"); - return DB2I_ERR_INVALID_NAME; - } - shadowQuery.append(STRING_WITH_LEN(" ON ")); - shadowQuery.append(libName); - shadowQuery.append('.'); - shadowQuery.append(fileName); - shadowQuery.append(fieldDefinition); - DBUG_PRINT("ha_ibmdb2i::generateShadowIndex", ("Sent to DB2: %s",shadowQuery.c_ptr_safe())); - stream.addStatement(shadowQuery,"*HEX","QSYS"); - return 0; -} - - -void ha_ibmdb2i::doInitialRead(char orientation, - uint32 rowsToBuffer, - ILEMemHandle key, - int keyLength, - int keyParts) -{ - DBUG_ENTER("ha_ibmdb2i::doInitialRead"); - - if (forceSingleRowRead) - rowsToBuffer = 1; - else - rowsToBuffer = min(rowsToBuffer, activeReadBuf->getRowCapacity()); - - activeReadBuf->newReadRequest(activeHandle, - orientation, - rowsToBuffer, - THDVAR(ha_thd(), async_enabled), - key, - keyLength, - keyParts); - DBUG_VOID_RETURN; -} - - -int ha_ibmdb2i::start_stmt(THD *thd, thr_lock_type lock_type) -{ - DBUG_ENTER("ha_ibmdb2i::start_stmt"); - initBridge(thd); - if (!THDVAR(thd, transaction_unsafe)) - { - trans_register_ha(thd, FALSE, ibmdb2i_hton); - - if (!autoCommitIsOn(thd)) - { - bridge()->beginStmtTx(); - } - } - - DBUG_RETURN(0); -} - -int32 ha_ibmdb2i::handleLOBReadOverflow() -{ - DBUG_ENTER("ha_ibmdb2i::handleLOBReadOverflow"); - 
DBUG_ASSERT(db2Table->hasBlobs() && (activeReadBuf->getRowCapacity() == 1)); - - int32 rc = adjustLobBuffersForRead(); - - if (!rc) - { - activeReadBuf->rewind(); - rc = bridge()->expectErrors(QMY_ERR_END_OF_BLOCK) - ->read(activeHandle, - activeReadBuf->ptr(), - accessIntent, - getCommitLevel(), - QMY_SAME); - releaseRowNeeded = TRUE; - - } - DBUG_RETURN(rc); -} - - -int32 ha_ibmdb2i::finishBulkInsert() -{ - int32 rc = 0; - - if (activeWriteBuf->rowCount() && activeHandle) - rc = flushWrite(activeHandle, table->record[0]); - - if (activeHandle) - releaseActiveHandle(); - - if (autoIncLockAcquired == TRUE) - { - // We could check the return code on the unlock, but beware not - // to overlay the return code from the flushwrite or we will mask - // duplicate key errors.. - bridge()->lockObj(dataHandle, 0, QMY_UNLOCK, QMY_LEAR, QMY_YES); - autoIncLockAcquired = FALSE; - } - outstanding_start_bulk_insert = false; - multiRowWriteBuf.freeBuf(); - last_start_bulk_insert_rc = 0; - - resetCharacterConversionBuffers(); - - return rc; -} - -int ha_ibmdb2i::getKeyFromName(const char* name, size_t len) -{ - for (int i = 0; i < table_share->keys; ++i) - { - const char* indexName = db2Table->indexFile(i)->getDB2FileName(); - if ((strncmp(name, indexName, len) == 0) && - (strlen(indexName) == len)) - { - return i; - } - } - return MAX_KEY; -} - -/* -Determine the number of I/O's it takes to read through the table. - */ -double ha_ibmdb2i::scan_time() - { - DBUG_ENTER("ha_ibmdb2i::scan_time"); - DBUG_RETURN(ulonglong2double((stats.data_file_length)/IO_SIZE)); - } - - -/** - Estimate the number of I/O's it takes to read a set of ranges through - an index. - - @param index - @param ranges - @param rows - - @return The estimate number of I/Os -*/ - -double ha_ibmdb2i::read_time(uint index, uint ranges, ha_rows rows) -{ - DBUG_ENTER("ha_ibmdb2i::read_time"); - int rc; - uint64 idxPageCnt = 0; - double cost; - - if (unlikely(rows == HA_POS_ERROR)) - DBUG_RETURN(double(rows) + ranges); - - rc = bridge()->retrieveIndexInfo(db2Table->indexFile(index)->getMasterDefnHandle(), - &idxPageCnt); - if (!rc) - { - if ((idxPageCnt == 1) || // Retrieving rows in requested order or - (ranges == rows)) // 'Sweep' full records retrieval - cost = idxPageCnt/4; - else - { - uint64 totalRecords = stats.records + 1; - double dataPageCount = stats.data_file_length/IO_SIZE; - - cost = (rows * dataPageCount / totalRecords) + - min(idxPageCnt, (log_2(idxPageCnt) * ranges + - rows * (log_2(idxPageCnt) + log_2(rows) - log_2(totalRecords)))); - } - } - else - { - cost = rows2double(ranges+rows); // Use default costing - } - DBUG_RETURN(cost); -} - -int ha_ibmdb2i::useIndexFile(int idx) -{ - DBUG_ENTER("ha_ibmdb2i::useIndexFile"); - - if (activeHandle) - releaseActiveHandle(); - - int rc = 0; - - if (!indexHandles[idx]) - rc = db2Table->indexFile(idx)->allocateNewInstance(&indexHandles[idx], curConnection); - - if (rc == 0) - { - activeHandle = indexHandles[idx]; - bumpInUseCounter(1); - } - - DBUG_RETURN(rc); -} - - -ulong ha_ibmdb2i::index_flags(uint inx, uint part, bool all_parts) const -{ - return HA_READ_NEXT | HA_READ_PREV | HA_KEYREAD_ONLY | HA_READ_ORDER | HA_READ_RANGE; -} - - -static struct st_mysql_sys_var* ibmdb2i_system_variables[] = { - MYSQL_SYSVAR(rdb_name), - MYSQL_SYSVAR(transaction_unsafe), - MYSQL_SYSVAR(lob_alloc_size), - MYSQL_SYSVAR(max_read_buffer_size), - MYSQL_SYSVAR(max_write_buffer_size), - MYSQL_SYSVAR(async_enabled), - MYSQL_SYSVAR(assume_exclusive_use), - MYSQL_SYSVAR(compat_opt_blob_cols), - 
MYSQL_SYSVAR(compat_opt_time_as_duration), - MYSQL_SYSVAR(compat_opt_allow_zero_date_vals), - MYSQL_SYSVAR(compat_opt_year_as_int), - MYSQL_SYSVAR(propagate_default_col_vals), - MYSQL_SYSVAR(create_index_option), -// MYSQL_SYSVAR(discovery_mode), - MYSQL_SYSVAR(system_trace_level), - NULL -}; - - -struct st_mysql_storage_engine ibmdb2i_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION }; - -mysql_declare_plugin(ibmdb2i) -{ - MYSQL_STORAGE_ENGINE_PLUGIN, - &ibmdb2i_storage_engine, - "IBMDB2I", - "The IBM development team in Rochester, Minnesota", - "IBM DB2 for i Storage Engine", - PLUGIN_LICENSE_GPL, - ibmdb2i_init_func, /* Plugin Init */ - ibmdb2i_done_func, /* Plugin Deinit */ - 0x0100 /* 1.0 */, - NULL, /* status variables */ - ibmdb2i_system_variables, /* system variables */ - NULL /* config options */ -} -mysql_declare_plugin_end; diff --git a/storage/ibmdb2i/ha_ibmdb2i.h b/storage/ibmdb2i/ha_ibmdb2i.h deleted file mode 100644 index b2a43232f2d..00000000000 --- a/storage/ibmdb2i/ha_ibmdb2i.h +++ /dev/null @@ -1,822 +0,0 @@ -/* -Licensed Materials - Property of IBM -DB2 Storage Engine Enablement -Copyright IBM Corporation 2007,2008 -All rights reserved - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - (a) Redistributions of source code must retain this list of conditions, the - copyright notice in section {d} below, and the disclaimer following this - list of conditions. - (b) Redistributions in binary form must reproduce this list of conditions, the - copyright notice in section (d) below, and the disclaimer following this - list of conditions, in the documentation and/or other materials provided - with the distribution. - (c) The name of IBM may not be used to endorse or promote products derived from - this software without specific prior written permission. - (d) The text of the required copyright notice is: - Licensed Materials - Property of IBM - DB2 Storage Engine Enablement - Copyright IBM Corporation 2007,2008 - All rights reserved - -THIS SOFTWARE IS PROVIDED BY IBM CORPORATION "AS IS" AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT -SHALL IBM CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT -OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGE. -*/ - -/** @file ha_ibmdb2i.h - - @brief - - @note - - @see -*/ - -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif - -#include "as400_types.h" -#include "as400_protos.h" -#include "db2i_global.h" -#include "db2i_ileBridge.h" -#include "builtins.h" -#include "db2i_misc.h" -#include "db2i_file.h" -#include "db2i_blobCollection.h" -#include "db2i_collationSupport.h" -#include "db2i_validatedPointer.h" -#include "db2i_ioBuffers.h" -#include "db2i_errors.h" -#include "db2i_sqlStatementStream.h" - -/** @brief - IBMDB2I_SHARE is a structure that will be shared among all open handlers. - It is used to describe the underlying table definition, and it caches - table statistics. 
-*/ -struct IBMDB2I_SHARE { - char *table_name; - uint table_name_length,use_count; - pthread_mutex_t mutex; - THR_LOCK lock; - - db2i_table* db2Table; - - class CStats - { - public: - void cacheUpdateTime(time_t time) - {update_time = time; initFlag |= lastModTime;} - time_t getUpdateTime() const - {return update_time;} - void cacheRowCount(ha_rows rows) - {records = rows; initFlag |= rowCount;} - ha_rows getRowCount() const - {return records;} - void cacheDelRowCount(ha_rows rows) - {deleted = rows; initFlag |= deletedRowCount;} - ha_rows getDelRowCount() const - {return deleted;} - void cacheMeanLength(ulong len) - {mean_rec_length = len; initFlag |= meanRowLen;} - ulong getMeanLength() - {return mean_rec_length;} - void cacheAugmentedDataLength(ulong len) - {data_file_length = len; initFlag |= ioCount;} - ulong getAugmentedDataLength() - {return data_file_length;} - bool isInited(uint flags) - {return initFlag & flags;} - void invalidate(uint flags) - {initFlag &= ~flags;} - - private: - uint initFlag; - time_t update_time; - ha_rows records; - ha_rows deleted; - ulong mean_rec_length; - ulong data_file_length; - } cachedStats; - -}; - -class ha_ibmdb2i: public handler -{ - THR_LOCK_DATA lock; ///< MySQL lock - IBMDB2I_SHARE *share; ///< Shared lock info - - // The record we are positioned on, together with the handle used to get - // i. - uint32 currentRRN; - uint32 rrnAssocHandle; - - // Dup key values needed by info() - uint32 lastDupKeyRRN; - uint32 lastDupKeyID; - - bool returnDupKeysImmediately; - - // Dup key value need by update() - bool onDupUpdate; - - - db2i_table* db2Table; - - // The file handle of the PF or LF being accessed by the current operation. - FILE_HANDLE activeHandle; - - // The file handle of the underlying PF - FILE_HANDLE dataHandle; - - // Array of file handles belonging to the underlying LFs - FILE_HANDLE* indexHandles; - - // Flag to indicate whether a call needs to be made to unlock a row when - // a read operation has ended. DB2 will handle row unlocking as we move - // through rows, but if an operation ends before we reach the end of a file, - // DB2 needs to know to unlock the last row read. - bool releaseRowNeeded; - - // Pointer to a definition of the layout of the row buffer for the file - // described by activeHandle - const db2i_file::RowFormat* activeFormat; - - IORowBuffer keyBuf; - uint32 keyLen; - - IOWriteBuffer multiRowWriteBuf; - IOAsyncReadBuffer multiRowReadBuf; - - IOAsyncReadBuffer* activeReadBuf; - IOWriteBuffer* activeWriteBuf; - - BlobCollection* blobReadBuffers; // Dynamically allocated per query and used - // to manage the buffers used for reading LOBs - ValidatedPointer* blobWriteBuffers; - - // Return codes are not used/honored by rnd_init and start_bulk_insert - // so we need a way to signal the failure "downstream" to subsequent - // functions. - int last_rnd_init_rc; - int last_index_init_rc; - int last_start_bulk_insert_rc; - - // end_bulk_insert may get called twice for a single start_bulk_insert - // This is our way to do cleanup only once. - bool outstanding_start_bulk_insert; - - // Auto_increment 'increment by' value needed by write_row() - uint32 incrementByValue; - bool default_identity_value; - - // Flags and values used during write operations for auto_increment processing - bool autoIncLockAcquired; - bool got_auto_inc_values; - uint64 next_identity_value; - - // The access intent indicated by the last external_locks() call. 
- // May be either QMY_READ or QMY_UPDATABLE - char accessIntent; - char readAccessIntent; - - ha_rows* indexReadSizeEstimates; - - MEM_ROOT conversionBufferMemroot; - - bool forceSingleRowRead; - - bool readAllColumns; - - bool invalidDataFound; - - db2i_ileBridge* cachedBridge; - - ValidatedObject curConnection; - uint16 activeReferences; - -public: - - ha_ibmdb2i(handlerton *hton, TABLE_SHARE *table_arg); - ~ha_ibmdb2i(); - - const char *table_type() const { return "IBMDB2I"; } - const char *index_type(uint inx) { return "RADIX"; } - const key_map *keys_to_use_for_scanning() { return &key_map_full; } - const char **bas_ext() const; - - ulonglong table_flags() const - { - return HA_NULL_IN_KEY | HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | - HA_PARTIAL_COLUMN_READ | - HA_DUPLICATE_POS | HA_NO_PREFIX_CHAR_KEYS | - HA_HAS_RECORDS | HA_BINLOG_ROW_CAPABLE | HA_REQUIRES_KEY_COLUMNS_FOR_DELETE | - HA_CAN_INDEX_BLOBS; - } - - ulong index_flags(uint inx, uint part, bool all_parts) const; - -// Note that we do not implement max_supported_record_length. -// We'll let create fail accordingly if the row is -// too long. This allows us to hide the fact that varchars > 32K are being -// implemented as DB2 LOBs. - - uint max_supported_keys() const { return 4000; } - uint max_supported_key_parts() const { return MAX_DB2_KEY_PARTS; } - uint max_supported_key_length() const { return 32767; } - uint max_supported_key_part_length() const { return 32767; } - double read_time(uint index, uint ranges, ha_rows rows); - double scan_time(); - int open(const char *name, int mode, uint test_if_locked); - int close(void); - int write_row(uchar * buf); - int update_row(const uchar * old_data, uchar * new_data); - int delete_row(const uchar * buf); - int index_init(uint idx, bool sorted); - int index_read(uchar * buf, const uchar * key, - uint key_len, enum ha_rkey_function find_flag); - int index_next(uchar * buf); - int index_read_last(uchar * buf, const uchar * key, uint key_len); - int index_next_same(uchar *buf, const uchar *key, uint keylen); - int index_prev(uchar * buf); - int index_first(uchar * buf); - int index_last(uchar * buf); - int rnd_init(bool scan); - int rnd_end(); - int rnd_next(uchar *buf); - int rnd_pos(uchar * buf, uchar *pos); - void position(const uchar *record); - int info(uint); - ha_rows records(); - int extra(enum ha_extra_function operation); - int external_lock(THD *thd, int lock_type); - int delete_all_rows(void); - ha_rows records_in_range(uint inx, key_range *min_key, - key_range *max_key); - int delete_table(const char *from); - int rename_table(const char * from, const char * to); - int create(const char *name, TABLE *form, - HA_CREATE_INFO *create_info); - int updateFrm(TABLE *table_def, File file); - int openTableDef(TABLE *table_def); - int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys); - int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys); - int final_drop_index(TABLE *table_arg) {return 0;} - void get_auto_increment(ulonglong offset, ulonglong increment, - ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values); - int reset_auto_increment(ulonglong value); - void restore_auto_increment(ulonglong prev_insert_id) {return;} - void update_create_info(HA_CREATE_INFO *create_info); - int getNextIdVal(ulonglong *value); - int analyze(THD* thd,HA_CHECK_OPT* check_opt); - int optimize(THD* thd, HA_CHECK_OPT* check_opt); - bool can_switch_engines(); - void free_foreign_key_create_info(char* str); - char* 
get_foreign_key_create_info(); - int get_foreign_key_list(THD *thd, List *f_key_list); - uint referenced_by_foreign_key(); - bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); - virtual bool get_error_message(int error, String *buf); - - THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type); - - bool low_byte_first() const { return 0; } - void unlock_row(); - int index_end(); - int reset(); - static int doCommit(handlerton *hton, THD *thd, bool all); - static int doRollback(handlerton *hton, THD *thd, bool all); - void start_bulk_insert(ha_rows rows); - int end_bulk_insert(); - int start_stmt(THD *thd, thr_lock_type lock_type); - - void initBridge(THD* thd = NULL) - { - if (thd == NULL) thd = ha_thd(); - DBUG_PRINT("ha_ibmdb2i::initBridge",("Initing bridge. Conn ID=%d", thd->thread_id)); - cachedBridge = db2i_ileBridge::getBridgeForThread(thd); - } - - db2i_ileBridge* bridge() {DBUG_ASSERT(cachedBridge); return cachedBridge;} - - static uint8 autoCommitIsOn(THD* thd) - { return (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) ? QMY_NO : QMY_YES); } - - uint8 getCommitLevel(); - uint8 getCommitLevel(THD* thd); - - static int doSavepointSet(THD* thd, char* name) - { - return db2i_ileBridge::getBridgeForThread(thd)->savepoint(QMY_SET_SAVEPOINT, - name); - } - - static int doSavepointRollback(THD* thd, char* name) - { - return db2i_ileBridge::getBridgeForThread(thd)->savepoint(QMY_ROLLBACK_SAVEPOINT, - name); - } - - static int doSavepointRelease(THD* thd, char* name) - { - return db2i_ileBridge::getBridgeForThread(thd)->savepoint(QMY_RELEASE_SAVEPOINT, - name); - } - - // We can't guarantee that the rows we know about when this is called - // will be the same number of rows that read returns (since DB2 activity - // may insert additional rows). Therefore, we do as the Federated SE and - // return the max possible. 
- ha_rows estimate_rows_upper_bound() - { - return HA_POS_ERROR; - } - - -private: - - enum enum_TimeFormat - { - TIME_OF_DAY, - DURATION - }; - - enum enum_BlobMapping - { - AS_BLOB, - AS_VARCHAR - }; - - enum enum_ZeroDate - { - NO_SUBSTITUTE, - SUBSTITUTE_0001_01_01 - }; - - enum enum_YearFormat - { - CHAR4, - SMALLINT - }; - - enum_ZeroDate cachedZeroDateOption; - - IBMDB2I_SHARE *get_share(const char *table_name, TABLE *table); - int free_share(IBMDB2I_SHARE *share); - int32 mungeDB2row(uchar* record, const char* dataPtr, const char* nullMapPtr, bool skipLOBs); - int prepareRowForWrite(char* data, char* nulls, bool honorIdentCols); - int prepareReadBufferForLobs(); - int32 prepareWriteBufferForLobs(); - uint32 adjustLobBuffersForRead(); - bool lobFieldsRequested(); - int convertFieldChars(enum_conversionDirection direction, - uint16 fieldID, - const char* input, - char* output, - size_t ilen, - size_t olen, - size_t* outDataLen, - bool tacitErrors=FALSE, - size_t* substChars=NULL); - - /** - Fast integer log2 function - */ - uint64 log_2(uint64 val) - { - uint64 exp = 0; - while( (val >> exp) != 0) - { - exp++; - } - DBUG_ASSERT(exp-1 == (uint64)log2(val)); - return exp-1; - } - - void bumpInUseCounter(uint16 amount) - { - activeReferences += amount; - DBUG_PRINT("ha_ibmdb2i::bumpInUseCounter", ("activeReferences = %d", activeReferences)); - if (activeReferences) - curConnection = (uint32)(ha_thd()->thread_id); - else - curConnection = 0; - } - - - int useDataFile() - { - DBUG_ENTER("ha_ibmdb2i::useDataFile"); - - int rc = 0; - if (!dataHandle) - rc = db2Table->dataFile()->allocateNewInstance(&dataHandle, curConnection); - else if (activeHandle == dataHandle) - DBUG_RETURN(0); - - DBUG_ASSERT(activeHandle == 0); - - if (likely(rc == 0)) - { - activeHandle = dataHandle; - bumpInUseCounter(1); - } - - DBUG_RETURN(rc); - } - - void releaseAnyLockedRows() - { - if (releaseRowNeeded) - { - DBUG_PRINT("ha_ibmdb2i::releaseAnyLockedRows", ("Releasing rows")); - db2i_ileBridge::getBridgeForThread()->rrlslck(activeHandle, accessIntent); - releaseRowNeeded = FALSE; - } - } - - - void releaseDataFile() - { - DBUG_ENTER("ha_ibmdb2i::releaseDataFile"); - releaseAnyLockedRows(); - bumpInUseCounter(-1); - DBUG_ASSERT((volatile int)activeReferences >= 0); - activeHandle = 0; - DBUG_VOID_RETURN; - } - - int useIndexFile(int idx); - - void releaseIndexFile(int idx) - { - DBUG_ENTER("ha_ibmdb2i::releaseIndexFile"); - releaseAnyLockedRows(); - bumpInUseCounter(-1); - DBUG_ASSERT((volatile int)activeReferences >= 0); - activeHandle = 0; - DBUG_VOID_RETURN; - } - - FILE_HANDLE allocateFileHandle(char* database, char* table, int* activityReference, bool hasBlobs); - - int updateBuffers(const db2i_file::RowFormat* format, uint rowsToRead, uint rowsToWrite); - - int flushWrite(FILE_HANDLE fileHandle, uchar* buf = NULL); - - int alterStartWith(); - - int buildDB2ConstraintString(LEX* lex, - String& appendHere, - const char* database, - Field** fields, - char* fileSortSequenceType, - char* fileSortSequence, - char* fileSortSequenceLibrary); - - void releaseWriteBuffer(); - - void setIndexReadEstimate(uint index, ha_rows rows) - { - if (!indexReadSizeEstimates) - { - indexReadSizeEstimates = (ha_rows*)my_malloc(sizeof(ha_rows) * table->s->keys, MYF(MY_WME | MY_ZEROFILL)); - } - indexReadSizeEstimates[index] = rows; - } - - ha_rows getIndexReadEstimate(uint index) - { - if (indexReadSizeEstimates) - return max(indexReadSizeEstimates[index], 1); - - return 10000; // Assume index scan if no estimate exists. 
- } - - - void quiesceAllFileHandles() - { - db2i_ileBridge* bridge = db2i_ileBridge::getBridgeForThread(); - if (dataHandle) - { - bridge->quiesceFileInstance(dataHandle); - } - - for (int idx = 0; idx < table_share->keys; ++idx) - { - if (indexHandles[idx] != 0) - { - bridge->quiesceFileInstance(indexHandles[idx]); - } - } - } - - int32 buildCreateIndexStatement(SqlStatementStream& sqlStream, - KEY& key, - bool isPrimary, - const char* db2LibName, - const char* db2FileName); - - int32 buildIndexFieldList(String& appendHere, - const KEY& key, - bool isPrimary, - char* fileSortSequenceType, - char* fileSortSequence, - char* fileSortSequenceLibrary); - - // Specify NULL for data when using the data pointed to by field - int32 convertMySQLtoDB2(Field* field, const DB2Field& db2Field, char* db2Buf, const uchar* data = NULL); - - int32 convertDB2toMySQL(const DB2Field& db2Field, Field* field, const char* buf); - int getFieldTypeMapping(Field* field, - String& mapping, - enum_TimeFormat timeFormate, - enum_BlobMapping blobMapping, - enum_ZeroDate zeroDateHandling, - bool propagateDefaults, - enum_YearFormat yearFormat); - - int getKeyFromName(const char* name, size_t len); - - void releaseActiveHandle() - { - if (activeHandle == dataHandle) - releaseDataFile(); - else - releaseIndexFile(active_index); - } - - - int32 finishBulkInsert(); - - void doInitialRead(char orientation, - uint32 rowsToBuffer, - ILEMemHandle key = 0, - int keyLength = 0, - int keyParts = 0); - - - int32 readFromBuffer(uchar* destination, char orientation) - { - char* row; - int32 rc = 0; - row = activeReadBuf->readNextRow(orientation, currentRRN); - - if (unlikely(!row)) - { - rc = activeReadBuf->lastrc(); - if (rc == QMY_ERR_LOB_SPACE_TOO_SMALL) - { - rc = handleLOBReadOverflow(); - if (rc == 0) - { - DBUG_ASSERT(activeReadBuf->rowCount() == 1); - row = activeReadBuf->readNextRow(orientation, currentRRN); - - if (unlikely(!row)) - rc = activeReadBuf->lastrc(); - } - } - } - - if (likely(rc == 0)) - { - rrnAssocHandle = activeHandle; - rc = mungeDB2row(destination, row, row+activeReadBuf->getRowNullOffset(), false); - } - return rc; - } - - int32 handleLOBReadOverflow(); - - char* getCharacterConversionBuffer(int fieldId, int length) - { - if (unlikely(!alloc_root_inited(&conversionBufferMemroot))) - init_alloc_root(&conversionBufferMemroot, 8192, 0); - - return (char*)alloc_root(&conversionBufferMemroot, length);; - } - - void resetCharacterConversionBuffers() - { - if (alloc_root_inited(&conversionBufferMemroot)) - { - free_root(&conversionBufferMemroot, MYF(MY_MARK_BLOCKS_FREE)); - } - } - - void tweakReadSet() - { - THD* thd = ha_thd(); - int command = thd_sql_command(thd); - if ((command == SQLCOM_UPDATE || - command == SQLCOM_UPDATE_MULTI) || - ((command == SQLCOM_DELETE || - command == SQLCOM_DELETE_MULTI) && - thd->options & OPTION_BIN_LOG)) - readAllColumns = TRUE; - else - readAllColumns = FALSE; - } - - /** - - */ - int useFileByHandle(char intent, - FILE_HANDLE handle) - { - DBUG_ENTER("ha_ibmdb2i::useFileByHandle"); - - const db2i_file* file; - if (handle == dataHandle) - file = db2Table->dataFile(); - else - { - for (uint i = 0; i < table_share->keys; ++i) - { - if (indexHandles[i] == handle) - { - file = db2Table->indexFile(i); - active_index = i; - } - } - } - - int rc = file->obtainRowFormat(handle, intent, getCommitLevel(), &activeFormat); - if (likely(rc == 0)) - { - activeHandle = handle; - bumpInUseCounter(1); - } - - DBUG_RETURN(rc); - } - - const db2i_file* getFileForActiveHandle() const - { - if 
(activeHandle == dataHandle) - return db2Table->dataFile(); - else - for (uint i = 0; i < table_share->keys; ++i) - if (indexHandles[i] == activeHandle) - return db2Table->indexFile(i); - DBUG_ASSERT(0); - return NULL; - } - - int prepReadBuffer(ha_rows rowsToRead, const db2i_file* file, char intent); - int prepWriteBuffer(ha_rows rowsToWrite, const db2i_file* file); - - void invalidateCachedStats() - { - share->cachedStats.invalidate(rowCount | deletedRowCount | objLength | - meanRowLen | ioCount); - } - - void warnIfInvalidData() - { - if (unlikely(invalidDataFound)) - { - warning(ha_thd(), DB2I_ERR_INVALID_DATA, table->alias); - } - } - - /** - Calculate the maximum value that a particular field can hold. - - This is used to anticipate overflows in the auto_increment processing. - - @param field The Field to be analyzed - - @return The maximum value - */ - static uint64 maxValueForField(const Field* field) - { - uint64 maxValue=0; - switch (field->type()) - { - case MYSQL_TYPE_TINY: - if (((const Field_num*)field)->unsigned_flag) - maxValue = (1 << 8) - 1; - else - maxValue = (1 << 7) - 1; - break; - case MYSQL_TYPE_SHORT: - if (((const Field_num*)field)->unsigned_flag) - maxValue = (1 << 16) - 1; - else - maxValue = (1 << 15) - 1; - break; - case MYSQL_TYPE_INT24: - if (((const Field_num*)field)->unsigned_flag) - maxValue = (1 << 24) - 1; - else - maxValue = (1 << 23) - 1; - break; - case MYSQL_TYPE_LONG: - if (((const Field_num*)field)->unsigned_flag) - maxValue = (1LL << 32) - 1; - else - maxValue = (1 << 31) - 1; - break; - case MYSQL_TYPE_LONGLONG: - if (((const Field_num*)field)->unsigned_flag) - maxValue = ~(0LL); - else - maxValue = 1 << 63 - 1; - break; - } - - return maxValue; - } - - void cleanupBuffers() - { - if (blobReadBuffers) - { - delete blobReadBuffers; - blobReadBuffers = NULL; - } - if (blobWriteBuffers) - { - delete[] blobWriteBuffers; - blobWriteBuffers = NULL; - } - if (alloc_root_inited(&conversionBufferMemroot)) - { - free_root(&conversionBufferMemroot, MYF(0)); - } - } - - -/** - Generate a valid RCDFMT name based on the name of the table. - - The RCDFMT name is devised by munging the name of the table, - uppercasing all ascii alpha-numeric characters and replacing all other - characters with underscores until up to ten characters have been generated. - - @param tableName The name of the table, as given on the MySQL - CREATE TABLE statement - @param[out] query The string to receive the generated RCDFMT name -*/ - static void generateAndAppendRCDFMT(const char* tableName, String& query) - { - char rcdfmt[11]; - - // The RCDFMT name must begin with an alpha character. - // We enforce this by skipping to the first alpha character in the table - // name. 
If no alpha character exists, we use 'X' for the RCDFMT name; - - while (*tableName && - (!my_isascii(*tableName) || - !my_isalpha(system_charset_info, *tableName))) - { - tableName += my_mbcharlen(system_charset_info, *tableName); - } - - if (unlikely(!(*tableName))) - { - rcdfmt[0]= 'X'; - rcdfmt[1]= 0; - } - else - { - int r= 0; - while ((r < sizeof(rcdfmt)-1) && *tableName) - { - if (my_isascii(*tableName) && - my_isalnum(system_charset_info, *tableName)) - rcdfmt[r] = my_toupper(system_charset_info, *tableName); - else - rcdfmt[r] = '_'; - - ++r; - tableName += my_mbcharlen(system_charset_info, *tableName); - } - rcdfmt[r]= 0; - } - query.append(STRING_WITH_LEN(" RCDFMT ")); - query.append(rcdfmt); - } - - int32 generateShadowIndex(SqlStatementStream& stream, - const KEY& key, - const char* libName, - const char* fileName, - const String& fieldDefinition); -}; diff --git a/storage/ibmdb2i/plug.in b/storage/ibmdb2i/plug.in deleted file mode 100644 index 0913d72aabf..00000000000 --- a/storage/ibmdb2i/plug.in +++ /dev/null @@ -1,12 +0,0 @@ -MYSQL_STORAGE_ENGINE([ibmdb2i], [], [IBM DB2 for i Storage Engine], - [IBM DB2 for i Storage Engine], [max,max-no-ndb]) -MYSQL_PLUGIN_DYNAMIC([ibmdb2i], [ha_ibmdb2i.la]) - -AC_CHECK_HEADER([qlgusr.h], - # qlgusr.h is just one of the headers from the i5/OS PASE environment; the - # EBCDIC headers are in /QIBM/include, and have to be converted to ASCII - # before cpp gets to them - [:], - # Missing PASE environment, can't build this engine - [mysql_plugin_ibmdb2i=no - with_plugin_ibmdb2i=no]) -- cgit v1.2.1 From 30a3e345a700483a2b044558343f80baa5f023da Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:28:45 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3507.1.26 > revision-id: mattias.jonsson@oracle.com-20101201112046-2ugtweya0dltcrys > parent: nirbhay.choubey@sun.com-20101201065531-w2f0j3g5yzok8z9x > committer: Mattias Jonsson > branch nick: b56380-51-bt > timestamp: Wed 2010-12-01 12:20:46 +0100 > message: > Bug#56380: valgrind memory leak warning from partition tests > > There could be memory leaks if ALTER ... PARTITION command fails. > > Problem was that the list of items to free was not set in > the partition info structure when fix_partition_func call failed > during ALTER ... PARTITION. > > Solved by always setting the list in the partition info struct. 
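A minimal standalone sketch of the ownership hand-off described above, assuming toy Arena/PartInfo types and a toy fix_partition_func() in place of the real Query_arena/partition_info/sql_partition machinery; it only mirrors the pattern in the hunk below, where the arena's free list must reach its owner even when the fixing step fails:

/* Sketch only: items allocated on an arena before a failing call are
   leaked unless the free list is attached to its owner unconditionally. */
#include <cstdio>
#include <vector>

struct Arena    { std::vector<int*> free_list; };      // stand-in for Query_arena
struct PartInfo { std::vector<int*> item_free_list; };  // stand-in for partition_info

static bool fix_partition_func(Arena &a)
{
  a.free_list.push_back(new int(42));  // allocation happens before the failure
  return true;                         // simulate the failing call (a non-zero 'tmp' in the real code)
}

int main()
{
  Arena    arena;
  PartInfo part_info;

  bool tmp= fix_partition_func(arena);

  /* The fix: attach the arena's allocations to their owner regardless of
     'tmp', so the error path can still release them. */
  part_info.item_free_list= arena.free_list;

  for (int *p : part_info.item_free_list)   // freed even though tmp is set
    delete p;

  std::printf("tmp=%d, freed=%zu item(s)\n", (int) tmp,
              part_info.item_free_list.size());
  return 0;
}

Attaching the list only on success, as the code did before the fix, skips this step on the error path and leaves the already-allocated items unreachable, which is the valgrind leak the partition tests reported.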
--- sql/table.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/table.cc b/sql/table.cc index 18523f08551..b43d29294a8 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1867,8 +1867,8 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, { if (work_part_info_used) tmp= fix_partition_func(thd, outparam, is_create_table); - outparam->part_info->item_free_list= part_func_arena.free_list; } + outparam->part_info->item_free_list= part_func_arena.free_list; partititon_err: if (tmp) { -- cgit v1.2.1 From 3eff3ea2420b8f4d8a4d5f70f2341e7bf27bd644 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:30:48 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3507.1.7 > revision-id: guilhem@mysql.com-20101122085759-53uuoyqyjkh4em2m > parent: davi.arnaut@oracle.com-20101120142951-l0f3bxmcwibcplxq > committer: Guilhem Bichot > branch nick: mysql-5.1-bugteam > timestamp: Mon 2010-11-22 09:57:59 +0100 > message: > Fix for Bug#56138 "valgrind errors about overlapping memory when double-assigning same variable", > and related small fixes. --- mysql-test/r/user_var.result | 3 +++ mysql-test/t/user_var.test | 7 +++++++ sql/field_conv.cc | 7 ++----- sql/item_func.cc | 2 +- sql/sql_select.cc | 8 ++++++-- 5 files changed, 19 insertions(+), 8 deletions(-) diff --git a/mysql-test/r/user_var.result b/mysql-test/r/user_var.result index 56266a46e20..cf82a18ea83 100644 --- a/mysql-test/r/user_var.result +++ b/mysql-test/r/user_var.result @@ -447,4 +447,7 @@ IF( count(*), 1) 1 DROP TABLE t1; +select @v:=@v:=sum(1) from dual; +@v:=@v:=sum(1) +1 End of 5.1 tests diff --git a/mysql-test/t/user_var.test b/mysql-test/t/user_var.test index 4f27866de23..56217fe67d5 100644 --- a/mysql-test/t/user_var.test +++ b/mysql-test/t/user_var.test @@ -346,4 +346,11 @@ FROM t1 GROUP BY a LIMIT 1; DROP TABLE t1; +# +# BUG#56138 "valgrind errors about overlapping memory when +# double-assigning same variable" +# + +select @v:=@v:=sum(1) from dual; + --echo End of 5.1 tests diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 0bffde9671a..a4fca6f8ad7 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -786,11 +786,8 @@ int field_conv(Field *to,Field *from) ((Field_varstring*)from)->length_bytes == ((Field_varstring*)to)->length_bytes)) { // Identical fields -#ifdef HAVE_purify - /* This may happen if one does 'UPDATE ... SET x=x' */ - if (to->ptr != from->ptr) -#endif - memcpy(to->ptr,from->ptr,to->pack_length()); + // to->ptr==from->ptr may happen if one does 'UPDATE ... SET x=x' + memmove(to->ptr, from->ptr, to->pack_length()); return 0; } } diff --git a/sql/item_func.cc b/sql/item_func.cc index eaf6a1b6d14..258ba0f01d5 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -3913,7 +3913,7 @@ update_hash(user_var_entry *entry, bool set_null, void *ptr, uint length, length--; // Fix length change above entry->value[length]= 0; // Store end \0 } - memcpy(entry->value,ptr,length); + memmove(entry->value, ptr, length); if (type == DECIMAL_RESULT) ((my_decimal*)entry->value)->fix_buffer_pointer(); entry->length= length; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 08bd0c28738..9bf0a236e1b 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -4034,8 +4034,12 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, continue; } -#ifdef HAVE_purify - /* Valgrind complains about overlapped memcpy when save_pos==use. 
*/ +#if defined(__GNUC__) && !MY_GNUC_PREREQ(4,4) + /* + Old gcc used a memcpy(), which is undefined if save_pos==use: + http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19410 + http://gcc.gnu.org/bugzilla/show_bug.cgi?id=39480 + */ if (save_pos != use) #endif *save_pos= *use; -- cgit v1.2.1 From 00447eadd597a9f434330f5ccb1c9d8178c79602 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:37:36 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3517 > revision-id: sergey.glukhov@oracle.com-20101213103926-okypkn10adeeyns8 > parent: davi.arnaut@oracle.com-20101207150620-s49dstok48oy585o > committer: Sergey Glukhov > branch nick: mysql-5.1-bugteam > timestamp: Mon 2010-12-13 13:39:26 +0300 > message: > Bug#58396 group_concat and explain extended are still crashy > Explain fails at fix_fields stage and some items are left unfixed, > particulary Item_group_concat. Item_group_concat::orig_args field > is uninitialized in this case and Item_group_concat::print call > leads to crash. > The fix: > move the initialization of Item_group_concat::orig_args > into constructor. --- mysql-test/r/func_gconcat.result | 12 ++++++++++++ mysql-test/t/func_gconcat.test | 11 +++++++++++ sql/item_sum.cc | 2 +- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_gconcat.result b/mysql-test/r/func_gconcat.result index ae48eb1e0ff..a4deaf4d90c 100644 --- a/mysql-test/r/func_gconcat.result +++ b/mysql-test/r/func_gconcat.result @@ -1029,4 +1029,16 @@ GROUP_CONCAT(t1.a ORDER BY t1.a) 1,1,2,2 DEALLOCATE PREPARE stmt; DROP TABLE t1; +# +# Bug#58396 group_concat and explain extended are still crashy +# +CREATE TABLE t1(a INT); +EXPLAIN EXTENDED SELECT UPDATEXML('1', a, '1') +FROM t1 ORDER BY (SELECT GROUP_CONCAT(1) FROM t1); +ERROR HY000: Only constant XPATH queries are supported +SHOW WARNINGS; +Level Code Message +Error 1105 Only constant XPATH queries are supported +Note 1003 select updatexml('1',`test`.`t1`.`a`,'1') AS `UPDATEXML('1', a, '1')` from `test`.`t1` order by (select group_concat(1 separator ',') from `test`.`t1`) +DROP TABLE t1; End of 5.1 tests diff --git a/mysql-test/t/func_gconcat.test b/mysql-test/t/func_gconcat.test index 926c1f92855..e8d66f120a3 100644 --- a/mysql-test/t/func_gconcat.test +++ b/mysql-test/t/func_gconcat.test @@ -734,4 +734,15 @@ EXECUTE stmt; DEALLOCATE PREPARE stmt; DROP TABLE t1; +--echo # +--echo # Bug#58396 group_concat and explain extended are still crashy +--echo # + +CREATE TABLE t1(a INT); +--error ER_UNKNOWN_ERROR +EXPLAIN EXTENDED SELECT UPDATEXML('1', a, '1') +FROM t1 ORDER BY (SELECT GROUP_CONCAT(1) FROM t1); +SHOW WARNINGS; +DROP TABLE t1; + --echo End of 5.1 tests diff --git a/sql/item_sum.cc b/sql/item_sum.cc index ae9e46e2abf..e479db7f7b9 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -3003,6 +3003,7 @@ Item_func_group_concat(Name_resolution_context *context_arg, order_item->item= arg_ptr++; } } + memcpy(orig_args, args, sizeof(Item*) * arg_count); } @@ -3233,7 +3234,6 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref) if (check_sum_func(thd, ref)) return TRUE; - memcpy (orig_args, args, sizeof (Item *) * arg_count); fixed= 1; return FALSE; } -- cgit v1.2.1 From c044b77aa9bc5563b2f26cf75e015d35b72d9160 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:55:28 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3518 > revision-id: 
sergey.glukhov@oracle.com-20101213114812-kaq7sh0s623128lv > parent: sergey.glukhov@oracle.com-20101213103926-okypkn10adeeyns8 > committer: Sergey Glukhov > branch nick: mysql-5.1-bugteam > timestamp: Mon 2010-12-13 14:48:12 +0300 > message: > Bug#39828 : Autoinc wraps around when offset and increment > 1 > Auto increment value wraps when performing a bulk insert with > auto_increment_increment and auto_increment_offset greater than > one. > The fix: > If overflow happened then return MAX_ULONGLONG value as an > indication of overflow and check this before storing the > value into the field in update_auto_increment(). --- mysql-test/r/auto_increment.result | 21 ++++++++++ mysql-test/suite/innodb/r/innodb-autoinc.result | 18 +++----- mysql-test/suite/innodb/t/innodb-autoinc.test | 49 +++------------------- .../suite/innodb_plugin/r/innodb-autoinc.result | 18 +++----- .../suite/innodb_plugin/t/innodb-autoinc.test | 49 +++------------------- mysql-test/t/auto_increment.test | 21 ++++++++++ sql/handler.cc | 30 +++++++++---- 7 files changed, 88 insertions(+), 118 deletions(-) diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result index 4a2e108f8c6..5fae14e2c07 100644 --- a/mysql-test/r/auto_increment.result +++ b/mysql-test/r/auto_increment.result @@ -476,3 +476,24 @@ SELECT a FROM t2; a 2 DROP TABLE t1, t2; +# +# Bug#39828 autoinc wraps around when offset and increment > 1 +# +CREATE TABLE t1 (c1 BIGINT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(c1)) engine=MyISAM; +INSERT INTO t1 VALUES(1); +INSERT INTO t1 VALUES (18446744073709551601); +SET @@SESSION.AUTO_INCREMENT_INCREMENT=10; +SELECT @@SESSION.AUTO_INCREMENT_OFFSET; +@@SESSION.AUTO_INCREMENT_OFFSET +1 +INSERT INTO t1 VALUES (NULL), (NULL), (NULL); +ERROR 22003: Out of range value for column 't1' at row 167 +SELECT * FROM t1; +c1 +1 +18446744073709551601 +18446744073709551611 +SET @@SESSION.AUTO_INCREMENT_INCREMENT=default; +SET @@SESSION.AUTO_INCREMENT_OFFSET=default; +DROP TABLE t1; +End of 5.1 tests diff --git a/mysql-test/suite/innodb/r/innodb-autoinc.result b/mysql-test/suite/innodb/r/innodb-autoinc.result index 350c7ebd541..0c8d16f27fb 100644 --- a/mysql-test/suite/innodb/r/innodb-autoinc.result +++ b/mysql-test/suite/innodb/r/innodb-autoinc.result @@ -471,17 +471,12 @@ SHOW VARIABLES LIKE "%auto_inc%"; Variable_name Value auto_increment_increment 2 auto_increment_offset 10 -INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); +INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); +ERROR HY000: Failed to read auto-increment value from storage engine SELECT * FROM t1; c1 1 18446744073709551603 -18446744073709551604 -18446744073709551606 -18446744073709551608 -18446744073709551610 -18446744073709551612 -18446744073709551614 DROP TABLE t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1; SET @@INSERT_ID=1; @@ -504,13 +499,12 @@ SHOW VARIABLES LIKE "%auto_inc%"; Variable_name Value auto_increment_increment 5 auto_increment_offset 7 -INSERT INTO t1 VALUES (NULL),(NULL); +INSERT INTO t1 VALUES (NULL),(NULL), (NULL); +ERROR HY000: Failed to read auto-increment value from storage engine SELECT * FROM t1; c1 1 18446744073709551603 -18446744073709551607 -18446744073709551612 DROP TABLE t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1; SET @@INSERT_ID=1; @@ -572,12 +566,12 @@ SHOW VARIABLES LIKE "%auto_inc%"; Variable_name Value auto_increment_increment 65535 auto_increment_offset 65535 -INSERT INTO t1 VALUES (NULL); +INSERT INTO t1 
VALUES (NULL),(NULL); +ERROR 22003: Out of range value for column 't1' at row 167 SELECT * FROM t1; c1 1 18446744073709551610 -18446744073709551615 DROP TABLE t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1; SET @@INSERT_ID=1; diff --git a/mysql-test/suite/innodb/t/innodb-autoinc.test b/mysql-test/suite/innodb/t/innodb-autoinc.test index 10602499222..c3b64c7c963 100644 --- a/mysql-test/suite/innodb/t/innodb-autoinc.test +++ b/mysql-test/suite/innodb/t/innodb-autoinc.test @@ -291,21 +291,8 @@ INSERT INTO t1 VALUES (18446744073709551603); #-- 2^64 - 13 SELECT * FROM t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=2, @@SESSION.AUTO_INCREMENT_OFFSET=10; SHOW VARIABLES LIKE "%auto_inc%"; -# This should fail because of overflow but it doesn't, it seems to be -# a MySQL server bug. It wraps around to 0 for the last value. -# See MySQL Bug# 39828 -# -# Instead of wrapping around, it asserts when MySQL is compiled --with-debug -# (see sql/handler.cc:handler::update_auto_increment()). Don't test for -# overflow until Bug #39828 is fixed. -# -# Since this asserts when compiled --with-debug, we can't properly test this -# until Bug #39828 is fixed. For now, this test is meaningless. -#if Bug #39828 is fixed -#INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); -#else -INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); -#endif +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); SELECT * FROM t1; DROP TABLE t1; @@ -323,20 +310,8 @@ INSERT INTO t1 VALUES (18446744073709551603); #-- 2^64 - 13 SELECT * FROM t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=5, @@SESSION.AUTO_INCREMENT_OFFSET=7; SHOW VARIABLES LIKE "%auto_inc%"; -# This should fail because of overflow but it doesn't. It fails with -# a duplicate entry message because of a MySQL server bug, it wraps -# around. See MySQL Bug# 39828, once MySQL fix the bug we can replace -# the ER_DUP_ENTRY, 1062 below with the appropriate error message -# -# Since this asserts when compiled --with-debug, we can't properly test this -# until Bug #39828 is fixed. For now, this test is meaningless. -#if Bug #39828 is fixed -# Still need to fix this error code, error should mention overflow -#-- error ER_DUP_ENTRY,1062 -#INSERT INTO t1 VALUES (NULL),(NULL), (NULL); -#else -INSERT INTO t1 VALUES (NULL),(NULL); -#endif +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL),(NULL), (NULL); SELECT * FROM t1; DROP TABLE t1; @@ -374,20 +349,8 @@ INSERT INTO t1 VALUES (18446744073709551610); #-- 2^64 - 2 SELECT * FROM t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1152921504606846976, @@SESSION.AUTO_INCREMENT_OFFSET=1152921504606846976; SHOW VARIABLES LIKE "%auto_inc%"; -# This should fail because of overflow but it doesn't. It wraps around -# and the autoinc values look bogus too. -# See MySQL Bug# 39828, once MySQL fix the bug we can enable the error -# code expected test. -# -- error ER_AUTOINC_READ_FAILED,1467 -# -# Since this asserts when compiled --with-debug, we can't properly test this -# until Bug #39828 is fixed. For now, this test is meaningless. 
-#if Bug #39828 is fixed -#-- error ER_AUTOINC_READ_FAILED,1467 -#INSERT INTO t1 VALUES (NULL),(NULL); -#else -INSERT INTO t1 VALUES (NULL); -#endif +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t1 VALUES (NULL),(NULL); SELECT * FROM t1; DROP TABLE t1; diff --git a/mysql-test/suite/innodb_plugin/r/innodb-autoinc.result b/mysql-test/suite/innodb_plugin/r/innodb-autoinc.result index 350c7ebd541..0c8d16f27fb 100644 --- a/mysql-test/suite/innodb_plugin/r/innodb-autoinc.result +++ b/mysql-test/suite/innodb_plugin/r/innodb-autoinc.result @@ -471,17 +471,12 @@ SHOW VARIABLES LIKE "%auto_inc%"; Variable_name Value auto_increment_increment 2 auto_increment_offset 10 -INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); +INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); +ERROR HY000: Failed to read auto-increment value from storage engine SELECT * FROM t1; c1 1 18446744073709551603 -18446744073709551604 -18446744073709551606 -18446744073709551608 -18446744073709551610 -18446744073709551612 -18446744073709551614 DROP TABLE t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1; SET @@INSERT_ID=1; @@ -504,13 +499,12 @@ SHOW VARIABLES LIKE "%auto_inc%"; Variable_name Value auto_increment_increment 5 auto_increment_offset 7 -INSERT INTO t1 VALUES (NULL),(NULL); +INSERT INTO t1 VALUES (NULL),(NULL), (NULL); +ERROR HY000: Failed to read auto-increment value from storage engine SELECT * FROM t1; c1 1 18446744073709551603 -18446744073709551607 -18446744073709551612 DROP TABLE t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1; SET @@INSERT_ID=1; @@ -572,12 +566,12 @@ SHOW VARIABLES LIKE "%auto_inc%"; Variable_name Value auto_increment_increment 65535 auto_increment_offset 65535 -INSERT INTO t1 VALUES (NULL); +INSERT INTO t1 VALUES (NULL),(NULL); +ERROR 22003: Out of range value for column 't1' at row 167 SELECT * FROM t1; c1 1 18446744073709551610 -18446744073709551615 DROP TABLE t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=1; SET @@INSERT_ID=1; diff --git a/mysql-test/suite/innodb_plugin/t/innodb-autoinc.test b/mysql-test/suite/innodb_plugin/t/innodb-autoinc.test index 997c503d2d3..4967a6efbb9 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb-autoinc.test +++ b/mysql-test/suite/innodb_plugin/t/innodb-autoinc.test @@ -293,21 +293,8 @@ INSERT INTO t1 VALUES (18446744073709551603); #-- 2^64 - 13 SELECT * FROM t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=2, @@SESSION.AUTO_INCREMENT_OFFSET=10; SHOW VARIABLES LIKE "%auto_inc%"; -# This should fail because of overflow but it doesn't, it seems to be -# a MySQL server bug. It wraps around to 0 for the last value. -# See MySQL Bug# 39828 -# -# Instead of wrapping around, it asserts when MySQL is compiled --with-debug -# (see sql/handler.cc:handler::update_auto_increment()). Don't test for -# overflow until Bug #39828 is fixed. -# -# Since this asserts when compiled --with-debug, we can't properly test this -# until Bug #39828 is fixed. For now, this test is meaningless. 
-#if Bug #39828 is fixed -#INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); -#else -INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL); -#endif +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL); SELECT * FROM t1; DROP TABLE t1; @@ -325,20 +312,8 @@ INSERT INTO t1 VALUES (18446744073709551603); #-- 2^64 - 13 SELECT * FROM t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=5, @@SESSION.AUTO_INCREMENT_OFFSET=7; SHOW VARIABLES LIKE "%auto_inc%"; -# This should fail because of overflow but it doesn't. It fails with -# a duplicate entry message because of a MySQL server bug, it wraps -# around. See MySQL Bug# 39828, once MySQL fix the bug we can replace -# the ER_DUP_ENTRY, 1062 below with the appropriate error message -# -# Since this asserts when compiled --with-debug, we can't properly test this -# until Bug #39828 is fixed. For now, this test is meaningless. -#if Bug #39828 is fixed -# Still need to fix this error code, error should mention overflow -#-- error ER_DUP_ENTRY,1062 -#INSERT INTO t1 VALUES (NULL),(NULL), (NULL); -#else -INSERT INTO t1 VALUES (NULL),(NULL); -#endif +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1 VALUES (NULL),(NULL), (NULL); SELECT * FROM t1; DROP TABLE t1; @@ -376,20 +351,8 @@ INSERT INTO t1 VALUES (18446744073709551610); #-- 2^64 - 2 SELECT * FROM t1; SET @@SESSION.AUTO_INCREMENT_INCREMENT=1152921504606846976, @@SESSION.AUTO_INCREMENT_OFFSET=1152921504606846976; SHOW VARIABLES LIKE "%auto_inc%"; -# This should fail because of overflow but it doesn't. It wraps around -# and the autoinc values look bogus too. -# See MySQL Bug# 39828, once MySQL fix the bug we can enable the error -# code expected test. -# -- error ER_AUTOINC_READ_FAILED,1467 -# -# Since this asserts when compiled --with-debug, we can't properly test this -# until Bug #39828 is fixed. For now, this test is meaningless. -#if Bug #39828 is fixed -#-- error ER_AUTOINC_READ_FAILED,1467 -#INSERT INTO t1 VALUES (NULL),(NULL); -#else -INSERT INTO t1 VALUES (NULL); -#endif +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t1 VALUES (NULL),(NULL); SELECT * FROM t1; DROP TABLE t1; diff --git a/mysql-test/t/auto_increment.test b/mysql-test/t/auto_increment.test index 076e32eb22c..8ab2e6fcf31 100644 --- a/mysql-test/t/auto_increment.test +++ b/mysql-test/t/auto_increment.test @@ -342,3 +342,24 @@ SELECT a FROM t2; DROP TABLE t1, t2; +--echo # +--echo # Bug#39828 autoinc wraps around when offset and increment > 1 +--echo # + +CREATE TABLE t1 (c1 BIGINT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(c1)) engine=MyISAM; +INSERT INTO t1 VALUES(1); +INSERT INTO t1 VALUES (18446744073709551601); + +SET @@SESSION.AUTO_INCREMENT_INCREMENT=10; + +SELECT @@SESSION.AUTO_INCREMENT_OFFSET; +--error ER_WARN_DATA_OUT_OF_RANGE +INSERT INTO t1 VALUES (NULL), (NULL), (NULL); +SELECT * FROM t1; + +SET @@SESSION.AUTO_INCREMENT_INCREMENT=default; +SET @@SESSION.AUTO_INCREMENT_OFFSET=default; + +DROP TABLE t1; + +--echo End of 5.1 tests diff --git a/sql/handler.cc b/sql/handler.cc index 19f397ef09f..3286367e7a7 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2166,7 +2166,8 @@ int handler::read_first_row(uchar * buf, uint primary_key) computes the lowest number - strictly greater than "nr" - of the form: auto_increment_offset + N * auto_increment_increment - + If overflow happened then return MAX_ULONGLONG value as an + indication of overflow. In most cases increment= offset= 1, in which case we get: @verbatim 1,2,3,4,5,... 
@endverbatim If increment=10 and offset=5 and previous number is 1, we get: @@ -2175,13 +2176,23 @@ int handler::read_first_row(uchar * buf, uint primary_key) inline ulonglong compute_next_insert_id(ulonglong nr,struct system_variables *variables) { + const ulonglong save_nr= nr; + if (variables->auto_increment_increment == 1) - return (nr+1); // optimization of the formula below - nr= (((nr+ variables->auto_increment_increment - - variables->auto_increment_offset)) / - (ulonglong) variables->auto_increment_increment); - return (nr* (ulonglong) variables->auto_increment_increment + - variables->auto_increment_offset); + nr= nr + 1; // optimization of the formula below + else + { + nr= (((nr+ variables->auto_increment_increment - + variables->auto_increment_offset)) / + (ulonglong) variables->auto_increment_increment); + nr= (nr* (ulonglong) variables->auto_increment_increment + + variables->auto_increment_offset); + } + + if (unlikely(nr <= save_nr)) + return ULONGLONG_MAX; + + return nr; } @@ -2392,7 +2403,7 @@ int handler::update_auto_increment() variables->auto_increment_increment, nb_desired_values, &nr, &nb_reserved_values); - if (nr == ~(ulonglong) 0) + if (nr == ULONGLONG_MAX) DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure /* @@ -2423,6 +2434,9 @@ int handler::update_auto_increment() } } + if (unlikely(nr == ULONGLONG_MAX)) + DBUG_RETURN(HA_ERR_AUTOINC_ERANGE); + DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr)); if (unlikely(table->next_number_field->store((longlong) nr, TRUE))) -- cgit v1.2.1 From 89b9934cdbd7397b6fadf32ebd719398a5080ada Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:57:57 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3520 > revision-id: sergey.glukhov@oracle.com-20101214093303-wmo9mqcb8rz0wv9f > parent: tor.didriksen@oracle.com-20101213161301-81lprlbune7r98dl > committer: Sergey Glukhov > branch nick: mysql-5.1-bugteam > timestamp: Tue 2010-12-14 12:33:03 +0300 > message: > Fixed following problems: > --Bug#52157 various crashes and assertions with multi-table update, stored function > --Bug#54475 improper error handling causes cascading crashing failures in innodb/ndb > --Bug#57703 create view cause Assertion failed: 0, file .\item_subselect.cc, line 846 > --Bug#57352 valgrind warnings when creating view > --Recently discovered problem when a nested materialized derived table is used > before being populated and it leads to incorrect result > > We have several modes when we should disable subquery evaluation. > The reasons for disabling are different. It could be > uselessness of the evaluation as in case of 'CREATE VIEW' > or 'PREPARE stmt', or we should disable subquery evaluation > if tables are not locked yet as it happens in bug#54475, or > too early evaluation of subqueries can lead to wrong result > as it happened in Bug#19077. > Main problem is that if subquery items are treated as const > they are evaluated in ::fix_fields(), ::fix_length_and_dec() > of the parental items as a lot of these methods have > Item::val_...() calls inside. > We have to make subqueries non-const to prevent unnecessary > subquery evaluation. At the moment we have different methods > for this. Here is a list of these modes: > > 1. PREPARE stmt; > We use UNCACHEABLE_PREPARE flag. > It is set during parsing in sql_parse.cc, mysql_new_select() for > each SELECT_LEX object and cleared at the end of PREPARE in > sql_prepare.cc, init_stmt_after_parse(). 
If this flag is set > subquery becomes non-const and evaluation does not happen. > > 2. CREATE|ALTER VIEW, SHOW CREATE VIEW, I_S tables which > process FRM files > We use LEX::view_prepare_mode field. We set it before > view preparation and check this flag in > ::fix_fields(), ::fix_length_and_dec(). > Some bugs are fixed using this approach, > some are not(Bug#57352, Bug#57703). The problem here is > that we have a lot of ::fix_fields(), ::fix_length_and_dec() > where we use Item::val_...() calls for const items. > > 3. Derived tables with subquery = wrong result(Bug19077) > The reason of this bug is too early subquery evaluation. > It was fixed by adding Item::with_subselect field > The check of this field in appropriate places prevents > const item evaluation if the item have subquery. > The fix for Bug19077 fixes only the problem with > convert_constant_item() function and does not cover > other places(::fix_fields(), ::fix_length_and_dec() again) > where subqueries could be evaluated. > > Example: > CREATE TABLE t1 (i INT, j BIGINT); > INSERT INTO t1 VALUES (1, 2), (2, 2), (3, 2); > SELECT * FROM (SELECT MIN(i) FROM t1 > WHERE j = SUBSTRING('12', (SELECT * FROM (SELECT MIN(j) FROM t1) t2))) t3; > DROP TABLE t1; > > 4. Derived tables with subquery where subquery > is evaluated before table locking(Bug#54475, Bug#52157) > > Suggested solution is following: > > -Introduce new field LEX::context_analysis_only with the following > possible flags: > #define CONTEXT_ANALYSIS_ONLY_PREPARE 1 > #define CONTEXT_ANALYSIS_ONLY_VIEW 2 > #define CONTEXT_ANALYSIS_ONLY_DERIVED 4 > -Set/clean these flags when we perform > context analysis operation > -Item_subselect::const_item() returns > result depending on LEX::context_analysis_only. > If context_analysis_only is set then we return > FALSE that means that subquery is non-const. > As all subquery types are wrapped by Item_subselect > it allow as to make subquery non-const when > it's necessary. --- mysql-test/r/derived.result | 11 ++++++++ mysql-test/r/multi_update.result | 11 ++++++++ mysql-test/r/view.result | 13 +++++++++ .../suite/innodb/r/innodb_multi_update.result | 8 ++++++ mysql-test/suite/innodb/t/innodb_multi_update.test | 11 ++++++++ .../innodb_plugin/r/innodb_multi_update.result | 8 ++++++ .../suite/innodb_plugin/t/innodb_multi_update.test | 11 ++++++++ mysql-test/t/derived.test | 11 ++++++++ mysql-test/t/multi_update.test | 11 ++++++++ mysql-test/t/view.test | 16 +++++++++++ sql/item.cc | 11 +------- sql/item_cmpfunc.cc | 10 +++---- sql/item_func.cc | 2 +- sql/item_row.cc | 8 ++---- sql/item_subselect.cc | 20 +++---------- sql/item_subselect.h | 1 - sql/mysql_priv.h | 33 +++++++++++++++++++--- sql/sql_class.h | 2 -- sql/sql_derived.cc | 3 +- sql/sql_lex.cc | 2 +- sql/sql_lex.h | 17 +++++------ sql/sql_parse.cc | 7 ----- sql/sql_prepare.cc | 18 ++---------- sql/sql_select.cc | 3 +- sql/sql_show.cc | 8 +++--- sql/sql_view.cc | 2 +- 26 files changed, 175 insertions(+), 83 deletions(-) diff --git a/mysql-test/r/derived.result b/mysql-test/r/derived.result index 80f04ffd455..61f1db9989f 100644 --- a/mysql-test/r/derived.result +++ b/mysql-test/r/derived.result @@ -400,4 +400,15 @@ SELECT 0 FROM (SELECT 0) t61; 0 0 +# +# A nested materialized derived table is used before being populated. 
+# (addon for bug#19077) +# +CREATE TABLE t1 (i INT, j BIGINT); +INSERT INTO t1 VALUES (1, 2), (2, 2), (3, 2); +SELECT * FROM (SELECT MIN(i) FROM t1 +WHERE j = SUBSTRING('12', (SELECT * FROM (SELECT MIN(j) FROM t1) t2))) t3; +MIN(i) +1 +DROP TABLE t1; # End of 5.0 tests diff --git a/mysql-test/r/multi_update.result b/mysql-test/r/multi_update.result index d77ad1d2953..df3d7be6714 100644 --- a/mysql-test/r/multi_update.result +++ b/mysql-test/r/multi_update.result @@ -659,4 +659,15 @@ Error 1242 Subquery returns more than 1 row Error 1242 Subquery returns more than 1 row DROP TABLE t1, t2, t3; SET SESSION sql_safe_updates = DEFAULT; +# +# Bug#52157 various crashes and assertions with multi-table update, stored function +# +CREATE FUNCTION f1 () RETURNS BLOB RETURN 1; +CREATE TABLE t1 (f1 DATE); +INSERT INTO t1 VALUES('2001-01-01'); +UPDATE (SELECT 1 FROM t1 WHERE f1 = (SELECT f1() FROM t1)) x, t1 SET f1 = 1; +Warnings: +Warning 1292 Truncated incorrect datetime value: '1' +DROP FUNCTION f1; +DROP TABLE t1; end of tests diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 0aec44b70f1..3771a7a3e12 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -3882,6 +3882,19 @@ CREATE VIEW v1 AS SELECT 1 from t1 WHERE t1.b <=> (SELECT a FROM t1 WHERE a < SOME(SELECT '1')); DROP VIEW v1; DROP TABLE t1; +# +# Bug#57703 create view cause Assertion failed: 0, file .\item_subselect.cc, line 846 +# +CREATE TABLE t1(a int); +CREATE VIEW v1 AS SELECT 1 FROM t1 GROUP BY +SUBSTRING(1 FROM (SELECT 3 FROM t1 WHERE a >= ANY(SELECT 1))); +DROP VIEW v1; +DROP TABLE t1; +# +# Bug#57352 valgrind warnings when creating view +# +CREATE VIEW v1 AS SELECT 1 IN (1 LIKE 2,0) AS f; +DROP VIEW v1; # ----------------------------------------------------------------- # -- End of 5.1 tests. 
# ----------------------------------------------------------------- diff --git a/mysql-test/suite/innodb/r/innodb_multi_update.result b/mysql-test/suite/innodb/r/innodb_multi_update.result index 7af9b030d1f..558fc3938a8 100644 --- a/mysql-test/suite/innodb/r/innodb_multi_update.result +++ b/mysql-test/suite/innodb/r/innodb_multi_update.result @@ -74,3 +74,11 @@ a b 4 14 5 15 drop table bug38999_1,bug38999_2; +# +# Bug#54475 improper error handling causes cascading crashing failures in innodb/ndb +# +CREATE TABLE t1(f1 INT) ENGINE=INNODB; +INSERT INTO t1 VALUES(1); +UPDATE (SELECT ((SELECT 1 FROM t1), 1) FROM t1 WHERE (SELECT 1 FROM t1)) x, (SELECT 1) AS d SET d.f1 = 1; +ERROR 21000: Operand should contain 1 column(s) +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/innodb_multi_update.test b/mysql-test/suite/innodb/t/innodb_multi_update.test index 7ab17ccf70a..8356c20c88f 100644 --- a/mysql-test/suite/innodb/t/innodb_multi_update.test +++ b/mysql-test/suite/innodb/t/innodb_multi_update.test @@ -27,3 +27,14 @@ select * from bug38999_1; select * from bug38999_2; drop table bug38999_1,bug38999_2; + + +--echo # +--echo # Bug#54475 improper error handling causes cascading crashing failures in innodb/ndb +--echo # +CREATE TABLE t1(f1 INT) ENGINE=INNODB; +INSERT INTO t1 VALUES(1); +--error ER_OPERAND_COLUMNS +UPDATE (SELECT ((SELECT 1 FROM t1), 1) FROM t1 WHERE (SELECT 1 FROM t1)) x, (SELECT 1) AS d SET d.f1 = 1; +DROP TABLE t1; + diff --git a/mysql-test/suite/innodb_plugin/r/innodb_multi_update.result b/mysql-test/suite/innodb_plugin/r/innodb_multi_update.result index 7af9b030d1f..558fc3938a8 100644 --- a/mysql-test/suite/innodb_plugin/r/innodb_multi_update.result +++ b/mysql-test/suite/innodb_plugin/r/innodb_multi_update.result @@ -74,3 +74,11 @@ a b 4 14 5 15 drop table bug38999_1,bug38999_2; +# +# Bug#54475 improper error handling causes cascading crashing failures in innodb/ndb +# +CREATE TABLE t1(f1 INT) ENGINE=INNODB; +INSERT INTO t1 VALUES(1); +UPDATE (SELECT ((SELECT 1 FROM t1), 1) FROM t1 WHERE (SELECT 1 FROM t1)) x, (SELECT 1) AS d SET d.f1 = 1; +ERROR 21000: Operand should contain 1 column(s) +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_plugin/t/innodb_multi_update.test b/mysql-test/suite/innodb_plugin/t/innodb_multi_update.test index 890889301e6..3d9a9a53193 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb_multi_update.test +++ b/mysql-test/suite/innodb_plugin/t/innodb_multi_update.test @@ -27,3 +27,14 @@ select * from bug38999_1; select * from bug38999_2; drop table bug38999_1,bug38999_2; + + +--echo # +--echo # Bug#54475 improper error handling causes cascading crashing failures in innodb/ndb +--echo # +CREATE TABLE t1(f1 INT) ENGINE=INNODB; +INSERT INTO t1 VALUES(1); +--error ER_OPERAND_COLUMNS +UPDATE (SELECT ((SELECT 1 FROM t1), 1) FROM t1 WHERE (SELECT 1 FROM t1)) x, (SELECT 1) AS d SET d.f1 = 1; +DROP TABLE t1; + diff --git a/mysql-test/t/derived.test b/mysql-test/t/derived.test index d28c19bbd18..5ce6b52b74f 100644 --- a/mysql-test/t/derived.test +++ b/mysql-test/t/derived.test @@ -301,4 +301,15 @@ SELECT 0 FROM (SELECT 0) t56, (SELECT 0) t57, (SELECT 0) t58, (SELECT 0) t59, (SELECT 0) t60, (SELECT 0) t61; # 61 == MAX_TABLES +--echo # +--echo # A nested materialized derived table is used before being populated. 
+--echo # (addon for bug#19077) +--echo # + +CREATE TABLE t1 (i INT, j BIGINT); +INSERT INTO t1 VALUES (1, 2), (2, 2), (3, 2); +SELECT * FROM (SELECT MIN(i) FROM t1 +WHERE j = SUBSTRING('12', (SELECT * FROM (SELECT MIN(j) FROM t1) t2))) t3; +DROP TABLE t1; + --echo # End of 5.0 tests diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test index 85d2ed19fda..5298701d790 100644 --- a/mysql-test/t/multi_update.test +++ b/mysql-test/t/multi_update.test @@ -673,4 +673,15 @@ SET t3.a = 0; DROP TABLE t1, t2, t3; SET SESSION sql_safe_updates = DEFAULT; +--echo # +--echo # Bug#52157 various crashes and assertions with multi-table update, stored function +--echo # + +CREATE FUNCTION f1 () RETURNS BLOB RETURN 1; +CREATE TABLE t1 (f1 DATE); +INSERT INTO t1 VALUES('2001-01-01'); +UPDATE (SELECT 1 FROM t1 WHERE f1 = (SELECT f1() FROM t1)) x, t1 SET f1 = 1; +DROP FUNCTION f1; +DROP TABLE t1; + --echo end of tests diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 3736f53b288..b1b3b5f2a83 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -3925,6 +3925,22 @@ WHERE t1.b <=> (SELECT a FROM t1 WHERE a < SOME(SELECT '1')); DROP VIEW v1; DROP TABLE t1; +--echo # +--echo # Bug#57703 create view cause Assertion failed: 0, file .\item_subselect.cc, line 846 +--echo # + +CREATE TABLE t1(a int); +CREATE VIEW v1 AS SELECT 1 FROM t1 GROUP BY +SUBSTRING(1 FROM (SELECT 3 FROM t1 WHERE a >= ANY(SELECT 1))); +DROP VIEW v1; +DROP TABLE t1; + +--echo # +--echo # Bug#57352 valgrind warnings when creating view +--echo # +CREATE VIEW v1 AS SELECT 1 IN (1 LIKE 2,0) AS f; +DROP VIEW v1; + --echo # ----------------------------------------------------------------- --echo # -- End of 5.1 tests. --echo # ----------------------------------------------------------------- diff --git a/sql/item.cc b/sql/item.cc index 61dd8a97dcb..c782b25f127 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1712,16 +1712,7 @@ bool agg_item_set_converter(DTCollation &coll, const char *fname, if (!(conv= (*arg)->safe_charset_converter(coll.collation)) && ((*arg)->collation.repertoire == MY_REPERTOIRE_ASCII)) - { - /* - We should disable const subselect item evaluation because - subselect transformation does not happen in view_prepare_mode - and thus val_...() methods can not be called for const items. - */ - bool resolve_const= ((*arg)->type() == Item::SUBSELECT_ITEM && - thd->lex->view_prepare_mode) ? FALSE : TRUE; - conv= new Item_func_conv_charset(*arg, coll.collation, resolve_const); - } + conv= new Item_func_conv_charset(*arg, coll.collation, 1); if (!conv) { diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 8b8ee4a0054..ce65975f4ea 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -401,7 +401,7 @@ static bool convert_constant_item(THD *thd, Item_field *field_item, Field *field= field_item->field; int result= 0; - if (!(*item)->with_subselect && (*item)->const_item()) + if ((*item)->const_item()) { TABLE *table= field->table; ulong orig_sql_mode= thd->variables.sql_mode; @@ -497,7 +497,7 @@ void Item_bool_func2::fix_length_and_dec() } thd= current_thd; - if (!thd->is_context_analysis_only()) + if (!thd->lex->is_ps_or_view_context_analysis()) { if (args[0]->real_item()->type() == FIELD_ITEM) { @@ -801,7 +801,7 @@ Arg_comparator::can_compare_as_dates(Item *a, Item *b, ulonglong *const_value) confuse storage engines since in context analysis mode tables aren't locked. 
*/ - if (!thd->is_context_analysis_only() && + if (!thd->lex->is_ps_or_view_context_analysis() && cmp_type != CMP_DATE_WITH_DATE && str_arg->const_item() && (str_arg->type() != Item::FUNC_ITEM || ((Item_func*)str_arg)->functype() != Item_func::GUSERVAR_FUNC)) @@ -1027,7 +1027,7 @@ Item** Arg_comparator::cache_converted_constant(THD *thd_arg, Item **value, Item_result type) { /* Don't need cache if doing context analysis only. */ - if (!thd_arg->is_context_analysis_only() && + if (!thd->lex->is_ps_or_view_context_analysis() && (*value)->const_item() && type != (*value)->result_type()) { Item_cache *cache= Item_cache::get_cache(*value, type); @@ -4689,7 +4689,7 @@ bool Item_func_like::fix_fields(THD *thd, Item **ref) return TRUE; } - if (escape_item->const_item() && !thd->lex->view_prepare_mode) + if (escape_item->const_item()) { /* If we are on execution stage */ String *escape_str= escape_item->val_str(&cmp.value1); diff --git a/sql/item_func.cc b/sql/item_func.cc index 258ba0f01d5..e121e9ed3c6 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -6045,7 +6045,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref) if (res) DBUG_RETURN(res); - if (thd->lex->view_prepare_mode) + if (thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW) { /* Here we check privileges of the stored routine only during view diff --git a/sql/item_row.cc b/sql/item_row.cc index 7535c1fa80b..408bc11eb9b 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -73,12 +73,8 @@ bool Item_row::fix_fields(THD *thd, Item **ref) used_tables_cache |= item->used_tables(); const_item_cache&= item->const_item() && !with_null; not_null_tables_cache|= item->not_null_tables(); - /* - Some subqueries transformations aren't done in the view_prepare_mode thus - is_null() will fail. So we skip is_null() calculation for CREATE VIEW as - not necessary. - */ - if (const_item_cache && !thd->lex->view_prepare_mode) + + if (const_item_cache) { if (item->cols() > 1) with_null|= item->null_inside(); diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 1ed36ce7656..9bad6728076 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -123,20 +123,6 @@ void Item_subselect::cleanup() } -/* - We cannot use generic Item::safe_charset_converter() because - Subselect transformation does not happen in view_prepare_mode - and thus we can not evaluate val_...() for const items. -*/ - -Item *Item_subselect::safe_charset_converter(CHARSET_INFO *tocs) -{ - Item_func_conv_charset *conv= - new Item_func_conv_charset(this, tocs, thd->lex->view_prepare_mode ? 0 : 1); - return conv->safe ? conv : NULL; -} - - void Item_singlerow_subselect::cleanup() { DBUG_ENTER("Item_singlerow_subselect::cleanup"); @@ -271,6 +257,7 @@ bool Item_subselect::exec() if (thd->is_error() || thd->killed) return 1; + DBUG_ASSERT(!thd->lex->context_analysis_only); /* Simulate a failure in sub-query execution. Used to test e.g. out of memory or query being killed conditions. @@ -307,7 +294,7 @@ table_map Item_subselect::used_tables() const bool Item_subselect::const_item() const { - return const_item_cache; + return thd->lex->context_analysis_only ? 
FALSE : const_item_cache; } Item *Item_subselect::get_tmp_table_item(THD *thd_arg) @@ -1638,7 +1625,8 @@ bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref) { bool result = 0; - if (thd_arg->lex->view_prepare_mode && left_expr && !left_expr->fixed) + if ((thd_arg->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW) && + left_expr && !left_expr->fixed) result = left_expr->fix_fields(thd_arg, &left_expr); return result || Item_subselect::fix_fields(thd_arg, ref); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 3806e68e377..467e9b22637 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -126,7 +126,6 @@ public: virtual void reset_value_registration() {} enum_parsing_place place() { return parsing_place; } bool walk(Item_processor processor, bool walk_subquery, uchar *arg); - Item *safe_charset_converter(CHARSET_INFO *tocs); /** Get the SELECT_LEX structure associated with this Item. diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 9f2c0b04f2c..c8b923e0d61 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -566,17 +566,42 @@ protected: #define MY_CHARSET_BIN_MB_MAXLEN 1 +/* + Flags below are set when we perform + context analysis of the statement and make + subqueries non-const. It prevents subquery + evaluation at context analysis stage. +*/ + +/* + Don't evaluate this subquery during statement prepare even if + it's a constant one. The flag is switched off in the end of + mysqld_stmt_prepare. +*/ +#define CONTEXT_ANALYSIS_ONLY_PREPARE 1 +/* + Special JOIN::prepare mode: changing of query is prohibited. + When creating a view, we need to just check its syntax omitting + any optimizations: afterwards definition of the view will be + reconstructed by means of ::print() methods and written to + to an .frm file. We need this definition to stay untouched. +*/ +#define CONTEXT_ANALYSIS_ONLY_VIEW 2 +/* + Don't evaluate this subquery during derived table prepare even if + it's a constant one. 
+*/ +#define CONTEXT_ANALYSIS_ONLY_DERIVED 4 + // uncachable cause #define UNCACHEABLE_DEPENDENT 1 #define UNCACHEABLE_RAND 2 #define UNCACHEABLE_SIDEEFFECT 4 /// forcing to save JOIN for explain #define UNCACHEABLE_EXPLAIN 8 -/** Don't evaluate subqueries in prepare even if they're not correlated */ -#define UNCACHEABLE_PREPARE 16 /* For uncorrelated SELECT in an UNION with some correlated SELECTs */ -#define UNCACHEABLE_UNITED 32 -#define UNCACHEABLE_CHECKOPTION 64 +#define UNCACHEABLE_UNITED 16 +#define UNCACHEABLE_CHECKOPTION 32 /* Used to check GROUP BY list in the MODE_ONLY_FULL_GROUP_BY mode */ #define UNDEF_POS (-1) diff --git a/sql/sql_class.h b/sql/sql_class.h index 42c873e9fc3..0af28171160 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2180,8 +2180,6 @@ public: (variables.sql_mode & MODE_STRICT_ALL_TABLES))); } void set_status_var_init(); - bool is_context_analysis_only() - { return stmt_arena->is_stmt_prepare() || lex->view_prepare_mode; } void reset_n_backup_open_tables_state(Open_tables_state *backup); void restore_backup_open_tables_state(Open_tables_state *backup); void reset_sub_statement_state(Sub_statement_state *backup, uint new_state); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 782589f7d0f..3214c756bc7 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -147,10 +147,11 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) if (!(derived_result= new select_union)) DBUG_RETURN(TRUE); // out of memory + lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED; // st_select_lex_unit::prepare correctly work for single select if ((res= unit->prepare(thd, derived_result, 0))) goto exit; - + lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED; if ((res= check_duplicate_names(unit->types, 0))) goto exit; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 24c51be2512..9ea144df9bc 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -305,7 +305,7 @@ void lex_start(THD *thd) lex->select_lex.group_list.empty(); lex->describe= 0; lex->subqueries= FALSE; - lex->view_prepare_mode= FALSE; + lex->context_analysis_only= 0; lex->derived_tables= 0; lex->lock_option= TL_READ; lex->safe_to_cache_query= 1; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 9131cec9d04..b1f30b07824 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1715,14 +1715,8 @@ typedef struct st_lex : public Query_tables_list bool verbose, no_write_to_binlog; bool tx_chain, tx_release; - /* - Special JOIN::prepare mode: changing of query is prohibited. - When creating a view, we need to just check its syntax omitting - any optimizations: afterwards definition of the view will be - reconstructed by means of ::print() methods and written to - to an .frm file. We need this definition to stay untouched. 
- */ - bool view_prepare_mode; + + uint8 context_analysis_only; bool safe_to_cache_query; bool subqueries, ignore; st_parsing_options parsing_options; @@ -1843,6 +1837,13 @@ typedef struct st_lex : public Query_tables_list delete_dynamic(&plugins); } + inline bool is_ps_or_view_context_analysis() + { + return (context_analysis_only & + (CONTEXT_ANALYSIS_ONLY_PREPARE | + CONTEXT_ANALYSIS_ONLY_VIEW)); + } + inline void uncacheable(uint8 cause) { safe_to_cache_query= 0; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index fbe9c9753d9..169cea3a3ed 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5848,13 +5848,6 @@ mysql_new_select(LEX *lex, bool move_down) DBUG_RETURN(1); } select_lex->nest_level= lex->nest_level; - /* - Don't evaluate this subquery during statement prepare even if - it's a constant one. The flag is switched off in the end of - mysqld_stmt_prepare. - */ - if (thd->stmt_arena->is_stmt_prepare()) - select_lex->uncacheable|= UNCACHEABLE_PREPARE; if (move_down) { SELECT_LEX_UNIT *unit; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 5ba375f9710..aadfb831087 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1688,7 +1688,7 @@ static bool mysql_test_create_view(Prepared_statement *stmt) if (open_normal_and_derived_tables(thd, tables, 0)) goto err; - lex->view_prepare_mode= 1; + lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW; res= select_like_stmt_test(stmt, 0, 0); err: @@ -2234,19 +2234,6 @@ end: } -/** Init PS/SP specific parse tree members. */ - -static void init_stmt_after_parse(LEX *lex) -{ - SELECT_LEX *sl= lex->all_selects_list; - /* - Switch off a temporary flag that prevents evaluation of - subqueries in statement prepare. - */ - for (; sl; sl= sl->next_select_in_list()) - sl->uncacheable&= ~UNCACHEABLE_PREPARE; -} - /** SQLCOM_PREPARE implementation. @@ -3080,6 +3067,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) parser_state.m_lip.stmt_prepare_mode= TRUE; lex_start(thd); + lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_PREPARE; error= parse_sql(thd, & parser_state, NULL) || thd->is_error() || @@ -3132,7 +3120,7 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len) if (error == 0) { setup_set_params(); - init_stmt_after_parse(lex); + lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_PREPARE; state= Query_arena::PREPARED; flags&= ~ (uint) IS_IN_USE; /* diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9bf0a236e1b..224fa43d0ef 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -538,7 +538,8 @@ JOIN::prepare(Item ***rref_pointer_array, thd->lex->allow_sum_func= save_allow_sum_func; } - if (!thd->lex->view_prepare_mode && !(select_options & SELECT_DESCRIBE)) + if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW) && + !(select_options & SELECT_DESCRIBE)) { Item_subselect *subselect; /* Is it subselect? */ diff --git a/sql/sql_show.cc b/sql/sql_show.cc index e074461b452..55d65a59536 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -724,7 +724,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) table_list->table_name)); /* We want to preserve the tree for views. 
*/ - thd->lex->view_prepare_mode= TRUE; + thd->lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW; { Show_create_error_handler view_error_suppressor(thd, table_list); @@ -3321,7 +3321,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) uint derived_tables= lex->derived_tables; int error= 1; Open_tables_state open_tables_state_backup; - bool save_view_prepare_mode= lex->view_prepare_mode; + uint8 save_context_analysis_only= lex->context_analysis_only; Query_tables_list query_tables_list_backup; #ifndef NO_EMBEDDED_ACCESS_CHECKS Security_context *sctx= thd->security_ctx; @@ -3329,7 +3329,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) uint table_open_method; DBUG_ENTER("get_all_tables"); - lex->view_prepare_mode= TRUE; + lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW; lex->reset_n_backup_query_tables_list(&query_tables_list_backup); /* @@ -3546,7 +3546,7 @@ err: lex->restore_backup_query_tables_list(&query_tables_list_backup); lex->derived_tables= derived_tables; lex->all_selects_list= old_all_select_lex; - lex->view_prepare_mode= save_view_prepare_mode; + lex->context_analysis_only= save_context_analysis_only; lex->sql_command= save_sql_command; DBUG_RETURN(error); } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 133574089aa..6cb4f590ae0 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -545,7 +545,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, } /* prepare select to resolve all fields */ - lex->view_prepare_mode= 1; + lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW; if (unit->prepare(thd, 0, 0)) { /* -- cgit v1.2.1 From e4f23ff5810547d3efdec9da883f4c8d48c618e6 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 21:59:29 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3527.3.1 > revision-id: mattias.jonsson@oracle.com-20101222095036-2lpx0gqu4i45jtkz > parent: sven.sandberg@oracle.com-20101220090735-psae11j9nwuj8vzl > committer: Mattias Jonsson > branch nick: b54483-51-bt_2 > timestamp: Wed 2010-12-22 10:50:36 +0100 > message: > Bug#54483: valgrind errors when making warnings for multiline inserts into partition > Bug#57071: EXTRACT(WEEK from date_col) cannot be allowed as partitioning function > > There were functions allowed as partitioning functions > that implicit allowed cast. That could result in unacceptable > behaviour. > > Solution was to check that the arguments of date and time functions > have allowed types (field and date/datetime/time depending on function). 
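
The rule the quoted message describes can be read off the partition_error.result hunks below: a date/time function is accepted as a partitioning function only when its argument is a field whose type the function reads directly (DATE/DATETIME for TO_DAYS(), MONTH(), DAYOFWEEK(), WEEKDAY() and the like; TIME/DATETIME for HOUR(), MINUTE(), SECOND(), TIME_TO_SEC(); DATETIME only for the combined EXTRACT(DAY_HOUR ...) style units), while any argument that would need an implicit cast (VARCHAR, INT, or TIME where a date part is needed) is rejected. The real enforcement is in sql/item.h, sql/item_func.h, sql/item_timefunc.h and sql/sql_partition.cc (see the diffstat below); what follows is only a minimal standalone C++ sketch of that type rule, using illustrative names (ArgType, FuncClass, is_allowed_partition_func) that do not exist in the server source, included to make the long accept/reject matrix in the results easier to follow. EXTRACT(WEEK ...) is not covered by the sketch: it is rejected for every argument type because its value depends on @@default_week_format (Bug#57071).

// Standalone model (NOT the server patch) of the argument-type rule described
// above for Bug#54483/Bug#57071: a date/time function may only be used as a
// partitioning function when its field argument already has the temporal type
// the function reads, so no implicit cast can happen.
// ArgType, FuncClass and is_allowed_partition_func are illustrative names only.
#include <cstdio>

enum class ArgType { Date, Datetime, Time, Varchar, Int };

// Which part of the value a function needs, mirroring the commit message:
// "field and date/datetime/time depending on function".
enum class FuncClass {
  NeedsDate,      // TO_DAYS(), DAYOFMONTH(), MONTH(), QUARTER(), WEEKDAY(), ...
  NeedsTime,      // HOUR(), MINUTE(), SECOND(), MICROSECOND(), TIME_TO_SEC()
  NeedsDatetime   // EXTRACT(DAY_HOUR ...), EXTRACT(DAY_SECOND ...), ...
};

static bool is_allowed_partition_func(FuncClass fc, ArgType arg)
{
  switch (fc) {
  case FuncClass::NeedsDate:
    // DATE and DATETIME carry a date part; TIME, VARCHAR, INT would be cast.
    return arg == ArgType::Date || arg == ArgType::Datetime;
  case FuncClass::NeedsTime:
    // TIME and DATETIME carry a time part; DATE, VARCHAR, INT would be cast.
    return arg == ArgType::Time || arg == ArgType::Datetime;
  case FuncClass::NeedsDatetime:
    // Only DATETIME provides both a date and a time part without a cast.
    return arg == ArgType::Datetime;
  }
  return false;
}

int main()
{
  // Matches the pattern in the partition_error.result hunks below:
  // DAYOFWEEK(DATE column) is accepted, DAYOFWEEK(VARCHAR column) is rejected,
  // HOUR(TIME column) is accepted, HOUR(DATE column) is rejected.
  std::printf("DAYOFWEEK(date)    allowed: %d\n",
              is_allowed_partition_func(FuncClass::NeedsDate, ArgType::Date));
  std::printf("DAYOFWEEK(varchar) allowed: %d\n",
              is_allowed_partition_func(FuncClass::NeedsDate, ArgType::Varchar));
  std::printf("HOUR(time)         allowed: %d\n",
              is_allowed_partition_func(FuncClass::NeedsTime, ArgType::Time));
  std::printf("HOUR(date)         allowed: %d\n",
              is_allowed_partition_func(FuncClass::NeedsTime, ArgType::Date));
  return 0;
}

As the result hunks show, the check fires when the partition expression is set up, so every rejected combination surfaces as an error on the CREATE TABLE statement itself.
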
--- mysql-test/r/partition.result | 2 +- mysql-test/r/partition_error.result | 638 +++ .../parts/inc/part_supported_sql_funcs_main.inc | 48 +- .../parts/r/part_supported_sql_func_innodb.result | 5210 ++++++-------------- .../parts/r/part_supported_sql_func_myisam.result | 5210 ++++++-------------- mysql-test/t/partition.test | 5 +- mysql-test/t/partition_error.test | 664 +++ sql/item.h | 8 +- sql/item_func.h | 42 +- sql/item_timefunc.h | 94 +- sql/sql_partition.cc | 2 +- 11 files changed, 4637 insertions(+), 7286 deletions(-) diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index 8e65557d690..27ada9d1129 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -1778,7 +1778,7 @@ c1 bigint, c2 set('sweet'), key (c2,c1,c0), key(c0) -) engine=myisam partition by hash (month(c0)) partitions 5; +) engine=myisam partition by hash (c0) partitions 5; insert ignore into t1 set c0 = -6502262, c1 = 3992917, c2 = 35019; insert ignore into t1 set c0 = 241221, c1 = -6862346, c2 = 56644; select c1 from t1 group by (select c0 from t1 limit 1); diff --git a/mysql-test/r/partition_error.result b/mysql-test/r/partition_error.result index ea74f476ceb..0426ce42071 100644 --- a/mysql-test/r/partition_error.result +++ b/mysql-test/r/partition_error.result @@ -1,5 +1,643 @@ drop table if exists t1; # +# Bug#54483: valgrind errors when making warnings for multiline inserts +# into partition +# +CREATE TABLE t1 (a VARBINARY(10)) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a CHAR(10)) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIMESTAMP) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +INSERT INTO t1 VALUES ('test'),('a'),('5'); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'a' at row 2 +Warning 1265 Data truncated for column 'a' at row 3 +SHOW WARNINGS; +Level Code Message +Warning 1265 Data truncated for column 'a' at row 1 +Warning 1265 Data truncated for column 'a' at row 2 +Warning 1265 Data truncated for column 'a' at row 3 +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +INSERT INTO t1 VALUES ('test'),('a'),('5'); +Warnings: +Warning 1264 Out of range value for column 'a' at row 1 +Warning 1264 Out of range value for column 'a' at row 2 +Warning 1264 Out of range value for column 'a' at row 3 +SHOW WARNINGS; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +Warning 1264 Out of range value for column 'a' at row 2 +Warning 1264 Out of range value for column 'a' at row 3 +DROP TABLE t1; +CREATE TABLE t1 (a TIME) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +ERROR HY000: Constant, random or timezone-dependent 
expressions in (sub)partitioning function are not allowed +SHOW WARNINGS; +Level Code Message +Error 1486 Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (TO_DAYS(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (TO_DAYS(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (TO_DAYS(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TO_DAYS(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TO_DAYS(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (DAYOFMONTH(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (DAYOFMONTH(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (DAYOFMONTH(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (DAYOFMONTH(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (DAYOFMONTH(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (MONTH(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (MONTH(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (MONTH(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (MONTH(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (MONTH(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (DAYOFYEAR(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (DAYOFYEAR(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (DAYOFYEAR(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (DAYOFYEAR(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (DAYOFYEAR(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (HOUR(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (HOUR(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (HOUR(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (HOUR(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (HOUR(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed 
+CREATE TABLE t1 (a TIME) +PARTITION BY HASH (MINUTE(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (MINUTE(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (MINUTE(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (MINUTE(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (MINUTE(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (QUARTER(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (QUARTER(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (QUARTER(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (QUARTER(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (QUARTER(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (SECOND(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (SECOND(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (SECOND(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (SECOND(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (SECOND(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (YEARWEEK(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (YEARWEEK(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (YEARWEEK(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (YEARWEEK(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (YEARWEEK(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (WEEKDAY(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (WEEKDAY(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (WEEKDAY(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (WEEKDAY(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (WEEKDAY(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +# TO_SECONDS() is added in 5.5. 
+CREATE TABLE t1 (a TIME) +PARTITION BY HASH (TO_SECONDS(a)); +ERROR 42000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed near ')' at line 2 +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (TIME_TO_SEC(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TIME_TO_SEC(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TIME_TO_SEC(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (FROM_DAYS(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (FROM_DAYS(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (FROM_DAYS(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TO_DAYS(FROM_DAYS(a))); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (FROM_DAYS(a)); +ERROR HY000: The PARTITION function returns the wrong type +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TO_DAYS(FROM_DAYS(a))); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (FROM_DAYS(a)); +ERROR HY000: The PARTITION function returns the wrong type +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (MICROSECOND(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (MICROSECOND(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (MICROSECOND(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (MICROSECOND(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (MICROSECOND(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +# Bug#57071 +CREATE TABLE t1 +(`date` date, +`extracted_week` int, +`yearweek` int, +`week` int, +`default_week_format` int) +PARTITION BY LIST (EXTRACT(WEEK FROM date) % 3) +(PARTITION p0 VALUES IN (0), +PARTITION p1 VALUES IN (1), +PARTITION p2 VALUES IN (2)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 +(`date` date, +`extracted_week` int, +`yearweek` int, +`week` int, +`default_week_format` int); +SET @old_default_week_format := @@default_week_format; +SET default_week_format = 0; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 1; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), 
WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 2; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 3; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 4; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 5; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 6; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 7; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SELECT * FROM t1; +date extracted_week yearweek week default_week_format +2000-01-01 0 199952 0 0 +2000-01-01 0 199952 0 1 +2000-01-01 52 199952 52 2 +2000-01-01 52 199952 52 3 +2000-01-01 0 199952 0 4 +2000-01-01 0 199952 0 5 +2000-01-01 52 199952 52 6 +2000-01-01 52 199952 52 7 +SET default_week_format = @old_default_week_format; +DROP TABLE t1; +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +ERROR HY000: Constant, random or 
timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +# EXTRACT(WEEK...) is disallowed, see bug#57071. +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH 
(EXTRACT(DAY_MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH 
(EXTRACT(HOUR_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH 
(EXTRACT(DAY_MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a TIME, b DATE) +PARTITION BY HASH (DATEDIFF(a, b)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATE, b DATETIME) +PARTITION BY HASH (DATEDIFF(a, b)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME, b DATE) +PARTITION BY HASH (DATEDIFF(a, b)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE, b VARCHAR(10)) +PARTITION BY HASH (DATEDIFF(a, b)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT, b DATETIME) +PARTITION BY HASH (DATEDIFF(a, b)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 
(a TIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (TIME_TO_SEC(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TIME_TO_SEC(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TIME_TO_SEC(a)); +ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed +# # Bug#49161: Out of memory; restart server and try again (needed 2 bytes) # CREATE TABLE t1 (a INT) PARTITION BY HASH (a); diff --git a/mysql-test/suite/parts/inc/part_supported_sql_funcs_main.inc b/mysql-test/suite/parts/inc/part_supported_sql_funcs_main.inc index 25a9774d2a1..f5c9d09f067 100644 --- a/mysql-test/suite/parts/inc/part_supported_sql_funcs_main.inc +++ b/mysql-test/suite/parts/inc/part_supported_sql_funcs_main.inc @@ -111,8 +111,9 @@ let $val2 = '2006-01-17'; let $val3 = '2006-02-25'; let $val4 = '2006-02-05'; --source suite/parts/inc/partition_supported_sql_funcs.inc -let $coltype = char(30); ---source suite/parts/inc/partition_supported_sql_funcs.inc +# Disabled after fixing bug#54483. +#let $coltype = char(30); +#--source suite/parts/inc/partition_supported_sql_funcs.inc let $sqlfunc = extract(month from col1); let $valsqlfunc = extract(year from '1998-11-23'); @@ -162,8 +163,9 @@ let $val2 = '14:30:20'; let $val3 = '21:59:22'; let $val4 = '10:22:33'; --source suite/parts/inc/partition_supported_sql_funcs.inc -let $coltype = char(30); ---source suite/parts/inc/partition_supported_sql_funcs.inc +# second(non_time_col) is disabled after bug#54483. +#let $coltype = char(30); +#--source suite/parts/inc/partition_supported_sql_funcs.inc let $sqlfunc = month(col1); let $valsqlfunc = month('2006-10-14'); @@ -195,26 +197,28 @@ let $val3 = '21:59:22'; let $val4 = '10:33:11'; --source suite/parts/inc/partition_supported_sql_funcs.inc -let $sqlfunc = to_days(col1)-to_days('2006-01-01'); -let $valsqlfunc = to_days('2006-02-02')-to_days('2006-01-01'); -let $coltype = date; -let $infile = part_supported_sql_funcs_int_date.inc; -let $val1 = '2006-02-03'; -let $val2 = '2006-01-17'; -let $val3 = '2006-01-25'; -let $val4 = '2006-02-06'; ---source suite/parts/inc/partition_supported_sql_funcs.inc +# to_days(non_date_col) is disabled after bug#54483. +#let $sqlfunc = to_days(col1)-to_days('2006-01-01'); +#let $valsqlfunc = to_days('2006-02-02')-to_days('2006-01-01'); +#let $coltype = date; +#let $infile = part_supported_sql_funcs_int_date.inc; +#let $val1 = '2006-02-03'; +#let $val2 = '2006-01-17'; +#let $val3 = '2006-01-25'; +#let $val4 = '2006-02-06'; +#--source suite/parts/inc/partition_supported_sql_funcs.inc +# to_days(non_date_col) is disabled after bug#54483. 
# DATEDIFF() is implemented as (TO_DAYS(d1) - TO_DAYS(d2)) -let $sqlfunc = datediff(col1, '2006-01-01'); -let $valsqlfunc = datediff('2006-02-02', '2006-01-01'); -let $coltype = date; -let $infile = part_supported_sql_funcs_int_date.inc; -let $val1 = '2006-02-03'; -let $val2 = '2006-01-17'; -let $val3 = '2006-01-25'; -let $val4 = '2006-02-06'; ---source suite/parts/inc/partition_supported_sql_funcs.inc +#let $sqlfunc = datediff(col1, '2006-01-01'); +#let $valsqlfunc = datediff('2006-02-02', '2006-01-01'); +#let $coltype = date; +#let $infile = part_supported_sql_funcs_int_date.inc; +#let $val1 = '2006-02-03'; +#let $val2 = '2006-01-17'; +#let $val3 = '2006-01-25'; +#let $val4 = '2006-02-06'; +#--source suite/parts/inc/partition_supported_sql_funcs.inc let $sqlfunc = weekday(col1); let $valsqlfunc = weekday('2006-10-14'); diff --git a/mysql-test/suite/parts/r/part_supported_sql_func_innodb.result b/mysql-test/suite/parts/r/part_supported_sql_func_innodb.result index 79462c25050..c5b4e0a8665 100644 --- a/mysql-test/suite/parts/r/part_supported_sql_func_innodb.result +++ b/mysql-test/suite/parts/r/part_supported_sql_func_innodb.result @@ -5425,7 +5425,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- dayofyear(col1) in partition with coltype char(30) +--- extract(month from col1) in partition with coltype date ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -5434,14 +5434,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with dayofyear(col1) +--- Create tables with extract(month from col1) ------------------------------------------------------------------------- -create table t1 (col1 char(30)) engine='INNODB' -partition by range(dayofyear(col1)) +create table t1 (col1 date) engine='INNODB' +partition by range(extract(month from col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 char(30)) engine='INNODB' -partition by list(dayofyear(col1)) +create table t2 (col1 date) engine='INNODB' +partition by list(extract(month from col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5449,16 +5449,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 char(30)) engine='INNODB' -partition by hash(dayofyear(col1)); -create table t4 (colint int, col1 char(30)) engine='INNODB' +create table t3 (col1 date) engine='INNODB' +partition by hash(extract(month from col1)); +create table t4 (colint int, col1 date) engine='INNODB' partition by range(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 char(30)) engine='INNODB' +create table t5 (colint int, col1 date) engine='INNODB' partition by list(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in 
(11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5466,42 +5466,42 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 char(30)) engine='INNODB' +create table t6 (colint int, col1 date) engine='INNODB' partition by range(colint) -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with dayofyear(col1) +--- Access tables with extract(month from col1) ------------------------------------------------------------------------- insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-01-17'); +insert into t1 values ('2006-02-17'); insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-01-17'); -insert into t2 values ('2006-02-25'); +insert into t2 values ('2006-02-17'); +insert into t2 values ('2006-01-25'); insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-01-17'); -insert into t3 values ('2006-02-25'); +insert into t3 values ('2006-02-17'); +insert into t3 values ('2006-01-25'); load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select dayofyear(col1) from t1 order by col1; -dayofyear(col1) -3 -17 +select extract(month from col1) from t1 order by col1; +extract(month from col1) +1 +2 select * from t1 order by col1; col1 2006-01-03 -2006-01-17 +2006-02-17 select * from t2 order by col1; col1 2006-01-03 -2006-01-17 -2006-02-25 +2006-01-25 +2006-02-17 select * from t3 order by col1; col1 2006-01-03 -2006-01-17 -2006-02-25 +2006-01-25 +2006-02-17 select * from t4 order by colint; colint col1 1 2006-02-03 @@ -5528,18 +5528,18 @@ update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; select * from t1 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t2 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t3 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t4 order by colint; colint col1 1 2006-02-03 @@ -5559,7 +5559,7 @@ colint col1 3 2006-01-25 4 2006-02-05 ------------------------------------------------------------------------- ---- Alter tables with dayofyear(col1) +--- Alter tables with extract(month from col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -5574,11 +5574,11 @@ create table t44 engine='INNODB' as select * from t4; create table t55 engine='INNODB' as select * from t5; create table t66 engine='INNODB' as select * from t6; alter table t11 -partition by range(dayofyear(col1)) +partition by range(extract(month from col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(dayofyear(col1)) +partition by list(extract(month from col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in 
(21,22,23,24,25,26,27,28,29,30), @@ -5587,15 +5587,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(dayofyear(col1)); +partition by hash(extract(month from col1)); alter table t44 partition by range(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5605,22 +5605,22 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t22 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t33 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t44 order by colint; colint col1 1 2006-02-03 @@ -5647,19 +5647,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 alter table t55 partition by list(colint) -subpartition by hash(dayofyear(col1)) subpartitions 5 +subpartition by hash(extract(month from col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5671,10 +5671,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` char(30) DEFAULT NULL + `col1` date DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (dayofyear(col1)) +SUBPARTITION BY HASH (extract(month from col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, @@ -5699,7 +5699,7 @@ colint col1 4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 @@ -5718,7 +5718,7 @@ colint col1 4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 @@ -5727,73 +5727,78 @@ colint col1 3 2006-01-25 4 2006-02-05 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with dayofyear(col1) +--- Delete rows and partitions of tables with extract(month from col1) 
------------------------------------------------------------------------- -delete from t1 where col1='2006-01-17'; -delete from t2 where col1='2006-01-17'; -delete from t3 where col1='2006-01-17'; -delete from t4 where col1='2006-01-17'; -delete from t5 where col1='2006-01-17'; -delete from t6 where col1='2006-01-17'; +delete from t1 where col1='2006-02-17'; +delete from t2 where col1='2006-02-17'; +delete from t3 where col1='2006-02-17'; +delete from t4 where col1='2006-02-17'; +delete from t5 where col1='2006-02-17'; +delete from t6 where col1='2006-02-17'; select * from t1 order by col1; col1 2006-02-05 select * from t2 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t3 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t4 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 select * from t5 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-01-17'); -insert into t3 values ('2006-01-17'); -insert into t4 values (60,'2006-01-17'); -insert into t5 values (60,'2006-01-17'); -insert into t6 values (60,'2006-01-17'); +insert into t1 values ('2006-02-17'); +insert into t2 values ('2006-02-17'); +insert into t3 values ('2006-02-17'); +insert into t4 values (60,'2006-02-17'); +insert into t5 values (60,'2006-02-17'); +insert into t6 values (60,'2006-02-17'); select * from t1 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t2 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t3 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t4 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t5 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t6 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -5801,94 +5806,94 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -2006-01-17 -2006-02-05 select * from t2 order by col1; col1 -2006-01-17 -2006-02-05 -2006-02-25 select * from t3 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t4 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t5 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t6 order by colint; colint col1 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with dayofyear(col1) +--- Delete rows and partitions of tables with extract(month from col1) ------------------------------------------------------------------------- -delete from t11 where col1='2006-01-17'; -delete from t22 where col1='2006-01-17'; -delete from t33 where col1='2006-01-17'; -delete from t44 where col1='2006-01-17'; -delete from t55 where col1='2006-01-17'; -delete from t66 where col1='2006-01-17'; +delete from t11 where col1='2006-02-17'; +delete from t22 where col1='2006-02-17'; +delete from t33 where col1='2006-02-17'; +delete from t44 where col1='2006-02-17'; +delete from t55 where col1='2006-02-17'; +delete from t66 where col1='2006-02-17'; select * from t11 order by col1; col1 2006-02-05 
select * from t22 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t33 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t44 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 select * from t55 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -insert into t11 values ('2006-01-17'); -insert into t22 values ('2006-01-17'); -insert into t33 values ('2006-01-17'); -insert into t44 values (60,'2006-01-17'); -insert into t55 values (60,'2006-01-17'); -insert into t66 values (60,'2006-01-17'); +insert into t11 values ('2006-02-17'); +insert into t22 values ('2006-02-17'); +insert into t33 values ('2006-02-17'); +insert into t44 values (60,'2006-02-17'); +insert into t55 values (60,'2006-02-17'); +insert into t66 values (60,'2006-02-17'); select * from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t22 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t33 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t44 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t55 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t66 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -5896,24 +5901,19 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -2006-01-17 -2006-02-05 select * from t22 order by col1; col1 -2006-01-17 -2006-02-05 -2006-02-25 select * from t33 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t44 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t55 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t66 order by colint; colint col1 ------------------------- @@ -5932,7 +5932,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- extract(month from col1) in partition with coltype date +--- hour(col1) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -5941,14 +5941,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with extract(month from col1) +--- Create tables with hour(col1) ------------------------------------------------------------------------- -create table t1 (col1 date) engine='INNODB' -partition by range(extract(month from col1)) +create table t1 (col1 time) engine='INNODB' +partition by range(hour(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 date) engine='INNODB' -partition by list(extract(month from col1)) +create table t2 (col1 time) engine='INNODB' +partition by list(hour(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5956,16 +5956,16 @@ partition p3 values in 
(31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 date) engine='INNODB' -partition by hash(extract(month from col1)); -create table t4 (colint int, col1 date) engine='INNODB' +create table t3 (col1 time) engine='INNODB' +partition by hash(hour(col1)); +create table t4 (colint int, col1 time) engine='INNODB' partition by range(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='INNODB' +create table t5 (colint int, col1 time) engine='INNODB' partition by list(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5973,100 +5973,100 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 date) engine='INNODB' +create table t6 (colint int, col1 time) engine='INNODB' partition by range(colint) -(partition p0 values less than (extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with extract(month from col1) +--- Access tables with hour(col1) ------------------------------------------------------------------------- -insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-02-17'); -insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-02-17'); -insert into t2 values ('2006-01-25'); -insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-02-17'); -insert into t3 values ('2006-01-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select extract(month from col1) from t1 order by col1; -extract(month from col1) -1 -2 +insert into t1 values ('09:09'); +insert into t1 values ('14:30'); +insert into t2 values ('09:09'); +insert into t2 values ('14:30'); +insert into t2 values ('21:59'); +insert into t3 values ('09:09'); +insert into t3 values ('14:30'); +insert into t3 values ('21:59'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; +select hour(col1) from t1 order by col1; +hour(col1) +9 +14 select * from t1 order by col1; col1 -2006-01-03 -2006-02-17 +09:09:00 +14:30:00 select * from t2 order by col1; col1 -2006-01-03 -2006-01-25 -2006-02-17 +09:09:00 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-03 -2006-01-25 -2006-02-17 +09:09:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 
04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-02-05' where col1='2006-01-03'; -update t2 set col1='2006-02-05' where col1='2006-01-03'; -update t3 set col1='2006-02-05' where col1='2006-01-03'; -update t4 set col1='2006-02-05' where col1='2006-01-03'; -update t5 set col1='2006-02-05' where col1='2006-01-03'; -update t6 set col1='2006-02-05' where col1='2006-01-03'; +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +update t1 set col1='10:30' where col1='09:09'; +update t2 set col1='10:30' where col1='09:09'; +update t3 set col1='10:30' where col1='09:09'; +update t4 set col1='10:30' where col1='09:09'; +update t5 set col1='10:30' where col1='09:09'; +update t6 set col1='10:30' where col1='09:09'; select * from t1 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t2 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with extract(month from col1) +--- Alter tables with hour(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -6081,11 +6081,11 @@ create table t44 engine='INNODB' as select * from t4; create table t55 engine='INNODB' as select * from t5; create table t66 engine='INNODB' as select * from t6; alter table t11 -partition by range(extract(month from col1)) +partition by range(hour(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(extract(month from col1)) +partition by list(hour(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6094,15 +6094,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(extract(month from col1)); +partition by hash(hour(col1)); alter table t44 partition by range(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6112,40 +6112,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than 
(extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t22 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 --------------------------- ---- some alter table begin --------------------------- @@ -6154,19 +6154,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 alter table t55 partition by list(colint) -subpartition by hash(extract(month from col1)) subpartitions 5 +subpartition by hash(hour(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6178,10 +6178,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL + `col1` time DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (extract(month from col1)) +SUBPARTITION BY HASH (hour(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, @@ -6191,121 +6191,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 
09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with extract(month from col1) +--- Delete rows and partitions of tables with hour(col1) ------------------------------------------------------------------------- -delete from t1 where col1='2006-02-17'; -delete from t2 where col1='2006-02-17'; -delete from t3 where col1='2006-02-17'; -delete from t4 where col1='2006-02-17'; -delete from t5 where col1='2006-02-17'; -delete from t6 where col1='2006-02-17'; +delete from t1 where col1='14:30'; +delete from t2 where col1='14:30'; +delete from t3 where col1='14:30'; +delete from t4 where col1='14:30'; +delete from t5 where col1='14:30'; +delete from t6 where col1='14:30'; select * from t1 order by col1; col1 -2006-02-05 +10:30:00 select * from t2 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-02-17'); -insert into t2 values ('2006-02-17'); -insert into t3 values ('2006-02-17'); -insert into t4 values (60,'2006-02-17'); -insert into t5 values (60,'2006-02-17'); -insert into t6 values (60,'2006-02-17'); +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t1 values ('14:30'); +insert into t2 values ('14:30'); +insert into t3 values ('14:30'); +insert into t4 values (60,'14:30'); +insert into t5 values (60,'14:30'); +insert into t6 values (60,'14:30'); select * from t1 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t2 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -6315,92 +6315,95 @@ select * from t1 order by col1; col1 select * from t2 order by col1; col1 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t5 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t6 order by colint; colint col1 +60 14:30:00 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with extract(month from col1) +--- Delete rows and partitions of tables with hour(col1) ------------------------------------------------------------------------- -delete from t11 where col1='2006-02-17'; -delete from t22 where col1='2006-02-17'; -delete from t33 where col1='2006-02-17'; -delete from t44 where 
col1='2006-02-17'; -delete from t55 where col1='2006-02-17'; -delete from t66 where col1='2006-02-17'; +delete from t11 where col1='14:30'; +delete from t22 where col1='14:30'; +delete from t33 where col1='14:30'; +delete from t44 where col1='14:30'; +delete from t55 where col1='14:30'; +delete from t66 where col1='14:30'; select * from t11 order by col1; col1 -2006-02-05 +10:30:00 select * from t22 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-02-17'); -insert into t22 values ('2006-02-17'); -insert into t33 values ('2006-02-17'); -insert into t44 values (60,'2006-02-17'); -insert into t55 values (60,'2006-02-17'); -insert into t66 values (60,'2006-02-17'); +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t11 values ('14:30'); +insert into t22 values ('14:30'); +insert into t33 values ('14:30'); +insert into t44 values (60,'14:30'); +insert into t55 values (60,'14:30'); +insert into t66 values (60,'14:30'); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t22 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -6410,19 +6413,22 @@ select * from t11 order by col1; col1 select * from t22 order by col1; col1 +14:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t44 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t55 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t66 order by colint; colint col1 +60 14:30:00 ------------------------- ---- some alter table end ------------------------- @@ -6439,7 +6445,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- hour(col1) in partition with coltype time +--- microsecond(col1) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -6448,14 +6454,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with hour(col1) +--- Create tables with microsecond(col1) ------------------------------------------------------------------------- create table t1 (col1 time) engine='INNODB' -partition by 
range(hour(col1)) +partition by range(microsecond(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); create table t2 (col1 time) engine='INNODB' -partition by list(hour(col1)) +partition by list(microsecond(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6464,15 +6470,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t3 (col1 time) engine='INNODB' -partition by hash(hour(col1)); +partition by hash(microsecond(col1)); create table t4 (colint int, col1 time) engine='INNODB' partition by range(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); create table t5 (colint int, col1 time) engine='INNODB' partition by list(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6482,41 +6488,41 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t6 (colint int, col1 time) engine='INNODB' partition by range(colint) -(partition p0 values less than (hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with hour(col1) +--- Access tables with microsecond(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09'); -insert into t1 values ('14:30'); -insert into t2 values ('09:09'); -insert into t2 values ('14:30'); -insert into t2 values ('21:59'); -insert into t3 values ('09:09'); -insert into t3 values ('14:30'); -insert into t3 values ('21:59'); +insert into t1 values ('09:09:15.000002'); +insert into t1 values ('04:30:01.000018'); +insert into t2 values ('09:09:15.000002'); +insert into t2 values ('04:30:01.000018'); +insert into t2 values ('00:59:22.000024'); +insert into t3 values ('09:09:15.000002'); +insert into t3 values ('04:30:01.000018'); +insert into t3 values ('00:59:22.000024'); load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select hour(col1) from t1 order by col1; -hour(col1) -9 -14 +select microsecond(col1) from t1 order by col1; +microsecond(col1) +0 +0 select * from t1 order by col1; col1 -09:09:00 -14:30:00 +04:30:01 +09:09:15 select * from t2 order by col1; col1 -09:09:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +09:09:15 select * from t3 order by col1; col1 -09:09:00 -14:30:00 -21:59:00 -select * from t4 order by colint; +00:59:22 +04:30:01 +09:09:15 +select * from t4 order by colint; colint col1 1 09:09:15 2 04:30:01 @@ -6534,46 +6540,46 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 -update t1 set col1='10:30' where col1='09:09'; -update t2 set col1='10:30' where col1='09:09'; -update t3 set col1='10:30' where col1='09:09'; -update t4 set col1='10:30' where col1='09:09'; -update t5 set 
col1='10:30' where col1='09:09'; -update t6 set col1='10:30' where col1='09:09'; +update t1 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t2 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t3 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t4 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t5 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t6 set col1='05:30:34.000037' where col1='09:09:15.000002'; select * from t1 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t2 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t3 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t4 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t6 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with hour(col1) +--- Alter tables with microsecond(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -6588,11 +6594,11 @@ create table t44 engine='INNODB' as select * from t4; create table t55 engine='INNODB' as select * from t5; create table t66 engine='INNODB' as select * from t6; alter table t11 -partition by range(hour(col1)) +partition by range(microsecond(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(hour(col1)) +partition by list(microsecond(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6601,15 +6607,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(hour(col1)); +partition by hash(microsecond(col1)); alter table t44 partition by range(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6619,37 +6625,37 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t22 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t33 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t44 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 
00:59:22 4 05:30:34 @@ -6661,19 +6667,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 alter table t55 partition by list(colint) -subpartition by hash(hour(col1)) subpartitions 5 +subpartition by hash(microsecond(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6688,7 +6694,7 @@ t55 CREATE TABLE `t55` ( `col1` time DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (hour(col1)) +SUBPARTITION BY HASH (microsecond(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, @@ -6698,7 +6704,7 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ select * from t55 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 @@ -6707,17 +6713,17 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 @@ -6726,93 +6732,88 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with hour(col1) +--- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30'; -delete from t2 where col1='14:30'; -delete from t3 where col1='14:30'; -delete from t4 where col1='14:30'; -delete from t5 where col1='14:30'; -delete from t6 where col1='14:30'; +delete from t1 where col1='04:30:01.000018'; +delete from t2 where col1='04:30:01.000018'; +delete from t3 where col1='04:30:01.000018'; +delete from t4 where col1='04:30:01.000018'; +delete from t5 where col1='04:30:01.000018'; +delete from t6 where col1='04:30:01.000018'; select * from t1 order by col1; col1 -10:30:00 +05:30:34 select * from t2 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t3 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -insert into t1 values ('14:30'); -insert into t2 values 
('14:30'); -insert into t3 values ('14:30'); -insert into t4 values (60,'14:30'); -insert into t5 values (60,'14:30'); -insert into t6 values (60,'14:30'); +insert into t1 values ('04:30:01.000018'); +insert into t2 values ('04:30:01.000018'); +insert into t3 values ('04:30:01.000018'); +insert into t4 values (60,'04:30:01.000018'); +insert into t5 values (60,'04:30:01.000018'); +insert into t6 values (60,'04:30:01.000018'); select * from t1 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t2 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t3 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -6822,95 +6823,88 @@ select * from t1 order by col1; col1 select * from t2 order by col1; col1 -14:30:00 -21:59:00 select * from t3 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t4 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t5 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t6 order by colint; colint col1 -60 14:30:00 +60 04:30:01 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with hour(col1) +--- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30'; -delete from t22 where col1='14:30'; -delete from t33 where col1='14:30'; -delete from t44 where col1='14:30'; -delete from t55 where col1='14:30'; -delete from t66 where col1='14:30'; +delete from t11 where col1='04:30:01.000018'; +delete from t22 where col1='04:30:01.000018'; +delete from t33 where col1='04:30:01.000018'; +delete from t44 where col1='04:30:01.000018'; +delete from t55 where col1='04:30:01.000018'; +delete from t66 where col1='04:30:01.000018'; select * from t11 order by col1; col1 -10:30:00 +05:30:34 select * from t22 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t33 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -insert into t11 values ('14:30'); -insert into t22 values ('14:30'); -insert into t33 values ('14:30'); -insert into t44 values (60,'14:30'); -insert into t55 values (60,'14:30'); -insert into t66 values (60,'14:30'); +insert into t11 values ('04:30:01.000018'); +insert into t22 values ('04:30:01.000018'); +insert into t33 values ('04:30:01.000018'); +insert into t44 values (60,'04:30:01.000018'); +insert into t55 values (60,'04:30:01.000018'); +insert into t66 values (60,'04:30:01.000018'); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t22 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t33 order by col1; col1 -10:30:00 -14:30:00 
-21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -6920,22 +6914,20 @@ select * from t11 order by col1; col1 select * from t22 order by col1; col1 -14:30:00 -21:59:00 select * from t33 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t44 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t55 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t66 order by colint; colint col1 -60 14:30:00 +60 04:30:01 ------------------------- ---- some alter table end ------------------------- @@ -6952,7 +6944,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- microsecond(col1) in partition with coltype time +--- minute(col1) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -6961,14 +6953,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with microsecond(col1) +--- Create tables with minute(col1) ------------------------------------------------------------------------- create table t1 (col1 time) engine='INNODB' -partition by range(microsecond(col1)) +partition by range(minute(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); create table t2 (col1 time) engine='INNODB' -partition by list(microsecond(col1)) +partition by list(minute(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6977,15 +6969,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t3 (col1 time) engine='INNODB' -partition by hash(microsecond(col1)); +partition by hash(minute(col1)); create table t4 (colint int, col1 time) engine='INNODB' partition by range(colint) -subpartition by hash(microsecond(col1)) subpartitions 2 +subpartition by hash(minute(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); create table t5 (colint int, col1 time) engine='INNODB' partition by list(colint) -subpartition by hash(microsecond(col1)) subpartitions 2 +subpartition by hash(minute(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6995,40 +6987,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t6 (colint int, col1 time) engine='INNODB' partition by range(colint) -(partition p0 values less than (microsecond('10:30:10.000010')), +(partition p0 values less than (minute('18:30')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables 
with microsecond(col1)
+--- Access tables with minute(col1)
-------------------------------------------------------------------------
-insert into t1 values ('09:09:15.000002');
-insert into t1 values ('04:30:01.000018');
-insert into t2 values ('09:09:15.000002');
-insert into t2 values ('04:30:01.000018');
-insert into t2 values ('00:59:22.000024');
-insert into t3 values ('09:09:15.000002');
-insert into t3 values ('04:30:01.000018');
-insert into t3 values ('00:59:22.000024');
+insert into t1 values ('09:09:15');
+insert into t1 values ('14:30:45');
+insert into t2 values ('09:09:15');
+insert into t2 values ('14:30:45');
+insert into t2 values ('21:59:22');
+insert into t3 values ('09:09:15');
+insert into t3 values ('14:30:45');
+insert into t3 values ('21:59:22');
load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4;
load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5;
load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6;
-select microsecond(col1) from t1 order by col1;
-microsecond(col1)
-0
-0
+select minute(col1) from t1 order by col1;
+minute(col1)
+9
+30
select * from t1 order by col1;
col1
-04:30:01
09:09:15
+14:30:45
select * from t2 order by col1;
col1
-00:59:22
-04:30:01
09:09:15
+14:30:45
+21:59:22
select * from t3 order by col1;
col1
-00:59:22
-04:30:01
09:09:15
+14:30:45
+21:59:22
select * from t4 order by colint;
colint col1
1 09:09:15
@@ -7047,46 +7039,46 @@ colint col1
2 04:30:01
3 00:59:22
4 05:30:34
-update t1 set col1='05:30:34.000037' where col1='09:09:15.000002';
-update t2 set col1='05:30:34.000037' where col1='09:09:15.000002';
-update t3 set col1='05:30:34.000037' where col1='09:09:15.000002';
-update t4 set col1='05:30:34.000037' where col1='09:09:15.000002';
-update t5 set col1='05:30:34.000037' where col1='09:09:15.000002';
-update t6 set col1='05:30:34.000037' where col1='09:09:15.000002';
+update t1 set col1='10:24:23' where col1='09:09:15';
+update t2 set col1='10:24:23' where col1='09:09:15';
+update t3 set col1='10:24:23' where col1='09:09:15';
+update t4 set col1='10:24:23' where col1='09:09:15';
+update t5 set col1='10:24:23' where col1='09:09:15';
+update t6 set col1='10:24:23' where col1='09:09:15';
select * from t1 order by col1;
col1
-04:30:01
-05:30:34
+10:24:23
+14:30:45
select * from t2 order by col1;
col1
-00:59:22
-04:30:01
-05:30:34
+10:24:23
+14:30:45
+21:59:22
select * from t3 order by col1;
col1
-00:59:22
-04:30:01
-05:30:34
+10:24:23
+14:30:45
+21:59:22
select * from t4 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
select * from t5 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
select * from t6 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
-------------------------------------------------------------------------
---- Alter tables with microsecond(col1)
+--- Alter tables with minute(col1)
-------------------------------------------------------------------------
drop table if exists t11 ;
drop table if exists t22 ;
@@ -7101,11 +7093,11 @@ create table t44 engine='INNODB' as select * from t4;
create table t55 engine='INNODB' as select * from t5;
create table t66 engine='INNODB' as select * from t6;
alter table t11
-partition by range(microsecond(col1))
+partition by range(minute(col1))
(partition p0 values less than (15),
partition p1 values less than maxvalue);
alter table t22
-partition by list(microsecond(col1))
+partition by list(minute(col1))
(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10),
partition p1 values in (11,12,13,14,15,16,17,18,19,20),
partition p2 values in (21,22,23,24,25,26,27,28,29,30),
@@ -7114,15 +7106,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50),
partition p5 values in (51,52,53,54,55,56,57,58,59,60)
);
alter table t33
-partition by hash(microsecond(col1));
+partition by hash(minute(col1));
alter table t44
partition by range(colint)
-subpartition by hash(microsecond(col1)) subpartitions 2
+subpartition by hash(minute(col1)) subpartitions 2
(partition p0 values less than (15),
partition p1 values less than maxvalue);
alter table t55
partition by list(colint)
-subpartition by hash(microsecond(col1)) subpartitions 2
+subpartition by hash(minute(col1)) subpartitions 2
(partition p0 values in (1,2,3,4,5,6,7,8,9,10),
partition p1 values in (11,12,13,14,15,16,17,18,19,20),
partition p2 values in (21,22,23,24,25,26,27,28,29,30),
@@ -7132,37 +7124,37 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60)
);
alter table t66
partition by range(colint)
-(partition p0 values less than (microsecond('10:30:10.000010')),
+(partition p0 values less than (minute('18:30')),
partition p1 values less than maxvalue);
select * from t11 order by col1;
col1
-04:30:01
-05:30:34
+10:24:23
+14:30:45
select * from t22 order by col1;
col1
-00:59:22
-04:30:01
-05:30:34
+10:24:23
+14:30:45
+21:59:22
select * from t33 order by col1;
col1
-00:59:22
-04:30:01
-05:30:34
+10:24:23
+14:30:45
+21:59:22
select * from t44 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
select * from t55 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
select * from t66 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
@@ -7174,19 +7166,19 @@ reorganize partition p0,p1 into
(partition s1 values less than maxvalue);
select * from t11 order by col1;
col1
-04:30:01
-05:30:34
+10:24:23
+14:30:45
alter table t11
reorganize partition s1 into
(partition p0 values less than (15),
partition p1 values less than maxvalue);
select * from t11 order by col1;
col1
-04:30:01
-05:30:34
+10:24:23
+14:30:45
alter table t55
partition by list(colint)
-subpartition by hash(microsecond(col1)) subpartitions 5
+subpartition by hash(minute(col1)) subpartitions 5
(partition p0 values in (1,2,3,4,5,6,7,8,9,10),
partition p1 values in (11,12,13,14,15,16,17,18,19,20),
partition p2 values in (21,22,23,24,25,26,27,28,29,30),
@@ -7201,7 +7193,7 @@ t55 CREATE TABLE `t55` (
`col1` time DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY LIST (colint)
-SUBPARTITION BY HASH (microsecond(col1))
+SUBPARTITION BY HASH (minute(col1))
SUBPARTITIONS 5
(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB,
PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB,
@@ -7211,7 +7203,7 @@ SUBPARTITIONS 5
PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */
select * from t55 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
@@ -7220,17 +7212,17 @@ reorganize partition p0,p1 into
(partition s1 values less than maxvalue);
select * from t66 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
alter table t66
reorganize partition s1 into
-(partition p0 values less than (microsecond('10:30:10.000010')),
+(partition p0 values less than (minute('18:30')),
partition p1 values less than maxvalue);
select * from t66 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
@@ -7239,88 +7231,93 @@ reorganize partition p0,p1 into
(partition s1 values less than maxvalue);
select * from t66 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
alter table t66
reorganize partition s1 into
-(partition p0 values less than (microsecond('10:30:10.000010')),
+(partition p0 values less than (minute('18:30')),
partition p1 values less than maxvalue);
select * from t66 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
2 04:30:01
3 00:59:22
4 05:30:34
-------------------------------------------------------------------------
---- Delete rows and partitions of tables with microsecond(col1)
+--- Delete rows and partitions of tables with minute(col1)
-------------------------------------------------------------------------
-delete from t1 where col1='04:30:01.000018';
-delete from t2 where col1='04:30:01.000018';
-delete from t3 where col1='04:30:01.000018';
-delete from t4 where col1='04:30:01.000018';
-delete from t5 where col1='04:30:01.000018';
-delete from t6 where col1='04:30:01.000018';
+delete from t1 where col1='14:30:45';
+delete from t2 where col1='14:30:45';
+delete from t3 where col1='14:30:45';
+delete from t4 where col1='14:30:45';
+delete from t5 where col1='14:30:45';
+delete from t6 where col1='14:30:45';
select * from t1 order by col1;
col1
-05:30:34
+10:24:23
select * from t2 order by col1;
col1
-00:59:22
-05:30:34
+10:24:23
+21:59:22
select * from t3 order by col1;
col1
-00:59:22
-05:30:34
+10:24:23
+21:59:22
select * from t4 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
+2 04:30:01
3 00:59:22
4 05:30:34
select * from t5 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
+2 04:30:01
3 00:59:22
4 05:30:34
-insert into t1 values ('04:30:01.000018');
-insert into t2 values ('04:30:01.000018');
-insert into t3 values ('04:30:01.000018');
-insert into t4 values (60,'04:30:01.000018');
-insert into t5 values (60,'04:30:01.000018');
-insert into t6 values (60,'04:30:01.000018');
+insert into t1 values ('14:30:45');
+insert into t2 values ('14:30:45');
+insert into t3 values ('14:30:45');
+insert into t4 values (60,'14:30:45');
+insert into t5 values (60,'14:30:45');
+insert into t6 values (60,'14:30:45');
select * from t1 order by col1;
col1
-04:30:01
-05:30:34
+10:24:23
+14:30:45
select * from t2 order by col1;
col1
-00:59:22
-04:30:01
-05:30:34
+10:24:23
+14:30:45
+21:59:22
select * from t3 order by col1;
col1
-00:59:22
-04:30:01
-05:30:34
+10:24:23
+14:30:45
+21:59:22
select * from t4 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
+2 04:30:01
3 00:59:22
4 05:30:34
-60 04:30:01
+60 14:30:45
select * from t5 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
+2 04:30:01
3 00:59:22
4 05:30:34
-60 04:30:01
+60 14:30:45
select * from t6 order by colint;
colint col1
-1 05:30:34
+1 10:24:23
+2 04:30:01
3 00:59:22
4 05:30:34
-60 04:30:01
+60 14:30:45
alter table t1 drop partition p0;
alter table t2 drop partition p0;
alter table t4 drop partition p0;
@@ -7328,90 +7325,100 @@ alter table t5 drop partition p0;
alter table t6 drop partition p0;
select * from t1 order by col1;
col1
+10:24:23
+14:30:45
select * from t2 order by col1;
col1
+10:24:23
+14:30:45
+21:59:22
select * from t3 order by col1;
col1
-00:59:22
-04:30:01
-05:30:34
+10:24:23
+14:30:45
+21:59:22
select * from t4 order by colint;
colint col1
-60 04:30:01
+60 14:30:45
select * from t5 order by colint;
colint col1
-60 04:30:01
+60 14:30:45
select * from t6 order by colint; colint col1 -60 04:30:01 +60 14:30:45 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with microsecond(col1) +--- Delete rows and partitions of tables with minute(col1) ------------------------------------------------------------------------- -delete from t11 where col1='04:30:01.000018'; -delete from t22 where col1='04:30:01.000018'; -delete from t33 where col1='04:30:01.000018'; -delete from t44 where col1='04:30:01.000018'; -delete from t55 where col1='04:30:01.000018'; -delete from t66 where col1='04:30:01.000018'; +delete from t11 where col1='14:30:45'; +delete from t22 where col1='14:30:45'; +delete from t33 where col1='14:30:45'; +delete from t44 where col1='14:30:45'; +delete from t55 where col1='14:30:45'; +delete from t66 where col1='14:30:45'; select * from t11 order by col1; col1 -05:30:34 +10:24:23 select * from t22 order by col1; col1 -00:59:22 -05:30:34 +10:24:23 +21:59:22 select * from t33 order by col1; col1 -00:59:22 -05:30:34 +10:24:23 +21:59:22 select * from t44 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -insert into t11 values ('04:30:01.000018'); -insert into t22 values ('04:30:01.000018'); -insert into t33 values ('04:30:01.000018'); -insert into t44 values (60,'04:30:01.000018'); -insert into t55 values (60,'04:30:01.000018'); -insert into t66 values (60,'04:30:01.000018'); +insert into t11 values ('14:30:45'); +insert into t22 values ('14:30:45'); +insert into t33 values ('14:30:45'); +insert into t44 values (60,'14:30:45'); +insert into t55 values (60,'14:30:45'); +insert into t66 values (60,'14:30:45'); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +10:24:23 +14:30:45 select * from t22 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 select * from t55 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 select * from t66 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -7419,22 +7426,27 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 +10:24:23 +14:30:45 select * from t22 order by col1; col1 +10:24:23 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -60 04:30:01 +60 14:30:45 select * from t55 order by colint; colint col1 -60 04:30:01 +60 14:30:45 select * from t66 order by colint; colint col1 -60 04:30:01 +60 14:30:45 ------------------------- ---- some alter table end ------------------------- @@ -7451,7 +7463,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- minute(col1) in partition with coltype time +--- second(col1) in partition with coltype time ------------------------------------------------------------------------- drop table 
if exists t1 ; drop table if exists t2 ; @@ -7460,14 +7472,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with minute(col1) +--- Create tables with second(col1) ------------------------------------------------------------------------- create table t1 (col1 time) engine='INNODB' -partition by range(minute(col1)) +partition by range(second(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); create table t2 (col1 time) engine='INNODB' -partition by list(minute(col1)) +partition by list(second(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7476,15 +7488,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t3 (col1 time) engine='INNODB' -partition by hash(minute(col1)); +partition by hash(second(col1)); create table t4 (colint int, col1 time) engine='INNODB' partition by range(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); create table t5 (colint int, col1 time) engine='INNODB' partition by list(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7494,39 +7506,39 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t6 (colint int, col1 time) engine='INNODB' partition by range(colint) -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with minute(col1) +--- Access tables with second(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09:15'); -insert into t1 values ('14:30:45'); -insert into t2 values ('09:09:15'); -insert into t2 values ('14:30:45'); +insert into t1 values ('09:09:09'); +insert into t1 values ('14:30:20'); +insert into t2 values ('09:09:09'); +insert into t2 values ('14:30:20'); insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:15'); -insert into t3 values ('14:30:45'); +insert into t3 values ('09:09:09'); +insert into t3 values ('14:30:20'); insert into t3 values ('21:59:22'); load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select minute(col1) from t1 order by col1; -minute(col1) +select second(col1) from t1 order by col1; +second(col1) 9 -30 +20 select * from t1 order by col1; col1 -09:09:15 -14:30:45 +09:09:09 +14:30:20 select * from t2 order by col1; col1 -09:09:15 -14:30:45 +09:09:09 +14:30:20 21:59:22 select * from t3 order by col1; col1 -09:09:15 -14:30:45 +09:09:09 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 @@ -7546,46 +7558,46 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 -update t1 set 
col1='10:24:23' where col1='09:09:15'; -update t2 set col1='10:24:23' where col1='09:09:15'; -update t3 set col1='10:24:23' where col1='09:09:15'; -update t4 set col1='10:24:23' where col1='09:09:15'; -update t5 set col1='10:24:23' where col1='09:09:15'; -update t6 set col1='10:24:23' where col1='09:09:15'; +update t1 set col1='10:22:33' where col1='09:09:09'; +update t2 set col1='10:22:33' where col1='09:09:09'; +update t3 set col1='10:22:33' where col1='09:09:09'; +update t4 set col1='10:22:33' where col1='09:09:09'; +update t5 set col1='10:22:33' where col1='09:09:09'; +update t6 set col1='10:22:33' where col1='09:09:09'; select * from t1 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t2 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t3 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t6 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with minute(col1) +--- Alter tables with second(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -7600,11 +7612,11 @@ create table t44 engine='INNODB' as select * from t4; create table t55 engine='INNODB' as select * from t5; create table t66 engine='INNODB' as select * from t6; alter table t11 -partition by range(minute(col1)) +partition by range(second(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(minute(col1)) +partition by list(second(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7613,15 +7625,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(minute(col1)); +partition by hash(second(col1)); alter table t44 partition by range(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7631,37 +7643,37 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t22 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t33 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t44 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from 
t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7673,19 +7685,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 alter table t55 partition by list(colint) -subpartition by hash(minute(col1)) subpartitions 5 +subpartition by hash(second(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7700,7 +7712,7 @@ t55 CREATE TABLE `t55` ( `col1` time DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (minute(col1)) +SUBPARTITION BY HASH (second(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, @@ -7710,7 +7722,7 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7719,17 +7731,17 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7738,93 +7750,93 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with minute(col1) +--- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30:45'; -delete from t2 where col1='14:30:45'; -delete from t3 where col1='14:30:45'; -delete from t4 where col1='14:30:45'; -delete from t5 where col1='14:30:45'; -delete from t6 where col1='14:30:45'; +delete from t1 where col1='14:30:20'; +delete from t2 where col1='14:30:20'; +delete from t3 where col1='14:30:20'; +delete from t4 where col1='14:30:20'; +delete from t5 where col1='14:30:20'; +delete from t6 where col1='14:30:20'; select * from t1 order by col1; col1 -10:24:23 +10:22:33 select * from t2 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t3 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t4 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -insert into t1 values ('14:30:45'); -insert into t2 values 
('14:30:45'); -insert into t3 values ('14:30:45'); -insert into t4 values (60,'14:30:45'); -insert into t5 values (60,'14:30:45'); -insert into t6 values (60,'14:30:45'); +insert into t1 values ('14:30:20'); +insert into t2 values ('14:30:20'); +insert into t3 values ('14:30:20'); +insert into t4 values (60,'14:30:20'); +insert into t5 values (60,'14:30:20'); +insert into t6 values (60,'14:30:20'); select * from t1 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t2 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t3 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t5 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t6 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -7832,100 +7844,100 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t2 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t3 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 -60 14:30:45 +60 14:30:20 select * from t5 order by colint; colint col1 -60 14:30:45 +60 14:30:20 select * from t6 order by colint; colint col1 -60 14:30:45 +60 14:30:20 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with minute(col1) +--- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30:45'; -delete from t22 where col1='14:30:45'; -delete from t33 where col1='14:30:45'; -delete from t44 where col1='14:30:45'; -delete from t55 where col1='14:30:45'; -delete from t66 where col1='14:30:45'; +delete from t11 where col1='14:30:20'; +delete from t22 where col1='14:30:20'; +delete from t33 where col1='14:30:20'; +delete from t44 where col1='14:30:20'; +delete from t55 where col1='14:30:20'; +delete from t66 where col1='14:30:20'; select * from t11 order by col1; col1 -10:24:23 +10:22:33 select * from t22 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t33 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t44 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -insert into t11 values ('14:30:45'); -insert into t22 values ('14:30:45'); -insert into t33 values ('14:30:45'); -insert into t44 values (60,'14:30:45'); -insert into t55 values (60,'14:30:45'); -insert into t66 values (60,'14:30:45'); +insert into t11 values ('14:30:20'); +insert into t22 values ('14:30:20'); +insert into t33 values ('14:30:20'); +insert into t44 values (60,'14:30:20'); +insert into t55 values (60,'14:30:20'); +insert into t66 values (60,'14:30:20'); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t22 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t33 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 
+14:30:20 21:59:22 select * from t44 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -7933,27 +7945,27 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t22 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t33 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t44 order by colint; colint col1 -60 14:30:45 +60 14:30:20 select * from t55 order by colint; colint col1 -60 14:30:45 +60 14:30:20 select * from t66 order by colint; colint col1 -60 14:30:45 +60 14:30:20 ------------------------- ---- some alter table end ------------------------- @@ -7970,7 +7982,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- second(col1) in partition with coltype time +--- month(col1) in partition with coltype date ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -7979,14 +7991,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with second(col1) +--- Create tables with month(col1) ------------------------------------------------------------------------- -create table t1 (col1 time) engine='INNODB' -partition by range(second(col1)) +create table t1 (col1 date) engine='INNODB' +partition by range(month(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 time) engine='INNODB' -partition by list(second(col1)) +create table t2 (col1 date) engine='INNODB' +partition by list(month(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7994,16 +8006,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 time) engine='INNODB' -partition by hash(second(col1)); -create table t4 (colint int, col1 time) engine='INNODB' +create table t3 (col1 date) engine='INNODB' +partition by hash(month(col1)); +create table t4 (colint int, col1 date) engine='INNODB' partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 time) engine='INNODB' +create table t5 (colint int, col1 date) engine='INNODB' partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8011,100 +8023,100 @@ partition p3 values 
in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 time) engine='INNODB' +create table t6 (colint int, col1 date) engine='INNODB' partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with second(col1) +--- Access tables with month(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09:09'); -insert into t1 values ('14:30:20'); -insert into t2 values ('09:09:09'); -insert into t2 values ('14:30:20'); -insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:09'); -insert into t3 values ('14:30:20'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select second(col1) from t1 order by col1; -second(col1) -9 -20 +insert into t1 values ('2006-01-03'); +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-01-03'); +insert into t2 values ('2006-12-17'); +insert into t2 values ('2006-05-25'); +insert into t3 values ('2006-01-03'); +insert into t3 values ('2006-12-17'); +insert into t3 values ('2006-05-25'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; +select month(col1) from t1 order by col1; +month(col1) +1 +12 select * from t1 order by col1; col1 -09:09:09 -14:30:20 +2006-01-03 +2006-12-17 select * from t2 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-05-25 +2006-12-17 select * from t3 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-05-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -update t1 set col1='10:22:33' where col1='09:09:09'; -update t2 set col1='10:22:33' where col1='09:09:09'; -update t3 set col1='10:22:33' where col1='09:09:09'; -update t4 set col1='10:22:33' where col1='09:09:09'; -update t5 set col1='10:22:33' where col1='09:09:09'; -update t6 set col1='10:22:33' where col1='09:09:09'; +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +update t1 set col1='2006-11-06' where col1='2006-01-03'; +update t2 set col1='2006-11-06' where col1='2006-01-03'; +update t3 set col1='2006-11-06' where col1='2006-01-03'; +update t4 set col1='2006-11-06' where col1='2006-01-03'; +update t5 set col1='2006-11-06' where col1='2006-01-03'; +update t6 set col1='2006-11-06' where col1='2006-01-03'; select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t2 order by col1; col1 
-10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Alter tables with second(col1) +--- Alter tables with month(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -8119,11 +8131,11 @@ create table t44 engine='INNODB' as select * from t4; create table t55 engine='INNODB' as select * from t5; create table t66 engine='INNODB' as select * from t6; alter table t11 -partition by range(second(col1)) +partition by range(month(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(second(col1)) +partition by list(month(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8132,15 +8144,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(second(col1)); +partition by hash(month(col1)); alter table t44 partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8150,40 +8162,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 --------------------------- ---- some alter table begin --------------------------- @@ -8192,19 +8204,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 alter table t11 reorganize partition s1 
into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 5 +subpartition by hash(month(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8216,10 +8228,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL + `col1` date DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (second(col1)) +SUBPARTITION BY HASH (month(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, @@ -8229,121 +8241,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with second(col1) +--- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30:20'; -delete from t2 where col1='14:30:20'; -delete from t3 where col1='14:30:20'; -delete from t4 where col1='14:30:20'; -delete from t5 where col1='14:30:20'; -delete from t6 where col1='14:30:20'; +delete from t1 where col1='2006-12-17'; +delete from t2 where col1='2006-12-17'; +delete from t3 where col1='2006-12-17'; +delete from t4 where col1='2006-12-17'; +delete from t5 where col1='2006-12-17'; +delete from t6 where col1='2006-12-17'; select * from t1 order by col1; col1 -10:22:33 +2006-11-06 select * from t2 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t3 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 
09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t1 values ('14:30:20'); -insert into t2 values ('14:30:20'); -insert into t3 values ('14:30:20'); -insert into t4 values (60,'14:30:20'); -insert into t5 values (60,'14:30:20'); -insert into t6 values (60,'14:30:20'); +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-12-17'); +insert into t3 values ('2006-12-17'); +insert into t4 values (60,'2006-12-17'); +insert into t5 values (60,'2006-12-17'); +insert into t6 values (60,'2006-12-17'); select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -8351,100 +8363,97 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -10:22:33 -14:30:20 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-11-06 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t4 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t5 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t6 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with second(col1) +--- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30:20'; -delete from t22 where col1='14:30:20'; -delete from t33 where col1='14:30:20'; -delete from t44 where col1='14:30:20'; -delete from t55 where col1='14:30:20'; -delete from t66 where col1='14:30:20'; +delete from t11 where col1='2006-12-17'; +delete from t22 where col1='2006-12-17'; +delete from t33 where col1='2006-12-17'; +delete from t44 where col1='2006-12-17'; +delete from t55 where col1='2006-12-17'; +delete from t66 where col1='2006-12-17'; select * from t11 order by col1; col1 -10:22:33 +2006-11-06 select * from t22 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t33 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t11 values ('14:30:20'); -insert into t22 values ('14:30:20'); -insert into t33 values ('14:30:20'); -insert into t44 values (60,'14:30:20'); -insert into t55 values (60,'14:30:20'); -insert into t66 values (60,'14:30:20'); +1 2006-02-03 +2 
2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t11 values ('2006-12-17'); +insert into t22 values ('2006-12-17'); +insert into t33 values ('2006-12-17'); +insert into t44 values (60,'2006-12-17'); +insert into t55 values (60,'2006-12-17'); +insert into t66 values (60,'2006-12-17'); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -8452,27 +8461,24 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -10:22:33 -14:30:20 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-11-06 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t44 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t55 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t66 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 ------------------------- ---- some alter table end ------------------------- @@ -8489,7 +8495,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- second(col1) in partition with coltype char(30) +--- quarter(col1) in partition with coltype date ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -8498,14 +8504,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with second(col1) +--- Create tables with quarter(col1) ------------------------------------------------------------------------- -create table t1 (col1 char(30)) engine='INNODB' -partition by range(second(col1)) +create table t1 (col1 date) engine='INNODB' +partition by range(quarter(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 char(30)) engine='INNODB' -partition by list(second(col1)) +create table t2 (col1 date) engine='INNODB' +partition by list(quarter(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8513,16 +8519,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 char(30)) engine='INNODB' -partition by hash(second(col1)); -create table t4 (colint int, col1 char(30)) engine='INNODB' +create table t3 (col1 date) engine='INNODB' 
+partition by hash(quarter(col1)); +create table t4 (colint int, col1 date) engine='INNODB' partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 char(30)) engine='INNODB' +create table t5 (colint int, col1 date) engine='INNODB' partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8530,100 +8536,100 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 char(30)) engine='INNODB' +create table t6 (colint int, col1 date) engine='INNODB' partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with second(col1) +--- Access tables with quarter(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09:09'); -insert into t1 values ('14:30:20'); -insert into t2 values ('09:09:09'); -insert into t2 values ('14:30:20'); -insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:09'); -insert into t3 values ('14:30:20'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select second(col1) from t1 order by col1; -second(col1) -9 -20 +insert into t1 values ('2006-01-03'); +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-01-03'); +insert into t2 values ('2006-12-17'); +insert into t2 values ('2006-09-25'); +insert into t3 values ('2006-01-03'); +insert into t3 values ('2006-12-17'); +insert into t3 values ('2006-09-25'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; +select quarter(col1) from t1 order by col1; +quarter(col1) +1 +4 select * from t1 order by col1; col1 -09:09:09 -14:30:20 +2006-01-03 +2006-12-17 select * from t2 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-09-25 +2006-12-17 select * from t3 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 
05:30:34.000037 -update t1 set col1='10:22:33' where col1='09:09:09'; -update t2 set col1='10:22:33' where col1='09:09:09'; -update t3 set col1='10:22:33' where col1='09:09:09'; -update t4 set col1='10:22:33' where col1='09:09:09'; -update t5 set col1='10:22:33' where col1='09:09:09'; -update t6 set col1='10:22:33' where col1='09:09:09'; +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +update t1 set col1='2006-07-30' where col1='2006-01-03'; +update t2 set col1='2006-07-30' where col1='2006-01-03'; +update t3 set col1='2006-07-30' where col1='2006-01-03'; +update t4 set col1='2006-07-30' where col1='2006-01-03'; +update t5 set col1='2006-07-30' where col1='2006-01-03'; +update t6 set col1='2006-07-30' where col1='2006-01-03'; select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Alter tables with second(col1) +--- Alter tables with quarter(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -8638,11 +8644,11 @@ create table t44 engine='INNODB' as select * from t4; create table t55 engine='INNODB' as select * from t5; create table t66 engine='INNODB' as select * from t6; alter table t11 -partition by range(second(col1)) +partition by range(quarter(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(second(col1)) +partition by list(quarter(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8651,15 +8657,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(second(col1)); +partition by hash(quarter(col1)); alter table t44 partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8669,40 +8675,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 
select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 -select * from t33 order by col1; -col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 +select * from t33 order by col1; +col1 +2006-07-30 +2006-09-25 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 --------------------------- ---- some alter table begin --------------------------- @@ -8711,19 +8717,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 5 +subpartition by hash(quarter(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8735,10 +8741,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` char(30) DEFAULT NULL + `col1` date DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (second(col1)) +SUBPARTITION BY HASH (quarter(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, @@ -8748,121 +8754,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 
00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with second(col1) +--- Delete rows and partitions of tables with quarter(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30:20'; -delete from t2 where col1='14:30:20'; -delete from t3 where col1='14:30:20'; -delete from t4 where col1='14:30:20'; -delete from t5 where col1='14:30:20'; -delete from t6 where col1='14:30:20'; +delete from t1 where col1='2006-12-17'; +delete from t2 where col1='2006-12-17'; +delete from t3 where col1='2006-12-17'; +delete from t4 where col1='2006-12-17'; +delete from t5 where col1='2006-12-17'; +delete from t6 where col1='2006-12-17'; select * from t1 order by col1; col1 -10:22:33 +2006-07-30 select * from t2 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t3 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -insert into t1 values ('14:30:20'); -insert into t2 values ('14:30:20'); -insert into t3 values ('14:30:20'); -insert into t4 values (60,'14:30:20'); -insert into t5 values (60,'14:30:20'); -insert into t6 values (60,'14:30:20'); +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-12-17'); +insert into t3 values ('2006-12-17'); +insert into t4 values (60,'2006-12-17'); +insert into t5 values (60,'2006-12-17'); +insert into t6 values (60,'2006-12-17'); select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t6 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -8870,100 +8876,96 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -10:22:33 -14:30:20 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t5 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t6 order by colint; colint col1 -60 14:30:20 +4 2006-02-05 +60 2006-12-17 ------------------------------------------------------------------------- ---- Delete rows and 
partitions of tables with second(col1) +--- Delete rows and partitions of tables with quarter(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30:20'; -delete from t22 where col1='14:30:20'; -delete from t33 where col1='14:30:20'; -delete from t44 where col1='14:30:20'; -delete from t55 where col1='14:30:20'; -delete from t66 where col1='14:30:20'; +delete from t11 where col1='2006-12-17'; +delete from t22 where col1='2006-12-17'; +delete from t33 where col1='2006-12-17'; +delete from t44 where col1='2006-12-17'; +delete from t55 where col1='2006-12-17'; +delete from t66 where col1='2006-12-17'; select * from t11 order by col1; col1 -10:22:33 +2006-07-30 select * from t22 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t33 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t44 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -insert into t11 values ('14:30:20'); -insert into t22 values ('14:30:20'); -insert into t33 values ('14:30:20'); -insert into t44 values (60,'14:30:20'); -insert into t55 values (60,'14:30:20'); -insert into t66 values (60,'14:30:20'); +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t11 values ('2006-12-17'); +insert into t22 values ('2006-12-17'); +insert into t33 values ('2006-12-17'); +insert into t44 values (60,'2006-12-17'); +insert into t55 values (60,'2006-12-17'); +insert into t66 values (60,'2006-12-17'); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -8971,27 +8973,23 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -10:22:33 -14:30:20 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t44 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t55 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t66 order by colint; colint col1 -60 14:30:20 +4 2006-02-05 +60 2006-12-17 ------------------------- ---- some alter table end ------------------------- @@ -9008,7 +9006,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; 
------------------------------------------------------------------------- ---- month(col1) in partition with coltype date +--- time_to_sec(col1)-(time_to_sec(col1)-20) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -9017,14 +9015,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with month(col1) +--- Create tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -create table t1 (col1 date) engine='INNODB' -partition by range(month(col1)) +create table t1 (col1 time) engine='INNODB' +partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 date) engine='INNODB' -partition by list(month(col1)) +create table t2 (col1 time) engine='INNODB' +partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9032,16 +9030,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 date) engine='INNODB' -partition by hash(month(col1)); -create table t4 (colint int, col1 date) engine='INNODB' +create table t3 (col1 time) engine='INNODB' +partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); +create table t4 (colint int, col1 time) engine='INNODB' partition by range(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='INNODB' +create table t5 (colint int, col1 time) engine='INNODB' partition by list(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9049,100 +9047,100 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 date) engine='INNODB' +create table t6 (colint int, col1 time) engine='INNODB' partition by range(colint) -(partition p0 values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with month(col1) +--- Access tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-12-17'); -insert into t2 values ('2006-05-25'); -insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-12-17'); -insert into t3 values ('2006-05-25'); -load data infile 
'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select month(col1) from t1 order by col1; -month(col1) -1 -12 +insert into t1 values ('09:09:15'); +insert into t1 values ('14:30:45'); +insert into t2 values ('09:09:15'); +insert into t2 values ('14:30:45'); +insert into t2 values ('21:59:22'); +insert into t3 values ('09:09:15'); +insert into t3 values ('14:30:45'); +insert into t3 values ('21:59:22'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; +select time_to_sec(col1)-(time_to_sec(col1)-20) from t1 order by col1; +time_to_sec(col1)-(time_to_sec(col1)-20) +20 +20 select * from t1 order by col1; col1 -2006-01-03 -2006-12-17 +09:09:15 +14:30:45 select * from t2 order by col1; col1 -2006-01-03 -2006-05-25 -2006-12-17 +09:09:15 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-01-03 -2006-05-25 -2006-12-17 +09:09:15 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-11-06' where col1='2006-01-03'; -update t2 set col1='2006-11-06' where col1='2006-01-03'; -update t3 set col1='2006-11-06' where col1='2006-01-03'; -update t4 set col1='2006-11-06' where col1='2006-01-03'; -update t5 set col1='2006-11-06' where col1='2006-01-03'; -update t6 set col1='2006-11-06' where col1='2006-01-03'; +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +update t1 set col1='10:33:11' where col1='09:09:15'; +update t2 set col1='10:33:11' where col1='09:09:15'; +update t3 set col1='10:33:11' where col1='09:09:15'; +update t4 set col1='10:33:11' where col1='09:09:15'; +update t5 set col1='10:33:11' where col1='09:09:15'; +update t6 set col1='10:33:11' where col1='09:09:15'; select * from t1 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 select * from t2 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with month(col1) +--- Alter tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -9157,11 
+9155,11 @@ create table t44 engine='INNODB' as select * from t4; create table t55 engine='INNODB' as select * from t5; create table t66 engine='INNODB' as select * from t6; alter table t11 -partition by range(month(col1)) +partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(month(col1)) +partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9170,15 +9168,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(month(col1)); +partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); alter table t44 partition by range(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9188,40 +9186,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 select * from t22 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 --------------------------- ---- some alter table begin --------------------------- @@ -9230,19 +9228,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 alter table t55 partition by list(colint) -subpartition by hash(month(col1)) subpartitions 5 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9254,10 +9252,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL + `col1` time DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION 
BY LIST (colint) -SUBPARTITION BY HASH (month(col1)) +SUBPARTITION BY HASH (time_to_sec(col1)-(time_to_sec(col1)-20)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, @@ -9267,2166 +9265,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with month(col1) -------------------------------------------------------------------------- -delete from t1 where col1='2006-12-17'; -delete from t2 where col1='2006-12-17'; -delete from t3 where col1='2006-12-17'; -delete from t4 where col1='2006-12-17'; -delete from t5 where col1='2006-12-17'; -delete from t6 where col1='2006-12-17'; -select * from t1 order by col1; -col1 -2006-11-06 -select * from t2 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t3 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-12-17'); -insert into t3 values ('2006-12-17'); -insert into t4 values (60,'2006-12-17'); -insert into t5 values (60,'2006-12-17'); -insert into t6 values (60,'2006-12-17'); -select * from t1 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t2 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t3 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; 
-select * from t1 order by col1; -col1 -select * from t2 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t3 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t4 order by colint; -colint col1 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -60 2006-12-17 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with month(col1) -------------------------------------------------------------------------- -delete from t11 where col1='2006-12-17'; -delete from t22 where col1='2006-12-17'; -delete from t33 where col1='2006-12-17'; -delete from t44 where col1='2006-12-17'; -delete from t55 where col1='2006-12-17'; -delete from t66 where col1='2006-12-17'; -select * from t11 order by col1; -col1 -2006-11-06 -select * from t22 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t33 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-12-17'); -insert into t22 values ('2006-12-17'); -insert into t33 values ('2006-12-17'); -insert into t44 values (60,'2006-12-17'); -insert into t55 values (60,'2006-12-17'); -insert into t66 values (60,'2006-12-17'); -select * from t11 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t22 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t33 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -select * from t22 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t33 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t44 order by colint; -colint col1 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -60 2006-12-17 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- ---- quarter(col1) in partition with coltype date -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with quarter(col1) -------------------------------------------------------------------------- 
-create table t1 (col1 date) engine='INNODB' -partition by range(quarter(col1)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 date) engine='INNODB' -partition by list(quarter(col1)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 date) engine='INNODB' -partition by hash(quarter(col1)); -create table t4 (colint int, col1 date) engine='INNODB' -partition by range(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='INNODB' -partition by list(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 date) engine='INNODB' -partition by range(colint) -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with quarter(col1) -------------------------------------------------------------------------- -insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-12-17'); -insert into t2 values ('2006-09-25'); -insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-12-17'); -insert into t3 values ('2006-09-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select quarter(col1) from t1 order by col1; -quarter(col1) -1 -4 -select * from t1 order by col1; -col1 -2006-01-03 -2006-12-17 -select * from t2 order by col1; -col1 -2006-01-03 -2006-09-25 -2006-12-17 -select * from t3 order by col1; -col1 -2006-01-03 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-07-30' where col1='2006-01-03'; -update t2 set col1='2006-07-30' where col1='2006-01-03'; -update t3 set col1='2006-07-30' where col1='2006-01-03'; -update t4 set col1='2006-07-30' where col1='2006-01-03'; -update t5 set col1='2006-07-30' where col1='2006-01-03'; -update t6 set col1='2006-07-30' where col1='2006-01-03'; -select * from t1 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t2 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 
2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Alter tables with quarter(col1) -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='INNODB' as select * from t1; -create table t22 engine='INNODB' as select * from t2; -create table t33 engine='INNODB' as select * from t3; -create table t44 engine='INNODB' as select * from t4; -create table t55 engine='INNODB' as select * from t5; -create table t66 engine='INNODB' as select * from t6; -alter table t11 -partition by range(quarter(col1)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(quarter(col1)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(quarter(col1)); -alter table t44 -partition by range(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t22 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -alter table t55 -partition by list(colint) -subpartition by hash(quarter(col1)) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in 
(31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (quarter(col1)) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = InnoDB, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = InnoDB, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = InnoDB, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with quarter(col1) -------------------------------------------------------------------------- -delete from t1 where col1='2006-12-17'; -delete from t2 where col1='2006-12-17'; -delete from t3 where col1='2006-12-17'; -delete from t4 where col1='2006-12-17'; -delete from t5 where col1='2006-12-17'; -delete from t6 where col1='2006-12-17'; -select * from t1 order by col1; -col1 -2006-07-30 -select * from t2 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-12-17'); -insert into t3 values ('2006-12-17'); -insert into t4 values (60,'2006-12-17'); -insert into t5 values (60,'2006-12-17'); -insert into t6 values (60,'2006-12-17'); -select * from t1 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t2 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t1 drop partition p0; -alter 
table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -select * from t2 order by col1; -col1 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -4 2006-02-05 -60 2006-12-17 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with quarter(col1) -------------------------------------------------------------------------- -delete from t11 where col1='2006-12-17'; -delete from t22 where col1='2006-12-17'; -delete from t33 where col1='2006-12-17'; -delete from t44 where col1='2006-12-17'; -delete from t55 where col1='2006-12-17'; -delete from t66 where col1='2006-12-17'; -select * from t11 order by col1; -col1 -2006-07-30 -select * from t22 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-12-17'); -insert into t22 values ('2006-12-17'); -insert into t33 values ('2006-12-17'); -insert into t44 values (60,'2006-12-17'); -insert into t55 values (60,'2006-12-17'); -insert into t66 values (60,'2006-12-17'); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t22 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -select * from t22 order by col1; -col1 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t44 order by colint; -colint col1 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -4 2006-02-05 -60 2006-12-17 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- ---- time_to_sec(col1)-(time_to_sec(col1)-20) in partition with coltype time -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; 
-------------------------------------------------------------------------- ---- Create tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -create table t1 (col1 time) engine='INNODB' -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 time) engine='INNODB' -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 time) engine='INNODB' -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -create table t4 (colint int, col1 time) engine='INNODB' -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 time) engine='INNODB' -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 time) engine='INNODB' -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -insert into t1 values ('09:09:15'); -insert into t1 values ('14:30:45'); -insert into t2 values ('09:09:15'); -insert into t2 values ('14:30:45'); -insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:15'); -insert into t3 values ('14:30:45'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select time_to_sec(col1)-(time_to_sec(col1)-20) from t1 order by col1; -time_to_sec(col1)-(time_to_sec(col1)-20) -20 -20 -select * from t1 order by col1; -col1 -09:09:15 -14:30:45 -select * from t2 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -update t1 set col1='10:33:11' where col1='09:09:15'; -update t2 set col1='10:33:11' where col1='09:09:15'; -update t3 set col1='10:33:11' where col1='09:09:15'; -update t4 set col1='10:33:11' where 
col1='09:09:15'; -update t5 set col1='10:33:11' where col1='09:09:15'; -update t6 set col1='10:33:11' where col1='09:09:15'; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Alter tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='INNODB' as select * from t1; -create table t22 engine='INNODB' as select * from t2; -create table t33 engine='INNODB' as select * from t3; -create table t44 engine='INNODB' as select * from t4; -create table t55 engine='INNODB' as select * from t5; -create table t66 engine='INNODB' as select * from t6; -alter table t11 -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -alter table t44 -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter 
table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (time_to_sec(col1)-(time_to_sec(col1)-20)) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = InnoDB, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = InnoDB, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = InnoDB, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t1 where col1='14:30:45'; -delete from t2 where col1='14:30:45'; -delete from t3 where col1='14:30:45'; -delete from t4 where col1='14:30:45'; -delete from t5 where col1='14:30:45'; -delete from t6 where col1='14:30:45'; -select * from t1 order by col1; -col1 -10:33:11 -select * from t2 order by col1; -col1 -10:33:11 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t1 values ('14:30:45'); -insert into t2 values ('14:30:45'); -insert into t3 values ('14:30:45'); -insert into t4 values (60,'14:30:45'); -insert into t5 values (60,'14:30:45'); -insert into t6 values (60,'14:30:45'); -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 
-10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -60 14:30:45 -select * from t5 order by colint; -colint col1 -60 14:30:45 -select * from t6 order by colint; -colint col1 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t11 where col1='14:30:45'; -delete from t22 where col1='14:30:45'; -delete from t33 where col1='14:30:45'; -delete from t44 where col1='14:30:45'; -delete from t55 where col1='14:30:45'; -delete from t66 where col1='14:30:45'; -select * from t11 order by col1; -col1 -10:33:11 -select * from t22 order by col1; -col1 -10:33:11 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t11 values ('14:30:45'); -insert into t22 values ('14:30:45'); -insert into t33 values ('14:30:45'); -insert into t44 values (60,'14:30:45'); -insert into t55 values (60,'14:30:45'); -insert into t66 values (60,'14:30:45'); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -60 14:30:45 -select * from t55 order by colint; -colint col1 -60 14:30:45 -select * from t66 order by colint; -colint col1 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; 
-------------------------------------------------------------------------- ---- to_days(col1)-to_days('2006-01-01') in partition with coltype date -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -create table t1 (col1 date) engine='INNODB' -partition by range(to_days(col1)-to_days('2006-01-01')) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 date) engine='INNODB' -partition by list(to_days(col1)-to_days('2006-01-01')) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 date) engine='INNODB' -partition by hash(to_days(col1)-to_days('2006-01-01')); -create table t4 (colint int, col1 date) engine='INNODB' -partition by range(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='INNODB' -partition by list(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 date) engine='INNODB' -partition by range(colint) -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -insert into t1 values ('2006-02-03'); -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-02-03'); -insert into t2 values ('2006-01-17'); -insert into t2 values ('2006-01-25'); -insert into t3 values ('2006-02-03'); -insert into t3 values ('2006-01-17'); -insert into t3 values ('2006-01-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select to_days(col1)-to_days('2006-01-01') from t1 order by col1; -to_days(col1)-to_days('2006-01-01') -16 -33 -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-03 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by 
colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-02-06' where col1='2006-02-03'; -update t2 set col1='2006-02-06' where col1='2006-02-03'; -update t3 set col1='2006-02-06' where col1='2006-02-03'; -update t4 set col1='2006-02-06' where col1='2006-02-03'; -update t5 set col1='2006-02-06' where col1='2006-02-03'; -update t6 set col1='2006-02-06' where col1='2006-02-03'; -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Alter tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='INNODB' as select * from t1; -create table t22 engine='INNODB' as select * from t2; -create table t33 engine='INNODB' as select * from t3; -create table t44 engine='INNODB' as select * from t4; -create table t55 engine='INNODB' as select * from t5; -create table t66 engine='INNODB' as select * from t6; -alter table t11 -partition by range(to_days(col1)-to_days('2006-01-01')) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(to_days(col1)-to_days('2006-01-01')) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(to_days(col1)-to_days('2006-01-01')); -alter table t44 -partition by range(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * 
from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t55 -partition by list(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (to_days(col1)-to_days('2006-01-01')) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = InnoDB, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = InnoDB, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = InnoDB, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ -select * from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -delete from t1 where col1='2006-01-17'; -delete from t2 where col1='2006-01-17'; -delete from t3 where col1='2006-01-17'; -delete from t4 where col1='2006-01-17'; -delete from t5 where col1='2006-01-17'; -delete from t6 where col1='2006-01-17'; -select * from t1 order by col1; -col1 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t4 order by colint; 
-colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-01-17'); -insert into t3 values ('2006-01-17'); -insert into t4 values (60,'2006-01-17'); -insert into t5 values (60,'2006-01-17'); -insert into t6 values (60,'2006-01-17'); -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t6 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -60 2006-01-17 -select * from t5 order by colint; -colint col1 -60 2006-01-17 -select * from t6 order by colint; -colint col1 -60 2006-01-17 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -delete from t11 where col1='2006-01-17'; -delete from t22 where col1='2006-01-17'; -delete from t33 where col1='2006-01-17'; -delete from t44 where col1='2006-01-17'; -delete from t55 where col1='2006-01-17'; -delete from t66 where col1='2006-01-17'; -select * from t11 order by col1; -col1 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-01-17'); -insert into t22 values ('2006-01-17'); -insert into t33 values ('2006-01-17'); -insert into t44 values (60,'2006-01-17'); -insert into t55 values (60,'2006-01-17'); -insert into t66 values (60,'2006-01-17'); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t55 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t66 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; 
-colint col1
-60 2006-01-17
-select * from t55 order by colint;
-colint col1
-60 2006-01-17
-select * from t66 order by colint;
-colint col1
-60 2006-01-17
--------------------------
----- some alter table end
--------------------------
-drop table if exists t1 ;
-drop table if exists t2 ;
-drop table if exists t3 ;
-drop table if exists t4 ;
-drop table if exists t5 ;
-drop table if exists t6 ;
-drop table if exists t11 ;
-drop table if exists t22 ;
-drop table if exists t33 ;
-drop table if exists t44 ;
-drop table if exists t55 ;
-drop table if exists t66 ;
--------------------------------------------------------------------------
---- datediff(col1, '2006-01-01') in partition with coltype date
--------------------------------------------------------------------------
-drop table if exists t1 ;
-drop table if exists t2 ;
-drop table if exists t3 ;
-drop table if exists t4 ;
-drop table if exists t5 ;
-drop table if exists t6 ;
--------------------------------------------------------------------------
---- Create tables with datediff(col1, '2006-01-01')
--------------------------------------------------------------------------
-create table t1 (col1 date) engine='INNODB'
-partition by range(datediff(col1, '2006-01-01'))
-(partition p0 values less than (15),
-partition p1 values less than maxvalue);
-create table t2 (col1 date) engine='INNODB'
-partition by list(datediff(col1, '2006-01-01'))
-(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10),
-partition p1 values in (11,12,13,14,15,16,17,18,19,20),
-partition p2 values in (21,22,23,24,25,26,27,28,29,30),
-partition p3 values in (31,32,33,34,35,36,37,38,39,40),
-partition p4 values in (41,42,43,44,45,46,47,48,49,50),
-partition p5 values in (51,52,53,54,55,56,57,58,59,60)
-);
-create table t3 (col1 date) engine='INNODB'
-partition by hash(datediff(col1, '2006-01-01'));
-create table t4 (colint int, col1 date) engine='INNODB'
-partition by range(colint)
-subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2
-(partition p0 values less than (15),
-partition p1 values less than maxvalue);
-create table t5 (colint int, col1 date) engine='INNODB'
-partition by list(colint)
-subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2
-(partition p0 values in (1,2,3,4,5,6,7,8,9,10),
-partition p1 values in (11,12,13,14,15,16,17,18,19,20),
-partition p2 values in (21,22,23,24,25,26,27,28,29,30),
-partition p3 values in (31,32,33,34,35,36,37,38,39,40),
-partition p4 values in (41,42,43,44,45,46,47,48,49,50),
-partition p5 values in (51,52,53,54,55,56,57,58,59,60)
-);
-create table t6 (colint int, col1 date) engine='INNODB'
-partition by range(colint)
-(partition p0 values less than (datediff('2006-02-02', '2006-01-01')),
-partition p1 values less than maxvalue);
--------------------------------------------------------------------------
---- Access tables with datediff(col1, '2006-01-01')
--------------------------------------------------------------------------
-insert into t1 values ('2006-02-03');
-insert into t1 values ('2006-01-17');
-insert into t2 values ('2006-02-03');
-insert into t2 values ('2006-01-17');
-insert into t2 values ('2006-01-25');
-insert into t3 values ('2006-02-03');
-insert into t3 values ('2006-01-17');
-insert into t3 values ('2006-01-25');
-load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4;
-load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5;
-load data infile
'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select datediff(col1, '2006-01-01') from t1 order by col1; -datediff(col1, '2006-01-01') -16 -33 -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-03 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-02-06' where col1='2006-02-03'; -update t2 set col1='2006-02-06' where col1='2006-02-03'; -update t3 set col1='2006-02-06' where col1='2006-02-03'; -update t4 set col1='2006-02-06' where col1='2006-02-03'; -update t5 set col1='2006-02-06' where col1='2006-02-03'; -update t6 set col1='2006-02-06' where col1='2006-02-03'; -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Alter tables with datediff(col1, '2006-01-01') -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='INNODB' as select * from t1; -create table t22 engine='INNODB' as select * from t2; -create table t33 engine='INNODB' as select * from t3; -create table t44 engine='INNODB' as select * from t4; -create table t55 engine='INNODB' as select * from t5; -create table t66 engine='INNODB' as select * from t6; -alter table t11 -partition by range(datediff(col1, '2006-01-01')) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(datediff(col1, '2006-01-01')) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(datediff(col1, '2006-01-01')); -alter table t44 -partition by range(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter 
table t66 -partition by range(colint) -(partition p0 values less than (datediff('2006-02-02', '2006-01-01')), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t55 -partition by list(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (datediff(col1, '2006-01-01')) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = InnoDB, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = InnoDB, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = InnoDB, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = InnoDB, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = InnoDB, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = InnoDB) */ -select * from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (datediff('2006-02-02', '2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (datediff('2006-02-02', '2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with datediff(col1, '2006-01-01') +--- Delete rows and partitions of tables 
with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -delete from t1 where col1='2006-01-17'; -delete from t2 where col1='2006-01-17'; -delete from t3 where col1='2006-01-17'; -delete from t4 where col1='2006-01-17'; -delete from t5 where col1='2006-01-17'; -delete from t6 where col1='2006-01-17'; +delete from t1 where col1='14:30:45'; +delete from t2 where col1='14:30:45'; +delete from t3 where col1='14:30:45'; +delete from t4 where col1='14:30:45'; +delete from t5 where col1='14:30:45'; +delete from t6 where col1='14:30:45'; select * from t1 order by col1; col1 -2006-02-06 +10:33:11 select * from t2 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t3 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-01-17'); -insert into t3 values ('2006-01-17'); -insert into t4 values (60,'2006-01-17'); -insert into t5 values (60,'2006-01-17'); -insert into t6 values (60,'2006-01-17'); +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t1 values ('14:30:45'); +insert into t2 values ('14:30:45'); +insert into t3 values ('14:30:45'); +insert into t4 values (60,'14:30:45'); +insert into t5 values (60,'14:30:45'); +insert into t6 values (60,'14:30:45'); select * from t1 order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t2 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t5 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t6 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -11434,95 +9387,99 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t2 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t5 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t6 order by colint; colint col1 -60 2006-01-17 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with datediff(col1, '2006-01-01') +--- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -delete from t11 where col1='2006-01-17'; -delete from t22 where col1='2006-01-17'; -delete from t33 where col1='2006-01-17'; -delete from t44 where col1='2006-01-17'; -delete from t55 where 
col1='2006-01-17'; -delete from t66 where col1='2006-01-17'; +delete from t11 where col1='14:30:45'; +delete from t22 where col1='14:30:45'; +delete from t33 where col1='14:30:45'; +delete from t44 where col1='14:30:45'; +delete from t55 where col1='14:30:45'; +delete from t66 where col1='14:30:45'; select * from t11 order by col1; col1 -2006-02-06 +10:33:11 select * from t22 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t33 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t44 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-01-17'); -insert into t22 values ('2006-01-17'); -insert into t33 values ('2006-01-17'); -insert into t44 values (60,'2006-01-17'); -insert into t55 values (60,'2006-01-17'); -insert into t66 values (60,'2006-01-17'); +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t11 values ('14:30:45'); +insert into t22 values ('14:30:45'); +insert into t33 values ('14:30:45'); +insert into t44 values (60,'14:30:45'); +insert into t55 values (60,'14:30:45'); +insert into t66 values (60,'14:30:45'); select * from t11 order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t22 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t55 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t66 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -11530,27 +9487,26 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t22 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t55 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t66 order by colint; colint col1 -60 2006-01-17 ------------------------- ---- some alter table end ------------------------- diff --git a/mysql-test/suite/parts/r/part_supported_sql_func_myisam.result b/mysql-test/suite/parts/r/part_supported_sql_func_myisam.result index 375a6e130be..3cd8e10a4f3 100644 --- a/mysql-test/suite/parts/r/part_supported_sql_func_myisam.result +++ b/mysql-test/suite/parts/r/part_supported_sql_func_myisam.result @@ -5425,7 +5425,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- dayofyear(col1) in partition with coltype char(30) +--- extract(month from col1) in partition with coltype date ------------------------------------------------------------------------- drop table if 
exists t1 ; drop table if exists t2 ; @@ -5434,14 +5434,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with dayofyear(col1) +--- Create tables with extract(month from col1) ------------------------------------------------------------------------- -create table t1 (col1 char(30)) engine='MYISAM' -partition by range(dayofyear(col1)) +create table t1 (col1 date) engine='MYISAM' +partition by range(extract(month from col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 char(30)) engine='MYISAM' -partition by list(dayofyear(col1)) +create table t2 (col1 date) engine='MYISAM' +partition by list(extract(month from col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5449,16 +5449,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 char(30)) engine='MYISAM' -partition by hash(dayofyear(col1)); -create table t4 (colint int, col1 char(30)) engine='MYISAM' +create table t3 (col1 date) engine='MYISAM' +partition by hash(extract(month from col1)); +create table t4 (colint int, col1 date) engine='MYISAM' partition by range(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 char(30)) engine='MYISAM' +create table t5 (colint int, col1 date) engine='MYISAM' partition by list(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5466,42 +5466,42 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 char(30)) engine='MYISAM' +create table t6 (colint int, col1 date) engine='MYISAM' partition by range(colint) -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with dayofyear(col1) +--- Access tables with extract(month from col1) ------------------------------------------------------------------------- insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-01-17'); +insert into t1 values ('2006-02-17'); insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-01-17'); -insert into t2 values ('2006-02-25'); +insert into t2 values ('2006-02-17'); +insert into t2 values ('2006-01-25'); insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-01-17'); -insert into t3 values ('2006-02-25'); +insert into t3 values ('2006-02-17'); +insert into t3 values ('2006-01-25'); load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into 
table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select dayofyear(col1) from t1 order by col1; -dayofyear(col1) -3 -17 +select extract(month from col1) from t1 order by col1; +extract(month from col1) +1 +2 select * from t1 order by col1; col1 2006-01-03 -2006-01-17 +2006-02-17 select * from t2 order by col1; col1 2006-01-03 -2006-01-17 -2006-02-25 +2006-01-25 +2006-02-17 select * from t3 order by col1; col1 2006-01-03 -2006-01-17 -2006-02-25 +2006-01-25 +2006-02-17 select * from t4 order by colint; colint col1 1 2006-02-03 @@ -5528,18 +5528,18 @@ update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; select * from t1 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t2 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t3 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t4 order by colint; colint col1 1 2006-02-03 @@ -5559,7 +5559,7 @@ colint col1 3 2006-01-25 4 2006-02-05 ------------------------------------------------------------------------- ---- Alter tables with dayofyear(col1) +--- Alter tables with extract(month from col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -5574,11 +5574,11 @@ create table t44 engine='MYISAM' as select * from t4; create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(dayofyear(col1)) +partition by range(extract(month from col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(dayofyear(col1)) +partition by list(extract(month from col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5587,15 +5587,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(dayofyear(col1)); +partition by hash(extract(month from col1)); alter table t44 partition by range(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(dayofyear(col1)) subpartitions 2 +subpartition by hash(extract(month from col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5605,22 +5605,22 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t22 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t33 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t44 order by colint; colint col1 1 2006-02-03 @@ -5647,19 +5647,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * 
from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 alter table t55 partition by list(colint) -subpartition by hash(dayofyear(col1)) subpartitions 5 +subpartition by hash(extract(month from col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5671,10 +5671,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` char(30) DEFAULT NULL + `col1` date DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (dayofyear(col1)) +SUBPARTITION BY HASH (extract(month from col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -5699,7 +5699,7 @@ colint col1 4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 @@ -5718,7 +5718,7 @@ colint col1 4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (dayofyear('2006-12-25')), +(partition p0 values less than (extract(year from '1998-11-23')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 @@ -5727,73 +5727,78 @@ colint col1 3 2006-01-25 4 2006-02-05 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with dayofyear(col1) +--- Delete rows and partitions of tables with extract(month from col1) ------------------------------------------------------------------------- -delete from t1 where col1='2006-01-17'; -delete from t2 where col1='2006-01-17'; -delete from t3 where col1='2006-01-17'; -delete from t4 where col1='2006-01-17'; -delete from t5 where col1='2006-01-17'; -delete from t6 where col1='2006-01-17'; +delete from t1 where col1='2006-02-17'; +delete from t2 where col1='2006-02-17'; +delete from t3 where col1='2006-02-17'; +delete from t4 where col1='2006-02-17'; +delete from t5 where col1='2006-02-17'; +delete from t6 where col1='2006-02-17'; select * from t1 order by col1; col1 2006-02-05 select * from t2 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t3 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t4 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 select * from t5 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-01-17'); -insert into t3 values ('2006-01-17'); -insert into t4 values (60,'2006-01-17'); -insert into t5 values (60,'2006-01-17'); -insert into t6 values (60,'2006-01-17'); +insert into t1 values ('2006-02-17'); +insert into t2 values ('2006-02-17'); +insert into t3 values ('2006-02-17'); +insert into t4 values (60,'2006-02-17'); +insert into t5 values (60,'2006-02-17'); +insert into t6 values (60,'2006-02-17'); select * from t1 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t2 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 
-2006-02-25 +2006-02-17 select * from t3 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t4 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t5 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t6 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -5801,94 +5806,94 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -2006-01-17 -2006-02-05 select * from t2 order by col1; col1 -2006-01-17 -2006-02-05 -2006-02-25 select * from t3 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t4 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t5 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t6 order by colint; colint col1 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with dayofyear(col1) +--- Delete rows and partitions of tables with extract(month from col1) ------------------------------------------------------------------------- -delete from t11 where col1='2006-01-17'; -delete from t22 where col1='2006-01-17'; -delete from t33 where col1='2006-01-17'; -delete from t44 where col1='2006-01-17'; -delete from t55 where col1='2006-01-17'; -delete from t66 where col1='2006-01-17'; +delete from t11 where col1='2006-02-17'; +delete from t22 where col1='2006-02-17'; +delete from t33 where col1='2006-02-17'; +delete from t44 where col1='2006-02-17'; +delete from t55 where col1='2006-02-17'; +delete from t66 where col1='2006-02-17'; select * from t11 order by col1; col1 2006-02-05 select * from t22 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t33 order by col1; col1 +2006-01-25 2006-02-05 -2006-02-25 select * from t44 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 select * from t55 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -insert into t11 values ('2006-01-17'); -insert into t22 values ('2006-01-17'); -insert into t33 values ('2006-01-17'); -insert into t44 values (60,'2006-01-17'); -insert into t55 values (60,'2006-01-17'); -insert into t66 values (60,'2006-01-17'); +insert into t11 values ('2006-02-17'); +insert into t22 values ('2006-02-17'); +insert into t33 values ('2006-02-17'); +insert into t44 values (60,'2006-02-17'); +insert into t55 values (60,'2006-02-17'); +insert into t66 values (60,'2006-02-17'); select * from t11 order by col1; col1 -2006-01-17 2006-02-05 +2006-02-17 select * from t22 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t33 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t44 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t55 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 select * from t66 order by colint; colint col1 1 2006-02-03 +2 2006-01-17 3 2006-01-25 4 2006-02-05 -60 2006-01-17 +60 2006-02-17 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 
drop partition p0; @@ -5896,24 +5901,19 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -2006-01-17 -2006-02-05 select * from t22 order by col1; col1 -2006-01-17 -2006-02-05 -2006-02-25 select * from t33 order by col1; col1 -2006-01-17 +2006-01-25 2006-02-05 -2006-02-25 +2006-02-17 select * from t44 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t55 order by colint; colint col1 -60 2006-01-17 +60 2006-02-17 select * from t66 order by colint; colint col1 ------------------------- @@ -5932,7 +5932,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- extract(month from col1) in partition with coltype date +--- hour(col1) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -5941,14 +5941,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with extract(month from col1) +--- Create tables with hour(col1) ------------------------------------------------------------------------- -create table t1 (col1 date) engine='MYISAM' -partition by range(extract(month from col1)) +create table t1 (col1 time) engine='MYISAM' +partition by range(hour(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 date) engine='MYISAM' -partition by list(extract(month from col1)) +create table t2 (col1 time) engine='MYISAM' +partition by list(hour(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5956,16 +5956,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 date) engine='MYISAM' -partition by hash(extract(month from col1)); -create table t4 (colint int, col1 date) engine='MYISAM' +create table t3 (col1 time) engine='MYISAM' +partition by hash(hour(col1)); +create table t4 (colint int, col1 time) engine='MYISAM' partition by range(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='MYISAM' +create table t5 (colint int, col1 time) engine='MYISAM' partition by list(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -5973,100 +5973,100 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 date) engine='MYISAM' +create table t6 (colint int, col1 time) engine='MYISAM' partition by range(colint) -(partition p0 values less than (extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); 
------------------------------------------------------------------------- ---- Access tables with extract(month from col1) +--- Access tables with hour(col1) ------------------------------------------------------------------------- -insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-02-17'); -insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-02-17'); -insert into t2 values ('2006-01-25'); -insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-02-17'); -insert into t3 values ('2006-01-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select extract(month from col1) from t1 order by col1; -extract(month from col1) -1 -2 +insert into t1 values ('09:09'); +insert into t1 values ('14:30'); +insert into t2 values ('09:09'); +insert into t2 values ('14:30'); +insert into t2 values ('21:59'); +insert into t3 values ('09:09'); +insert into t3 values ('14:30'); +insert into t3 values ('21:59'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; +select hour(col1) from t1 order by col1; +hour(col1) +9 +14 select * from t1 order by col1; col1 -2006-01-03 -2006-02-17 +09:09:00 +14:30:00 select * from t2 order by col1; col1 -2006-01-03 -2006-01-25 -2006-02-17 +09:09:00 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-03 -2006-01-25 -2006-02-17 +09:09:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-02-05' where col1='2006-01-03'; -update t2 set col1='2006-02-05' where col1='2006-01-03'; -update t3 set col1='2006-02-05' where col1='2006-01-03'; -update t4 set col1='2006-02-05' where col1='2006-01-03'; -update t5 set col1='2006-02-05' where col1='2006-01-03'; -update t6 set col1='2006-02-05' where col1='2006-01-03'; +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +update t1 set col1='10:30' where col1='09:09'; +update t2 set col1='10:30' where col1='09:09'; +update t3 set col1='10:30' where col1='09:09'; +update t4 set col1='10:30' where col1='09:09'; +update t5 set col1='10:30' where col1='09:09'; +update t6 set col1='10:30' where col1='09:09'; select * from t1 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t2 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 
select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with extract(month from col1) +--- Alter tables with hour(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -6081,11 +6081,11 @@ create table t44 engine='MYISAM' as select * from t4; create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(extract(month from col1)) +partition by range(hour(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(extract(month from col1)) +partition by list(hour(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6094,15 +6094,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(extract(month from col1)); +partition by hash(hour(col1)); alter table t44 partition by range(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(extract(month from col1)) subpartitions 2 +subpartition by hash(hour(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6112,40 +6112,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t22 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 --------------------------- ---- some alter table begin --------------------------- @@ -6154,19 +6154,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 alter table t55 partition by list(colint) -subpartition by hash(extract(month from col1)) subpartitions 5 +subpartition by hash(hour(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in 
(11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6178,10 +6178,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL + `col1` time DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (extract(month from col1)) +SUBPARTITION BY HASH (hour(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -6191,121 +6191,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (extract(year from '1998-11-23')), +(partition p0 values less than (hour('18:30')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with extract(month from col1) +--- Delete rows and partitions of tables with hour(col1) ------------------------------------------------------------------------- -delete from t1 where col1='2006-02-17'; -delete from t2 where col1='2006-02-17'; -delete from t3 where col1='2006-02-17'; -delete from t4 where col1='2006-02-17'; -delete from t5 where col1='2006-02-17'; -delete from t6 where col1='2006-02-17'; +delete from t1 where col1='14:30'; +delete from t2 where col1='14:30'; +delete from t3 where col1='14:30'; +delete from t4 where col1='14:30'; +delete from t5 where col1='14:30'; +delete from t6 where col1='14:30'; select * from t1 order by col1; col1 -2006-02-05 +10:30:00 select * from t2 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-02-17'); -insert into t2 values ('2006-02-17'); -insert into t3 values ('2006-02-17'); -insert into t4 values (60,'2006-02-17'); -insert into t5 values (60,'2006-02-17'); -insert into t6 values (60,'2006-02-17'); +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t1 
values ('14:30'); +insert into t2 values ('14:30'); +insert into t3 values ('14:30'); +insert into t4 values (60,'14:30'); +insert into t5 values (60,'14:30'); +insert into t6 values (60,'14:30'); select * from t1 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t2 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -6315,92 +6315,95 @@ select * from t1 order by col1; col1 select * from t2 order by col1; col1 +14:30:00 +21:59:00 select * from t3 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t4 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t5 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t6 order by colint; colint col1 +60 14:30:00 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with extract(month from col1) +--- Delete rows and partitions of tables with hour(col1) ------------------------------------------------------------------------- -delete from t11 where col1='2006-02-17'; -delete from t22 where col1='2006-02-17'; -delete from t33 where col1='2006-02-17'; -delete from t44 where col1='2006-02-17'; -delete from t55 where col1='2006-02-17'; -delete from t66 where col1='2006-02-17'; +delete from t11 where col1='14:30'; +delete from t22 where col1='14:30'; +delete from t33 where col1='14:30'; +delete from t44 where col1='14:30'; +delete from t55 where col1='14:30'; +delete from t66 where col1='14:30'; select * from t11 order by col1; col1 -2006-02-05 +10:30:00 select * from t22 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 +10:30:00 +21:59:00 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-02-17'); -insert into t22 values ('2006-02-17'); -insert into t33 values ('2006-02-17'); -insert into t44 values (60,'2006-02-17'); -insert into t55 values (60,'2006-02-17'); -insert into t66 values (60,'2006-02-17'); +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t11 values ('14:30'); +insert into t22 values ('14:30'); +insert into t33 values ('14:30'); +insert into t44 values (60,'14:30'); +insert into t55 values (60,'14:30'); +insert into t66 values (60,'14:30'); select * from t11 order by col1; col1 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 select * from t22 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 
+10:30:00 +14:30:00 +21:59:00 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-02-17 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:00 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -6410,19 +6413,22 @@ select * from t11 order by col1; col1 select * from t22 order by col1; col1 +14:30:00 +21:59:00 select * from t33 order by col1; col1 -2006-01-25 -2006-02-05 -2006-02-17 +10:30:00 +14:30:00 +21:59:00 select * from t44 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t55 order by colint; colint col1 -60 2006-02-17 +60 14:30:00 select * from t66 order by colint; colint col1 +60 14:30:00 ------------------------- ---- some alter table end ------------------------- @@ -6439,7 +6445,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- hour(col1) in partition with coltype time +--- microsecond(col1) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -6448,14 +6454,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with hour(col1) +--- Create tables with microsecond(col1) ------------------------------------------------------------------------- create table t1 (col1 time) engine='MYISAM' -partition by range(hour(col1)) +partition by range(microsecond(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); create table t2 (col1 time) engine='MYISAM' -partition by list(hour(col1)) +partition by list(microsecond(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6464,15 +6470,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t3 (col1 time) engine='MYISAM' -partition by hash(hour(col1)); +partition by hash(microsecond(col1)); create table t4 (colint int, col1 time) engine='MYISAM' partition by range(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); create table t5 (colint int, col1 time) engine='MYISAM' partition by list(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6482,41 +6488,41 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t6 (colint int, col1 time) engine='MYISAM' partition by range(colint) -(partition p0 values less than (hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values 
less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with hour(col1) +--- Access tables with microsecond(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09'); -insert into t1 values ('14:30'); -insert into t2 values ('09:09'); -insert into t2 values ('14:30'); -insert into t2 values ('21:59'); -insert into t3 values ('09:09'); -insert into t3 values ('14:30'); -insert into t3 values ('21:59'); +insert into t1 values ('09:09:15.000002'); +insert into t1 values ('04:30:01.000018'); +insert into t2 values ('09:09:15.000002'); +insert into t2 values ('04:30:01.000018'); +insert into t2 values ('00:59:22.000024'); +insert into t3 values ('09:09:15.000002'); +insert into t3 values ('04:30:01.000018'); +insert into t3 values ('00:59:22.000024'); load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select hour(col1) from t1 order by col1; -hour(col1) -9 -14 +select microsecond(col1) from t1 order by col1; +microsecond(col1) +0 +0 select * from t1 order by col1; col1 -09:09:00 -14:30:00 +04:30:01 +09:09:15 select * from t2 order by col1; col1 -09:09:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +09:09:15 select * from t3 order by col1; col1 -09:09:00 -14:30:00 -21:59:00 -select * from t4 order by colint; +00:59:22 +04:30:01 +09:09:15 +select * from t4 order by colint; colint col1 1 09:09:15 2 04:30:01 @@ -6534,46 +6540,46 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 -update t1 set col1='10:30' where col1='09:09'; -update t2 set col1='10:30' where col1='09:09'; -update t3 set col1='10:30' where col1='09:09'; -update t4 set col1='10:30' where col1='09:09'; -update t5 set col1='10:30' where col1='09:09'; -update t6 set col1='10:30' where col1='09:09'; +update t1 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t2 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t3 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t4 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t5 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t6 set col1='05:30:34.000037' where col1='09:09:15.000002'; select * from t1 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t2 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t3 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t4 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t6 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with hour(col1) +--- Alter tables with microsecond(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -6588,11 +6594,11 @@ create table t44 engine='MYISAM' as select * from t4; create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(hour(col1)) 
+partition by range(microsecond(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(hour(col1)) +partition by list(microsecond(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6601,15 +6607,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(hour(col1)); +partition by hash(microsecond(col1)); alter table t44 partition by range(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(hour(col1)) subpartitions 2 +subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6619,37 +6625,37 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t22 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t33 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t44 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 @@ -6661,19 +6667,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 alter table t55 partition by list(colint) -subpartition by hash(hour(col1)) subpartitions 5 +subpartition by hash(microsecond(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6688,7 +6694,7 @@ t55 CREATE TABLE `t55` ( `col1` time DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (hour(col1)) +SUBPARTITION BY HASH (microsecond(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -6698,7 +6704,7 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ select * from t55 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 @@ -6707,17 +6713,17 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than 
(hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 @@ -6726,93 +6732,88 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (hour('18:30')), +(partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 +1 05:30:34 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with hour(col1) +--- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30'; -delete from t2 where col1='14:30'; -delete from t3 where col1='14:30'; -delete from t4 where col1='14:30'; -delete from t5 where col1='14:30'; -delete from t6 where col1='14:30'; +delete from t1 where col1='04:30:01.000018'; +delete from t2 where col1='04:30:01.000018'; +delete from t3 where col1='04:30:01.000018'; +delete from t4 where col1='04:30:01.000018'; +delete from t5 where col1='04:30:01.000018'; +delete from t6 where col1='04:30:01.000018'; select * from t1 order by col1; col1 -10:30:00 +05:30:34 select * from t2 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t3 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -insert into t1 values ('14:30'); -insert into t2 values ('14:30'); -insert into t3 values ('14:30'); -insert into t4 values (60,'14:30'); -insert into t5 values (60,'14:30'); -insert into t6 values (60,'14:30'); +insert into t1 values ('04:30:01.000018'); +insert into t2 values ('04:30:01.000018'); +insert into t3 values ('04:30:01.000018'); +insert into t4 values (60,'04:30:01.000018'); +insert into t5 values (60,'04:30:01.000018'); +insert into t6 values (60,'04:30:01.000018'); select * from t1 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t2 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t3 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -6822,95 +6823,88 @@ select * from t1 order by col1; col1 select * from t2 order by col1; col1 -14:30:00 -21:59:00 select * from t3 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t4 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t5 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t6 order by 
colint; colint col1 -60 14:30:00 +60 04:30:01 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with hour(col1) +--- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30'; -delete from t22 where col1='14:30'; -delete from t33 where col1='14:30'; -delete from t44 where col1='14:30'; -delete from t55 where col1='14:30'; -delete from t66 where col1='14:30'; +delete from t11 where col1='04:30:01.000018'; +delete from t22 where col1='04:30:01.000018'; +delete from t33 where col1='04:30:01.000018'; +delete from t44 where col1='04:30:01.000018'; +delete from t55 where col1='04:30:01.000018'; +delete from t66 where col1='04:30:01.000018'; select * from t11 order by col1; col1 -10:30:00 +05:30:34 select * from t22 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t33 order by col1; col1 -10:30:00 -21:59:00 +00:59:22 +05:30:34 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -insert into t11 values ('14:30'); -insert into t22 values ('14:30'); -insert into t33 values ('14:30'); -insert into t44 values (60,'14:30'); -insert into t55 values (60,'14:30'); -insert into t66 values (60,'14:30'); +insert into t11 values ('04:30:01.000018'); +insert into t22 values ('04:30:01.000018'); +insert into t33 values ('04:30:01.000018'); +insert into t44 values (60,'04:30:01.000018'); +insert into t55 values (60,'04:30:01.000018'); +insert into t66 values (60,'04:30:01.000018'); select * from t11 order by col1; col1 -10:30:00 -14:30:00 +04:30:01 +05:30:34 select * from t22 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t33 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 +1 05:30:34 3 00:59:22 4 05:30:34 -60 14:30:00 +60 04:30:01 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -6920,22 +6914,20 @@ select * from t11 order by col1; col1 select * from t22 order by col1; col1 -14:30:00 -21:59:00 select * from t33 order by col1; col1 -10:30:00 -14:30:00 -21:59:00 +00:59:22 +04:30:01 +05:30:34 select * from t44 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t55 order by colint; colint col1 -60 14:30:00 +60 04:30:01 select * from t66 order by colint; colint col1 -60 14:30:00 +60 04:30:01 ------------------------- ---- some alter table end ------------------------- @@ -6952,7 +6944,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- microsecond(col1) in partition with coltype time +--- minute(col1) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -6961,14 +6953,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; 
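(Aside: the expected-result blocks that follow all repeat the same create/insert/verify pattern, one time function at a time. A minimal, self-contained sketch of that pattern is given here; t_sketch is a hypothetical table name, not part of the test suite, and the statements simply assume a server that accepts MINUTE() in partitioning expressions, as these tests themselves do.)
-- Illustrative only: t_sketch does not appear in the test files.
create table t_sketch (col1 time) engine='MYISAM'
partition by range(minute(col1))
(partition p0 values less than (15),
 partition p1 values less than maxvalue);
insert into t_sketch values ('09:09:15'), ('14:30:45');
-- minute('09:09:15') = 9 -> p0; minute('14:30:45') = 30 -> p1
select minute(col1), col1 from t_sketch order by col1;
drop table t_sketch;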
------------------------------------------------------------------------- ---- Create tables with microsecond(col1) +--- Create tables with minute(col1) ------------------------------------------------------------------------- create table t1 (col1 time) engine='MYISAM' -partition by range(microsecond(col1)) +partition by range(minute(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); create table t2 (col1 time) engine='MYISAM' -partition by list(microsecond(col1)) +partition by list(minute(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6977,15 +6969,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t3 (col1 time) engine='MYISAM' -partition by hash(microsecond(col1)); +partition by hash(minute(col1)); create table t4 (colint int, col1 time) engine='MYISAM' partition by range(colint) -subpartition by hash(microsecond(col1)) subpartitions 2 +subpartition by hash(minute(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); create table t5 (colint int, col1 time) engine='MYISAM' partition by list(colint) -subpartition by hash(microsecond(col1)) subpartitions 2 +subpartition by hash(minute(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -6995,40 +6987,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t6 (colint int, col1 time) engine='MYISAM' partition by range(colint) -(partition p0 values less than (microsecond('10:30:10.000010')), +(partition p0 values less than (minute('18:30')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with microsecond(col1) +--- Access tables with minute(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09:15.000002'); -insert into t1 values ('04:30:01.000018'); -insert into t2 values ('09:09:15.000002'); -insert into t2 values ('04:30:01.000018'); -insert into t2 values ('00:59:22.000024'); -insert into t3 values ('09:09:15.000002'); -insert into t3 values ('04:30:01.000018'); -insert into t3 values ('00:59:22.000024'); +insert into t1 values ('09:09:15'); +insert into t1 values ('14:30:45'); +insert into t2 values ('09:09:15'); +insert into t2 values ('14:30:45'); +insert into t2 values ('21:59:22'); +insert into t3 values ('09:09:15'); +insert into t3 values ('14:30:45'); +insert into t3 values ('21:59:22'); load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select microsecond(col1) from t1 order by col1; -microsecond(col1) -0 -0 +select minute(col1) from t1 order by col1; +minute(col1) +9 +30 select * from t1 order by col1; col1 -04:30:01 09:09:15 +14:30:45 select * from t2 order by col1; col1 -00:59:22 -04:30:01 09:09:15 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -00:59:22 -04:30:01 09:09:15 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 1 09:09:15 @@ -7047,46 +7039,46 @@ colint col1 
2 04:30:01 3 00:59:22 4 05:30:34 -update t1 set col1='05:30:34.000037' where col1='09:09:15.000002'; -update t2 set col1='05:30:34.000037' where col1='09:09:15.000002'; -update t3 set col1='05:30:34.000037' where col1='09:09:15.000002'; -update t4 set col1='05:30:34.000037' where col1='09:09:15.000002'; -update t5 set col1='05:30:34.000037' where col1='09:09:15.000002'; -update t6 set col1='05:30:34.000037' where col1='09:09:15.000002'; +update t1 set col1='10:24:23' where col1='09:09:15'; +update t2 set col1='10:24:23' where col1='09:09:15'; +update t3 set col1='10:24:23' where col1='09:09:15'; +update t4 set col1='10:24:23' where col1='09:09:15'; +update t5 set col1='10:24:23' where col1='09:09:15'; +update t6 set col1='10:24:23' where col1='09:09:15'; select * from t1 order by col1; col1 -04:30:01 -05:30:34 +10:24:23 +14:30:45 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 select * from t6 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with microsecond(col1) +--- Alter tables with minute(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -7101,11 +7093,11 @@ create table t44 engine='MYISAM' as select * from t4; create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(microsecond(col1)) +partition by range(minute(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(microsecond(col1)) +partition by list(minute(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7114,15 +7106,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(microsecond(col1)); +partition by hash(minute(col1)); alter table t44 partition by range(colint) -subpartition by hash(microsecond(col1)) subpartitions 2 +subpartition by hash(minute(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(microsecond(col1)) subpartitions 2 +subpartition by hash(minute(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7132,37 +7124,37 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (microsecond('10:30:10.000010')), +(partition p0 values less than (minute('18:30')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +10:24:23 +14:30:45 select * from t22 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 
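(Aside: the t44/t55 expectations around this point come from composite partitioning, RANGE on the integer column with HASH subpartitions on the time function. A hedged, self-contained sketch of that layout, again using a hypothetical table name:)
-- Illustrative only: t_sub does not appear in the test files.
create table t_sub (colint int, col1 time) engine='MYISAM'
partition by range(colint)
subpartition by hash(minute(col1)) subpartitions 2
(partition p0 values less than (15),
 partition p1 values less than maxvalue);
-- colint selects the range partition; minute(col1) hashes the row into one of its two subpartitions
insert into t_sub values (1,'09:09:15'), (60,'14:30:45');
select * from t_sub order by colint;
drop table t_sub;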
select * from t44 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 select * from t66 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7174,19 +7166,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +10:24:23 +14:30:45 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +10:24:23 +14:30:45 alter table t55 partition by list(colint) -subpartition by hash(microsecond(col1)) subpartitions 5 +subpartition by hash(minute(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7201,7 +7193,7 @@ t55 CREATE TABLE `t55` ( `col1` time DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (microsecond(col1)) +SUBPARTITION BY HASH (minute(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -7211,7 +7203,7 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ select * from t55 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7220,17 +7212,17 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (microsecond('10:30:10.000010')), +(partition p0 values less than (minute('18:30')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7239,88 +7231,93 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (microsecond('10:30:10.000010')), +(partition p0 values less than (minute('18:30')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 +1 10:24:23 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with microsecond(col1) +--- Delete rows and partitions of tables with minute(col1) ------------------------------------------------------------------------- -delete from t1 where col1='04:30:01.000018'; -delete from t2 where col1='04:30:01.000018'; -delete from t3 where col1='04:30:01.000018'; -delete from t4 where col1='04:30:01.000018'; -delete from t5 where col1='04:30:01.000018'; -delete from t6 where col1='04:30:01.000018'; +delete from t1 where col1='14:30:45'; +delete from t2 where col1='14:30:45'; +delete from t3 where col1='14:30:45'; +delete from t4 where col1='14:30:45'; +delete from t5 where col1='14:30:45'; +delete from t6 where col1='14:30:45'; select * from t1 order by col1; col1 -05:30:34 +10:24:23 select * from t2 order by col1; col1 -00:59:22 -05:30:34 +10:24:23 +21:59:22 select * from t3 order 
by col1; col1 -00:59:22 -05:30:34 +10:24:23 +21:59:22 select * from t4 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -insert into t1 values ('04:30:01.000018'); -insert into t2 values ('04:30:01.000018'); -insert into t3 values ('04:30:01.000018'); -insert into t4 values (60,'04:30:01.000018'); -insert into t5 values (60,'04:30:01.000018'); -insert into t6 values (60,'04:30:01.000018'); +insert into t1 values ('14:30:45'); +insert into t2 values ('14:30:45'); +insert into t3 values ('14:30:45'); +insert into t4 values (60,'14:30:45'); +insert into t5 values (60,'14:30:45'); +insert into t6 values (60,'14:30:45'); select * from t1 order by col1; col1 -04:30:01 -05:30:34 +10:24:23 +14:30:45 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 select * from t5 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 select * from t6 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -7328,90 +7325,100 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 +10:24:23 +14:30:45 select * from t2 order by col1; col1 +10:24:23 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -60 04:30:01 +60 14:30:45 select * from t5 order by colint; colint col1 -60 04:30:01 +60 14:30:45 select * from t6 order by colint; colint col1 -60 04:30:01 +60 14:30:45 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with microsecond(col1) +--- Delete rows and partitions of tables with minute(col1) ------------------------------------------------------------------------- -delete from t11 where col1='04:30:01.000018'; -delete from t22 where col1='04:30:01.000018'; -delete from t33 where col1='04:30:01.000018'; -delete from t44 where col1='04:30:01.000018'; -delete from t55 where col1='04:30:01.000018'; -delete from t66 where col1='04:30:01.000018'; +delete from t11 where col1='14:30:45'; +delete from t22 where col1='14:30:45'; +delete from t33 where col1='14:30:45'; +delete from t44 where col1='14:30:45'; +delete from t55 where col1='14:30:45'; +delete from t66 where col1='14:30:45'; select * from t11 order by col1; col1 -05:30:34 +10:24:23 select * from t22 order by col1; col1 -00:59:22 -05:30:34 +10:24:23 +21:59:22 select * from t33 order by col1; col1 -00:59:22 -05:30:34 +10:24:23 +21:59:22 select * from t44 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -insert into t11 values ('04:30:01.000018'); -insert into t22 values ('04:30:01.000018'); -insert into t33 values ('04:30:01.000018'); -insert into t44 values (60,'04:30:01.000018'); -insert into t55 values (60,'04:30:01.000018'); -insert into t66 values 
(60,'04:30:01.000018'); +insert into t11 values ('14:30:45'); +insert into t22 values ('14:30:45'); +insert into t33 values ('14:30:45'); +insert into t44 values (60,'14:30:45'); +insert into t55 values (60,'14:30:45'); +insert into t66 values (60,'14:30:45'); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +10:24:23 +14:30:45 select * from t22 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 select * from t55 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 select * from t66 order by colint; colint col1 -1 05:30:34 +1 10:24:23 +2 04:30:01 3 00:59:22 4 05:30:34 -60 04:30:01 +60 14:30:45 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -7419,22 +7426,27 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 +10:24:23 +14:30:45 select * from t22 order by col1; col1 +10:24:23 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +10:24:23 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -60 04:30:01 +60 14:30:45 select * from t55 order by colint; colint col1 -60 04:30:01 +60 14:30:45 select * from t66 order by colint; colint col1 -60 04:30:01 +60 14:30:45 ------------------------- ---- some alter table end ------------------------- @@ -7451,7 +7463,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- minute(col1) in partition with coltype time +--- second(col1) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -7460,14 +7472,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with minute(col1) +--- Create tables with second(col1) ------------------------------------------------------------------------- create table t1 (col1 time) engine='MYISAM' -partition by range(minute(col1)) +partition by range(second(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); create table t2 (col1 time) engine='MYISAM' -partition by list(minute(col1)) +partition by list(second(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7476,15 +7488,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t3 (col1 time) engine='MYISAM' -partition by hash(minute(col1)); +partition by hash(second(col1)); create table t4 (colint int, col1 time) engine='MYISAM' partition by range(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); create table t5 (colint int, col1 time) engine='MYISAM' partition by list(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 
values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7494,39 +7506,39 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); create table t6 (colint int, col1 time) engine='MYISAM' partition by range(colint) -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with minute(col1) +--- Access tables with second(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09:15'); -insert into t1 values ('14:30:45'); -insert into t2 values ('09:09:15'); -insert into t2 values ('14:30:45'); +insert into t1 values ('09:09:09'); +insert into t1 values ('14:30:20'); +insert into t2 values ('09:09:09'); +insert into t2 values ('14:30:20'); insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:15'); -insert into t3 values ('14:30:45'); +insert into t3 values ('09:09:09'); +insert into t3 values ('14:30:20'); insert into t3 values ('21:59:22'); load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select minute(col1) from t1 order by col1; -minute(col1) +select second(col1) from t1 order by col1; +second(col1) 9 -30 +20 select * from t1 order by col1; col1 -09:09:15 -14:30:45 +09:09:09 +14:30:20 select * from t2 order by col1; col1 -09:09:15 -14:30:45 +09:09:09 +14:30:20 21:59:22 select * from t3 order by col1; col1 -09:09:15 -14:30:45 +09:09:09 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 @@ -7546,46 +7558,46 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 -update t1 set col1='10:24:23' where col1='09:09:15'; -update t2 set col1='10:24:23' where col1='09:09:15'; -update t3 set col1='10:24:23' where col1='09:09:15'; -update t4 set col1='10:24:23' where col1='09:09:15'; -update t5 set col1='10:24:23' where col1='09:09:15'; -update t6 set col1='10:24:23' where col1='09:09:15'; +update t1 set col1='10:22:33' where col1='09:09:09'; +update t2 set col1='10:22:33' where col1='09:09:09'; +update t3 set col1='10:22:33' where col1='09:09:09'; +update t4 set col1='10:22:33' where col1='09:09:09'; +update t5 set col1='10:22:33' where col1='09:09:09'; +update t6 set col1='10:22:33' where col1='09:09:09'; select * from t1 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t2 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t3 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t6 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with minute(col1) +--- Alter tables with second(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -7600,11 +7612,11 @@ create table t44 engine='MYISAM' as select * from t4; 
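(Aside: the "create table ... as select" copies in this hunk start out unpartitioned, are then given a partitioning scheme with ALTER TABLE, and are later reorganized back into a single partition. A small self-contained sketch of that sequence, with a hypothetical table name:)
-- Illustrative only: t_copy does not appear in the test files.
create table t_copy (col1 time) engine='MYISAM';
insert into t_copy values ('09:09:09'), ('14:30:20');
-- re-partition the existing, unpartitioned table in place
alter table t_copy
partition by range(second(col1))
(partition p0 values less than (15),
 partition p1 values less than maxvalue);
-- collapse both range partitions back into one catch-all partition
alter table t_copy reorganize partition p0,p1 into
(partition s1 values less than maxvalue);
drop table t_copy;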
create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(minute(col1)) +partition by range(second(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(minute(col1)) +partition by list(second(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7613,15 +7625,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(minute(col1)); +partition by hash(second(col1)); alter table t44 partition by range(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(minute(col1)) subpartitions 2 +subpartition by hash(second(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7631,37 +7643,37 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t22 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t33 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t44 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7673,19 +7685,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 alter table t55 partition by list(colint) -subpartition by hash(minute(col1)) subpartitions 5 +subpartition by hash(second(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7700,7 +7712,7 @@ t55 CREATE TABLE `t55` ( `col1` time DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (minute(col1)) +SUBPARTITION BY HASH (second(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -7710,7 +7722,7 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7719,17 +7731,17 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 
3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 @@ -7738,93 +7750,93 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (minute('18:30')), +(partition p0 values less than (second('18:30:14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with minute(col1) +--- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30:45'; -delete from t2 where col1='14:30:45'; -delete from t3 where col1='14:30:45'; -delete from t4 where col1='14:30:45'; -delete from t5 where col1='14:30:45'; -delete from t6 where col1='14:30:45'; +delete from t1 where col1='14:30:20'; +delete from t2 where col1='14:30:20'; +delete from t3 where col1='14:30:20'; +delete from t4 where col1='14:30:20'; +delete from t5 where col1='14:30:20'; +delete from t6 where col1='14:30:20'; select * from t1 order by col1; col1 -10:24:23 +10:22:33 select * from t2 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t3 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t4 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t5 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -insert into t1 values ('14:30:45'); -insert into t2 values ('14:30:45'); -insert into t3 values ('14:30:45'); -insert into t4 values (60,'14:30:45'); -insert into t5 values (60,'14:30:45'); -insert into t6 values (60,'14:30:45'); +insert into t1 values ('14:30:20'); +insert into t2 values ('14:30:20'); +insert into t3 values ('14:30:20'); +insert into t4 values (60,'14:30:20'); +insert into t5 values (60,'14:30:20'); +insert into t6 values (60,'14:30:20'); select * from t1 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t2 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t3 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t5 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t6 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -7832,100 +7844,100 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t2 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t3 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t4 order by colint; colint col1 -60 14:30:45 +60 
14:30:20 select * from t5 order by colint; colint col1 -60 14:30:45 +60 14:30:20 select * from t6 order by colint; colint col1 -60 14:30:45 +60 14:30:20 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with minute(col1) +--- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30:45'; -delete from t22 where col1='14:30:45'; -delete from t33 where col1='14:30:45'; -delete from t44 where col1='14:30:45'; -delete from t55 where col1='14:30:45'; -delete from t66 where col1='14:30:45'; +delete from t11 where col1='14:30:20'; +delete from t22 where col1='14:30:20'; +delete from t33 where col1='14:30:20'; +delete from t44 where col1='14:30:20'; +delete from t55 where col1='14:30:20'; +delete from t66 where col1='14:30:20'; select * from t11 order by col1; col1 -10:24:23 +10:22:33 select * from t22 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t33 order by col1; col1 -10:24:23 +10:22:33 21:59:22 select * from t44 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -insert into t11 values ('14:30:45'); -insert into t22 values ('14:30:45'); -insert into t33 values ('14:30:45'); -insert into t44 values (60,'14:30:45'); -insert into t55 values (60,'14:30:45'); -insert into t66 values (60,'14:30:45'); +insert into t11 values ('14:30:20'); +insert into t22 values ('14:30:20'); +insert into t33 values ('14:30:20'); +insert into t44 values (60,'14:30:20'); +insert into t55 values (60,'14:30:20'); +insert into t66 values (60,'14:30:20'); select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t22 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t33 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t44 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t55 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 select * from t66 order by colint; colint col1 -1 10:24:23 +1 09:09:15 2 04:30:01 3 00:59:22 4 05:30:34 -60 14:30:45 +60 14:30:20 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -7933,27 +7945,27 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 select * from t22 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t33 order by col1; col1 -10:24:23 -14:30:45 +10:22:33 +14:30:20 21:59:22 select * from t44 order by colint; colint col1 -60 14:30:45 +60 14:30:20 select * from t55 order by colint; colint col1 -60 14:30:45 +60 14:30:20 select * from t66 order by colint; colint col1 -60 14:30:45 +60 14:30:20 ------------------------- ---- some alter table end ------------------------- @@ -7970,7 +7982,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- second(col1) in partition with coltype time +--- month(col1) in partition with coltype date ------------------------------------------------------------------------- drop table if exists t1 ; drop table if 
exists t2 ; @@ -7979,14 +7991,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with second(col1) +--- Create tables with month(col1) ------------------------------------------------------------------------- -create table t1 (col1 time) engine='MYISAM' -partition by range(second(col1)) +create table t1 (col1 date) engine='MYISAM' +partition by range(month(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 time) engine='MYISAM' -partition by list(second(col1)) +create table t2 (col1 date) engine='MYISAM' +partition by list(month(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -7994,16 +8006,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 time) engine='MYISAM' -partition by hash(second(col1)); -create table t4 (colint int, col1 time) engine='MYISAM' +create table t3 (col1 date) engine='MYISAM' +partition by hash(month(col1)); +create table t4 (colint int, col1 date) engine='MYISAM' partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 time) engine='MYISAM' +create table t5 (colint int, col1 date) engine='MYISAM' partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8011,100 +8023,100 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 time) engine='MYISAM' +create table t6 (colint int, col1 date) engine='MYISAM' partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with second(col1) +--- Access tables with month(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09:09'); -insert into t1 values ('14:30:20'); -insert into t2 values ('09:09:09'); -insert into t2 values ('14:30:20'); -insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:09'); -insert into t3 values ('14:30:20'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select second(col1) from t1 order by col1; -second(col1) -9 -20 +insert into t1 values ('2006-01-03'); +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-01-03'); +insert into t2 values ('2006-12-17'); +insert into t2 values ('2006-05-25'); +insert into 
t3 values ('2006-01-03'); +insert into t3 values ('2006-12-17'); +insert into t3 values ('2006-05-25'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; +select month(col1) from t1 order by col1; +month(col1) +1 +12 select * from t1 order by col1; col1 -09:09:09 -14:30:20 +2006-01-03 +2006-12-17 select * from t2 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-05-25 +2006-12-17 select * from t3 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-05-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -update t1 set col1='10:22:33' where col1='09:09:09'; -update t2 set col1='10:22:33' where col1='09:09:09'; -update t3 set col1='10:22:33' where col1='09:09:09'; -update t4 set col1='10:22:33' where col1='09:09:09'; -update t5 set col1='10:22:33' where col1='09:09:09'; -update t6 set col1='10:22:33' where col1='09:09:09'; +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +update t1 set col1='2006-11-06' where col1='2006-01-03'; +update t2 set col1='2006-11-06' where col1='2006-01-03'; +update t3 set col1='2006-11-06' where col1='2006-01-03'; +update t4 set col1='2006-11-06' where col1='2006-01-03'; +update t5 set col1='2006-11-06' where col1='2006-01-03'; +update t6 set col1='2006-11-06' where col1='2006-01-03'; select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Alter tables with second(col1) +--- Alter tables with month(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -8119,11 +8131,11 @@ create table t44 engine='MYISAM' as select * from t4; create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(second(col1)) +partition by range(month(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(second(col1)) +partition by list(month(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8132,15 +8144,15 @@ partition p4 values in 
(41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(second(col1)); +partition by hash(month(col1)); alter table t44 partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(month(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8150,40 +8162,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 --------------------------- ---- some alter table begin --------------------------- @@ -8192,19 +8204,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 5 +subpartition by hash(month(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8216,10 +8228,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL + `col1` date DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (second(col1)) +SUBPARTITION BY HASH (month(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -8229,121 +8241,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less 
than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (month('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with second(col1) +--- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30:20'; -delete from t2 where col1='14:30:20'; -delete from t3 where col1='14:30:20'; -delete from t4 where col1='14:30:20'; -delete from t5 where col1='14:30:20'; -delete from t6 where col1='14:30:20'; +delete from t1 where col1='2006-12-17'; +delete from t2 where col1='2006-12-17'; +delete from t3 where col1='2006-12-17'; +delete from t4 where col1='2006-12-17'; +delete from t5 where col1='2006-12-17'; +delete from t6 where col1='2006-12-17'; select * from t1 order by col1; col1 -10:22:33 +2006-11-06 select * from t2 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t3 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t1 values ('14:30:20'); -insert into t2 values ('14:30:20'); -insert into t3 values ('14:30:20'); -insert into t4 values (60,'14:30:20'); -insert into t5 values (60,'14:30:20'); -insert into t6 values (60,'14:30:20'); +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-12-17'); +insert into t3 values ('2006-12-17'); +insert into t4 values (60,'2006-12-17'); +insert into t5 values (60,'2006-12-17'); +insert into t6 values (60,'2006-12-17'); select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -8351,100 +8363,97 @@ 
alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -10:22:33 -14:30:20 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-11-06 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t4 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t5 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t6 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with second(col1) +--- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30:20'; -delete from t22 where col1='14:30:20'; -delete from t33 where col1='14:30:20'; -delete from t44 where col1='14:30:20'; -delete from t55 where col1='14:30:20'; -delete from t66 where col1='14:30:20'; +delete from t11 where col1='2006-12-17'; +delete from t22 where col1='2006-12-17'; +delete from t33 where col1='2006-12-17'; +delete from t44 where col1='2006-12-17'; +delete from t55 where col1='2006-12-17'; +delete from t66 where col1='2006-12-17'; select * from t11 order by col1; col1 -10:22:33 +2006-11-06 select * from t22 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t33 order by col1; col1 -10:22:33 -21:59:22 +2006-05-25 +2006-11-06 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t11 values ('14:30:20'); -insert into t22 values ('14:30:20'); -insert into t33 values ('14:30:20'); -insert into t44 values (60,'14:30:20'); -insert into t55 values (60,'14:30:20'); -insert into t66 values (60,'14:30:20'); +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t11 values ('2006-12-17'); +insert into t22 values ('2006-12-17'); +insert into t33 values ('2006-12-17'); +insert into t44 values (60,'2006-12-17'); +insert into t55 values (60,'2006-12-17'); +insert into t66 values (60,'2006-12-17'); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-11-06 +2006-12-17 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t55 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t66 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -8452,27 +8461,24 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -10:22:33 -14:30:20 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-11-06 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 
-21:59:22 +2006-05-25 +2006-11-06 +2006-12-17 select * from t44 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t55 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t66 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 ------------------------- ---- some alter table end ------------------------- @@ -8489,7 +8495,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- second(col1) in partition with coltype char(30) +--- quarter(col1) in partition with coltype date ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -8498,14 +8504,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with second(col1) +--- Create tables with quarter(col1) ------------------------------------------------------------------------- -create table t1 (col1 char(30)) engine='MYISAM' -partition by range(second(col1)) +create table t1 (col1 date) engine='MYISAM' +partition by range(quarter(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 char(30)) engine='MYISAM' -partition by list(second(col1)) +create table t2 (col1 date) engine='MYISAM' +partition by list(quarter(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8513,16 +8519,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 char(30)) engine='MYISAM' -partition by hash(second(col1)); -create table t4 (colint int, col1 char(30)) engine='MYISAM' +create table t3 (col1 date) engine='MYISAM' +partition by hash(quarter(col1)); +create table t4 (colint int, col1 date) engine='MYISAM' partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 char(30)) engine='MYISAM' +create table t5 (colint int, col1 date) engine='MYISAM' partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8530,100 +8536,100 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 char(30)) engine='MYISAM' +create table t6 (colint int, col1 date) engine='MYISAM' partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with second(col1) +--- Access tables with quarter(col1) ------------------------------------------------------------------------- -insert into t1 values ('09:09:09'); -insert into t1 values ('14:30:20'); -insert into t2 values 
('09:09:09'); -insert into t2 values ('14:30:20'); -insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:09'); -insert into t3 values ('14:30:20'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select second(col1) from t1 order by col1; -second(col1) -9 -20 +insert into t1 values ('2006-01-03'); +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-01-03'); +insert into t2 values ('2006-12-17'); +insert into t2 values ('2006-09-25'); +insert into t3 values ('2006-01-03'); +insert into t3 values ('2006-12-17'); +insert into t3 values ('2006-09-25'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; +select quarter(col1) from t1 order by col1; +quarter(col1) +1 +4 select * from t1 order by col1; col1 -09:09:09 -14:30:20 +2006-01-03 +2006-12-17 select * from t2 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-09-25 +2006-12-17 select * from t3 order by col1; col1 -09:09:09 -14:30:20 -21:59:22 +2006-01-03 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -update t1 set col1='10:22:33' where col1='09:09:09'; -update t2 set col1='10:22:33' where col1='09:09:09'; -update t3 set col1='10:22:33' where col1='09:09:09'; -update t4 set col1='10:22:33' where col1='09:09:09'; -update t5 set col1='10:22:33' where col1='09:09:09'; -update t6 set col1='10:22:33' where col1='09:09:09'; +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +update t1 set col1='2006-07-30' where col1='2006-01-03'; +update t2 set col1='2006-07-30' where col1='2006-01-03'; +update t3 set col1='2006-07-30' where col1='2006-01-03'; +update t4 set col1='2006-07-30' where col1='2006-01-03'; +update t5 set col1='2006-07-30' where col1='2006-01-03'; +update t6 set col1='2006-07-30' where col1='2006-01-03'; select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t6 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 
2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Alter tables with second(col1) +--- Alter tables with quarter(col1) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -8638,11 +8644,11 @@ create table t44 engine='MYISAM' as select * from t4; create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(second(col1)) +partition by range(quarter(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(second(col1)) +partition by list(quarter(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8651,15 +8657,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(second(col1)); +partition by hash(quarter(col1)); alter table t44 partition by range(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 2 +subpartition by hash(quarter(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -8669,40 +8675,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 -select * from t33 order by col1; -col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 +select * from t33 order by col1; +col1 +2006-07-30 +2006-09-25 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 --------------------------- ---- some alter table begin --------------------------- @@ -8711,19 +8717,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 alter table t55 partition by list(colint) -subpartition by hash(second(col1)) subpartitions 5 +subpartition by hash(quarter(col1)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in 
(21,22,23,24,25,26,27,28,29,30), @@ -8735,10 +8741,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` char(30) DEFAULT NULL + `col1` date DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (second(col1)) +SUBPARTITION BY HASH (quarter(col1)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -8748,121 +8754,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 alter table t66 reorganize partition s1 into -(partition p0 values less than (second('18:30:14')), +(partition p0 values less than (quarter('2006-10-14')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with second(col1) +--- Delete rows and partitions of tables with quarter(col1) ------------------------------------------------------------------------- -delete from t1 where col1='14:30:20'; -delete from t2 where col1='14:30:20'; -delete from t3 where col1='14:30:20'; -delete from t4 where col1='14:30:20'; -delete from t5 where col1='14:30:20'; -delete from t6 where col1='14:30:20'; +delete from t1 where col1='2006-12-17'; +delete from t2 where col1='2006-12-17'; +delete from t3 where col1='2006-12-17'; +delete from t4 where col1='2006-12-17'; +delete from t5 where col1='2006-12-17'; +delete from t6 where col1='2006-12-17'; select * from t1 order by col1; col1 -10:22:33 +2006-07-30 select * from t2 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t3 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -insert into t1 values ('14:30:20'); -insert into t2 values ('14:30:20'); -insert into t3 values ('14:30:20'); -insert into t4 values (60,'14:30:20'); -insert into t5 values 
(60,'14:30:20'); -insert into t6 values (60,'14:30:20'); +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t1 values ('2006-12-17'); +insert into t2 values ('2006-12-17'); +insert into t3 values ('2006-12-17'); +insert into t4 values (60,'2006-12-17'); +insert into t5 values (60,'2006-12-17'); +insert into t6 values (60,'2006-12-17'); select * from t1 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t5 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t6 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -8870,100 +8876,96 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -10:22:33 -14:30:20 select * from t2 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 select * from t3 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t4 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t5 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t6 order by colint; colint col1 -60 14:30:20 +4 2006-02-05 +60 2006-12-17 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with second(col1) +--- Delete rows and partitions of tables with quarter(col1) ------------------------------------------------------------------------- -delete from t11 where col1='14:30:20'; -delete from t22 where col1='14:30:20'; -delete from t33 where col1='14:30:20'; -delete from t44 where col1='14:30:20'; -delete from t55 where col1='14:30:20'; -delete from t66 where col1='14:30:20'; +delete from t11 where col1='2006-12-17'; +delete from t22 where col1='2006-12-17'; +delete from t33 where col1='2006-12-17'; +delete from t44 where col1='2006-12-17'; +delete from t55 where col1='2006-12-17'; +delete from t66 where col1='2006-12-17'; select * from t11 order by col1; col1 -10:22:33 +2006-07-30 select * from t22 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t33 order by col1; col1 -10:22:33 -21:59:22 +2006-07-30 +2006-09-25 select * from t44 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -insert into t11 values ('14:30:20'); -insert into t22 values ('14:30:20'); -insert into t33 values ('14:30:20'); -insert into t44 values (60,'14:30:20'); -insert into t55 values (60,'14:30:20'); -insert into t66 values (60,'14:30:20'); +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +insert into t11 values ('2006-12-17'); +insert into 
t22 values ('2006-12-17'); +insert into t33 values ('2006-12-17'); +insert into t44 values (60,'2006-12-17'); +insert into t55 values (60,'2006-12-17'); +insert into t66 values (60,'2006-12-17'); select * from t11 order by col1; col1 -10:22:33 -14:30:20 +2006-07-30 +2006-12-17 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t44 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t55 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 select * from t66 order by colint; colint col1 -1 09:09:15.000002 -2 04:30:01.000018 -3 00:59:22.000024 -4 05:30:34.000037 -60 14:30:20 +1 2006-02-03 +2 2006-01-17 +3 2006-01-25 +4 2006-02-05 +60 2006-12-17 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -8971,27 +8973,23 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -10:22:33 -14:30:20 select * from t22 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 select * from t33 order by col1; col1 -10:22:33 -14:30:20 -21:59:22 +2006-07-30 +2006-09-25 +2006-12-17 select * from t44 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t55 order by colint; colint col1 -60 14:30:20 +60 2006-12-17 select * from t66 order by colint; colint col1 -60 14:30:20 +4 2006-02-05 +60 2006-12-17 ------------------------- ---- some alter table end ------------------------- @@ -9008,7 +9006,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- month(col1) in partition with coltype date +--- time_to_sec(col1)-(time_to_sec(col1)-20) in partition with coltype time ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -9017,14 +9015,14 @@ drop table if exists t4 ; drop table if exists t5 ; drop table if exists t6 ; ------------------------------------------------------------------------- ---- Create tables with month(col1) +--- Create tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -create table t1 (col1 date) engine='MYISAM' -partition by range(month(col1)) +create table t1 (col1 time) engine='MYISAM' +partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 date) engine='MYISAM' -partition by list(month(col1)) +create table t2 (col1 time) engine='MYISAM' +partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9032,16 +9030,16 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 date) engine='MYISAM' -partition by hash(month(col1)); -create table t4 (colint int, col1 date) 
engine='MYISAM' +create table t3 (col1 time) engine='MYISAM' +partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); +create table t4 (colint int, col1 time) engine='MYISAM' partition by range(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='MYISAM' +create table t5 (colint int, col1 time) engine='MYISAM' partition by list(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9049,100 +9047,100 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 date) engine='MYISAM' +create table t6 (colint int, col1 time) engine='MYISAM' partition by range(colint) -(partition p0 values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less than maxvalue); ------------------------------------------------------------------------- ---- Access tables with month(col1) +--- Access tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-12-17'); -insert into t2 values ('2006-05-25'); -insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-12-17'); -insert into t3 values ('2006-05-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select month(col1) from t1 order by col1; -month(col1) -1 -12 +insert into t1 values ('09:09:15'); +insert into t1 values ('14:30:45'); +insert into t2 values ('09:09:15'); +insert into t2 values ('14:30:45'); +insert into t2 values ('21:59:22'); +insert into t3 values ('09:09:15'); +insert into t3 values ('14:30:45'); +insert into t3 values ('21:59:22'); +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; +load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; +select time_to_sec(col1)-(time_to_sec(col1)-20) from t1 order by col1; +time_to_sec(col1)-(time_to_sec(col1)-20) +20 +20 select * from t1 order by col1; col1 -2006-01-03 -2006-12-17 +09:09:15 +14:30:45 select * from t2 order by col1; col1 -2006-01-03 -2006-05-25 -2006-12-17 +09:09:15 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-01-03 -2006-05-25 -2006-12-17 +09:09:15 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 
2006-01-25 -4 2006-02-05 +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-11-06' where col1='2006-01-03'; -update t2 set col1='2006-11-06' where col1='2006-01-03'; -update t3 set col1='2006-11-06' where col1='2006-01-03'; -update t4 set col1='2006-11-06' where col1='2006-01-03'; -update t5 set col1='2006-11-06' where col1='2006-01-03'; -update t6 set col1='2006-11-06' where col1='2006-01-03'; +1 09:09:15 +2 04:30:01 +3 00:59:22 +4 05:30:34 +update t1 set col1='10:33:11' where col1='09:09:15'; +update t2 set col1='10:33:11' where col1='09:09:15'; +update t3 set col1='10:33:11' where col1='09:09:15'; +update t4 set col1='10:33:11' where col1='09:09:15'; +update t5 set col1='10:33:11' where col1='09:09:15'; +update t6 set col1='10:33:11' where col1='09:09:15'; select * from t1 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 select * from t2 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t6 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Alter tables with month(col1) +--- Alter tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- drop table if exists t11 ; drop table if exists t22 ; @@ -9157,11 +9155,11 @@ create table t44 engine='MYISAM' as select * from t4; create table t55 engine='MYISAM' as select * from t5; create table t66 engine='MYISAM' as select * from t6; alter table t11 -partition by range(month(col1)) +partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t22 -partition by list(month(col1)) +partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9170,15 +9168,15 @@ partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t33 -partition by hash(month(col1)); +partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); alter table t44 partition by range(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); alter table t55 partition by list(colint) -subpartition by hash(month(col1)) subpartitions 2 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9188,40 +9186,40 @@ partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); alter table t66 partition by range(colint) -(partition p0 
values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 select * from t22 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -2006-05-25 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 --------------------------- ---- some alter table begin --------------------------- @@ -9230,19 +9228,19 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -2006-11-06 -2006-12-17 +10:33:11 +14:30:45 alter table t55 partition by list(colint) -subpartition by hash(month(col1)) subpartitions 5 +subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 5 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), partition p2 values in (21,22,23,24,25,26,27,28,29,30), @@ -9254,10 +9252,10 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL + `col1` time DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (month(col1)) +SUBPARTITION BY HASH (time_to_sec(col1)-(time_to_sec(col1)-20)) SUBPARTITIONS 5 (PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, @@ -9267,2166 +9265,121 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ select * from t55 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 alter table t66 reorganize partition s1 into -(partition p0 values less than (month('2006-10-14')), +(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), partition p1 values less 
than maxvalue); select * from t66 order by colint; colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with month(col1) -------------------------------------------------------------------------- -delete from t1 where col1='2006-12-17'; -delete from t2 where col1='2006-12-17'; -delete from t3 where col1='2006-12-17'; -delete from t4 where col1='2006-12-17'; -delete from t5 where col1='2006-12-17'; -delete from t6 where col1='2006-12-17'; -select * from t1 order by col1; -col1 -2006-11-06 -select * from t2 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t3 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-12-17'); -insert into t3 values ('2006-12-17'); -insert into t4 values (60,'2006-12-17'); -insert into t5 values (60,'2006-12-17'); -insert into t6 values (60,'2006-12-17'); -select * from t1 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t2 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t3 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -select * from t2 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t3 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t4 order by colint; -colint col1 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -60 2006-12-17 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with month(col1) -------------------------------------------------------------------------- -delete from t11 where col1='2006-12-17'; -delete from t22 where col1='2006-12-17'; -delete from t33 where col1='2006-12-17'; -delete from t44 where col1='2006-12-17'; -delete from t55 where col1='2006-12-17'; -delete from t66 where col1='2006-12-17'; -select * from t11 order by col1; -col1 -2006-11-06 -select * from t22 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t33 order by col1; -col1 -2006-05-25 -2006-11-06 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-12-17'); -insert into t22 values ('2006-12-17'); -insert into t33 values ('2006-12-17'); -insert into t44 values (60,'2006-12-17'); -insert into t55 values (60,'2006-12-17'); -insert into t66 values (60,'2006-12-17'); -select * from t11 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t22 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 
-select * from t33 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -select * from t22 order by col1; -col1 -2006-11-06 -2006-12-17 -select * from t33 order by col1; -col1 -2006-05-25 -2006-11-06 -2006-12-17 -select * from t44 order by colint; -colint col1 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -60 2006-12-17 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- ---- quarter(col1) in partition with coltype date -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with quarter(col1) -------------------------------------------------------------------------- -create table t1 (col1 date) engine='MYISAM' -partition by range(quarter(col1)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 date) engine='MYISAM' -partition by list(quarter(col1)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 date) engine='MYISAM' -partition by hash(quarter(col1)); -create table t4 (colint int, col1 date) engine='MYISAM' -partition by range(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='MYISAM' -partition by list(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 date) engine='MYISAM' -partition by range(colint) -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); 
-------------------------------------------------------------------------- ---- Access tables with quarter(col1) -------------------------------------------------------------------------- -insert into t1 values ('2006-01-03'); -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-01-03'); -insert into t2 values ('2006-12-17'); -insert into t2 values ('2006-09-25'); -insert into t3 values ('2006-01-03'); -insert into t3 values ('2006-12-17'); -insert into t3 values ('2006-09-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select quarter(col1) from t1 order by col1; -quarter(col1) -1 -4 -select * from t1 order by col1; -col1 -2006-01-03 -2006-12-17 -select * from t2 order by col1; -col1 -2006-01-03 -2006-09-25 -2006-12-17 -select * from t3 order by col1; -col1 -2006-01-03 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-07-30' where col1='2006-01-03'; -update t2 set col1='2006-07-30' where col1='2006-01-03'; -update t3 set col1='2006-07-30' where col1='2006-01-03'; -update t4 set col1='2006-07-30' where col1='2006-01-03'; -update t5 set col1='2006-07-30' where col1='2006-01-03'; -update t6 set col1='2006-07-30' where col1='2006-01-03'; -select * from t1 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t2 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Alter tables with quarter(col1) -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='MYISAM' as select * from t1; -create table t22 engine='MYISAM' as select * from t2; -create table t33 engine='MYISAM' as select * from t3; -create table t44 engine='MYISAM' as select * from t4; -create table t55 engine='MYISAM' as select * from t5; -create table t66 engine='MYISAM' as select * from t6; -alter table t11 -partition by range(quarter(col1)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(quarter(col1)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(quarter(col1)); -alter table t44 
-partition by range(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(quarter(col1)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t22 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -alter table t55 -partition by list(colint) -subpartition by hash(quarter(col1)) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (quarter(col1)) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = MyISAM, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = MyISAM, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = MyISAM, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 
2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (quarter('2006-10-14')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with quarter(col1) -------------------------------------------------------------------------- -delete from t1 where col1='2006-12-17'; -delete from t2 where col1='2006-12-17'; -delete from t3 where col1='2006-12-17'; -delete from t4 where col1='2006-12-17'; -delete from t5 where col1='2006-12-17'; -delete from t6 where col1='2006-12-17'; -select * from t1 order by col1; -col1 -2006-07-30 -select * from t2 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-12-17'); -insert into t2 values ('2006-12-17'); -insert into t3 values ('2006-12-17'); -insert into t4 values (60,'2006-12-17'); -insert into t5 values (60,'2006-12-17'); -insert into t6 values (60,'2006-12-17'); -select * from t1 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t2 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -select * from t2 order by col1; -col1 -select * from t3 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t4 order by colint; -colint col1 -60 2006-12-17 -select * from t5 order by colint; -colint col1 -60 2006-12-17 -select * from t6 order by colint; -colint col1 -4 2006-02-05 -60 2006-12-17 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with quarter(col1) -------------------------------------------------------------------------- -delete from t11 where col1='2006-12-17'; -delete from t22 where col1='2006-12-17'; -delete from t33 where col1='2006-12-17'; -delete from t44 where col1='2006-12-17'; -delete from t55 where col1='2006-12-17'; -delete from t66 where col1='2006-12-17'; -select * from t11 order by col1; -col1 -2006-07-30 -select * from t22 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-12-17'); -insert into t22 values ('2006-12-17'); -insert into t33 values ('2006-12-17'); -insert into t44 values (60,'2006-12-17'); -insert into t55 values (60,'2006-12-17'); -insert into t66 
values (60,'2006-12-17'); -select * from t11 order by col1; -col1 -2006-07-30 -2006-12-17 -select * from t22 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t44 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -60 2006-12-17 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -select * from t22 order by col1; -col1 -select * from t33 order by col1; -col1 -2006-07-30 -2006-09-25 -2006-12-17 -select * from t44 order by colint; -colint col1 -60 2006-12-17 -select * from t55 order by colint; -colint col1 -60 2006-12-17 -select * from t66 order by colint; -colint col1 -4 2006-02-05 -60 2006-12-17 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- ---- time_to_sec(col1)-(time_to_sec(col1)-20) in partition with coltype time -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -create table t1 (col1 time) engine='MYISAM' -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 time) engine='MYISAM' -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 time) engine='MYISAM' -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -create table t4 (colint int, col1 time) engine='MYISAM' -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 time) engine='MYISAM' -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in 
(41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 time) engine='MYISAM' -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -insert into t1 values ('09:09:15'); -insert into t1 values ('14:30:45'); -insert into t2 values ('09:09:15'); -insert into t2 values ('14:30:45'); -insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:15'); -insert into t3 values ('14:30:45'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select time_to_sec(col1)-(time_to_sec(col1)-20) from t1 order by col1; -time_to_sec(col1)-(time_to_sec(col1)-20) -20 -20 -select * from t1 order by col1; -col1 -09:09:15 -14:30:45 -select * from t2 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -update t1 set col1='10:33:11' where col1='09:09:15'; -update t2 set col1='10:33:11' where col1='09:09:15'; -update t3 set col1='10:33:11' where col1='09:09:15'; -update t4 set col1='10:33:11' where col1='09:09:15'; -update t5 set col1='10:33:11' where col1='09:09:15'; -update t6 set col1='10:33:11' where col1='09:09:15'; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Alter tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='MYISAM' as select * from t1; -create table t22 engine='MYISAM' as select * from t2; -create table t33 engine='MYISAM' as select * from t3; -create table t44 engine='MYISAM' as select * from t4; -create table t55 engine='MYISAM' as select * from t5; -create table t66 engine='MYISAM' as select * from t6; -alter table t11 -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), 
-partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -alter table t44 -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (time_to_sec(col1)-(time_to_sec(col1)-20)) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = MyISAM, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = MyISAM, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = MyISAM, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 
10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t1 where col1='14:30:45'; -delete from t2 where col1='14:30:45'; -delete from t3 where col1='14:30:45'; -delete from t4 where col1='14:30:45'; -delete from t5 where col1='14:30:45'; -delete from t6 where col1='14:30:45'; -select * from t1 order by col1; -col1 -10:33:11 -select * from t2 order by col1; -col1 -10:33:11 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t1 values ('14:30:45'); -insert into t2 values ('14:30:45'); -insert into t3 values ('14:30:45'); -insert into t4 values (60,'14:30:45'); -insert into t5 values (60,'14:30:45'); -insert into t6 values (60,'14:30:45'); -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -60 14:30:45 -select * from t5 order by colint; -colint col1 -60 14:30:45 -select * from t6 order by colint; -colint col1 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t11 where col1='14:30:45'; -delete from t22 where col1='14:30:45'; -delete from t33 where col1='14:30:45'; -delete from t44 where col1='14:30:45'; -delete from t55 where col1='14:30:45'; -delete from t66 where col1='14:30:45'; -select * from t11 order by col1; -col1 -10:33:11 -select * from t22 order by col1; -col1 -10:33:11 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -21:59:22 
-select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t11 values ('14:30:45'); -insert into t22 values ('14:30:45'); -insert into t33 values ('14:30:45'); -insert into t44 values (60,'14:30:45'); -insert into t55 values (60,'14:30:45'); -insert into t66 values (60,'14:30:45'); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -60 14:30:45 -select * from t55 order by colint; -colint col1 -60 14:30:45 -select * from t66 order by colint; -colint col1 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- ---- to_days(col1)-to_days('2006-01-01') in partition with coltype date -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -create table t1 (col1 date) engine='MYISAM' -partition by range(to_days(col1)-to_days('2006-01-01')) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 date) engine='MYISAM' -partition by list(to_days(col1)-to_days('2006-01-01')) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 date) engine='MYISAM' -partition by hash(to_days(col1)-to_days('2006-01-01')); -create table t4 (colint int, col1 date) engine='MYISAM' -partition by range(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='MYISAM' -partition by 
list(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 date) engine='MYISAM' -partition by range(colint) -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -insert into t1 values ('2006-02-03'); -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-02-03'); -insert into t2 values ('2006-01-17'); -insert into t2 values ('2006-01-25'); -insert into t3 values ('2006-02-03'); -insert into t3 values ('2006-01-17'); -insert into t3 values ('2006-01-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select to_days(col1)-to_days('2006-01-01') from t1 order by col1; -to_days(col1)-to_days('2006-01-01') -16 -33 -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-03 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-02-06' where col1='2006-02-03'; -update t2 set col1='2006-02-06' where col1='2006-02-03'; -update t3 set col1='2006-02-06' where col1='2006-02-03'; -update t4 set col1='2006-02-06' where col1='2006-02-03'; -update t5 set col1='2006-02-06' where col1='2006-02-03'; -update t6 set col1='2006-02-06' where col1='2006-02-03'; -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Alter tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='MYISAM' as select * from t1; -create table t22 engine='MYISAM' as select * from t2; -create table t33 engine='MYISAM' as select * from t3; -create table t44 engine='MYISAM' 
as select * from t4; -create table t55 engine='MYISAM' as select * from t5; -create table t66 engine='MYISAM' as select * from t6; -alter table t11 -partition by range(to_days(col1)-to_days('2006-01-01')) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(to_days(col1)-to_days('2006-01-01')) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(to_days(col1)-to_days('2006-01-01')); -alter table t44 -partition by range(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t55 -partition by list(colint) -subpartition by hash(to_days(col1)-to_days('2006-01-01')) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (to_days(col1)-to_days('2006-01-01')) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = MyISAM, - PARTITION p3 VALUES IN 
(31,32,33,34,35,36,37,38,39,40) ENGINE = MyISAM, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = MyISAM, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ -select * from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (to_days('2006-02-02')-to_days('2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -delete from t1 where col1='2006-01-17'; -delete from t2 where col1='2006-01-17'; -delete from t3 where col1='2006-01-17'; -delete from t4 where col1='2006-01-17'; -delete from t5 where col1='2006-01-17'; -delete from t6 where col1='2006-01-17'; -select * from t1 order by col1; -col1 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-01-17'); -insert into t3 values ('2006-01-17'); -insert into t4 values (60,'2006-01-17'); -insert into t5 values (60,'2006-01-17'); -insert into t6 values (60,'2006-01-17'); -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t6 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -60 2006-01-17 -select * from t5 order by colint; -colint col1 -60 2006-01-17 -select * from t6 order by colint; -colint col1 -60 2006-01-17 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with 
to_days(col1)-to_days('2006-01-01') -------------------------------------------------------------------------- -delete from t11 where col1='2006-01-17'; -delete from t22 where col1='2006-01-17'; -delete from t33 where col1='2006-01-17'; -delete from t44 where col1='2006-01-17'; -delete from t55 where col1='2006-01-17'; -delete from t66 where col1='2006-01-17'; -select * from t11 order by col1; -col1 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-01-17'); -insert into t22 values ('2006-01-17'); -insert into t33 values ('2006-01-17'); -insert into t44 values (60,'2006-01-17'); -insert into t55 values (60,'2006-01-17'); -insert into t66 values (60,'2006-01-17'); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t55 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -select * from t66 order by colint; -colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -60 2006-01-17 -select * from t55 order by colint; -colint col1 -60 2006-01-17 -select * from t66 order by colint; -colint col1 -60 2006-01-17 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- ---- datediff(col1, '2006-01-01') in partition with coltype date -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with datediff(col1, '2006-01-01') -------------------------------------------------------------------------- -create table t1 (col1 date) engine='MYISAM' -partition by range(datediff(col1, '2006-01-01')) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 date) engine='MYISAM' -partition by list(datediff(col1, '2006-01-01')) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in 
(31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 date) engine='MYISAM' -partition by hash(datediff(col1, '2006-01-01')); -create table t4 (colint int, col1 date) engine='MYISAM' -partition by range(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 date) engine='MYISAM' -partition by list(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 date) engine='MYISAM' -partition by range(colint) -(partition p0 values less than (datediff('2006-02-02', '2006-01-01')), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with datediff(col1, '2006-01-01') -------------------------------------------------------------------------- -insert into t1 values ('2006-02-03'); -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-02-03'); -insert into t2 values ('2006-01-17'); -insert into t2 values ('2006-01-25'); -insert into t3 values ('2006-02-03'); -insert into t3 values ('2006-01-17'); -insert into t3 values ('2006-01-25'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; -select datediff(col1, '2006-01-01') from t1 order by col1; -datediff(col1, '2006-01-01') -16 -33 -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-03 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-03 -select * from t4 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-03 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -update t1 set col1='2006-02-06' where col1='2006-02-03'; -update t2 set col1='2006-02-06' where col1='2006-02-03'; -update t3 set col1='2006-02-06' where col1='2006-02-03'; -update t4 set col1='2006-02-06' where col1='2006-02-03'; -update t5 set col1='2006-02-06' where col1='2006-02-03'; -update t6 set col1='2006-02-06' where col1='2006-02-03'; -select * from t1 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t2 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t3 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t4 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t5 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t6 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 
-------------------------------------------------------------------------- ---- Alter tables with datediff(col1, '2006-01-01') -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='MYISAM' as select * from t1; -create table t22 engine='MYISAM' as select * from t2; -create table t33 engine='MYISAM' as select * from t3; -create table t44 engine='MYISAM' as select * from t4; -create table t55 engine='MYISAM' as select * from t5; -create table t66 engine='MYISAM' as select * from t6; -alter table t11 -partition by range(datediff(col1, '2006-01-01')) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(datediff(col1, '2006-01-01')) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(datediff(col1, '2006-01-01')); -alter table t44 -partition by range(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (datediff('2006-02-02', '2006-01-01')), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -select * from t22 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t33 order by col1; -col1 -2006-01-17 -2006-01-25 -2006-02-06 -select * from t44 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -2006-01-17 -2006-02-06 -alter table t55 -partition by list(colint) -subpartition by hash(datediff(col1, '2006-01-01')) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table 
t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` date DEFAULT NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (datediff(col1, '2006-01-01')) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = MyISAM, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = MyISAM, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = MyISAM, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = MyISAM, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = MyISAM, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = MyISAM) */ -select * from t55 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (datediff('2006-02-02', '2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (datediff('2006-02-02', '2006-01-01')), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 2006-02-06 -2 2006-01-17 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with datediff(col1, '2006-01-01') +--- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -delete from t1 where col1='2006-01-17'; -delete from t2 where col1='2006-01-17'; -delete from t3 where col1='2006-01-17'; -delete from t4 where col1='2006-01-17'; -delete from t5 where col1='2006-01-17'; -delete from t6 where col1='2006-01-17'; +delete from t1 where col1='14:30:45'; +delete from t2 where col1='14:30:45'; +delete from t3 where col1='14:30:45'; +delete from t4 where col1='14:30:45'; +delete from t5 where col1='14:30:45'; +delete from t6 where col1='14:30:45'; select * from t1 order by col1; col1 -2006-02-06 +10:33:11 select * from t2 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t3 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t5 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t1 values ('2006-01-17'); -insert into t2 values ('2006-01-17'); -insert into t3 values ('2006-01-17'); -insert into t4 values (60,'2006-01-17'); -insert into t5 values (60,'2006-01-17'); -insert into t6 values (60,'2006-01-17'); +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t1 values ('14:30:45'); +insert into t2 values ('14:30:45'); +insert into t3 values ('14:30:45'); +insert into t4 values (60,'14:30:45'); +insert into t5 values (60,'14:30:45'); +insert into t6 values (60,'14:30:45'); select * from t1 
order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t2 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t5 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t6 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -11434,95 +9387,99 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t2 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t3 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t4 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t5 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t6 order by colint; colint col1 -60 2006-01-17 ------------------------------------------------------------------------- ---- Delete rows and partitions of tables with datediff(col1, '2006-01-01') +--- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) ------------------------------------------------------------------------- -delete from t11 where col1='2006-01-17'; -delete from t22 where col1='2006-01-17'; -delete from t33 where col1='2006-01-17'; -delete from t44 where col1='2006-01-17'; -delete from t55 where col1='2006-01-17'; -delete from t66 where col1='2006-01-17'; +delete from t11 where col1='14:30:45'; +delete from t22 where col1='14:30:45'; +delete from t33 where col1='14:30:45'; +delete from t44 where col1='14:30:45'; +delete from t55 where col1='14:30:45'; +delete from t66 where col1='14:30:45'; select * from t11 order by col1; col1 -2006-02-06 +10:33:11 select * from t22 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t33 order by col1; col1 -2006-01-25 -2006-02-06 +10:33:11 +21:59:22 select * from t44 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 select * from t55 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -insert into t11 values ('2006-01-17'); -insert into t22 values ('2006-01-17'); -insert into t33 values ('2006-01-17'); -insert into t44 values (60,'2006-01-17'); -insert into t55 values (60,'2006-01-17'); -insert into t66 values (60,'2006-01-17'); +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +insert into t11 values ('14:30:45'); +insert into t22 values ('14:30:45'); +insert into t33 values ('14:30:45'); +insert into t44 values (60,'14:30:45'); +insert into t55 values (60,'14:30:45'); +insert into t66 values (60,'14:30:45'); select * from t11 order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t22 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t44 order 
by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t55 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 select * from t66 order by colint; colint col1 -1 2006-02-06 -3 2006-01-25 -4 2006-02-05 -60 2006-01-17 +1 10:33:11 +2 04:30:01 +3 00:59:22 +4 05:30:34 +60 14:30:45 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -11530,27 +9487,26 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 -2006-01-17 -2006-02-06 +10:33:11 +14:30:45 select * from t22 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t33 order by col1; col1 -2006-01-17 -2006-01-25 -2006-02-06 +10:33:11 +14:30:45 +21:59:22 select * from t44 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t55 order by colint; colint col1 -60 2006-01-17 +60 14:30:45 select * from t66 order by colint; colint col1 -60 2006-01-17 ------------------------- ---- some alter table end ------------------------- diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 86e2603cd01..0151820cef9 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -1821,15 +1821,16 @@ while ($cnt) drop table t1; # -# BUG#32272: partition crash 1: enum column +# BUG#32772: partition crash 1: enum column # +# Note that month(int_col) is disallowed after bug#54483. create table t1 ( c0 int, c1 bigint, c2 set('sweet'), key (c2,c1,c0), key(c0) -) engine=myisam partition by hash (month(c0)) partitions 5; +) engine=myisam partition by hash (c0) partitions 5; --disable_warnings insert ignore into t1 set c0 = -6502262, c1 = 3992917, c2 = 35019; diff --git a/mysql-test/t/partition_error.test b/mysql-test/t/partition_error.test index d3f10628254..b222b02252b 100644 --- a/mysql-test/t/partition_error.test +++ b/mysql-test/t/partition_error.test @@ -10,6 +10,670 @@ drop table if exists t1; let $MYSQLD_DATADIR= `SELECT @@datadir`; +--echo # +--echo # Bug#54483: valgrind errors when making warnings for multiline inserts +--echo # into partition +--echo # +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARBINARY(10)) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a CHAR(10)) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIMESTAMP) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +CREATE TABLE t1 (a DATE) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +INSERT INTO t1 VALUES ('test'),('a'),('5'); +SHOW WARNINGS; +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +INSERT INTO t1 VALUES ('test'),('a'),('5'); +SHOW WARNINGS; +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY RANGE (DAYOFWEEK(a)) +(PARTITION a1 VALUES LESS THAN (60)); +SHOW WARNINGS; + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH 
(TO_DAYS(a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (TO_DAYS(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (TO_DAYS(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TO_DAYS(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TO_DAYS(a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (DAYOFMONTH(a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (DAYOFMONTH(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (DAYOFMONTH(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (DAYOFMONTH(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (DAYOFMONTH(a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (MONTH(a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (MONTH(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (MONTH(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (MONTH(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (MONTH(a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (DAYOFYEAR(a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (DAYOFYEAR(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (DAYOFYEAR(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (DAYOFYEAR(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (DAYOFYEAR(a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (HOUR(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (HOUR(a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (HOUR(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (HOUR(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (HOUR(a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (MINUTE(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (MINUTE(a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (MINUTE(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (MINUTE(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (MINUTE(a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (QUARTER(a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (QUARTER(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (QUARTER(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (QUARTER(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (QUARTER(a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (SECOND(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (SECOND(a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (SECOND(a)); +DROP TABLE t1; +--error 
ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (SECOND(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (SECOND(a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (YEARWEEK(a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (YEARWEEK(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (YEARWEEK(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (YEARWEEK(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (YEARWEEK(a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (WEEKDAY(a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (WEEKDAY(a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (WEEKDAY(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (WEEKDAY(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (WEEKDAY(a)); +--echo # TO_SECONDS() is added in 5.5. + +--error ER_PARSE_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (TO_SECONDS(a)); +#--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +#CREATE TABLE t1 (a TIME) +#PARTITION BY HASH (TO_SECONDS(a)); +#CREATE TABLE t1 (a DATE) +#PARTITION BY HASH (TO_SECONDS(a)); +#DROP TABLE t1; +#CREATE TABLE t1 (a DATETIME) +#PARTITION BY HASH (TO_SECONDS(a)); +#DROP TABLE t1; +#--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +#CREATE TABLE t1 (a VARCHAR(10)) +#PARTITION BY HASH (TO_SECONDS(a)); +#--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +#CREATE TABLE t1 (a INT) +#PARTITION BY HASH (TO_SECONDS(a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (TIME_TO_SEC(a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TIME_TO_SEC(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TIME_TO_SEC(a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (FROM_DAYS(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (FROM_DAYS(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (FROM_DAYS(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TO_DAYS(FROM_DAYS(a))); +--error ER_PARTITION_FUNC_NOT_ALLOWED_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (FROM_DAYS(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TO_DAYS(FROM_DAYS(a))); +--error ER_PARTITION_FUNC_NOT_ALLOWED_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (FROM_DAYS(a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (MICROSECOND(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (MICROSECOND(a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (MICROSECOND(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (MICROSECOND(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a 
INT) +PARTITION BY HASH (MICROSECOND(a)); +--echo # Bug#57071 +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 +(`date` date, + `extracted_week` int, + `yearweek` int, + `week` int, + `default_week_format` int) +PARTITION BY LIST (EXTRACT(WEEK FROM date) % 3) +(PARTITION p0 VALUES IN (0), + PARTITION p1 VALUES IN (1), + PARTITION p2 VALUES IN (2)); +CREATE TABLE t1 +(`date` date, + `extracted_week` int, + `yearweek` int, + `week` int, + `default_week_format` int); +SET @old_default_week_format := @@default_week_format; +SET default_week_format = 0; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 1; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 2; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 3; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 4; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 5; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 6; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SET default_week_format = 7; +INSERT INTO t1 VALUES ('2000-01-01', EXTRACT(WEEK FROM '2000-01-01'), YEARWEEK('2000-01-01'), WEEK('2000-01-01'), @@default_week_format); +SELECT * FROM t1; +SET default_week_format = @old_default_week_format; +DROP TABLE t1; + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(YEAR FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(YEAR_MONTH FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE 
TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(QUARTER FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MONTH FROM a)); + +--echo # EXTRACT(WEEK...) is disallowed, see bug#57071. +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(WEEK FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_HOUR FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_MINUTE FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_SECOND FROM a)); + +CREATE TABLE t1 
(a TIME) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(HOUR FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(HOUR_MINUTE FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(HOUR_SECOND FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MINUTE FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MINUTE_SECOND FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(SECOND FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION 
BY HASH (EXTRACT(MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MICROSECOND FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(DAY_MICROSECOND FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(HOUR_MICROSECOND FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(MINUTE_MICROSECOND FROM a)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (EXTRACT(SECOND_MICROSECOND FROM a)); + +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a TIME, b DATE) +PARTITION BY HASH (DATEDIFF(a, b)); +CREATE TABLE t1 (a DATE, b DATETIME) +PARTITION BY HASH (DATEDIFF(a, b)); +DROP TABLE t1; +CREATE TABLE t1 (a DATETIME, b DATE) +PARTITION BY HASH (DATEDIFF(a, b)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE, b VARCHAR(10)) +PARTITION BY HASH (DATEDIFF(a, b)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT, b DATETIME) +PARTITION BY HASH (DATEDIFF(a, b)); + +CREATE TABLE t1 (a TIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a DATE) +PARTITION BY HASH (TIME_TO_SEC(a)); +CREATE TABLE t1 (a DATETIME) +PARTITION BY HASH (TIME_TO_SEC(a)); +DROP TABLE t1; +--error 
ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a VARCHAR(10)) +PARTITION BY HASH (TIME_TO_SEC(a)); +--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR +CREATE TABLE t1 (a INT) +PARTITION BY HASH (TIME_TO_SEC(a)); + + --echo # --echo # Bug#49161: Out of memory; restart server and try again (needed 2 bytes) --echo # diff --git a/sql/item.h b/sql/item.h index fd85fc780af..c2dd2052303 100644 --- a/sql/item.h +++ b/sql/item.h @@ -966,11 +966,11 @@ public: virtual bool set_no_const_sub(uchar *arg) { return FALSE; } virtual Item *replace_equal_field(uchar * arg) { return this; } /* - Check if an expression value depends on the current timezone. Used by - partitioning code to reject timezone-dependent expressions in a - (sub)partitioning function. + Check if an expression value has allowed arguments, like DATE/DATETIME + for date functions. Also used by partitioning code to reject + timezone-dependent expressions in a (sub)partitioning function. */ - virtual bool is_timezone_dependent_processor(uchar *bool_arg) + virtual bool is_arguments_valid_processor(uchar *bool_arg) { return FALSE; } diff --git a/sql/item_func.h b/sql/item_func.h index 256348eee08..548db490ac1 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -189,6 +189,7 @@ public: null_value=1; return 0.0; } + bool has_timestamp_args() { DBUG_ASSERT(fixed == TRUE); @@ -200,6 +201,45 @@ public: } return FALSE; } + + bool has_date_args() + { + DBUG_ASSERT(fixed == TRUE); + for (uint i= 0; i < arg_count; i++) + { + if (args[i]->type() == Item::FIELD_ITEM && + (args[i]->field_type() == MYSQL_TYPE_DATE || + args[i]->field_type() == MYSQL_TYPE_DATETIME)) + return TRUE; + } + return FALSE; + } + + bool has_time_args() + { + DBUG_ASSERT(fixed == TRUE); + for (uint i= 0; i < arg_count; i++) + { + if (args[i]->type() == Item::FIELD_ITEM && + (args[i]->field_type() == MYSQL_TYPE_TIME || + args[i]->field_type() == MYSQL_TYPE_DATETIME)) + return TRUE; + } + return FALSE; + } + + bool has_datetime_args() + { + DBUG_ASSERT(fixed == TRUE); + for (uint i= 0; i < arg_count; i++) + { + if (args[i]->type() == Item::FIELD_ITEM && + args[i]->field_type() == MYSQL_TYPE_DATETIME) + return TRUE; + } + return FALSE; + } + /* We assume the result of any function that has a TIMESTAMP argument to be timezone-dependent, since a TIMESTAMP value in both numeric and string @@ -208,7 +248,7 @@ public: representation of a TIMESTAMP argument verbatim, and thus does not depend on the timezone. 
*/ - virtual bool is_timezone_dependent_processor(uchar *bool_arg) + virtual bool is_arguments_valid_processor(uchar *bool_arg) { return has_timestamp_args(); } diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index ef86406e1be..8f951645c78 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -70,6 +70,10 @@ public: enum_monotonicity_info get_monotonicity_info() const; longlong val_int_endpoint(bool left_endp, bool *incl_endp); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; @@ -86,6 +90,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; @@ -111,6 +119,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; @@ -140,6 +152,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; @@ -156,6 +172,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_time_args(); + } }; @@ -172,6 +192,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_time_args(); + } }; @@ -188,6 +212,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; @@ -204,6 +232,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_time_args(); + } }; @@ -234,6 +266,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; @@ -252,6 +288,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; @@ -282,6 +322,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_date_args(); + } }; class Item_func_dayname :public Item_func_weekday @@ -311,7 +355,7 @@ public: (and thus may not be used as a partitioning function) when its argument is NOT of the TIMESTAMP type. 
*/ - bool is_timezone_dependent_processor(uchar *int_arg) + bool is_arguments_valid_processor(uchar *int_arg) { return !has_timestamp_args(); } @@ -335,6 +379,10 @@ public: max_length=10*MY_CHARSET_BIN_MB_MAXLEN; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_time_args(); + } }; @@ -588,6 +636,10 @@ public: const char *func_name() const { return "from_days"; } bool get_date(MYSQL_TIME *res, uint fuzzy_date); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return has_date_args() || has_time_args(); + } }; @@ -714,6 +766,42 @@ class Item_extract :public Item_int_func bool eq(const Item *item, bool binary_cmp) const; virtual void print(String *str, enum_query_type query_type); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + switch (int_type) { + case INTERVAL_YEAR: + case INTERVAL_YEAR_MONTH: + case INTERVAL_QUARTER: + case INTERVAL_MONTH: + /* case INTERVAL_WEEK: Not allowed as partitioning function, bug#57071 */ + case INTERVAL_DAY: + return !has_date_args(); + case INTERVAL_DAY_HOUR: + case INTERVAL_DAY_MINUTE: + case INTERVAL_DAY_SECOND: + case INTERVAL_DAY_MICROSECOND: + return !has_datetime_args(); + case INTERVAL_HOUR: + case INTERVAL_HOUR_MINUTE: + case INTERVAL_HOUR_SECOND: + case INTERVAL_MINUTE: + case INTERVAL_MINUTE_SECOND: + case INTERVAL_SECOND: + case INTERVAL_MICROSECOND: + case INTERVAL_HOUR_MICROSECOND: + case INTERVAL_MINUTE_MICROSECOND: + case INTERVAL_SECOND_MICROSECOND: + return !has_time_args(); + default: + /* + INTERVAL_LAST is only an end marker, + INTERVAL_WEEK depends on default_week_format which is a session + variable and cannot be used for partitioning. See bug#57071. + */ + break; + } + return true; + } }; @@ -964,6 +1052,10 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} + bool is_arguments_valid_processor(uchar *int_arg) + { + return !has_time_args(); + } }; diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 7b0c47865d8..52bc7507cc4 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1019,7 +1019,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, opening existing tables for easier maintenance. This exception should be deprecated at some point in future so that we always throw an error. */ - if (func_expr->walk(&Item::is_timezone_dependent_processor, + if (func_expr->walk(&Item::is_arguments_valid_processor, 0, NULL)) { if (is_create_table_ind) -- cgit v1.2.1 From d19c8ec930055e446cd7612b37dff871924bd9b6 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 22:01:35 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3527.3.2 > revision-id: mattias.jonsson@oracle.com-20101222144517-cbv3l5jlbd0mq5s8 > parent: mattias.jonsson@oracle.com-20101222095036-2lpx0gqu4i45jtkz > committer: Mattias Jonsson > branch nick: b54483-51-bt_2 > timestamp: Wed 2010-12-22 15:45:17 +0100 > message: > Bug#54483: valgrind errors when making warnings for > multiline inserts into partition > Bug#57071: EXTRACT(WEEK from date_col) cannot be > allowed as partitioning function > > Renamed function according to reviewers comments. 
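The rename above keeps the mechanism of the original fix intact: fix_fields_part_func() walks the partitioning expression with Item::walk() and a processor callback, and each date/time function overrides the processor to veto argument types it cannot meaningfully evaluate (for example HOUR() of a DATE column). A minimal self-contained sketch of that shape is given below; Expr, FieldExpr and HourFunc are hypothetical stand-ins for illustration, not the server's Item hierarchy.

  // Sketch of the walk/processor validation pattern (assumed names, C++).
  #include <cstdio>
  #include <vector>

  enum FieldType { FT_DATE, FT_TIME, FT_DATETIME, FT_INT };

  struct Expr
  {
    virtual ~Expr() {}
    // Walk the expression tree; return true as soon as any node objects.
    virtual bool walk(bool (Expr::*processor)()) { return (this->*processor)(); }
    // Default processor: plain items never object.
    virtual bool check_valid_arguments_processor() { return false; }
  };

  struct FieldExpr : Expr
  {
    FieldType type;
    explicit FieldExpr(FieldType t) : type(t) {}
  };

  // Stand-in for a function such as HOUR(): only TIME/DATETIME columns are
  // acceptable arguments, so anything else is vetoed for partitioning.
  struct HourFunc : Expr
  {
    std::vector<Expr*> args;
    bool has_time_args() const
    {
      for (size_t i= 0; i < args.size(); i++)
        if (const FieldExpr *f= dynamic_cast<const FieldExpr*>(args[i]))
          if (f->type == FT_TIME || f->type == FT_DATETIME)
            return true;
      return false;
    }
    bool walk(bool (Expr::*processor)())
    {
      for (size_t i= 0; i < args.size(); i++)
        if (args[i]->walk(processor))
          return true;
      return (this->*processor)();
    }
    bool check_valid_arguments_processor() { return !has_time_args(); }
  };

  int main()
  {
    FieldExpr date_col(FT_DATE);              // e.g. a DATE column
    HourFunc expr;
    expr.args.push_back(&date_col);
    // Mirrors the walk in fix_fields_part_func(): HOUR(date_col) is refused.
    std::printf("rejected: %s\n",
                expr.walk(&Expr::check_valid_arguments_processor) ? "yes" : "no");
    return 0;
  }
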
--- sql/item.h | 2 +- sql/item_func.h | 2 +- sql/item_timefunc.h | 32 ++++++++++++++++---------------- sql/sql_partition.cc | 11 ++++++----- 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/sql/item.h b/sql/item.h index c2dd2052303..e55f24dba5a 100644 --- a/sql/item.h +++ b/sql/item.h @@ -970,7 +970,7 @@ public: for date functions. Also used by partitioning code to reject timezone-dependent expressions in a (sub)partitioning function. */ - virtual bool is_arguments_valid_processor(uchar *bool_arg) + virtual bool check_valid_arguments_processor(uchar *bool_arg) { return FALSE; } diff --git a/sql/item_func.h b/sql/item_func.h index 548db490ac1..f4828bc7244 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -248,7 +248,7 @@ public: representation of a TIMESTAMP argument verbatim, and thus does not depend on the timezone. */ - virtual bool is_arguments_valid_processor(uchar *bool_arg) + virtual bool check_valid_arguments_processor(uchar *bool_arg) { return has_timestamp_args(); } diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 8f951645c78..fafa267a190 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -70,7 +70,7 @@ public: enum_monotonicity_info get_monotonicity_info() const; longlong val_int_endpoint(bool left_endp, bool *incl_endp); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -90,7 +90,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -119,7 +119,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -152,7 +152,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -172,7 +172,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_time_args(); } @@ -192,7 +192,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_time_args(); } @@ -212,7 +212,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -232,7 +232,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_time_args(); } @@ -266,7 +266,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -288,7 +288,7 @@ public: maybe_null=1; } bool 
check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -322,7 +322,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_date_args(); } @@ -355,7 +355,7 @@ public: (and thus may not be used as a partitioning function) when its argument is NOT of the TIMESTAMP type. */ - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_timestamp_args(); } @@ -379,7 +379,7 @@ public: max_length=10*MY_CHARSET_BIN_MB_MAXLEN; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_time_args(); } @@ -636,7 +636,7 @@ public: const char *func_name() const { return "from_days"; } bool get_date(MYSQL_TIME *res, uint fuzzy_date); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return has_date_args() || has_time_args(); } @@ -766,7 +766,7 @@ class Item_extract :public Item_int_func bool eq(const Item *item, bool binary_cmp) const; virtual void print(String *str, enum_query_type query_type); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { switch (int_type) { case INTERVAL_YEAR: @@ -1052,7 +1052,7 @@ public: maybe_null=1; } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} - bool is_arguments_valid_processor(uchar *int_arg) + bool check_valid_arguments_processor(uchar *int_arg) { return !has_time_args(); } diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 52bc7507cc4..91267c7f273 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1014,12 +1014,13 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, } /* - We don't allow creating partitions with timezone-dependent expressions as - a (sub)partitioning function, but we want to allow such expressions when - opening existing tables for easier maintenance. This exception should be - deprecated at some point in future so that we always throw an error. + We don't allow creating partitions with expressions with non matching + arguments as a (sub)partitioning function, + but we want to allow such expressions when opening existing tables for + easier maintenance. This exception should be deprecated at some point + in future so that we always throw an error. 
*/ - if (func_expr->walk(&Item::is_arguments_valid_processor, + if (func_expr->walk(&Item::check_valid_arguments_processor, 0, NULL)) { if (is_create_table_ind) -- cgit v1.2.1 From 436b67e324b61ad3d4de6ea9368edd0e00817237 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 22:02:48 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3536 > revision-id: davi.arnaut@oracle.com-20110107183336-kp8niwm2hz3wb4c3 > parent: saikumar.v@sun.com-20110106103945-rhsek9uy6f63db44 > committer: Davi Arnaut > branch nick: 51023-5.1 > timestamp: Fri 2011-01-07 16:33:36 -0200 > message: > Bug#51023: Mysql server crashes on SIGHUP and destroys InnoDB files > > From a user perspective, the problem is that a FLUSH LOGS or SIGHUP > signal could end up associating the stdout and stderr to random > files. In the case of this bug report, the streams would end up > associated to InnoDB ibd files. > > The freopen(3) function is not thread-safe on FreeBSD. What this > means is that if another thread calls open(2) during freopen() > is executing that another thread's fd returned by open(2) may get > re-associated with the file being passed to freopen(3). See FreeBSD > PR number 79887 for reference: > > http://www.freebsd.org/cgi/query-pr.cgi?pr=79887 > > This problem is worked around by substituting a internal hook within > the FILE structure. This avoids the loss of atomicity by not having > the original fd closed before its duplicated. > > Patch based on the original work by Vasil Dimov. --- include/my_sys.h | 1 + mysys/my_fopen.c | 135 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- sql/log.cc | 74 +++++------------------------- 3 files changed, 145 insertions(+), 65 deletions(-) diff --git a/include/my_sys.h b/include/my_sys.h index 3a240cfc118..43f66c73825 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -664,6 +664,7 @@ extern void init_glob_errs(void); extern void wait_for_free_space(const char *filename, int errors); extern FILE *my_fopen(const char *FileName,int Flags,myf MyFlags); extern FILE *my_fdopen(File Filedes,const char *name, int Flags,myf MyFlags); +extern FILE *my_freopen(const char *path, const char *mode, FILE *stream); extern int my_fclose(FILE *fd,myf MyFlags); extern int my_chsize(File fd,my_off_t newlength, int filler, myf MyFlags); extern int my_sync(File fd, myf my_flags); diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c index 44156da6ae3..a822b63dd63 100644 --- a/mysys/my_fopen.c +++ b/mysys/my_fopen.c @@ -18,6 +18,10 @@ #include #include "mysys_err.h" +#if defined(__FreeBSD__) +extern int getosreldate(void); +#endif + static void make_ftype(char * to,int flag); /* @@ -97,8 +101,137 @@ FILE *my_fopen(const char *filename, int flags, myf MyFlags) } /* my_fopen */ - /* Close a stream */ +#if defined(_WIN32) + +static FILE *my_win_freopen(const char *path, FILE *stream) +{ + int handle_fd, fd= _fileno(stream); + HANDLE osfh; + + DBUG_ASSERT(filename && stream); + + /* Services don't have stdout/stderr on Windows, so _fileno returns -1. 
*/ + if (fd < 0) + { + if (!freopen(filename, mode, stream)) + return NULL; + + fd= _fileno(stream); + } + + if ((osfh= CreateFile(path, GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE | + FILE_SHARE_DELETE, NULL, + OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, + NULL)) == INVALID_HANDLE_VALUE) + return NULL; + + if ((handle_fd= _open_osfhandle((intptr_t)osfh, + _O_APPEND | _O_TEXT)) == -1) + { + CloseHandle(osfh); + return NULL; + } + + if (_dup2(handle_fd, fd) < 0) + { + CloseHandle(osfh); + return NULL; + } + + _close(handle_fd); + + return stream; +} + +#elif defined(__FreeBSD__) + +/* No close operation hook. */ + +static int no_close(void *cookie __attribute__((unused))) +{ + return 0; +} + +/* + A hack around a race condition in the implementation of freopen. + + The race condition steams from the fact that the current fd of + the stream is closed before its number is used to duplicate the + new file descriptor. This defeats the desired atomicity of the + close and duplicate of dup2(). + + See PR number 79887 for reference: + http://www.freebsd.org/cgi/query-pr.cgi?pr=79887 +*/ + +static FILE *my_freebsd_freopen(const char *path, const char *mode, FILE *stream) +{ + int old_fd; + FILE *result; + + flockfile(stream); + + old_fd= fileno(stream); + + /* Use a no operation close hook to avoid having the fd closed. */ + stream->_close= no_close; + + /* Relies on the implicit dup2 to close old_fd. */ + result= freopen(path, mode, stream); + + /* If successful, the _close hook was replaced. */ + + if (result == NULL) + close(old_fd); + else + funlockfile(result); + + return result; +} + +#endif + + +/** + Change the file associated with a file stream. + + @param path Path to file. + @param mode Mode of the stream. + @param stream File stream. + + @note + This function is used to redirect stdout and stderr to a file and + subsequently to close and reopen that file for log rotation. + + @retval A FILE pointer on success. Otherwise, NULL. +*/ + +FILE *my_freopen(const char *path, const char *mode, FILE *stream) +{ + FILE *result; + +#if defined(_WIN32) + result= my_win_freopen(path, mode, stream); +#elif defined(__FreeBSD__) + /* + XXX: Once the fix is ported to the stable releases, this should + be dependent upon the specific FreeBSD versions. Check at: + http://www.freebsd.org/cgi/query-pr.cgi?pr=79887 + */ + if (getosreldate() > 900027) + result= freopen(path, mode, stream); + else + result= my_freebsd_freopen(path, mode, stream); +#else + result= freopen(path, mode, stream); +#endif + + return result; +} + +/* Close a stream */ int my_fclose(FILE *fd, myf MyFlags) { int err,file; diff --git a/sql/log.cc b/sql/log.cc index 56f151fe2ab..d6314da1e29 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -5067,80 +5067,26 @@ void sql_perror(const char *message) } -#ifdef __WIN__ +/* + Change the file associated with two output streams. Used to + redirect stdout and stderr to a file. The streams are reopened + only for appending (writing at end of file). +*/ extern "C" my_bool reopen_fstreams(const char *filename, FILE *outstream, FILE *errstream) { - int handle_fd; - int err_fd, out_fd; - HANDLE osfh; - - DBUG_ASSERT(filename && errstream); - - // Services don't have stdout/stderr on Windows, so _fileno returns -1. 
- err_fd= _fileno(errstream); - if (err_fd < 0) - { - if (!freopen(filename, "a+", errstream)) - return TRUE; - - setbuf(errstream, NULL); - err_fd= _fileno(errstream); - } - - if (outstream) - { - out_fd= _fileno(outstream); - if (out_fd < 0) - { - if (!freopen(filename, "a+", outstream)) - return TRUE; - out_fd= _fileno(outstream); - } - } - - if ((osfh= CreateFile(filename, GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE | - FILE_SHARE_DELETE, NULL, - OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, - NULL)) == INVALID_HANDLE_VALUE) + if (outstream && !my_freopen(filename, "a", outstream)) return TRUE; - if ((handle_fd= _open_osfhandle((intptr_t)osfh, - _O_APPEND | _O_TEXT)) == -1) - { - CloseHandle(osfh); + if (errstream && !my_freopen(filename, "a", errstream)) return TRUE; - } - if (_dup2(handle_fd, err_fd) < 0) - { - CloseHandle(osfh); - return TRUE; - } - - if (outstream && _dup2(handle_fd, out_fd) < 0) - { - CloseHandle(osfh); - return TRUE; - } - - _close(handle_fd); - return FALSE; -} -#else -extern "C" my_bool reopen_fstreams(const char *filename, - FILE *outstream, FILE *errstream) -{ - if (outstream && !freopen(filename, "a+", outstream)) - return TRUE; - - if (errstream && !freopen(filename, "a+", errstream)) - return TRUE; + /* The error stream must be unbuffered. */ + if (errstream) + setbuf(errstream, NULL); return FALSE; } -#endif /* -- cgit v1.2.1 From 5ac3c5dea35185a1f94eedd60cee2a6f0d39dd7b Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 22:04:11 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3537 > revision-id: davi.arnaut@oracle.com-20110107192806-rmvvxwhk10sy0219 > parent: davi.arnaut@oracle.com-20110107183336-kp8niwm2hz3wb4c3 > committer: Davi Arnaut > branch nick: mysql-5.1 > timestamp: Fri 2011-01-07 17:28:06 -0200 > message: > Bug#51023: Mysql server crashes on SIGHUP and destroys InnoDB files > > WIN32 compilation fixes: define ETIMEDOUT only if not available and > fix typos and add a missing parameter. --- include/my_pthread.h | 4 +++- mysys/my_fopen.c | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/my_pthread.h b/include/my_pthread.h index fec7c972a7b..d64c7d2199e 100644 --- a/include/my_pthread.h +++ b/include/my_pthread.h @@ -126,7 +126,9 @@ struct tm *gmtime_r(const time_t *timep,struct tm *tmp); void pthread_exit(void *a); /* was #define pthread_exit(A) ExitThread(A)*/ -#define ETIMEDOUT 145 /* Win32 doesn't have this */ +#ifndef ETIMEDOUT +#define ETIMEDOUT 145 /* Win32 might not have this */ +#endif #define getpid() GetCurrentThreadId() #define HAVE_LOCALTIME_R 1 #define _REENTRANT 1 diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c index a822b63dd63..b8373ecb3ab 100644 --- a/mysys/my_fopen.c +++ b/mysys/my_fopen.c @@ -103,17 +103,17 @@ FILE *my_fopen(const char *filename, int flags, myf MyFlags) #if defined(_WIN32) -static FILE *my_win_freopen(const char *path, FILE *stream) +static FILE *my_win_freopen(const char *path, const char *mode, FILE *stream) { int handle_fd, fd= _fileno(stream); HANDLE osfh; - DBUG_ASSERT(filename && stream); + DBUG_ASSERT(path && stream); /* Services don't have stdout/stderr on Windows, so _fileno returns -1. 
*/ if (fd < 0) { - if (!freopen(filename, mode, stream)) + if (!freopen(path, mode, stream)) return NULL; fd= _fileno(stream); -- cgit v1.2.1 From 6497b7aa4dfac482a559eefa294499f3f658a02f Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 22:06:24 +0100 Subject: Backport into build-201102032246-5.1.52sp1 > ------------------------------------------------------------ > revno: 3545 > revision-id: holyfoot@mysql.com-20110112130241-50lwmhq562otxw31 > parent: dmitry.lenev@oracle.com-20110112130830-csanweanbny2ua3n > committer: Alexey Botchkov > branch nick: 51mrg > timestamp: Wed 2011-01-12 17:02:41 +0400 > message: > Bug #57321 crashes and valgrind errors from spatial types > Item_func_spatial_collection::fix_length_and_dec didn't call parent's method, so > the maybe_null was set to '0' after it. But in this case the result was > just NULL, that caused wrong behaviour. > > per-file comments: > mysql-test/r/gis.result > Bug #57321 crashes and valgrind errors from spatial types > test result updated. > > mysql-test/t/gis.test > Bug #57321 crashes and valgrind errors from spatial types > test case added. > sql/item_geofunc.h > Bug #57321 crashes and valgrind errors from spatial types > Item_func_geometry::fix_length_and_dec() called in > Item_func_spatial_collection::fix_length_and_dec(). --- mysql-test/r/gis.result | 8 ++++++++ mysql-test/t/gis.test | 10 ++++++++++ sql/item_geofunc.h | 1 + 3 files changed, 19 insertions(+) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 3b18ee61336..f4aa361ffcf 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -1014,4 +1014,12 @@ SET @a=0x00000000030000000100000000000000000000000000144000000000000014400000000 SET @a=POLYFROMWKB(@a); SET @a=0x00000000030000000000000000000000000000000000144000000000000014400000000000001840000000000000184000000000000014400000000000001440; SET @a=POLYFROMWKB(@a); +create table t1(a polygon NOT NULL)engine=myisam; +insert into t1 values (geomfromtext("point(0 1)")); +insert into t1 values (geomfromtext("point(1 0)")); +select * from (select polygon(t1.a) as p from t1 order by t1.a) d; +p +NULL +NULL +drop table t1; End of 5.1 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index fd0a18ab4dd..97fc6f94b6a 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -744,4 +744,14 @@ SET @a=0x00000000030000000000000000000000000000000000144000000000000014400000000 SET @a=POLYFROMWKB(@a); +# +# Bug #57321 crashes and valgrind errors from spatial types +# + +create table t1(a polygon NOT NULL)engine=myisam; +insert into t1 values (geomfromtext("point(0 1)")); +insert into t1 values (geomfromtext("point(1 0)")); +select * from (select polygon(t1.a) as p from t1 order by t1.a) d; +drop table t1; + --echo End of 5.1 tests diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index b3ecbc39933..08161badfd3 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -177,6 +177,7 @@ public: String *val_str(String *); void fix_length_and_dec() { + Item_geometry_func::fix_length_and_dec(); for (unsigned int i= 0; i < arg_count; ++i) { if (args[i]->fixed && args[i]->field_type() != MYSQL_TYPE_GEOMETRY) -- cgit v1.2.1 From 39f85a447681d9f67180c979223e7a571bfd7fd1 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 22:14:11 +0100 Subject: removing EXCEPTIONS-CLIENT --- README | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README b/README index ac92d77f8bd..5c81817e74e 100644 --- a/README +++ b/README @@ -2,8 +2,7 @@ This is a 
release of MySQL, a dual-license SQL DBMS. MySQL is brought to you by the MySQL team at Oracle Corporation. License information can be found in these files: -- For GPL (free) distributions, see the COPYING file and - the EXCEPTIONS-CLIENT file. +- For GPL (free) distributions, see the COPYING file. - For commercial distributions, see the LICENSE.mysql file. GPLv2 Disclaimer -- cgit v1.2.1 From 6b736a7f0fd10b1df4e363b6ec5c9d2399378561 Mon Sep 17 00:00:00 2001 From: MySQL Build Team Date: Wed, 9 Feb 2011 23:07:08 +0100 Subject: adding macro definition for MY_GNUC_PREREQ --- include/my_compiler.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/include/my_compiler.h b/include/my_compiler.h index 1cd46ff4260..5f898621159 100644 --- a/include/my_compiler.h +++ b/include/my_compiler.h @@ -32,8 +32,15 @@ /* GNU C/C++ */ #if defined __GNUC__ +/* Convenience macro to test the minimum required GCC version. */ +# define MY_GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) /* Any after 2.95... */ # define MY_ALIGN_EXT +/* Comunicate to the compiler the unreachability of the code. */ +# if MY_GNUC_PREREQ(4,5) +# define MY_ASSERT_UNREACHABLE() __builtin_unreachable() +# endif /* Microsoft Visual C++ */ #elif defined _MSC_VER @@ -67,8 +74,13 @@ #endif /** - Generic compiler-dependent features. + Generic (compiler-independent) features. */ + +#ifndef MY_GNUC_PREREQ +# define MY_GNUC_PREREQ(maj, min) (0) +#endif + #ifndef MY_ALIGNOF # ifdef __cplusplus template struct my_alignof_helper { char m1; type m2; }; @@ -79,6 +91,10 @@ # endif #endif +#ifndef MY_ASSERT_UNREACHABLE +# define MY_ASSERT_UNREACHABLE() do { assert(0); } while (0) +#endif + /** C++ Type Traits */ -- cgit v1.2.1 From 8aa7e213dc7127bc334087eed5dbcd17e2865a47 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Thu, 10 Feb 2011 13:21:22 +0200 Subject: Increment InnoDB Plugin version from 1.0.15 to 1.0.16. InnoDB Plugin 1.0.15 has been released with MySQL 5.1.55. --- storage/innodb_plugin/include/univ.i | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innodb_plugin/include/univ.i b/storage/innodb_plugin/include/univ.i index 6dd38df3782..22ed765e680 100644 --- a/storage/innodb_plugin/include/univ.i +++ b/storage/innodb_plugin/include/univ.i @@ -46,7 +46,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 1 #define INNODB_VERSION_MINOR 0 -#define INNODB_VERSION_BUGFIX 15 +#define INNODB_VERSION_BUGFIX 16 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; -- cgit v1.2.1 From b5c61ed1f25001e6a967bc2e70273d61bd22bab6 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Thu, 10 Feb 2011 17:16:32 +0200 Subject: Fix Bug#59307 Valgrind: uninitialized value in rw_lock_set_writer_id_and_recursion_flag() by silencing a bogus Valgrind warning: ==4392== Conditional jump or move depends on uninitialised value(s) ==4392== at 0x5A18416: rw_lock_set_writer_id_and_recursion_flag (sync0rw.ic:283) ==4392== by 0x5A1865C: rw_lock_x_lock_low (sync0rw.c:558) ==4392== by 0x5A18481: rw_lock_x_lock_func (sync0rw.c:617) ==4392== by 0x597EEE6: mtr_x_lock_func (mtr0mtr.ic:271) ==4392== by 0x597EBBD: fsp_header_init (fsp0fsp.c:970) ==4392== by 0x5A15E78: innobase_start_or_create_for_mysql (srv0start.c:1508) ==4392== by 0x598B789: innobase_init(void*) (ha_innodb.cc:2282) os_compare_and_swap_thread_id() is defined as __sync_bool_compare_and_swap(). 
From the GCC doc: `bool __sync_bool_compare_and_swap (TYPE *ptr, TYPE oldval TYPE newval, ...)' ... The "bool" version returns true if the comparison is successful and NEWVAL was written. So it is not possible that the return value is uninitialized, no matter what the arguments to os_compare_and_swap_thread_id() are. Probably Valgrind gets confused by the implementation of the GCC internal function __sync_bool_compare_and_swap(). --- storage/innodb_plugin/include/sync0rw.ic | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/innodb_plugin/include/sync0rw.ic b/storage/innodb_plugin/include/sync0rw.ic index 7116f1b7c9b..4110a0a7e0c 100644 --- a/storage/innodb_plugin/include/sync0rw.ic +++ b/storage/innodb_plugin/include/sync0rw.ic @@ -280,6 +280,7 @@ rw_lock_set_writer_id_and_recursion_flag( local_thread = lock->writer_thread; success = os_compare_and_swap_thread_id( &lock->writer_thread, local_thread, curr_thread); + UNIV_MEM_VALID(&success, sizeof(success)); ut_a(success); lock->recursive = recursive; -- cgit v1.2.1 From 867217156d43b6f6f7ace569a093b829cccaf5e3 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 11 Feb 2011 12:09:04 +0200 Subject: version bump to 5.1.57 --- configure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.in b/configure.in index 6d5bc07ba9a..dc944386f22 100644 --- a/configure.in +++ b/configure.in @@ -12,7 +12,7 @@ dnl dnl When changing the major version number please also check the switch dnl statement in mysqlbinlog::check_master_version(). You may also need dnl to update version.c in ndb. -AC_INIT([MySQL Server], [5.1.56], [], [mysql]) +AC_INIT([MySQL Server], [5.1.57], [], [mysql]) AC_CONFIG_SRCDIR([sql/mysqld.cc]) AC_CANONICAL_SYSTEM -- cgit v1.2.1 From c278961c335c08804a76f7d95f05b7c46120a97f Mon Sep 17 00:00:00 2001 From: Jonathan Perkin Date: Fri, 11 Feb 2011 11:32:03 +0100 Subject: Raise version number after cloning 5.1.56 --- configure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.in b/configure.in index 6d5bc07ba9a..dc944386f22 100644 --- a/configure.in +++ b/configure.in @@ -12,7 +12,7 @@ dnl dnl When changing the major version number please also check the switch dnl statement in mysqlbinlog::check_master_version(). You may also need dnl to update version.c in ndb. -AC_INIT([MySQL Server], [5.1.56], [], [mysql]) +AC_INIT([MySQL Server], [5.1.57], [], [mysql]) AC_CONFIG_SRCDIR([sql/mysqld.cc]) AC_CANONICAL_SYSTEM -- cgit v1.2.1 From 4a8c83574626f4107ca84964f4c1faa13176eff1 Mon Sep 17 00:00:00 2001 From: Jimmy Yang Date: Mon, 14 Feb 2011 02:07:59 -0800 Subject: Fix Bug #59749 Enabling concurrent reads while creating non-primary unique index gives failures. 
Approved by Marko --- storage/innodb_plugin/ChangeLog | 6 ++++++ storage/innodb_plugin/handler/handler0alter.cc | 12 ++++++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index 0cbdc8ed9d2..9fa00ac8e6f 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,9 @@ +2011-02-14 The InnoDB Team + + * handler/handler0alter.cc: + Bug#59749 Enabling concurrent reads while creating non-primary + unique index gives failures + 2011-01-31 The InnoDB Team * btr/btr0cur.c, include/row0upd.h, diff --git a/storage/innodb_plugin/handler/handler0alter.cc b/storage/innodb_plugin/handler/handler0alter.cc index 517445f7e69..dc1317d5c5a 100644 --- a/storage/innodb_plugin/handler/handler0alter.cc +++ b/storage/innodb_plugin/handler/handler0alter.cc @@ -782,10 +782,6 @@ err_exit: ut_ad(error == DB_SUCCESS); - /* We will need to rebuild index translation table. Set - valid index entry count in the translation table to zero */ - share->idx_trans_tbl.index_count = 0; - /* Commit the data dictionary transaction in order to release the table locks on the system tables. This means that if MySQL crashes while creating a new primary key inside @@ -911,6 +907,14 @@ error: } convert_error: + if (error == DB_SUCCESS) { + /* Build index is successful. We will need to + rebuild index translation table. Reset the + index entry count in the translation table + to zero, so that translation table will be rebuilt */ + share->idx_trans_tbl.index_count = 0; + } + error = convert_error_code_to_mysql(error, innodb_table->flags, user_thd); -- cgit v1.2.1 From 0efaef7d469eb6decdd8cf17057154914a10fd41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 15 Feb 2011 10:51:33 +0200 Subject: Bug#59307 Valgrind: uninitialized value in rw_lock_set_writer_id_and_recursion_flag() rw_lock_create_func(): Initialize lock->writer_thread, so that Valgrind will not complain even when Valgrind instrumentation is not enabled. Flag lock->writer_thread uninitialized, so that Valgrind can complain when it is used uninitialized. rw_lock_set_writer_id_and_recursion_flag(): Revert the bogus Valgrind instrumentation that was pushed in the first attempt to fix this bug. 
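The approach described above, zero-fill the field so that builds without Valgrind client requests never read uninitialized bytes, then re-flag it as undefined only when instrumentation is compiled in so that real misuse is still caught, can be sketched outside InnoDB roughly as follows. The HAVE_VALGRIND guard, the MARK_UNDEFINED macro and the rw_lock_sketch type are assumptions made for illustration, not server code.

  #include <cstring>

  #ifdef HAVE_VALGRIND                     /* hypothetical build flag */
  # include <valgrind/memcheck.h>
  # define MARK_UNDEFINED(addr, len) VALGRIND_MAKE_MEM_UNDEFINED(addr, len)
  #else
  # define MARK_UNDEFINED(addr, len) do {} while (0)
  #endif

  struct rw_lock_sketch                    // stand-in, not InnoDB's rw_lock_t
  {
    unsigned long writer_thread;           // stand-in for os_thread_id_t
    bool recursive;
  };

  void rw_lock_create_sketch(rw_lock_sketch *lock)
  {
    lock->recursive= false;
    // Zero-fill: non-instrumented builds no longer make a conditional jump
    // on uninitialized bytes inside the compare-and-swap.
    std::memset(&lock->writer_thread, 0, sizeof lock->writer_thread);
    // Instrumented builds re-flag the field as undefined, so reading it
    // before an x-lock has set it is still reported as a genuine bug.
    MARK_UNDEFINED(&lock->writer_thread, sizeof lock->writer_thread);
  }

  int main()
  {
    rw_lock_sketch lock;
    rw_lock_create_sketch(&lock);
    return 0;
  }
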
--- .../suite/innodb_plugin/r/innodb_bug59307.result | 28 +++++++++++++++++++ .../suite/innodb_plugin/t/innodb_bug59307.test | 32 ++++++++++++++++++++++ storage/innodb_plugin/ChangeLog | 6 ++++ storage/innodb_plugin/include/sync0rw.ic | 1 - storage/innodb_plugin/sync/sync0rw.c | 3 ++ 5 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 mysql-test/suite/innodb_plugin/r/innodb_bug59307.result create mode 100644 mysql-test/suite/innodb_plugin/t/innodb_bug59307.test diff --git a/mysql-test/suite/innodb_plugin/r/innodb_bug59307.result b/mysql-test/suite/innodb_plugin/r/innodb_bug59307.result new file mode 100644 index 00000000000..0d726e83708 --- /dev/null +++ b/mysql-test/suite/innodb_plugin/r/innodb_bug59307.result @@ -0,0 +1,28 @@ +CREATE TABLE t1 ( +t1_int INT, +t1_time TIME +) ENGINE=innodb; +CREATE TABLE t2 ( +t2_int int PRIMARY KEY, +t2_int2 INT +) ENGINE=INNODB; +INSERT INTO t2 VALUES (); +Warnings: +Warning 1364 Field 't2_int' doesn't have a default value +INSERT INTO t1 VALUES (); +SELECT * +FROM t1 AS t1a +WHERE NOT EXISTS +(SELECT * +FROM t1 AS t1b +WHERE t1b.t1_int NOT IN +(SELECT t2.t2_int +FROM t2 +WHERE t1b.t1_time LIKE t1b.t1_int +OR t1b.t1_time <> t2.t2_int2 +AND 6=7 +) +) +; +t1_int t1_time +DROP TABLE t1,t2; diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug59307.test b/mysql-test/suite/innodb_plugin/t/innodb_bug59307.test new file mode 100644 index 00000000000..9c68adf36cf --- /dev/null +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug59307.test @@ -0,0 +1,32 @@ +-- source include/have_innodb_plugin.inc +# Bug #59307 uninitialized value in rw_lock_set_writer_id_and_recursion_flag() +# when Valgrind instrumentation (UNIV_DEBUG_VALGRIND) is not enabled + +CREATE TABLE t1 ( + t1_int INT, + t1_time TIME +) ENGINE=innodb; + +CREATE TABLE t2 ( + t2_int int PRIMARY KEY, + t2_int2 INT +) ENGINE=INNODB; + +INSERT INTO t2 VALUES (); +INSERT INTO t1 VALUES (); + +SELECT * +FROM t1 AS t1a +WHERE NOT EXISTS + (SELECT * + FROM t1 AS t1b + WHERE t1b.t1_int NOT IN + (SELECT t2.t2_int + FROM t2 + WHERE t1b.t1_time LIKE t1b.t1_int + OR t1b.t1_time <> t2.t2_int2 + AND 6=7 + ) +) +; +DROP TABLE t1,t2; diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index 9fa00ac8e6f..1b2747ab012 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,9 @@ +2011-02-15 The InnoDB Team + + * sync/sync0rw.c, innodb_bug59307.test: + Bug#59307 Valgrind: uninitialized value in + rw_lock_set_writer_id_and_recursion_flag() + 2011-02-14 The InnoDB Team * handler/handler0alter.cc: diff --git a/storage/innodb_plugin/include/sync0rw.ic b/storage/innodb_plugin/include/sync0rw.ic index 4110a0a7e0c..7116f1b7c9b 100644 --- a/storage/innodb_plugin/include/sync0rw.ic +++ b/storage/innodb_plugin/include/sync0rw.ic @@ -280,7 +280,6 @@ rw_lock_set_writer_id_and_recursion_flag( local_thread = lock->writer_thread; success = os_compare_and_swap_thread_id( &lock->writer_thread, local_thread, curr_thread); - UNIV_MEM_VALID(&success, sizeof(success)); ut_a(success); lock->recursive = recursive; diff --git a/storage/innodb_plugin/sync/sync0rw.c b/storage/innodb_plugin/sync/sync0rw.c index 00e0324becd..a5da606ad80 100644 --- a/storage/innodb_plugin/sync/sync0rw.c +++ b/storage/innodb_plugin/sync/sync0rw.c @@ -260,6 +260,9 @@ rw_lock_create_func( contains garbage at initialization and cannot be used for recursive x-locking. */ lock->recursive = FALSE; + /* Silence Valgrind when UNIV_DEBUG_VALGRIND is not enabled. 
*/ + memset((void*) &lock->writer_thread, 0, sizeof lock->writer_thread); + UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread); #ifdef UNIV_SYNC_DEBUG UT_LIST_INIT(lock->debug_list); -- cgit v1.2.1 From cb884043b6ea225c0093c2ae5dba7575214fac49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 15 Feb 2011 12:12:27 +0200 Subject: Add a test for suspected Bug#60049. --- mysql-test/suite/innodb/r/innodb_bug60049.result | 8 +++++ .../suite/innodb/t/innodb_bug60049-master.opt | 1 + mysql-test/suite/innodb/t/innodb_bug60049.test | 38 ++++++++++++++++++++++ .../suite/innodb_plugin/r/innodb_bug60049.result | 8 +++++ .../innodb_plugin/t/innodb_bug60049-master.opt | 1 + .../suite/innodb_plugin/t/innodb_bug60049.test | 38 ++++++++++++++++++++++ 6 files changed, 94 insertions(+) create mode 100644 mysql-test/suite/innodb/r/innodb_bug60049.result create mode 100644 mysql-test/suite/innodb/t/innodb_bug60049-master.opt create mode 100644 mysql-test/suite/innodb/t/innodb_bug60049.test create mode 100644 mysql-test/suite/innodb_plugin/r/innodb_bug60049.result create mode 100644 mysql-test/suite/innodb_plugin/t/innodb_bug60049-master.opt create mode 100644 mysql-test/suite/innodb_plugin/t/innodb_bug60049.test diff --git a/mysql-test/suite/innodb/r/innodb_bug60049.result b/mysql-test/suite/innodb/r/innodb_bug60049.result new file mode 100644 index 00000000000..bec0e05a897 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_bug60049.result @@ -0,0 +1,8 @@ +CREATE TABLE t(a INT)ENGINE=InnoDB; +RENAME TABLE t TO u; +DROP TABLE u; +SELECT @@innodb_fast_shutdown; +@@innodb_fast_shutdown +0 +Last record of ID_IND root page (9): +1808000018050074000000000000000c5359535f464f524549474e5f434f4c53 diff --git a/mysql-test/suite/innodb/t/innodb_bug60049-master.opt b/mysql-test/suite/innodb/t/innodb_bug60049-master.opt new file mode 100644 index 00000000000..22a5d4ed221 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_bug60049-master.opt @@ -0,0 +1 @@ +--innodb_fast_shutdown=0 diff --git a/mysql-test/suite/innodb/t/innodb_bug60049.test b/mysql-test/suite/innodb/t/innodb_bug60049.test new file mode 100644 index 00000000000..1e0feaf0c89 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_bug60049.test @@ -0,0 +1,38 @@ +# Bug #60049 Verify that purge leaves no garbage in unique secondary indexes +# This test requires a fresh server start-up and a slow shutdown. +# This was a suspected bug (not a bug). + +-- source include/have_innodb.inc + +CREATE TABLE t(a INT)ENGINE=InnoDB; +RENAME TABLE t TO u; +DROP TABLE u; +SELECT @@innodb_fast_shutdown; +let $MYSQLD_DATADIR=`select @@datadir`; + +# Shut down the server +-- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- shutdown_server 10 +-- source include/wait_until_disconnected.inc + +# Check the tail of ID_IND (SYS_TABLES.ID) +let IBDATA1=$MYSQLD_DATADIR/ibdata1; +perl; +my $file = $ENV{'IBDATA1'}; +open(FILE, "<$file") || die "Unable to open $file"; +# Read DICT_HDR_TABLE_IDS, the root page number of ID_IND (SYS_TABLES.ID). +seek(FILE, 7*16384+38+36, 0) || die "Unable to seek $file"; +die unless read(FILE, $_, 4) == 4; +my $sys_tables_id_root = unpack "N"; +print "Last record of ID_IND root page ($sys_tables_id_root):\n"; +# This should be the last record in ID_IND. Dump it in hexadecimal. +seek(FILE, $sys_tables_id_root*16384 + 152, 0) || die "Unable to seek $file"; +read(FILE, $_, 32) || die "Unable to read $file"; +close(FILE); +print unpack("H*"),"\n"; +EOF + +# Restart the server. 
+-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc diff --git a/mysql-test/suite/innodb_plugin/r/innodb_bug60049.result b/mysql-test/suite/innodb_plugin/r/innodb_bug60049.result new file mode 100644 index 00000000000..bec0e05a897 --- /dev/null +++ b/mysql-test/suite/innodb_plugin/r/innodb_bug60049.result @@ -0,0 +1,8 @@ +CREATE TABLE t(a INT)ENGINE=InnoDB; +RENAME TABLE t TO u; +DROP TABLE u; +SELECT @@innodb_fast_shutdown; +@@innodb_fast_shutdown +0 +Last record of ID_IND root page (9): +1808000018050074000000000000000c5359535f464f524549474e5f434f4c53 diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug60049-master.opt b/mysql-test/suite/innodb_plugin/t/innodb_bug60049-master.opt new file mode 100644 index 00000000000..22a5d4ed221 --- /dev/null +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug60049-master.opt @@ -0,0 +1 @@ +--innodb_fast_shutdown=0 diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test new file mode 100644 index 00000000000..0c093ac94b1 --- /dev/null +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test @@ -0,0 +1,38 @@ +# Bug #60049 Verify that purge leaves no garbage in unique secondary indexes +# This test requires a fresh server start-up and a slow shutdown. +# This was a suspected bug (not a bug). + +-- source include/have_innodb_plugin.inc + +CREATE TABLE t(a INT)ENGINE=InnoDB; +RENAME TABLE t TO u; +DROP TABLE u; +SELECT @@innodb_fast_shutdown; +let $MYSQLD_DATADIR=`select @@datadir`; + +# Shut down the server +-- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- shutdown_server 10 +-- source include/wait_until_disconnected.inc + +# Check the tail of ID_IND (SYS_TABLES.ID) +let IBDATA1=$MYSQLD_DATADIR/ibdata1; +perl; +my $file = $ENV{'IBDATA1'}; +open(FILE, "<$file") || die "Unable to open $file"; +# Read DICT_HDR_TABLE_IDS, the root page number of ID_IND (SYS_TABLES.ID). +seek(FILE, 7*16384+38+36, 0) || die "Unable to seek $file"; +die unless read(FILE, $_, 4) == 4; +my $sys_tables_id_root = unpack "N"; +print "Last record of ID_IND root page ($sys_tables_id_root):\n"; +# This should be the last record in ID_IND. Dump it in hexadecimal. +seek(FILE, $sys_tables_id_root*16384 + 152, 0) || die "Unable to seek $file"; +read(FILE, $_, 32) || die "Unable to read $file"; +close(FILE); +print unpack("H*"),"\n"; +EOF + +# Restart the server. +-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc -- cgit v1.2.1 From 9c89cca5e364310928bde10287a56128378c107c Mon Sep 17 00:00:00 2001 From: Dmitry Lenev Date: Tue, 15 Feb 2011 14:03:05 +0300 Subject: Fix for bug#11766714 (former bug @59888) "debug assertion when attempt to create spatial index on char > 31 bytes". Attempt to create spatial index on char field with length greater than 31 byte led to assertion failure on server compiled with safemutex support. The problem occurred in mi_create() function which was called to create a new version of table being altered. This function failed since it detected an attempt to create a spatial key on non-binary column and tried to return an error. On its error path it tried to unlock THR_LOCK_myisam mutex which has not been not locked at this point. Indeed such an incorrect behavior was caught by safemutex wrapper and caused assertion failure. 
This patch fixes the problem by ensuring that mi_create() doesn't releases THR_LOCK_myisam mutex on error path if it was not acquired. mysql-test/r/gis.result: Added test for bug @59888 "debug assertion when attempt to create spatial index on char > 31 bytes". mysql-test/t/gis.test: Added test for bug @59888 "debug assertion when attempt to create spatial index on char > 31 bytes". storage/myisam/mi_create.c: Changed mi_create() not to release THR_LOCK_myisam mutex on error path if it was not acquired. --- mysql-test/r/gis.result | 8 ++++++++ mysql-test/t/gis.test | 12 ++++++++++++ storage/myisam/mi_create.c | 16 +++++++++------- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index a9beb9631ae..d82a86a6423 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -1034,4 +1034,12 @@ p NULL NULL drop table t1; +# +# Test for bug #59888 "debug assertion when attempt to create spatial index +# on char > 31 bytes". +# +create table t1(a char(32) not null) engine=myisam; +create spatial index i on t1 (a); +ERROR HY000: Can't create table '#sql-temporary' (errno: 140) +drop table t1; End of 5.1 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index bdbbfc7c064..94cec60944a 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -754,4 +754,16 @@ insert into t1 values (geomfromtext("point(1 0)")); select * from (select polygon(t1.a) as p from t1 order by t1.a) d; drop table t1; + +--echo # +--echo # Test for bug #59888 "debug assertion when attempt to create spatial index +--echo # on char > 31 bytes". +--echo # +create table t1(a char(32) not null) engine=myisam; +--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ +--error ER_CANT_CREATE_TABLE +create spatial index i on t1 (a); +drop table t1; + + --echo End of 5.1 tests diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c index 42bd8e26a94..8c83996cadf 100644 --- a/storage/myisam/mi_create.c +++ b/storage/myisam/mi_create.c @@ -272,7 +272,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, keyseg->type != HA_KEYTYPE_VARBINARY2) { my_errno=HA_WRONG_CREATE_OPTION; - goto err; + goto err_no_lock; } } keydef->keysegs+=sp_segs; @@ -281,7 +281,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, min_key_length_skip+=SPLEN*2*SPDIMS; #else my_errno= HA_ERR_UNSUPPORTED; - goto err; + goto err_no_lock; #endif /*HAVE_SPATIAL*/ } else if (keydef->flag & HA_FULLTEXT) @@ -297,7 +297,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, keyseg->type != HA_KEYTYPE_VARTEXT2) { my_errno=HA_WRONG_CREATE_OPTION; - goto err; + goto err_no_lock; } if (!(keyseg->flag & HA_BLOB_PART) && (keyseg->type == HA_KEYTYPE_VARTEXT1 || @@ -422,7 +422,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, if (keydef->keysegs > MI_MAX_KEY_SEG) { my_errno=HA_WRONG_CREATE_OPTION; - goto err; + goto err_no_lock; } /* key_segs may be 0 in the case when we only want to be able to @@ -447,7 +447,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, length >= MI_MAX_KEY_BUFF) { my_errno=HA_WRONG_CREATE_OPTION; - goto err; + goto err_no_lock; } set_if_bigger(max_key_block_length,keydef->block_length); keydef->keylength= (uint16) key_length; @@ -494,7 +494,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, "indexes and/or unique constraints.", MYF(0), name + dirname_length(name)); my_errno= HA_WRONG_CREATE_OPTION; - goto err; + goto err_no_lock; } 
bmove(share.state.header.file_version,(uchar*) myisam_file_magic,4); @@ -827,12 +827,14 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, errpos=0; pthread_mutex_unlock(&THR_LOCK_myisam); if (my_close(file,MYF(0))) - goto err; + goto err_no_lock; my_free((char*) rec_per_key_part,MYF(0)); DBUG_RETURN(0); err: pthread_mutex_unlock(&THR_LOCK_myisam); + +err_no_lock: save_errno=my_errno; switch (errpos) { case 3: -- cgit v1.2.1 From 6459e2c3e1e53c5ead7e8420dc3ccc2cf8015e88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 16 Feb 2011 15:34:16 +0200 Subject: Make the implicit unpack parameter explicit in the Bug #60049 test. --- mysql-test/suite/innodb/t/innodb_bug60049.test | 4 ++-- mysql-test/suite/innodb_plugin/t/innodb_bug60049.test | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/innodb/t/innodb_bug60049.test b/mysql-test/suite/innodb/t/innodb_bug60049.test index 1e0feaf0c89..cff1c3dc09d 100644 --- a/mysql-test/suite/innodb/t/innodb_bug60049.test +++ b/mysql-test/suite/innodb/t/innodb_bug60049.test @@ -23,13 +23,13 @@ open(FILE, "<$file") || die "Unable to open $file"; # Read DICT_HDR_TABLE_IDS, the root page number of ID_IND (SYS_TABLES.ID). seek(FILE, 7*16384+38+36, 0) || die "Unable to seek $file"; die unless read(FILE, $_, 4) == 4; -my $sys_tables_id_root = unpack "N"; +my $sys_tables_id_root = unpack("N", $_); print "Last record of ID_IND root page ($sys_tables_id_root):\n"; # This should be the last record in ID_IND. Dump it in hexadecimal. seek(FILE, $sys_tables_id_root*16384 + 152, 0) || die "Unable to seek $file"; read(FILE, $_, 32) || die "Unable to read $file"; close(FILE); -print unpack("H*"),"\n"; +print unpack("H*", $_), "\n"; EOF # Restart the server. diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test index 0c093ac94b1..b3557243235 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test @@ -23,13 +23,13 @@ open(FILE, "<$file") || die "Unable to open $file"; # Read DICT_HDR_TABLE_IDS, the root page number of ID_IND (SYS_TABLES.ID). seek(FILE, 7*16384+38+36, 0) || die "Unable to seek $file"; die unless read(FILE, $_, 4) == 4; -my $sys_tables_id_root = unpack "N"; +my $sys_tables_id_root = unpack("N", $_); print "Last record of ID_IND root page ($sys_tables_id_root):\n"; # This should be the last record in ID_IND. Dump it in hexadecimal. seek(FILE, $sys_tables_id_root*16384 + 152, 0) || die "Unable to seek $file"; read(FILE, $_, 32) || die "Unable to read $file"; close(FILE); -print unpack("H*"),"\n"; +print unpack("H*", $_), "\n"; EOF # Restart the server. -- cgit v1.2.1 From 6ad0c9b16daccb64ec34dffc8a21e73ca9c6073d Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Wed, 16 Feb 2011 16:26:19 +0100 Subject: Bug #11752069 (former bug 43152) Assertion `bitmap_is_set_all(&table->s->all_set)' failed in handler::ha_reset This assertion could be triggered if two connections simultaneously executed two bitmap test functions on the same bitmap. For example, the assertion could be triggered if one connection executed UPDATE while a second connection executed SELECT on the same table. Even if bitmap test functions have read-only semantics and have const bitmaps as parameter, several of them modified the internal state of the bitmap. 
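(In miniature, using a hypothetical bitmap_sketch type rather than the real MY_BITMAP API: the map tracks a mask for the unused tail of its last 32-bit word, and a genuinely read-only predicate has to apply that mask to a local value instead of patching the shared word, which is the pattern the rewritten functions below follow.)

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  struct bitmap_sketch
  {
    const uint32_t *words;     // 32 bits of storage per word
    size_t          n_words;   // number of storage words
    uint32_t last_word_mask;   // 1-bits mark the unused tail of the last word
  };

  // Read-only test in the spirit of the fixed bitmap_is_clear_all(): the mask
  // is applied to a local copy of the last word, never to the shared storage.
  bool is_clear_all(const bitmap_sketch *map)
  {
    for (size_t i= 0; i + 1 < map->n_words; i++)
      if (map->words[i])
        return false;
    return (map->words[map->n_words - 1] & ~map->last_word_mask) == 0;
  }

  int main()
  {
    // A 40-bit map: bits 8..31 of the second word are unused and may contain
    // garbage without affecting the answer or being overwritten.
    uint32_t storage[2]= { 0, 0xABCDEF00 };
    bitmap_sketch map= { storage, 2, 0xFFFFFF00 };
    std::printf("clear: %s\n", is_clear_all(&map) ? "yes" : "no"); // clear: yes
    return 0;
  }
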
With interleaved execution of two such functions it was possible for one function to modify the state of the same bitmap that the other function had just modified. This lead to an inconsistent state and could trigger the assert. Internally the bitmap uses 32 bit words for storage. Since bitmaps can contain any number of bits, the last word in the bitmap may not be fully used. A 32 bit mask is maintained where a bit is set if the corresponding bit in the last bitmap word is unused. The problem was that several test functions applied this mask to the last word. Sometimes the mask was negated and used to zero out the remainder of the last word and sometimes the mask was used as-is to fill the remainder of the last word with 1's. This meant that if a function first used the negated mask and another function then used the mask as-is (or vice-versa), the first function would then get the wrong result. This patch fixes the problem by changing the implementation of 9 bitmap functions that modified the bitmap state even if the bitmap was declared const. These functions now preserve the internal state of the bitmap. This makes it possible for two connections to concurrently execute two of these functions on the same bitmap without issues. The patch also removes dead testing code from my_bitmap.c. These tests have already been moved to unittest/mysys/bitmap-t.c. Existing test coverage of my_bitmap has been extended. No MTR test case added as this would require adding several sync points to the bitmap functions. The patch has been tested with a non-deterministic test case posted on the bug report. include/my_bit.h: Removed my_count_bits_ushort() which is not needed anymore. Added my_count_bits_uint32(). unittest/mysys/bitmap-t.c: Extended test coverage of my_bitmap. --- include/my_bit.h | 24 +- include/my_bitmap.h | 9 +- mysys/my_bitmap.c | 635 +++++++++++----------------------------------- unittest/mysys/bitmap-t.c | 173 ++++++++++++- 4 files changed, 331 insertions(+), 510 deletions(-) diff --git a/include/my_bit.h b/include/my_bit.h index 2e464e89049..2ab47b04184 100644 --- a/include/my_bit.h +++ b/include/my_bit.h @@ -1,3 +1,18 @@ +/* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + /* Some useful bit functions */ @@ -42,9 +57,12 @@ STATIC_INLINE uint my_count_bits(ulonglong v) #endif } -STATIC_INLINE uint my_count_bits_ushort(ushort v) +STATIC_INLINE uint my_count_bits_uint32(uint32 v) { - return _my_bits_nbits[v]; + return (uint) (uchar) (_my_bits_nbits[(uchar) v] + + _my_bits_nbits[(uchar) (v >> 8)] + + _my_bits_nbits[(uchar) (v >> 16)] + + _my_bits_nbits[(uchar) (v >> 24)]); } @@ -104,6 +122,6 @@ extern uint32 my_round_up_to_next_power(uint32 v); uint32 my_clear_highest_bit(uint32 v); uint32 my_reverse_bits(uint32 key); extern uint my_count_bits(ulonglong v); -extern uint my_count_bits_ushort(ushort v); +extern uint my_count_bits_uint32(uint32 v); #endif /* HAVE_INLINE */ C_MODE_END diff --git a/include/my_bitmap.h b/include/my_bitmap.h index ab69b2d671d..42f985c8918 100644 --- a/include/my_bitmap.h +++ b/include/my_bitmap.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -149,9 +149,10 @@ bitmap_is_set(const MY_BITMAP *map,uint bit) static inline my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2) { - *(map1)->last_word_ptr|= (map1)->last_word_mask; - *(map2)->last_word_ptr|= (map2)->last_word_mask; - return memcmp((map1)->bitmap, (map2)->bitmap, 4*no_words_in_map((map1)))==0; + if (memcmp(map1->bitmap, map2->bitmap, 4*(no_words_in_map(map1)-1)) != 0) + return FALSE; + return ((*map1->last_word_ptr | map1->last_word_mask) == + (*map2->last_word_ptr | map2->last_word_mask)); } #define bitmap_clear_all(MAP) \ diff --git a/mysys/my_bitmap.c b/mysys/my_bitmap.c index b7258080337..3d3ab16b599 100644 --- a/mysys/my_bitmap.c +++ b/mysys/my_bitmap.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2000 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -91,6 +91,7 @@ static inline void bitmap_lock(MY_BITMAP *map __attribute__((unused))) #endif } + static inline void bitmap_unlock(MY_BITMAP *map __attribute__((unused))) { #ifdef THREAD @@ -100,6 +101,46 @@ static inline void bitmap_unlock(MY_BITMAP *map __attribute__((unused))) } +static inline uint get_first_set(uint32 value, uint word_pos) +{ + uchar *byte_ptr= (uchar*)&value; + uchar byte_value; + uint byte_pos, bit_pos; + + for (byte_pos=0; byte_pos < 4; byte_pos++, byte_ptr++) + { + byte_value= *byte_ptr; + if (byte_value) + { + for (bit_pos=0; ; bit_pos++) + if (byte_value & (1 << bit_pos)) + return (word_pos*32) + (byte_pos*8) + bit_pos; + } + } + return MY_BIT_NONE; +} + + +static inline uint get_first_not_set(uint32 value, uint word_pos) +{ + uchar *byte_ptr= (uchar*)&value; + uchar byte_value; + uint byte_pos, bit_pos; + + for (byte_pos=0; byte_pos < 4; byte_pos++, byte_ptr++) + { + byte_value= *byte_ptr; + if (byte_value != 0xFF) + { + for (bit_pos=0; ; bit_pos++) + if (!(byte_value & (1 << bit_pos))) + return (word_pos*32) + (byte_pos*8) + bit_pos; + } + } + return MY_BIT_NONE; +} + + my_bool bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits, my_bool thread_safe __attribute__((unused))) { @@ -259,7 +300,7 @@ void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size) memset(m, 0xff, prefix_bytes); m+= prefix_bytes; if ((prefix_bits= prefix_size & 7)) - *m++= (1 << prefix_bits)-1; + *(m++)= (1 << prefix_bits)-1; if ((d= no_bytes_in_map(map)-prefix_bytes)) bzero(m, d); } @@ -267,28 +308,43 @@ void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size) my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size) { - uint prefix_bits= prefix_size & 0x7, res; - uchar *m= (uchar*)map->bitmap; - uchar *end_prefix= m+prefix_size/8; - uchar *end; - DBUG_ASSERT(m && prefix_size <= map->n_bits); - end= m+no_bytes_in_map(map); - - while (m < end_prefix) - if (*m++ != 0xff) - return 0; - - *map->last_word_ptr&= ~map->last_word_mask; /*Clear bits*/ - res= 0; - if (prefix_bits && *m++ != (1 << prefix_bits)-1) - goto ret; - - while (m < end) - if (*m++ != 0) - goto ret; - res= 1; -ret: - return res; + uint prefix_bits= prefix_size % 32; + my_bitmap_map *word_ptr= map->bitmap, last_word; + my_bitmap_map *end_prefix= word_ptr + prefix_size / 32; + DBUG_ASSERT(word_ptr && prefix_size <= map->n_bits); + + /* 1: Words that should be filled with 1 */ + for (; word_ptr < end_prefix; word_ptr++) + if (*word_ptr != 0xFFFFFFFF) + return FALSE; + + last_word= *map->last_word_ptr & ~map->last_word_mask; + + /* 2: Word which contains the end of the prefix (if any) */ + if (prefix_bits) + { + if (word_ptr == map->last_word_ptr) + return uint4korr((uchar*)&last_word) == (uint32)((1 << prefix_bits) - 1); + else if (uint4korr((uchar*)word_ptr) != (uint32)((1 << prefix_bits) - 1)) + return FALSE; + word_ptr++; + } + + /* 3: Words that should be filled with 0 */ + for (; word_ptr < map->last_word_ptr; word_ptr++) + if (*word_ptr != 0) + return FALSE; + + /* + We can end up here in two situations: + 1) We went through the whole bitmap in step 1. This will happen if the + whole bitmap is filled with 1 and prefix_size is a multiple of 32 + (i.e. the prefix does not end in the middle of a word). + In this case word_ptr will be larger than map->last_word_ptr. + 2) We have gone through steps 1-3 and just need to check that also + the last word is 0. 
+ */ + return word_ptr > map->last_word_ptr || last_word == 0; } @@ -296,10 +352,12 @@ my_bool bitmap_is_set_all(const MY_BITMAP *map) { my_bitmap_map *data_ptr= map->bitmap; my_bitmap_map *end= map->last_word_ptr; - *map->last_word_ptr |= map->last_word_mask; - for (; data_ptr <= end; data_ptr++) + + for (; data_ptr < end; data_ptr++) if (*data_ptr != 0xFFFFFFFF) return FALSE; + if ((*map->last_word_ptr | map->last_word_mask) != 0xFFFFFFFF) + return FALSE; return TRUE; } @@ -307,13 +365,13 @@ my_bool bitmap_is_set_all(const MY_BITMAP *map) my_bool bitmap_is_clear_all(const MY_BITMAP *map) { my_bitmap_map *data_ptr= map->bitmap; - my_bitmap_map *end; - if (*map->last_word_ptr & ~map->last_word_mask) - return FALSE; - end= map->last_word_ptr; + my_bitmap_map *end= map->last_word_ptr; + for (; data_ptr < end; data_ptr++) if (*data_ptr) return FALSE; + if (*map->last_word_ptr & ~map->last_word_mask) + return FALSE; return TRUE; } @@ -327,14 +385,14 @@ my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2) map1->n_bits==map2->n_bits); end= map1->last_word_ptr; - *map1->last_word_ptr &= ~map1->last_word_mask; - *map2->last_word_ptr &= ~map2->last_word_mask; - while (m1 <= end) - { - if ((*m1++) & ~(*m2++)) - return 0; - } - return 1; + for (; m1 < end; m1++, m2++) + if (*m1 & ~(*m2)) + return FALSE; + + if ((*map1->last_word_ptr & ~map1->last_word_mask) & + ~(*map2->last_word_ptr & ~map2->last_word_mask)) + return FALSE; + return TRUE; } /* True if bitmaps has any common bits */ @@ -347,14 +405,14 @@ my_bool bitmap_is_overlapping(const MY_BITMAP *map1, const MY_BITMAP *map2) map1->n_bits==map2->n_bits); end= map1->last_word_ptr; - *map1->last_word_ptr &= ~map1->last_word_mask; - *map2->last_word_ptr &= ~map2->last_word_mask; - while (m1 <= end) - { - if ((*m1++) & (*m2++)) - return 1; - } - return 0; + for (; m1 < end; m1++, m2++) + if (*m1 & *m2) + return TRUE; + + if ((*map1->last_word_ptr & ~map1->last_word_mask) & + (*map2->last_word_ptr & ~map2->last_word_mask)) + return TRUE; + return FALSE; } @@ -366,15 +424,17 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2) DBUG_ASSERT(map->bitmap && map2->bitmap); end= to+min(len,len2); - *map2->last_word_ptr&= ~map2->last_word_mask; /*Clear last bits in map2*/ - while (to < end) - *to++ &= *from++; + for (; to < end; to++, from++) + *to &= *from; + + if (len >= len2) + map->bitmap[len2 - 1] &= ~map2->last_word_mask; if (len2 < len) { end+=len-len2; - while (to < end) - *to++=0; + for (; to < end; to++) + *to= 0; } } @@ -405,8 +465,8 @@ void bitmap_set_above(MY_BITMAP *map, uint from_byte, uint use_bit) uchar *to= (uchar *)map->bitmap + from_byte; uchar *end= (uchar *)map->bitmap + (map->n_bits+7)/8; - while (to < end) - *to++= use_byte; + for (; to < end; to++) + *to= use_byte; } @@ -415,59 +475,60 @@ void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2) my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; DBUG_ASSERT(map->bitmap && map2->bitmap && map->n_bits==map2->n_bits); - end= map->last_word_ptr; - while (to <= end) - *to++ &= ~(*from++); + for (; to <= end; to++, from++) + *to &= ~(*from); } void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2) { my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; - DBUG_ASSERT(map->bitmap && map2->bitmap && map->n_bits==map2->n_bits); end= map->last_word_ptr; - while (to <= end) - *to++ |= *from++; + for (; to <= end; to++, from++) + *to |= *from; } void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2) { - my_bitmap_map *to= map->bitmap, *from= 
map2->bitmap, *end= map->last_word_ptr; + my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; DBUG_ASSERT(map->bitmap && map2->bitmap && map->n_bits==map2->n_bits); - while (to <= end) - *to++ ^= *from++; + end= map->last_word_ptr; + + for (; to <= end; to++, from++) + *to ^= *from; } void bitmap_invert(MY_BITMAP *map) { my_bitmap_map *to= map->bitmap, *end; - DBUG_ASSERT(map->bitmap); end= map->last_word_ptr; - while (to <= end) - *to++ ^= 0xFFFFFFFF; + for (; to <= end; to++) + *to ^= 0xFFFFFFFF; } uint bitmap_bits_set(const MY_BITMAP *map) -{ - uchar *m= (uchar*)map->bitmap; - uchar *end= m + no_bytes_in_map(map); +{ + my_bitmap_map *data_ptr= map->bitmap; + my_bitmap_map *end= map->last_word_ptr; uint res= 0; - DBUG_ASSERT(map->bitmap); - *map->last_word_ptr&= ~map->last_word_mask; /*Reset last bits to zero*/ - while (m < end) - res+= my_count_bits_ushort(*m++); + + for (; data_ptr < end; data_ptr++) + res+= my_count_bits_uint32(*data_ptr); + + /*Reset last bits to zero*/ + res+= my_count_bits_uint32(*map->last_word_ptr & ~map->last_word_mask); return res; } @@ -475,76 +536,44 @@ uint bitmap_bits_set(const MY_BITMAP *map) void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2) { my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; - DBUG_ASSERT(map->bitmap && map2->bitmap && map->n_bits==map2->n_bits); end= map->last_word_ptr; - while (to <= end) - *to++ = *from++; + + for (; to <= end; to++, from++) + *to = *from; } uint bitmap_get_first_set(const MY_BITMAP *map) { - uchar *byte_ptr; - uint i,j,k; + uint word_pos; my_bitmap_map *data_ptr, *end= map->last_word_ptr; DBUG_ASSERT(map->bitmap); data_ptr= map->bitmap; - *map->last_word_ptr &= ~map->last_word_mask; - for (i=0; data_ptr <= end; data_ptr++, i++) - { + for (word_pos=0; data_ptr < end; data_ptr++, word_pos++) if (*data_ptr) - { - byte_ptr= (uchar*)data_ptr; - for (j=0; ; j++, byte_ptr++) - { - if (*byte_ptr) - { - for (k=0; ; k++) - { - if (*byte_ptr & (1 << k)) - return (i*32) + (j*8) + k; - } - } - } - } - } - return MY_BIT_NONE; + return get_first_set(*data_ptr, word_pos); + + return get_first_set(*map->last_word_ptr & ~map->last_word_mask, word_pos); } uint bitmap_get_first(const MY_BITMAP *map) { - uchar *byte_ptr; - uint i,j,k; + uint word_pos; my_bitmap_map *data_ptr, *end= map->last_word_ptr; DBUG_ASSERT(map->bitmap); data_ptr= map->bitmap; - *map->last_word_ptr|= map->last_word_mask; - for (i=0; data_ptr <= end; data_ptr++, i++) - { + for (word_pos=0; data_ptr < end; data_ptr++, word_pos++) if (*data_ptr != 0xFFFFFFFF) - { - byte_ptr= (uchar*)data_ptr; - for (j=0; ; j++, byte_ptr++) - { - if (*byte_ptr != 0xFF) - { - for (k=0; ; k++) - { - if (!(*byte_ptr & (1 << k))) - return (i*32) + (j*8) + k; - } - } - } - } - } - return MY_BIT_NONE; + return get_first_not_set(*data_ptr, word_pos); + + return get_first_not_set(*map->last_word_ptr | map->last_word_mask, word_pos); } @@ -752,375 +781,3 @@ void bitmap_lock_flip_bit(MY_BITMAP *map, uint bitmap_bit) bitmap_unlock(map); } #endif -#ifdef MAIN - -uint get_rand_bit(uint bitsize) -{ - return (rand() % bitsize); -} - -bool test_set_get_clear_bit(MY_BITMAP *map, uint bitsize) -{ - uint i, test_bit; - uint no_loops= bitsize > 128 ? 
128 : bitsize; - for (i=0; i < no_loops; i++) - { - test_bit= get_rand_bit(bitsize); - bitmap_set_bit(map, test_bit); - if (!bitmap_is_set(map, test_bit)) - goto error1; - bitmap_clear_bit(map, test_bit); - if (bitmap_is_set(map, test_bit)) - goto error2; - } - return FALSE; -error1: - printf("Error in set bit, bit %u, bitsize = %u", test_bit, bitsize); - return TRUE; -error2: - printf("Error in clear bit, bit %u, bitsize = %u", test_bit, bitsize); - return TRUE; -} - -bool test_flip_bit(MY_BITMAP *map, uint bitsize) -{ - uint i, test_bit; - uint no_loops= bitsize > 128 ? 128 : bitsize; - for (i=0; i < no_loops; i++) - { - test_bit= get_rand_bit(bitsize); - bitmap_flip_bit(map, test_bit); - if (!bitmap_is_set(map, test_bit)) - goto error1; - bitmap_flip_bit(map, test_bit); - if (bitmap_is_set(map, test_bit)) - goto error2; - } - return FALSE; -error1: - printf("Error in flip bit 1, bit %u, bitsize = %u", test_bit, bitsize); - return TRUE; -error2: - printf("Error in flip bit 2, bit %u, bitsize = %u", test_bit, bitsize); - return TRUE; -} - -bool test_operators(MY_BITMAP *map __attribute__((unused)), - uint bitsize __attribute__((unused))) -{ - return FALSE; -} - -bool test_get_all_bits(MY_BITMAP *map, uint bitsize) -{ - uint i; - bitmap_set_all(map); - if (!bitmap_is_set_all(map)) - goto error1; - if (!bitmap_is_prefix(map, bitsize)) - goto error5; - bitmap_clear_all(map); - if (!bitmap_is_clear_all(map)) - goto error2; - if (!bitmap_is_prefix(map, 0)) - goto error6; - for (i=0; i 128 ? 128 : bitsize; - MY_BITMAP map2_obj, map3_obj; - MY_BITMAP *map2= &map2_obj, *map3= &map3_obj; - my_bitmap_map map2buf[1024]; - my_bitmap_map map3buf[1024]; - bitmap_init(&map2_obj, map2buf, bitsize, FALSE); - bitmap_init(&map3_obj, map3buf, bitsize, FALSE); - bitmap_clear_all(map2); - bitmap_clear_all(map3); - for (i=0; i < no_loops; i++) - { - test_bit1=get_rand_bit(bitsize); - bitmap_set_prefix(map, test_bit1); - test_bit2=get_rand_bit(bitsize); - bitmap_set_prefix(map2, test_bit2); - bitmap_intersect(map, map2); - test_bit3= test_bit2 < test_bit1 ? test_bit2 : test_bit1; - bitmap_set_prefix(map3, test_bit3); - if (!bitmap_cmp(map, map3)) - goto error1; - bitmap_clear_all(map); - bitmap_clear_all(map2); - bitmap_clear_all(map3); - test_bit1=get_rand_bit(bitsize); - test_bit2=get_rand_bit(bitsize); - test_bit3=get_rand_bit(bitsize); - bitmap_set_prefix(map, test_bit1); - bitmap_set_prefix(map2, test_bit2); - test_bit3= test_bit2 > test_bit1 ? test_bit2 : test_bit1; - bitmap_set_prefix(map3, test_bit3); - bitmap_union(map, map2); - if (!bitmap_cmp(map, map3)) - goto error2; - bitmap_clear_all(map); - bitmap_clear_all(map2); - bitmap_clear_all(map3); - test_bit1=get_rand_bit(bitsize); - test_bit2=get_rand_bit(bitsize); - test_bit3=get_rand_bit(bitsize); - bitmap_set_prefix(map, test_bit1); - bitmap_set_prefix(map2, test_bit2); - bitmap_xor(map, map2); - test_bit3= test_bit2 > test_bit1 ? test_bit2 : test_bit1; - test_bit4= test_bit2 < test_bit1 ? 
test_bit2 : test_bit1; - bitmap_set_prefix(map3, test_bit3); - for (j=0; j < test_bit4; j++) - bitmap_clear_bit(map3, j); - if (!bitmap_cmp(map, map3)) - goto error3; - bitmap_clear_all(map); - bitmap_clear_all(map2); - bitmap_clear_all(map3); - test_bit1=get_rand_bit(bitsize); - test_bit2=get_rand_bit(bitsize); - test_bit3=get_rand_bit(bitsize); - bitmap_set_prefix(map, test_bit1); - bitmap_set_prefix(map2, test_bit2); - bitmap_subtract(map, map2); - if (test_bit2 < test_bit1) - { - bitmap_set_prefix(map3, test_bit1); - for (j=0; j < test_bit2; j++) - bitmap_clear_bit(map3, j); - } - if (!bitmap_cmp(map, map3)) - goto error4; - bitmap_clear_all(map); - bitmap_clear_all(map2); - bitmap_clear_all(map3); - test_bit1=get_rand_bit(bitsize); - bitmap_set_prefix(map, test_bit1); - bitmap_invert(map); - bitmap_set_all(map3); - for (j=0; j < test_bit1; j++) - bitmap_clear_bit(map3, j); - if (!bitmap_cmp(map, map3)) - goto error5; - bitmap_clear_all(map); - bitmap_clear_all(map3); - } - return FALSE; -error1: - printf("intersect error bitsize=%u,size1=%u,size2=%u", bitsize, - test_bit1,test_bit2); - return TRUE; -error2: - printf("union error bitsize=%u,size1=%u,size2=%u", bitsize, - test_bit1,test_bit2); - return TRUE; -error3: - printf("xor error bitsize=%u,size1=%u,size2=%u", bitsize, - test_bit1,test_bit2); - return TRUE; -error4: - printf("subtract error bitsize=%u,size1=%u,size2=%u", bitsize, - test_bit1,test_bit2); - return TRUE; -error5: - printf("invert error bitsize=%u,size=%u", bitsize, - test_bit1); - return TRUE; -} - -bool test_count_bits_set(MY_BITMAP *map, uint bitsize) -{ - uint i, bit_count=0, test_bit; - uint no_loops= bitsize > 128 ? 128 : bitsize; - for (i=0; i < no_loops; i++) - { - test_bit=get_rand_bit(bitsize); - if (!bitmap_is_set(map, test_bit)) - { - bitmap_set_bit(map, test_bit); - bit_count++; - } - } - if (bit_count==0 && bitsize > 0) - goto error1; - if (bitmap_bits_set(map) != bit_count) - goto error2; - return FALSE; -error1: - printf("No bits set bitsize = %u", bitsize); - return TRUE; -error2: - printf("Wrong count of bits set, bitsize = %u", bitsize); - return TRUE; -} - -bool test_get_first_bit(MY_BITMAP *map, uint bitsize) -{ - uint i, test_bit; - uint no_loops= bitsize > 128 ? 128 : bitsize; - for (i=0; i < no_loops; i++) - { - test_bit=get_rand_bit(bitsize); - bitmap_set_bit(map, test_bit); - if (bitmap_get_first_set(map) != test_bit) - goto error1; - bitmap_set_all(map); - bitmap_clear_bit(map, test_bit); - if (bitmap_get_first(map) != test_bit) - goto error2; - bitmap_clear_all(map); - } - return FALSE; -error1: - printf("get_first_set error bitsize=%u,prefix_size=%u",bitsize,test_bit); - return TRUE; -error2: - printf("get_first error bitsize= %u, prefix_size= %u",bitsize,test_bit); - return TRUE; -} - -bool test_get_next_bit(MY_BITMAP *map, uint bitsize) -{ - uint i, j, test_bit; - uint no_loops= bitsize > 128 ? 128 : bitsize; - for (i=0; i < no_loops; i++) - { - test_bit=get_rand_bit(bitsize); - for (j=0; j < test_bit; j++) - bitmap_set_next(map); - if (!bitmap_is_prefix(map, test_bit)) - goto error1; - bitmap_clear_all(map); - } - return FALSE; -error1: - printf("get_next error bitsize= %u, prefix_size= %u", bitsize,test_bit); - return TRUE; -} - -bool test_prefix(MY_BITMAP *map, uint bitsize) -{ - uint i, j, test_bit; - uint no_loops= bitsize > 128 ? 
128 : bitsize; - for (i=0; i < no_loops; i++) - { - test_bit=get_rand_bit(bitsize); - bitmap_set_prefix(map, test_bit); - if (!bitmap_is_prefix(map, test_bit)) - goto error1; - bitmap_clear_all(map); - for (j=0; j < test_bit; j++) - bitmap_set_bit(map, j); - if (!bitmap_is_prefix(map, test_bit)) - goto error2; - bitmap_set_all(map); - for (j=bitsize - 1; ~(j-test_bit); j--) - bitmap_clear_bit(map, j); - if (!bitmap_is_prefix(map, test_bit)) - goto error3; - bitmap_clear_all(map); - } - return FALSE; -error1: - printf("prefix1 error bitsize = %u, prefix_size = %u", bitsize,test_bit); - return TRUE; -error2: - printf("prefix2 error bitsize = %u, prefix_size = %u", bitsize,test_bit); - return TRUE; -error3: - printf("prefix3 error bitsize = %u, prefix_size = %u", bitsize,test_bit); - return TRUE; -} - - -bool do_test(uint bitsize) -{ - MY_BITMAP map; - my_bitmap_map buf[1024]; - if (bitmap_init(&map, buf, bitsize, FALSE)) - { - printf("init error for bitsize %d", bitsize); - goto error; - } - if (test_set_get_clear_bit(&map,bitsize)) - goto error; - bitmap_clear_all(&map); - if (test_flip_bit(&map,bitsize)) - goto error; - bitmap_clear_all(&map); - if (test_operators(&map,bitsize)) - goto error; - bitmap_clear_all(&map); - if (test_get_all_bits(&map, bitsize)) - goto error; - bitmap_clear_all(&map); - if (test_compare_operators(&map,bitsize)) - goto error; - bitmap_clear_all(&map); - if (test_count_bits_set(&map,bitsize)) - goto error; - bitmap_clear_all(&map); - if (test_get_first_bit(&map,bitsize)) - goto error; - bitmap_clear_all(&map); - if (test_get_next_bit(&map,bitsize)) - goto error; - if (test_prefix(&map,bitsize)) - goto error; - return FALSE; -error: - printf("\n"); - return TRUE; -} - -int main() -{ - int i; - for (i= 1; i < 4096; i++) - { - printf("Start test for bitsize=%u\n",i); - if (do_test(i)) - return -1; - } - printf("OK\n"); - return 0; -} - -/* - In directory mysys: - make test_bitmap - will build the bitmap tests and ./test_bitmap will execute it -*/ - -#endif diff --git a/unittest/mysys/bitmap-t.c b/unittest/mysys/bitmap-t.c index 0bd21b63430..d5c1791ca14 100644 --- a/unittest/mysys/bitmap-t.c +++ b/unittest/mysys/bitmap-t.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -24,6 +24,8 @@ #include #include +#define MAX_TESTED_BITMAP_SIZE 1024 + uint get_rand_bit(uint bitsize) { return (rand() % bitsize); @@ -75,12 +77,6 @@ error2: return TRUE; } -my_bool test_operators(MY_BITMAP *map __attribute__((unused)), - uint bitsize __attribute__((unused))) -{ - return FALSE; -} - my_bool test_get_all_bits(MY_BITMAP *map, uint bitsize) { uint i; @@ -129,8 +125,8 @@ my_bool test_compare_operators(MY_BITMAP *map, uint bitsize) uint no_loops= bitsize > 128 ? 128 : bitsize; MY_BITMAP map2_obj, map3_obj; MY_BITMAP *map2= &map2_obj, *map3= &map3_obj; - uint32 map2buf[1024]; - uint32 map3buf[1024]; + uint32 map2buf[MAX_TESTED_BITMAP_SIZE]; + uint32 map3buf[MAX_TESTED_BITMAP_SIZE]; bitmap_init(&map2_obj, map2buf, bitsize, FALSE); bitmap_init(&map3_obj, map3buf, bitsize, FALSE); bitmap_clear_all(map2); @@ -259,6 +255,19 @@ my_bool test_get_first_bit(MY_BITMAP *map, uint bitsize) { uint i, test_bit; uint no_loops= bitsize > 128 ? 
128 : bitsize; + + bitmap_set_all(map); + for (i=0; i < bitsize; i++) + bitmap_clear_bit(map, i); + if (bitmap_get_first_set(map) != MY_BIT_NONE) + goto error1; + bitmap_clear_all(map); + for (i=0; i < bitsize; i++) + bitmap_set_bit(map, i); + if (bitmap_get_first(map) != MY_BIT_NONE) + goto error2; + bitmap_clear_all(map); + for (i=0; i < no_loops; i++) { test_bit=get_rand_bit(bitsize); @@ -321,6 +330,24 @@ my_bool test_prefix(MY_BITMAP *map, uint bitsize) goto error3; bitmap_clear_all(map); } + for (i=0; i < bitsize; i++) + { + if (bitmap_is_prefix(map, i + 1)) + goto error4; + bitmap_set_bit(map, i); + if (!bitmap_is_prefix(map, i + 1)) + goto error5; + test_bit=get_rand_bit(bitsize); + bitmap_set_bit(map, test_bit); + if (test_bit <= i && !bitmap_is_prefix(map, i + 1)) + goto error5; + else if (test_bit > i) + { + if (bitmap_is_prefix(map, i + 1)) + goto error4; + bitmap_clear_bit(map, test_bit); + } + } return FALSE; error1: diag("prefix1 error bitsize = %u, prefix_size = %u", bitsize,test_bit); @@ -331,13 +358,127 @@ error2: error3: diag("prefix3 error bitsize = %u, prefix_size = %u", bitsize,test_bit); return TRUE; +error4: + diag("prefix4 error bitsize = %u, i = %u", bitsize,i); + return TRUE; +error5: + diag("prefix5 error bitsize = %u, i = %u", bitsize,i); + return TRUE; +} + +my_bool test_compare(MY_BITMAP *map, uint bitsize) +{ + MY_BITMAP map2; + uint32 map2buf[MAX_TESTED_BITMAP_SIZE]; + uint i, test_bit; + uint no_loops= bitsize > 128 ? 128 : bitsize; + if (bitmap_init(&map2, map2buf, bitsize, FALSE)) + { + diag("init error for bitsize %d", bitsize); + return TRUE; + } + /* Test all 4 possible combinations of set/unset bits. */ + for (i=0; i < no_loops; i++) + { + test_bit=get_rand_bit(bitsize); + bitmap_clear_bit(map, test_bit); + bitmap_clear_bit(&map2, test_bit); + if (!bitmap_is_subset(map, &map2)) + goto error_is_subset; + bitmap_set_bit(map, test_bit); + if (bitmap_is_subset(map, &map2)) + goto error_is_subset; + bitmap_set_bit(&map2, test_bit); + if (!bitmap_is_subset(map, &map2)) + goto error_is_subset; + bitmap_clear_bit(map, test_bit); + if (!bitmap_is_subset(map, &map2)) + goto error_is_subset; + /* Note that test_bit is not cleared i map2. */ + } + bitmap_clear_all(map); + bitmap_clear_all(&map2); + /* Test all 4 possible combinations of set/unset bits. */ + for (i=0; i < no_loops; i++) + { + test_bit=get_rand_bit(bitsize); + if (bitmap_is_overlapping(map, &map2)) + goto error_is_overlapping; + bitmap_set_bit(map, test_bit); + if (bitmap_is_overlapping(map, &map2)) + goto error_is_overlapping; + bitmap_set_bit(&map2, test_bit); + if (!bitmap_is_overlapping(map, &map2)) + goto error_is_overlapping; + bitmap_clear_bit(map, test_bit); + if (bitmap_is_overlapping(map, &map2)) + goto error_is_overlapping; + bitmap_clear_bit(&map2, test_bit); + /* Note that test_bit is not cleared i map2. 
*/ + } + return FALSE; +error_is_subset: + diag("is_subset error bitsize = %u", bitsize); + return TRUE; +error_is_overlapping: + diag("is_overlapping error bitsize = %u", bitsize); + return TRUE; } +my_bool test_intersect(MY_BITMAP *map, uint bitsize) +{ + uint bitsize2 = 1 + get_rand_bit(MAX_TESTED_BITMAP_SIZE - 1); + MY_BITMAP map2; + uint32 map2buf[bitsize2]; + uint i, test_bit1, test_bit2, test_bit3; + if (bitmap_init(&map2, map2buf, bitsize2, FALSE)) + { + diag("init error for bitsize %d", bitsize2); + return TRUE; + } + test_bit1= get_rand_bit(bitsize); + test_bit2= get_rand_bit(bitsize); + bitmap_set_bit(map, test_bit1); + bitmap_set_bit(map, test_bit2); + test_bit3= get_rand_bit(bitsize2); + bitmap_set_bit(&map2, test_bit3); + if (test_bit2 < bitsize2) + bitmap_set_bit(&map2, test_bit2); + + bitmap_intersect(map, &map2); + if (test_bit2 < bitsize2) + { + if (!bitmap_is_set(map, test_bit2)) + goto error; + bitmap_clear_bit(map, test_bit2); + } + if (test_bit1 == test_bit3) + { + if (!bitmap_is_set(map, test_bit1)) + goto error; + bitmap_clear_bit(map, test_bit1); + } + if (!bitmap_is_clear_all(map)) + goto error; + + bitmap_set_all(map); + bitmap_set_all(&map2); + for (i=0; i < bitsize2; i++) + bitmap_clear_bit(&map2, i); + bitmap_intersect(map, &map2); + if (!bitmap_is_clear_all(map)) + goto error; + return FALSE; +error: + diag("intersect error bitsize = %u, bit1 = %u, bit2 = %u, bit3 = %u", + bitsize, test_bit1, test_bit2, test_bit3); + return TRUE; +} my_bool do_test(uint bitsize) { MY_BITMAP map; - uint32 buf[1024]; + uint32 buf[MAX_TESTED_BITMAP_SIZE]; if (bitmap_init(&map, buf, bitsize, FALSE)) { diag("init error for bitsize %d", bitsize); @@ -349,9 +490,6 @@ my_bool do_test(uint bitsize) if (test_flip_bit(&map,bitsize)) goto error; bitmap_clear_all(&map); - if (test_operators(&map,bitsize)) - goto error; - bitmap_clear_all(&map); if (test_get_all_bits(&map, bitsize)) goto error; bitmap_clear_all(&map); @@ -366,8 +504,15 @@ my_bool do_test(uint bitsize) bitmap_clear_all(&map); if (test_get_next_bit(&map,bitsize)) goto error; + bitmap_clear_all(&map); if (test_prefix(&map,bitsize)) goto error; + bitmap_clear_all(&map); + if (test_compare(&map,bitsize)) + goto error; + bitmap_clear_all(&map); + if (test_intersect(&map,bitsize)) + goto error; return FALSE; error: return TRUE; @@ -377,7 +522,7 @@ int main() { int i; int const min_size = 1; - int const max_size = 1024; + int const max_size = MAX_TESTED_BITMAP_SIZE; MY_INIT("bitmap-t"); plan(max_size - min_size); -- cgit v1.2.1 From 0e28aa2f1c6c5bd12fd907636cb28e255f425590 Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Wed, 16 Feb 2011 18:19:10 +0100 Subject: Followup to Bug #11752069 (former bug 43152) Assertion `bitmap_is_set_all(&table->s->all_set)' failed in handler::ha_reset This followup fixes the compilation warning 'test_bit' may be used uninitialized in this function introduced by the previous patch. --- unittest/mysys/bitmap-t.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unittest/mysys/bitmap-t.c b/unittest/mysys/bitmap-t.c index d5c1791ca14..d0df9fbb040 100644 --- a/unittest/mysys/bitmap-t.c +++ b/unittest/mysys/bitmap-t.c @@ -253,7 +253,7 @@ error2: my_bool test_get_first_bit(MY_BITMAP *map, uint bitsize) { - uint i, test_bit; + uint i, test_bit= 0; uint no_loops= bitsize > 128 ? 
128 : bitsize; bitmap_set_all(map); -- cgit v1.2.1 From ee2f9d868c59934bc8c051faf52519ffe77747cd Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Wed, 16 Feb 2011 21:43:12 +0100 Subject: Simple fix of suppress pattern in test insert_select --- mysql-test/r/insert_select.result | 2 +- mysql-test/t/insert_select.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/insert_select.result b/mysql-test/r/insert_select.result index f12c9a0a61a..708c44f07a9 100644 --- a/mysql-test/r/insert_select.result +++ b/mysql-test/r/insert_select.result @@ -841,7 +841,7 @@ SET max_heap_table_size = 16384; SET @old_myisam_data_pointer_size = @@myisam_data_pointer_size; SET GLOBAL myisam_data_pointer_size = 2; INSERT INTO t1 VALUES (1), (2), (3), (4), (5); -call mtr.add_suppression("mysqld: The table '.*#sql.*' is full"); +call mtr.add_suppression("mysqld.*: The table '.*#sql.*' is full"); INSERT IGNORE INTO t1 SELECT t1.a FROM t1,t1 t2,t1 t3,t1 t4,t1 t5,t1 t6,t1 t7; Got one of the listed errors SET GLOBAL myisam_data_pointer_size = @old_myisam_data_pointer_size; diff --git a/mysql-test/t/insert_select.test b/mysql-test/t/insert_select.test index 7318e45889a..d7fe816bec7 100644 --- a/mysql-test/t/insert_select.test +++ b/mysql-test/t/insert_select.test @@ -407,7 +407,7 @@ SET GLOBAL myisam_data_pointer_size = 2; INSERT INTO t1 VALUES (1), (2), (3), (4), (5); -call mtr.add_suppression("mysqld: The table '.*#sql.*' is full"); +call mtr.add_suppression("mysqld.*: The table '.*#sql.*' is full"); --error ER_RECORD_FILE_FULL,ER_RECORD_FILE_FULL INSERT IGNORE INTO t1 SELECT t1.a FROM t1,t1 t2,t1 t3,t1 t4,t1 t5,t1 t6,t1 t7; -- cgit v1.2.1 From e53ffb8f7f7a0420e92158eb61fc8afdfc385f16 Mon Sep 17 00:00:00 2001 From: Jonathan Perkin Date: Wed, 16 Feb 2011 14:42:44 -0800 Subject: Updated README file. --- README | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/README b/README index 5463de4a7e8..2e18fb55a22 100644 --- a/README +++ b/README @@ -3,18 +3,29 @@ MySQL Server This is a release of MySQL, a dual-license SQL database server. For the avoidance of doubt, this particular copy of the software is released under the version 2 of the GNU General Public License. -MySQL is brought to you by the MySQL team at Oracle. +MySQL is brought to you by Oracle. -Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. License information can be found in the COPYING file. +MySQL FOSS License Exception +We want free and open source software applications under certain +licenses to be able to use specified GPL-licensed MySQL client +libraries despite the fact that not all such FOSS licenses are +compatible with version 2 of the GNU General Public License. +Therefore there are special exceptions to the terms and conditions +of the GPLv2 as applied to these client libraries, which are +identified and described in more detail in the FOSS License +Exception at +. + This distribution may include materials developed by third parties. For license and attribution notices for these materials, please refer to the documentation that accompanies -this distribution (see the Licenses for Third-Party Components -appendix). A copy of the license/notices is also reproduced -below. +this distribution (see the "Licenses for Third-Party Components" +appendix) or view the online documentation at +. 
GPLv2 Disclaimer For the avoidance of doubt, except that if any license choice @@ -38,8 +49,6 @@ Some Reference Manual sections of special interest: chapter. - For the new features/bugfix history, see the MySQL Change History appendix. -- For currently known bugs, see the Errors and Common Problems - appendix. You can browse the MySQL Reference Manual online or download it in any of several formats at the URL given earlier in this file. -- cgit v1.2.1 From 61449541fb8aec7f82d8ab8e64688a4c5118e923 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 17 Feb 2011 09:45:07 +0200 Subject: Disable the Bug #60049 test on embedded, as it requires server restart. --- mysql-test/suite/innodb/t/innodb_bug60049.test | 1 + mysql-test/suite/innodb_plugin/t/innodb_bug60049.test | 1 + 2 files changed, 2 insertions(+) diff --git a/mysql-test/suite/innodb/t/innodb_bug60049.test b/mysql-test/suite/innodb/t/innodb_bug60049.test index cff1c3dc09d..b35fb12cc5e 100644 --- a/mysql-test/suite/innodb/t/innodb_bug60049.test +++ b/mysql-test/suite/innodb/t/innodb_bug60049.test @@ -2,6 +2,7 @@ # This test requires a fresh server start-up and a slow shutdown. # This was a suspected bug (not a bug). +-- source include/not_embedded.inc -- source include/have_innodb.inc CREATE TABLE t(a INT)ENGINE=InnoDB; diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test index b3557243235..8d6c38ff9ef 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test @@ -2,6 +2,7 @@ # This test requires a fresh server start-up and a slow shutdown. # This was a suspected bug (not a bug). +-- source include/not_embedded.inc -- source include/have_innodb_plugin.inc CREATE TABLE t(a INT)ENGINE=InnoDB; -- cgit v1.2.1 From 6503226743a2fa24c7330d4541560a5b8fa821d6 Mon Sep 17 00:00:00 2001 From: Magne Mahre Date: Thu, 17 Feb 2011 12:43:53 +0100 Subject: Bug#48053 String::c_ptr has a race and/or does an invalid memory reference There are two issues present here. 1) There is a possibility that we test a byte beyond the allocated buffer 2) We compare a byte that might never have been initalized to see if it's 0. The first issue is not triggered by existing code, but an ASSERT has been added to safe-guard against introducing new code that triggers it. The second issue is what triggers the Valgrind warnings reported in the bug report. A buffer is allocated in class String to hold the value. This buffer is populated by the character data constituting the string, but is not zero-terminated in most cases. Testing if it is indeed zero-terminated means that we check a byte that has never been explicitly set, thus causing Valgrind to trigger. Note that issue 2 is not a serious problem. The variable is read, and if it's not zero, we will set it to zero. There are no further consequences. Note that this patch does not fix the underlying problems with issue 1, as it is deemed too risky to fix at this point (as noted in the bug report). As discussed in the report, the c_ptr() method should probably be replaced, but this requires a thorough analysis of the ~200 calls to the method. sql/set_var.cc: These two cases have been reported to fail with Valgrind. 
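To make issue 2 concrete: the byte that c_ptr() inspects sits inside the allocation but has never been written, which is precisely the condition Valgrind reports. The sketch below is an illustration only, not the String class from sql_string.h; MiniString and both method names are invented for this note, and the "safe" variant merely shows the unconditional-termination idea behind switching the two call sites in set_var.cc to c_ptr_safe() (whose real implementation is not part of this patch).

    #include <cstdlib>
    #include <cstring>

    /*
      Simplified stand-in for the situation described above -- NOT the real
      String class.  The buffer receives len bytes of character data and no
      '\0', so the byte at buf[len] is allocated but never written.
    */
    struct MiniString
    {
      char  *buf;
      size_t len;        /* bytes of real data, terminator never written */
      size_t alloced;    /* allocated size; >= len + 1 in this sketch    */

      explicit MiniString(const char *src)
      {
        len=     std::strlen(src);
        alloced= len + 1;
        buf=     static_cast<char*>(std::malloc(alloced));
        std::memcpy(buf, src, len);        /* no terminating '\0' on purpose */
      }
      ~MiniString() { std::free(buf); }

      /*
        Roughly the check-then-terminate pattern of c_ptr(): look at the byte
        after the data and terminate only if it is non-zero.  Valgrind flags
        the comparison because buf[len] was never initialized, even though
        the outcome of the branch is harmless (issue 2 above).
      */
      const char *c_ptr_like()
      {
        if (buf[len] != 0)                 /* read of an uninitialized byte */
          buf[len]= 0;
        return buf;
      }

      /* Terminate unconditionally: writes are fine, nothing is read. */
      const char *c_ptr_safe_like()
      {
        buf[len]= 0;
        return buf;
      }
    };

    int main()
    {
      MiniString s("latin1_swedish_ci");
      return s.c_ptr_safe_like()[0] == 'l' ? 0 : 1;
    }

Run under valgrind, the check-then-terminate variant typically produces a "conditional jump depends on uninitialised value" report, while the unconditional store does not, which is why the call sites below move to c_ptr_safe().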
--- mysql-test/r/ctype_cp1250_ch.result | 3 +++ mysql-test/r/ctype_cp1251.result | 2 ++ mysql-test/r/ctype_eucjpms.result | 2 ++ mysql-test/t/ctype_cp1250_ch.test | 10 ++++++++++ mysql-test/t/ctype_cp1251.test | 10 ++++++++++ mysql-test/t/ctype_eucjpms.test | 8 ++++++++ sql/set_var.cc | 4 ++-- sql/sql_string.h | 3 +++ 8 files changed, 40 insertions(+), 2 deletions(-) mode change 100755 => 100644 mysql-test/r/ctype_eucjpms.result diff --git a/mysql-test/r/ctype_cp1250_ch.result b/mysql-test/r/ctype_cp1250_ch.result index 7f0cdf3f17b..46ca1f25ef4 100644 --- a/mysql-test/r/ctype_cp1250_ch.result +++ b/mysql-test/r/ctype_cp1250_ch.result @@ -238,3 +238,6 @@ select a from t1 where a like "abcdefgh a abcdefghá drop table t1; +set global LC_MESSAGES=convert((@@global.log_bin_trust_function_creators) +using cp1250); +ERROR HY000: Unknown system variable 'LC_MESSAGES' diff --git a/mysql-test/r/ctype_cp1251.result b/mysql-test/r/ctype_cp1251.result index dc12f9ceb03..2e91ecb7bc0 100644 --- a/mysql-test/r/ctype_cp1251.result +++ b/mysql-test/r/ctype_cp1251.result @@ -375,6 +375,8 @@ FD FD FD D18D FD FE FE FE D18E FE FF FF FF D18F FF DROP TABLE t1; +set global LC_TIME_NAMES=convert((-8388608) using cp1251); +ERROR HY000: Unknown locale: '-8388608' # # End of 5.1 tests # diff --git a/mysql-test/r/ctype_eucjpms.result b/mysql-test/r/ctype_eucjpms.result old mode 100755 new mode 100644 index 21aa38b7fe6..21109f596c1 --- a/mysql-test/r/ctype_eucjpms.result +++ b/mysql-test/r/ctype_eucjpms.result @@ -9859,3 +9859,5 @@ hex(convert(_eucjpms 0xA5FE41 using ucs2)) select hex(convert(_eucjpms 0x8FABF841 using ucs2)); hex(convert(_eucjpms 0x8FABF841 using ucs2)) 003F0041 +set global LC_TIME_NAMES=convert((convert((0x63) using eucjpms)) using utf8); +ERROR HY000: Unknown locale: 'c' diff --git a/mysql-test/t/ctype_cp1250_ch.test b/mysql-test/t/ctype_cp1250_ch.test index 1fb656f2a01..3e17ee52164 100644 --- a/mysql-test/t/ctype_cp1250_ch.test +++ b/mysql-test/t/ctype_cp1250_ch.test @@ -72,3 +72,13 @@ select a from t1 where a like "abcdefgh drop table t1; # End of 4.1 tests + +# +# Bug #48053 String::c_ptr has a race and/or does an invalid +# memory reference +# (triggered by Valgrind tests) +# (see also ctype_eucjpms.test, ctype_cp1250.test, ctype_cp1251.test) +# +--error 1193 +set global LC_MESSAGES=convert((@@global.log_bin_trust_function_creators) + using cp1250); diff --git a/mysql-test/t/ctype_cp1251.test b/mysql-test/t/ctype_cp1251.test index 2331c731061..bde72d04ba7 100644 --- a/mysql-test/t/ctype_cp1251.test +++ b/mysql-test/t/ctype_cp1251.test @@ -55,6 +55,16 @@ drop table t1; --source include/ctype_8bit.inc +# +# Bug #48053 String::c_ptr has a race and/or does an invalid +# memory reference +# (triggered by Valgrind tests) +# (see also ctype_eucjpms.test, ctype_cp1250.test, ctype_cp1251.test) +# +--error 1105 +set global LC_TIME_NAMES=convert((-8388608) using cp1251); + + --echo # --echo # End of 5.1 tests --echo # diff --git a/mysql-test/t/ctype_eucjpms.test b/mysql-test/t/ctype_eucjpms.test index ec358d94900..165cfba897a 100644 --- a/mysql-test/t/ctype_eucjpms.test +++ b/mysql-test/t/ctype_eucjpms.test @@ -381,3 +381,11 @@ select hex(convert(_eucjpms 0xA5FE41 using ucs2)); # the next character, which is a single byte character 0x41. 
select hex(convert(_eucjpms 0x8FABF841 using ucs2)); +# +# Bug #48053 String::c_ptr has a race and/or does an invalid +# memory reference +# (triggered by Valgrind tests) +# (see also ctype_eucjpms.test, ctype_cp1250.test, ctype_cp1251.test) +# +--error 1105 +set global LC_TIME_NAMES=convert((convert((0x63) using eucjpms)) using utf8); diff --git a/sql/set_var.cc b/sql/set_var.cc index d297be3fc10..26c9b06a912 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1828,7 +1828,7 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) } var->save_result.ulong_value= ((ulong) - find_set(enum_names, res->c_ptr(), + find_set(enum_names, res->c_ptr_safe(), res->length(), NULL, &error, &error_len, @@ -2941,7 +2941,7 @@ bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var) my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL"); return 1; } - const char *locale_str= res->c_ptr(); + const char *locale_str= res->c_ptr_safe(); if (!(locale_match= my_locale_by_name(locale_str))) { my_printf_error(ER_UNKNOWN_ERROR, diff --git a/sql/sql_string.h b/sql/sql_string.h index 092e194646f..c56c69493d4 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -106,6 +106,9 @@ public: inline const char *ptr() const { return Ptr; } inline char *c_ptr() { + DBUG_ASSERT(!alloced || !Ptr || !Alloced_length || + (Alloced_length >= (str_length + 1))); + if (!Ptr || Ptr[str_length]) /* Should be safe */ (void) realloc(str_length); return Ptr; -- cgit v1.2.1 From a27d85aa46147ecb59b1306c02194bd12a3378ed Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Thu, 17 Feb 2011 19:09:53 +0200 Subject: Fix the non-determinism in innodb_information_schema.test Thanks to Kristian Nielsen for finding out the root cause for the failure, see: https://bugs.launchpad.net/maria/+bug/677407 --- .../innodb_plugin/t/innodb_information_schema.test | 28 ++++++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/mysql-test/suite/innodb_plugin/t/innodb_information_schema.test b/mysql-test/suite/innodb_plugin/t/innodb_information_schema.test index 25255e0b2a9..20c25015c56 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb_information_schema.test +++ b/mysql-test/suite/innodb_plugin/t/innodb_information_schema.test @@ -116,11 +116,29 @@ SELECT * FROM ```t'\"_str` WHERE c1 = '4' FOR UPDATE; # executes before some of them, resulting in less than expected number # of rows being selected from innodb_locks. If there is a bug and there # are no 14 rows in innodb_locks then this test will fail with timeout. -let $count = 14; -let $table = INFORMATION_SCHEMA.INNODB_LOCKS; --- source include/wait_until_rows_count.inc -# the above enables the query log, re-disable it --- disable_query_log +# Notice that if we query INNODB_LOCKS more often than once per 0.1 sec +# then its contents will never change because the cache from which it is +# filled is updated only if it has not been read for 0.1 seconds. See +# CACHE_MIN_IDLE_TIME_US in trx/trx0i_s.c. 
+let $cnt=10; +while ($cnt) +{ + let $success=`SELECT COUNT(*) = 14 FROM INFORMATION_SCHEMA.INNODB_LOCKS`; + if ($success) + { + let $cnt=0; + } + if (!$success) + { + real_sleep 0.2; + dec $cnt; + } +} +if (!$success) +{ + -- echo Timeout waiting for rows in INNODB_LOCKS to appear +} + SELECT lock_mode, lock_type, lock_table, lock_index, lock_rec, lock_data FROM INFORMATION_SCHEMA.INNODB_LOCKS ORDER BY lock_data; -- cgit v1.2.1 From c83889d9d6a95dc0e1628dedfef9127da7bad3df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 17 Feb 2011 22:25:33 +0200 Subject: Allow 30 seconds for slow shutdown in the Bug #60049 test. --- mysql-test/suite/innodb/t/innodb_bug60049.test | 2 +- mysql-test/suite/innodb_plugin/t/innodb_bug60049.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/innodb/t/innodb_bug60049.test b/mysql-test/suite/innodb/t/innodb_bug60049.test index b35fb12cc5e..ec4e3b8de7e 100644 --- a/mysql-test/suite/innodb/t/innodb_bug60049.test +++ b/mysql-test/suite/innodb/t/innodb_bug60049.test @@ -13,7 +13,7 @@ let $MYSQLD_DATADIR=`select @@datadir`; # Shut down the server -- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --- shutdown_server 10 +-- shutdown_server 30 -- source include/wait_until_disconnected.inc # Check the tail of ID_IND (SYS_TABLES.ID) diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test index 8d6c38ff9ef..0423f5d3635 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug60049.test @@ -13,7 +13,7 @@ let $MYSQLD_DATADIR=`select @@datadir`; # Shut down the server -- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --- shutdown_server 10 +-- shutdown_server 30 -- source include/wait_until_disconnected.inc # Check the tail of ID_IND (SYS_TABLES.ID) -- cgit v1.2.1 From 61b256177bc9876d05ac807a98cb141c70d9357e Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Fri, 18 Feb 2011 11:50:06 +0100 Subject: Bug#11766675 - 59839: Aggregation followed by subquery yields wrong result The loop that was looping over subqueries' references to outer field used a local boolean variable to tell whether the field was grouped or not. But the implementor failed to reset the variable after each iteration. Thus a field that was not directly aggregated appeared to be. Fixed by resetting the variable upon each new iteration. 
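The shape of the defect is easy to see in isolation. The sketch below is hypothetical (it is not the fix_inner_refs() code; refs and is_aggregated() are invented for the illustration): a flag declared outside the loop keeps its value across iterations, so one aggregated reference makes every later reference look aggregated as well, which is the wrong-result scenario described above. Re-initializing the flag per iteration, exactly what the sql_select.cc hunk below does for direct_ref, restores the per-reference decision.

    #include <cstdio>

    /* Invented predicate standing in for "this outer reference is used
       under an aggregate function"; only reference 3 qualifies here. */
    static bool is_aggregated(int ref)
    {
      return ref == 3;
    }

    int main()
    {
      const int refs[]= {3, 1, 2};   /* only the first reference is aggregated */

      /* Buggy shape: the flag lives outside the loop and is never reset,
         so once it becomes true it leaks into the following iterations and
         references 1 and 2 are also treated as aggregated. */
      bool direct_ref= false;
      for (int i= 0; i < 3; i++)
      {
        if (is_aggregated(refs[i]))
          direct_ref= true;
        std::printf("buggy: ref %d -> direct_ref=%d\n",
                    refs[i], (int) direct_ref);
      }

      /* Fixed shape, matching the hunk below: re-initialize per iteration. */
      for (int i= 0; i < 3; i++)
      {
        bool direct_ref_fixed= false;
        if (is_aggregated(refs[i]))
          direct_ref_fixed= true;
        std::printf("fixed: ref %d -> direct_ref=%d\n",
                    refs[i], (int) direct_ref_fixed);
      }
      return 0;
    }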
--- mysql-test/r/group_by.result | 36 ++++++++++++++++++++++++ mysql-test/t/group_by.test | 37 +++++++++++++++++++++++++ sql/sql_select.cc | 66 +++++++++++++++++++++++--------------------- 3 files changed, 108 insertions(+), 31 deletions(-) diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index 83f1f220023..1dfb0f5860a 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -1855,4 +1855,40 @@ ON 1 WHERE t2.f1 > 1 GROUP BY t2.f1; COUNT(*) 2 DROP TABLE t1; +# +# Bug#59839: Aggregation followed by subquery yields wrong result +# +CREATE TABLE t1 ( +a INT, +b INT, +c INT, +KEY (a, b) +); +INSERT INTO t1 VALUES +( 1, 1, 1 ), +( 1, 2, 2 ), +( 1, 3, 3 ), +( 1, 4, 6 ), +( 1, 5, 5 ), +( 1, 9, 13 ), +( 2, 1, 6 ), +( 2, 2, 7 ), +( 2, 3, 8 ); +EXPLAIN +SELECT a, AVG(t1.b), +(SELECT t11.c FROM t1 t11 WHERE t11.a = t1.a AND t11.b = AVG(t1.b)) AS t11c, +(SELECT t12.c FROM t1 t12 WHERE t12.a = t1.a AND t12.b = AVG(t1.b)) AS t12c +FROM t1 GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index NULL a 10 NULL 9 Using index +3 DEPENDENT SUBQUERY t12 ref a a 10 func,func 2 Using where +2 DEPENDENT SUBQUERY t11 ref a a 10 func,func 2 Using where +SELECT a, AVG(t1.b), +(SELECT t11.c FROM t1 t11 WHERE t11.a = t1.a AND t11.b = AVG(t1.b)) AS t11c, +(SELECT t12.c FROM t1 t12 WHERE t12.a = t1.a AND t12.b = AVG(t1.b)) AS t12c +FROM t1 GROUP BY a; +a AVG(t1.b) t11c t12c +1 4.0000 6 6 +2 2.0000 7 7 +DROP TABLE t1; # End of 5.1 tests diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test index 580c2e5091c..1a4b9a3bab7 100644 --- a/mysql-test/t/group_by.test +++ b/mysql-test/t/group_by.test @@ -1247,4 +1247,41 @@ ON 1 WHERE t2.f1 > 1 GROUP BY t2.f1; DROP TABLE t1; +--echo # +--echo # Bug#59839: Aggregation followed by subquery yields wrong result +--echo # + +CREATE TABLE t1 ( + a INT, + b INT, + c INT, + KEY (a, b) +); + +INSERT INTO t1 VALUES + ( 1, 1, 1 ), + ( 1, 2, 2 ), + ( 1, 3, 3 ), + ( 1, 4, 6 ), + ( 1, 5, 5 ), + ( 1, 9, 13 ), + + ( 2, 1, 6 ), + ( 2, 2, 7 ), + ( 2, 3, 8 ); + +EXPLAIN +SELECT a, AVG(t1.b), +(SELECT t11.c FROM t1 t11 WHERE t11.a = t1.a AND t11.b = AVG(t1.b)) AS t11c, +(SELECT t12.c FROM t1 t12 WHERE t12.a = t1.a AND t12.b = AVG(t1.b)) AS t12c +FROM t1 GROUP BY a; + +SELECT a, AVG(t1.b), +(SELECT t11.c FROM t1 t11 WHERE t11.a = t1.a AND t11.b = AVG(t1.b)) AS t11c, +(SELECT t12.c FROM t1 t12 WHERE t12.a = t1.a AND t12.b = AVG(t1.b)) AS t12c +FROM t1 GROUP BY a; + +DROP TABLE t1; + + --echo # End of 5.1 tests diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 869fd01ac60..eb2559fc600 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -278,61 +278,65 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, } -/* +/** Fix fields referenced from inner selects. 
- SYNOPSIS - fix_inner_refs() - thd Thread handle - all_fields List of all fields used in select - select Current select - ref_pointer_array Array of references to Items used in current select - group_list GROUP BY list (is NULL by default) + @param thd Thread handle + @param all_fields List of all fields used in select + @param select Current select + @param ref_pointer_array Array of references to Items used in current select + @param group_list GROUP BY list (is NULL by default) - DESCRIPTION - The function serves 3 purposes - adds fields referenced from inner - selects to the current select list, resolves which class to use - to access referenced item (Item_ref of Item_direct_ref) and fixes - references (Item_ref objects) to these fields. + @details + The function serves 3 purposes + + - adds fields referenced from inner query blocks to the current select list + + - Decides which class to use to reference the items (Item_ref or + Item_direct_ref) - If a field isn't already in the select list and the ref_pointer_array + - fixes references (Item_ref objects) to these fields. + + If a field isn't already on the select list and the ref_pointer_array is provided then it is added to the all_fields list and the pointer to it is saved in the ref_pointer_array. The class to access the outer field is determined by the following rules: - 1. If the outer field isn't used under an aggregate function - then the Item_ref class should be used. - 2. If the outer field is used under an aggregate function and this - function is aggregated in the select where the outer field was - resolved or in some more inner select then the Item_direct_ref - class should be used. - Also it should be used if we are grouping by a subquery containing - the outer field. + + -#. If the outer field isn't used under an aggregate function then the + Item_ref class should be used. + + -#. If the outer field is used under an aggregate function and this + function is, in turn, aggregated in the query block where the outer + field was resolved or some query nested therein, then the + Item_direct_ref class should be used. Also it should be used if we are + grouping by a subquery containing the outer field. + The resolution is done here and not at the fix_fields() stage as - it can be done only after sum functions are fixed and pulled up to - selects where they are have to be aggregated. + it can be done only after aggregate functions are fixed and pulled up to + selects where they are to be aggregated. + When the class is chosen it substitutes the original field in the Item_outer_ref object. After this we proceed with fixing references (Item_outer_ref objects) to this field from inner subqueries. - RETURN - TRUE an error occured - FALSE ok -*/ + @return Status + @retval true An error occured. + @retval false OK. 
+ */ bool fix_inner_refs(THD *thd, List &all_fields, SELECT_LEX *select, Item **ref_pointer_array, ORDER *group_list) { Item_outer_ref *ref; - bool res= FALSE; - bool direct_ref= FALSE; List_iterator ref_it(select->inner_refs_list); while ((ref= ref_it++)) { + bool direct_ref= false; Item *item= ref->outer_ref; Item **item_ref= ref->ref; Item_ref *new_ref; @@ -404,7 +408,7 @@ fix_inner_refs(THD *thd, List &all_fields, SELECT_LEX *select, return TRUE; thd->used_tables|= item->used_tables(); } - return res; + return false; } /** -- cgit v1.2.1 From 5f55c23208c746bd1211c09237fe85303dd49567 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Fri, 18 Feb 2011 16:12:36 +0300 Subject: Bug#11765108 (Bug#58036) client utf32, utf16, ucs2 should be disallowed, they crash server A separate fix for 5.1 (as 5.1 and 5.5 have seriously differged in the related pieces of the code). A patch for 5.5 was approved earlier. Problem: ucs2 was correctly disallowed in "SET NAMES" only, while mysql_real_connect() and mysql_change_user() still allowed to use ucs2, which made server crash. Fix: disallow ucs2 in mysql_real_connect() and mysql_change_user(). @ sql/sql_priv.h - changing return type for thd_init_client_charset() to bool, to return errors to the caller @ sql/sql_var.cc - using new function @ sql/sql_connect.cc - thd_client_charset_init: in case of unsupported client character set send error and return true; in case of success return false - check_connection: Return error if character set initialization failed @ sql/sql_parse.cc - check charset in the very beginnig of the CMD_CHANGE_USER handling code @ tests/mysql_client_test.c - adding tests --- sql/mysql_priv.h | 6 ++++- sql/set_var.cc | 2 +- sql/sql_connect.cc | 34 ++++++++++++++++++++---- sql/sql_parse.cc | 17 ++++++++++-- tests/mysql_client_test.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 117 insertions(+), 9 deletions(-) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 1838f0c924d..67631b265ab 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1019,7 +1019,11 @@ void reset_mqh(LEX_USER *lu, bool get_them); bool check_mqh(THD *thd, uint check_command); void time_out_user_resource_limits(THD *thd, USER_CONN *uc); void decrease_user_connections(USER_CONN *uc); -void thd_init_client_charset(THD *thd, uint cs_number); +bool thd_init_client_charset(THD *thd, uint cs_number); +inline bool is_supported_parser_charset(CHARSET_INFO *cs) +{ + return test(cs->mbminlen == 1); +} bool setup_connection_thread_globals(THD *thd); int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); diff --git a/sql/set_var.cc b/sql/set_var.cc index 26c9b06a912..831b68bbe14 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -2187,7 +2187,7 @@ bool sys_var_character_set_client::check(THD *thd, set_var *var) if (sys_var_character_set_sv::check(thd, var)) return 1; /* Currently, UCS-2 cannot be used as a client character set */ - if (var->save_result.charset->mbminlen > 1) + if (!is_supported_parser_charset(var->save_result.charset)) { my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, var->save_result.charset->csname); diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 9fa6966baa2..8129324e300 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -582,8 +582,23 @@ void reset_mqh(LEX_USER *lu, bool get_them= 0) } -void thd_init_client_charset(THD *thd, uint cs_number) +/** + Set thread character set variables from the given ID + + @param thd thread handle + @param cs_number character set and collation ID + + 
@retval 0 OK; character_set_client, collation_connection and + character_set_results are set to the new value, + or to the default global values. + + @retval 1 error, e.g. the given ID is not supported by parser. + Corresponding SQL error is sent. +*/ + +bool thd_init_client_charset(THD *thd, uint cs_number) { + CHARSET_INFO *cs; /* Use server character set and collation if - opt_character_set_client_handshake is not set @@ -592,10 +607,10 @@ void thd_init_client_charset(THD *thd, uint cs_number) - client character set doesn't exists in server */ if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !(cs= get_charset(cs_number, MYF(0))) || !my_strcasecmp(&my_charset_latin1, global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) + cs->name)) { thd->variables.character_set_client= global_system_variables.character_set_client; @@ -606,10 +621,18 @@ void thd_init_client_charset(THD *thd, uint cs_number) } else { + if (!is_supported_parser_charset(cs)) + { + /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */ + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", + cs->csname); + return true; + } thd->variables.character_set_results= thd->variables.collation_connection= - thd->variables.character_set_client; + thd->variables.character_set_client= cs; } + return false; } @@ -782,7 +805,8 @@ static int check_connection(THD *thd) thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - thd_init_client_charset(thd, (uint) net->read_pos[8]); + if (thd_init_client_charset(thd, (uint) net->read_pos[8])) + return 1; thd->update_charset(); end= (char*) net->read_pos+32; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 6ce800312ff..9bf55f4dcdd 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1153,13 +1153,22 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (ptr < packet_end) { + CHARSET_INFO *cs; if (ptr + 2 > packet_end) { my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } - cs_number= uint2korr(ptr); + if ((cs_number= uint2korr(ptr)) && + (cs= get_charset(cs_number, MYF(0))) && + !is_supported_parser_charset(cs)) + { + /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */ + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", + cs->csname); + break; + } } /* Convert database name to utf8 */ @@ -1205,7 +1214,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (cs_number) { - thd_init_client_charset(thd, cs_number); + /* + We have checked charset earlier, + so thd_init_client_charset cannot fail. 
+ */ + DBUG_ASSERT(!thd_init_client_charset(thd, cs_number)); thd->update_charset(); } } diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 80c7be64e94..5a45c132a5f 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -18398,6 +18398,72 @@ static void test_bug47485() } +/* + Bug#58036 client utf32, utf16, ucs2 should be disallowed, they crash server +*/ +static void test_bug58036() +{ + MYSQL *conn; + DBUG_ENTER("test_bug47485"); + myheader("test_bug58036"); + + /* Part1: try to connect with ucs2 client character set */ + conn= mysql_client_init(NULL); + mysql_options(conn, MYSQL_SET_CHARSET_NAME, "ucs2"); + if (mysql_real_connect(conn, opt_host, opt_user, + opt_password, opt_db ? opt_db : "test", + opt_port, opt_unix_socket, 0)) + { + if (!opt_silent) + printf("mysql_real_connect() succeeded (failure expected)\n"); + mysql_close(conn); + DIE(); + } + + if (!opt_silent) + printf("Got mysql_real_connect() error (expected): %s (%d)\n", + mysql_error(conn), mysql_errno(conn)); + DIE_UNLESS(mysql_errno(conn) == ER_WRONG_VALUE_FOR_VAR); + mysql_close(conn); + + + /* + Part2: + - connect with latin1 + - then change client character set to ucs2 + - then try mysql_change_user() + */ + conn= mysql_client_init(NULL); + mysql_options(conn, MYSQL_SET_CHARSET_NAME, "latin1"); + if (!mysql_real_connect(conn, opt_host, opt_user, + opt_password, opt_db ? opt_db : "test", + opt_port, opt_unix_socket, 0)) + { + if (!opt_silent) + printf("mysql_real_connect() failed: %s (%d)\n", + mysql_error(conn), mysql_errno(conn)); + mysql_close(conn); + DIE(); + } + + mysql_options(conn, MYSQL_SET_CHARSET_NAME, "ucs2"); + if (!mysql_change_user(conn, opt_user, opt_password, NULL)) + { + if (!opt_silent) + printf("mysql_change_user() succedded, error expected!"); + mysql_close(conn); + DIE(); + } + + if (!opt_silent) + printf("Got mysql_change_user() error (expected): %s (%d)\n", + mysql_error(conn), mysql_errno(conn)); + mysql_close(conn); + + DBUG_VOID_RETURN; +} + + /* Read and parse arguments and MySQL options from my.cnf */ @@ -18724,6 +18790,7 @@ static struct my_tests_st my_tests[]= { { "test_bug42373", test_bug42373 }, { "test_bug54041", test_bug54041 }, { "test_bug47485", test_bug47485 }, + { "test_bug58036", test_bug58036 }, { 0, 0 } }; -- cgit v1.2.1 From e2e6eb8f6d3dabbf335870efa29fcb77248ab156 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Fri, 18 Feb 2011 17:17:37 +0300 Subject: A post-fix for b58036. 
--- tests/mysql_client_test.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 5a45c132a5f..fc1f2e8293e 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -18417,7 +18417,7 @@ static void test_bug58036() if (!opt_silent) printf("mysql_real_connect() succeeded (failure expected)\n"); mysql_close(conn); - DIE(); + DIE(""); } if (!opt_silent) @@ -18443,7 +18443,7 @@ static void test_bug58036() printf("mysql_real_connect() failed: %s (%d)\n", mysql_error(conn), mysql_errno(conn)); mysql_close(conn); - DIE(); + DIE(""); } mysql_options(conn, MYSQL_SET_CHARSET_NAME, "ucs2"); @@ -18452,7 +18452,7 @@ static void test_bug58036() if (!opt_silent) printf("mysql_change_user() succedded, error expected!"); mysql_close(conn); - DIE(); + DIE(""); } if (!opt_silent) -- cgit v1.2.1 From 876502d7439f3cf34d8554d22f28f06bf8b54388 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 21 Feb 2011 12:37:24 +0530 Subject: Bug#11766310 : 59398: MYSQLDUMP 5.1 CAN'T HANDLE A DASH ("-") IN DATABASE NAMES IN ALTER DATABASE. mysqldump did not quote database name in 'ALTER DATABASE' statements in its output. This can further cause a failure while loading if database name contains a hyphen '-'. This happened as, while printing the 'ALTER DATABASE' statements, the database name was not quoted. Fixed by quoting the database name. client/mysqldump.c: Bug#11766310 : 59398: MYSQLDUMP 5.1 CAN'T HANDLE A DASH ("-") IN DATABASE NAMES IN ALTER DATABASE. Modified the print statement in order to print the quoted database name for 'ALTER DATABASE' statements. mysql-test/r/mysqldump.result: Added a test case for bug#11766310. mysql-test/t/mysqldump.test: Added a test case for bug#11766310. 
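The underlying problem is plain identifier quoting: an unquoted ALTER DATABASE test-database ... can fail to parse when the dump is reloaded, while the backtick-quoted form is accepted. The sketch below is a hypothetical illustration of that idea only; quote_identifier() is an invented helper applying the usual MySQL rule (wrap in backticks, double any embedded backtick), whereas the actual fix in client/mysqldump.c (hunk below) simply passes the database name through the existing quote_name() helper before printing.

    #include <cstdio>
    #include <string>

    /* Invented helper, not mysqldump's quote_name(): wrap the identifier
       in backticks and double any backtick that appears inside it. */
    static std::string quote_identifier(const std::string &name)
    {
      std::string out= "`";
      for (std::string::size_type i= 0; i < name.size(); i++)
      {
        if (name[i] == '`')
          out+= "``";              /* escape embedded backticks by doubling */
        else
          out+= name[i];
      }
      out+= "`";
      return out;
    }

    int main()
    {
      const std::string db= "test-database";

      /* Before the fix: the bare name can make the reloaded statement
         invalid because of the '-' in the database name. */
      std::printf("ALTER DATABASE %s CHARACTER SET utf8 "
                  "COLLATE utf8_unicode_ci;\n", db.c_str());

      /* After the fix: the name is emitted quoted. */
      std::printf("ALTER DATABASE %s CHARACTER SET utf8 "
                  "COLLATE utf8_unicode_ci;\n",
                  quote_identifier(db).c_str());
      return 0;
    }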
--- client/mysqldump.c | 10 ++++++++-- mysql-test/r/mysqldump.result | 36 ++++++++++++++++++++++++++++++++++++ mysql-test/t/mysqldump.test | 21 +++++++++++++++++++++ 3 files changed, 65 insertions(+), 2 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index 57e3f5b0349..0f2f1562ce5 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1134,6 +1134,9 @@ static int switch_db_collation(FILE *sql_file, { if (strcmp(current_db_cl_name, required_db_cl_name) != 0) { + char quoted_db_buf[NAME_LEN * 2 + 3]; + char *quoted_db_name= quote_name(db_name, quoted_db_buf, FALSE); + CHARSET_INFO *db_cl= get_charset_by_name(required_db_cl_name, MYF(0)); if (!db_cl) @@ -1141,7 +1144,7 @@ static int switch_db_collation(FILE *sql_file, fprintf(sql_file, "ALTER DATABASE %s CHARACTER SET %s COLLATE %s %s\n", - (const char *) db_name, + (const char *) quoted_db_name, (const char *) db_cl->csname, (const char *) db_cl->name, (const char *) delimiter); @@ -1162,6 +1165,9 @@ static int restore_db_collation(FILE *sql_file, const char *delimiter, const char *db_cl_name) { + char quoted_db_buf[NAME_LEN * 2 + 3]; + char *quoted_db_name= quote_name(db_name, quoted_db_buf, FALSE); + CHARSET_INFO *db_cl= get_charset_by_name(db_cl_name, MYF(0)); if (!db_cl) @@ -1169,7 +1175,7 @@ static int restore_db_collation(FILE *sql_file, fprintf(sql_file, "ALTER DATABASE %s CHARACTER SET %s COLLATE %s %s\n", - (const char *) db_name, + (const char *) quoted_db_name, (const char *) db_cl->csname, (const char *) db_cl->name, (const char *) delimiter); diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index 289e7f66406..fb70e0f1731 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -4591,5 +4591,41 @@ CREATE TABLE `comment_table` (i INT COMMENT 'FIELD COMMENT') COMMENT = 'TABLE CO DROP TABLE `comment_table`; # +# BUG#11766310 : 59398: MYSQLDUMP 5.1 CAN'T HANDLE A DASH ("-") IN +# DATABASE NAMES IN ALTER DATABASE +# +CREATE DATABASE `test-database`; +USE `test-database`; +CREATE TABLE `test` (`c1` VARCHAR(10)) ENGINE=MYISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; +CREATE TRIGGER `trig` BEFORE INSERT ON `test` FOR EACH ROW BEGIN +END | +ALTER DATABASE `test-database` CHARACTER SET latin1 COLLATE latin1_swedish_ci; +ALTER DATABASE `test-database` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `test` ( + `c1` varchar(10) COLLATE utf8_unicode_ci DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; +/*!40101 SET character_set_client = @saved_cs_client */; +ALTER DATABASE `test-database` CHARACTER SET latin1 COLLATE latin1_swedish_ci ; +/*!50003 SET @saved_cs_client = @@character_set_client */ ; +/*!50003 SET @saved_cs_results = @@character_set_results */ ; +/*!50003 SET @saved_col_connection = @@collation_connection */ ; +/*!50003 SET character_set_client = latin1 */ ; +/*!50003 SET character_set_results = latin1 */ ; +/*!50003 SET collation_connection = latin1_swedish_ci */ ; +/*!50003 SET @saved_sql_mode = @@sql_mode */ ; +/*!50003 SET sql_mode = '' */ ; +DELIMITER ;; +/*!50003 CREATE*/ /*!50017 DEFINER=`root`@`localhost`*/ /*!50003 TRIGGER `trig` BEFORE INSERT ON `test` FOR EACH ROW BEGIN +END */;; +DELIMITER ; +/*!50003 SET sql_mode = @saved_sql_mode */ ; +/*!50003 SET character_set_client = @saved_cs_client */ ; +/*!50003 SET character_set_results = @saved_cs_results */ ; +/*!50003 SET collation_connection = 
@saved_col_connection */ ; +ALTER DATABASE `test-database` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +DROP DATABASE `test-database`; +# # End of 5.1 tests # diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index fe0b05dbb42..0b533284ffa 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -2173,6 +2173,27 @@ CREATE TABLE `comment_table` (i INT COMMENT 'FIELD COMMENT') COMMENT = 'TABLE CO --exec $MYSQL_DUMP --compact --skip-create --xml test DROP TABLE `comment_table`; +--echo # +--echo # BUG#11766310 : 59398: MYSQLDUMP 5.1 CAN'T HANDLE A DASH ("-") IN +--echo # DATABASE NAMES IN ALTER DATABASE +--echo # + +CREATE DATABASE `test-database`; +USE `test-database`; +CREATE TABLE `test` (`c1` VARCHAR(10)) ENGINE=MYISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; + +DELIMITER |; +CREATE TRIGGER `trig` BEFORE INSERT ON `test` FOR EACH ROW BEGIN +END | +DELIMITER ;| + +ALTER DATABASE `test-database` CHARACTER SET latin1 COLLATE latin1_swedish_ci; +ALTER DATABASE `test-database` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; + +--exec $MYSQL_DUMP --quote-names --compact test-database + +DROP DATABASE `test-database`; + --echo # --echo # End of 5.1 tests --echo # -- cgit v1.2.1 From f27a13cf09290c75f9945cca9198fc2a8e2a9d4d Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Tue, 22 Feb 2011 19:28:50 +0530 Subject: Postfix for tests failing due to fix for bug#11766310. --- mysql-test/r/ddl_i18n_koi8r.result | 48 +++++++++++++++++++------------------- mysql-test/r/ddl_i18n_utf8.result | 48 +++++++++++++++++++------------------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/mysql-test/r/ddl_i18n_koi8r.result b/mysql-test/r/ddl_i18n_koi8r.result index fe24c17a1c5..4a4a425362d 100644 --- a/mysql-test/r/ddl_i18n_koi8r.result +++ b/mysql-test/r/ddl_i18n_koi8r.result @@ -724,7 +724,7 @@ utf8_general_ci utf8_general_ci CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest1` /*!40100 DEFAULT CHARACTER SET cp866 */; USE `mysqltest1`; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -757,8 +757,8 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -791,7 +791,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest1 to ddl_i18n_koi8r.sp.mysqltest1.sql @@ -800,7 +800,7 @@ ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE 
cp866_general_ci ; CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest2` /*!40100 DEFAULT CHARACTER SET cp866 */; USE `mysqltest2`; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -833,8 +833,8 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -867,7 +867,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest2 to ddl_i18n_koi8r.sp.mysqltest2.sql @@ -1742,7 +1742,7 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; /*!40101 SET character_set_client = @saved_cs_client */; INSERT INTO `t1` VALUES (1),(0),(1); -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1770,8 +1770,8 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1799,7 +1799,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest1 to ddl_i18n_koi8r.triggers.mysqltest1.sql @@ -1821,7 +1821,7 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; /*!40101 SET character_set_client = @saved_cs_client */; INSERT INTO `t1` VALUES (1),(0),(1); -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER 
SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1849,8 +1849,8 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1878,7 +1878,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest2 to ddl_i18n_koi8r.triggers.mysqltest2.sql @@ -2486,7 +2486,7 @@ CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest1` /*!40100 DEFAULT CHARACTER USE `mysqltest1`; /*!50106 SET @save_time_zone= @@TIME_ZONE */ ; DELIMITER ;; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = @@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2512,9 +2512,9 @@ END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ;; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = @@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2540,7 +2540,7 @@ END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ; /*!50106 SET TIME_ZONE= @save_time_zone */ ; @@ -2553,7 +2553,7 @@ CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest2` /*!40100 DEFAULT CHARACTER USE `mysqltest2`; /*!50106 SET @save_time_zone= @@TIME_ZONE */ ; DELIMITER ;; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = @@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2579,9 +2579,9 @@ 
END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ;; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = @@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2607,7 +2607,7 @@ END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ; /*!50106 SET TIME_ZONE= @save_time_zone */ ; diff --git a/mysql-test/r/ddl_i18n_utf8.result b/mysql-test/r/ddl_i18n_utf8.result index cf4272bf90c..7969ccafa09 100644 --- a/mysql-test/r/ddl_i18n_utf8.result +++ b/mysql-test/r/ddl_i18n_utf8.result @@ -724,7 +724,7 @@ utf8_general_ci utf8_general_ci CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest1` /*!40100 DEFAULT CHARACTER SET cp866 */; USE `mysqltest1`; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -757,8 +757,8 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -791,7 +791,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest1 to ddl_i18n_utf8sp.mysqltest1.sql @@ -800,7 +800,7 @@ ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest2` /*!40100 DEFAULT CHARACTER SET cp866 */; USE `mysqltest2`; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -833,8 +833,8 @@ DELIMITER ; /*!50003 SET character_set_client = 
@saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -867,7 +867,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest2 to ddl_i18n_utf8sp.mysqltest2.sql @@ -1742,7 +1742,7 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; /*!40101 SET character_set_client = @saved_cs_client */; INSERT INTO `t1` VALUES (1),(0),(1); -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1770,8 +1770,8 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1799,7 +1799,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest1 to ddl_i18n_utf8triggers.mysqltest1.sql @@ -1821,7 +1821,7 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; /*!40101 SET character_set_client = @saved_cs_client */; INSERT INTO `t1` VALUES (1),(0),(1); -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1849,8 +1849,8 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; -ALTER DATABASE mysqltest2 
CHARACTER SET utf8 COLLATE utf8_unicode_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ; /*!50003 SET @saved_cs_client = @@character_set_client */ ; /*!50003 SET @saved_cs_results = @@character_set_results */ ; /*!50003 SET @saved_col_connection = @@collation_connection */ ; @@ -1878,7 +1878,7 @@ DELIMITER ; /*!50003 SET character_set_client = @saved_cs_client */ ; /*!50003 SET character_set_results = @saved_cs_results */ ; /*!50003 SET collation_connection = @saved_col_connection */ ; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ; ---> Dumping mysqltest2 to ddl_i18n_utf8triggers.mysqltest2.sql @@ -2486,7 +2486,7 @@ CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest1` /*!40100 DEFAULT CHARACTER USE `mysqltest1`; /*!50106 SET @save_time_zone= @@TIME_ZONE */ ; DELIMITER ;; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = @@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2512,9 +2512,9 @@ END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ;; -ALTER DATABASE mysqltest1 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = @@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2540,7 +2540,7 @@ END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest1 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest1` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ; /*!50106 SET TIME_ZONE= @save_time_zone */ ; @@ -2553,7 +2553,7 @@ CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mysqltest2` /*!40100 DEFAULT CHARACTER USE `mysqltest2`; /*!50106 SET @save_time_zone= @@TIME_ZONE */ ; DELIMITER ;; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = @@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2579,9 +2579,9 @@ END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ;; -ALTER DATABASE mysqltest2 CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET utf8 COLLATE utf8_unicode_ci ;; /*!50003 SET @saved_cs_client = 
@@character_set_client */ ;; /*!50003 SET @saved_cs_results = @@character_set_results */ ;; /*!50003 SET @saved_col_connection = @@collation_connection */ ;; @@ -2607,7 +2607,7 @@ END */ ;; /*!50003 SET character_set_client = @saved_cs_client */ ;; /*!50003 SET character_set_results = @saved_cs_results */ ;; /*!50003 SET collation_connection = @saved_col_connection */ ;; -ALTER DATABASE mysqltest2 CHARACTER SET cp866 COLLATE cp866_general_ci ;; +ALTER DATABASE `mysqltest2` CHARACTER SET cp866 COLLATE cp866_general_ci ;; DELIMITER ; /*!50106 SET TIME_ZONE= @save_time_zone */ ; -- cgit v1.2.1 From 8b3f9560200717a4f06be01e991f4c6ee4758504 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Wed, 23 Feb 2011 10:31:37 +0100 Subject: Bug #11762407 54999: MTR GLOBAL SUPPRESSION HIDES SQL THREAD EXECUTION UNEXPECTED ERRORS Removed the global suppression, added lots of local ones to affected tests Re-commit, now kept "Slave SQL" at start of patterns. --- mysql-test/extra/binlog_tests/binlog.test | 1 + mysql-test/extra/rpl_tests/rpl_conflicts.test | 2 ++ mysql-test/extra/rpl_tests/rpl_extra_col_master.test | 6 ++++++ mysql-test/extra/rpl_tests/rpl_extra_col_slave.test | 4 ++++ mysql-test/extra/rpl_tests/rpl_loaddata.test | 2 ++ mysql-test/extra/rpl_tests/rpl_row_basic.test | 4 +++- mysql-test/extra/rpl_tests/rpl_row_tabledefs.test | 1 + mysql-test/extra/rpl_tests/rpl_stm_EE_err2.test | 1 + mysql-test/include/mtr_warnings.sql | 1 - mysql-test/suite/binlog/r/binlog_base64_flag.result | 2 ++ mysql-test/suite/binlog/r/binlog_row_binlog.result | 1 + mysql-test/suite/binlog/r/binlog_stm_binlog.result | 1 + mysql-test/suite/binlog/t/binlog_base64_flag.test | 2 ++ mysql-test/suite/rpl/r/rpl_binlog_corruption.result | 1 + mysql-test/suite/rpl/r/rpl_binlog_max_cache_size.result | 1 + mysql-test/suite/rpl/r/rpl_bug33931.result | 1 + mysql-test/suite/rpl/r/rpl_circular_for_4_hosts.result | 1 + mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result | 12 ++++++++++++ mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result | 12 ++++++++++++ mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result | 3 +++ mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result | 3 +++ mysql-test/suite/rpl/r/rpl_filter_tables_not_exist.result | 1 + mysql-test/suite/rpl/r/rpl_idempotency.result | 7 ++++--- mysql-test/suite/rpl/r/rpl_ignore_table.result | 1 + mysql-test/suite/rpl/r/rpl_incident.result | 1 + mysql-test/suite/rpl/r/rpl_init_slave_errors.result | 2 +- mysql-test/suite/rpl/r/rpl_known_bugs_detection.result | 1 + mysql-test/suite/rpl/r/rpl_loaddata.result | 2 ++ mysql-test/suite/rpl/r/rpl_loaddata_concurrent.result | 2 ++ mysql-test/suite/rpl/r/rpl_loaddata_fatal.result | 1 + mysql-test/suite/rpl/r/rpl_rotate_logs.result | 1 + mysql-test/suite/rpl/r/rpl_row_basic_11bugs.result | 1 + mysql-test/suite/rpl/r/rpl_row_basic_2myisam.result | 2 ++ mysql-test/suite/rpl/r/rpl_row_basic_3innodb.result | 2 ++ mysql-test/suite/rpl/r/rpl_row_colSize.result | 1 + mysql-test/suite/rpl/r/rpl_row_conflicts.result | 2 ++ mysql-test/suite/rpl/r/rpl_row_inexist_tbl.result | 1 + mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result | 1 + mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result | 1 + mysql-test/suite/rpl/r/rpl_skip_error.result | 1 + mysql-test/suite/rpl/r/rpl_slave_grp_exec.result | 1 + mysql-test/suite/rpl/r/rpl_slave_load_remove_tmpfile.result | 1 + .../suite/rpl/r/rpl_slave_load_tmpdir_not_exist.result | 1 + mysql-test/suite/rpl/r/rpl_stm_EE_err2.result | 1 + mysql-test/suite/rpl/r/rpl_stm_conflicts.result | 1 + 
mysql-test/suite/rpl/r/rpl_temporary_errors.result | 1 + mysql-test/suite/rpl/t/rpl_binlog_corruption.test | 2 +- mysql-test/suite/rpl/t/rpl_binlog_max_cache_size.test | 1 + mysql-test/suite/rpl/t/rpl_bug33931.test | 1 + mysql-test/suite/rpl/t/rpl_circular_for_4_hosts.test | 1 + mysql-test/suite/rpl/t/rpl_filter_tables_not_exist.test | 2 ++ mysql-test/suite/rpl/t/rpl_idempotency.test | 7 ++++--- mysql-test/suite/rpl/t/rpl_ignore_table.test | 1 + mysql-test/suite/rpl/t/rpl_incident.test | 1 + mysql-test/suite/rpl/t/rpl_init_slave_errors.test | 2 +- mysql-test/suite/rpl/t/rpl_known_bugs_detection.test | 1 + mysql-test/suite/rpl/t/rpl_loaddata_fatal.test | 1 + mysql-test/suite/rpl/t/rpl_rotate_logs.test | 1 + mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test | 1 + mysql-test/suite/rpl/t/rpl_row_colSize.test | 3 +++ mysql-test/suite/rpl/t/rpl_row_inexist_tbl.test | 1 + mysql-test/suite/rpl/t/rpl_skip_error.test | 2 ++ mysql-test/suite/rpl/t/rpl_slave_grp_exec.test | 1 + mysql-test/suite/rpl/t/rpl_slave_load_remove_tmpfile.test | 1 + mysql-test/suite/rpl/t/rpl_slave_load_tmpdir_not_exist.test | 1 + mysql-test/suite/rpl/t/rpl_temporary_errors.test | 3 +++ 66 files changed, 122 insertions(+), 11 deletions(-) diff --git a/mysql-test/extra/binlog_tests/binlog.test b/mysql-test/extra/binlog_tests/binlog.test index fd6ba1c17fa..a776ba5eaf6 100644 --- a/mysql-test/extra/binlog_tests/binlog.test +++ b/mysql-test/extra/binlog_tests/binlog.test @@ -343,6 +343,7 @@ SHOW SESSION VARIABLES LIKE "%_checks"; --echo # INSERT INTO t1 VALUES(2) --echo # foreign_key_checks=1 and unique_checks=1 --echo # It should not change current session's variables, even error happens +call mtr.add_suppression("Slave SQL.*Could not execute Write_rows event on table test.t1; Duplicate entry .2. for key .PRIMARY., Error_code: 1062"); --error 1062 BINLOG ' dfLtTBMBAAAAKQAAAKsBAAAAABcAAAAAAAEABHRlc3QAAnQxAAEDAAE= diff --git a/mysql-test/extra/rpl_tests/rpl_conflicts.test b/mysql-test/extra/rpl_tests/rpl_conflicts.test index 943d254736d..866a31e92b0 100644 --- a/mysql-test/extra/rpl_tests/rpl_conflicts.test +++ b/mysql-test/extra/rpl_tests/rpl_conflicts.test @@ -93,6 +93,7 @@ if (`SELECT @@global.binlog_format != 'ROW' OR @@global.slave_exec_mode = 'STRIC source include/wait_for_slave_sql_error.inc; let $err= query_get_value("SHOW SLAVE STATUS", Last_SQL_Error, 1); --echo Last_SQL_Error = $err (expected "duplicate key" error) + call mtr.add_suppression("Slave SQL.*Duplicate entry .1. for key .PRIMARY.* Error_code: 1062"); SELECT * FROM t1; --echo ---- Resolve the conflict on the slave and restart SQL thread ---- @@ -137,6 +138,7 @@ connection slave; # replication continues. 
if (`SELECT @@global.binlog_format = 'ROW' AND @@global.slave_exec_mode = 'STRICT'`) { --echo ---- Wait until slave stops with an error ---- + call mtr.add_suppression("Can.t find record in .t1., Error_code: 1032"); let $slave_sql_errno= 1032; # ER_KEY_NOT_FOUND source include/wait_for_slave_sql_error.inc; diff --git a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test index eb50149655e..6dba4202260 100644 --- a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test +++ b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test @@ -121,6 +121,12 @@ SELECT f1,f2,f3,f4,f5,f6,f7,f8,f9, hex(f10),hex(f11) FROM t1 ORDER BY f3 LIMIT 20; #connection slave; + +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); +call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); +call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); +call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); + sync_slave_with_master; --echo --echo * Select count and 20 rows from Slave * diff --git a/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test b/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test index 882ef2c4e63..cb2421d5d74 100644 --- a/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test +++ b/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test @@ -766,6 +766,10 @@ RESET MASTER; connection slave; START SLAVE; +call mtr.add_suppression("Slave SQL.*Error .Unknown table .t6.. on query.* Error_code: 1051"); +call mtr.add_suppression("Slave SQL.*Error .Duplicate column name .c6.. on query.* Error_code: 1060"); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column . ...e mismatch.* Error_code: 1535"); + --echo *** Master Data Insert *** connection master; set @b1 = 'b1b1b1b1'; diff --git a/mysql-test/extra/rpl_tests/rpl_loaddata.test b/mysql-test/extra/rpl_tests/rpl_loaddata.test index 4a21123e8a1..ae04c1b4aaa 100644 --- a/mysql-test/extra/rpl_tests/rpl_loaddata.test +++ b/mysql-test/extra/rpl_tests/rpl_loaddata.test @@ -63,6 +63,8 @@ eval $lower_stmt_head infile '../../std_data/rpl_loaddata.dat' into table t1; save_master_pos; connection slave; # 1062 = ER_DUP_ENTRY +call mtr.add_suppression("Slave SQL.*Error .Duplicate entry .10. for key .b.. 
on query.* Error_code: 1062"); +call mtr.add_suppression("Slave SQL.*Query caused different errors on master and slave.*Error on master:.*error code=1062.*Error on slave:.*Error_code: 0"); --let $slave_sql_errno= 1062 --source include/wait_for_slave_sql_error_and_skip.inc diff --git a/mysql-test/extra/rpl_tests/rpl_row_basic.test b/mysql-test/extra/rpl_tests/rpl_row_basic.test index c8de853a297..70b4edceb27 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_basic.test +++ b/mysql-test/extra/rpl_tests/rpl_row_basic.test @@ -371,7 +371,9 @@ INSERT INTO t3 VALUES (1, "", 1); INSERT INTO t3 VALUES (2, repeat(_utf8'a', 128), 2); connection slave; -# 1535 = ER_BINLOG_ROW_WRONG_TABLE_DEF +# 1535 = ER_BINLOG_ROW_WRONG_TABLE_DEF +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 1 size mismatch.* Error_code: 1535"); +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.* Error_code: 1032"); --let $slave_sql_errno= 1535 --let $show_slave_sql_error= 1 --source include/wait_for_slave_sql_error.inc diff --git a/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test b/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test index ee6205c79d8..4e15a5a98ff 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test +++ b/mysql-test/extra/rpl_tests/rpl_row_tabledefs.test @@ -147,6 +147,7 @@ sync_slave_with_master; connection master; INSERT INTO t4 VALUES (4); connection slave; +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column [012] type mismatch.* Error_code: 1535"); --let $slave_skip_counter= 2 --let $slave_sql_errno= 1535 --let $show_slave_sql_error= 1 diff --git a/mysql-test/extra/rpl_tests/rpl_stm_EE_err2.test b/mysql-test/extra/rpl_tests/rpl_stm_EE_err2.test index d4140785878..cc69c08fe7c 100644 --- a/mysql-test/extra/rpl_tests/rpl_stm_EE_err2.test +++ b/mysql-test/extra/rpl_tests/rpl_stm_EE_err2.test @@ -25,6 +25,7 @@ drop table t1; connection slave; --source include/wait_for_slave_sql_to_stop.inc +call mtr.add_suppression("Slave SQL.*Query caused different errors on master and slave.*Error on master:.* error code=1062.*Error on slave:.* Error_code: 0"); let $error= query_get_value(SHOW SLAVE STATUS, Last_SQL_Error, 1); let $errno= query_get_value(SHOW SLAVE STATUS, Last_SQL_Errno, 1); --echo Error: "$error" (expected different error codes on master and slave) diff --git a/mysql-test/include/mtr_warnings.sql b/mysql-test/include/mtr_warnings.sql index 9dc64952979..30919dd10dc 100644 --- a/mysql-test/include/mtr_warnings.sql +++ b/mysql-test/include/mtr_warnings.sql @@ -107,7 +107,6 @@ INSERT INTO global_suppressions VALUES ("Slave: The incident LOST_EVENTS occured on the master"), ("Slave: Unknown error.* 1105"), ("Slave: Can't drop database.* database doesn't exist"), - ("Slave SQL:.*(Error_code: \[\[:digit:\]\]+|Query:.*)"), ("Sort aborted"), ("Time-out in NDB"), ("Warning:\s+One can only use the --user.*root"), diff --git a/mysql-test/suite/binlog/r/binlog_base64_flag.result b/mysql-test/suite/binlog/r/binlog_base64_flag.result index 7fb5e50a219..a4c610c845a 100644 --- a/mysql-test/suite/binlog/r/binlog_base64_flag.result +++ b/mysql-test/suite/binlog/r/binlog_base64_flag.result @@ -91,6 +91,8 @@ iONkSBcBAAAAKwAAAMQBAAAQABAAAAAAAAEAA//4AQAAAAMAMTIzAQAAAA== '; ERROR HY000: master may suffer from http://bugs.mysql.com/bug.php?id=37426 so slave stops; check error log on slave for more info drop table t1, char63_utf8, char128_utf8; +call mtr.add_suppression("Slave SQL.*master suffers 
from this bug: http:..bugs.mysql.com.bug.php.id=37426.* Error_code: 1105"); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 1 size mismatch.* Error_code: 1535"); # # Bug #54393: crash and/or valgrind errors in # mysql_client_binlog_statement diff --git a/mysql-test/suite/binlog/r/binlog_row_binlog.result b/mysql-test/suite/binlog/r/binlog_row_binlog.result index 1678f8add58..e726d236323 100644 --- a/mysql-test/suite/binlog/r/binlog_row_binlog.result +++ b/mysql-test/suite/binlog/r/binlog_row_binlog.result @@ -1374,6 +1374,7 @@ unique_checks OFF # INSERT INTO t1 VALUES(2) # foreign_key_checks=1 and unique_checks=1 # It should not change current session's variables, even error happens +call mtr.add_suppression("Slave SQL.*Could not execute Write_rows event on table test.t1; Duplicate entry .2. for key .PRIMARY., Error_code: 1062"); BINLOG ' dfLtTBMBAAAAKQAAAKsBAAAAABcAAAAAAAEABHRlc3QAAnQxAAEDAAE= dfLtTBcBAAAAIgAAAM0BAAAAABcAAAAAAAEAAf/+AgAAAA== diff --git a/mysql-test/suite/binlog/r/binlog_stm_binlog.result b/mysql-test/suite/binlog/r/binlog_stm_binlog.result index 872a93bef43..5a06bb72b90 100644 --- a/mysql-test/suite/binlog/r/binlog_stm_binlog.result +++ b/mysql-test/suite/binlog/r/binlog_stm_binlog.result @@ -845,6 +845,7 @@ unique_checks OFF # INSERT INTO t1 VALUES(2) # foreign_key_checks=1 and unique_checks=1 # It should not change current session's variables, even error happens +call mtr.add_suppression("Slave SQL.*Could not execute Write_rows event on table test.t1; Duplicate entry .2. for key .PRIMARY., Error_code: 1062"); BINLOG ' dfLtTBMBAAAAKQAAAKsBAAAAABcAAAAAAAEABHRlc3QAAnQxAAEDAAE= dfLtTBcBAAAAIgAAAM0BAAAAABcAAAAAAAEAAf/+AgAAAA== diff --git a/mysql-test/suite/binlog/t/binlog_base64_flag.test b/mysql-test/suite/binlog/t/binlog_base64_flag.test index 3f1e4e98bec..296c4bf05aa 100644 --- a/mysql-test/suite/binlog/t/binlog_base64_flag.test +++ b/mysql-test/suite/binlog/t/binlog_base64_flag.test @@ -151,6 +151,8 @@ iONkSBcBAAAAKwAAAMQBAAAQABAAAAAAAAEAA//4AQAAAAMAMTIzAQAAAA== drop table t1, char63_utf8, char128_utf8; +call mtr.add_suppression("Slave SQL.*master suffers from this bug: http:..bugs.mysql.com.bug.php.id=37426.* Error_code: 1105"); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 1 size mismatch.* Error_code: 1535"); --echo # --echo # Bug #54393: crash and/or valgrind errors in diff --git a/mysql-test/suite/rpl/r/rpl_binlog_corruption.result b/mysql-test/suite/rpl/r/rpl_binlog_corruption.result index 4f1eca42e1a..a2a065e0a4f 100644 --- a/mysql-test/suite/rpl/r/rpl_binlog_corruption.result +++ b/mysql-test/suite/rpl/r/rpl_binlog_corruption.result @@ -1,6 +1,7 @@ include/master-slave.inc [connection master] call mtr.add_suppression('Found invalid event in binary log'); +call mtr.add_suppression('Slave SQL.*Relay log read failure: Could not parse relay log event entry.* 1594'); ==== Initialize ==== include/stop_slave.inc RESET SLAVE; diff --git a/mysql-test/suite/rpl/r/rpl_binlog_max_cache_size.result b/mysql-test/suite/rpl/r/rpl_binlog_max_cache_size.result index a7e8b86ac79..2d5676a82d6 100644 --- a/mysql-test/suite/rpl/r/rpl_binlog_max_cache_size.result +++ b/mysql-test/suite/rpl/r/rpl_binlog_max_cache_size.result @@ -125,6 +125,7 @@ include/stop_slave.inc include/start_slave.inc CALL mtr.add_suppression("Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage.*"); CALL mtr.add_suppression("Writing one row to the row-based binary log 
failed.*"); +CALL mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occured on the master. Message: error writing to the binary log"); TRUNCATE t1; SET GLOBAL max_binlog_cache_size= ORIGINAL_VALUE; SET GLOBAL binlog_cache_size= ORIGINAL_VALUE; diff --git a/mysql-test/suite/rpl/r/rpl_bug33931.result b/mysql-test/suite/rpl/r/rpl_bug33931.result index d27308db1d5..ce8b6b169c7 100644 --- a/mysql-test/suite/rpl/r/rpl_bug33931.result +++ b/mysql-test/suite/rpl/r/rpl_bug33931.result @@ -1,6 +1,7 @@ include/master-slave.inc [connection master] call mtr.add_suppression("Failed during slave I/O thread initialization"); +call mtr.add_suppression("Slave SQL.*Failed during slave thread initialization.* 1593"); include/stop_slave.inc reset slave; SET GLOBAL debug="d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on_init"; diff --git a/mysql-test/suite/rpl/r/rpl_circular_for_4_hosts.result b/mysql-test/suite/rpl/r/rpl_circular_for_4_hosts.result index 6981e549918..412021d6446 100644 --- a/mysql-test/suite/rpl/r/rpl_circular_for_4_hosts.result +++ b/mysql-test/suite/rpl/r/rpl_circular_for_4_hosts.result @@ -46,6 +46,7 @@ SET GLOBAL SQL_SLAVE_SKIP_COUNTER = 1; include/start_slave.inc INSERT INTO t1 VALUES(6,'C',2); INSERT INTO t1(b,c) VALUES('B',2); +call mtr.add_suppression("Slave SQL.*Duplicate entry .6. for key .PRIMARY.* Error_code: 1062"); include/wait_for_slave_sql_error.inc [errno=1062] INSERT INTO t1(b,c) VALUES('A',2); INSERT INTO t1(b,c) VALUES('D',2); diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result index affb179d50e..f235c68cc95 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result @@ -58,6 +58,10 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); +call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); +call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); +call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); * Select count and 20 rows from Slave * @@ -929,6 +933,10 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); +call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); +call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); +call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. 
on query.* 1072"); * Select count and 20 rows from Slave * @@ -1800,6 +1808,10 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); +call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); +call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); +call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); * Select count and 20 rows from Slave * diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result index 8aeb5bdc1c9..52f4a7a8453 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result @@ -58,6 +58,10 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); +call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); +call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); +call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); * Select count and 20 rows from Slave * @@ -929,6 +933,10 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); +call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); +call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); +call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); * Select count and 20 rows from Slave * @@ -1800,6 +1808,10 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); +call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); +call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); +call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. 
on query.* 1072"); * Select count and 20 rows from Slave * diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result b/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result index 9ea319379c0..e71f408ae85 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result @@ -460,6 +460,9 @@ c4 BLOB, c5 CHAR(5)) ENGINE='InnoDB'; RESET MASTER; *** Start Slave *** START SLAVE; +call mtr.add_suppression("Slave SQL.*Error .Unknown table .t6.. on query.* Error_code: 1051"); +call mtr.add_suppression("Slave SQL.*Error .Duplicate column name .c6.. on query.* Error_code: 1060"); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column . ...e mismatch.* Error_code: 1535"); *** Master Data Insert *** set @b1 = 'b1b1b1b1'; set @b1 = concat(@b1,@b1); diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result b/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result index 716a35b3464..51ca555b545 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result @@ -460,6 +460,9 @@ c4 BLOB, c5 CHAR(5)) ENGINE='MyISAM'; RESET MASTER; *** Start Slave *** START SLAVE; +call mtr.add_suppression("Slave SQL.*Error .Unknown table .t6.. on query.* Error_code: 1051"); +call mtr.add_suppression("Slave SQL.*Error .Duplicate column name .c6.. on query.* Error_code: 1060"); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column . ...e mismatch.* Error_code: 1535"); *** Master Data Insert *** set @b1 = 'b1b1b1b1'; set @b1 = concat(@b1,@b1); diff --git a/mysql-test/suite/rpl/r/rpl_filter_tables_not_exist.result b/mysql-test/suite/rpl/r/rpl_filter_tables_not_exist.result index 4eaf61e5f9e..c88dcee9dbc 100644 --- a/mysql-test/suite/rpl/r/rpl_filter_tables_not_exist.result +++ b/mysql-test/suite/rpl/r/rpl_filter_tables_not_exist.result @@ -42,6 +42,7 @@ UPDATE t4 LEFT JOIN (t1, t2, t5) ON (t1.id=t4.id and t2.id=t4.id and t5.id=t4.id UPDATE t4 LEFT JOIN (t1, t6, t7) ON (t4.id=t1.id and t4.id=t6.id and t4.id=t7.id) SET a=0, d=0, f=0, g=0 where t4.id=1; UPDATE t7 LEFT JOIN (t4, t1, t2) ON (t7.id=t4.id and t7.id=t1.id and t7.id=t2.id) SET a=0, b=0, d=0, g=0 where t7.id=1; UPDATE t7 LEFT JOIN (t8, t4, t1) ON (t7.id=t8.id and t7.id=t4.id and t7.id=t1.id) SET a=0, d=0, g=0, h=0 where t7.id=1; +call mtr.add_suppression("Slave SQL.*Error .Table .test.t[47]. doesn.t exist. on query.* Error_code: 1146"); UPDATE t1 LEFT JOIN t4 ON (t1.id=t4.id) SET a=0 where t1.id=1; include/wait_for_slave_sql_error_and_skip.inc [errno=1146] Last_SQL_Error = 'Error 'Table 'test.t4' doesn't exist' on query. Default database: 'test'. Query: 'UPDATE t1 LEFT JOIN t4 ON (t1.id=t4.id) SET a=0 where t1.id=1'' diff --git a/mysql-test/suite/rpl/r/rpl_idempotency.result b/mysql-test/suite/rpl/r/rpl_idempotency.result index 1ae2d0b9ee5..8ccef65a192 100644 --- a/mysql-test/suite/rpl/r/rpl_idempotency.result +++ b/mysql-test/suite/rpl/r/rpl_idempotency.result @@ -1,8 +1,9 @@ include/master-slave.inc [connection master] -call mtr.add_suppression("Slave: Can't find record in 't.' 
Error_code: 1032"); -call mtr.add_suppression("Slave: Cannot delete or update a parent row: a foreign key constraint fails .* Error_code: 1451"); -call mtr.add_suppression("Slave: Cannot add or update a child row: a foreign key constraint fails .* Error_code: 1452"); +call mtr.add_suppression("Can.t find record in .t[12].* Error_code: 1032"); +call mtr.add_suppression("Cannot delete or update a parent row: a foreign key constraint fails .* Error_code: 1451"); +call mtr.add_suppression("Cannot add or update a child row: a foreign key constraint fails .* Error_code: 1452"); +call mtr.add_suppression("Slave SQL.*Could not execute Write_rows event on table test.* Duplicate entry .1. for key .PRIMARY.* Error_code: 1062"); SET @old_slave_exec_mode= @@global.slave_exec_mode; CREATE TABLE t1 (a INT PRIMARY KEY); CREATE TABLE t2 (a INT); diff --git a/mysql-test/suite/rpl/r/rpl_ignore_table.result b/mysql-test/suite/rpl/r/rpl_ignore_table.result index 2e3fd62fbec..b92f97e24ee 100644 --- a/mysql-test/suite/rpl/r/rpl_ignore_table.result +++ b/mysql-test/suite/rpl/r/rpl_ignore_table.result @@ -112,6 +112,7 @@ show grants for mysqltest4@localhost; Grants for mysqltest4@localhost GRANT USAGE ON *.* TO 'mysqltest4'@'localhost' IDENTIFIED BY PASSWORD '*196BDEDE2AE4F84CA44C47D54D78478C7E2BD7B7' set global slave_exec_mode='IDEMPOTENT'; +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table mysql.* Error_code: 1032"); drop table t1, mysqltest2.t2; drop table t4; drop database mysqltest2; diff --git a/mysql-test/suite/rpl/r/rpl_incident.result b/mysql-test/suite/rpl/r/rpl_incident.result index b54d7d400f7..d528fb3297a 100644 --- a/mysql-test/suite/rpl/r/rpl_incident.result +++ b/mysql-test/suite/rpl/r/rpl_incident.result @@ -15,6 +15,7 @@ a 2 3 4 +call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occured on the master.* 1590"); include/wait_for_slave_sql_error.inc [errno=1590] Last_SQL_Error = 'The incident LOST_EVENTS occured on the master. Message: ' **** On Slave **** diff --git a/mysql-test/suite/rpl/r/rpl_init_slave_errors.result b/mysql-test/suite/rpl/r/rpl_init_slave_errors.result index 46bc66cbb65..a185afc5af2 100644 --- a/mysql-test/suite/rpl/r/rpl_init_slave_errors.result +++ b/mysql-test/suite/rpl/r/rpl_init_slave_errors.result @@ -6,7 +6,7 @@ SET GLOBAL debug= "d,simulate_io_slave_error_on_init,simulate_sql_slave_error_on start slave; include/wait_for_slave_sql_error.inc [errno=1593] Last_SQL_Error = 'Failed during slave thread initialization' -call mtr.add_suppression("Failed during slave I/O thread initialization"); +call mtr.add_suppression("Failed during slave.* thread initialization"); SET GLOBAL debug= ""; reset slave; SET GLOBAL init_slave= "garbage"; diff --git a/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result b/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result index 52980e81523..972e877bf18 100644 --- a/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result +++ b/mysql-test/suite/rpl/r/rpl_known_bugs_detection.result @@ -7,6 +7,7 @@ SELECT * FROM t1; a b 1 10 2 2 +call mtr.add_suppression("Slave SQL.*suffer.*http:..bugs.mysql.com.bug.php.id=24432"); include/wait_for_slave_sql_error.inc [errno=1105] Last_SQL_Error = 'Error 'master may suffer from http://bugs.mysql.com/bug.php?id=24432 so slave stops; check error log on slave for more info' on query. Default database: 'test'. 
Query: 'INSERT INTO t1(b) VALUES(1),(1),(2) ON DUPLICATE KEY UPDATE t1.b=10'' SELECT * FROM t1; diff --git a/mysql-test/suite/rpl/r/rpl_loaddata.result b/mysql-test/suite/rpl/r/rpl_loaddata.result index bd8bc5093aa..c4bcb692133 100644 --- a/mysql-test/suite/rpl/r/rpl_loaddata.result +++ b/mysql-test/suite/rpl/r/rpl_loaddata.result @@ -27,6 +27,8 @@ drop table t3; create table t1(a int, b int, unique(b)); insert into t1 values(1,10); load data infile '../../std_data/rpl_loaddata.dat' into table t1; +call mtr.add_suppression("Slave SQL.*Error .Duplicate entry .10. for key .b.. on query.* Error_code: 1062"); +call mtr.add_suppression("Slave SQL.*Query caused different errors on master and slave.*Error on master:.*error code=1062.*Error on slave:.*Error_code: 0"); include/wait_for_slave_sql_error_and_skip.inc [errno=1062] include/check_slave_no_error.inc set sql_log_bin=0; diff --git a/mysql-test/suite/rpl/r/rpl_loaddata_concurrent.result b/mysql-test/suite/rpl/r/rpl_loaddata_concurrent.result index bc40f32842d..d1c7bf65624 100644 --- a/mysql-test/suite/rpl/r/rpl_loaddata_concurrent.result +++ b/mysql-test/suite/rpl/r/rpl_loaddata_concurrent.result @@ -38,6 +38,8 @@ drop table t3; create table t1(a int, b int, unique(b)); insert into t1 values(1,10); load data CONCURRENT infile '../../std_data/rpl_loaddata.dat' into table t1; +call mtr.add_suppression("Slave SQL.*Error .Duplicate entry .10. for key .b.. on query.* Error_code: 1062"); +call mtr.add_suppression("Slave SQL.*Query caused different errors on master and slave.*Error on master:.*error code=1062.*Error on slave:.*Error_code: 0"); include/wait_for_slave_sql_error_and_skip.inc [errno=1062] include/check_slave_no_error.inc set sql_log_bin=0; diff --git a/mysql-test/suite/rpl/r/rpl_loaddata_fatal.result b/mysql-test/suite/rpl/r/rpl_loaddata_fatal.result index a81813de19f..b13e2ced183 100644 --- a/mysql-test/suite/rpl/r/rpl_loaddata_fatal.result +++ b/mysql-test/suite/rpl/r/rpl_loaddata_fatal.result @@ -3,6 +3,7 @@ include/master-slave.inc CREATE TABLE t1 (a INT, b INT); INSERT INTO t1 VALUES (1,10); LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE t1; +call mtr.add_suppression("Slave SQL.*Fatal error: Not enough memory, Error_code: 1593"); include/wait_for_slave_sql_error_and_skip.inc [errno=1593] Last_SQL_Error = 'Fatal error: Not enough memory' DROP TABLE t1; diff --git a/mysql-test/suite/rpl/r/rpl_rotate_logs.result b/mysql-test/suite/rpl/r/rpl_rotate_logs.result index e41f56c0a71..1166b0d18a9 100644 --- a/mysql-test/suite/rpl/r/rpl_rotate_logs.result +++ b/mysql-test/suite/rpl/r/rpl_rotate_logs.result @@ -36,6 +36,7 @@ drop table temp_table, t3; insert into t2 values(1234); set insert_id=1234; insert into t2 values(NULL); +call mtr.add_suppression("Slave SQL.*Error .Duplicate entry .1234. for key .PRIMARY.. 
on query.* Error_code: 1062"); include/wait_for_slave_sql_error_and_skip.inc [errno=1062] purge master logs to 'master-bin.000002'; show master logs; diff --git a/mysql-test/suite/rpl/r/rpl_row_basic_11bugs.result b/mysql-test/suite/rpl/r/rpl_row_basic_11bugs.result index d769b0b0881..0f2db9cbf1d 100644 --- a/mysql-test/suite/rpl/r/rpl_row_basic_11bugs.result +++ b/mysql-test/suite/rpl/r/rpl_row_basic_11bugs.result @@ -63,6 +63,7 @@ DROP TABLE t1; include/rpl_reset.inc **** On Slave **** SET GLOBAL QUERY_CACHE_SIZE=0; +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.t1.* Error_code: 1032"); **** On Master **** CREATE TABLE t1 (a INT); INSERT INTO t1 VALUES (1),(2),(3); diff --git a/mysql-test/suite/rpl/r/rpl_row_basic_2myisam.result b/mysql-test/suite/rpl/r/rpl_row_basic_2myisam.result index 7afc70bfa5c..4f051d19e4b 100644 --- a/mysql-test/suite/rpl/r/rpl_row_basic_2myisam.result +++ b/mysql-test/suite/rpl/r/rpl_row_basic_2myisam.result @@ -478,6 +478,8 @@ include/diff_tables.inc [master:t2, slave:t2] [expecting slave to stop] INSERT INTO t3 VALUES (1, "", 1); INSERT INTO t3 VALUES (2, repeat(_utf8'a', 128), 2); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 1 size mismatch.* Error_code: 1535"); +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.* Error_code: 1032"); include/wait_for_slave_sql_error.inc [errno=1535] Last_SQL_Error = 'Table definition on master and slave does not match: Column 1 size mismatch - master has size 384, test.t3 on slave has size 49. Master's column size should be <= the slave's column size.' include/rpl_reset.inc diff --git a/mysql-test/suite/rpl/r/rpl_row_basic_3innodb.result b/mysql-test/suite/rpl/r/rpl_row_basic_3innodb.result index b8620894bd1..1ae1cd84579 100644 --- a/mysql-test/suite/rpl/r/rpl_row_basic_3innodb.result +++ b/mysql-test/suite/rpl/r/rpl_row_basic_3innodb.result @@ -478,6 +478,8 @@ include/diff_tables.inc [master:t2, slave:t2] [expecting slave to stop] INSERT INTO t3 VALUES (1, "", 1); INSERT INTO t3 VALUES (2, repeat(_utf8'a', 128), 2); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 1 size mismatch.* Error_code: 1535"); +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.* Error_code: 1032"); include/wait_for_slave_sql_error.inc [errno=1535] Last_SQL_Error = 'Table definition on master and slave does not match: Column 1 size mismatch - master has size 384, test.t3 on slave has size 49. Master's column size should be <= the slave's column size.' 
include/rpl_reset.inc diff --git a/mysql-test/suite/rpl/r/rpl_row_colSize.result b/mysql-test/suite/rpl/r/rpl_row_colSize.result index 49753d2b25c..d31f6d1c54e 100644 --- a/mysql-test/suite/rpl/r/rpl_row_colSize.result +++ b/mysql-test/suite/rpl/r/rpl_row_colSize.result @@ -265,6 +265,7 @@ STOP SLAVE; RESET SLAVE; RESET MASTER; START SLAVE; +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 0 ...e mismatch.* Error_code: 1535"); *** Cleanup *** DROP TABLE IF EXISTS t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_row_conflicts.result b/mysql-test/suite/rpl/r/rpl_row_conflicts.result index d8fbab98191..25bc9cfe5b3 100644 --- a/mysql-test/suite/rpl/r/rpl_row_conflicts.result +++ b/mysql-test/suite/rpl/r/rpl_row_conflicts.result @@ -22,6 +22,7 @@ a ---- Wait until slave stops with an error ---- include/wait_for_slave_sql_error.inc [errno=1062] Last_SQL_Error = Could not execute Write_rows event on table test.t1; Duplicate entry '1' for key 'PRIMARY', Error_code: 1062; handler error HA_ERR_FOUND_DUPP_KEY; the event's master log master-bin.000001, end_log_pos 346 (expected "duplicate key" error) +call mtr.add_suppression("Slave SQL.*Duplicate entry .1. for key .PRIMARY.* Error_code: 1062"); SELECT * FROM t1; a 1 @@ -48,6 +49,7 @@ SELECT * FROM t1; a [on slave] ---- Wait until slave stops with an error ---- +call mtr.add_suppression("Can.t find record in .t1., Error_code: 1032"); include/wait_for_slave_sql_error.inc [errno=1032] Last_SQL_Error (expected "duplicate key" error) Could not execute Delete_rows event on table test.t1; Can't find record in 't1', Error_code: 1032; handler error HA_ERR_KEY_NOT_FOUND; the event's master log master-bin.000001, end_log_pos END_LOG_POS diff --git a/mysql-test/suite/rpl/r/rpl_row_inexist_tbl.result b/mysql-test/suite/rpl/r/rpl_row_inexist_tbl.result index 148840cc8c5..20fbcbb7763 100644 --- a/mysql-test/suite/rpl/r/rpl_row_inexist_tbl.result +++ b/mysql-test/suite/rpl/r/rpl_row_inexist_tbl.result @@ -10,6 +10,7 @@ DROP TABLE t1; INSERT INTO t1 VALUES (1); ==== Verify error on slave ==== [on slave] +call mtr.add_suppression("Slave SQL.*Error .Table .test.t1. doesn.t exist. 
on opening tables, Error_code: 1146"); include/wait_for_slave_sql_error.inc [errno=1146] ==== Clean up ==== include/stop_slave_io.inc diff --git a/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result b/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result index 5a29acfda1d..593aaa7ae51 100644 --- a/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result +++ b/mysql-test/suite/rpl/r/rpl_row_tabledefs_2myisam.result @@ -117,6 +117,7 @@ a include/check_slave_is_running.inc INSERT INTO t9 VALUES (4); INSERT INTO t4 VALUES (4); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column [012] type mismatch.* Error_code: 1535"); include/wait_for_slave_sql_error_and_skip.inc [errno=1535] Last_SQL_Error = 'Table definition on master and slave does not match: Column 0 type mismatch - received type 3, test.t4 has type 4' INSERT INTO t9 VALUES (5); diff --git a/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result b/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result index f6620ecf05f..e89118ac0a7 100644 --- a/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result +++ b/mysql-test/suite/rpl/r/rpl_row_tabledefs_3innodb.result @@ -117,6 +117,7 @@ a include/check_slave_is_running.inc INSERT INTO t9 VALUES (4); INSERT INTO t4 VALUES (4); +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column [012] type mismatch.* Error_code: 1535"); include/wait_for_slave_sql_error_and_skip.inc [errno=1535] Last_SQL_Error = 'Table definition on master and slave does not match: Column 0 type mismatch - received type 3, test.t4 has type 4' INSERT INTO t9 VALUES (5); diff --git a/mysql-test/suite/rpl/r/rpl_skip_error.result b/mysql-test/suite/rpl/r/rpl_skip_error.result index d46338fd5b0..f5675b5e25f 100644 --- a/mysql-test/suite/rpl/r/rpl_skip_error.result +++ b/mysql-test/suite/rpl/r/rpl_skip_error.result @@ -56,6 +56,7 @@ t1 CREATE TABLE `t1` ( PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 SET SQL_LOG_BIN=1; +call mtr.add_suppression("Slave SQL.*Could not execute .*te_rows event on table test.t.; Duplicate entry.* Error_code: 1062"); CREATE TABLE t1(id INT NOT NULL PRIMARY KEY, data INT) Engine=InnoDB; SHOW CREATE TABLE t1; Table Create Table diff --git a/mysql-test/suite/rpl/r/rpl_slave_grp_exec.result b/mysql-test/suite/rpl/r/rpl_slave_grp_exec.result index 25deb65fa0f..a90c3c7b68f 100644 --- a/mysql-test/suite/rpl/r/rpl_slave_grp_exec.result +++ b/mysql-test/suite/rpl/r/rpl_slave_grp_exec.result @@ -29,6 +29,7 @@ a b SELECT * FROM t3 ORDER BY a; a b 1 ZZ +call mtr.add_suppression("Slave SQL.*Error .Table .test.t3. doesn.t exist. on.* Error_code: 1146"); include/wait_for_slave_sql_error.inc [errno=1146] SHOW TABLES LIKE 't%'; Tables_in_test (t%) diff --git a/mysql-test/suite/rpl/r/rpl_slave_load_remove_tmpfile.result b/mysql-test/suite/rpl/r/rpl_slave_load_remove_tmpfile.result index 213ab06f0c1..425611e2175 100644 --- a/mysql-test/suite/rpl/r/rpl_slave_load_remove_tmpfile.result +++ b/mysql-test/suite/rpl/r/rpl_slave_load_remove_tmpfile.result @@ -13,4 +13,5 @@ include/stop_slave_io.inc RESET SLAVE; drop table t1; call mtr.add_suppression("Slave: Error writing file 'UNKNOWN' .Errcode: 9. 
Error_code: 3"); +call mtr.add_suppression("Slave SQL.*Error in Begin_load_query event: write to.* failed, Error_code: 9"); include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_slave_load_tmpdir_not_exist.result b/mysql-test/suite/rpl/r/rpl_slave_load_tmpdir_not_exist.result index 8cd6218dcdc..262404ff6f9 100644 --- a/mysql-test/suite/rpl/r/rpl_slave_load_tmpdir_not_exist.result +++ b/mysql-test/suite/rpl/r/rpl_slave_load_tmpdir_not_exist.result @@ -1,6 +1,7 @@ include/master-slave.inc [connection master] START SLAVE; +call mtr.add_suppression("Slave SQL.*Unable to use slave.s temporary directory.* Error_code: 12"); include/wait_for_slave_sql_error.inc [errno=12] include/stop_slave_io.inc RESET SLAVE; diff --git a/mysql-test/suite/rpl/r/rpl_stm_EE_err2.result b/mysql-test/suite/rpl/r/rpl_stm_EE_err2.result index 0e83f1dfb67..d4a14adc6b8 100644 --- a/mysql-test/suite/rpl/r/rpl_stm_EE_err2.result +++ b/mysql-test/suite/rpl/r/rpl_stm_EE_err2.result @@ -8,6 +8,7 @@ insert into t1 values(1),(2); ERROR 23000: Duplicate entry '2' for key 'a' drop table t1; include/wait_for_slave_sql_to_stop.inc +call mtr.add_suppression("Slave SQL.*Query caused different errors on master and slave.*Error on master:.* error code=1062.*Error on slave:.* Error_code: 0"); Error: "Query caused different errors on master and slave. Error on master: message (format)='Duplicate entry '%-.192s' for key %d' error code=1062 ; Error on slave: actual message='no error', error code=0. Default database: 'test'. Query: 'insert into t1 values(1),(2)'" (expected different error codes on master and slave) Errno: "0" (expected 0) drop table t1; diff --git a/mysql-test/suite/rpl/r/rpl_stm_conflicts.result b/mysql-test/suite/rpl/r/rpl_stm_conflicts.result index b56297a5487..ee137c34fd0 100644 --- a/mysql-test/suite/rpl/r/rpl_stm_conflicts.result +++ b/mysql-test/suite/rpl/r/rpl_stm_conflicts.result @@ -17,6 +17,7 @@ a ---- Wait until slave stops with an error ---- include/wait_for_slave_sql_error.inc [errno=1062] Last_SQL_Error = Error 'Duplicate entry '1' for key 'PRIMARY'' on query. Default database: 'test'. Query: 'INSERT INTO t1 VALUES (1)' (expected "duplicate key" error) +call mtr.add_suppression("Slave SQL.*Duplicate entry .1. 
for key .PRIMARY.* Error_code: 1062"); SELECT * FROM t1; a 1 diff --git a/mysql-test/suite/rpl/r/rpl_temporary_errors.result b/mysql-test/suite/rpl/r/rpl_temporary_errors.result index 27843e1dcda..f667ea98907 100644 --- a/mysql-test/suite/rpl/r/rpl_temporary_errors.result +++ b/mysql-test/suite/rpl/r/rpl_temporary_errors.result @@ -37,6 +37,7 @@ a b 3 3 4 4 include/check_slave_is_running.inc +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.t1"); **** On Master **** DROP TABLE t1; SET SESSION BINLOG_FORMAT=MIXED; diff --git a/mysql-test/suite/rpl/t/rpl_binlog_corruption.test b/mysql-test/suite/rpl/t/rpl_binlog_corruption.test index 2c9ec02764c..6717bda0fa7 100644 --- a/mysql-test/suite/rpl/t/rpl_binlog_corruption.test +++ b/mysql-test/suite/rpl/t/rpl_binlog_corruption.test @@ -22,7 +22,7 @@ source include/have_debug.inc; --connection slave call mtr.add_suppression('Found invalid event in binary log'); - +call mtr.add_suppression('Slave SQL.*Relay log read failure: Could not parse relay log event entry.* 1594'); # # BUG#40482: server/mysqlbinlog crashes when reading invalid Incident_log_event diff --git a/mysql-test/suite/rpl/t/rpl_binlog_max_cache_size.test b/mysql-test/suite/rpl/t/rpl_binlog_max_cache_size.test index 265bde2ccf6..06ba1fdc38e 100644 --- a/mysql-test/suite/rpl/t/rpl_binlog_max_cache_size.test +++ b/mysql-test/suite/rpl/t/rpl_binlog_max_cache_size.test @@ -398,6 +398,7 @@ source include/stop_slave.inc; source include/start_slave.inc; CALL mtr.add_suppression("Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage.*"); CALL mtr.add_suppression("Writing one row to the row-based binary log failed.*"); +CALL mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occured on the master. Message: error writing to the binary log"); connection master; TRUNCATE t1; diff --git a/mysql-test/suite/rpl/t/rpl_bug33931.test b/mysql-test/suite/rpl/t/rpl_bug33931.test index c2cf5811938..7ee6da94357 100644 --- a/mysql-test/suite/rpl/t/rpl_bug33931.test +++ b/mysql-test/suite/rpl/t/rpl_bug33931.test @@ -9,6 +9,7 @@ connection slave; # Add suppression for expected warnings in slaves error log call mtr.add_suppression("Failed during slave I/O thread initialization"); +call mtr.add_suppression("Slave SQL.*Failed during slave thread initialization.* 1593"); --source include/stop_slave.inc reset slave; diff --git a/mysql-test/suite/rpl/t/rpl_circular_for_4_hosts.test b/mysql-test/suite/rpl/t/rpl_circular_for_4_hosts.test index 820ffc42933..1380b3d97cf 100644 --- a/mysql-test/suite/rpl/t/rpl_circular_for_4_hosts.test +++ b/mysql-test/suite/rpl/t/rpl_circular_for_4_hosts.test @@ -82,6 +82,7 @@ INSERT INTO t1(b,c) VALUES('B',2); # Wait while C will stop. --connection server_3 # 1062 = ER_DUP_ENTRY +call mtr.add_suppression("Slave SQL.*Duplicate entry .6. for key .PRIMARY.* Error_code: 1062"); --let $slave_sql_errno= 1062 --source include/wait_for_slave_sql_error.inc --connection server_1 diff --git a/mysql-test/suite/rpl/t/rpl_filter_tables_not_exist.test b/mysql-test/suite/rpl/t/rpl_filter_tables_not_exist.test index 8e1c9eb98b2..13c66f9f64b 100644 --- a/mysql-test/suite/rpl/t/rpl_filter_tables_not_exist.test +++ b/mysql-test/suite/rpl/t/rpl_filter_tables_not_exist.test @@ -122,6 +122,8 @@ UPDATE t7 LEFT JOIN (t8, t4, t1) ON (t7.id=t8.id and t7.id=t4.id and t7.id=t1.id # if any of the above statement are not ignored, it would cause error # and stop slave sql thread. 
sync_slave_with_master; +connection slave; +call mtr.add_suppression("Slave SQL.*Error .Table .test.t[47]. doesn.t exist. on query.* Error_code: 1146"); connection master; # Parameters for include/wait_for_slave_sql_error_and_skip.inc: diff --git a/mysql-test/suite/rpl/t/rpl_idempotency.test b/mysql-test/suite/rpl/t/rpl_idempotency.test index 141f3699b14..c72f19dec56 100644 --- a/mysql-test/suite/rpl/t/rpl_idempotency.test +++ b/mysql-test/suite/rpl/t/rpl_idempotency.test @@ -8,9 +8,10 @@ connection slave; source include/have_innodb.inc; # Add suppression for expected warning(s) in slaves error log -call mtr.add_suppression("Slave: Can't find record in 't.' Error_code: 1032"); -call mtr.add_suppression("Slave: Cannot delete or update a parent row: a foreign key constraint fails .* Error_code: 1451"); -call mtr.add_suppression("Slave: Cannot add or update a child row: a foreign key constraint fails .* Error_code: 1452"); +call mtr.add_suppression("Can.t find record in .t[12].* Error_code: 1032"); +call mtr.add_suppression("Cannot delete or update a parent row: a foreign key constraint fails .* Error_code: 1451"); +call mtr.add_suppression("Cannot add or update a child row: a foreign key constraint fails .* Error_code: 1452"); +call mtr.add_suppression("Slave SQL.*Could not execute Write_rows event on table test.* Duplicate entry .1. for key .PRIMARY.* Error_code: 1062"); SET @old_slave_exec_mode= @@global.slave_exec_mode; diff --git a/mysql-test/suite/rpl/t/rpl_ignore_table.test b/mysql-test/suite/rpl/t/rpl_ignore_table.test index 1ab7393d8fc..53a772aea8f 100644 --- a/mysql-test/suite/rpl/t/rpl_ignore_table.test +++ b/mysql-test/suite/rpl/t/rpl_ignore_table.test @@ -125,6 +125,7 @@ show grants for mysqltest4@localhost; # where mysqltest1 does not exist on slave, # to succeed on slave the mode is temporarily changed set global slave_exec_mode='IDEMPOTENT'; +call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table mysql.* Error_code: 1032"); connection master; drop table t1, mysqltest2.t2; diff --git a/mysql-test/suite/rpl/t/rpl_incident.test b/mysql-test/suite/rpl/t/rpl_incident.test index b65441c7d50..d6034009f4f 100644 --- a/mysql-test/suite/rpl/t/rpl_incident.test +++ b/mysql-test/suite/rpl/t/rpl_incident.test @@ -15,6 +15,7 @@ SELECT * FROM t1; connection slave; # Wait until SQL thread stops with error LOST_EVENT on master +call mtr.add_suppression("Slave SQL.*The incident LOST_EVENTS occured on the master.* 1590"); let $slave_sql_errno= 1590; let $show_slave_sql_error= 1; source include/wait_for_slave_sql_error.inc; diff --git a/mysql-test/suite/rpl/t/rpl_init_slave_errors.test b/mysql-test/suite/rpl/t/rpl_init_slave_errors.test index cf72de18e13..4dab13856d4 100644 --- a/mysql-test/suite/rpl/t/rpl_init_slave_errors.test +++ b/mysql-test/suite/rpl/t/rpl_init_slave_errors.test @@ -58,7 +58,7 @@ start slave; --let $show_slave_sql_error= 1 --source include/wait_for_slave_sql_error.inc -call mtr.add_suppression("Failed during slave I/O thread initialization"); +call mtr.add_suppression("Failed during slave.* thread initialization"); SET GLOBAL debug= ""; diff --git a/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test b/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test index a8c7c2c1f5b..09bc715f9a5 100644 --- a/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test +++ b/mysql-test/suite/rpl/t/rpl_known_bugs_detection.test @@ -30,6 +30,7 @@ connection slave; #1105 = ER_UNKNOWN_ERROR --let $slave_sql_errno= 1105 --let $show_slave_sql_error= 1 +call 
mtr.add_suppression("Slave SQL.*suffer.*http:..bugs.mysql.com.bug.php.id=24432"); --source include/wait_for_slave_sql_error.inc # show that it was not replicated SELECT * FROM t1; diff --git a/mysql-test/suite/rpl/t/rpl_loaddata_fatal.test b/mysql-test/suite/rpl/t/rpl_loaddata_fatal.test index ecbaddb6995..be099c1b6c4 100644 --- a/mysql-test/suite/rpl/t/rpl_loaddata_fatal.test +++ b/mysql-test/suite/rpl/t/rpl_loaddata_fatal.test @@ -15,6 +15,7 @@ connection master; LOAD DATA INFILE '../../std_data/rpl_loaddata.dat' INTO TABLE t1; connection slave; +call mtr.add_suppression("Slave SQL.*Fatal error: Not enough memory, Error_code: 1593"); let $slave_sql_errno= 1593; let $show_slave_sql_error= 1; source include/wait_for_slave_sql_error_and_skip.inc; diff --git a/mysql-test/suite/rpl/t/rpl_rotate_logs.test b/mysql-test/suite/rpl/t/rpl_rotate_logs.test index 6ed690f91bf..4d48190cdf2 100644 --- a/mysql-test/suite/rpl/t/rpl_rotate_logs.test +++ b/mysql-test/suite/rpl/t/rpl_rotate_logs.test @@ -93,6 +93,7 @@ set insert_id=1234; insert into t2 values(NULL); connection slave; # 1062 = ER_DUP_ENTRY +call mtr.add_suppression("Slave SQL.*Error .Duplicate entry .1234. for key .PRIMARY.. on query.* Error_code: 1062"); --let $slave_sql_errno= 1062 --source include/wait_for_slave_sql_error_and_skip.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test b/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test index 915e747dd45..a78a6e8b8d1 100644 --- a/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test +++ b/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test @@ -62,6 +62,7 @@ DROP TABLE t1; --echo **** On Slave **** connection slave; SET GLOBAL QUERY_CACHE_SIZE=0; +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.t1.* Error_code: 1032"); --echo **** On Master **** connection master; diff --git a/mysql-test/suite/rpl/t/rpl_row_colSize.test b/mysql-test/suite/rpl/t/rpl_row_colSize.test index 04434517518..be7d72e858e 100644 --- a/mysql-test/suite/rpl/t/rpl_row_colSize.test +++ b/mysql-test/suite/rpl/t/rpl_row_colSize.test @@ -160,6 +160,9 @@ let $test_table_slave = CREATE TABLE t1 (a TINYBLOB); let $test_insert = INSERT INTO t1 VALUES ('This is a test.'); source include/test_fieldsize.inc; +connection slave; +call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 0 ...e mismatch.* Error_code: 1535"); + --echo *** Cleanup *** connection master; DROP TABLE IF EXISTS t1; diff --git a/mysql-test/suite/rpl/t/rpl_row_inexist_tbl.test b/mysql-test/suite/rpl/t/rpl_row_inexist_tbl.test index b695428dd38..4c8e56d626f 100644 --- a/mysql-test/suite/rpl/t/rpl_row_inexist_tbl.test +++ b/mysql-test/suite/rpl/t/rpl_row_inexist_tbl.test @@ -30,6 +30,7 @@ INSERT INTO t1 VALUES (1); connection slave; # slave should have stopped because can't find table t1 # 1146 = ER_NO_SUCH_TABLE +call mtr.add_suppression("Slave SQL.*Error .Table .test.t1. doesn.t exist. 
on opening tables, Error_code: 1146"); --let $slave_sql_errno= 1146 --source include/wait_for_slave_sql_error.inc diff --git a/mysql-test/suite/rpl/t/rpl_skip_error.test b/mysql-test/suite/rpl/t/rpl_skip_error.test index 2853c95b212..82d6e61a2cd 100644 --- a/mysql-test/suite/rpl/t/rpl_skip_error.test +++ b/mysql-test/suite/rpl/t/rpl_skip_error.test @@ -102,6 +102,8 @@ SET SQL_LOG_BIN=1; connection slave; +call mtr.add_suppression("Slave SQL.*Could not execute .*te_rows event on table test.t.; Duplicate entry.* Error_code: 1062"); + CREATE TABLE t1(id INT NOT NULL PRIMARY KEY, data INT) Engine=InnoDB; SHOW CREATE TABLE t1; diff --git a/mysql-test/suite/rpl/t/rpl_slave_grp_exec.test b/mysql-test/suite/rpl/t/rpl_slave_grp_exec.test index 70ebba0047b..8525718283e 100644 --- a/mysql-test/suite/rpl/t/rpl_slave_grp_exec.test +++ b/mysql-test/suite/rpl/t/rpl_slave_grp_exec.test @@ -63,6 +63,7 @@ SELECT * FROM t3 ORDER BY a; --connection slave # 1146 = ER_NO_SUCH_TABLE +call mtr.add_suppression("Slave SQL.*Error .Table .test.t3. doesn.t exist. on.* Error_code: 1146"); --let $slave_sql_errno= 1146 --source include/wait_for_slave_sql_error.inc SHOW TABLES LIKE 't%'; diff --git a/mysql-test/suite/rpl/t/rpl_slave_load_remove_tmpfile.test b/mysql-test/suite/rpl/t/rpl_slave_load_remove_tmpfile.test index d80a2ed9e25..c718f072e11 100644 --- a/mysql-test/suite/rpl/t/rpl_slave_load_remove_tmpfile.test +++ b/mysql-test/suite/rpl/t/rpl_slave_load_remove_tmpfile.test @@ -49,5 +49,6 @@ RESET SLAVE; drop table t1; call mtr.add_suppression("Slave: Error writing file 'UNKNOWN' .Errcode: 9. Error_code: 3"); +call mtr.add_suppression("Slave SQL.*Error in Begin_load_query event: write to.* failed, Error_code: 9"); --let $rpl_only_running_threads= 1 --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_slave_load_tmpdir_not_exist.test b/mysql-test/suite/rpl/t/rpl_slave_load_tmpdir_not_exist.test index 0481581998c..b99c71b1ca0 100644 --- a/mysql-test/suite/rpl/t/rpl_slave_load_tmpdir_not_exist.test +++ b/mysql-test/suite/rpl/t/rpl_slave_load_tmpdir_not_exist.test @@ -11,6 +11,7 @@ --connection slave START SLAVE; # Why 12??? +call mtr.add_suppression("Slave SQL.*Unable to use slave.s temporary directory.* Error_code: 12"); --let $slave_sql_errno= 12 source include/wait_for_slave_sql_error.inc; diff --git a/mysql-test/suite/rpl/t/rpl_temporary_errors.test b/mysql-test/suite/rpl/t/rpl_temporary_errors.test index 7ebaa10e9db..e85bf6ab753 100644 --- a/mysql-test/suite/rpl/t/rpl_temporary_errors.test +++ b/mysql-test/suite/rpl/t/rpl_temporary_errors.test @@ -28,6 +28,9 @@ SHOW STATUS LIKE 'Slave_retried_transactions'; SELECT * FROM t1; source include/check_slave_is_running.inc; +connection slave; +call mtr.add_suppression("Slave SQL.*Could not execute Update_rows event on table test.t1"); + --echo **** On Master **** connection master; DROP TABLE t1; -- cgit v1.2.1 From 27166fc64f54ef82f1800988ffff431868377645 Mon Sep 17 00:00:00 2001 From: Magne Mahre Date: Thu, 24 Feb 2011 12:23:38 +0100 Subject: Bug#11767480 - SPATIAL INDEXES ON NON-SPATIAL COLUMNS CAUSE CRASHES. This is a backport of the patch for MySQL Bug#50574. Adding a SPATIAL INDEX on non-geometrical columns caused a segmentation fault when the table was subsequently inserted into. A test was added in mysql_prepare_create_table to explicitly check whether non-geometrical columns are used in a spatial index, and throw an error if so. For MySQL 5.5 and later, a new and more meaningful error message was introduced. 
For 5.1, we (re-)use an existing error code. --- mysql-test/r/gis.result | 33 +++++++++++++++++++++++++++++++++ mysql-test/t/gis.test | 47 +++++++++++++++++++++++++++++++++++++++++++++++ sql/sql_table.cc | 22 ++++++++++++++++------ 3 files changed, 96 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index a9beb9631ae..151d0cfffa1 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -1034,4 +1034,37 @@ p NULL NULL drop table t1; +CREATE TABLE t0 (a BINARY(32) NOT NULL); +CREATE SPATIAL INDEX i on t0 (a); +ERROR HY000: Incorrect arguments to SPATIAL INDEX +INSERT INTO t0 VALUES (1); +CREATE TABLE t1( +col0 BINARY NOT NULL, +col2 TIMESTAMP, +SPATIAL INDEX i1 (col0) +) ENGINE=MyISAM; +ERROR HY000: Incorrect arguments to SPATIAL INDEX +CREATE TABLE t1 ( +col0 BINARY NOT NULL, +col2 TIMESTAMP +) ENGINE=MyISAM; +CREATE SPATIAL INDEX idx0 ON t1(col0); +ERROR HY000: Incorrect arguments to SPATIAL INDEX +ALTER TABLE t1 ADD SPATIAL INDEX i1 (col0); +ERROR HY000: Incorrect arguments to SPATIAL INDEX +CREATE TABLE t2 ( +col0 INTEGER NOT NULL, +col1 POINT, +col2 POINT +); +CREATE SPATIAL INDEX idx0 ON t2 (col1, col2); +ERROR HY000: Incorrect arguments to SPATIAL INDEX +CREATE TABLE t3 ( +col0 INTEGER NOT NULL, +col1 POINT, +col2 LINESTRING, +SPATIAL INDEX i1 (col1, col2) +); +ERROR HY000: Incorrect arguments to SPATIAL INDEX +DROP TABLE t0, t1, t2; End of 5.1 tests diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index bdbbfc7c064..b50df062d7e 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -754,4 +754,51 @@ insert into t1 values (geomfromtext("point(1 0)")); select * from (select polygon(t1.a) as p from t1 order by t1.a) d; drop table t1; +# +# Bug#11767480 - SPATIAL INDEXES ON NON-SPATIAL COLUMNS CAUSE CRASHES. +# +CREATE TABLE t0 (a BINARY(32) NOT NULL); +--error ER_WRONG_ARGUMENTS +CREATE SPATIAL INDEX i on t0 (a); +INSERT INTO t0 VALUES (1); + +--error ER_WRONG_ARGUMENTS +CREATE TABLE t1( + col0 BINARY NOT NULL, + col2 TIMESTAMP, + SPATIAL INDEX i1 (col0) +) ENGINE=MyISAM; + +# Test other ways to add indices +CREATE TABLE t1 ( + col0 BINARY NOT NULL, + col2 TIMESTAMP +) ENGINE=MyISAM; + +--error ER_WRONG_ARGUMENTS +CREATE SPATIAL INDEX idx0 ON t1(col0); + +--error ER_WRONG_ARGUMENTS +ALTER TABLE t1 ADD SPATIAL INDEX i1 (col0); + +CREATE TABLE t2 ( + col0 INTEGER NOT NULL, + col1 POINT, + col2 POINT +); + +--error ER_WRONG_ARGUMENTS +CREATE SPATIAL INDEX idx0 ON t2 (col1, col2); + +--error ER_WRONG_ARGUMENTS +CREATE TABLE t3 ( + col0 INTEGER NOT NULL, + col1 POINT, + col2 LINESTRING, + SPATIAL INDEX i1 (col1, col2) +); + +# cleanup +DROP TABLE t0, t1, t2; + --echo End of 5.1 tests diff --git a/sql/sql_table.cc b/sql/sql_table.cc index b919ea9eae7..c5fc037a49e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright 2000-2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,8 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + MA 02110-1301 USA */ /* drop and alter of tables */ @@ -3184,11 +3185,20 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, { column->length*= sql_field->charset->mbmaxlen; - if (key->type == Key::SPATIAL && column->length) + if (key->type == Key::SPATIAL) { - my_error(ER_WRONG_SUB_KEY, MYF(0)); - DBUG_RETURN(TRUE); - } + if (column->length) + { + my_error(ER_WRONG_SUB_KEY, MYF(0)); + DBUG_RETURN(TRUE); + } + + if (!f_is_geom(sql_field->pack_flag)) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), "SPATIAL INDEX"); + DBUG_RETURN(TRUE); + } + } if (f_is_blob(sql_field->pack_flag) || (f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL)) -- cgit v1.2.1 From 5a805fe7c4b0ec4907376c4439c677d88b2bb0dd Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Fri, 25 Feb 2011 11:50:18 +0200 Subject: Fix BUG#11798085 - INCORRECT INTEGER TYPES USED IN CALCULATION RESULT IN OVERFLOW Do not assign the result of the difference to a signed variable and checking whether it is negative afterwards because this limits the max diff to 2G on 32 bit systems. E.g. "signed = 3.5G - 1G" would be negative and the code would assume that 3.5G < 1G. Instead compare the two variables directly and assign to unsigned only if we know that the result of the subtraction will be positive. Discussed with: Jimmy and Sunny (via IRC) --- storage/innodb_plugin/buf/buf0buf.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/storage/innodb_plugin/buf/buf0buf.c b/storage/innodb_plugin/buf/buf0buf.c index 6bbd5565c58..51a3a393d36 100644 --- a/storage/innodb_plugin/buf/buf0buf.c +++ b/storage/innodb_plugin/buf/buf0buf.c @@ -1893,16 +1893,19 @@ buf_block_align( /* TODO: protect buf_pool->chunks with a mutex (it will currently remain constant after buf_pool_init()) */ for (chunk = buf_pool->chunks, i = buf_pool->n_chunks; i--; chunk++) { - lint offs = ptr - chunk->blocks->frame; + ulint offs; - if (UNIV_UNLIKELY(offs < 0)) { + if (UNIV_UNLIKELY(ptr < chunk->blocks->frame)) { continue; } + /* else */ + + offs = ptr - chunk->blocks->frame; offs >>= UNIV_PAGE_SIZE_SHIFT; - if (UNIV_LIKELY((ulint) offs < chunk->size)) { + if (UNIV_LIKELY(offs < chunk->size)) { buf_block_t* block = &chunk->blocks[offs]; /* The function buf_chunk_init() invokes -- cgit v1.2.1 From 0f8ae318c7203158e1ea70cbf3a6bba41fd2dde6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 28 Feb 2011 13:51:18 +0200 Subject: Bug #58549 Race condition in buf_LRU_drop_page_hash_for_tablespace() and compressed tables buf_LRU_drop_page_hash_for_tablespace(): after releasing and reacquiring the buffer pool mutex, do not dereference any block descriptor pointer that is not known to be a pointer to an uncompressed page frame (type buf_block_t; state == BUF_BLOCK_FILE_PAGE). Also, defer the acquisition of the block_mutex until it is needed. buf_page_get_gen(): Add mode == BUF_GET_IF_IN_POOL_PEEK for buffer-fixing a block without making it young in the LRU list. 
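As an aside on the peek mode introduced in the paragraph above (the commit message calls it BUF_GET_IF_IN_POOL_PEEK, while the code hunks below define it as BUF_PEEK_IF_IN_POOL), the only behavioural difference from BUF_GET_IF_IN_POOL is whether a cache hit makes the block young in the LRU list. The following toy C++ sketch illustrates just that distinction; ToyBufferPool and GetMode are invented names for illustration and are not InnoDB code.

#include <cstdint>
#include <list>
#include <unordered_map>
#include <utility>

// Minimal stand-in for a buffer pool keyed by page number; the front of
// the list plays the role of the "young" end of the LRU list.
enum class GetMode { GET_IF_IN_POOL, PEEK_IF_IN_POOL };

class ToyBufferPool {
public:
  // Returns a pointer to the cached page payload, or nullptr on a miss.
  const int *get(uint32_t page_no, GetMode mode) {
    auto it = pos_.find(page_no);
    if (it == pos_.end())
      return nullptr;                                // not in pool: give up
    if (mode == GetMode::GET_IF_IN_POOL)
      lru_.splice(lru_.begin(), lru_, it->second);   // make the block young
    // PEEK_IF_IN_POOL: leave the LRU position untouched.
    return &it->second->second;
  }

  void put(uint32_t page_no, int payload) {
    lru_.emplace_front(page_no, payload);
    pos_[page_no] = lru_.begin();
  }

private:
  std::list<std::pair<uint32_t, int>> lru_;
  std::unordered_map<uint32_t,
                     std::list<std::pair<uint32_t, int>>::iterator> pos_;
};

The intent described above is the same: when btr_search_drop_page_hash_when_freed() only needs to drop hash index entries, touching the block should not promote it to the young end of the list.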
buf_page_get_gen(), buf_page_init(), buf_LRU_block_remove_hashed_page(): Set bpage->state = BUF_BLOCK_ZIP_FREE before buf_buddy_free(bpage), so that similar race conditions might be detected a little easier. btr_search_drop_page_hash_when_freed(): Use BUF_GET_IF_IN_POOL_PEEK when dropping the hash indexes. rb://528 approved by Jimmy Yang --- storage/innodb_plugin/ChangeLog | 6 +++ storage/innodb_plugin/btr/btr0sea.c | 4 +- storage/innodb_plugin/buf/buf0buf.c | 32 +++++++++--- storage/innodb_plugin/buf/buf0lru.c | 87 +++++++++++++++++---------------- storage/innodb_plugin/include/buf0buf.h | 4 +- 5 files changed, 81 insertions(+), 52 deletions(-) diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index 1b2747ab012..1ece3ad1825 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,9 @@ +2011-02-28 The InnoDB Team + + * btr/btr0sea.c, buf/buf0buf.c, buf/buf0lru.c: + Fix Bug#58549 Race condition in buf_LRU_drop_page_hash_for_tablespace() + and compressed tables + 2011-02-15 The InnoDB Team * sync/sync0rw.c, innodb_bug59307.test: diff --git a/storage/innodb_plugin/btr/btr0sea.c b/storage/innodb_plugin/btr/btr0sea.c index 9835efcf712..cd0eadbb1b8 100644 --- a/storage/innodb_plugin/btr/btr0sea.c +++ b/storage/innodb_plugin/btr/btr0sea.c @@ -1201,8 +1201,8 @@ btr_search_drop_page_hash_when_freed( having to fear a deadlock. */ block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL, - BUF_GET_IF_IN_POOL, __FILE__, __LINE__, - &mtr); + BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__, + &mtr); /* Because the buffer pool mutex was released by buf_page_peek_if_search_hashed(), it is possible that the block was removed from the buffer pool by another thread diff --git a/storage/innodb_plugin/buf/buf0buf.c b/storage/innodb_plugin/buf/buf0buf.c index 51a3a393d36..14ec7b75911 100644 --- a/storage/innodb_plugin/buf/buf0buf.c +++ b/storage/innodb_plugin/buf/buf0buf.c @@ -2031,7 +2031,7 @@ buf_page_get_gen( ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */ buf_block_t* guess, /*!< in: guessed block or NULL */ ulint mode, /*!< in: BUF_GET, BUF_GET_IF_IN_POOL, - BUF_GET_NO_LATCH */ + BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH */ const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr) /*!< in: mini-transaction */ @@ -2047,9 +2047,19 @@ buf_page_get_gen( ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH) || (rw_latch == RW_NO_LATCH)); - ut_ad((mode != BUF_GET_NO_LATCH) || (rw_latch == RW_NO_LATCH)); - ut_ad((mode == BUF_GET) || (mode == BUF_GET_IF_IN_POOL) - || (mode == BUF_GET_NO_LATCH)); +#ifdef UNIV_DEBUG + switch (mode) { + case BUF_GET_NO_LATCH: + ut_ad(rw_latch == RW_NO_LATCH); + break; + case BUF_GET: + case BUF_GET_IF_IN_POOL: + case BUF_PEEK_IF_IN_POOL: + break; + default: + ut_error; + } +#endif /* UNIV_DEBUG */ ut_ad(zip_size == fil_space_get_zip_size(space)); ut_ad(ut_is_2pow(zip_size)); #ifndef UNIV_LOG_DEBUG @@ -2091,7 +2101,8 @@ loop2: buf_pool_mutex_exit(); - if (mode == BUF_GET_IF_IN_POOL) { + if (mode == BUF_GET_IF_IN_POOL + || mode == BUF_PEEK_IF_IN_POOL) { return(NULL); } @@ -2130,7 +2141,8 @@ loop2: must_read = buf_block_get_io_fix(block) == BUF_IO_READ; - if (must_read && mode == BUF_GET_IF_IN_POOL) { + if (must_read && (mode == BUF_GET_IF_IN_POOL + || mode == BUF_PEEK_IF_IN_POOL)) { /* The page is only being read to buffer */ buf_pool_mutex_exit(); @@ -2248,6 +2260,7 @@ wait_until_unfixed: mutex_exit(&buf_pool_zip_mutex); buf_pool->n_pend_unzip++; + bpage->state = 
BUF_BLOCK_ZIP_FREE; buf_buddy_free(bpage, sizeof *bpage); buf_pool_mutex_exit(); @@ -2324,7 +2337,9 @@ wait_until_unfixed: buf_pool_mutex_exit(); - buf_page_set_accessed_make_young(&block->page, access_time); + if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL)) { + buf_page_set_accessed_make_young(&block->page, access_time); + } #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG ut_a(!block->page.file_page_was_freed); @@ -2377,7 +2392,7 @@ wait_until_unfixed: mtr_memo_push(mtr, block, fix_type); - if (!access_time) { + if (UNIV_LIKELY(mode != BUF_PEEK_IF_IN_POOL) && !access_time) { /* In the case of a first access, try to apply linear read-ahead */ @@ -2926,6 +2941,7 @@ err_exit: && UNIV_LIKELY_NULL(buf_page_hash_get(space, offset))) { /* The block was added by some other thread. */ + bpage->state = BUF_BLOCK_ZIP_FREE; buf_buddy_free(bpage, sizeof *bpage); buf_buddy_free(data, zip_size); diff --git a/storage/innodb_plugin/buf/buf0lru.c b/storage/innodb_plugin/buf/buf0lru.c index 39feb06ff23..a69b2658c51 100644 --- a/storage/innodb_plugin/buf/buf0lru.c +++ b/storage/innodb_plugin/buf/buf0lru.c @@ -246,71 +246,75 @@ buf_LRU_drop_page_hash_for_tablespace( page_arr = ut_malloc(sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE); buf_pool_mutex_enter(); + num_entries = 0; scan_again: - num_entries = 0; bpage = UT_LIST_GET_LAST(buf_pool->LRU); while (bpage != NULL) { - mutex_t* block_mutex = buf_page_get_mutex(bpage); buf_page_t* prev_bpage; + ibool is_fixed; - mutex_enter(block_mutex); prev_bpage = UT_LIST_GET_PREV(LRU, bpage); ut_a(buf_page_in_file(bpage)); if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE || bpage->space != id - || bpage->buf_fix_count > 0 || bpage->io_fix != BUF_IO_NONE) { - /* We leave the fixed pages as is in this scan. - To be dealt with later in the final scan. */ - mutex_exit(block_mutex); - goto next_page; + /* Compressed pages are never hashed. + Skip blocks of other tablespaces. + Skip I/O-fixed blocks (to be dealt with later). */ +next_page: + bpage = prev_bpage; + continue; } - if (((buf_block_t*) bpage)->is_hashed) { + mutex_enter(&((buf_block_t*) bpage)->mutex); + is_fixed = bpage->buf_fix_count > 0 + || !((buf_block_t*) bpage)->is_hashed; + mutex_exit(&((buf_block_t*) bpage)->mutex); - /* Store the offset(i.e.: page_no) in the array - so that we can drop hash index in a batch - later. */ - page_arr[num_entries] = bpage->offset; - mutex_exit(block_mutex); - ut_a(num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE); - ++num_entries; + if (is_fixed) { + goto next_page; + } - if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) { - goto next_page; - } - /* Array full. We release the buf_pool_mutex to - obey the latching order. */ - buf_pool_mutex_exit(); - - buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, - num_entries); - num_entries = 0; - buf_pool_mutex_enter(); - } else { - mutex_exit(block_mutex); + /* Store the page number so that we can drop the hash + index in a batch later. */ + page_arr[num_entries] = bpage->offset; + ut_a(num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE); + ++num_entries; + + if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) { + goto next_page; } -next_page: - /* Note that we may have released the buf_pool mutex - above after reading the prev_bpage during processing - of a page_hash_batch (i.e.: when the array was full). - This means that prev_bpage can change in LRU list. - This is OK because this function is a 'best effort' - to drop as many search hash entries as possible and - it does not guarantee that ALL such entries will be - dropped. 
*/ - bpage = prev_bpage; + /* Array full. We release the buf_pool_mutex to + obey the latching order. */ + buf_pool_mutex_exit(); + buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, + num_entries); + buf_pool_mutex_enter(); + num_entries = 0; + + /* Note that we released the buf_pool mutex above + after reading the prev_bpage during processing of a + page_hash_batch (i.e.: when the array was full). + Because prev_bpage could belong to a compressed-only + block, it may have been relocated, and thus the + pointer cannot be trusted. Because bpage is of type + buf_block_t, it is safe to dereference. + + bpage can change in the LRU list. This is OK because + this function is a 'best effort' to drop as many + search hash entries as possible and it does not + guarantee that ALL such entries will be dropped. */ /* If, however, bpage has been removed from LRU list to the free list then we should restart the scan. bpage->state is protected by buf_pool mutex. */ - if (bpage && !buf_page_in_file(bpage)) { - ut_a(num_entries == 0); + if (bpage + && buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { goto scan_again; } } @@ -1799,6 +1803,7 @@ buf_LRU_block_remove_hashed_page( buf_pool_mutex_exit_forbid(); buf_buddy_free(bpage->zip.data, page_zip_get_size(&bpage->zip)); + bpage->state = BUF_BLOCK_ZIP_FREE; buf_buddy_free(bpage, sizeof(*bpage)); buf_pool_mutex_exit_allow(); UNIV_MEM_UNDESC(bpage); diff --git a/storage/innodb_plugin/include/buf0buf.h b/storage/innodb_plugin/include/buf0buf.h index a16de67aa3a..05dead5ac9e 100644 --- a/storage/innodb_plugin/include/buf0buf.h +++ b/storage/innodb_plugin/include/buf0buf.h @@ -41,6 +41,8 @@ Created 11/5/1995 Heikki Tuuri /* @{ */ #define BUF_GET 10 /*!< get always */ #define BUF_GET_IF_IN_POOL 11 /*!< get if in pool */ +#define BUF_PEEK_IF_IN_POOL 12 /*!< get if in pool, do not make + the block young in the LRU list */ #define BUF_GET_NO_LATCH 14 /*!< get and bufferfix, but set no latch; we have separated this case, because @@ -284,7 +286,7 @@ buf_page_get_gen( ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */ buf_block_t* guess, /*!< in: guessed block or NULL */ ulint mode, /*!< in: BUF_GET, BUF_GET_IF_IN_POOL, - BUF_GET_NO_LATCH */ + BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH */ const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr); /*!< in: mini-transaction */ -- cgit v1.2.1 From 4a54e5adb157a7eb9bbee84d7aa233ad1fb087a0 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Mon, 28 Feb 2011 16:10:35 +0100 Subject: Attempt at solving missing end of mtr output on Windows --- mysql-test/lib/mtr_report.pm | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/lib/mtr_report.pm b/mysql-test/lib/mtr_report.pm index 0090316cf7b..bbf1002c123 100644 --- a/mysql-test/lib/mtr_report.pm +++ b/mysql-test/lib/mtr_report.pm @@ -32,6 +32,7 @@ our @EXPORT= qw(report_option mtr_print_line mtr_print_thick_line use mtr_match; use My::Platform; use POSIX qw[ _exit ]; +use IO::Handle qw[ flush ]; require "mtr_io.pl"; my $tot_real_time= 0; @@ -477,6 +478,7 @@ sub mtr_warning (@) { # Print error to screen and then exit sub mtr_error (@) { + IO::Handle::flush(\*STDOUT) if IS_WINDOWS; print STDERR _name(). _timestamp(). "mysql-test-run: *** ERROR: ". join(" ", @_). 
"\n"; if (IS_WINDOWS) -- cgit v1.2.1 From fd1e3b03ff8837e8af1a8aa486cc2b13f872861f Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 1 Mar 2011 15:30:18 +0300 Subject: Bug#11766725 (Bug#59901) EXTRACTVALUE STILL BROKEN AFTER FIX FOR BUG #44332 Problem: a byte behind the end of input string was read in case of a broken XML not having a quote or doublequote character closing a string value. Fix: changing condition not to read behind the end of input string @ mysql-test/r/xml.result @ mysql-test/t/xml.test Adding tests @ strings/xml.c When checking if the closing quote/doublequote was found, using p->cur[0] us unsafe, as p->cur can point to the byte after the value. Comparing p->cur to p->beg instead. --- mysql-test/r/xml.result | 8 ++++++++ mysql-test/t/xml.test | 5 +++++ strings/xml.c | 7 ++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/xml.result b/mysql-test/r/xml.result index 0a71a596505..dda77cba04c 100644 --- a/mysql-test/r/xml.result +++ b/mysql-test/r/xml.result @@ -1124,4 +1124,12 @@ Warning 1525 Incorrect XML value: 'parse error at line 1 pos 2: END-OF-INPUT une SELECT UPDATEXML(CONVERT(_latin1' SLAVE BREAK WITH ERROR HA_ERR_END_OF_ The slave was not able to find the correct row in the innodb table, because the row fetched from the innodb table would not match the before image. This happened because the (don't care) bytes in the NULLed fields would change once the row was stored in the storage engine (from zero to the default value). This would make bulk memory comparison (using memcmp) to fail. We fix this by taking a preventing measure and avoiding memcmp for tables that contain nullable fields. Therefore, we protect the slave search routine from engines that return arbitrary values for don't care bytes (in the nulled fields). Instead, the slave thread will only check null_bits and those fields that are not set to NULL when comparing the before image against the storage engine row. mysql-test/extra/rpl_tests/rpl_record_compare.test: Added test case to the include file so that this is tested with more than one engine. mysql-test/suite/rpl/r/rpl_row_rec_comp_innodb.result: Result update. mysql-test/suite/rpl/r/rpl_row_rec_comp_myisam.result: Result update. mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test: Moved the include file last, so that the result from BUG#11766865 is not intermixed with the result for BUG#11760454. sql/log_event.cc: Skips memory comparison if the table has nullable columns and compares only non-nulled fields in the field comparison loop. 
--- mysql-test/extra/rpl_tests/rpl_record_compare.test | 20 ++++++++++++++ .../suite/rpl/r/rpl_row_rec_comp_innodb.result | 6 +++++ .../suite/rpl/r/rpl_row_rec_comp_myisam.result | 20 +++++++++----- .../suite/rpl/t/rpl_row_rec_comp_myisam.test | 6 +++-- sql/log_event.cc | 31 ++++++++++++++++++---- 5 files changed, 69 insertions(+), 14 deletions(-) diff --git a/mysql-test/extra/rpl_tests/rpl_record_compare.test b/mysql-test/extra/rpl_tests/rpl_record_compare.test index f29e4fb791a..210aee025d0 100644 --- a/mysql-test/extra/rpl_tests/rpl_record_compare.test +++ b/mysql-test/extra/rpl_tests/rpl_record_compare.test @@ -62,4 +62,24 @@ UPDATE t1 SET c1= 0; DROP TABLE t1; -- sync_slave_with_master +# +# BUG#11766865: 60091: RBR + NO PK + UPDATE NULL VALUE --> SLAVE BREAK WITH ERROR HA_ERR_END_OF_ +# + +--connection master +--source include/rpl_reset.inc +--connection master + +--eval CREATE TABLE t1 (c1 int(11) NOT NULL, c2 int(11) NOT NULL, c3 int(11) DEFAULT '-1') ENGINE=$engine DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (1,2,NULL); +UPDATE t1 SET c1=1, c2=2, c3=-1 WHERE c1=1 AND c2=2 AND ISNULL(c3); + +--sync_slave_with_master + +--let $diff_tables=master:test.t1, slave:test.t1 +--source include/diff_tables.inc + +--connection master +DROP TABLE t1; +--sync_slave_with_master diff --git a/mysql-test/suite/rpl/r/rpl_row_rec_comp_innodb.result b/mysql-test/suite/rpl/r/rpl_row_rec_comp_innodb.result index d9ebb52493b..523564a222e 100644 --- a/mysql-test/suite/rpl/r/rpl_row_rec_comp_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_row_rec_comp_innodb.result @@ -25,4 +25,10 @@ INSERT INTO t1(c1) VALUES (NULL); UPDATE t1 SET c1= 0; include/diff_tables.inc [master:t1, slave:t1] DROP TABLE t1; +include/rpl_reset.inc +CREATE TABLE t1 (c1 int(11) NOT NULL, c2 int(11) NOT NULL, c3 int(11) DEFAULT '-1') ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO t1 VALUES (1,2,NULL); +UPDATE t1 SET c1=1, c2=2, c3=-1 WHERE c1=1 AND c2=2 AND ISNULL(c3); +include/diff_tables.inc [master:test.t1, slave:test.t1] +DROP TABLE t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_row_rec_comp_myisam.result b/mysql-test/suite/rpl/r/rpl_row_rec_comp_myisam.result index e9ffcc927be..4dc7c0bc7a3 100644 --- a/mysql-test/suite/rpl/r/rpl_row_rec_comp_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_row_rec_comp_myisam.result @@ -1,5 +1,14 @@ include/master-slave.inc [connection master] +## coverage purposes - Field_bits +## 1 X bit + 2 Null bits + 5 bits => last_null_bit_pos==0 +include/rpl_reset.inc +CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bit(5)) ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1(c1,c2) VALUES (10, b'1'); +INSERT INTO t1(c1,c2) VALUES (NULL, b'1'); +UPDATE t1 SET c1= 0; +include/diff_tables.inc [master:t1, slave:t1] +DROP TABLE t1; ## case #1 - last_null_bit_pos==0 in record_compare without X bit include/rpl_reset.inc CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bigint(20) DEFAULT 0, c3 bigint(20) DEFAULT 0, c4 varchar(1) DEFAULT '', c5 bigint(20) DEFAULT 0, c6 bigint(20) DEFAULT 0, c7 bigint(20) DEFAULT 0, c8 bigint(20) DEFAULT 0) ENGINE=MyISAM DEFAULT CHARSET=latin1; @@ -25,13 +34,10 @@ INSERT INTO t1(c1) VALUES (NULL); UPDATE t1 SET c1= 0; include/diff_tables.inc [master:t1, slave:t1] DROP TABLE t1; -## coverage purposes - Field_bits -## 1 X bit + 2 Null bits + 5 bits => last_null_bit_pos==0 include/rpl_reset.inc -CREATE TABLE t1 (c1 bigint(20) DEFAULT 0, c2 bit(5)) ENGINE=MyISAM DEFAULT CHARSET=latin1; -INSERT INTO t1(c1,c2) VALUES (10, b'1'); -INSERT INTO t1(c1,c2) VALUES (NULL, b'1'); 
-UPDATE t1 SET c1= 0; -include/diff_tables.inc [master:t1, slave:t1] +CREATE TABLE t1 (c1 int(11) NOT NULL, c2 int(11) NOT NULL, c3 int(11) DEFAULT '-1') ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1 VALUES (1,2,NULL); +UPDATE t1 SET c1=1, c2=2, c3=-1 WHERE c1=1 AND c2=2 AND ISNULL(c3); +include/diff_tables.inc [master:test.t1, slave:test.t1] DROP TABLE t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test b/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test index e40cd615ca6..f96603f69ed 100644 --- a/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test +++ b/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test @@ -1,12 +1,11 @@ -- source include/have_binlog_format_row.inc -- source include/master-slave.inc +-- let $engine= MyISAM # # BUG#52868 Wrong handling of NULL value during update, replication out of sync # --- let $engine= MyISAM --- source extra/rpl_tests/rpl_record_compare.test -- echo ## coverage purposes - Field_bits -- echo ## 1 X bit + 2 Null bits + 5 bits => last_null_bit_pos==0 @@ -28,4 +27,7 @@ UPDATE t1 SET c1= 0; -- connection master DROP TABLE t1; -- sync_slave_with_master + +-- source extra/rpl_tests/rpl_record_compare.test + --source include/rpl_end.inc diff --git a/sql/log_event.cc b/sql/log_event.cc index 0b938df1987..19f82b69048 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -8888,7 +8888,19 @@ static bool record_compare(TABLE *table) } } - if (table->s->blob_fields + table->s->varchar_fields == 0) + /** + Compare full record only if: + - there are no blob fields (otherwise we would also need + to compare blobs contents as well); + - there are no varchar fields (otherwise we would also need + to compare varchar contents as well); + - there are no null fields, otherwise NULLed fields + contents (i.e., the don't care bytes) may show arbitrary + values, depending on how each engine handles internally. + */ + if ((table->s->blob_fields + + table->s->varchar_fields + + table->s->null_fields) == 0) { result= cmp_record(table,record[1]); goto record_compare_exit; @@ -8903,13 +8915,22 @@ static bool record_compare(TABLE *table) goto record_compare_exit; } - /* Compare updated fields */ + /* Compare fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { - if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + + /** + We only compare field contents that are not null. + NULL fields (i.e., their null bits) were compared + earlier. + */ + if (!(*(ptr))->is_null()) { - result= TRUE; - goto record_compare_exit; + if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + { + result= TRUE; + goto record_compare_exit; + } } } -- cgit v1.2.1 From dcf6b68d08acfbfdc3183b0a13f041af51573eb1 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Fri, 25 Mar 2011 12:57:27 +0200 Subject: Bug #11766769: 59959: SMALL VALUES OF --MAX-ALLOWED-PACKET ARE NOT BEING HONORED max_allowed_packet works in conjunction with net_buffer_length. max_allowed_packet is an upper bound of net_buffer_length. So it doesn't make sense to set the upper limit lower than the value. Added a warning (using ER_UNKNOWN_ERRROR and a specific message) when this is done (in the log at startup and when setting either max_allowed_packet or the net_buffer_length variables) Added a test case. Fixed several tests that broke the above rule. 
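To make the coupling between the two variables concrete, here is a small self-contained C++ sketch; ToyNetBuffer is an invented name and this is not the server's NET code. The connection buffer starts out at net_buffer_length and grows on demand, but a packet is accepted only if it fits under max_allowed_packet, so configuring the ceiling below the initial size makes no sense, which is what the new warning points out.

#include <cstddef>
#include <vector>

// Toy model of a per-connection packet buffer.
class ToyNetBuffer {
public:
  ToyNetBuffer(std::size_t net_buffer_length, std::size_t max_allowed_packet)
      : max_allowed_packet_(max_allowed_packet), buf_(net_buffer_length) {}

  // Grow the buffer for an incoming packet; returns false when the packet
  // exceeds the ceiling (the ER_NET_PACKET_TOO_LARGE case in the test below).
  bool reserve(std::size_t packet_length) {
    if (packet_length > max_allowed_packet_)
      return false;
    if (packet_length > buf_.size())
      buf_.resize(packet_length);
    return true;
  }

  // The sanity rule the patch now warns about boils down to this predicate.
  static bool sane(std::size_t net_buffer_length,
                   std::size_t max_allowed_packet) {
    return max_allowed_packet >= net_buffer_length;
  }

private:
  std::size_t max_allowed_packet_;
  std::vector<char> buf_;
};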
--- mysql-test/r/packet.result | 1 + mysql-test/r/variables.result | 27 +++++++++++++++ mysql-test/suite/rpl/r/rpl_packet.result | 2 ++ mysql-test/suite/rpl/t/rpl_loaddata_map-master.opt | 2 +- mysql-test/suite/rpl/t/rpl_loaddata_map-slave.opt | 2 +- mysql-test/t/variables.test | 26 +++++++++++++++ sql/mysqld.cc | 8 +++++ sql/set_var.cc | 38 ++++++++++++++++++++-- 8 files changed, 102 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/packet.result b/mysql-test/r/packet.result index ecbb47d4ee0..d673ab42691 100644 --- a/mysql-test/r/packet.result +++ b/mysql-test/r/packet.result @@ -3,6 +3,7 @@ set @net_buffer_length=@@global.net_buffer_length; set global max_allowed_packet=100; Warnings: Warning 1292 Truncated incorrect max_allowed_packet value: '100' +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' set global net_buffer_length=100; Warnings: Warning 1292 Truncated incorrect net_buffer_length value: '100' diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index f4e2a8c08fc..af3b76b09f3 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -280,6 +280,7 @@ NET_BUFFER_LENGTH 1024 set global net_buffer_length=2000000000; Warnings: Warning 1292 Truncated incorrect net_buffer_length value: '2000000000' +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' show global variables like 'net_buffer_length'; Variable_name Value net_buffer_length 1048576 @@ -502,6 +503,7 @@ set low_priority_updates=1; set global max_allowed_packet=100; Warnings: Warning 1292 Truncated incorrect max_allowed_packet value: '100' +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' set global max_binlog_cache_size=100; Warnings: Warning 1292 Truncated incorrect max_binlog_cache_size value: '100' @@ -1059,6 +1061,8 @@ set global max_write_lock_count =default; set global myisam_data_pointer_size =@my_myisam_data_pointer_size; set global myisam_max_sort_file_size =@my_myisam_max_sort_file_size; set global net_buffer_length =@my_net_buffer_length; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' set global net_write_timeout =@my_net_write_timeout; set global net_read_timeout =@my_net_read_timeout; set global query_cache_limit =@my_query_cache_limit; @@ -1547,4 +1551,27 @@ SET @@global.max_binlog_cache_size=DEFAULT; SET @@global.max_join_size=DEFAULT; SET @@global.key_buffer_size=@kbs; SET @@global.key_cache_block_size=@kcbs; +# +# Bug #11766769 : 59959: SMALL VALUES OF --MAX-ALLOWED-PACKET +# ARE NOT BEING HONORED +# +CREATE TABLE t1 (a MEDIUMTEXT); +SET GLOBAL max_allowed_packet=2048; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' +SET GLOBAL net_buffer_length=4096; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' +SHOW SESSION VARIABLES LIKE 'max_allowed_packet'; +Variable_name Value +max_allowed_packet 2048 +SHOW SESSION VARIABLES LIKE 'net_buffer_length'; +Variable_name Value +net_buffer_length 4096 +ERROR 08S01: Got a packet bigger than 'max_allowed_packet' bytes +SELECT LENGTH(a) FROM t1; +LENGTH(a) +SET GLOBAL max_allowed_packet=default; +SET GLOBAL net_buffer_length=default; +DROP TABLE t1; End of 5.1 tests diff --git a/mysql-test/suite/rpl/r/rpl_packet.result b/mysql-test/suite/rpl/r/rpl_packet.result index 
9239a718504..7a7f8141ac8 100644 --- a/mysql-test/suite/rpl/r/rpl_packet.result +++ b/mysql-test/suite/rpl/r/rpl_packet.result @@ -49,6 +49,8 @@ SET @max_allowed_packet_2= @@session.max_allowed_packet; ==== clean up ==== DROP TABLE t1; SET @@global.max_allowed_packet= 1024; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' SET @@global.net_buffer_length= 1024; DROP TABLE t1; RESET SLAVE; diff --git a/mysql-test/suite/rpl/t/rpl_loaddata_map-master.opt b/mysql-test/suite/rpl/t/rpl_loaddata_map-master.opt index 831680eb5ef..5fdeb855110 100644 --- a/mysql-test/suite/rpl/t/rpl_loaddata_map-master.opt +++ b/mysql-test/suite/rpl/t/rpl_loaddata_map-master.opt @@ -1 +1 @@ ---read_buffer_size=12K --max_allowed_packet=8K +--read_buffer_size=12K --max_allowed_packet=8K --net-buffer-length=8K diff --git a/mysql-test/suite/rpl/t/rpl_loaddata_map-slave.opt b/mysql-test/suite/rpl/t/rpl_loaddata_map-slave.opt index 95f55bcf7d8..7d404fae240 100644 --- a/mysql-test/suite/rpl/t/rpl_loaddata_map-slave.opt +++ b/mysql-test/suite/rpl/t/rpl_loaddata_map-slave.opt @@ -1 +1 @@ ---max_allowed_packet=8K +--max_allowed_packet=8K --net-buffer-length=8K diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index c61e2aa3708..383bdfc79a9 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -1304,4 +1304,30 @@ SET @@global.max_join_size=DEFAULT; SET @@global.key_buffer_size=@kbs; SET @@global.key_cache_block_size=@kcbs; + +--echo # +--echo # Bug #11766769 : 59959: SMALL VALUES OF --MAX-ALLOWED-PACKET +--echo # ARE NOT BEING HONORED +--echo # + +CREATE TABLE t1 (a MEDIUMTEXT); + +SET GLOBAL max_allowed_packet=2048; +SET GLOBAL net_buffer_length=4096; +CONNECT (con1,localhost,root,,test); +SHOW SESSION VARIABLES LIKE 'max_allowed_packet'; +SHOW SESSION VARIABLES LIKE 'net_buffer_length'; +--disable_query_log +--error ER_NET_PACKET_TOO_LARGE +INSERT INTO t1 VALUES 
('1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
45678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890'); +--enable_query_log +SELECT LENGTH(a) FROM t1; + +CONNECTION default; +DISCONNECT con1; +SET GLOBAL max_allowed_packet=default; +SET GLOBAL net_buffer_length=default; +DROP TABLE t1; + + --echo End of 5.1 tests diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 46376a08ec9..54850f36d10 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -8795,6 +8795,14 @@ static int get_options(int *argc,char **argv) opt_log_slow_slave_statements) && !opt_slow_log) sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set"); + if (global_system_variables.net_buffer_length > + global_system_variables.max_allowed_packet) + { + sql_print_warning("net_buffer_length (%lu) is set to be larger " + "than max_allowed_packet (%lu). 
Please rectify.", + global_system_variables.net_buffer_length, + global_system_variables.max_allowed_packet); + } #if defined(HAVE_BROKEN_REALPATH) my_use_symdir=0; diff --git a/sql/set_var.cc b/sql/set_var.cc index 333fb90c795..76957e32536 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -147,6 +147,8 @@ static void sys_default_general_log_path(THD *thd, enum_var_type type); static bool sys_update_slow_log_path(THD *thd, set_var * var); static void sys_default_slow_log_path(THD *thd, enum_var_type type); static uchar *get_myisam_mmap_size(THD *thd); +static int check_max_allowed_packet(THD *thd, set_var *var); +static int check_net_buffer_length(THD *thd, set_var *var); /* Variable definition list @@ -360,7 +362,8 @@ static sys_var_const sys_lower_case_table_names(&vars, (uchar*) &lower_case_table_names); static sys_var_thd_ulong_session_readonly sys_max_allowed_packet(&vars, "max_allowed_packet", - &SV::max_allowed_packet); + &SV::max_allowed_packet, + check_max_allowed_packet); static sys_var_ulonglong_ptr sys_max_binlog_cache_size(&vars, "max_binlog_cache_size", &max_binlog_cache_size); static sys_var_long_ptr sys_max_binlog_size(&vars, "max_binlog_size", @@ -450,7 +453,8 @@ static sys_var_const sys_named_pipe(&vars, "named_pipe", /* purecov: end */ #endif static sys_var_thd_ulong_session_readonly sys_net_buffer_length(&vars, "net_buffer_length", - &SV::net_buffer_length); + &SV::net_buffer_length, + check_net_buffer_length); static sys_var_thd_ulong sys_net_read_timeout(&vars, "net_read_timeout", &SV::net_read_timeout, 0, fix_net_read_timeout); @@ -4312,6 +4316,36 @@ uchar *sys_var_event_scheduler::value_ptr(THD *thd, enum_var_type type, } #endif + +int +check_max_allowed_packet(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val < (longlong) global_system_variables.net_buffer_length) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + + +int +check_net_buffer_length(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val > (longlong) global_system_variables.max_allowed_packet) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + /**************************************************************************** Used templates ****************************************************************************/ -- cgit v1.2.1 From e0887df8e1127c0f1410b9d4ad61647cb5f93be2 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Fri, 25 Mar 2011 12:36:02 +0100 Subject: Bug#11766249 bug#59316: PARTITIONING AND INDEX_MERGE MEMORY LEAK When executing row-ordered-retrieval index merge, the handler was cloned, but it used the wrong memory root, so instead of allocating memory on the thread/query's mem_root, it used the table's mem_root, resulting in non released memory in the table object, and was not freed until the table was closed. Solution was to ensure that memory used during cloning of a handler was allocated from the correct memory root. This was implemented by fixing handler::clone() to also take a name argument, so it can be used with partitioning. And in ha_partition only allocate the ha_partition's ref, and call the original ha_partition partitions clone() and set at cloned partitions. 
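
For illustration, a minimal stand-alone sketch of the allocation pattern this fix relies on: the clone takes the caller's per-query arena (MEM_ROOT in the server) as an argument and allocates its per-clone buffers, such as ref, from that arena, so the memory is reclaimed when the query's arena is freed instead of accumulating in the table's longer-lived arena. The names below (Arena, Handler, main) are simplified stand-ins, not the actual server classes; the patch itself expresses the same idea as alloc_root(mem_root, ALIGN_SIZE(ref_length)*2) inside handler::clone() and ha_partition::clone().

  #include <cstddef>
  #include <cstring>
  #include <memory>
  #include <new>
  #include <vector>

  // Simplified stand-in for MEM_ROOT: a per-query arena whose blocks are
  // all released when the arena itself is destroyed.
  class Arena {
  public:
    void *alloc(std::size_t n)
    {
      blocks_.emplace_back(new unsigned char[n]);
      return blocks_.back().get();
    }
  private:
    std::vector<std::unique_ptr<unsigned char[]>> blocks_;
  };

  // Simplified stand-in for a storage-engine handler.
  struct Handler {
    unsigned char *ref;         // row-position buffer, 2 * ref_length bytes
    std::size_t ref_length;

    explicit Handler(std::size_t ref_len) : ref(nullptr), ref_length(ref_len) {}

    // Clone for the duration of one query: every allocation the clone needs
    // is taken from the arena passed in by the caller, never from a
    // longer-lived (table-scoped) arena.
    Handler *clone(const char *name, Arena *arena) const
    {
      void *mem = arena->alloc(sizeof(Handler));
      Handler *copy = new (mem) Handler(ref_length);
      copy->ref = static_cast<unsigned char *>(arena->alloc(2 * ref_length));
      std::memset(copy->ref, 0, 2 * ref_length);
      (void) name;              // the real clone() re-opens the table by name
      return copy;
    }
  };

  int main()
  {
    Handler original(8);
    {
      Arena query_arena;                        // lives for one query
      Handler *c = original.clone("t1", &query_arena);
      (void) c;                                 // row-ordered retrieval uses the clone here
    }                                           // all clone memory is freed with the arena
    return 0;
  }
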
Fix of .bzrignore on Windows with VS 2010 --- .bzrignore | 12 ++ sql/ha_partition.cc | 230 ++++++++++++++++++++++++++++---------- sql/ha_partition.h | 20 ++-- sql/handler.cc | 6 +- sql/handler.h | 2 +- sql/opt_range.cc | 2 +- storage/heap/ha_heap.cc | 4 +- storage/heap/ha_heap.h | 2 +- storage/myisam/ha_myisam.cc | 5 +- storage/myisam/ha_myisam.h | 2 +- storage/myisammrg/ha_myisammrg.cc | 9 +- storage/myisammrg/ha_myisammrg.h | 2 +- 12 files changed, 214 insertions(+), 82 deletions(-) diff --git a/.bzrignore b/.bzrignore index 3d27c001e2b..9287e9499e3 100644 --- a/.bzrignore +++ b/.bzrignore @@ -37,7 +37,13 @@ *.user *.vcproj *.vcproj.cmake +*.vcxproj +*.vcxproj.filters */*.dir/* +*.dir +Debug +MySql.sdf +Win32 */*_pure_*warnings */.deps */.libs/* @@ -46,6 +52,7 @@ */minsizerel/* */release/* */relwithdebinfo/* +RelWithDebInfo *~ .*.swp ./CMakeCache.txt @@ -607,6 +614,7 @@ include/mysql_h.ic include/mysql_version.h include/mysqld_ername.h include/mysqld_error.h +include/mysqld_error.h.rule include/openssl include/readline include/readline/*.h @@ -1879,7 +1887,9 @@ scripts/mysql_find_rows scripts/mysql_fix_extensions scripts/mysql_fix_privilege_tables scripts/mysql_fix_privilege_tables.sql +scripts/mysql_fix_privilege_tables.sql.rule scripts/mysql_fix_privilege_tables_sql.c +scripts/mysql_fix_privilege_tables_sql.c.rule scripts/mysql_install_db scripts/mysql_secure_installation scripts/mysql_setpermission @@ -2116,6 +2126,7 @@ sql/handlerton.cc sql/html sql/latex sql/lex_hash.h +sql/lex_hash.h.rule sql/link_sources sql/max/* sql/message.h @@ -2147,6 +2158,7 @@ sql/sql_builtin.cc sql/sql_select.cc.orig sql/sql_yacc.cc sql/sql_yacc.h +sql/sql_yacc.h.rule sql/sql_yacc.output sql/sql_yacc.yy.orig sql/test_time diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 7bcbd241541..946ecc652ef 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -163,10 +163,14 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF; */ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) - :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), - m_is_sub_partitioned(0) + :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(table)"); + m_part_info= NULL; + m_create_handler= FALSE; + m_is_sub_partitioned= 0; + m_is_clone_of= NULL; + m_clone_mem_root= NULL; init_handler_variables(); DBUG_VOID_RETURN; } @@ -184,15 +188,46 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) */ ha_partition::ha_partition(handlerton *hton, partition_info *part_info) - :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), - m_is_sub_partitioned(m_part_info->is_sub_partitioned()) + :handler(hton, NULL) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); + DBUG_ASSERT(part_info); + m_part_info= part_info; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); init_handler_variables(); - DBUG_ASSERT(m_part_info); DBUG_VOID_RETURN; } +/** + ha_partition constructor method used by ha_partition::clone() + + @param hton Handlerton (partition_hton) + @param share Table share object + @param part_info_arg partition_info to use + @param clone_arg ha_partition to clone + @param clme_mem_root_arg MEM_ROOT to use + + @return New partition handler +*/ + +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, + partition_info *part_info_arg, + ha_partition *clone_arg, + MEM_ROOT *clone_mem_root_arg) + :handler(hton, share) +{ + DBUG_ENTER("ha_partition::ha_partition(clone)"); + m_part_info= part_info_arg; + m_create_handler= TRUE; 
+ m_is_sub_partitioned= m_part_info->is_sub_partitioned(); + m_is_clone_of= clone_arg; + m_clone_mem_root= clone_mem_root_arg; + init_handler_variables(); + m_tot_parts= clone_arg->m_tot_parts; + DBUG_ASSERT(m_tot_parts); + DBUG_VOID_RETURN; +} /* Initialize handler object @@ -244,7 +279,6 @@ void ha_partition::init_handler_variables() m_rec0= 0; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; - is_clone= FALSE, m_part_func_monotonicity_info= NON_MONOTONIC; auto_increment_lock= FALSE; auto_increment_safe_stmt_log_lock= FALSE; @@ -359,7 +393,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root) */ DBUG_RETURN(0); } - else if (get_from_handler_file(table_share->normalized_path.str, mem_root)) + else if (get_from_handler_file(table_share->normalized_path.str, + mem_root, false)) { my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0)); DBUG_RETURN(1); @@ -1848,7 +1883,7 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_RETURN(TRUE); } - if (get_from_handler_file(from, ha_thd()->mem_root)) + if (get_from_handler_file(from, ha_thd()->mem_root, false)) DBUG_RETURN(TRUE); DBUG_ASSERT(m_file_buffer); DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to)); @@ -2368,7 +2403,8 @@ error_end: partitions. */ -bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) +bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool clone) { char buff[FN_REFLEN], *address_tot_name_len; File file; @@ -2403,15 +2439,18 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) m_tot_parts= uint4korr((file_buffer) + 8); DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); tot_partition_words= (m_tot_parts + 3) / 4; - engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); - for (i= 0; i < m_tot_parts; i++) + if (!clone) { - engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), - (enum legacy_db_type) - *(uchar *) ((file_buffer) + - 12 + i)); - if (!engine_array[i]) - goto err3; + engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); + for (i= 0; i < m_tot_parts; i++) + { + engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), + (enum legacy_db_type) + *(uchar *) ((file_buffer) + + 12 + i)); + if (!engine_array[i]) + goto err3; + } } address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words; tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4; @@ -2422,16 +2461,19 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) m_file_buffer= file_buffer; // Will be freed in clear_handler_file() m_name_buffer_ptr= name_buffer_ptr; - if (!(m_engine_array= (plugin_ref*) - my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME)))) - goto err3; + if (!clone) + { + if (!(m_engine_array= (plugin_ref*) + my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME)))) + goto err3; - for (i= 0; i < m_tot_parts; i++) - m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); + for (i= 0; i < m_tot_parts; i++) + m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); - my_afree((gptr) engine_array); + my_afree((gptr) engine_array); + } - if (!m_file && create_handlers(mem_root)) + if (!clone && !m_file && create_handlers(mem_root)) { clear_handler_file(); DBUG_RETURN(TRUE); @@ -2439,7 +2481,8 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) DBUG_RETURN(FALSE); err3: - my_afree((gptr) engine_array); + if (!clone) + my_afree((gptr) engine_array); err2: my_free(file_buffer, MYF(0)); err1: @@ -2491,13 +2534,13 @@ 
void ha_data_partition_destroy(void *ha_data) int ha_partition::open(const char *name, int mode, uint test_if_locked) { - char *name_buffer_ptr= m_name_buffer_ptr; + char *name_buffer_ptr; int error; uint alloc_len; handler **file; char name_buff[FN_REFLEN]; bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); - ulonglong check_table_flags= 0; + ulonglong check_table_flags; DBUG_ENTER("ha_partition::open"); DBUG_ASSERT(table->s == table_share); @@ -2505,8 +2548,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_mode= mode; m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; - if (get_from_handler_file(name, &table->mem_root)) + if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) DBUG_RETURN(1); + name_buffer_ptr= m_name_buffer_ptr; m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->reclength; @@ -2542,8 +2586,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(1); bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ - if (!is_clone) + if (!m_is_clone_of) { + DBUG_ASSERT(!m_clone_mem_root); if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) { bitmap_free(&m_bulk_insert_started); @@ -2552,32 +2597,70 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) bitmap_set_all(&(m_part_info->used_partitions)); } + if (m_is_clone_of) + { + uint i; + DBUG_ASSERT(m_clone_mem_root); + /* Allocate an array of handler pointers for the partitions handlers. */ + alloc_len= (m_tot_parts + 1) * sizeof(handler*); + if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len))) + goto err_alloc; + memset(m_file, 0, alloc_len); + /* + Populate them by cloning the original partitions. This also opens them. + Note that file->ref is allocated too. + */ + file= m_is_clone_of->m_file; + for (i= 0; i < m_tot_parts; i++) + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if (!(m_file[i]= file[i]->clone((const char*) name_buff, + m_clone_mem_root))) + { + error= HA_ERR_INITIALIZATION; + file= &m_file[i]; + goto err_handler; + } + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } + } + else + { + file= m_file; + do + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, + test_if_locked))) + goto err_handler; + m_no_locks+= (*file)->lock_count(); + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } while (*(++file)); + } + file= m_file; + ref_length= (*file)->ref_length; + check_table_flags= (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); + file++; do { - create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, - FALSE); - if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, - test_if_locked))) - goto err_handler; - m_no_locks+= (*file)->lock_count(); - name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + DBUG_ASSERT(ref_length >= (*file)->ref_length); set_if_bigger(ref_length, ((*file)->ref_length)); /* Verify that all partitions have the same set of table flags. Mask all flags that partitioning enables/disables. 
*/ - if (!check_table_flags) - { - check_table_flags= (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); - } - else if (check_table_flags != (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS))) + if (check_table_flags != (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS))) { error= HA_ERR_INITIALIZATION; + /* set file to last handler, so all of them is closed */ + file = &m_file[m_tot_parts - 1]; goto err_handler; } } while (*(++file)); @@ -2589,6 +2672,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) */ ref_length+= PARTITION_BYTES_IN_POS; m_ref_length= ref_length; + /* Release buffer read from .par file. It will not be reused again after being opened once. @@ -2646,25 +2730,55 @@ err_handler: DEBUG_SYNC(ha_thd(), "partition_open_error"); while (file-- != m_file) (*file)->close(); +err_alloc: bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); DBUG_RETURN(error); } -handler *ha_partition::clone(MEM_ROOT *mem_root) + +/** + Clone the open and locked partitioning handler. + + @param mem_root MEM_ROOT to use. + + @return Pointer to the successfully created clone or NULL + + @details + This function creates a new ha_partition handler as a clone/copy. The + original (this) must already be opened and locked. The clone will use + the originals m_part_info. + It also allocates memory to ref + ref_dup. + In ha_partition::open() it will clone its original handlers partitions + which will allocate then om the correct MEM_ROOT and also open them. +*/ + +handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, - table->s->db_type()); - ((ha_partition*)new_handler)->m_part_info= m_part_info; - ((ha_partition*)new_handler)->is_clone= TRUE; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + ha_partition *new_handler; + + DBUG_ENTER("ha_partition::clone"); + new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, + this, mem_root); + if (!new_handler) + DBUG_RETURN(NULL); + + /* + Allocate new_handler->ref here because otherwise ha_open will allocate it + on this->table->mem_root and we will not be able to reclaim that memory + when the clone handler object is destroyed. + */ + new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(m_ref_length)*2); + if (!new_handler->ref) + DBUG_RETURN(NULL); + + if (new_handler->ha_open(table, name, + table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + DBUG_RETURN(NULL); + + DBUG_RETURN((handler*) new_handler); } @@ -2695,7 +2809,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); delete_queue(&m_queue); bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); file= m_file; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 76b91e160ca..a38d56af8ff 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -133,6 +133,13 @@ private: bool m_is_sub_partitioned; // Is subpartitioned bool m_ordered_scan_ongoing; + /* + If set, this object was created with ha_partition::clone and doesn't + "own" the m_part_info structure. 
+ */ + ha_partition *m_is_clone_of; + MEM_ROOT *m_clone_mem_root; + /* We keep track if all underlying handlers are MyISAM since MyISAM has a great number of extra flags not needed by other handlers. @@ -169,11 +176,6 @@ private: PARTITION_SHARE *share; /* Shared lock info */ #endif - /* - TRUE <=> this object was created with ha_partition::clone and doesn't - "own" the m_part_info structure. - */ - bool is_clone; bool auto_increment_lock; /**< lock reading/updating auto_inc */ /** Flag to keep the auto_increment lock through out the statement. @@ -186,7 +188,7 @@ private: /** used for prediction of start_bulk_insert rows */ enum_monotonicity_info m_part_func_monotonicity_info; public: - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -205,6 +207,10 @@ public: */ ha_partition(handlerton *hton, TABLE_SHARE * table); ha_partition(handlerton *hton, partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE *share, + partition_info *part_info_arg, + ha_partition *clone_arg, + MEM_ROOT *clone_mem_root_arg); ~ha_partition(); /* A partition handler has no characteristics in itself. It only inherits @@ -275,7 +281,7 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool get_from_handler_file(const char *name, MEM_ROOT *mem_root); + bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, bool clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); bool create_handlers(MEM_ROOT *mem_root); void clear_handler_file(); diff --git a/sql/handler.cc b/sql/handler.cc index 5968a78b587..8adb8e061a3 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2037,9 +2037,9 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, /**************************************************************************** ** General handler functions ****************************************************************************/ -handler *handler::clone(MEM_ROOT *mem_root) +handler *handler::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); + handler *new_handler= get_new_handler(table->s, mem_root, ht); /* Allocate handler->ref here because otherwise ha_open will allocate it on this->table->mem_root and we will not be able to reclaim that memory @@ -2048,7 +2048,7 @@ handler *handler::clone(MEM_ROOT *mem_root) if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) return NULL; if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, + name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) return new_handler; diff --git a/sql/handler.h b/sql/handler.h index dabc179079a..3de901dec62 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1166,7 +1166,7 @@ public: DBUG_ASSERT(locked == FALSE); /* TODO: DBUG_ASSERT(inited == NONE); */ } - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() { diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 9edd4f58f04..fd71166dc23 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1335,7 +1335,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) } thd= head->in_use; - if (!(file= head->file->clone(thd->mem_root))) + if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root))) { /* Manually 
set the error flag. Note: there seems to be quite a few diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index fb7c13e4e41..9f29dee2030 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -142,11 +142,11 @@ int ha_heap::close(void) DESCRIPTION Do same as default implementation but use file->s->name instead of table->s->path. This is needed by Windows where the clone() call sees - '/'-delimited path in table->s->path, while ha_peap::open() was called + '/'-delimited path in table->s->path, while ha_heap::open() was called with '\'-delimited path. */ -handler *ha_heap::clone(MEM_ROOT *mem_root) +handler *ha_heap::clone(const char *name, MEM_ROOT *mem_root) { handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat, diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h index 22722129f4c..69751101645 100644 --- a/storage/heap/ha_heap.h +++ b/storage/heap/ha_heap.h @@ -34,7 +34,7 @@ class ha_heap: public handler public: ha_heap(handlerton *hton, TABLE_SHARE *table); ~ha_heap() {} - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); const char *table_type() const { return (table->in_use->variables.sql_mode & MODE_MYSQL323) ? diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 2650cc850a8..e5b657a4630 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -552,9 +552,10 @@ ha_myisam::ha_myisam(handlerton *hton, TABLE_SHARE *table_arg) can_enable_indexes(1) {} -handler *ha_myisam::clone(MEM_ROOT *mem_root) +handler *ha_myisam::clone(const char *name, MEM_ROOT *mem_root) { - ha_myisam *new_handler= static_cast (handler::clone(mem_root)); + ha_myisam *new_handler= static_cast (handler::clone(name, + mem_root)); if (new_handler) new_handler->file->state= file->state; return new_handler; diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h index 55a5eac92de..54801bfd0b8 100644 --- a/storage/myisam/ha_myisam.h +++ b/storage/myisam/ha_myisam.h @@ -44,7 +44,7 @@ class ha_myisam: public handler public: ha_myisam(handlerton *hton, TABLE_SHARE *table_arg); ~ha_myisam() {} - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); const char *table_type() const { return "MyISAM"; } const char *index_type(uint key_number); const char **bas_ext() const; diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 4c8d45d1fe1..3beabd83512 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -459,8 +459,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)), problem because all locking is handled by the original MERGE table from which this is cloned of. */ - if (!(file= myrg_open(table->s->normalized_path.str, table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED))) + if (!(file= myrg_open(name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED))) { DBUG_PRINT("error", ("my_errno %d", my_errno)); DBUG_RETURN(my_errno ? my_errno : -1); @@ -484,7 +483,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)), @return A cloned handler instance. 
*/ -handler *ha_myisammrg::clone(MEM_ROOT *mem_root) +handler *ha_myisammrg::clone(const char *name, MEM_ROOT *mem_root) { MYRG_TABLE *u_table,*newu_table; ha_myisammrg *new_handler= @@ -505,8 +504,8 @@ handler *ha_myisammrg::clone(MEM_ROOT *mem_root) return NULL; } - if (new_handler->ha_open(table, table->s->normalized_path.str, table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) + if (new_handler->ha_open(table, name, table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) { delete new_handler; return NULL; diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h index 790aa15e90a..a1272c633a1 100644 --- a/storage/myisammrg/ha_myisammrg.h +++ b/storage/myisammrg/ha_myisammrg.h @@ -62,7 +62,7 @@ class ha_myisammrg: public handler int open(const char *name, int mode, uint test_if_locked); int attach_children(void); int detach_children(void); - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); int close(void); int write_row(uchar * buf); int update_row(const uchar * old_data, uchar * new_data); -- cgit v1.2.1 From f1b638d33cdf95b70fa925cce304864c96fdf7ee Mon Sep 17 00:00:00 2001 From: Sven Sandberg Date: Fri, 25 Mar 2011 15:16:13 +0100 Subject: BUG#11766427, BUG#59539: Filter by server id in mysqlbinlog fails Problem: mysqlbinlog --server-id may filter out Format_description_log_events. If mysqlbinlog does not process the Format_description_log_event, then mysqlbinlog cannot read the rest of the binary log correctly. This can have the effect that mysqlbinlog crashes, generates an error, or generates output that causes mysqld to crash, generate an error, or corrupt data. Fix: Never filter out Format_description_log_events. Also, never filter out Rotate_log_events. client/mysqlbinlog.cc: Process Format_description_log_events even when the server_id does not match the number given by --server-id. mysql-test/t/mysqlbinlog.test: Add test case. --- client/mysqlbinlog.cc | 14 +++++++++++--- mysql-test/r/mysqlbinlog.result | 12 ++++++++++++ mysql-test/t/mysqlbinlog.test | 20 ++++++++++++++++++++ 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index dec3f142798..30a8bddc17c 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -705,10 +705,18 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev, */ start_datetime= 0; offset= 0; // print everything and protect against cycling rec_count + /* + Skip events according to the --server-id flag. However, don't + skip format_description or rotate events, because they they + are really "global" events that are relevant for the entire + binlog, even if they have a server_id. Also, we have to read + the format_description event so that we can parse subsequent + events. + */ + if (ev_type != ROTATE_EVENT && + server_id && (server_id != ev->server_id)) + goto end; } - if (server_id && (server_id != ev->server_id)) - /* skip just this event, continue processing the log. 
*/ - goto end; if (((my_time_t)(ev->when) >= stop_datetime) || (pos >= stop_position_mot)) { diff --git a/mysql-test/r/mysqlbinlog.result b/mysql-test/r/mysqlbinlog.result index 1f2e1ed67e0..45068ddfaec 100644 --- a/mysql-test/r/mysqlbinlog.result +++ b/mysql-test/r/mysqlbinlog.result @@ -658,3 +658,15 @@ master-bin.000002 # Query # # CREATE DATABASE test1 master-bin.000002 # Query # # use `test1`; CREATE TABLE t1(id int) master-bin.000002 # Query # # use `test1`; DROP TABLE t1 master-bin.000002 # Query # # DROP DATABASE test1 +RESET MASTER; +USE test; +CREATE TABLE t1 (a INT); +SET GLOBAL SERVER_ID = 2; +DROP TABLE t1; +FLUSH LOGS; +SHOW TABLES IN test; +Tables_in_test +t1 +SHOW TABLES IN test; +Tables_in_test +SET GLOBAL SERVER_ID = 1; diff --git a/mysql-test/t/mysqlbinlog.test b/mysql-test/t/mysqlbinlog.test index d5dd3052269..98ee18b554e 100644 --- a/mysql-test/t/mysqlbinlog.test +++ b/mysql-test/t/mysqlbinlog.test @@ -501,3 +501,23 @@ exec $MYSQL_BINLOG $MYSQLD_DATADIR/$master_binlog | $MYSQL test 2>&1; let $binlog_file= query_get_value(SHOW MASTER STATUS, File, 1); source include/show_binlog_events.inc; +# +# BUG#11766427 BUG#59530: Filter by server id in mysqlbinlog fails +# This test checks that the format description log event is not +# filtered out by the --server-id option. +# +RESET MASTER; +USE test; +CREATE TABLE t1 (a INT); +--let $old_server_id= `SELECT @@GLOBAL.SERVER_ID` +SET GLOBAL SERVER_ID = 2; +DROP TABLE t1; +--let $master_binlog= query_get_value(SHOW MASTER STATUS, File, 1) +FLUSH LOGS; +# The following should only create t1, not drop it. +--exec $MYSQL_BINLOG --server-id=1 $MYSQLD_DATADIR/$master_binlog | $MYSQL +SHOW TABLES IN test; +# The following should only drop t1, not create it. +--exec $MYSQL_BINLOG --server-id=2 $MYSQLD_DATADIR/$master_binlog | $MYSQL +SHOW TABLES IN test; +eval SET GLOBAL SERVER_ID = $old_server_id; -- cgit v1.2.1 From d499851be03a2a20f7cb230d9b2d69e169aa81c8 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Mon, 28 Mar 2011 11:53:18 +0400 Subject: Bug#11766112 59151:UNINITIALIZED VALUES IN EXTRACT_DATE_TIME WITH STR_TO_DATE(SPACE(..) ... Valgrind warining happens due to missing 'end of the string' check. The fix is to check if we reached the end of the string. mysql-test/r/func_time.result: test case mysql-test/t/func_time.test: test case sql/item_timefunc.cc: check if we reached the end of the string after leading spaces skipping. --- mysql-test/r/func_time.result | 6 ++++++ mysql-test/t/func_time.test | 6 ++++++ sql/item_timefunc.cc | 4 ++-- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 0d4ce9414e5..f63860039d7 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1375,4 +1375,10 @@ Warning 1292 Truncated incorrect time value: '' Warning 1292 Truncated incorrect time value: '' Warning 1292 Truncated incorrect time value: '' DROP TABLE t1; +# +# Bug#11766112 59151:UNINITIALIZED VALUES IN EXTRACT_DATE_TIME WITH STR_TO_DATE(SPACE(..) ... +# +SELECT STR_TO_DATE(SPACE(2),'1'); +STR_TO_DATE(SPACE(2),'1') +0000-00-00 End of 5.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index f32110ef87c..c48351d33f2 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -881,4 +881,10 @@ INSERT INTO t1 VALUES (''),(''); SELECT COUNT(*) FROM t1 GROUP BY TIME_TO_SEC(a); DROP TABLE t1; +--echo # +--echo # Bug#11766112 59151:UNINITIALIZED VALUES IN EXTRACT_DATE_TIME WITH STR_TO_DATE(SPACE(..) 
... +--echo # + +SELECT STR_TO_DATE(SPACE(2),'1'); + --echo End of 5.1 tests diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 6335199b8de..71b2baf4fee 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -294,8 +294,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, for (; ptr != end && val != val_end; ptr++) { /* Skip pre-space between each argument */ - while (val != val_end && my_isspace(cs, *val)) - val++; + if ((val+= cs->cset->scan(cs, val, val_end, MY_SEQ_SPACES)) >= val_end) + break; if (*ptr == '%' && ptr+1 != end) { -- cgit v1.2.1 From ff23f5360ee328bd6adb14436e68e080cfe1d110 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Mon, 28 Mar 2011 12:28:30 +0400 Subject: Bug#11766424 59527: DECIMAL_BIN_SIZE: ASSERTION `SCALE >= 0 && PRECISION > 0 && SCALE <= PRE Assertion happens due to missing initialization of unsigned_flag for Item_func_set_user_var object. It leads to incorrect calculation of decimal field size. The fix is to add initialization of unsigned_flag. mysql-test/r/variables.result: test case mysql-test/t/variables.test: test case sql/item_func.cc: add initialization of unsigned_flag. --- mysql-test/r/variables.result | 16 ++++++++++++++++ mysql-test/t/variables.test | 13 +++++++++++++ sql/item_func.cc | 1 + 3 files changed, 30 insertions(+) diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index af3b76b09f3..f92e1dec4c9 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -1547,6 +1547,22 @@ Warning 1292 Truncated incorrect key_cache_block_size value: '0' select @@max_long_data_size; @@max_long_data_size 1048576 +# +# Bug#11766424 59527: DECIMAL_BIN_SIZE: ASSERTION `SCALE >= 0 && PRECISION > 0 && SCALE <= PRE +# +CREATE TABLE t1(f1 DECIMAL(1,1) UNSIGNED); +INSERT INTO t1 VALUES (0.2),(0.1); +SELECT 1 FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE @a=f1); +1 +1 +DROP TABLE t1; +CREATE TABLE t1 AS SELECT @a:= CAST(1 AS UNSIGNED) AS a; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(1) unsigned NOT NULL DEFAULT '0' +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; SET @@global.max_binlog_cache_size=DEFAULT; SET @@global.max_join_size=DEFAULT; SET @@global.key_buffer_size=@kbs; diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index 383bdfc79a9..8f111e7cf3b 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -1298,6 +1298,19 @@ SET @@global.key_cache_block_size=0; # select @@max_long_data_size; +--echo # +--echo # Bug#11766424 59527: DECIMAL_BIN_SIZE: ASSERTION `SCALE >= 0 && PRECISION > 0 && SCALE <= PRE +--echo # + +CREATE TABLE t1(f1 DECIMAL(1,1) UNSIGNED); +INSERT INTO t1 VALUES (0.2),(0.1); +SELECT 1 FROM t1 GROUP BY @a:= (SELECT ROUND(f1) FROM t1 WHERE @a=f1); +DROP TABLE t1; + +CREATE TABLE t1 AS SELECT @a:= CAST(1 AS UNSIGNED) AS a; +SHOW CREATE TABLE t1; +DROP TABLE t1; + # cleanup SET @@global.max_binlog_cache_size=DEFAULT; SET @@global.max_join_size=DEFAULT; diff --git a/sql/item_func.cc b/sql/item_func.cc index efae928a8b6..d4fd2c94e1d 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -3840,6 +3840,7 @@ Item_func_set_user_var::fix_length_and_dec() maybe_null=args[0]->maybe_null; max_length=args[0]->max_length; decimals=args[0]->decimals; + unsigned_flag= args[0]->unsigned_flag; collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT); } -- cgit v1.2.1 From 08d598fb98e0f7e5c34f47c6510577a375d0fab2 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Mon, 28 Mar 2011 11:34:12 
+0300 Subject: Store the '\0'-terminated query in row->trx_query This problem was introduced in marko.makela@oracle.com-20100514130815-ym7j7cfu88ro6km4 and is probably the reason for the following valgrind warning: from http://bugs.mysql.com/52691 , http://bugs.mysql.com/file.php?id=16880 : Version: '5.6.3-m5-valgrind-max-debug' socket: '/tmp/mysql.sock' port: 3306 Source distribution ==14947== Thread 18: ==14947== Conditional jump or move depends on uninitialised value(s) ==14947== at 0x4A06318: __GI_strlen (mc_replace_strmem.c:284) ==14947== by 0x9F3D7A: fill_innodb_trx_from_cache(trx_i_s_cache_struct*, THD*, TABLE*) (i_s.cc:591) ==14947== by 0x9F4D7D: trx_i_s_common_fill_table(THD*, TABLE_LIST*, Item*) (i_s.cc:1238) ==14947== by 0x7689F3: get_schema_tables_result(JOIN*, enum_schema_table_state) (sql_show.cc:6745) ==14947== by 0x715A75: JOIN::exec() (sql_select.cc:2861) ==14947== by 0x7185BD: mysql_select(THD*, Item***, TABLE_LIST*, unsigned int, List&, Item*, unsigned int, st_order*, st_order*, Item*, st_order*, unsigned long long, select_result*, st_select_lex_unit*, st_select_lex*) (sql_select.cc:3609) ==14947== by 0x70E823: handle_select(THD*, LEX*, select_result*, unsigned long) (sql_select.cc:319) ==14947== by 0x6F2305: execute_sqlcom_select(THD*, TABLE_LIST*) (sql_parse.cc:4557) ==14947== by 0x6EAED4: mysql_execute_command(THD*) (sql_parse.cc:2135) ==14947== by 0x6F44C9: mysql_parse(THD*, char*, unsigned int, Parser_state*) (sql_parse.cc:5597) ==14947== by 0x6E864B: dispatch_command(enum_server_command, THD*, char*, unsigned int) (sql_parse.cc:1093) ==14947== by 0x6E785E: do_command(THD*) (sql_parse.cc:815) ==14947== by 0x6C18DD: do_handle_one_connection(THD*) (sql_connect.cc:771) ==14947== by 0x6C146E: handle_one_connection (sql_connect.cc:707) ==14947== by 0x30E1807760: start_thread (pthread_create.c:301) ==14947== by 0x35EA670F: ??? 
==14947== Uninitialised value was created by a heap allocation ==14947== at 0x4A0515D: malloc (vg_replace_malloc.c:195) ==14947== by 0xB4B948: mem_area_alloc (mem0pool.c:385) ==14947== by 0xB4A27C: mem_heap_create_block (mem0mem.c:333) ==14947== by 0xB4A530: mem_heap_add_block (mem0mem.c:446) ==14947== by 0xB0D2A4: mem_heap_alloc (mem0mem.ic:186) ==14947== by 0xB0D9C2: ha_storage_put_memlim (ha0storage.c:118) ==14947== by 0xA479D8: fill_trx_row (trx0i_s.c:521) ==14947== by 0xA490E9: fetch_data_into_cache (trx0i_s.c:1319) ==14947== by 0xA491BA: trx_i_s_possibly_fetch_data_into_cache (trx0i_s.c:1352) ==14947== by 0x9F4CE7: trx_i_s_common_fill_table(THD*, TABLE_LIST*, Item*) (i_s.cc:1221) ==14947== by 0x7689F3: get_schema_tables_result(JOIN*, enum_schema_table_state) (sql_show.cc:6745) ==14947== by 0x715A75: JOIN::exec() (sql_select.cc:2861) ==14947== by 0x7185BD: mysql_select(THD*, Item***, TABLE_LIST*, unsigned int, List&, Item*, unsigned int, st_order*, st_order*, Item*, st_order*, unsigned long long, select_result*, st_select_lex_unit*, st_select_lex*) (sql_select.cc:3609) ==14947== by 0x70E823: handle_select(THD*, LEX*, select_result*, unsigned long) (sql_select.cc:319) ==14947== by 0x6F2305: execute_sqlcom_select(THD*, TABLE_LIST*) (sql_parse.cc:4557) ==14947== by 0x6EAED4: mysql_execute_command(THD*) (sql_parse.cc:2135) ==14947== by 0x6F44C9: mysql_parse(THD*, char*, unsigned int, Parser_state*) (sql_parse.cc:5597) ==14947== by 0x6E864B: dispatch_command(enum_server_command, THD*, char*, unsigned int) (sql_parse.cc:1093) ==14947== by 0x6E785E: do_command(THD*) (sql_parse.cc:815) ==14947== by 0x6C18DD: do_handle_one_connection(THD*) (sql_connect.cc:771) ==14947== by 0x6C146E: handle_one_connection (sql_connect.cc:707) ==14947== by 0x30E1807760: start_thread (pthread_create.c:301) ==14947== by 0x35EA670F: ??? (gdb) bt #0 0x0000000004a06318 in _vgrZU_libcZdsoZa___GI_strlen (str=0x3026bfa0 "insert into `blobtest` set `data`='pkefxxpkalpabzgrczlxefkreqljeqbvzrcnhvhsjsfnvxzjsltfuincffigdkmhvvcmnseluzgbtedrfmxvnrdmzesbinjgwvharkpgjplrlnqudfidbqwgbykupycxzyikzqincnsjrxgncqzlgyqwjdbjulztgsffxpjgymsnntdibvklwqylmwhsmdskmllxuwafabdjnwlyofknwuixiyrgnplmerfdewgizkdhznitesfqepsqbbwkdepkmjoseyxjofmmjaqdipwopfrwidmhqbtovdslvayxcnpewzhppeetblccppniamezibuoinvlxkafpcmozawtplfpepxwlwhymsuraezcwvjqzwogsozodlsfzjiyrcaljjhqwdrcjawvelhefzzaexvcbyorlcyupqwgjuamiqpiputtndjwcsuyzdfhuxswuowhrzdvriwrxqmcqthvzzzvivbabbnhdbtcfdtgssvmirrcddnytnctcvqplwytxxzxelldhwahalzxvgynaiwjyezhxqhlsqudngekocfvlbqprxqhyhwbaomgqiwkpfguohuvlnhtrsszgacxhhzeppyqwfwabiqzgyzkperiidyunrykopysvlcxwhrcboetjltawdjergalsfvaxncmzoznryumrjmncvhvxqvqhhbznnifkguuiffmlrbmgwtzvnuwlaguixqadkupfhasbbxnwkrvsfhrqanfmvjtzfqodtutkjlxfcogtsjywrdgmzgszjtsmimaelsveayqrwviqwwefeziuaqsqpauxpnzhaxjtkdfvvodniwezskbxfxszyniyzkzxngcfwgjlyrlskmrzxqnptwlilsxybuguafxxkvryyjrnkhhcmxuusitaflaiuxjhyfnzkahlgmaszujqmfdhyppdnpweqanmvzgjfyzjolbmprhnuuxextcaxzicfvsuochprmlf"...) 
at mc_replace_strmem.c:284 #1 0x00000000009f3d7b in fill_innodb_trx_from_cache (cache=0x1462440, thd=0x2a495000, table=0x2a422500) at /home/sbester/build/bzr/mysql-trunk/storage/innobase/handler/i_s.cc:591 #2 0x00000000009f4d7e in trx_i_s_common_fill_table (thd=0x2a495000, tables=0x2a4c3ec0) at /home/sbester/build/bzr/mysql-trunk/storage/innobase/handler/i_s.cc:1238 #3 0x00000000007689f4 in get_schema_tables_result (join=0x30f90c40, executed_place=PROCESSED_BY_JOIN_EXEC) at /home/sbester/build/bzr/mysql-trunk/sql/sql_show.cc:6745 #4 0x0000000000715a76 in JOIN::exec (this=0x30f90c40) at /home/sbester/build/bzr/mysql-trunk/sql/sql_select.cc:2861 #5 0x00000000007185be in mysql_select (thd=0x2a495000, rref_pointer_array=0x2a497590, tables=0x2a4c3ec0, wild_num=1, fields=..., conds=0x0, og_num=0, order=0x0, group=0x0, having=0x0, proc_param=0x0, select_options=2684619520, result=0x30319720, unit=0x2a496d28, select_lex=0x2a497378) at /home/sbester/build/bzr/mysql-trunk/sql/sql_select.cc:3609 #6 0x000000000070e824 in handle_select (thd=0x2a495000, lex=0x2a496c78, result=0x30319720, setup_tables_done_option=0) at /home/sbester/build/bzr/mysql-trunk/sql/sql_select.cc:319 #7 0x00000000006f2306 in execute_sqlcom_select (thd=0x2a495000, all_tables=0x2a4c3ec0) at /home/sbester/build/bzr/mysql-trunk/sql/sql_parse.cc:4557 #8 0x00000000006eaed5 in mysql_execute_command (thd=0x2a495000) at /home/sbester/build/bzr/mysql-trunk/sql/sql_parse.cc:2135 #9 0x00000000006f44ca in mysql_parse (thd=0x2a495000, rawbuf=0x30d80060 "select * from innodb_trx", length=24, parser_state=0x35ea5540) at /home/sbester/build/bzr/mysql-trunk/sql/sql_parse.cc:5597 #10 0x00000000006e864c in dispatch_command (command=COM_QUERY, thd=0x2a495000, packet=0x30bb4e31 "select * from innodb_trx", packet_length=24) at /home/sbester/build/bzr/mysql-trunk/sql/sql_parse.cc:1093 #11 0x00000000006e785f in do_command (thd=0x2a495000) at /home/sbester/build/bzr/mysql-trunk/sql/sql_parse.cc:815 #12 0x00000000006c18de in do_handle_one_connection (thd_arg=0x2a495000) at /home/sbester/build/bzr/mysql-trunk/sql/sql_connect.cc:771 #13 0x00000000006c146f in handle_one_connection (arg=0x2a495000) at /home/sbester/build/bzr/mysql-trunk/sql/sql_connect.cc:707 #14 0x00000030e1807761 in start_thread (arg=0x35ea6710) at pthread_create.c:301 #15 0x00000030e14e14ed in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:115 (gdb) frame 1 #1 0x00000000009f3d7b in fill_innodb_trx_from_cache (cache=0x1462440, thd=0x2a495000, table=0x2a422500) at /home/sbester/build/bzr/mysql-trunk/storage/innobase/handler/i_s.cc:591 591 row->trx_query_cs); (gdb) list 586 if (row->trx_query) { 587 /* store will do appropriate character set 588 conversion check */ 589 fields[IDX_TRX_QUERY]->store( 590 row->trx_query, strlen(row->trx_query), 591 row->trx_query_cs); 592 fields[IDX_TRX_QUERY]->set_notnull(); 593 } else { 594 fields[IDX_TRX_QUERY]->set_null(); 595 } --- storage/innodb_plugin/trx/trx0i_s.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innodb_plugin/trx/trx0i_s.c b/storage/innodb_plugin/trx/trx0i_s.c index 267e91db22e..53f4dcb0bef 100644 --- a/storage/innodb_plugin/trx/trx0i_s.c +++ b/storage/innodb_plugin/trx/trx0i_s.c @@ -508,7 +508,7 @@ fill_trx_row( query[stmt_len] = '\0'; row->trx_query = ha_storage_put_memlim( - cache->storage, stmt, stmt_len + 1, + cache->storage, query, stmt_len + 1, MAX_ALLOWED_FOR_STORAGE(cache)); row->trx_query_cs = innobase_get_charset(trx->mysql_thd); -- cgit v1.2.1 From a88faf2a4af5f60722647a8e01de6aac20305bb7 Mon 
Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Mon, 28 Mar 2011 12:35:50 +0400 Subject: Bug#11764994 57900: CREATE TABLE .. SELECT ASSERTS SCALE >= 0 && PRECISION > 0 && SCALE <= PR Assert fails due to overflow which happens in Item_func_int_val::fix_num_length_and_dec() as geometry functions have max_length value equal to max_field_size(4294967295U). The fix is to skip max_length calculation for some boundary cases. mysql-test/r/func_math.result: test case mysql-test/t/func_math.test: test case sql/item_func.cc: skip max_length calculation if argument max_length is near max_field_size. --- mysql-test/r/func_math.result | 7 +++++++ mysql-test/t/func_math.test | 9 +++++++++ sql/item_func.cc | 7 ++++--- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result index 3a626084c9e..ad0b872145b 100644 --- a/mysql-test/r/func_math.result +++ b/mysql-test/r/func_math.result @@ -511,4 +511,11 @@ t1 CREATE TABLE `t1` ( `C` varchar(23) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; +# +# Bug#11764994 57900: CREATE TABLE .. SELECT ASSERTS SCALE >= 0 && PRECISION > 0 && SCALE <= PR +# +CREATE TABLE t1 SELECT CEIL(LINESTRINGFROMWKB(1) DIV NULL); +DROP TABLE t1; +CREATE TABLE t1 SELECT FLOOR(LINESTRINGFROMWKB(1) DIV NULL); +DROP TABLE t1; End of 5.1 tests diff --git a/mysql-test/t/func_math.test b/mysql-test/t/func_math.test index c8ea11c7490..64b6a3a4ea6 100644 --- a/mysql-test/t/func_math.test +++ b/mysql-test/t/func_math.test @@ -324,4 +324,13 @@ CREATE TABLE t1 SELECT CAST((CASE(('')) WHEN (CONVERT(1, CHAR(1))) THEN (('' / 1 SHOW CREATE TABLE t1; DROP TABLE t1; +--echo # +--echo # Bug#11764994 57900: CREATE TABLE .. SELECT ASSERTS SCALE >= 0 && PRECISION > 0 && SCALE <= PR +--echo # + +CREATE TABLE t1 SELECT CEIL(LINESTRINGFROMWKB(1) DIV NULL); +DROP TABLE t1; +CREATE TABLE t1 SELECT FLOOR(LINESTRINGFROMWKB(1) DIV NULL); +DROP TABLE t1; + --echo End of 5.1 tests diff --git a/sql/item_func.cc b/sql/item_func.cc index d4fd2c94e1d..79fa37bd372 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1803,9 +1803,10 @@ void Item_func_integer::fix_length_and_dec() void Item_func_int_val::fix_num_length_and_dec() { - max_length= args[0]->max_length - (args[0]->decimals ? - args[0]->decimals + 1 : - 0) + 2; + ulonglong tmp_max_length= (ulonglong ) args[0]->max_length - + (args[0]->decimals ? args[0]->decimals + 1 : 0) + 2; + max_length= tmp_max_length > (ulonglong) max_field_size ? + max_field_size : (uint32) tmp_max_length; uint tmp= float_length(decimals); set_if_smaller(max_length,tmp); decimals= 0; -- cgit v1.2.1 From 9ff72a1acfffe95cd5e6d9e06c61c5ee9b0000e0 Mon Sep 17 00:00:00 2001 From: Magne Mahre Date: Mon, 28 Mar 2011 10:47:30 +0200 Subject: Bug#11900714 REMOVE LGPL LICENSED FILES IN MYSQL 5.1 The LGPL license is used in some legacy code, and to adhere to current licensing polity, we remove those files that are no longer used, and reorganize the remaining LGPL code so it will be GPL licensed from now on. Note: This patch only removed LGPL licensed files in MySQL 5.1, and is the second of a set of patches to remove LGPL from all trees. 
(See Bug# 11840513 for details) --- extra/perror.c | 31 ++- include/Makefile.am | 2 +- include/heap.h | 2 +- include/my_compare.h | 89 +++++++ include/my_global.h | 2 +- include/my_handler.h | 128 --------- include/myisam.h | 24 +- libmysql/CMakeLists.txt | 2 +- libmysql/Makefile.shared | 2 +- mysys/CMakeLists.txt | 4 +- mysys/Makefile.am | 6 +- mysys/my_compare.c | 469 +++++++++++++++++++++++++++++++++ mysys/my_gethostbyname.c | 113 -------- mysys/my_handler.c | 598 ------------------------------------------ mysys/my_net.c | 89 +++++++ mysys/my_port.c | 40 --- sql/field.h | 2 + sql/handler.h | 1 - storage/myisam/ft_stopwords.c | 2 +- storage/myisam/mi_check.c | 87 ++++++ storage/myisam/mi_test1.c | 1 + storage/myisam/mi_write.c | 1 + storage/myisam/myisamdef.h | 2 + storage/myisam/sp_test.c | 1 + 24 files changed, 805 insertions(+), 893 deletions(-) create mode 100644 include/my_compare.h delete mode 100644 include/my_handler.h create mode 100644 mysys/my_compare.c delete mode 100644 mysys/my_gethostbyname.c delete mode 100644 mysys/my_handler.c delete mode 100644 mysys/my_port.c diff --git a/extra/perror.c b/extra/perror.c index c32ad2bc791..5162f5e03dc 100644 --- a/extra/perror.c +++ b/extra/perror.c @@ -32,7 +32,6 @@ static my_bool verbose, print_all_codes; #include "../include/my_base.h" #include "../mysys/my_handler_errors.h" -#include "../include/my_handler.h" #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE static my_bool ndb_code; @@ -185,6 +184,36 @@ static const char *get_ha_error_msg(int code) } +/* + Register handler error messages for usage with my_error() + + NOTES + This is safe to call multiple times as my_error_register() + will ignore calls to register already registered error numbers. +*/ +void my_handler_error_register(void) +{ + /* + If you got compilation error here about compile_time_assert array, check + that every HA_ERR_xxx constant has a corresponding error message in + handler_error_messages[] list (check mysys/ma_handler_errors.h and + include/my_base.h). + */ + compile_time_assert(HA_ERR_FIRST + array_elements(handler_error_messages) == + HA_ERR_LAST + 1); + my_error_register(handler_error_messages, HA_ERR_FIRST, + HA_ERR_FIRST+ array_elements(handler_error_messages)-1); +} + + +void my_handler_error_unregister(void) +{ + my_error_unregister(HA_ERR_FIRST, + HA_ERR_FIRST+ array_elements(handler_error_messages)-1); +} + + + #if defined(__WIN__) static my_bool print_win_error_msg(DWORD error, my_bool verbose) { diff --git a/include/Makefile.am b/include/Makefile.am index a3dbc386857..2e29806e0df 100644 --- a/include/Makefile.am +++ b/include/Makefile.am @@ -37,7 +37,7 @@ noinst_HEADERS = config-win.h config-netware.h my_bit.h \ my_nosys.h my_alarm.h queues.h rijndael.h sha1.h \ my_aes.h my_tree.h my_trie.h hash.h thr_alarm.h \ thr_lock.h t_ctype.h violite.h my_md5.h base64.h \ - my_handler.h my_time.h my_vle.h my_user.h \ + my_compare.h my_time.h my_vle.h my_user.h \ my_libwrap.h my_stacktrace.h EXTRA_DIST = mysql.h.pp mysql/plugin.h.pp diff --git a/include/heap.h b/include/heap.h index 4a1c7d419ed..126ce4fa12d 100644 --- a/include/heap.h +++ b/include/heap.h @@ -30,7 +30,7 @@ extern "C" { #include #endif -#include "my_handler.h" +#include "my_compare.h" #include "my_tree.h" /* defines used by heap-funktions */ diff --git a/include/my_compare.h b/include/my_compare.h new file mode 100644 index 00000000000..dedae5c8052 --- /dev/null +++ b/include/my_compare.h @@ -0,0 +1,89 @@ +/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _my_compare_h +#define _my_compare_h + +#include "my_base.h" +#include "m_ctype.h" +#include "myisampack.h" + +typedef struct st_HA_KEYSEG /* Key-portion */ +{ + CHARSET_INFO *charset; + uint32 start; /* Start of key in record */ + uint32 null_pos; /* position to NULL indicator */ + uint16 bit_pos; /* Position to bit part */ + uint16 flag; + uint16 length; /* Keylength */ + uint8 type; /* Type of key (for sort) */ + uint8 language; + uint8 null_bit; /* bitmask to test for NULL */ + uint8 bit_start,bit_end; /* if bit field */ + uint8 bit_length; /* Length of bit part */ +} HA_KEYSEG; + +#define get_key_length(length,key) \ +{ if ((uchar) *(key) != 255) \ + length= (uint) (uchar) *((key)++); \ + else \ + { length=mi_uint2korr((key)+1); (key)+=3; } \ +} + +#define get_key_length_rdonly(length,key) \ +{ if ((uchar) *(key) != 255) \ + length= ((uint) (uchar) *((key))); \ + else \ + { length=mi_uint2korr((key)+1); } \ +} + +#define get_key_pack_length(length,length_pack,key) \ +{ if ((uchar) *(key) != 255) \ + { length= (uint) (uchar) *((key)++); length_pack=1; }\ + else \ + { length=mi_uint2korr((key)+1); (key)+=3; length_pack=3; } \ +} + +#define store_key_length_inc(key,length) \ +{ if ((length) < 255) \ + { *(key)++=(length); } \ + else \ + { *(key)=255; mi_int2store((key)+1,(length)); (key)+=3; } \ +} + +#define get_rec_bits(bit_ptr, bit_ofs, bit_len) \ + (((((uint16) (bit_ptr)[1] << 8) | (uint16) (bit_ptr)[0]) >> (bit_ofs)) & \ + ((1 << (bit_len)) - 1)) + +#define set_rec_bits(bits, bit_ptr, bit_ofs, bit_len) \ +{ \ + (bit_ptr)[0]= ((bit_ptr)[0] & ~(((1 << (bit_len)) - 1) << (bit_ofs))) | \ + ((bits) << (bit_ofs)); \ + if ((bit_ofs) + (bit_len) > 8) \ + (bit_ptr)[1]= ((bit_ptr)[1] & ~((1 << ((bit_len) - 8 + (bit_ofs))) - 1)) | \ + ((bits) >> (8 - (bit_ofs))); \ +} + +#define clr_rec_bits(bit_ptr, bit_ofs, bit_len) \ + set_rec_bits(0, bit_ptr, bit_ofs, bit_len) + +extern int ha_compare_text(CHARSET_INFO *, uchar *, uint, uchar *, uint , + my_bool, my_bool); +extern int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, + register uchar *b, uint key_length, uint nextflag, + uint *diff_pos); + + +#endif /* _my_compare_h */ diff --git a/include/my_global.h b/include/my_global.h index ac5d72249f2..005180dae3b 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -359,7 +359,7 @@ C_MODE_END #define ulonglong2double(A) my_ulonglong2double(A) #define my_off_t2double(A) my_ulonglong2double(A) C_MODE_START -double my_ulonglong2double(unsigned long long A); +inline double my_ulonglong2double(unsigned long long A) { return (double) A; } C_MODE_END #endif /* _AIX */ diff --git a/include/my_handler.h b/include/my_handler.h deleted file mode 100644 index 7dfdb345a89..00000000000 --- a/include/my_handler.h +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright (C) 2002-2006 MySQL AB - - This program is free software; you can redistribute it and/or 
- modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; version 2 - of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, - MA 02111-1307, USA */ - -#ifndef _my_handler_h -#define _my_handler_h - -#include "myisampack.h" -#ifdef __cplusplus -extern "C" { -#endif - -/* - There is a hard limit for the maximum number of keys as there are only - 8 bits in the index file header for the number of keys in a table. - This means that 0..255 keys can exist for a table. The idea of - HA_MAX_POSSIBLE_KEY is to ensure that one can use myisamchk & tools on - a MyISAM table for which one has more keys than MyISAM is normally - compiled for. If you don't have this, you will get a core dump when - running myisamchk compiled for 128 keys on a table with 255 keys. -*/ - -#define HA_MAX_POSSIBLE_KEY 255 /* For myisamchk */ -/* - The following defines can be increased if necessary. - But beware the dependency of MI_MAX_POSSIBLE_KEY_BUFF and HA_MAX_KEY_LENGTH. -*/ - -#define HA_MAX_KEY_LENGTH 1000 /* Max length in bytes */ -#define HA_MAX_KEY_SEG 16 /* Max segments for key */ - -#define HA_MAX_POSSIBLE_KEY_BUFF (HA_MAX_KEY_LENGTH + 24+ 6+6) -#define HA_MAX_KEY_BUFF (HA_MAX_KEY_LENGTH+HA_MAX_KEY_SEG*6+8+8) - -typedef struct st_HA_KEYSEG /* Key-portion */ -{ - CHARSET_INFO *charset; - uint32 start; /* Start of key in record */ - uint32 null_pos; /* position to NULL indicator */ - uint16 bit_pos; /* Position to bit part */ - uint16 flag; - uint16 length; /* Keylength */ - uint8 type; /* Type of key (for sort) */ - uint8 language; - uint8 null_bit; /* bitmask to test for NULL */ - uint8 bit_start,bit_end; /* if bit field */ - uint8 bit_length; /* Length of bit part */ -} HA_KEYSEG; - -#define get_key_length(length,key) \ -{ if (*(uchar*) (key) != 255) \ - length= (uint) *(uchar*) ((key)++); \ - else \ - { length= mi_uint2korr((key)+1); (key)+=3; } \ -} - -#define get_key_length_rdonly(length,key) \ -{ if (*(uchar*) (key) != 255) \ - length= ((uint) *(uchar*) ((key))); \ - else \ - { length= mi_uint2korr((key)+1); } \ -} - -#define get_key_pack_length(length,length_pack,key) \ -{ if (*(uchar*) (key) != 255) \ - { length= (uint) *(uchar*) ((key)++); length_pack= 1; }\ - else \ - { length=mi_uint2korr((key)+1); (key)+= 3; length_pack= 3; } \ -} - -#define store_key_length_inc(key,length) \ -{ if ((length) < 255) \ - { *(key)++= (length); } \ - else \ - { *(key)=255; mi_int2store((key)+1,(length)); (key)+=3; } \ -} - -#define size_to_store_key_length(length) ((length) < 255 ? 
1 : 3) - -#define get_rec_bits(bit_ptr, bit_ofs, bit_len) \ - (((((uint16) (bit_ptr)[1] << 8) | (uint16) (bit_ptr)[0]) >> (bit_ofs)) & \ - ((1 << (bit_len)) - 1)) - -#define set_rec_bits(bits, bit_ptr, bit_ofs, bit_len) \ -{ \ - (bit_ptr)[0]= ((bit_ptr)[0] & ~(((1 << (bit_len)) - 1) << (bit_ofs))) | \ - ((bits) << (bit_ofs)); \ - if ((bit_ofs) + (bit_len) > 8) \ - (bit_ptr)[1]= ((bit_ptr)[1] & ~((1 << ((bit_len) - 8 + (bit_ofs))) - 1)) | \ - ((bits) >> (8 - (bit_ofs))); \ -} - -#define clr_rec_bits(bit_ptr, bit_ofs, bit_len) \ - set_rec_bits(0, bit_ptr, bit_ofs, bit_len) - -extern int ha_compare_text(CHARSET_INFO *, uchar *, uint, uchar *, uint , - my_bool, my_bool); -extern int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, - register uchar *b, uint key_length, uint nextflag, - uint *diff_pos); - -extern HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a); -extern void my_handler_error_register(void); -extern void my_handler_error_unregister(void); -/* - Inside an in-memory data record, memory pointers to pieces of the - record (like BLOBs) are stored in their native byte order and in - this amount of bytes. -*/ -#define portable_sizeof_char_ptr 8 -#ifdef __cplusplus -} -#endif - -#endif /* _my_handler_h */ diff --git a/include/myisam.h b/include/myisam.h index e502daa2f17..09f54ef0019 100644 --- a/include/myisam.h +++ b/include/myisam.h @@ -30,8 +30,30 @@ extern "C" { #ifndef _keycache_h #include "keycache.h" #endif -#include "my_handler.h" #include +#include "my_compare.h" + +/* + There is a hard limit for the maximum number of keys as there are only + 8 bits in the index file header for the number of keys in a table. + This means that 0..255 keys can exist for a table. The idea of + HA_MAX_POSSIBLE_KEY is to ensure that one can use myisamchk & tools on + a MyISAM table for which one has more keys than MyISAM is normally + compiled for. If you don't have this, you will get a core dump when + running myisamchk compiled for 128 keys on a table with 255 keys. +*/ + +#define HA_MAX_POSSIBLE_KEY 255 /* For myisamchk */ +/* + The following defines can be increased if necessary. + But beware the dependency of MI_MAX_POSSIBLE_KEY_BUFF and HA_MAX_KEY_LENGTH. 
+*/ + +#define HA_MAX_KEY_LENGTH 1000 /* Max length in bytes */ +#define HA_MAX_KEY_SEG 16 /* Max segments for key */ + +#define HA_MAX_POSSIBLE_KEY_BUFF (HA_MAX_KEY_LENGTH + 24+ 6+6) +#define HA_MAX_KEY_BUFF (HA_MAX_KEY_LENGTH+HA_MAX_KEY_SEG*6+8+8) /* Limit max keys according to HA_MAX_POSSIBLE_KEY diff --git a/libmysql/CMakeLists.txt b/libmysql/CMakeLists.txt index 55138e4aa06..129b923dd27 100755 --- a/libmysql/CMakeLists.txt +++ b/libmysql/CMakeLists.txt @@ -82,7 +82,7 @@ SET(CLIENT_SOURCES ../mysys/array.c ../strings/bchange.c ../strings/bmove.c ../mysys/mf_wcomp.c ../mysys/mulalloc.c ../mysys/my_access.c ../mysys/my_alloc.c ../mysys/my_chsize.c ../mysys/my_compress.c ../mysys/my_create.c ../mysys/my_delete.c ../mysys/my_div.c ../mysys/my_error.c ../mysys/my_file.c - ../mysys/my_fopen.c ../mysys/my_fstream.c ../mysys/my_gethostbyname.c + ../mysys/my_fopen.c ../mysys/my_fstream.c ../mysys/my_getopt.c ../mysys/my_getwd.c ../mysys/my_init.c ../mysys/my_lib.c ../mysys/my_malloc.c ../mysys/my_messnc.c ../mysys/my_net.c ../mysys/my_once.c ../mysys/my_open.c ../mysys/my_pread.c ../mysys/my_pthread.c ../mysys/my_read.c diff --git a/libmysql/Makefile.shared b/libmysql/Makefile.shared index a27949eb7ca..7249bcab19a 100644 --- a/libmysql/Makefile.shared +++ b/libmysql/Makefile.shared @@ -66,7 +66,7 @@ mysysobjects1 = my_init.lo my_static.lo my_malloc.lo my_realloc.lo \ charset.lo charset-def.lo hash.lo mf_iocache.lo \ mf_iocache2.lo my_seek.lo my_sleep.lo \ my_pread.lo mf_cache.lo md5.lo sha1.lo \ - my_getopt.lo my_gethostbyname.lo my_port.lo \ + my_getopt.lo \ my_rename.lo my_chsize.lo my_sync.lo my_getsystime.lo sqlobjects = net.lo sql_cmn_objects = pack.lo client.lo my_time.lo diff --git a/mysys/CMakeLists.txt b/mysys/CMakeLists.txt index 7afb800643c..9db8a40407e 100755 --- a/mysys/CMakeLists.txt +++ b/mysys/CMakeLists.txt @@ -33,8 +33,8 @@ SET(MYSYS_SOURCES array.c charset-def.c charset.c checksum.c default.c default_ mf_tempfile.c mf_unixpath.c mf_wcomp.c mf_wfile.c mulalloc.c my_access.c my_aes.c my_alarm.c my_alloc.c my_append.c my_bit.c my_bitmap.c my_chsize.c my_clock.c my_compress.c my_conio.c my_copy.c my_crc32.c my_create.c my_delete.c - my_div.c my_error.c my_file.c my_fopen.c my_fstream.c my_gethostbyname.c - my_gethwaddr.c my_getopt.c my_getsystime.c my_getwd.c my_handler.c my_init.c + my_div.c my_error.c my_file.c my_fopen.c my_fstream.c + my_gethwaddr.c my_getopt.c my_getsystime.c my_getwd.c my_compare.c my_init.c my_lib.c my_lock.c my_lockmem.c my_malloc.c my_messnc.c my_mkdir.c my_mmap.c my_net.c my_once.c my_open.c my_pread.c my_pthread.c my_quick.c my_read.c my_realloc.c my_redel.c my_rename.c my_seek.c my_sleep.c diff --git a/mysys/Makefile.am b/mysys/Makefile.am index e4c71f66079..00575375c11 100644 --- a/mysys/Makefile.am +++ b/mysys/Makefile.am @@ -46,10 +46,10 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \ my_sync.c my_getopt.c my_mkdir.c \ default_modify.c default.c \ my_compress.c checksum.c \ - my_net.c my_port.c my_sleep.c \ + my_net.c my_sleep.c \ charset.c charset-def.c my_bitmap.c my_bit.c md5.c \ - my_gethostbyname.c rijndael.c my_aes.c sha1.c \ - my_handler.c my_netware.c my_largepage.c \ + rijndael.c my_aes.c sha1.c \ + my_compare.c my_netware.c my_largepage.c \ my_memmem.c stacktrace.c \ my_windac.c my_access.c base64.c my_libwrap.c diff --git a/mysys/my_compare.c b/mysys/my_compare.c new file mode 100644 index 00000000000..8d33861d91c --- /dev/null +++ b/mysys/my_compare.c @@ -0,0 +1,469 @@ +/* Copyright (c) 2011 Oracle and/or its 
affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "my_compare.h" + +int ha_compare_text(CHARSET_INFO *charset_info, uchar *a, uint a_length, + uchar *b, uint b_length, my_bool part_key, + my_bool skip_end_space) +{ + if (!part_key) + return charset_info->coll->strnncollsp(charset_info, a, a_length, + b, b_length, (my_bool)!skip_end_space); + return charset_info->coll->strnncoll(charset_info, a, a_length, + b, b_length, part_key); +} + + +static int compare_bin(uchar *a, uint a_length, uchar *b, uint b_length, + my_bool part_key, my_bool skip_end_space) +{ + uint length= min(a_length,b_length); + uchar *end= a+ length; + int flag; + + while (a < end) + if ((flag= (int) *a++ - (int) *b++)) + return flag; + if (part_key && b_length < a_length) + return 0; + if (skip_end_space && a_length != b_length) + { + int swap= 1; + /* + We are using space compression. We have to check if longer key + has next character < ' ', in which case it's less than the shorter + key that has an implicite space afterwards. + + This code is identical to the one in + strings/ctype-simple.c:my_strnncollsp_simple + */ + if (a_length < b_length) + { + /* put shorter key in a */ + a_length= b_length; + a= b; + swap= -1; /* swap sign of result */ + } + for (end= a + a_length-length; a < end ; a++) + { + if (*a != ' ') + return (*a < ' ') ? -swap : swap; + } + return 0; + } + return (int) (a_length-b_length); +} + + +/* + Compare two keys + + SYNOPSIS + ha_key_cmp() + keyseg Array of key segments of key to compare + a First key to compare, in format from _mi_pack_key() + This is normally key specified by user + b Second key to compare. This is always from a row + key_length Length of key to compare. This can be shorter than + a to just compare sub keys + next_flag How keys should be compared + If bit SEARCH_FIND is not set the keys includes the row + position and this should also be compared + diff_pos OUT Number of first keypart where values differ, counting + from one. + diff_pos[1] OUT (b + diff_pos[1]) points to first value in tuple b + that is different from corresponding value in tuple a. + + EXAMPLES + Example1: if the function is called for tuples + ('aaa','bbb') and ('eee','fff'), then + diff_pos[0] = 1 (as 'aaa' != 'eee') + diff_pos[1] = 0 (offset from beggining of tuple b to 'eee' keypart). 
+ + Example2: if the index function is called for tuples + ('aaa','bbb') and ('aaa','fff'), + diff_pos[0] = 2 (as 'aaa' != 'eee') + diff_pos[1] = 3 (offset from beggining of tuple b to 'fff' keypart, + here we assume that first key part is CHAR(3) NOT NULL) + + NOTES + Number-keys can't be splited + + RETURN VALUES + <0 If a < b + 0 If a == b + >0 If a > b +*/ + +#define FCMP(A,B) ((int) (A) - (int) (B)) + +int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, + register uchar *b, uint key_length, uint nextflag, + uint *diff_pos) +{ + int flag; + int16 s_1,s_2; + int32 l_1,l_2; + uint32 u_1,u_2; + float f_1,f_2; + double d_1,d_2; + uint next_key_length; + uchar *orig_b= b; + + *diff_pos=0; + for ( ; (int) key_length >0 ; key_length=next_key_length, keyseg++) + { + uchar *end; + uint piks=! (keyseg->flag & HA_NO_SORT); + (*diff_pos)++; + diff_pos[1]= (uint)(b - orig_b); + + /* Handle NULL part */ + if (keyseg->null_bit) + { + key_length--; + if (*a != *b && piks) + { + flag = (int) *a - (int) *b; + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + } + b++; + if (!*a++) /* If key was NULL */ + { + if (nextflag == (SEARCH_FIND | SEARCH_UPDATE)) + nextflag=SEARCH_SAME; /* Allow duplicate keys */ + else if (nextflag & SEARCH_NULL_ARE_NOT_EQUAL) + { + /* + This is only used from mi_check() to calculate cardinality. + It can't be used when searching for a key as this would cause + compare of (a,b) and (b,a) to return the same value. + */ + return -1; + } + next_key_length=key_length; + continue; /* To next key part */ + } + } + end= a+ min(keyseg->length,key_length); + next_key_length=key_length-keyseg->length; + + switch ((enum ha_base_keytype) keyseg->type) { + case HA_KEYTYPE_TEXT: /* Ascii; Key is converted */ + if (keyseg->flag & HA_SPACE_PACK) + { + int a_length,b_length,pack_length; + get_key_length(a_length,a); + get_key_pack_length(b_length,pack_length,b); + next_key_length=key_length-b_length-pack_length; + + if (piks && + (flag=ha_compare_text(keyseg->charset,a,a_length,b,b_length, + (my_bool) ((nextflag & SEARCH_PREFIX) && + next_key_length <= 0), + (my_bool)!(nextflag & SEARCH_PREFIX)))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a+=a_length; + b+=b_length; + break; + } + else + { + uint length=(uint) (end-a), a_length=length, b_length=length; + if (piks && + (flag= ha_compare_text(keyseg->charset, a, a_length, b, b_length, + (my_bool) ((nextflag & SEARCH_PREFIX) && + next_key_length <= 0), + (my_bool)!(nextflag & SEARCH_PREFIX)))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a=end; + b+=length; + } + break; + case HA_KEYTYPE_BINARY: + case HA_KEYTYPE_BIT: + if (keyseg->flag & HA_SPACE_PACK) + { + int a_length,b_length,pack_length; + get_key_length(a_length,a); + get_key_pack_length(b_length,pack_length,b); + next_key_length=key_length-b_length-pack_length; + + if (piks && + (flag=compare_bin(a,a_length,b,b_length, + (my_bool) ((nextflag & SEARCH_PREFIX) && + next_key_length <= 0),1))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a+=a_length; + b+=b_length; + break; + } + else + { + uint length=keyseg->length; + if (piks && + (flag=compare_bin(a,length,b,length, + (my_bool) ((nextflag & SEARCH_PREFIX) && + next_key_length <= 0),0))) + return ((keyseg->flag & HA_REVERSE_SORT) ? 
-flag : flag); + a+=length; + b+=length; + } + break; + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: + { + int a_length,b_length,pack_length; + get_key_length(a_length,a); + get_key_pack_length(b_length,pack_length,b); + next_key_length=key_length-b_length-pack_length; + + if (piks && + (flag= ha_compare_text(keyseg->charset,a,a_length,b,b_length, + (my_bool) ((nextflag & SEARCH_PREFIX) && + next_key_length <= 0), + (my_bool) ((nextflag & (SEARCH_FIND | + SEARCH_UPDATE)) == + SEARCH_FIND && + ! (keyseg->flag & + HA_END_SPACE_ARE_EQUAL))))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a+= a_length; + b+= b_length; + break; + } + break; + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: + { + int a_length,b_length,pack_length; + get_key_length(a_length,a); + get_key_pack_length(b_length,pack_length,b); + next_key_length=key_length-b_length-pack_length; + + if (piks && + (flag=compare_bin(a,a_length,b,b_length, + (my_bool) ((nextflag & SEARCH_PREFIX) && + next_key_length <= 0), 0))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a+=a_length; + b+=b_length; + break; + } + break; + case HA_KEYTYPE_INT8: + { + int i_1= (int) *((signed char*) a); + int i_2= (int) *((signed char*) b); + if (piks && (flag = CMP_NUM(i_1,i_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b++; + break; + } + case HA_KEYTYPE_SHORT_INT: + s_1= mi_sint2korr(a); + s_2= mi_sint2korr(b); + if (piks && (flag = CMP_NUM(s_1,s_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 2; /* sizeof(short int); */ + break; + case HA_KEYTYPE_USHORT_INT: + { + uint16 us_1,us_2; + us_1= mi_sint2korr(a); + us_2= mi_sint2korr(b); + if (piks && (flag = CMP_NUM(us_1,us_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+=2; /* sizeof(short int); */ + break; + } + case HA_KEYTYPE_LONG_INT: + l_1= mi_sint4korr(a); + l_2= mi_sint4korr(b); + if (piks && (flag = CMP_NUM(l_1,l_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 4; /* sizeof(long int); */ + break; + case HA_KEYTYPE_ULONG_INT: + u_1= mi_sint4korr(a); + u_2= mi_sint4korr(b); + if (piks && (flag = CMP_NUM(u_1,u_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 4; /* sizeof(long int); */ + break; + case HA_KEYTYPE_INT24: + l_1=mi_sint3korr(a); + l_2=mi_sint3korr(b); + if (piks && (flag = CMP_NUM(l_1,l_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 3; + break; + case HA_KEYTYPE_UINT24: + l_1=mi_uint3korr(a); + l_2=mi_uint3korr(b); + if (piks && (flag = CMP_NUM(l_1,l_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 3; + break; + case HA_KEYTYPE_FLOAT: + mi_float4get(f_1,a); + mi_float4get(f_2,b); + /* + The following may give a compiler warning about floating point + comparison not being safe, but this is ok in this context as + we are bascily doing sorting + */ + if (piks && (flag = CMP_NUM(f_1,f_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 4; /* sizeof(float); */ + break; + case HA_KEYTYPE_DOUBLE: + mi_float8get(d_1,a); + mi_float8get(d_2,b); + /* + The following may give a compiler warning about floating point + comparison not being safe, but this is ok in this context as + we are bascily doing sorting + */ + if (piks && (flag = CMP_NUM(d_1,d_2))) + return ((keyseg->flag & HA_REVERSE_SORT) ? 
-flag : flag); + a= end; + b+= 8; /* sizeof(double); */ + break; + case HA_KEYTYPE_NUM: /* Numeric key */ + { + int swap_flag= 0; + int alength,blength; + + if (keyseg->flag & HA_REVERSE_SORT) + { + swap_variables(uchar*, a, b); + swap_flag=1; /* Remember swap of a & b */ + end= a+ (int) (end-b); + } + if (keyseg->flag & HA_SPACE_PACK) + { + alength= *a++; blength= *b++; + end=a+alength; + next_key_length=key_length-blength-1; + } + else + { + alength= (int) (end-a); + blength=keyseg->length; + /* remove pre space from keys */ + for ( ; alength && *a == ' ' ; a++, alength--) ; + for ( ; blength && *b == ' ' ; b++, blength--) ; + } + if (piks) + { + if (*a == '-') + { + if (*b != '-') + return -1; + a++; b++; + swap_variables(uchar*, a, b); + swap_variables(int, alength, blength); + swap_flag=1-swap_flag; + alength--; blength--; + end=a+alength; + } + else if (*b == '-') + return 1; + while (alength && (*a == '+' || *a == '0')) + { + a++; alength--; + } + while (blength && (*b == '+' || *b == '0')) + { + b++; blength--; + } + if (alength != blength) + return (alength < blength) ? -1 : 1; + while (a < end) + if (*a++ != *b++) + return ((int) a[-1] - (int) b[-1]); + } + else + { + b+=(end-a); + a=end; + } + + if (swap_flag) /* Restore pointers */ + swap_variables(uchar*, a, b); + break; + } +#ifdef HAVE_LONG_LONG + case HA_KEYTYPE_LONGLONG: + { + longlong ll_a,ll_b; + ll_a= mi_sint8korr(a); + ll_b= mi_sint8korr(b); + if (piks && (flag = CMP_NUM(ll_a,ll_b))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 8; + break; + } + case HA_KEYTYPE_ULONGLONG: + { + ulonglong ll_a,ll_b; + ll_a= mi_uint8korr(a); + ll_b= mi_uint8korr(b); + if (piks && (flag = CMP_NUM(ll_a,ll_b))) + return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); + a= end; + b+= 8; + break; + } +#endif + case HA_KEYTYPE_END: /* Ready */ + goto end; /* diff_pos is incremented */ + } + } + (*diff_pos)++; +end: + if (!(nextflag & SEARCH_FIND)) + { + uint i; + if (nextflag & (SEARCH_NO_FIND | SEARCH_LAST)) /* Find record after key */ + return (nextflag & (SEARCH_BIGGER | SEARCH_LAST)) ? -1 : 1; + flag=0; + for (i=keyseg->length ; i-- > 0 ; ) + { + if (*a++ != *b++) + { + flag= FCMP(a[-1],b[-1]); + break; + } + } + if (nextflag & SEARCH_SAME) + return (flag); /* read same */ + if (nextflag & SEARCH_BIGGER) + return (flag <= 0 ? -1 : 1); /* read next */ + return (flag < 0 ? -1 : 1); /* read previous */ + } + return 0; +} /* ha_key_cmp */ diff --git a/mysys/my_gethostbyname.c b/mysys/my_gethostbyname.c deleted file mode 100644 index 12cf90271dd..00000000000 --- a/mysys/my_gethostbyname.c +++ /dev/null @@ -1,113 +0,0 @@ -/* Copyright (C) 2002, 2004 MySQL AB - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; version 2 - of the License. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, - MA 02111-1307, USA */ - -/* Thread safe version of gethostbyname_r() */ - -#include "mysys_priv.h" -#if !defined(__WIN__) -#include -#endif -#include - -/* This file is not needed if my_gethostbyname_r is a macro */ -#if !defined(my_gethostbyname_r) - -/* - Emulate SOLARIS style calls, not because it's better, but just to make the - usage of getbostbyname_r simpler. -*/ - -#if defined(HAVE_GETHOSTBYNAME_R) - -#if defined(HAVE_GETHOSTBYNAME_R_GLIBC2_STYLE) - -struct hostent *my_gethostbyname_r(const char *name, - struct hostent *result, char *buffer, - int buflen, int *h_errnop) -{ - struct hostent *hp; - DBUG_ASSERT((size_t) buflen >= sizeof(*result)); - if (gethostbyname_r(name,result, buffer, (size_t) buflen, &hp, h_errnop)) - return 0; - return hp; -} - -#elif defined(HAVE_GETHOSTBYNAME_R_RETURN_INT) - -struct hostent *my_gethostbyname_r(const char *name, - struct hostent *result, char *buffer, - int buflen, int *h_errnop) -{ - if (gethostbyname_r(name,result,(struct hostent_data *) buffer) == -1) - { - *h_errnop= errno; - return 0; - } - return result; -} - -#else - -/* gethostbyname_r with similar interface as gethostbyname() */ - -struct hostent *my_gethostbyname_r(const char *name, - struct hostent *result, char *buffer, - int buflen, int *h_errnop) -{ - struct hostent *hp; - DBUG_ASSERT(buflen >= sizeof(struct hostent_data)); - hp= gethostbyname_r(name,result,(struct hostent_data *) buffer); - *h_errnop= errno; - return hp; -} -#endif /* GLIBC2_STYLE_GETHOSTBYNAME_R */ - -#else /* !HAVE_GETHOSTBYNAME_R */ - -#ifdef THREAD -extern pthread_mutex_t LOCK_gethostbyname_r; -#endif - -/* - No gethostbyname_r() function exists. - In this case we have to keep a mutex over the call to ensure that no - other thread is going to reuse the internal memory. - - The user is responsible to call my_gethostbyname_r_free() when he - is finished with the structure. -*/ - -struct hostent *my_gethostbyname_r(const char *name, - struct hostent *res __attribute__((unused)), - char *buffer __attribute__((unused)), - int buflen __attribute__((unused)), - int *h_errnop) -{ - struct hostent *hp; - pthread_mutex_lock(&LOCK_gethostbyname_r); - hp= gethostbyname(name); - *h_errnop= h_errno; - return hp; -} - -void my_gethostbyname_r_free() -{ - pthread_mutex_unlock(&LOCK_gethostbyname_r); -} - -#endif /* !HAVE_GETHOSTBYNAME_R */ -#endif /* !my_gethostbyname_r */ diff --git a/mysys/my_handler.c b/mysys/my_handler.c deleted file mode 100644 index 7aa8177040d..00000000000 --- a/mysys/my_handler.c +++ /dev/null @@ -1,598 +0,0 @@ -/* Copyright (C) 2002-2006 MySQL AB - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; version 2 - of the License. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, - MA 02111-1307, USA */ - -#include -#include -#include -#include -#include - -#include "my_handler_errors.h" - -int ha_compare_text(CHARSET_INFO *charset_info, uchar *a, uint a_length, - uchar *b, uint b_length, my_bool part_key, - my_bool skip_end_space) -{ - if (!part_key) - return charset_info->coll->strnncollsp(charset_info, a, a_length, - b, b_length, (my_bool)!skip_end_space); - return charset_info->coll->strnncoll(charset_info, a, a_length, - b, b_length, part_key); -} - - -static int compare_bin(uchar *a, uint a_length, uchar *b, uint b_length, - my_bool part_key, my_bool skip_end_space) -{ - uint length= min(a_length,b_length); - uchar *end= a+ length; - int flag; - - while (a < end) - if ((flag= (int) *a++ - (int) *b++)) - return flag; - if (part_key && b_length < a_length) - return 0; - if (skip_end_space && a_length != b_length) - { - int swap= 1; - /* - We are using space compression. We have to check if longer key - has next character < ' ', in which case it's less than the shorter - key that has an implicite space afterwards. - - This code is identical to the one in - strings/ctype-simple.c:my_strnncollsp_simple - */ - if (a_length < b_length) - { - /* put shorter key in a */ - a_length= b_length; - a= b; - swap= -1; /* swap sign of result */ - } - for (end= a + a_length-length; a < end ; a++) - { - if (*a != ' ') - return (*a < ' ') ? -swap : swap; - } - return 0; - } - return (int) (a_length-b_length); -} - - -/* - Compare two keys - - SYNOPSIS - ha_key_cmp() - keyseg Array of key segments of key to compare - a First key to compare, in format from _mi_pack_key() - This is normally key specified by user - b Second key to compare. This is always from a row - key_length Length of key to compare. This can be shorter than - a to just compare sub keys - next_flag How keys should be compared - If bit SEARCH_FIND is not set the keys includes the row - position and this should also be compared - diff_pos OUT Number of first keypart where values differ, counting - from one. - diff_pos[1] OUT (b + diff_pos[1]) points to first value in tuple b - that is different from corresponding value in tuple a. - - EXAMPLES - Example1: if the function is called for tuples - ('aaa','bbb') and ('eee','fff'), then - diff_pos[0] = 1 (as 'aaa' != 'eee') - diff_pos[1] = 0 (offset from beggining of tuple b to 'eee' keypart). - - Example2: if the index function is called for tuples - ('aaa','bbb') and ('aaa','fff'), - diff_pos[0] = 2 (as 'aaa' != 'eee') - diff_pos[1] = 3 (offset from beggining of tuple b to 'fff' keypart, - here we assume that first key part is CHAR(3) NOT NULL) - - NOTES - Number-keys can't be splited - - RETURN VALUES - <0 If a < b - 0 If a == b - >0 If a > b -*/ - -#define FCMP(A,B) ((int) (A) - (int) (B)) - -int ha_key_cmp(register HA_KEYSEG *keyseg, register uchar *a, - register uchar *b, uint key_length, uint nextflag, - uint *diff_pos) -{ - int flag; - int16 s_1,s_2; - int32 l_1,l_2; - uint32 u_1,u_2; - float f_1,f_2; - double d_1,d_2; - uint next_key_length; - uchar *orig_b= b; - - *diff_pos=0; - for ( ; (int) key_length >0 ; key_length=next_key_length, keyseg++) - { - uchar *end; - uint piks=! 
(keyseg->flag & HA_NO_SORT); - (*diff_pos)++; - diff_pos[1]= (uint)(b - orig_b); - - /* Handle NULL part */ - if (keyseg->null_bit) - { - key_length--; - if (*a != *b && piks) - { - flag = (int) *a - (int) *b; - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - } - b++; - if (!*a++) /* If key was NULL */ - { - if (nextflag == (SEARCH_FIND | SEARCH_UPDATE)) - nextflag=SEARCH_SAME; /* Allow duplicate keys */ - else if (nextflag & SEARCH_NULL_ARE_NOT_EQUAL) - { - /* - This is only used from mi_check() to calculate cardinality. - It can't be used when searching for a key as this would cause - compare of (a,b) and (b,a) to return the same value. - */ - return -1; - } - next_key_length=key_length; - continue; /* To next key part */ - } - } - end= a+ min(keyseg->length,key_length); - next_key_length=key_length-keyseg->length; - - switch ((enum ha_base_keytype) keyseg->type) { - case HA_KEYTYPE_TEXT: /* Ascii; Key is converted */ - if (keyseg->flag & HA_SPACE_PACK) - { - int a_length,b_length,pack_length; - get_key_length(a_length,a); - get_key_pack_length(b_length,pack_length,b); - next_key_length=key_length-b_length-pack_length; - - if (piks && - (flag=ha_compare_text(keyseg->charset,a,a_length,b,b_length, - (my_bool) ((nextflag & SEARCH_PREFIX) && - next_key_length <= 0), - (my_bool)!(nextflag & SEARCH_PREFIX)))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a+=a_length; - b+=b_length; - break; - } - else - { - uint length=(uint) (end-a), a_length=length, b_length=length; - if (piks && - (flag= ha_compare_text(keyseg->charset, a, a_length, b, b_length, - (my_bool) ((nextflag & SEARCH_PREFIX) && - next_key_length <= 0), - (my_bool)!(nextflag & SEARCH_PREFIX)))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a=end; - b+=length; - } - break; - case HA_KEYTYPE_BINARY: - case HA_KEYTYPE_BIT: - if (keyseg->flag & HA_SPACE_PACK) - { - int a_length,b_length,pack_length; - get_key_length(a_length,a); - get_key_pack_length(b_length,pack_length,b); - next_key_length=key_length-b_length-pack_length; - - if (piks && - (flag=compare_bin(a,a_length,b,b_length, - (my_bool) ((nextflag & SEARCH_PREFIX) && - next_key_length <= 0),1))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a+=a_length; - b+=b_length; - break; - } - else - { - uint length=keyseg->length; - if (piks && - (flag=compare_bin(a,length,b,length, - (my_bool) ((nextflag & SEARCH_PREFIX) && - next_key_length <= 0),0))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a+=length; - b+=length; - } - break; - case HA_KEYTYPE_VARTEXT1: - case HA_KEYTYPE_VARTEXT2: - { - int a_length,b_length,pack_length; - get_key_length(a_length,a); - get_key_pack_length(b_length,pack_length,b); - next_key_length=key_length-b_length-pack_length; - - if (piks && - (flag= ha_compare_text(keyseg->charset,a,a_length,b,b_length, - (my_bool) ((nextflag & SEARCH_PREFIX) && - next_key_length <= 0), - (my_bool) ((nextflag & (SEARCH_FIND | - SEARCH_UPDATE)) == - SEARCH_FIND && - ! (keyseg->flag & - HA_END_SPACE_ARE_EQUAL))))) - return ((keyseg->flag & HA_REVERSE_SORT) ? 
-flag : flag); - a+= a_length; - b+= b_length; - break; - } - break; - case HA_KEYTYPE_VARBINARY1: - case HA_KEYTYPE_VARBINARY2: - { - int a_length,b_length,pack_length; - get_key_length(a_length,a); - get_key_pack_length(b_length,pack_length,b); - next_key_length=key_length-b_length-pack_length; - - if (piks && - (flag=compare_bin(a,a_length,b,b_length, - (my_bool) ((nextflag & SEARCH_PREFIX) && - next_key_length <= 0), 0))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a+=a_length; - b+=b_length; - } - break; - case HA_KEYTYPE_INT8: - { - int i_1= (int) *((signed char*) a); - int i_2= (int) *((signed char*) b); - if (piks && (flag = CMP_NUM(i_1,i_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b++; - break; - } - case HA_KEYTYPE_SHORT_INT: - s_1= mi_sint2korr(a); - s_2= mi_sint2korr(b); - if (piks && (flag = CMP_NUM(s_1,s_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 2; /* sizeof(short int); */ - break; - case HA_KEYTYPE_USHORT_INT: - { - uint16 us_1,us_2; - us_1= mi_sint2korr(a); - us_2= mi_sint2korr(b); - if (piks && (flag = CMP_NUM(us_1,us_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+=2; /* sizeof(short int); */ - break; - } - case HA_KEYTYPE_LONG_INT: - l_1= mi_sint4korr(a); - l_2= mi_sint4korr(b); - if (piks && (flag = CMP_NUM(l_1,l_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 4; /* sizeof(long int); */ - break; - case HA_KEYTYPE_ULONG_INT: - u_1= mi_sint4korr(a); - u_2= mi_sint4korr(b); - if (piks && (flag = CMP_NUM(u_1,u_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 4; /* sizeof(long int); */ - break; - case HA_KEYTYPE_INT24: - l_1=mi_sint3korr(a); - l_2=mi_sint3korr(b); - if (piks && (flag = CMP_NUM(l_1,l_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 3; - break; - case HA_KEYTYPE_UINT24: - l_1=mi_uint3korr(a); - l_2=mi_uint3korr(b); - if (piks && (flag = CMP_NUM(l_1,l_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 3; - break; - case HA_KEYTYPE_FLOAT: - mi_float4get(f_1,a); - mi_float4get(f_2,b); - /* - The following may give a compiler warning about floating point - comparison not being safe, but this is ok in this context as - we are bascily doing sorting - */ - if (piks && (flag = CMP_NUM(f_1,f_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 4; /* sizeof(float); */ - break; - case HA_KEYTYPE_DOUBLE: - mi_float8get(d_1,a); - mi_float8get(d_2,b); - /* - The following may give a compiler warning about floating point - comparison not being safe, but this is ok in this context as - we are bascily doing sorting - */ - if (piks && (flag = CMP_NUM(d_1,d_2))) - return ((keyseg->flag & HA_REVERSE_SORT) ? 
-flag : flag); - a= end; - b+= 8; /* sizeof(double); */ - break; - case HA_KEYTYPE_NUM: /* Numeric key */ - { - int swap_flag= 0; - int alength,blength; - - if (keyseg->flag & HA_REVERSE_SORT) - { - swap_variables(uchar*, a, b); - swap_flag=1; /* Remember swap of a & b */ - end= a+ (int) (end-b); - } - if (keyseg->flag & HA_SPACE_PACK) - { - alength= *a++; blength= *b++; - end=a+alength; - next_key_length=key_length-blength-1; - } - else - { - alength= (int) (end-a); - blength=keyseg->length; - /* remove pre space from keys */ - for ( ; alength && *a == ' ' ; a++, alength--) ; - for ( ; blength && *b == ' ' ; b++, blength--) ; - } - if (piks) - { - if (*a == '-') - { - if (*b != '-') - return -1; - a++; b++; - swap_variables(uchar*, a, b); - swap_variables(int, alength, blength); - swap_flag=1-swap_flag; - alength--; blength--; - end=a+alength; - } - else if (*b == '-') - return 1; - while (alength && (*a == '+' || *a == '0')) - { - a++; alength--; - } - while (blength && (*b == '+' || *b == '0')) - { - b++; blength--; - } - if (alength != blength) - return (alength < blength) ? -1 : 1; - while (a < end) - if (*a++ != *b++) - return ((int) a[-1] - (int) b[-1]); - } - else - { - b+=(end-a); - a=end; - } - - if (swap_flag) /* Restore pointers */ - swap_variables(uchar*, a, b); - break; - } -#ifdef HAVE_LONG_LONG - case HA_KEYTYPE_LONGLONG: - { - longlong ll_a,ll_b; - ll_a= mi_sint8korr(a); - ll_b= mi_sint8korr(b); - if (piks && (flag = CMP_NUM(ll_a,ll_b))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 8; - break; - } - case HA_KEYTYPE_ULONGLONG: - { - ulonglong ll_a,ll_b; - ll_a= mi_uint8korr(a); - ll_b= mi_uint8korr(b); - if (piks && (flag = CMP_NUM(ll_a,ll_b))) - return ((keyseg->flag & HA_REVERSE_SORT) ? -flag : flag); - a= end; - b+= 8; - break; - } -#endif - case HA_KEYTYPE_END: /* Ready */ - goto end; /* diff_pos is incremented */ - } - } - (*diff_pos)++; -end: - if (!(nextflag & SEARCH_FIND)) - { - uint i; - if (nextflag & (SEARCH_NO_FIND | SEARCH_LAST)) /* Find record after key */ - return (nextflag & (SEARCH_BIGGER | SEARCH_LAST)) ? -1 : 1; - flag=0; - for (i=keyseg->length ; i-- > 0 ; ) - { - if (*a++ != *b++) - { - flag= FCMP(a[-1],b[-1]); - break; - } - } - if (nextflag & SEARCH_SAME) - return (flag); /* read same */ - if (nextflag & SEARCH_BIGGER) - return (flag <= 0 ? -1 : 1); /* read next */ - return (flag < 0 ? -1 : 1); /* read previous */ - } - return 0; -} /* ha_key_cmp */ - - -/* - Find the first NULL value in index-suffix values tuple - - SYNOPSIS - ha_find_null() - keyseg Array of keyparts for key suffix - a Key suffix value tuple - - DESCRIPTION - Find the first NULL value in index-suffix values tuple. - - TODO - Consider optimizing this function or its use so we don't search for - NULL values in completely NOT NULL index suffixes. - - RETURN - First key part that has NULL as value in values tuple, or the last key - part (with keyseg->type==HA_TYPE_END) if values tuple doesn't contain - NULLs. 
-*/ - -HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a) -{ - for (; (enum ha_base_keytype) keyseg->type != HA_KEYTYPE_END; keyseg++) - { - uchar *end; - if (keyseg->null_bit) - { - if (!*a++) - return keyseg; - } - end= a+ keyseg->length; - - switch ((enum ha_base_keytype) keyseg->type) { - case HA_KEYTYPE_TEXT: - case HA_KEYTYPE_BINARY: - case HA_KEYTYPE_BIT: - if (keyseg->flag & HA_SPACE_PACK) - { - int a_length; - get_key_length(a_length, a); - a += a_length; - break; - } - else - a= end; - break; - case HA_KEYTYPE_VARTEXT1: - case HA_KEYTYPE_VARTEXT2: - case HA_KEYTYPE_VARBINARY1: - case HA_KEYTYPE_VARBINARY2: - { - int a_length; - get_key_length(a_length, a); - a+= a_length; - break; - } - case HA_KEYTYPE_NUM: - if (keyseg->flag & HA_SPACE_PACK) - { - int alength= *a++; - end= a+alength; - } - a= end; - break; - case HA_KEYTYPE_INT8: - case HA_KEYTYPE_SHORT_INT: - case HA_KEYTYPE_USHORT_INT: - case HA_KEYTYPE_LONG_INT: - case HA_KEYTYPE_ULONG_INT: - case HA_KEYTYPE_INT24: - case HA_KEYTYPE_UINT24: -#ifdef HAVE_LONG_LONG - case HA_KEYTYPE_LONGLONG: - case HA_KEYTYPE_ULONGLONG: -#endif - case HA_KEYTYPE_FLOAT: - case HA_KEYTYPE_DOUBLE: - a= end; - break; - case HA_KEYTYPE_END: /* purecov: inspected */ - /* keep compiler happy */ - DBUG_ASSERT(0); - break; - } - } - return keyseg; -} - - - -/* - Register handler error messages for usage with my_error() - - NOTES - This is safe to call multiple times as my_error_register() - will ignore calls to register already registered error numbers. -*/ - - -void my_handler_error_register(void) -{ - /* - If you got compilation error here about compile_time_assert array, check - that every HA_ERR_xxx constant has a corresponding error message in - handler_error_messages[] list (check mysys/ma_handler_errors.h and - include/my_base.h). - */ - compile_time_assert(HA_ERR_FIRST + array_elements(handler_error_messages) == - HA_ERR_LAST + 1); - my_error_register(handler_error_messages, HA_ERR_FIRST, - HA_ERR_FIRST+ array_elements(handler_error_messages)-1); -} - - -void my_handler_error_unregister(void) -{ - my_error_unregister(HA_ERR_FIRST, - HA_ERR_FIRST+ array_elements(handler_error_messages)-1); -} diff --git a/mysys/my_net.c b/mysys/my_net.c index 81d977210f8..3d139bb46c3 100644 --- a/mysys/my_net.c +++ b/mysys/my_net.c @@ -31,6 +31,8 @@ #include #endif #endif /* !defined(__WIN__) */ +#include "my_net.h" + void my_inet_ntoa(struct in_addr in, char *buf) { @@ -40,3 +42,90 @@ void my_inet_ntoa(struct in_addr in, char *buf) strmov(buf,ptr); pthread_mutex_unlock(&THR_LOCK_net); } + +/* This code is not needed if my_gethostbyname_r is a macro */ +#if !defined(my_gethostbyname_r) + +/* + Emulate SOLARIS style calls, not because it's better, but just to make the + usage of getbostbyname_r simpler. 
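The wrapper moved into my_net.c here gives every platform variant the same Solaris-style calling convention. A hedged caller sketch follows; the buffer size, host name and error handling are illustrative assumptions rather than part of the patch, and the closing my_gethostbyname_r_free() call matters for the mutex-protected fallback path used when no native gethostbyname_r() exists.

#include <stdio.h>
#include <netdb.h>             /* struct hostent */
#include "my_net.h"            /* mysys declarations of the wrapper */

#define HOST_BUFF_SIZE 2048    /* illustrative size, not a MySQL constant */

static void lookup_example(void)
{
  struct hostent hbuf, *hp;
  char buffer[HOST_BUFF_SIZE];
  int h_err;

  hp= my_gethostbyname_r("localhost", &hbuf, buffer,
                         (int) sizeof(buffer), &h_err);
  if (!hp)
    fprintf(stderr, "host lookup failed, h_errno=%d\n", h_err);
  else
    printf("official name: %s\n", hp->h_name);

  /* Per the comment above, the caller must release the structure when
     finished; on the fallback path this unlocks LOCK_gethostbyname_r. */
  my_gethostbyname_r_free();
}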
+*/ + +#if defined(HAVE_GETHOSTBYNAME_R) + +#if defined(HAVE_GETHOSTBYNAME_R_GLIBC2_STYLE) + +struct hostent *my_gethostbyname_r(const char *name, + struct hostent *result, char *buffer, + int buflen, int *h_errnop) +{ + struct hostent *hp; + DBUG_ASSERT((size_t) buflen >= sizeof(*result)); + if (gethostbyname_r(name,result, buffer, (size_t) buflen, &hp, h_errnop)) + return 0; + return hp; +} + +#elif defined(HAVE_GETHOSTBYNAME_R_RETURN_INT) + +struct hostent *my_gethostbyname_r(const char *name, + struct hostent *result, char *buffer, + int buflen, int *h_errnop) +{ + if (gethostbyname_r(name,result,(struct hostent_data *) buffer) == -1) + { + *h_errnop= errno; + return 0; + } + return result; +} + +#else + +/* gethostbyname_r with similar interface as gethostbyname() */ + +struct hostent *my_gethostbyname_r(const char *name, + struct hostent *result, char *buffer, + int buflen, int *h_errnop) +{ + struct hostent *hp; + DBUG_ASSERT(buflen >= sizeof(struct hostent_data)); + hp= gethostbyname_r(name,result,(struct hostent_data *) buffer); + *h_errnop= errno; + return hp; +} +#endif /* GLIBC2_STYLE_GETHOSTBYNAME_R */ + +#else /* !HAVE_GETHOSTBYNAME_R */ + +#ifdef THREAD +extern pthread_mutex_t LOCK_gethostbyname_r; +#endif + +/* + No gethostbyname_r() function exists. + In this case we have to keep a mutex over the call to ensure that no + other thread is going to reuse the internal memory. + + The user is responsible to call my_gethostbyname_r_free() when he + is finished with the structure. +*/ + +struct hostent *my_gethostbyname_r(const char *name, + struct hostent *result, char *buffer, + int buflen, int *h_errnop) +{ + struct hostent *hp; + pthread_mutex_lock(&LOCK_gethostbyname_r); + hp= gethostbyname(name); + *h_errnop= h_errno; + return hp; +} + +void my_gethostbyname_r_free() +{ + pthread_mutex_unlock(&LOCK_gethostbyname_r); +} + +#endif /* !HAVE_GETHOSTBYNAME_R */ +#endif /* !my_gethostbyname_r */ diff --git a/mysys/my_port.c b/mysys/my_port.c deleted file mode 100644 index 9ad333421ca..00000000000 --- a/mysys/my_port.c +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (C) 2002 MySQL AB - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; version 2 - of the License. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, - MA 02111-1307, USA */ - -/* - Small functions to make code portable -*/ - -#include "mysys_priv.h" - -#ifdef _AIX - -/* - On AIX, at least with gcc 3.1, the expression - '(double) (ulonglong) var' doesn't always work for big unsigned - integers like '18446744073709551615'. The end result is that the - high bit is simply dropped. (probably bug in gcc optimizations) - Handling the conversion in a sub function seems to work. 
-*/ - - - -double my_ulonglong2double(unsigned long long nr) -{ - return (double) nr; -} -#endif /* _AIX */ diff --git a/sql/field.h b/sql/field.h index cbdfa686ff8..285c8307634 100644 --- a/sql/field.h +++ b/sql/field.h @@ -13,6 +13,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "my_compare.h" /* for clr_rec_bits */ + /* Because of the function new_field() all field classes that have static variables must declare the size_of() member function. diff --git a/sql/handler.h b/sql/handler.h index dabc179079a..5f68bb6a8f8 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -20,7 +20,6 @@ #pragma interface /* gcc class implementation */ #endif -#include #include #include diff --git a/storage/myisam/ft_stopwords.c b/storage/myisam/ft_stopwords.c index 9838b15af34..dbab71f4381 100644 --- a/storage/myisam/ft_stopwords.c +++ b/storage/myisam/ft_stopwords.c @@ -16,7 +16,7 @@ /* Written by Sergei A. Golubchik, who has a shared copyright to this code */ #include "ftdefs.h" -#include "my_handler.h" +#include "my_compare.h" typedef struct st_ft_stopwords { diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 935465e7edf..7bc26729e03 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -85,6 +85,7 @@ static SORT_KEY_BLOCKS *alloc_key_blocks(MI_CHECK *param, uint blocks, uint buffer_length); static ha_checksum mi_byte_checksum(const uchar *buf, uint length); static void set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share); +static HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a); void myisamchk_init(MI_CHECK *param) { @@ -4739,3 +4740,89 @@ set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share) share->delete_record=tmp.delete_record; } } + +/* + Find the first NULL value in index-suffix values tuple + + SYNOPSIS + ha_find_null() + keyseg Array of keyparts for key suffix + a Key suffix value tuple + + DESCRIPTION + Find the first NULL value in index-suffix values tuple. + TODO Consider optimizing this fuction or its use so we don't search for + NULL values in completely NOT NULL index suffixes. + + RETURN + First key part that has NULL as value in values tuple, or the last key part + (with keyseg->type==HA_TYPE_END) if values tuple doesn't contain NULLs. 
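Since ha_find_null() becomes a static helper private to mi_check.c in this patch, only its documented return contract matters to readers of this file. A conceptual sketch of that contract, with hypothetical variable names and assuming the statistics pass hinted at above (tuples containing NULL may be skipped when NULLs are to be ignored):

/* keyseg points at the keypart array, key at the packed suffix tuple. */
HA_KEYSEG *first_null= ha_find_null(keyseg, key);
if ((enum ha_base_keytype) first_null->type == HA_KEYTYPE_END)
{
  /* No keypart in the tuple is NULL: the tuple can be counted normally. */
}
else
{
  /* first_null is the first keypart whose value is NULL; skip or
     down-weight the tuple, depending on the statistics method. */
}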
+*/ + +static HA_KEYSEG *ha_find_null(HA_KEYSEG *keyseg, uchar *a) +{ + for (; (enum ha_base_keytype) keyseg->type != HA_KEYTYPE_END; keyseg++) + { + uchar *end; + if (keyseg->null_bit) + { + if (!*a++) + return keyseg; + } + end= a+ keyseg->length; + + switch ((enum ha_base_keytype) keyseg->type) { + case HA_KEYTYPE_TEXT: + case HA_KEYTYPE_BINARY: + case HA_KEYTYPE_BIT: + if (keyseg->flag & HA_SPACE_PACK) + { + int a_length; + get_key_length(a_length, a); + a += a_length; + break; + } + else + a= end; + break; + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: + case HA_KEYTYPE_VARBINARY1: + case HA_KEYTYPE_VARBINARY2: + { + int a_length; + get_key_length(a_length, a); + a+= a_length; + break; + } + case HA_KEYTYPE_NUM: + if (keyseg->flag & HA_SPACE_PACK) + { + int alength= *a++; + end= a+alength; + } + a= end; + break; + case HA_KEYTYPE_INT8: + case HA_KEYTYPE_SHORT_INT: + case HA_KEYTYPE_USHORT_INT: + case HA_KEYTYPE_LONG_INT: + case HA_KEYTYPE_ULONG_INT: + case HA_KEYTYPE_INT24: + case HA_KEYTYPE_UINT24: +#ifdef HAVE_LONG_LONG + case HA_KEYTYPE_LONGLONG: + case HA_KEYTYPE_ULONGLONG: +#endif + case HA_KEYTYPE_FLOAT: + case HA_KEYTYPE_DOUBLE: + a= end; + break; + case HA_KEYTYPE_END: /* purecov: inspected */ + /* keep compiler happy */ + DBUG_ASSERT(0); + break; + } + } + return keyseg; +} diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c index 363b024737a..142ee9b4909 100644 --- a/storage/myisam/mi_test1.c +++ b/storage/myisam/mi_test1.c @@ -16,6 +16,7 @@ /* Testing of the basic functions of a MyISAM table */ #include "myisam.h" +#include "myisamdef.h" #include #include diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index 72a4e006cc6..3c8ebe5dbd8 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -17,6 +17,7 @@ #include "fulltext.h" #include "rt_index.h" +#include "my_compare.h" #define MAX_POINTER_LENGTH 8 diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index 962155e884c..c91601f6503 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -424,6 +424,8 @@ typedef struct st_mi_sort_param #define get_pack_length(length) ((length) >= 255 ? 
3 : 1) +#define portable_sizeof_char_ptr 8 + #define MI_MIN_BLOCK_LENGTH 20 /* Because of delete-link */ #define MI_EXTEND_BLOCK_LENGTH 20 /* Don't use to small record-blocks */ #define MI_SPLIT_LENGTH ((MI_EXTEND_BLOCK_LENGTH+4)*2) diff --git a/storage/myisam/sp_test.c b/storage/myisam/sp_test.c index f572c7ab19b..7a30a742fd6 100644 --- a/storage/myisam/sp_test.c +++ b/storage/myisam/sp_test.c @@ -17,6 +17,7 @@ /* Written by Alex Barkov, who has a shared copyright to this code */ #include "myisam.h" +#include "myisamdef.h" #ifdef HAVE_SPATIAL #include "sp_defs.h" -- cgit v1.2.1 From a3ab0d92defbafb462c7c5c50bab324521558971 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Mon, 28 Mar 2011 13:25:03 +0300 Subject: Fixed a test failure in embedded because of the fix for BUG#11766769 --- mysql-test/r/variables-notembedded.result | 24 ++++++++++++++++++++++++ mysql-test/r/variables.result | 23 ----------------------- mysql-test/t/variables-notembedded.test | 27 +++++++++++++++++++++++++++ mysql-test/t/variables.test | 25 ------------------------- 4 files changed, 51 insertions(+), 48 deletions(-) diff --git a/mysql-test/r/variables-notembedded.result b/mysql-test/r/variables-notembedded.result index 8c6d54757ed..8056af49090 100644 --- a/mysql-test/r/variables-notembedded.result +++ b/mysql-test/r/variables-notembedded.result @@ -108,3 +108,27 @@ SET @@session.slave_skip_errors= 7; ERROR HY000: Variable 'slave_skip_errors' is a read only variable SET @@global.slave_skip_errors= 7; ERROR HY000: Variable 'slave_skip_errors' is a read only variable +# +# Bug #11766769 : 59959: SMALL VALUES OF --MAX-ALLOWED-PACKET +# ARE NOT BEING HONORED +# +CREATE TABLE t1 (a MEDIUMTEXT); +SET GLOBAL max_allowed_packet=2048; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' +SET GLOBAL net_buffer_length=4096; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' +SHOW SESSION VARIABLES LIKE 'max_allowed_packet'; +Variable_name Value +max_allowed_packet 2048 +SHOW SESSION VARIABLES LIKE 'net_buffer_length'; +Variable_name Value +net_buffer_length 4096 +ERROR 08S01: Got a packet bigger than 'max_allowed_packet' bytes +SELECT LENGTH(a) FROM t1; +LENGTH(a) +SET GLOBAL max_allowed_packet=default; +SET GLOBAL net_buffer_length=default; +DROP TABLE t1; +End of 5.1 tests diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index f92e1dec4c9..8cff6e99d4f 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -1567,27 +1567,4 @@ SET @@global.max_binlog_cache_size=DEFAULT; SET @@global.max_join_size=DEFAULT; SET @@global.key_buffer_size=@kbs; SET @@global.key_cache_block_size=@kcbs; -# -# Bug #11766769 : 59959: SMALL VALUES OF --MAX-ALLOWED-PACKET -# ARE NOT BEING HONORED -# -CREATE TABLE t1 (a MEDIUMTEXT); -SET GLOBAL max_allowed_packet=2048; -Warnings: -Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' -SET GLOBAL net_buffer_length=4096; -Warnings: -Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' -SHOW SESSION VARIABLES LIKE 'max_allowed_packet'; -Variable_name Value -max_allowed_packet 2048 -SHOW SESSION VARIABLES LIKE 'net_buffer_length'; -Variable_name Value -net_buffer_length 4096 -ERROR 08S01: Got a packet bigger than 'max_allowed_packet' bytes -SELECT LENGTH(a) FROM t1; -LENGTH(a) -SET GLOBAL max_allowed_packet=default; -SET GLOBAL 
net_buffer_length=default; -DROP TABLE t1; End of 5.1 tests diff --git a/mysql-test/t/variables-notembedded.test b/mysql-test/t/variables-notembedded.test index 7cc068c68c7..b440cfa47b0 100644 --- a/mysql-test/t/variables-notembedded.test +++ b/mysql-test/t/variables-notembedded.test @@ -109,3 +109,30 @@ SET @@session.slave_skip_errors= 7; --error ER_INCORRECT_GLOBAL_LOCAL_VAR SET @@global.slave_skip_errors= 7; # + +--echo # +--echo # Bug #11766769 : 59959: SMALL VALUES OF --MAX-ALLOWED-PACKET +--echo # ARE NOT BEING HONORED +--echo # + +CREATE TABLE t1 (a MEDIUMTEXT); + +SET GLOBAL max_allowed_packet=2048; +SET GLOBAL net_buffer_length=4096; +CONNECT (con1,localhost,root,,test); +SHOW SESSION VARIABLES LIKE 'max_allowed_packet'; +SHOW SESSION VARIABLES LIKE 'net_buffer_length'; +--disable_query_log +--error ER_NET_PACKET_TOO_LARGE +INSERT INTO t1 VALUES ('1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456
78901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890'); +--enable_query_log + +CONNECTION default; +DISCONNECT con1; +SELECT LENGTH(a) FROM t1; + +SET GLOBAL max_allowed_packet=default; +SET GLOBAL net_buffer_length=default; +DROP TABLE t1; + +--echo End of 5.1 tests diff --git 
a/mysql-test/t/variables.test b/mysql-test/t/variables.test index 8f111e7cf3b..d00b77e64c0 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -1318,29 +1318,4 @@ SET @@global.key_buffer_size=@kbs; SET @@global.key_cache_block_size=@kcbs; ---echo # ---echo # Bug #11766769 : 59959: SMALL VALUES OF --MAX-ALLOWED-PACKET ---echo # ARE NOT BEING HONORED ---echo # - -CREATE TABLE t1 (a MEDIUMTEXT); - -SET GLOBAL max_allowed_packet=2048; -SET GLOBAL net_buffer_length=4096; -CONNECT (con1,localhost,root,,test); -SHOW SESSION VARIABLES LIKE 'max_allowed_packet'; -SHOW SESSION VARIABLES LIKE 'net_buffer_length'; ---disable_query_log ---error ER_NET_PACKET_TOO_LARGE -INSERT INTO t1 VALUES ('12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890'); ---enable_query_log -SELECT LENGTH(a) FROM t1; - -CONNECTION default; -DISCONNECT con1; -SET GLOBAL max_allowed_packet=default; -SET GLOBAL net_buffer_length=default; -DROP TABLE t1; - - --echo End of 5.1 tests -- cgit v1.2.1 From cd71e11cc825f55efceba44b055af47db88b5116 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Mon, 28 Mar 2011 13:32:25 +0300 Subject: Fixed a test failure 
because of a new warning caused by the fix for Bug #11766769 --- mysql-test/r/shm.result | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/r/shm.result b/mysql-test/r/shm.result index c504fe222ef..0e086e000c7 100644 --- a/mysql-test/r/shm.result +++ b/mysql-test/r/shm.result @@ -2155,6 +2155,8 @@ mysqld is alive SET @max_allowed_packet= @@global.max_allowed_packet; SET @net_buffer_length= @@global.net_buffer_length; SET GLOBAL max_allowed_packet= 1024; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' SET GLOBAL net_buffer_length= 1024; ERROR 1153 (08S01) at line 1: Got a packet bigger than 'max_allowed_packet' bytes SET GLOBAL max_allowed_packet= @max_allowed_packet; -- cgit v1.2.1 From 4ed8cb4a76275a28231a842c9112834d384b7b4c Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Mon, 28 Mar 2011 13:43:30 +0300 Subject: Added support for VS10. Fixed RelWithDebugInfo bzr ignores. --- .bzrignore | 6 +++++- win/build-vs10.bat | 18 ++++++++++++++++++ win/build-vs10_x64.bat | 18 ++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 win/build-vs10.bat create mode 100644 win/build-vs10_x64.bat diff --git a/.bzrignore b/.bzrignore index 3d27c001e2b..0d668308193 100644 --- a/.bzrignore +++ b/.bzrignore @@ -9,6 +9,7 @@ *.core *.d *.da +*.dir *.dll *.exe *.exp @@ -30,6 +31,7 @@ *.pdb *.reject *.res +*.rule *.sbr *.so *.so.* @@ -37,6 +39,8 @@ *.user *.vcproj *.vcproj.cmake +*.vcxproj +*.vcxproj.filters */*.dir/* */*_pure_*warnings */.deps @@ -45,7 +49,7 @@ */debug/* */minsizerel/* */release/* -*/relwithdebinfo/* +RelWithDebInfo *~ .*.swp ./CMakeCache.txt diff --git a/win/build-vs10.bat b/win/build-vs10.bat new file mode 100644 index 00000000000..c2bc09b17df --- /dev/null +++ b/win/build-vs10.bat @@ -0,0 +1,18 @@ +@echo off + +REM Copyright (c) 2006,2010 Oracle and/or its affiliates. All rights reserved. +REM +REM This program is free software; you can redistribute it and/or modify +REM it under the terms of the GNU General Public License as published by +REM the Free Software Foundation; version 2 of the License. +REM +REM This program is distributed in the hope that it will be useful, +REM but WITHOUT ANY WARRANTY; without even the implied warranty of +REM MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +REM GNU General Public License for more details. 
+REM +REM You should have received a copy of the GNU General Public License +REM along with this program; if not, write to the Free Software +REM Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +cmake -G "Visual Studio 10 Win64" + -- cgit v1.2.1 From d6125b27b38432d1a465397c0c5a9e62bb1c65d3 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Mon, 28 Mar 2011 17:24:25 +0400 Subject: Bug#11765216 58154: UNINITIALIZED VARIABLE FORMAT IN STR_TO_DATE FUNCTION Valgrind warning happens due to uninitialized cached_format_type field which is used later in Item_func_str_to_date::val_str method. The fix is to init cached_format_type field. mysql-test/r/func_time.result: test case mysql-test/t/func_time.test: test case sql/item_timefunc.cc: init cached_format_type field --- mysql-test/r/func_time.result | 6 ++++++ mysql-test/t/func_time.test | 8 ++++++++ sql/item_timefunc.cc | 1 + 3 files changed, 15 insertions(+) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index f63860039d7..01743e4a1dc 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1381,4 +1381,10 @@ DROP TABLE t1; SELECT STR_TO_DATE(SPACE(2),'1'); STR_TO_DATE(SPACE(2),'1') 0000-00-00 +# +# Bug#11765216 58154: UNINITIALIZED VARIABLE FORMAT IN STR_TO_DATE FUNCTION +# +SET GLOBAL SQL_MODE=''; +DO STR_TO_DATE((''), FROM_DAYS(@@GLOBAL.SQL_MODE)); +SET GLOBAL SQL_MODE=DEFAULT; End of 5.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index c48351d33f2..3f441c42d48 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -887,4 +887,12 @@ DROP TABLE t1; SELECT STR_TO_DATE(SPACE(2),'1'); +--echo # +--echo # Bug#11765216 58154: UNINITIALIZED VARIABLE FORMAT IN STR_TO_DATE FUNCTION +--echo # + +SET GLOBAL SQL_MODE=''; +DO STR_TO_DATE((''), FROM_DAYS(@@GLOBAL.SQL_MODE)); +SET GLOBAL SQL_MODE=DEFAULT; + --echo End of 5.1 tests diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 71b2baf4fee..ecf790cc061 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -3293,6 +3293,7 @@ void Item_func_str_to_date::fix_length_and_dec() { maybe_null= 1; decimals=0; + cached_format_type= DATE_TIME; cached_field_type= MYSQL_TYPE_DATETIME; max_length= MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; cached_timestamp_type= MYSQL_TIMESTAMP_NONE; -- cgit v1.2.1 From 47885f552b1822291584b71c791fd5175ba6567f Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Mon, 28 Mar 2011 17:27:44 +0400 Subject: Bug#11766087 59125: VALGRIND UNINITIALISED VALUE WARNING IN ULL2DEC, LONGLONG2DECIMAL Valgrind warning happens due to missing NULL value check in Item_func::val_decimal. The fix is to add this check. 
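In outline, the fix follows a simple guard pattern: evaluate the argument once, check the null flag that the evaluation sets, and only then hand the value to the decimal conversion. The following is a minimal, self-contained C sketch of that pattern only; the item struct and the item_val_int()/item_val_decimal() names are invented stand-ins for illustration, not the real Item/my_decimal API.

/* Illustrative stand-ins only; not the MySQL Item/my_decimal classes. */
#include <stdio.h>

struct item { long long value; int is_null; };

/* stands in for Item::val_int(): produces a value and sets the null flag */
static long long item_val_int(const struct item *it, int *null_value)
{
    *null_value = it->is_null;
    return it->is_null ? 0 : it->value;
}

/* stands in for Item_func::val_decimal(): check the null flag first,
   instead of converting the result of item_val_int() unconditionally */
static long long *item_val_decimal(const struct item *it, long long *buf)
{
    int null_value;
    long long nr = item_val_int(it, &null_value);
    if (null_value)
        return NULL;            /* leave the output buffer untouched */
    *buf = nr;                  /* stands in for int2my_decimal()    */
    return buf;
}

int main(void)
{
    struct item samples[2] = { { 42, 0 }, { 0, 1 } };  /* 42, then SQL NULL */
    for (int i = 0; i < 2; i++) {
        long long buf;
        long long *r = item_val_decimal(&samples[i], &buf);
        if (r)
            printf("sample %d -> %lld\n", i, *r);
        else
            printf("sample %d -> NULL\n", i);
    }
    return 0;
}

The point of the guard is simply that the conversion step (int2my_decimal() in the real code) is never fed a value whose null flag is set, which is what the Valgrind warning was complaining about.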
mysql-test/r/func_time.result: test case mysql-test/t/func_time.test: test case sql/item_func.cc: added check for NULL value --- mysql-test/r/func_time.result | 6 ++++++ mysql-test/t/func_time.test | 6 ++++++ sql/item_func.cc | 5 ++++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 01743e4a1dc..bbb506035dc 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1387,4 +1387,10 @@ STR_TO_DATE(SPACE(2),'1') SET GLOBAL SQL_MODE=''; DO STR_TO_DATE((''), FROM_DAYS(@@GLOBAL.SQL_MODE)); SET GLOBAL SQL_MODE=DEFAULT; +# +# Bug#11766087 59125: VALGRIND UNINITIALISED VALUE WARNING IN ULL2DEC, LONGLONG2DECIMAL +# +SELECT FORMAT(YEAR(STR_TO_DATE('',GET_FORMAT(TIME,''))),1); +FORMAT(YEAR(STR_TO_DATE('',GET_FORMAT(TIME,''))),1) +NULL End of 5.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 3f441c42d48..2c3d3849793 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -895,4 +895,10 @@ SET GLOBAL SQL_MODE=''; DO STR_TO_DATE((''), FROM_DAYS(@@GLOBAL.SQL_MODE)); SET GLOBAL SQL_MODE=DEFAULT; +--echo # +--echo # Bug#11766087 59125: VALGRIND UNINITIALISED VALUE WARNING IN ULL2DEC, LONGLONG2DECIMAL +--echo # + +SELECT FORMAT(YEAR(STR_TO_DATE('',GET_FORMAT(TIME,''))),1); + --echo End of 5.1 tests diff --git a/sql/item_func.cc b/sql/item_func.cc index 79fa37bd372..595629b51be 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -482,7 +482,10 @@ bool Item_func::is_expensive_processor(uchar *arg) my_decimal *Item_func::val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed); - int2my_decimal(E_DEC_FATAL_ERROR, val_int(), unsigned_flag, decimal_value); + longlong nr= val_int(); + if (null_value) + return 0; /* purecov: inspected */ + int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value); return decimal_value; } -- cgit v1.2.1 From 08e472ff349bb5d21b4881828ed62748ecf7aa40 Mon Sep 17 00:00:00 2001 From: Mayank Prasad Date: Mon, 28 Mar 2011 21:01:37 +0530 Subject: Bug#11751148 : show events shows events in other schema Issue: ====== Test case Correction for bug#11751148. mysql-test/r/events_bugs.result: Result file Correction for bug#11751148. mysql-test/t/events_bugs.test: Test case Correction for bug#11751148. 
--- mysql-test/r/events_bugs.result | 8 ++++---- mysql-test/t/events_bugs.test | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result index ab1e9884efd..dfb8f008c5a 100644 --- a/mysql-test/r/events_bugs.result +++ b/mysql-test/r/events_bugs.result @@ -747,15 +747,15 @@ event_name originator ev1 4294967295 DROP EVENT ev1; SET GLOBAL server_id = @old_server_id; +CREATE DATABASE event_test12; +USE event_test12; +CREATE EVENT ev1 ON SCHEDULE EVERY 1 DAY DO SELECT 1; CREATE DATABASE event_test1; USE event_test1; -CREATE EVENT ev1 ON SCHEDULE EVERY 1 DAY DO SELECT 1; -CREATE DATABASE event_test2; -USE event_test2; SHOW EVENTS; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation DROP DATABASE event_test1; -DROP DATABASE event_test2; +DROP DATABASE event_test12; DROP DATABASE events_test; SET GLOBAL event_scheduler= 'ON'; SET @@global.concurrent_insert= @concurrent_insert; diff --git a/mysql-test/t/events_bugs.test b/mysql-test/t/events_bugs.test index 83e37cdccdb..420e7183621 100644 --- a/mysql-test/t/events_bugs.test +++ b/mysql-test/t/events_bugs.test @@ -1225,15 +1225,15 @@ SET GLOBAL server_id = @old_server_id; # Bug#11751148: show events shows events in other schema # +CREATE DATABASE event_test12; +USE event_test12; +CREATE EVENT ev1 ON SCHEDULE EVERY 1 DAY DO SELECT 1; CREATE DATABASE event_test1; USE event_test1; -CREATE EVENT ev1 ON SCHEDULE EVERY 1 DAY DO SELECT 1; -CREATE DATABASE event_test2; -USE event_test2; # Following show events should not show ev1 SHOW EVENTS; DROP DATABASE event_test1; -DROP DATABASE event_test2; +DROP DATABASE event_test12; ########################################################################### -- cgit v1.2.1 From 4e26a41f3e2cac5ec5016b862944c0116a18b0f6 Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Tue, 29 Mar 2011 10:09:05 +0200 Subject: Bug# 11763784 (former 56541) ASSERTION TABLE->DB_STAT FAILED IN SQL_BASE.CC::OPEN_TABLE() DURING I_S Q This assert could be triggered if a statement requiring a name lock on a table (e.g. DROP TRIGGER) executed concurrently with an I_S query which also used the table. One connection first started an I_S query that opened a given table. Then another connection started a statement requiring a name lock on the same table. This statement was blocked since the table was in use by the I_S query. When the I_S query resumed and tried to open the table again as part of get_all_tables(), it would encounter a table instance with an old version number representing the pending name lock. Since I_S queries ignore version checks and thus pending name locks, it would try to continue. This caused it to encounter the assert. The assert checked that the TABLE instance found with a different version, was a real, open table. However, since this TABLE instance instead represented a pending name lock, the check would fail and trigger the assert. This patch fixes the problem by removing the assert. It is ok for TABLE::db_stat to be 0 in this case since the TABLE instance can represent a pending name lock. Test case added to lock_sync.test. 
--- mysql-test/r/lock_sync.result | 27 ++++++++++++++++++++++ mysql-test/t/lock_sync.test | 54 +++++++++++++++++++++++++++++++++++++++++++ sql/sql_base.cc | 3 +-- 3 files changed, 82 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/lock_sync.result b/mysql-test/r/lock_sync.result index 752f278a2b4..8b662cc8a82 100644 --- a/mysql-test/r/lock_sync.result +++ b/mysql-test/r/lock_sync.result @@ -629,3 +629,30 @@ drop procedure p1; drop procedure p2; drop table t1, t2, t3, t4, t5, te; set @@global.concurrent_insert= @old_concurrent_insert; +# +# Bug#11763784 56541: ASSERTION TABLE->DB_STAT FAILED IN +# SQL_BASE.CC::OPEN_TABLE() DURING I_S Q +# +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a int); +INSERT INTO t1 VALUES (1), (2); +CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW BEGIN END; +# Connection con2 +SET DEBUG_SYNC= 'before_open_in_get_all_tables SIGNAL is_waits WAIT_FOR is_cont'; +# Sending: +SELECT * FROM information_schema.table_constraints JOIN t1 ON table_name = a; +# Connection con1 +SET DEBUG_SYNC= 'now WAIT_FOR is_waits'; +# Sending: +DROP TRIGGER t1_bi; +# Connection default +# Wait until DROP TRIGGER is blocked, waiting for t1 +SET DEBUG_SYNC= 'now SIGNAL is_cont'; +# Connection con2 +# Reaping SELECT * FROM information_schema.table_constraints JOIN t1... +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE a +# Connection con1 +# Reaping DROP TRIGGER t1_bi +# Connection default +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/t/lock_sync.test b/mysql-test/t/lock_sync.test index 17f8abb75f3..1df09524140 100644 --- a/mysql-test/t/lock_sync.test +++ b/mysql-test/t/lock_sync.test @@ -862,6 +862,60 @@ disconnect con2; set @@global.concurrent_insert= @old_concurrent_insert; +--echo # +--echo # Bug#11763784 56541: ASSERTION TABLE->DB_STAT FAILED IN +--echo # SQL_BASE.CC::OPEN_TABLE() DURING I_S Q +--echo # + +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +CREATE TABLE t1(a int); +INSERT INTO t1 VALUES (1), (2); +CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW BEGIN END; + +connect (con1, localhost, root); +--echo # Connection con2 +connect (con2, localhost, root); +SET DEBUG_SYNC= 'before_open_in_get_all_tables SIGNAL is_waits WAIT_FOR is_cont'; +--echo # Sending: +--send SELECT * FROM information_schema.table_constraints JOIN t1 ON table_name = a + +--echo # Connection con1 +connection con1; +SET DEBUG_SYNC= 'now WAIT_FOR is_waits'; +--echo # Sending: +--send DROP TRIGGER t1_bi + +--echo # Connection default +connection default; +--echo # Wait until DROP TRIGGER is blocked, waiting for t1 +let $wait_condition= + SELECT COUNT(*) = 1 FROM information_schema.processlist + WHERE state = "Waiting for table" AND + info = "DROP TRIGGER t1_bi"; +--source include/wait_condition.inc +SET DEBUG_SYNC= 'now SIGNAL is_cont'; + +--echo # Connection con2 +connection con2; +--echo # Reaping SELECT * FROM information_schema.table_constraints JOIN t1... +--reap + +--echo # Connection con1 +connection con1; +--echo # Reaping DROP TRIGGER t1_bi +--reap + +--echo # Connection default +connection default; +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; +disconnect con1; +disconnect con2; + + # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 9765148cda1..dc78f3b84c6 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2798,10 +2798,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, ("Found table '%s.%s' with different refresh version", table_list->db, table_list->table_name)); - /* Ignore FLUSH, but not name locks! */ + /* Ignore FLUSH and pending name locks, but not acquired name locks! */ if (flags & MYSQL_LOCK_IGNORE_FLUSH && !table->open_placeholder) { - DBUG_ASSERT(table->db_stat); /* Force close at once after usage */ thd->version= table->s->version; continue; -- cgit v1.2.1 From 3b7f044534ae24ce0a098d598bc7fc7a2b40fe4f Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Wed, 30 Mar 2011 11:00:41 +0400 Subject: Bug#11766126 59166: ANOTHER DATETIME VALGRIND UNINITIALIZED WARNING Valgrind warning happens because null values check happens too late in Item_func_month::val_str(after result string calculation).The fix is to check null value before result string calculation. mysql-test/r/func_time.result: test case mysql-test/t/func_time.test: test case sql/item_timefunc.h: check null value before result string calculation. --- mysql-test/r/func_time.result | 6 ++++++ mysql-test/t/func_time.test | 6 ++++++ sql/item_timefunc.h | 7 +++++-- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index bbb506035dc..fd543ba4308 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1393,4 +1393,10 @@ SET GLOBAL SQL_MODE=DEFAULT; SELECT FORMAT(YEAR(STR_TO_DATE('',GET_FORMAT(TIME,''))),1); FORMAT(YEAR(STR_TO_DATE('',GET_FORMAT(TIME,''))),1) NULL +# +# Bug#11766126 59166: ANOTHER DATETIME VALGRIND UNINITIALIZED WARNING +# +SELECT CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)); +CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)) +NULL End of 5.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 2c3d3849793..1bc56c0f403 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -901,4 +901,10 @@ SET GLOBAL SQL_MODE=DEFAULT; SELECT FORMAT(YEAR(STR_TO_DATE('',GET_FORMAT(TIME,''))),1); +--echo # +--echo # Bug#11766126 59166: ANOTHER DATETIME VALGRIND UNINITIALIZED WARNING +--echo # + +SELECT CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)); + --echo End of 5.1 tests diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 9c1ac512bcb..396b5bbb200 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -106,8 +106,11 @@ public: { DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); } String *val_str(String *str) { - str->set(val_int(), &my_charset_bin); - return null_value ? 0 : str; + longlong nr= val_int(); + if (null_value) + return 0; + str->set(nr, &my_charset_bin); + return str; } const char *func_name() const { return "month"; } enum Item_result result_type () const { return INT_RESULT; } -- cgit v1.2.1 From a7d383cbb8db5478c1c53a025afda967ad09299b Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Wed, 30 Mar 2011 11:08:35 +0400 Subject: Bug#11766124 59164: VALGRIND: UNINITIALIZED VALUE IN NUMBER_TO_DATETIME Valgrind warning happens due to missing NULL value check in Item::get_date. The fix is to add this check. 
mysql-test/r/func_time.result: test case mysql-test/t/func_time.test: test case sql/item.cc: added check for NULL value --- mysql-test/r/func_time.result | 6 ++++++ mysql-test/t/func_time.test | 6 ++++++ sql/item.cc | 6 +++++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index fd543ba4308..f67171af99f 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1399,4 +1399,10 @@ NULL SELECT CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)); CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)) NULL +# +# Bug#11766124 59164: VALGRIND: UNINITIALIZED VALUE IN NUMBER_TO_DATETIME +# +SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR); +ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR) +NULL End of 5.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 1bc56c0f403..938359f8c11 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -907,4 +907,10 @@ SELECT FORMAT(YEAR(STR_TO_DATE('',GET_FORMAT(TIME,''))),1); SELECT CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)); +--echo # +--echo # Bug#11766124 59164: VALGRIND: UNINITIALIZED VALUE IN NUMBER_TO_DATETIME +--echo # + +SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR); + --echo End of 5.1 tests diff --git a/sql/item.cc b/sql/item.cc index 357cc6d7fe4..f90cf562c0b 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -926,8 +926,12 @@ bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) } else { - longlong value= val_int(); int was_cut; + longlong value= val_int(); + + if (null_value) + goto err; + if (number_to_datetime(value, ltime, fuzzydate, &was_cut) == LL(-1)) { char buff[22], *end; -- cgit v1.2.1 From ddec6ecdd8521d6fd6e4c26498e7bd752fd3eddf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 30 Mar 2011 14:25:58 +0300 Subject: Bug#11877216 InnoDB too eager to commit suicide on a busy server sync_array_print_long_waits(): Return the longest waiting thread ID and the longest waited-for lock. Only if those remain unchanged between calls in srv_error_monitor_thread(), increment fatal_cnt. Otherwise, reset fatal_cnt. Background: There is a built-in watchdog in InnoDB whose purpose is to kill the server when some thread is stuck waiting for a mutex or rw-lock. Before this fix, the logic was flawed. The function sync_array_print_long_waits() returns TRUE if it finds a lock wait that exceeds 10 minutes (srv_fatal_semaphore_wait_threshold). The function srv_error_monitor_thread() will kill the server if this happens 10 times in a row (fatal_cnt reaches 10), checked every 30 seconds. This is wrong, because this situation does not mean that the server is hung. If the server is very busy for a little over 15 minutes, it will be killed. Consider this example. Thread T1 is waiting for mutex M. Some time later, threads T2..Tn start waiting for the same mutex M. If T1 keeps waiting for 600 seconds, fatal_cnt will be incremented to 1. So far, so good. Now, if M is granted to T1, the server was obviously not stuck. But, T2..Tn keeps waiting, and their wait time will be longer than 600 seconds. If 5 minutes later, some Tn has still been waiting for more than 10 minutes for the mutex M, the server can be killed, even though it is not stuck. 
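Put differently, the watchdog should only keep counting while the same thread stays stuck on the same semaphore, and it should start over as soon as the longest wait changes. Below is a small, self-contained C model of just that rule; the scripted check results and plain int identifiers are illustrative stand-ins (the real code uses os_thread_id_t, wait-array cells and sync_array_print_long_waits()), so this is a sketch of the described behaviour, not InnoDB code.

/* Toy model of the corrected watchdog rule described above. */
#include <stdio.h>

int main(void)
{
    /* one entry per 30-second check: the longest waiter and the semaphore it
       waits on; waiter == 0 means no wait exceeded the fatal threshold */
    struct check { int waiter; int sema; } ticks[] = {
        { 1, 7 }, { 1, 7 }, { 2, 7 }, { 2, 7 }, { 0, 0 }, { 3, 9 }, { 3, 9 }
    };
    int n = (int)(sizeof(ticks) / sizeof(ticks[0]));
    int old_waiter = 0, old_sema = 0, fatal_cnt = 0;

    for (int i = 0; i < n; i++) {
        if (ticks[i].waiter != 0              /* a fatal-length wait exists   */
            && ticks[i].waiter == old_waiter  /* same thread as last check    */
            && ticks[i].sema == old_sema) {   /* same semaphore as last check */
            fatal_cnt++;
            if (fatal_cnt > 10)
                printf("check %d: the server would be aborted\n", i);
        } else {
            fatal_cnt = 0;                    /* progress, or a new longest wait */
            old_waiter = ticks[i].waiter;
            old_sema = ticks[i].sema;
        }
        printf("check %d: fatal_cnt = %d\n", i, fatal_cnt);
    }
    return 0;
}

Running the model shows fatal_cnt falling back to zero whenever the (waiter, semaphore) pair changes between checks, which is exactly the behaviour the description above calls for.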
rb:622 approved by Jimmy Yang --- storage/innobase/include/sync0arr.h | 11 ++++++---- storage/innobase/srv/srv0srv.c | 19 ++++++++++++----- storage/innobase/sync/sync0arr.c | 36 ++++++++++++++++++++++++-------- storage/innodb_plugin/ChangeLog | 5 +++++ storage/innodb_plugin/include/sync0arr.h | 7 +++++-- storage/innodb_plugin/srv/srv0srv.c | 11 +++++++++- storage/innodb_plugin/sync/sync0arr.c | 32 +++++++++++++++++++++------- 7 files changed, 93 insertions(+), 28 deletions(-) diff --git a/storage/innobase/include/sync0arr.h b/storage/innobase/include/sync0arr.h index fae26b7a63e..ec48059dbcb 100644 --- a/storage/innobase/include/sync0arr.h +++ b/storage/innobase/include/sync0arr.h @@ -93,10 +93,13 @@ sync_arr_wake_threads_if_sema_free(void); Prints warnings of long semaphore waits to stderr. */ ibool -sync_array_print_long_waits(void); -/*=============================*/ - /* out: TRUE if fatal semaphore wait threshold - was exceeded */ +sync_array_print_long_waits( +/*========================*/ + /* out: TRUE if fatal semaphore wait threshold + was exceeded */ + os_thread_id_t* waiter, /* out: longest waiting thread */ + const void** sema) /* out: longest-waited-for semaphore */ + __attribute__((nonnull)); /************************************************************************ Validates the integrity of the wait array. Checks that the number of reserved cells equals the count variable. */ diff --git a/storage/innobase/srv/srv0srv.c b/storage/innobase/srv/srv0srv.c index 9c34e73109c..3f6f1982992 100644 --- a/storage/innobase/srv/srv0srv.c +++ b/storage/innobase/srv/srv0srv.c @@ -2180,9 +2180,15 @@ srv_error_monitor_thread( os_thread_create */ { /* number of successive fatal timeouts observed */ - ulint fatal_cnt = 0; - dulint old_lsn; - dulint new_lsn; + ulint fatal_cnt = 0; + dulint old_lsn; + dulint new_lsn; + /* longest waiting thread for a semaphore */ + os_thread_id_t waiter = os_thread_get_curr_id(); + os_thread_id_t old_waiter = waiter; + /* the semaphore that is being waited for */ + const void* sema = NULL; + const void* old_sema = NULL; old_lsn = srv_start_lsn; @@ -2224,10 +2230,11 @@ loop: /* In case mutex_exit is not a memory barrier, it is theoretically possible some threads are left waiting though the semaphore is already released. Wake up those threads: */ - + sync_arr_wake_threads_if_sema_free(); - if (sync_array_print_long_waits()) { + if (sync_array_print_long_waits(&waiter, &sema) + && sema == old_sema && os_thread_eq(waiter, old_waiter)) { fatal_cnt++; if (fatal_cnt > 10) { @@ -2242,6 +2249,8 @@ loop: } } else { fatal_cnt = 0; + old_waiter = waiter; + old_sema = sema; } /* Flush stderr so that a database user gets the output diff --git a/storage/innobase/sync/sync0arr.c b/storage/innobase/sync/sync0arr.c index 41d3492c8c9..93a7398f252 100644 --- a/storage/innobase/sync/sync0arr.c +++ b/storage/innobase/sync/sync0arr.c @@ -916,10 +916,12 @@ sync_arr_wake_threads_if_sema_free(void) Prints warnings of long semaphore waits to stderr. 
*/ ibool -sync_array_print_long_waits(void) -/*=============================*/ - /* out: TRUE if fatal semaphore wait threshold - was exceeded */ +sync_array_print_long_waits( +/*========================*/ + /* out: TRUE if fatal semaphore wait threshold + was exceeded */ + os_thread_id_t* waiter, /* out: longest waiting thread */ + const void** sema) /* out: longest-waited-for semaphore */ { sync_cell_t* cell; ibool old_val; @@ -927,24 +929,40 @@ sync_array_print_long_waits(void) ulint i; ulint fatal_timeout = srv_fatal_semaphore_wait_threshold; ibool fatal = FALSE; + double longest_diff = 0; for (i = 0; i < sync_primary_wait_array->n_cells; i++) { + double diff; + void* wait_object; + cell = sync_array_get_nth_cell(sync_primary_wait_array, i); - if (cell->wait_object != NULL && cell->waiting - && difftime(time(NULL), cell->reservation_time) > 240) { + wait_object = cell->wait_object; + + if (wait_object == NULL || !cell->waiting) { + + continue; + } + + diff = difftime(time(NULL), cell->reservation_time); + + if (diff > 240) { fputs("InnoDB: Warning: a long semaphore wait:\n", stderr); sync_array_cell_print(stderr, cell); noticed = TRUE; } - if (cell->wait_object != NULL && cell->waiting - && difftime(time(NULL), cell->reservation_time) - > fatal_timeout) { + if (diff > fatal_timeout) { fatal = TRUE; } + + if (diff > longest_diff) { + longest_diff = diff; + *sema = wait_object; + *waiter = cell->thread; + } } if (noticed) { diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index 7c82cd9c27f..100cf3690ce 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,8 @@ +2011-03-30 The InnoDB Team + + * srv/srv0srv.c, sync/sync0arr.h, sync/sync0arr.c: + Fix Bug#11877216 InnoDB too eager to commit suicide on a busy server + 2011-03-15 The InnoDB Team * btr/btr0cur.c, page/page0zip.c: diff --git a/storage/innodb_plugin/include/sync0arr.h b/storage/innodb_plugin/include/sync0arr.h index 5f1280f5e28..6e931346238 100644 --- a/storage/innodb_plugin/include/sync0arr.h +++ b/storage/innodb_plugin/include/sync0arr.h @@ -115,8 +115,11 @@ Prints warnings of long semaphore waits to stderr. @return TRUE if fatal semaphore wait threshold was exceeded */ UNIV_INTERN ibool -sync_array_print_long_waits(void); -/*=============================*/ +sync_array_print_long_waits( +/*========================*/ + os_thread_id_t* waiter, /*!< out: longest waiting thread */ + const void** sema) /*!< out: longest-waited-for semaphore */ + __attribute__((nonnull)); /********************************************************************//** Validates the integrity of the wait array. Checks that the number of reserved cells equals the count variable. 
*/ diff --git a/storage/innodb_plugin/srv/srv0srv.c b/storage/innodb_plugin/srv/srv0srv.c index 3cf17f33c40..b1fc1ac67fd 100644 --- a/storage/innodb_plugin/srv/srv0srv.c +++ b/storage/innodb_plugin/srv/srv0srv.c @@ -2236,6 +2236,12 @@ srv_error_monitor_thread( ulint fatal_cnt = 0; ib_uint64_t old_lsn; ib_uint64_t new_lsn; + /* longest waiting thread for a semaphore */ + os_thread_id_t waiter = os_thread_get_curr_id(); + os_thread_id_t old_waiter = waiter; + /* the semaphore that is being waited for */ + const void* sema = NULL; + const void* old_sema = NULL; old_lsn = srv_start_lsn; @@ -2284,7 +2290,8 @@ loop: sync_arr_wake_threads_if_sema_free(); - if (sync_array_print_long_waits()) { + if (sync_array_print_long_waits(&waiter, &sema) + && sema == old_sema && os_thread_eq(waiter, old_waiter)) { fatal_cnt++; if (fatal_cnt > 10) { @@ -2299,6 +2306,8 @@ loop: } } else { fatal_cnt = 0; + old_waiter = waiter; + old_sema = sema; } /* Flush stderr so that a database user gets the output diff --git a/storage/innodb_plugin/sync/sync0arr.c b/storage/innodb_plugin/sync/sync0arr.c index ad29b90d344..13970023573 100644 --- a/storage/innodb_plugin/sync/sync0arr.c +++ b/storage/innodb_plugin/sync/sync0arr.c @@ -914,8 +914,10 @@ Prints warnings of long semaphore waits to stderr. @return TRUE if fatal semaphore wait threshold was exceeded */ UNIV_INTERN ibool -sync_array_print_long_waits(void) -/*=============================*/ +sync_array_print_long_waits( +/*========================*/ + os_thread_id_t* waiter, /*!< out: longest waiting thread */ + const void** sema) /*!< out: longest-waited-for semaphore */ { sync_cell_t* cell; ibool old_val; @@ -923,24 +925,40 @@ sync_array_print_long_waits(void) ulint i; ulint fatal_timeout = srv_fatal_semaphore_wait_threshold; ibool fatal = FALSE; + double longest_diff = 0; for (i = 0; i < sync_primary_wait_array->n_cells; i++) { + double diff; + void* wait_object; + cell = sync_array_get_nth_cell(sync_primary_wait_array, i); - if (cell->wait_object != NULL && cell->waiting - && difftime(time(NULL), cell->reservation_time) > 240) { + wait_object = cell->wait_object; + + if (wait_object == NULL || !cell->waiting) { + + continue; + } + + diff = difftime(time(NULL), cell->reservation_time); + + if (diff > 240) { fputs("InnoDB: Warning: a long semaphore wait:\n", stderr); sync_array_cell_print(stderr, cell); noticed = TRUE; } - if (cell->wait_object != NULL && cell->waiting - && difftime(time(NULL), cell->reservation_time) - > fatal_timeout) { + if (diff > fatal_timeout) { fatal = TRUE; } + + if (diff > longest_diff) { + longest_diff = diff; + *sema = wait_object; + *waiter = cell->thread; + } } if (noticed) { -- cgit v1.2.1 From 4c1eb0c1719004b66187a166ddf0765cb481a927 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Wed, 30 Mar 2011 14:33:53 +0200 Subject: mtr: cleaned up some superfluos global warning suppressions --- .../extra/rpl_tests/rpl_extra_col_master.test | 6 +-- mysql-test/include/mix1.inc | 4 ++ mysql-test/include/mtr_warnings.sql | 46 +--------------------- mysql-test/r/ctype_cp932_binlog_stm.result | 1 + mysql-test/r/order_by.result | 1 + mysql-test/r/show_check.result | 1 + mysql-test/r/sp-destruct.result | 1 + .../suite/rpl/r/rpl_extra_col_master_innodb.result | 18 ++++----- .../suite/rpl/r/rpl_extra_col_master_myisam.result | 18 ++++----- mysql-test/t/ctype_cp932_binlog_stm.test | 2 + mysql-test/t/order_by.test | 3 +- mysql-test/t/show_check.test | 1 + mysql-test/t/sp-destruct.test | 1 + 13 files changed, 35 insertions(+), 68 deletions(-) diff --git 
a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test index 6dba4202260..9bab1192d97 100644 --- a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test +++ b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test @@ -123,9 +123,9 @@ SELECT f1,f2,f3,f4,f5,f6,f7,f8,f9, #connection slave; call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); -call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); -call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); +call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); +call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); +call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); sync_slave_with_master; --echo diff --git a/mysql-test/include/mix1.inc b/mysql-test/include/mix1.inc index 194d9e41108..10f0d4546ed 100644 --- a/mysql-test/include/mix1.inc +++ b/mysql-test/include/mix1.inc @@ -634,6 +634,10 @@ drop table t1; drop table bug29807; create table bug29807 (a int); drop table bug29807; +--disable_query_log +call mtr.add_suppression("InnoDB: Error: table .test...bug29807. does not exist in the InnoDB internal"); +call mtr.add_suppression("Cannot find or open table test\/bug29807 from"); +--enable_query_log # diff --git a/mysql-test/include/mtr_warnings.sql b/mysql-test/include/mtr_warnings.sql index 30919dd10dc..03148f09fac 100644 --- a/mysql-test/include/mtr_warnings.sql +++ b/mysql-test/include/mtr_warnings.sql @@ -53,7 +53,7 @@ END -- Insert patterns that should always be suppressed -- INSERT INTO global_suppressions VALUES - ("'SELECT UNIX_TIMESTAMP\\(\\)' failed on master"), + (".SELECT UNIX_TIMESTAMP... 
failed on master"), ("Aborted connection"), ("Client requested master to start replication from impossible position"), ("Could not find first log file name in binary log"), @@ -110,7 +110,6 @@ INSERT INTO global_suppressions VALUES ("Sort aborted"), ("Time-out in NDB"), ("Warning:\s+One can only use the --user.*root"), - ("Warning:\s+Setting lower_case_table_names=2"), ("Warning:\s+Table:.* on (delete|rename)"), ("You have an error in your SQL syntax"), ("deprecated"), @@ -123,55 +122,21 @@ INSERT INTO global_suppressions VALUES ("slave SQL thread aborted"), ("Slave: .*Duplicate entry"), - /* - Special case, made as specific as possible, for: - Bug #28436: Incorrect position in SHOW BINLOG EVENTS causes - server coredump - */ - - ("Error in Log_event::read_log_event\\\(\\\): 'Sanity check failed', data_len: 258, event_type: 49"), - ("Statement may not be safe to log in statement format"), - /* test case for Bug#bug29807 copies a stray frm into database */ - ("InnoDB: Error: table `test`.`bug29807` does not exist in the InnoDB internal"), - ("Cannot find or open table test\/bug29807 from"), - /* innodb foreign key tests that fail in ALTER or RENAME produce this */ ("InnoDB: Error: in ALTER TABLE `test`.`t[123]`"), ("InnoDB: Error: in RENAME TABLE table `test`.`t1`"), ("InnoDB: Error: table `test`.`t[123]` does not exist in the InnoDB internal"), - /* Test case for Bug#14233 produces the following warnings: */ - ("Stored routine 'test'.'bug14233_1': invalid value in column mysql.proc"), - ("Stored routine 'test'.'bug14233_2': invalid value in column mysql.proc"), - ("Stored routine 'test'.'bug14233_3': invalid value in column mysql.proc"), - /* BUG#32080 - Excessive warnings on Solaris: setrlimit could not change the size of core files */ ("setrlimit could not change the size of core files to 'infinity'"), - /* - rpl_extrColmaster_*.test, the slave thread produces warnings - when it get updates to a table that has more columns on the - master - */ - ("Slave: Unknown column 'c7' in 't15' Error_code: 1054"), - ("Slave: Can't DROP 'c7'.* 1091"), - ("Slave: Key column 'c6'.* 1072"), ("The slave I.O thread stops because a fatal error is encountered when it try to get the value of SERVER_ID variable from master."), - (".SELECT UNIX_TIMESTAMP... failed on master, do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"), - - /* Test case for Bug#31590 in order_by.test produces the following error */ - ("Out of sort memory; increase server sort buffer size"), - /* Special case for Bug #26402 in show_check.test - - Question marks are not valid file name parts on Windows. Ignore - this error message. - */ - ("Can't find file: '.\\\\test\\\\\\?{8}.frm'"), ("Slave: Unknown table 't1' Error_code: 1051"), /* Messages from valgrind */ @@ -189,15 +154,6 @@ INSERT INTO global_suppressions VALUES ("==[0-9]*== Warning: invalid file descriptor -1 in syscall write()"), ("==[0-9]*== Warning: invalid file descriptor -1 in syscall read()"), - /* - Transient network failures that cause warnings on reconnect. - BUG#47743 and BUG#47983. 
- */ - ("Slave I/O: Get master SERVER_ID failed with error:.*"), - ("Slave I/O: Get master clock failed with error:.*"), - ("Slave I/O: Get master COLLATION_SERVER failed with error:.*"), - ("Slave I/O: Get master TIME_ZONE failed with error:.*"), - ("THE_LAST_SUPPRESSION")|| diff --git a/mysql-test/r/ctype_cp932_binlog_stm.result b/mysql-test/r/ctype_cp932_binlog_stm.result index 8854a835e25..1841cc3ef69 100644 --- a/mysql-test/r/ctype_cp932_binlog_stm.result +++ b/mysql-test/r/ctype_cp932_binlog_stm.result @@ -44,6 +44,7 @@ master-bin.000001 # Query # # use `test`; INSERT INTO t4 VALUES ( NAME_CONST('in master-bin.000001 # Query # # use `test`; DROP PROCEDURE bug18293 master-bin.000001 # Query # # use `test`; DROP TABLE t4 End of 5.0 tests +call mtr.add_suppression("Error in Log_event::read_log_event\\\(\\\): 'Sanity check failed', data_len: 258, event_type: 49"); SHOW BINLOG EVENTS FROM 365; ERROR HY000: Error when executing command SHOW BINLOG EVENTS: Wrong offset or I/O error Bug#44352 UPPER/LOWER function doesn't work correctly on cp932 and sjis environment. diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index 30879af418a..90b03711191 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -1428,6 +1428,7 @@ set session max_sort_length= 2180; select * from t1 order by b; ERROR HY001: Out of sort memory; increase server sort buffer size drop table t1; +call mtr.add_suppression("Out of sort memory; increase server sort buffer size"); # # Bug #39844: Query Crash Mysql Server 5.0.67 # diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index 1aa3d62fc70..7cb3f696449 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -1339,6 +1339,7 @@ drop table `été`; set names latin1; show columns from `#mysql50#????????`; Got one of the listed errors +call mtr.add_suppression("Can.t find file: '.\\\\test\\\\\\?{8}.frm'"); DROP TABLE IF EXISTS t1; DROP PROCEDURE IF EXISTS p1; CREATE TABLE t1(c1 INT); diff --git a/mysql-test/r/sp-destruct.result b/mysql-test/r/sp-destruct.result index b6891df2420..a9db461e84e 100644 --- a/mysql-test/r/sp-destruct.result +++ b/mysql-test/r/sp-destruct.result @@ -1,4 +1,5 @@ call mtr.add_suppression("Column count of mysql.proc is wrong. Expected 20, found 19. The table is probably corrupted"); +call mtr.add_suppression("Stored routine .test...bug14233_[123].: invalid value in column mysql.proc"); use test; drop procedure if exists bug14233; drop function if exists bug14233; diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result index f235c68cc95..160f970fd5e 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result @@ -59,9 +59,9 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); -call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); -call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. 
on query.* 1072"); +call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); +call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); +call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -934,9 +934,9 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); -call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); -call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); +call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); +call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); +call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -1809,9 +1809,9 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); -call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); -call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); +call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); +call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); +call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result index 52f4a7a8453..11356dd8620 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result @@ -59,9 +59,9 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); -call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); -call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); +call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); +call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); +call mtr.add_suppression("Slave.*Key column .c6. 
doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -934,9 +934,9 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); -call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); -call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); +call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); +call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); +call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -1809,9 +1809,9 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave SQL.*Error .Can.t DROP .c7.; check that column.key exists. on query.* 1091"); -call mtr.add_suppression("Slave SQL.*Error .Unknown column .c7. in .t15.. on query.* 1054"); -call mtr.add_suppression("Slave SQL.*Error .Key column .c6. doesn.t exist in table. on query.* 1072"); +call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); +call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); +call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * diff --git a/mysql-test/t/ctype_cp932_binlog_stm.test b/mysql-test/t/ctype_cp932_binlog_stm.test index f3038ccfa61..95252a95368 100644 --- a/mysql-test/t/ctype_cp932_binlog_stm.test +++ b/mysql-test/t/ctype_cp932_binlog_stm.test @@ -33,6 +33,8 @@ delimiter ;| # Note: 364 is a magic position (found experimentally, depends on # the log's contents) that caused the server crash. +call mtr.add_suppression("Error in Log_event::read_log_event\\\(\\\): 'Sanity check failed', data_len: 258, event_type: 49"); + --error 1220 SHOW BINLOG EVENTS FROM 365; diff --git a/mysql-test/t/order_by.test b/mysql-test/t/order_by.test index 1064320b65c..e310d960c97 100644 --- a/mysql-test/t/order_by.test +++ b/mysql-test/t/order_by.test @@ -846,8 +846,7 @@ set session max_sort_length= 2180; --error 1038 select * from t1 order by b; drop table t1; - - +call mtr.add_suppression("Out of sort memory; increase server sort buffer size"); --echo # --echo # Bug #39844: Query Crash Mysql Server 5.0.67 --echo # diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index d46261f38d2..e5ca35bda32 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -1064,6 +1064,7 @@ set names latin1; # --error ER_NO_SUCH_TABLE,ER_FILE_NOT_FOUND show columns from `#mysql50#????????`; +call mtr.add_suppression("Can.t find file: '.\\\\test\\\\\\?{8}.frm'"); # # SHOW CREATE TRIGGER test. 
diff --git a/mysql-test/t/sp-destruct.test b/mysql-test/t/sp-destruct.test index 720c24b2c24..b006a36b8fd 100644 --- a/mysql-test/t/sp-destruct.test +++ b/mysql-test/t/sp-destruct.test @@ -14,6 +14,7 @@ # Supress warnings written to the log file call mtr.add_suppression("Column count of mysql.proc is wrong. Expected 20, found 19. The table is probably corrupted"); +call mtr.add_suppression("Stored routine .test...bug14233_[123].: invalid value in column mysql.proc"); # Backup proc table let $MYSQLD_DATADIR= `select @@datadir`; -- cgit v1.2.1 From b8faa8f2c69a13c83d763f8d8605dcf3612c1257 Mon Sep 17 00:00:00 2001 From: Magne Mahre Date: Wed, 30 Mar 2011 16:14:13 +0200 Subject: Fix-up after commit of Bug#11900714 The patch fixes a build problem on MacOSX, where the compiler complains about unused parameters. --- mysys/my_net.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mysys/my_net.c b/mysys/my_net.c index 3d139bb46c3..e2cc0679134 100644 --- a/mysys/my_net.c +++ b/mysys/my_net.c @@ -112,8 +112,10 @@ extern pthread_mutex_t LOCK_gethostbyname_r; */ struct hostent *my_gethostbyname_r(const char *name, - struct hostent *result, char *buffer, - int buflen, int *h_errnop) + struct hostent *result __attribute__((unused)), + char *buffer __attribute__((unused)), + int buflen __attribute__((unused)), + int *h_errnop) { struct hostent *hp; pthread_mutex_lock(&LOCK_gethostbyname_r); -- cgit v1.2.1 From 997eb49e49077fe3f65cfdf488f1f8807ba4b311 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Thu, 31 Mar 2011 10:33:07 +0200 Subject: Small followup fix after MTR warning cleanup --- mysql-test/extra/rpl_tests/rpl_extra_col_master.test | 3 +++ mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result | 12 ------------ mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result | 12 ------------ 3 files changed, 3 insertions(+), 24 deletions(-) diff --git a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test index 9bab1192d97..981adcf6d54 100644 --- a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test +++ b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test @@ -122,10 +122,13 @@ SELECT f1,f2,f3,f4,f5,f6,f7,f8,f9, #connection slave; +--disable_query_log call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); call mtr.add_suppression("Slave.*Key column .c6. 
doesn.t exist in table.* Error_code: 1072"); +call mtr.add_suppression("Slave I/O: Get master clock failed with error:.* Error_code: 2013"); +--enable_query_log sync_slave_with_master; --echo diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result index 160f970fd5e..affb179d50e 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result @@ -58,10 +58,6 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 -call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); -call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); -call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -933,10 +929,6 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 -call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); -call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); -call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -1808,10 +1800,6 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 -call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); -call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); -call mtr.add_suppression("Slave.*Key column .c6. 
doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result index 11356dd8620..8aeb5bdc1c9 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result @@ -58,10 +58,6 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 -call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); -call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); -call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -933,10 +929,6 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 -call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); -call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); -call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * @@ -1808,10 +1800,6 @@ f1 f2 f3 f4 f5 f6 f7 f8 f9 hex(f10) hex(f11) 27 27 27 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 29 29 29 second 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 30 30 30 next 2 kaks 2 got stolen from the paradise very fat blob 1555 123456 -call mtr.add_suppression("Slave SQL.*Table definition on master and slave does not match: Column 2 type mismatch.* 1535"); -call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* Error_code: 1091"); -call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); -call mtr.add_suppression("Slave.*Key column .c6. doesn.t exist in table.* Error_code: 1072"); * Select count and 20 rows from Slave * -- cgit v1.2.1 From fc7a12c05d179bf749a650b79c86574d095458b1 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Thu, 31 Mar 2011 15:48:05 +0200 Subject: One more test suppression for rpl_extra_col_master tests --- mysql-test/extra/rpl_tests/rpl_extra_col_master.test | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test index 981adcf6d54..1c103512318 100644 --- a/mysql-test/extra/rpl_tests/rpl_extra_col_master.test +++ b/mysql-test/extra/rpl_tests/rpl_extra_col_master.test @@ -128,6 +128,7 @@ call mtr.add_suppression("Slave.*Can.t DROP .c7.; check that column.key exists.* call mtr.add_suppression("Slave.*Unknown column .c7. in .t15.* Error_code: 1054"); call mtr.add_suppression("Slave.*Key column .c6. 
doesn.t exist in table.* Error_code: 1072"); call mtr.add_suppression("Slave I/O: Get master clock failed with error:.* Error_code: 2013"); +call mtr.add_suppression("Slave I/O: Get master SERVER_ID failed with error:.* Error_code: 2013"); --enable_query_log sync_slave_with_master; -- cgit v1.2.1 From 7aa81e2a02e78200eec105b968bda675af6f4987 Mon Sep 17 00:00:00 2001 From: Gleb Shchepa Date: Thu, 31 Mar 2011 22:59:11 +0400 Subject: Bug #11766094 - 59132: MIN() AND MAX() REMOVE UNSIGNEDNESS In the string context the MIN() and MAX() functions don't take into account the unsignedness of the UNSIGNED BIGINT argument column. I.e.: CREATE TABLE t1 (a BIGINT UNSIGNED); INSERT INTO t1 VALUES (18446668621106209655); SELECT CONCAT(MAX(a)) FROM t1; returns -75452603341961. mysql-test/r/func_group.result: Test case for bug #11766094. mysql-test/t/func_group.test: Test case for bug #11766094. sql/item.cc: Bug #11766094 - 59132: MIN() AND MAX() REMOVE UNSIGNEDNESS The Item_cache_int::val_str() method has been modified to take into account the unsigned_flag value when converting data to string. --- mysql-test/r/func_group.result | 9 +++++++++ mysql-test/t/func_group.test | 10 ++++++++++ sql/item.cc | 2 +- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 1a21fb5872f..69bce1c8bd8 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -1737,4 +1737,13 @@ SELECT MIN(GET_LOCK('aaaaaaaaaaaaaaaaa',0) / '0b11111111111111111111111111111111 SELECT MIN(GET_LOCK('aaaaaaaaaaaaaaaaa',0) / '0b1111111111111111111111111111111111111111111111111111111111111111111111111' ^ (RAND())); SELECT RELEASE_LOCK('aaaaaaaaaaaaaaaaa'); # +# Bug #11766094 - 59132: MIN() AND MAX() REMOVE UNSIGNEDNESS +# +CREATE TABLE t1 (a BIGINT UNSIGNED); +INSERT INTO t1 VALUES (18446668621106209655); +SELECT MAX(LENGTH(a)), LENGTH(MAX(a)), MIN(a), MAX(a), CONCAT(MIN(a)), CONCAT(MAX(a)) FROM t1; +MAX(LENGTH(a)) LENGTH(MAX(a)) MIN(a) MAX(a) CONCAT(MIN(a)) CONCAT(MAX(a)) +20 20 18446668621106209655 18446668621106209655 18446668621106209655 18446668621106209655 +DROP TABLE t1; +# End of 5.1 tests diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 8839a28b9dd..600b46fcde6 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -1117,6 +1117,16 @@ SELECT RELEASE_LOCK('aaaaaaaaaaaaaaaaa'); --enable_result_log + +--echo # +--echo # Bug #11766094 - 59132: MIN() AND MAX() REMOVE UNSIGNEDNESS +--echo # + +CREATE TABLE t1 (a BIGINT UNSIGNED); +INSERT INTO t1 VALUES (18446668621106209655); +SELECT MAX(LENGTH(a)), LENGTH(MAX(a)), MIN(a), MAX(a), CONCAT(MIN(a)), CONCAT(MAX(a)) FROM t1; +DROP TABLE t1; + --echo # --echo End of 5.1 tests diff --git a/sql/item.cc b/sql/item.cc index f90cf562c0b..24c3107ece9 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -7109,7 +7109,7 @@ String *Item_cache_int::val_str(String *str) DBUG_ASSERT(fixed == 1); if (!value_cached && !cache_value()) return NULL; - str->set(value, default_charset()); + str->set_int(value, unsigned_flag, default_charset()); return str; } -- cgit v1.2.1 From 5321b3a57a5191471cba0db85a11e21fb702200a Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Mon, 4 Apr 2011 16:04:15 +0300 Subject: Bug #11758687: 50924: object names not resolved correctly on lctn2 systems There was a local variable in get_all_tables() to store the "original" value of the database name as it can get lowercased depending on the lower_case_table_name value. 
get_all_tables() iterates over database names and for each database iterates over the tables in it. The "original" db name was assigned in the table names loop. Thus the first table is ok, but the second and subsequent tables get the lowercased name from processing the first table. Fixed by moving the assignment of the original database name from the inner (table name) to the outer (database name) loop. Test suite added. --- mysql-test/r/lowercase_table2.result | 30 +++++++++++++++++++++++++++++ mysql-test/t/lowercase_table2.test | 37 ++++++++++++++++++++++++++++++++++++ mysys/my_net.c | 11 ++++++----- sql/sql_show.cc | 12 +++++++----- 4 files changed, 80 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/lowercase_table2.result b/mysql-test/r/lowercase_table2.result index c9a46b70fab..ee555a9006c 100644 --- a/mysql-test/r/lowercase_table2.result +++ b/mysql-test/r/lowercase_table2.result @@ -175,3 +175,33 @@ TABLE_SCHEMA TABLE_NAME mysqltest_lc2 myUC use test; drop database mysqltest_LC2; +# +# Bug #11758687: 50924: object names not resolved correctly +# on lctn2 systems +# +CREATE DATABASE BUP_XPFM_COMPAT_DB2; +CREATE TABLE BUP_XPFM_COMPAT_DB2.TABLE2 (c13 INT) DEFAULT CHARSET latin1; +CREATE TABLE BUP_XPFM_COMPAT_DB2.table1 (c13 INT) DEFAULT CHARSET latin1; +CREATE TABLE bup_xpfm_compat_db2.table3 (c13 INT) DEFAULT CHARSET latin1; +CREATE TRIGGER BUP_XPFM_COMPAT_DB2.trigger1 AFTER INSERT +ON BUP_XPFM_COMPAT_DB2.table1 FOR EACH ROW +update BUP_XPFM_COMPAT_DB2.table1 set c13=12; +| +CREATE TRIGGER BUP_XPFM_COMPAT_DB2.TRIGGER2 AFTER INSERT +ON BUP_XPFM_COMPAT_DB2.TABLE2 FOR EACH ROW +update BUP_XPFM_COMPAT_DB2.table1 set c13=12; +| +CREATE TRIGGER BUP_XPFM_COMPAT_DB2.TrigGer3 AFTER INSERT +ON BUP_XPFM_COMPAT_DB2.TaBle3 FOR EACH ROW +update BUP_XPFM_COMPAT_DB2.table1 set c13=12; +| +SELECT trigger_schema, trigger_name, event_object_table FROM +INFORMATION_SCHEMA.TRIGGERS +WHERE trigger_schema COLLATE utf8_bin = 'BUP_XPFM_COMPAT_DB2' + ORDER BY trigger_schema, trigger_name; +trigger_schema trigger_name event_object_table +BUP_XPFM_COMPAT_DB2 trigger1 table1 +BUP_XPFM_COMPAT_DB2 TRIGGER2 TABLE2 +BUP_XPFM_COMPAT_DB2 TrigGer3 table3 +DROP DATABASE BUP_XPFM_COMPAT_DB2; +End of 5.1 tests diff --git a/mysql-test/t/lowercase_table2.test b/mysql-test/t/lowercase_table2.test index 521df01cc9b..b595d4b1775 100644 --- a/mysql-test/t/lowercase_table2.test +++ b/mysql-test/t/lowercase_table2.test @@ -150,3 +150,40 @@ select TABLE_SCHEMA,TABLE_NAME FROM information_schema.TABLES where TABLE_SCHEMA ='mysqltest_LC2'; use test; drop database mysqltest_LC2; + + +--echo # +--echo # Bug #11758687: 50924: object names not resolved correctly +--echo # on lctn2 systems +--echo # + +CREATE DATABASE BUP_XPFM_COMPAT_DB2; + +CREATE TABLE BUP_XPFM_COMPAT_DB2.TABLE2 (c13 INT) DEFAULT CHARSET latin1; +CREATE TABLE BUP_XPFM_COMPAT_DB2.table1 (c13 INT) DEFAULT CHARSET latin1; +CREATE TABLE bup_xpfm_compat_db2.table3 (c13 INT) DEFAULT CHARSET latin1; + +delimiter |; +# +CREATE TRIGGER BUP_XPFM_COMPAT_DB2.trigger1 AFTER INSERT + ON BUP_XPFM_COMPAT_DB2.table1 FOR EACH ROW + update BUP_XPFM_COMPAT_DB2.table1 set c13=12; +| +CREATE TRIGGER BUP_XPFM_COMPAT_DB2.TRIGGER2 AFTER INSERT + ON BUP_XPFM_COMPAT_DB2.TABLE2 FOR EACH ROW + update BUP_XPFM_COMPAT_DB2.table1 set c13=12; +| +CREATE TRIGGER BUP_XPFM_COMPAT_DB2.TrigGer3 AFTER INSERT + ON BUP_XPFM_COMPAT_DB2.TaBle3 FOR EACH ROW + update BUP_XPFM_COMPAT_DB2.table1 set c13=12; +| +delimiter ;| + +SELECT trigger_schema, trigger_name, event_object_table FROM 
+INFORMATION_SCHEMA.TRIGGERS + WHERE trigger_schema COLLATE utf8_bin = 'BUP_XPFM_COMPAT_DB2' + ORDER BY trigger_schema, trigger_name; + +DROP DATABASE BUP_XPFM_COMPAT_DB2; + +--echo End of 5.1 tests diff --git a/mysys/my_net.c b/mysys/my_net.c index e2cc0679134..820abf32386 100644 --- a/mysys/my_net.c +++ b/mysys/my_net.c @@ -111,11 +111,12 @@ extern pthread_mutex_t LOCK_gethostbyname_r; is finished with the structure. */ -struct hostent *my_gethostbyname_r(const char *name, - struct hostent *result __attribute__((unused)), - char *buffer __attribute__((unused)), - int buflen __attribute__((unused)), - int *h_errnop) +struct hostent * +my_gethostbyname_r(const char *name, + struct hostent *result __attribute__((unused)), + char *buffer __attribute__((unused)), + int buflen __attribute__((unused)), + int *h_errnop) { struct hostent *hp; pthread_mutex_lock(&LOCK_gethostbyname_r); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 1524a8fb87f..5b835096042 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3399,6 +3399,12 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) it.rewind(); /* To get access to new elements in basis list */ while ((db_name= it++)) { + LEX_STRING orig_db_name; + + /* db_name can be changed in make_table_list() func */ + if (!thd->make_lex_string(&orig_db_name, db_name->str, + db_name->length, FALSE)) + goto err; #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!(check_access(thd,SELECT_ACL, db_name->str, &thd->col_access, 0, 1, with_i_schema) || @@ -3461,17 +3467,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) } int res; - LEX_STRING tmp_lex_string, orig_db_name; + LEX_STRING tmp_lex_string; /* Set the parent lex of 'sel' because it is needed by sel.init_query() which is called inside make_table_list. */ thd->no_warnings_for_error= 1; sel.parent_lex= lex; - /* db_name can be changed in make_table_list() func */ - if (!thd->make_lex_string(&orig_db_name, db_name->str, - db_name->length, FALSE)) - goto err; if (make_table_list(thd, &sel, db_name, table_name)) goto err; TABLE_LIST *show_table_list= sel.table_list.first; -- cgit v1.2.1 From 402217544e74e17236128156951371a86c873e90 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Tue, 5 Apr 2011 11:08:36 +0300 Subject: Add the testcase for Bug#59410 to 5.1/builtin Bug#59410 read uncommitted: unlock row could not find a 3 mode lock on the record This bug is present only in 5.6 but I am adding the test case to earlier versions to ensure it never appears in earlier versions too. 
--- mysql-test/suite/innodb/r/innodb_bug59410.result | 17 +++++++++++++++++ mysql-test/suite/innodb/t/innodb_bug59410.test | 24 ++++++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 mysql-test/suite/innodb/r/innodb_bug59410.result create mode 100644 mysql-test/suite/innodb/t/innodb_bug59410.test diff --git a/mysql-test/suite/innodb/r/innodb_bug59410.result b/mysql-test/suite/innodb/r/innodb_bug59410.result new file mode 100644 index 00000000000..494d601ba4f --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_bug59410.result @@ -0,0 +1,17 @@ +create table `bug59410_1`(`a` int)engine=innodb; +insert into `bug59410_1` values (1),(2),(3); +select 1 from `bug59410_1` where `a` <> any ( +select 1 from `bug59410_1` where `a` <> 1 for update) +for update; +1 +1 +1 +drop table bug59410_1; +create table bug59410_2(`a` char(1),`b` int)engine=innodb; +insert into bug59410_2 values('0',0); +set transaction isolation level read uncommitted; +start transaction; +set @a=(select b from bug59410_2 where +(select 1 from bug59410_2 where a group by @a=b) +group by @a:=b); +drop table bug59410_2; diff --git a/mysql-test/suite/innodb/t/innodb_bug59410.test b/mysql-test/suite/innodb/t/innodb_bug59410.test new file mode 100644 index 00000000000..30bb0642679 --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_bug59410.test @@ -0,0 +1,24 @@ +# +# Bug#59410 read uncommitted: unlock row could not find a 3 mode lock on the record +# +-- source include/have_innodb.inc + +# only interested that the following do not produce something like +# InnoDB: Error: unlock row could not find a 2 mode lock on the record +# in the error log + +create table `bug59410_1`(`a` int)engine=innodb; +insert into `bug59410_1` values (1),(2),(3); +select 1 from `bug59410_1` where `a` <> any ( +select 1 from `bug59410_1` where `a` <> 1 for update) +for update; +drop table bug59410_1; + +create table bug59410_2(`a` char(1),`b` int)engine=innodb; +insert into bug59410_2 values('0',0); +set transaction isolation level read uncommitted; +start transaction; +set @a=(select b from bug59410_2 where +(select 1 from bug59410_2 where a group by @a=b) +group by @a:=b); +drop table bug59410_2; -- cgit v1.2.1 From b93cc42623ba99237c1c625e19e34a1f4419e899 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Tue, 5 Apr 2011 11:20:20 +0300 Subject: Add the testcase for Bug#59410 to 5.1/InnoDB Plugin Bug#59410 read uncommitted: unlock row could not find a 3 mode lock on the record This bug is present only in 5.6 but I am adding the test case to earlier versions to ensure it never appears in earlier versions too. 
--- .../suite/innodb_plugin/r/innodb_bug59410.result | 17 +++++++++++++++ .../suite/innodb_plugin/t/innodb_bug59410.test | 24 ++++++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 mysql-test/suite/innodb_plugin/r/innodb_bug59410.result create mode 100644 mysql-test/suite/innodb_plugin/t/innodb_bug59410.test diff --git a/mysql-test/suite/innodb_plugin/r/innodb_bug59410.result b/mysql-test/suite/innodb_plugin/r/innodb_bug59410.result new file mode 100644 index 00000000000..494d601ba4f --- /dev/null +++ b/mysql-test/suite/innodb_plugin/r/innodb_bug59410.result @@ -0,0 +1,17 @@ +create table `bug59410_1`(`a` int)engine=innodb; +insert into `bug59410_1` values (1),(2),(3); +select 1 from `bug59410_1` where `a` <> any ( +select 1 from `bug59410_1` where `a` <> 1 for update) +for update; +1 +1 +1 +drop table bug59410_1; +create table bug59410_2(`a` char(1),`b` int)engine=innodb; +insert into bug59410_2 values('0',0); +set transaction isolation level read uncommitted; +start transaction; +set @a=(select b from bug59410_2 where +(select 1 from bug59410_2 where a group by @a=b) +group by @a:=b); +drop table bug59410_2; diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug59410.test b/mysql-test/suite/innodb_plugin/t/innodb_bug59410.test new file mode 100644 index 00000000000..30bb0642679 --- /dev/null +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug59410.test @@ -0,0 +1,24 @@ +# +# Bug#59410 read uncommitted: unlock row could not find a 3 mode lock on the record +# +-- source include/have_innodb.inc + +# only interested that the following do not produce something like +# InnoDB: Error: unlock row could not find a 2 mode lock on the record +# in the error log + +create table `bug59410_1`(`a` int)engine=innodb; +insert into `bug59410_1` values (1),(2),(3); +select 1 from `bug59410_1` where `a` <> any ( +select 1 from `bug59410_1` where `a` <> 1 for update) +for update; +drop table bug59410_1; + +create table bug59410_2(`a` char(1),`b` int)engine=innodb; +insert into bug59410_2 values('0',0); +set transaction isolation level read uncommitted; +start transaction; +set @a=(select b from bug59410_2 where +(select 1 from bug59410_2 where a group by @a=b) +group by @a:=b); +drop table bug59410_2; -- cgit v1.2.1 From acedd7a4a5542288b30969ff5a6f11566458e9d4 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Wed, 6 Apr 2011 14:38:24 +0300 Subject: Load the innodb plugin instead of builtin in innodb_plugin.innodb_bug59410 Spotted by: Marko --- mysql-test/suite/innodb_plugin/t/innodb_bug59410.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug59410.test b/mysql-test/suite/innodb_plugin/t/innodb_bug59410.test index 30bb0642679..6eabe0a8403 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb_bug59410.test +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug59410.test @@ -1,7 +1,7 @@ # # Bug#59410 read uncommitted: unlock row could not find a 3 mode lock on the record # --- source include/have_innodb.inc +-- source include/have_innodb_plugin.inc # only interested that the following do not produce something like # InnoDB: Error: unlock row could not find a 2 mode lock on the record -- cgit v1.2.1 From 8028a1043c6a7662594d6d465f11e30a846df534 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Thu, 7 Apr 2011 14:44:26 +0300 Subject: fixed a missing warning --- mysql-test/r/cast.result | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/r/cast.result b/mysql-test/r/cast.result index 974a6bee63f..44d57055e7f 100644 --- 
a/mysql-test/r/cast.result +++ b/mysql-test/r/cast.result @@ -455,6 +455,8 @@ DROP TABLE t1; # Bug #11765023: 57934: DOS POSSIBLE SINCE BINARY CASTING # DOESN'T ADHERE TO MAX_ALLOWED_PACKET SET @@GLOBAL.max_allowed_packet=2048; +Warnings: +Warning 1105 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length' SELECT CONVERT('a', BINARY(2049)); CONVERT('a', BINARY(2049)) NULL -- cgit v1.2.1 From dc65d9217c36a0edc8fab0a4a09fbeda7a5c278d Mon Sep 17 00:00:00 2001 From: Guilhem Bichot Date: Thu, 7 Apr 2011 15:09:19 +0200 Subject: Fix for Bug#11765141 - "58072: LOAD DATA INFILE: LEAKS IO CACHE MEMORY WHEN ERROR OCCURS" mysql-test/t/loaddata.test: test for the bug; without the fix, running the test with --valgrind would show the leak and make the test fail. sql/sql_load.cc: * In the READ_INFO class, 'need_end_io_cache' is true as long as init_io_cache() was called, so if it's true, we need to call end_io_cache() to free the memory allocated by init_io_cache(), no matter the value of 'error'. In the bug's scenario, 'error' was set to true in read_sep_field() because '1' (read from the file) isn't suitable to load into a geometric column. Because of 'error', end_io_cache() was not called. Note: end_io_cache() calls my_b_flush_io_cache(), which will do nothing wrong given that the file is opened for reads only; see the init_io_cache() call, which uses only those read-only types: (get_it_from_net) ? READ_NET : (is_fifo ? READ_FIFO : READ_CACHE). If the cache were instead used to write to the file, my_b_flush_io_cache() might write to it, and writing to the file when 'error' is true would be questionable. But here there is no such problem. * Now that 'need_end_io_cache' is checked even if 'error' is true, it needs to be initialized in all cases. * Bonus: move some variables to the initialization list.
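The cleanup rule this commit applies -- release a resource whenever it was successfully acquired, keyed on an "initialized" flag set up front rather than on a later "error" flag -- generalizes beyond READ_INFO. The following is a minimal standalone C++ sketch of that pattern, not the server code; the class name, buffer size and input path are illustrative only.

#include <cstdio>
#include <cstdlib>

class Reader {
public:
  explicit Reader(const char *path)
    : file_(std::fopen(path, "rb")),
      cache_(NULL),
      cache_initialized_(false),
      error_(false)            // every flag gets a defined value up front
  {
    if (file_ != NULL) {
      cache_ = static_cast<char*>(std::malloc(4096));
      cache_initialized_ = (cache_ != NULL);
    }
  }

  void read_rows() {
    // A bad row only records a data error; it does not change ownership
    // of the cache, so it must not influence the cleanup below.
    error_ = true;
  }

  ~Reader() {
    if (cache_initialized_)    // keyed on "was it allocated?", not on error_
      std::free(cache_);
    if (file_ != NULL)
      std::fclose(file_);
  }

private:
  std::FILE *file_;
  char      *cache_;
  bool       cache_initialized_;
  bool       error_;
};

int main() {
  Reader r("/etc/hosts");      // illustrative input file
  r.read_rows();               // the "error" does not leak the cache
  return 0;
}

Guarding the destructor on error_ instead of cache_initialized_ is exactly the shape of the leak fixed above.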
--- mysql-test/r/loaddata.result | 9 +++++++++ mysql-test/t/loaddata.test | 15 +++++++++++++++ sql/sql_load.cc | 9 ++++----- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result index 3a421b3ea3f..59a1b904744 100644 --- a/mysql-test/r/loaddata.result +++ b/mysql-test/r/loaddata.result @@ -539,4 +539,13 @@ CREATE TABLE t1(f1 INT); SELECT 0xE1BB30 INTO OUTFILE 't1.dat'; LOAD DATA INFILE 't1.dat' IGNORE INTO TABLE t1 CHARACTER SET utf8; DROP TABLE t1; +# +# Bug#11765141 - 58072: LOAD DATA INFILE: LEAKS IO CACHE MEMORY +# WHEN ERROR OCCURS +# +SELECT '1\n' INTO DUMPFILE 'MYSQLTEST_VARDIR/tmp/bug11735141.txt'; +create table t1(a point); +LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/bug11735141.txt' INTO TABLE t1; +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +drop table t1; End of 5.1 tests diff --git a/mysql-test/t/loaddata.test b/mysql-test/t/loaddata.test index e0764b67ec0..3d0fdea05ed 100644 --- a/mysql-test/t/loaddata.test +++ b/mysql-test/t/loaddata.test @@ -625,4 +625,19 @@ DROP TABLE t1; let $MYSQLD_DATADIR= `select @@datadir`; remove_file $MYSQLD_DATADIR/test/t1.dat; +--echo # +--echo # Bug#11765141 - 58072: LOAD DATA INFILE: LEAKS IO CACHE MEMORY +--echo # WHEN ERROR OCCURS +--echo # + +--let $file=$MYSQLTEST_VARDIR/tmp/bug11735141.txt +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--eval SELECT '1\n' INTO DUMPFILE '$file' + +create table t1(a point); +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--error ER_CANT_CREATE_GEOMETRY_OBJECT +--eval LOAD DATA INFILE '$file' INTO TABLE t1 +drop table t1; + --echo End of 5.1 tests diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 513cd62b510..b9b7bd74f6c 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1075,9 +1075,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par),escape_char(escape) + :file(file_par), buff_length(tot_length), escape_char(escape), + found_end_of_line(false), eof(false), need_end_io_cache(false), + error(false), line_cuted(false), found_null(false), read_charset(cs) { - read_charset= cs; field_term_ptr=(char*) field_term.ptr(); field_term_length= field_term.length(); line_term_ptr=(char*) line_term.ptr(); @@ -1104,8 +1105,6 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, (uchar) enclosed_par[0] : INT_MAX; field_term_char= field_term_length ? (uchar) field_term_ptr[0] : INT_MAX; line_term_char= line_term_length ? (uchar) line_term_ptr[0] : INT_MAX; - error=eof=found_end_of_line=found_null=line_cuted=0; - buff_length=tot_length; /* Set of a stack for unget if long terminators */ @@ -1151,7 +1150,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, READ_INFO::~READ_INFO() { - if (!error && need_end_io_cache) + if (need_end_io_cache) ::end_io_cache(&cache); my_free(buffer, MYF(MY_ALLOW_ZERO_PTR)); -- cgit v1.2.1 From 0ff2a182b67ea433d9070628696e648dff170fdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 7 Apr 2011 21:12:54 +0300 Subject: Bug #11766513 - 59641: Prepared XA transaction in system after hard crash causes future shutdown hang InnoDB would hang on shutdown if any XA transactions exist in the system in the PREPARED state. 
This has been masked by the fact that MySQL would roll back any PREPARED transaction on shutdown, in the spirit of Bug #12161 Xa recovery and client disconnection. [mysql-test-run] do_shutdown_server: Interpret --shutdown_server 0 as a request to kill the server immediately without initiating a shutdown procedure. xid_cache_insert(): Initialize XID_STATE::rm_error in order to avoid a bogus error message on XA ROLLBACK of a recovered PREPARED transaction. innobase_commit_by_xid(), innobase_rollback_by_xid(): Free the InnoDB transaction object after rolling back a PREPARED transaction. trx_get_trx_by_xid(): Only consider transactions whose trx->is_prepared flag is set. The MySQL layer seems to prevent attempts to roll back connected transactions that are in the PREPARED state from another connection, but it is better to play it safe. The is_prepared flag was introduced in the InnoDB Plugin. trx_n_prepared: A new counter, counting the number of InnoDB transactions in the PREPARED state. logs_empty_and_mark_files_at_shutdown(): On shutdown, allow trx_n_prepared transactions to exist in the system. trx_undo_free_prepared(), trx_free_prepared(): New functions, to free the memory objects of PREPARED transactions on shutdown. This is not needed in the built-in InnoDB, because it would collect all allocated memory on shutdown. The InnoDB Plugin needs this because of innodb_use_sys_malloc. trx_sys_close(): Invoke trx_free_prepared() on all remaining transactions. --- client/mysqltest.cc | 12 ++-- mysql-test/suite/innodb/r/innodb_bug59641.result | 57 ++++++++++++++++++ mysql-test/suite/innodb/t/innodb_bug59641.test | 66 ++++++++++++++++++++ .../suite/innodb_plugin/r/innodb_bug59641.result | 57 ++++++++++++++++++ .../suite/innodb_plugin/t/innodb_bug59641.test | 70 ++++++++++++++++++++++ sql/sql_class.cc | 1 + storage/innobase/handler/ha_innodb.cc | 6 +- storage/innobase/include/trx0trx.h | 5 ++ storage/innobase/log/log0log.c | 9 +-- storage/innobase/trx/trx0trx.c | 11 ++++ storage/innodb_plugin/ChangeLog | 7 +++ storage/innodb_plugin/handler/ha_innodb.cc | 6 +- storage/innodb_plugin/include/trx0trx.h | 11 ++++ storage/innodb_plugin/include/trx0undo.h | 9 +++ storage/innodb_plugin/log/log0log.c | 9 +-- storage/innodb_plugin/trx/trx0sys.c | 10 ++++ storage/innodb_plugin/trx/trx0trx.c | 70 +++++++++++++++++++++- storage/innodb_plugin/trx/trx0undo.c | 28 +++++++++ 18 files changed, 425 insertions(+), 19 deletions(-) create mode 100644 mysql-test/suite/innodb/r/innodb_bug59641.result create mode 100644 mysql-test/suite/innodb/t/innodb_bug59641.test create mode 100644 mysql-test/suite/innodb_plugin/r/innodb_bug59641.result create mode 100644 mysql-test/suite/innodb_plugin/t/innodb_bug59641.test diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 52c76b8c68e..a1813838a24 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -4461,13 +4461,14 @@ static int my_kill(int pid, int sig) command called command DESCRIPTION - shutdown [] + shutdown_server [] */ void do_shutdown_server(struct st_command *command) { - int timeout=60, pid; + long timeout=60; + int pid; DYNAMIC_STRING ds_pidfile_name; MYSQL* mysql = &cur_con->mysql; static DYNAMIC_STRING ds_timeout; @@ -4482,8 +4483,9 @@ void do_shutdown_server(struct st_command *command) if (ds_timeout.length) { - timeout= atoi(ds_timeout.str); - if (timeout == 0) + char* endptr; + timeout= strtol(ds_timeout.str, &endptr, 10); + if (*endptr != '\0') die("Illegal argument for timeout: '%s'", ds_timeout.str); } dynstr_free(&ds_timeout); @@ -4525,7 +4527,7 
@@ void do_shutdown_server(struct st_command *command) DBUG_PRINT("info", ("Process %d does not exist anymore", pid)); DBUG_VOID_RETURN; } - DBUG_PRINT("info", ("Sleeping, timeout: %d", timeout)); + DBUG_PRINT("info", ("Sleeping, timeout: %ld", timeout)); my_sleep(1000000L); } diff --git a/mysql-test/suite/innodb/r/innodb_bug59641.result b/mysql-test/suite/innodb/r/innodb_bug59641.result new file mode 100644 index 00000000000..361172aa82b --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb_bug59641.result @@ -0,0 +1,57 @@ +CREATE TABLE t(a INT PRIMARY KEY, b INT)ENGINE=InnoDB; +INSERT INTO t VALUES(2,2),(4,4),(8,8),(16,16),(32,32); +COMMIT; +XA START '123'; +INSERT INTO t VALUES(1,1); +XA END '123'; +XA PREPARE '123'; +XA START '456'; +INSERT INTO t VALUES(3,47),(5,67); +UPDATE t SET b=2*b WHERE a BETWEEN 5 AND 8; +XA END '456'; +XA PREPARE '456'; +XA START '789'; +UPDATE t SET b=4*a WHERE a=32; +XA END '789'; +XA PREPARE '789'; +call mtr.add_suppression("Found 3 prepared XA transactions"); +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +a b +1 1 +2 2 +3 47 +4 4 +5 134 +8 16 +16 16 +32 128 +COMMIT; +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +a b +1 1 +2 2 +3 47 +4 4 +5 134 +8 16 +16 16 +32 128 +COMMIT; +XA RECOVER; +formatID gtrid_length bqual_length data +1 3 0 789 +1 3 0 456 +1 3 0 123 +XA ROLLBACK '123'; +XA ROLLBACK '456'; +XA COMMIT '789'; +SELECT * FROM t; +a b +2 2 +4 4 +8 8 +16 16 +32 128 +DROP TABLE t; diff --git a/mysql-test/suite/innodb/t/innodb_bug59641.test b/mysql-test/suite/innodb/t/innodb_bug59641.test new file mode 100644 index 00000000000..0237673061c --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb_bug59641.test @@ -0,0 +1,66 @@ +# Bug #59641 Prepared XA transaction causes shutdown hang after a crash + +-- source include/not_embedded.inc +-- source include/have_innodb.inc + +CREATE TABLE t(a INT PRIMARY KEY, b INT)ENGINE=InnoDB; +INSERT INTO t VALUES(2,2),(4,4),(8,8),(16,16),(32,32); +COMMIT; +XA START '123'; +INSERT INTO t VALUES(1,1); +XA END '123'; +XA PREPARE '123'; + +CONNECT (con1,localhost,root,,); +CONNECTION con1; + +XA START '456'; +INSERT INTO t VALUES(3,47),(5,67); +UPDATE t SET b=2*b WHERE a BETWEEN 5 AND 8; +XA END '456'; +XA PREPARE '456'; + +CONNECT (con2,localhost,root,,); +CONNECTION con2; + +XA START '789'; +UPDATE t SET b=4*a WHERE a=32; +XA END '789'; +XA PREPARE '789'; + +# The server would issue this warning on restart. +call mtr.add_suppression("Found 3 prepared XA transactions"); + +# Kill the server without sending a shutdown command +-- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- shutdown_server 0 +-- source include/wait_until_disconnected.inc + +# Restart the server. +-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +COMMIT; + +# Shut down the server. This would hang because of the bug. +-- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- shutdown_server +-- source include/wait_until_disconnected.inc + +# Restart the server. 
+-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +COMMIT; +XA RECOVER; +XA ROLLBACK '123'; +XA ROLLBACK '456'; +XA COMMIT '789'; +SELECT * FROM t; + +DROP TABLE t; diff --git a/mysql-test/suite/innodb_plugin/r/innodb_bug59641.result b/mysql-test/suite/innodb_plugin/r/innodb_bug59641.result new file mode 100644 index 00000000000..361172aa82b --- /dev/null +++ b/mysql-test/suite/innodb_plugin/r/innodb_bug59641.result @@ -0,0 +1,57 @@ +CREATE TABLE t(a INT PRIMARY KEY, b INT)ENGINE=InnoDB; +INSERT INTO t VALUES(2,2),(4,4),(8,8),(16,16),(32,32); +COMMIT; +XA START '123'; +INSERT INTO t VALUES(1,1); +XA END '123'; +XA PREPARE '123'; +XA START '456'; +INSERT INTO t VALUES(3,47),(5,67); +UPDATE t SET b=2*b WHERE a BETWEEN 5 AND 8; +XA END '456'; +XA PREPARE '456'; +XA START '789'; +UPDATE t SET b=4*a WHERE a=32; +XA END '789'; +XA PREPARE '789'; +call mtr.add_suppression("Found 3 prepared XA transactions"); +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +a b +1 1 +2 2 +3 47 +4 4 +5 134 +8 16 +16 16 +32 128 +COMMIT; +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +a b +1 1 +2 2 +3 47 +4 4 +5 134 +8 16 +16 16 +32 128 +COMMIT; +XA RECOVER; +formatID gtrid_length bqual_length data +1 3 0 789 +1 3 0 456 +1 3 0 123 +XA ROLLBACK '123'; +XA ROLLBACK '456'; +XA COMMIT '789'; +SELECT * FROM t; +a b +2 2 +4 4 +8 8 +16 16 +32 128 +DROP TABLE t; diff --git a/mysql-test/suite/innodb_plugin/t/innodb_bug59641.test b/mysql-test/suite/innodb_plugin/t/innodb_bug59641.test new file mode 100644 index 00000000000..0fb24e47a54 --- /dev/null +++ b/mysql-test/suite/innodb_plugin/t/innodb_bug59641.test @@ -0,0 +1,70 @@ +# Bug #59641 Prepared XA transaction causes shutdown hang after a crash + +-- source include/not_embedded.inc +-- source include/have_innodb_plugin.inc + +let $innodb_file_format_check_orig=`select @@innodb_file_format_check`; + +CREATE TABLE t(a INT PRIMARY KEY, b INT)ENGINE=InnoDB; +INSERT INTO t VALUES(2,2),(4,4),(8,8),(16,16),(32,32); +COMMIT; +XA START '123'; +INSERT INTO t VALUES(1,1); +XA END '123'; +XA PREPARE '123'; + +CONNECT (con1,localhost,root,,); +CONNECTION con1; + +XA START '456'; +INSERT INTO t VALUES(3,47),(5,67); +UPDATE t SET b=2*b WHERE a BETWEEN 5 AND 8; +XA END '456'; +XA PREPARE '456'; + +CONNECT (con2,localhost,root,,); +CONNECTION con2; + +XA START '789'; +UPDATE t SET b=4*a WHERE a=32; +XA END '789'; +XA PREPARE '789'; + +# The server would issue this warning on restart. +call mtr.add_suppression("Found 3 prepared XA transactions"); + +# Kill the server without sending a shutdown command +-- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- shutdown_server 0 +-- source include/wait_until_disconnected.inc + +# Restart the server. +-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +COMMIT; + +# Shut down the server. This would hang because of the bug. +-- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- shutdown_server +-- source include/wait_until_disconnected.inc + +# Restart the server. 
+-- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +-- enable_reconnect +-- source include/wait_until_connected_again.inc + +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +SELECT * FROM t; +COMMIT; +XA RECOVER; +XA ROLLBACK '123'; +XA ROLLBACK '456'; +XA COMMIT '789'; +SELECT * FROM t; + +DROP TABLE t; +--disable_query_log +eval set global innodb_file_format_check=$innodb_file_format_check_orig; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index a61ce7bfd14..ae21a5335fd 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -3383,6 +3383,7 @@ bool xid_cache_insert(XID *xid, enum xa_states xa_state) xs->xa_state=xa_state; xs->xid.set(xid); xs->in_thd=0; + xs->rm_error=0; res=my_hash_insert(&xid_cache, (uchar*)xs); } pthread_mutex_unlock(&LOCK_xid_cache); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 6f58fd70fbd..75c732c44d4 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -8565,7 +8565,7 @@ innobase_commit_by_xid( if (trx) { innobase_commit_low(trx); - + trx_free_for_background(trx); return(XA_OK); } else { return(XAER_NOTA); @@ -8588,7 +8588,9 @@ innobase_rollback_by_xid( trx = trx_get_trx_by_xid(xid); if (trx) { - return(innobase_rollback_trx(trx)); + int ret = innobase_rollback_trx(trx); + trx_free_for_background(trx); + return(ret); } else { return(XAER_NOTA); } diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 4652f45892e..7cb16107746 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -19,7 +19,12 @@ Created 3/26/1996 Heikki Tuuri #include "dict0types.h" #include "trx0xa.h" +/* Number of transactions currently allocated for MySQL: protected by +the kernel mutex */ extern ulint trx_n_mysql_transactions; +/* Number of transactions currently in the XA PREPARED state: protected by +the kernel mutex */ +extern ulint trx_n_prepared; /************************************************************************ Releases the search latch if trx has reserved it. */ diff --git a/storage/innobase/log/log0log.c b/storage/innobase/log/log0log.c index 3300997112b..092e3bfe37f 100644 --- a/storage/innobase/log/log0log.c +++ b/storage/innobase/log/log0log.c @@ -3052,12 +3052,13 @@ loop: goto loop; } - /* Check that there are no longer transactions. We need this wait even - for the 'very fast' shutdown, because the InnoDB layer may have - committed or prepared transactions and we don't want to lose them. */ + /* Check that there are no longer transactions, except for + PREPARED ones. We need this wait even for the 'very fast' + shutdown, because the InnoDB layer may have committed or + prepared transactions and we don't want to lose them. */ if (trx_n_mysql_transactions > 0 - || UT_LIST_GET_LEN(trx_sys->trx_list) > 0) { + || UT_LIST_GET_LEN(trx_sys->trx_list) > trx_n_prepared) { mutex_exit(&kernel_mutex); diff --git a/storage/innobase/trx/trx0trx.c b/storage/innobase/trx/trx0trx.c index a82d7f452fc..d174f1e1b37 100644 --- a/storage/innobase/trx/trx0trx.c +++ b/storage/innobase/trx/trx0trx.c @@ -41,6 +41,9 @@ sess_t* trx_dummy_sess = NULL; /* Number of transactions currently allocated for MySQL: protected by the kernel mutex */ ulint trx_n_mysql_transactions = 0; +/* Number of transactions currently in the XA PREPARED state: protected by +the kernel mutex */ +ulint trx_n_prepared = 0; /***************************************************************** Starts the transaction if it is not yet started. 
*/ @@ -480,6 +483,7 @@ trx_lists_init_at_db_start(void) if (srv_force_recovery == 0) { trx->conc_state = TRX_PREPARED; + trx_n_prepared++; } else { fprintf(stderr, "InnoDB: Since" @@ -558,6 +562,7 @@ trx_lists_init_at_db_start(void) trx->conc_state = TRX_PREPARED; + trx_n_prepared++; } else { fprintf(stderr, "InnoDB: Since" @@ -832,6 +837,11 @@ trx_commit_off_kernel( || trx->conc_state == TRX_PREPARED); ut_ad(mutex_own(&kernel_mutex)); + if (UNIV_UNLIKELY(trx->conc_state == TRX_PREPARED)) { + ut_a(trx_n_prepared > 0); + trx_n_prepared--; + } + /* The following assignment makes the transaction committed in memory and makes its changes to data visible to other transactions. NOTE that there is a small discrepancy from the strict formal @@ -1882,6 +1892,7 @@ trx_prepare_off_kernel( /*--------------------------------------*/ trx->conc_state = TRX_PREPARED; + trx_n_prepared++; /*--------------------------------------*/ if (must_flush_log) { diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index 100cf3690ce..d062fc7e648 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,10 @@ +2011-04-07 The InnoDB Team + + * handler/ha_innodb.cc, include/trx0trx.h, include/trx0undo.h, + log/log0log.c, trx/trx0sys.c, trx/trx0trx.c, trx/trx0undo.c: + Fix Bug #59641 Prepared XA transaction in system after hard crash + causes future shutdown hang + 2011-03-30 The InnoDB Team * srv/srv0srv.c, sync/sync0arr.h, sync/sync0arr.c: diff --git a/storage/innodb_plugin/handler/ha_innodb.cc b/storage/innodb_plugin/handler/ha_innodb.cc index dda2fbaa4d2..7f92d797d30 100644 --- a/storage/innodb_plugin/handler/ha_innodb.cc +++ b/storage/innodb_plugin/handler/ha_innodb.cc @@ -9998,7 +9998,7 @@ innobase_commit_by_xid( if (trx) { innobase_commit_low(trx); - + trx_free_for_background(trx); return(XA_OK); } else { return(XAER_NOTA); @@ -10024,7 +10024,9 @@ innobase_rollback_by_xid( trx = trx_get_trx_by_xid(xid); if (trx) { - return(innobase_rollback_trx(trx)); + int ret = innobase_rollback_trx(trx); + trx_free_for_background(trx); + return(ret); } else { return(XAER_NOTA); } diff --git a/storage/innodb_plugin/include/trx0trx.h b/storage/innodb_plugin/include/trx0trx.h index 833bae4a4ff..4bf3e75a5ee 100644 --- a/storage/innodb_plugin/include/trx0trx.h +++ b/storage/innodb_plugin/include/trx0trx.h @@ -44,6 +44,9 @@ extern sess_t* trx_dummy_sess; /** Number of transactions currently allocated for MySQL: protected by the kernel mutex */ extern ulint trx_n_mysql_transactions; +/** Number of transactions currently in the XA PREPARED state: protected by +the kernel mutex */ +extern ulint trx_n_prepared; /********************************************************************//** Releases the search latch if trx has reserved it. */ @@ -108,6 +111,14 @@ trx_free( /*=====*/ trx_t* trx); /*!< in, own: trx object */ /********************************************************************//** +At shutdown, frees a transaction object that is in the PREPARED state. */ +UNIV_INTERN +void +trx_free_prepared( +/*==============*/ + trx_t* trx) /*!< in, own: trx object */ + __attribute__((nonnull)); +/********************************************************************//** Frees a transaction object for MySQL. 
*/ UNIV_INTERN void diff --git a/storage/innodb_plugin/include/trx0undo.h b/storage/innodb_plugin/include/trx0undo.h index a084f2394b5..4f15cd85833 100644 --- a/storage/innodb_plugin/include/trx0undo.h +++ b/storage/innodb_plugin/include/trx0undo.h @@ -298,6 +298,15 @@ void trx_undo_insert_cleanup( /*====================*/ trx_t* trx); /*!< in: transaction handle */ + +/********************************************************************//** +At shutdown, frees the undo logs of a PREPARED transaction. */ +UNIV_INTERN +void +trx_undo_free_prepared( +/*===================*/ + trx_t* trx) /*!< in/out: PREPARED transaction */ + __attribute__((nonnull)); #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Parses the redo log entry of an undo log page initialization. diff --git a/storage/innodb_plugin/log/log0log.c b/storage/innodb_plugin/log/log0log.c index 183c24d2147..4bb9abdc1a4 100644 --- a/storage/innodb_plugin/log/log0log.c +++ b/storage/innodb_plugin/log/log0log.c @@ -3085,12 +3085,13 @@ loop: goto loop; } - /* Check that there are no longer transactions. We need this wait even - for the 'very fast' shutdown, because the InnoDB layer may have - committed or prepared transactions and we don't want to lose them. */ + /* Check that there are no longer transactions, except for + PREPARED ones. We need this wait even for the 'very fast' + shutdown, because the InnoDB layer may have committed or + prepared transactions and we don't want to lose them. */ if (trx_n_mysql_transactions > 0 - || UT_LIST_GET_LEN(trx_sys->trx_list) > 0) { + || UT_LIST_GET_LEN(trx_sys->trx_list) > trx_n_prepared) { mutex_exit(&kernel_mutex); diff --git a/storage/innodb_plugin/trx/trx0sys.c b/storage/innodb_plugin/trx/trx0sys.c index 6eb356947cc..352fa6af219 100644 --- a/storage/innodb_plugin/trx/trx0sys.c +++ b/storage/innodb_plugin/trx/trx0sys.c @@ -37,6 +37,7 @@ Created 3/26/1996 Heikki Tuuri #include "trx0rseg.h" #include "trx0undo.h" #include "srv0srv.h" +#include "srv0start.h" #include "trx0purge.h" #include "log0log.h" #include "os0file.h" @@ -1548,10 +1549,12 @@ void trx_sys_close(void) /*===============*/ { + trx_t* trx; trx_rseg_t* rseg; read_view_t* view; ut_ad(trx_sys != NULL); + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS); /* Check that all read views are closed except read view owned by a purge. */ @@ -1583,6 +1586,13 @@ trx_sys_close(void) mem_free(trx_doublewrite); trx_doublewrite = NULL; + /* Only prepared transactions may be left in the system. Free them. */ + ut_a(UT_LIST_GET_LEN(trx_sys->trx_list) == trx_n_prepared); + + while ((trx = UT_LIST_GET_FIRST(trx_sys->trx_list)) != NULL) { + trx_free_prepared(trx); + } + /* There can't be any active transactions. */ rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list); diff --git a/storage/innodb_plugin/trx/trx0trx.c b/storage/innodb_plugin/trx/trx0trx.c index f0bbf220815..7f3a3fcb4bf 100644 --- a/storage/innodb_plugin/trx/trx0trx.c +++ b/storage/innodb_plugin/trx/trx0trx.c @@ -50,6 +50,9 @@ UNIV_INTERN sess_t* trx_dummy_sess = NULL; /** Number of transactions currently allocated for MySQL: protected by the kernel mutex */ UNIV_INTERN ulint trx_n_mysql_transactions = 0; +/* Number of transactions currently in the XA PREPARED state: protected by +the kernel mutex */ +UNIV_INTERN ulint trx_n_prepared = 0; /*************************************************************//** Set detailed error message for the transaction. 
*/ @@ -333,6 +336,60 @@ trx_free( mem_free(trx); } +/********************************************************************//** +At shutdown, frees a transaction object that is in the PREPARED state. */ +UNIV_INTERN +void +trx_free_prepared( +/*==============*/ + trx_t* trx) /*!< in, own: trx object */ +{ + ut_ad(mutex_own(&kernel_mutex)); + ut_a(trx->conc_state == TRX_PREPARED); + ut_a(trx->magic_n == TRX_MAGIC_N); + + /* Prepared transactions are sort of active; they allow + ROLLBACK and COMMIT operations. Because the system does not + contain any other transactions than prepared transactions at + the shutdown stage and because a transaction cannot become + PREPARED while holding locks, it is safe to release the locks + held by PREPARED transactions here at shutdown.*/ + lock_release_off_kernel(trx); + + trx_undo_free_prepared(trx); + + mutex_free(&trx->undo_mutex); + + if (trx->undo_no_arr) { + trx_undo_arr_free(trx->undo_no_arr); + } + + ut_a(UT_LIST_GET_LEN(trx->signals) == 0); + ut_a(UT_LIST_GET_LEN(trx->reply_signals) == 0); + + ut_a(trx->wait_lock == NULL); + ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0); + + ut_a(!trx->has_search_latch); + + ut_a(trx->dict_operation_lock_mode == 0); + + if (trx->lock_heap) { + mem_heap_free(trx->lock_heap); + } + + if (trx->global_read_view_heap) { + mem_heap_free(trx->global_read_view_heap); + } + + ut_a(ib_vector_is_empty(trx->autoinc_locks)); + ib_vector_free(trx->autoinc_locks); + + UT_LIST_REMOVE(trx_list, trx_sys->trx_list, trx); + + mem_free(trx); +} + /********************************************************************//** Frees a transaction object for MySQL. */ UNIV_INTERN @@ -463,6 +520,7 @@ trx_lists_init_at_db_start(void) if (srv_force_recovery == 0) { trx->conc_state = TRX_PREPARED; + trx_n_prepared++; } else { fprintf(stderr, "InnoDB: Since" @@ -541,6 +599,7 @@ trx_lists_init_at_db_start(void) trx->conc_state = TRX_PREPARED; + trx_n_prepared++; } else { fprintf(stderr, "InnoDB: Since" @@ -820,6 +879,11 @@ trx_commit_off_kernel( || trx->conc_state == TRX_PREPARED); ut_ad(mutex_own(&kernel_mutex)); + if (UNIV_UNLIKELY(trx->conc_state == TRX_PREPARED)) { + ut_a(trx_n_prepared > 0); + trx_n_prepared--; + } + /* The following assignment makes the transaction committed in memory and makes its changes to data visible to other transactions. 
NOTE that there is a small discrepancy from the strict formal @@ -1857,6 +1921,7 @@ trx_prepare_off_kernel( /*--------------------------------------*/ trx->conc_state = TRX_PREPARED; + trx_n_prepared++; /*--------------------------------------*/ if (lsn) { @@ -2031,10 +2096,11 @@ trx_get_trx_by_xid( while (trx) { /* Compare two X/Open XA transaction id's: their length should be the same and binary comparison - of gtrid_lenght+bqual_length bytes should be + of gtrid_length+bqual_length bytes should be the same */ - if (trx->conc_state == TRX_PREPARED + if (trx->is_recovered + && trx->conc_state == TRX_PREPARED && xid->gtrid_length == trx->xid.gtrid_length && xid->bqual_length == trx->xid.bqual_length && memcmp(xid->data, trx->xid.data, diff --git a/storage/innodb_plugin/trx/trx0undo.c b/storage/innodb_plugin/trx/trx0undo.c index 76e88948e41..68ff82f618c 100644 --- a/storage/innodb_plugin/trx/trx0undo.c +++ b/storage/innodb_plugin/trx/trx0undo.c @@ -36,6 +36,7 @@ Created 3/26/1996 Heikki Tuuri #include "trx0rseg.h" #include "trx0trx.h" #include "srv0srv.h" +#include "srv0start.h" #include "trx0rec.h" #include "trx0purge.h" @@ -1976,4 +1977,31 @@ trx_undo_insert_cleanup( mutex_exit(&(rseg->mutex)); } + +/********************************************************************//** +At shutdown, frees the undo logs of a PREPARED transaction. */ +UNIV_INTERN +void +trx_undo_free_prepared( +/*===================*/ + trx_t* trx) /*!< in/out: PREPARED transaction */ +{ + mutex_enter(&trx->rseg->mutex); + + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS); + + if (trx->update_undo) { + ut_a(trx->update_undo->state == TRX_UNDO_PREPARED); + UT_LIST_REMOVE(undo_list, trx->rseg->update_undo_list, + trx->update_undo); + trx_undo_mem_free(trx->update_undo); + } + if (trx->insert_undo) { + ut_a(trx->insert_undo->state == TRX_UNDO_PREPARED); + UT_LIST_REMOVE(undo_list, trx->rseg->insert_undo_list, + trx->insert_undo); + trx_undo_mem_free(trx->insert_undo); + } + mutex_exit(&trx->rseg->mutex); +} #endif /* !UNIV_HOTBACKUP */ -- cgit v1.2.1 From cb0e49c000b65db94cb40c6331572440b4bc4c3e Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Fri, 8 Apr 2011 12:22:44 +0530 Subject: Bug#11765157 - 58090: mysqlslap drops schema specified in create_schema if auto-generate-sql also set. mysqlslap uses a schema to run its tests on and later drops it if auto-generate-sql is used. This can be a problem, if the schema is an already existing one. If create-schema is used with auto-generate-sql option, mysqlslap while performing the cleanup, drops the specified database. Fixed by introducing an option --no-drop, which, if used, will prevent the dropping of schema at the end of the test. client/client_priv.h: Bug#11765157 - 58090: mysqlslap drops schema specified in create_schema if auto-generate-sql also set. Added an option. client/mysqlslap.c: Bug#11765157 - 58090: mysqlslap drops schema specified in create_schema if auto-generate-sql also set. Introduced an option 'no-drop' to forbid the removal of schema even if 'create' or 'auto-generate-sql' options are used. mysql-test/r/mysqlslap.result: Added a testcase for Bug#11765157. mysql-test/t/mysqlslap.test: Added a testcase for Bug#11765157. 
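To make the behaviour change concrete, the pair of invocations below (mirroring the test added in this commit; the schema name and load parameters are just the ones the test happens to use) shows the difference:

  mysqlslap --silent --create-schema=bug58090 --concurrency=5 --iterations=20 --auto-generate-sql
      (cleanup drops the 'bug58090' schema after the run, as before)
  mysqlslap --silent --create-schema=bug58090 --no-drop --auto-generate-sql
      (with --no-drop, the schema is left in place after the run)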
--- client/client_priv.h | 1 + client/mysqlslap.c | 11 ++++++++--- mysql-test/r/mysqlslap.result | 20 ++++++++++++++++++++ mysql-test/t/mysqlslap.test | 15 +++++++++++++++ 4 files changed, 44 insertions(+), 3 deletions(-) diff --git a/client/client_priv.h b/client/client_priv.h index 689f7277c2e..92f495864bb 100644 --- a/client/client_priv.h +++ b/client/client_priv.h @@ -85,6 +85,7 @@ enum options_client OPT_SLAP_POST_SYSTEM, OPT_SLAP_COMMIT, OPT_SLAP_DETACH, + OPT_SLAP_NO_DROP, OPT_MYSQL_REPLACE_INTO, OPT_BASE64_OUTPUT_MODE, OPT_SERVER_ID, OPT_FIX_TABLE_NAMES, OPT_FIX_DB_NAMES, OPT_SSL_VERIFY_SERVER_CERT, OPT_DEBUG_INFO, OPT_DEBUG_CHECK, OPT_COLUMN_TYPES, OPT_ERROR_LOG_FILE, diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 3b5c14dd74b..851407a108f 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -131,7 +131,7 @@ const char *delimiter= "\n"; const char *create_schema_string= "mysqlslap"; -static my_bool opt_preserve= TRUE; +static my_bool opt_preserve= TRUE, opt_no_drop= FALSE; static my_bool debug_info_flag= 0, debug_check_flag= 0; static my_bool opt_only_print= FALSE; static my_bool opt_compress= FALSE, tty_password= FALSE, @@ -599,6 +599,8 @@ static struct my_option my_long_options[] = REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"iterations", 'i', "Number of times to run the tests.", &iterations, &iterations, 0, GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0}, + {"no-drop", OPT_SLAP_NO_DROP, "Do not drop the schema after the test.", + &opt_no_drop, &opt_no_drop, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"number-char-cols", 'x', "Number of VARCHAR columns to create in table if specifying --auto-generate-sql.", &num_char_cols_opt, &num_char_cols_opt, 0, GET_STR, REQUIRED_ARG, @@ -1147,8 +1149,11 @@ get_options(int *argc,char ***argv) if (!user) user= (char *)"root"; - /* If something is created we clean it up, otherwise we leave schemas alone */ - if (create_string || auto_generate_sql) + /* + If something is created and --no-drop is not specified, we drop the + schema. + */ + if (!opt_no_drop && (create_string || auto_generate_sql)) opt_preserve= FALSE; if (auto_generate_sql && (create_string || user_supplied_query)) diff --git a/mysql-test/r/mysqlslap.result b/mysql-test/r/mysqlslap.result index 4cb01490407..a94d9156462 100644 --- a/mysql-test/r/mysqlslap.result +++ b/mysql-test/r/mysqlslap.result @@ -225,3 +225,23 @@ DROP SCHEMA IF EXISTS `mysqlslap`; DROP PROCEDURE IF EXISTS p1; CREATE PROCEDURE p1() SELECT 1; DROP PROCEDURE p1; +# +# Bug #11765157 - 58090: mysqlslap drops schema specified in +# create_schema if auto-generate-sql also set. +# +# 'bug58090' database should not be present. +SHOW DATABASES; +Database +information_schema +mtr +mysql +test +# 'bug58090' database should be present. +SHOW DATABASES; +Database +information_schema +bug58090 +mtr +mysql +test +DROP DATABASE bug58090; diff --git a/mysql-test/t/mysqlslap.test b/mysql-test/t/mysqlslap.test index 28042f62fe6..757d2813483 100644 --- a/mysql-test/t/mysqlslap.test +++ b/mysql-test/t/mysqlslap.test @@ -53,3 +53,18 @@ CREATE PROCEDURE p1() SELECT 1; --exec $MYSQL_SLAP --create-schema=test --delimiter=";" --query="CALL p1; SELECT 1;" --silent 2>&1 DROP PROCEDURE p1; + + +--echo # +--echo # Bug #11765157 - 58090: mysqlslap drops schema specified in +--echo # create_schema if auto-generate-sql also set. +--echo # + +--exec $MYSQL_SLAP --silent --create-schema=bug58090 --concurrency=5 --iterations=20 --auto-generate-sql +--echo # 'bug58090' database should not be present. 
+SHOW DATABASES; +--exec $MYSQL_SLAP --silent --create-schema=bug58090 --no-drop --auto-generate-sql +--echo # 'bug58090' database should be present. +SHOW DATABASES; +DROP DATABASE bug58090; + -- cgit v1.2.1 From a77bc59896ee3cd89a8f1d391a65722b443f1841 Mon Sep 17 00:00:00 2001 From: Gleb Shchepa Date: Fri, 8 Apr 2011 12:05:20 +0400 Subject: Bug #11829681 - 60295: ERROR 1356 ON VIEW THAT EXECUTES FINE AS A QUERY Select from a view with the underlying HAVING clause failed with a message: "1356: View '...' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them" The bug is a regression of the fix for bug 11750328 - 40825 (similar case, but the HAVING clause references an aliased field). In the old fix for bug 40825 the Item_field::name_length value was used in place of the real length of Item_field::name. However, in some cases Item_field::name_length is not in sync with the actual name length (TODO: combine name and name_length into a solid String field). The Item_ref::print() method has been modified to calculate the actual name length every time. mysql-test/r/view.result: Test case for bug #11829681 mysql-test/t/view.test: Test case for bug #11829681 sql/item.cc: Bug #11829681 - 60295: ERROR 1356 ON VIEW THAT EXECUTES FINE AS A QUERY The Item_ref::print() method has been modified to calculate the actual name length every time. sql/item.h: Minor commentary. --- mysql-test/r/view.result | 9 +++++++++ mysql-test/t/view.test | 12 ++++++++++++ sql/item.cc | 2 +- sql/item.h | 4 ++++ 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 6b0a2103afa..2ca81f20cbb 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -3897,6 +3897,15 @@ DROP TABLE t1; # CREATE VIEW v1 AS SELECT 1 IN (1 LIKE 2,0) AS f; DROP VIEW v1; +# +# Bug 11829681 - 60295: ERROR 1356 ON VIEW THAT EXECUTES FINE AS A QUERY +# +CREATE TABLE t1 (a INT); +CREATE VIEW v1 AS SELECT s.* FROM t1 s, t1 b HAVING a; +SELECT * FROM v1; +a +DROP VIEW v1; +DROP TABLE t1; # ----------------------------------------------------------------- # -- End of 5.1 tests. # ----------------------------------------------------------------- diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index b1b3b5f2a83..1543924a7ad 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -3941,6 +3941,18 @@ DROP TABLE t1; CREATE VIEW v1 AS SELECT 1 IN (1 LIKE 2,0) AS f; DROP VIEW v1; +--echo # +--echo # Bug 11829681 - 60295: ERROR 1356 ON VIEW THAT EXECUTES FINE AS A QUERY +--echo # + +CREATE TABLE t1 (a INT); +CREATE VIEW v1 AS SELECT s.* FROM t1 s, t1 b HAVING a; + +SELECT * FROM v1; + +DROP VIEW v1; +DROP TABLE t1; + --echo # ----------------------------------------------------------------- --echo # -- End of 5.1 tests.
--echo # ----------------------------------------------------------------- diff --git a/sql/item.cc b/sql/item.cc index 24c3107ece9..40be8b205bd 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -6121,7 +6121,7 @@ void Item_ref::print(String *str, enum_query_type query_type) { THD *thd= current_thd; append_identifier(thd, str, (*ref)->real_item()->name, - (*ref)->real_item()->name_length); + strlen((*ref)->real_item()->name)); } else (*ref)->print(str, query_type); diff --git a/sql/item.h b/sql/item.h index 8568e89542e..8d7ad3c41d3 100644 --- a/sql/item.h +++ b/sql/item.h @@ -515,6 +515,10 @@ public: */ Item *next; uint32 max_length; + /* + TODO: convert name and name_length fields into String to keep them in sync + (see bug #11829681/60295 etc). + */ uint name_length; /* Length of name */ int8 marker; uint8 decimals; -- cgit v1.2.1 From 97e435dd18ae49847abb7775428bd308a400c3c5 Mon Sep 17 00:00:00 2001 From: Alexander Nozdrin Date: Mon, 11 Apr 2011 13:45:41 +0400 Subject: Bump version. --- configure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.in b/configure.in index 523d36afaea..e07ae7e3191 100644 --- a/configure.in +++ b/configure.in @@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc) AC_CANONICAL_SYSTEM # The Docs Makefile.am parses this line! # remember to also change ndb version below and update version.c in ndb -AM_INIT_AUTOMAKE(mysql, 5.0.93) +AM_INIT_AUTOMAKE(mysql, 5.0.94) AM_CONFIG_HEADER([include/config.h:config.h.in]) PROTOCOL_VERSION=10 -- cgit v1.2.1 From ab86b40c05bcde5d27bcf016c4d8626a4ca5cd2a Mon Sep 17 00:00:00 2001 From: Alexander Nozdrin Date: Mon, 11 Apr 2011 13:57:45 +0400 Subject: Bump NDB-version. --- configure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.in b/configure.in index e07ae7e3191..fdfb7eae871 100644 --- a/configure.in +++ b/configure.in @@ -23,7 +23,7 @@ NDB_SHARED_LIB_VERSION=$NDB_SHARED_LIB_MAJOR_VERSION:0:0 # ndb version NDB_VERSION_MAJOR=5 NDB_VERSION_MINOR=0 -NDB_VERSION_BUILD=93 +NDB_VERSION_BUILD=94 NDB_VERSION_STATUS="" # Set all version vars based on $VERSION. How do we do this more elegant ? -- cgit v1.2.1 From 12fbe05c6a31a7958a4a1cae748477027fffa51f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 11 Apr 2011 16:40:28 +0300 Subject: Bug #11760042 - 52409: Assertion failure: long semaphore wait In ha_innobase::create(), we check some things while holding an exclusive lock on the data dictionary. Defer the locking and the creation of transactions until after the checks have passed. The THDVAR could hang due to a mutex wait (see Bug #11750569 - 41163: deadlock in mysqld: LOCK_global_system_variables and LOCK_open), and we want to avoid waiting while holding InnoDB mutexes. innobase_index_name_is_reserved(): Replace the parameter trx_t with THD, so that the test can be performed before starting an InnoDB transaction. We only needed trx->mysql_thd. ha_innobase::create(): Create transaction and lock the data dictionary only after passing the basic tests. create_table_def(): Move the IS_MAGIC_TABLE_AND_USER_DENIED_ACCESS check to ha_innobase::create(). Assign to srv_lower_case_table_names while holding dict_sys->mutex. ha_innobase::delete_table(), ha_innobase::rename_table(), innobase_rename_table(): Assign srv_lower_case_table_names as late as possible. Here, the variable is not necessarily protected by dict_sys->mutex. ha_innobase::add_index(): Invoke innobase_index_name_is_reserved() and innobase_check_index_keys() before allocating anything. 
rb:618 approved by Jimmy Yang --- storage/innobase/handler/ha_innodb.cc | 93 +++++++++++-------------- storage/innodb_plugin/ChangeLog | 5 ++ storage/innodb_plugin/handler/ha_innodb.cc | 94 +++++++++++--------------- storage/innodb_plugin/handler/ha_innodb.h | 11 ++- storage/innodb_plugin/handler/handler0alter.cc | 43 ++++++------ 5 files changed, 108 insertions(+), 138 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 75c732c44d4..2afacf6d2a8 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -189,7 +189,7 @@ innobase_index_name_is_reserved( /*============================*/ /* out: true if index name matches a reserved name */ - const trx_t* trx, /* in: InnoDB transaction handle */ + THD* thd, /* in/out: MySQL connection */ const TABLE* form, /* in: information on table columns and indexes */ const char* norm_name); /* in: table name */ @@ -5285,10 +5285,6 @@ create_table_def( DBUG_PRINT("enter", ("table_name: %s", table_name)); ut_a(trx->mysql_thd != NULL); - if (IS_MAGIC_TABLE_AND_USER_DENIED_ACCESS(table_name, - (THD*) trx->mysql_thd)) { - DBUG_RETURN(HA_ERR_GENERIC); - } n_cols = form->s->fields; @@ -5397,6 +5393,8 @@ err_col: col_len); } + srv_lower_case_table_names = lower_case_table_names; + error = row_create_table_for_mysql(table, trx); innodb_check_for_record_too_big_error(flags & DICT_TF_COMPACT, error); @@ -5642,6 +5640,35 @@ ha_innobase::create( DBUG_RETURN(HA_ERR_TO_BIG_ROW); } + strcpy(name2, name); + + normalize_table_name(norm_name, name2); + + /* Create the table definition in InnoDB */ + + flags = form->s->row_type != ROW_TYPE_REDUNDANT ? DICT_TF_COMPACT : 0; + + /* Look for a primary key */ + + primary_key_no= (form->s->primary_key != MAX_KEY ? + (int) form->s->primary_key : + -1); + + /* Our function row_get_mysql_key_number_for_index assumes + the primary key is always number 0, if it exists */ + + DBUG_ASSERT(primary_key_no == -1 || primary_key_no == 0); + + /* Check for name conflicts (with reserved name) for + any user indices to be created. */ + if (innobase_index_name_is_reserved(thd, form, norm_name)) { + DBUG_RETURN(-1); + } + + if (IS_MAGIC_TABLE_AND_USER_DENIED_ACCESS(norm_name, thd)) { + DBUG_RETURN(HA_ERR_GENERIC); + } + /* Get the transaction associated with the current thd, or create one if not yet created */ @@ -5665,48 +5692,12 @@ ha_innobase::create( trx->check_unique_secondary = FALSE; } - if (lower_case_table_names) { - srv_lower_case_table_names = TRUE; - } else { - srv_lower_case_table_names = FALSE; - } - - strcpy(name2, name); - - normalize_table_name(norm_name, name2); - /* Latch the InnoDB data dictionary exclusively so that no deadlocks or lock waits can happen in it during a table create operation. Drop table etc. do this latching in row0mysql.c. */ row_mysql_lock_data_dictionary(trx); - /* Create the table definition in InnoDB */ - - flags = 0; - - if (form->s->row_type != ROW_TYPE_REDUNDANT) { - flags |= DICT_TF_COMPACT; - } - - /* Look for a primary key */ - - primary_key_no= (form->s->primary_key != MAX_KEY ? - (int) form->s->primary_key : - -1); - - /* Our function row_get_mysql_key_number_for_index assumes - the primary key is always number 0, if it exists */ - - DBUG_ASSERT(primary_key_no == -1 || primary_key_no == 0); - - /* Check for name conflicts (with reserved name) for - any user indices to be created. 
*/ - if (innobase_index_name_is_reserved(trx, form, norm_name)) { - error = -1; - goto cleanup; - } - error = create_table_def(trx, form, norm_name, create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL, flags); @@ -5936,12 +5927,6 @@ ha_innobase::delete_table( trx_search_latch_release_if_reserved(parent_trx); - if (lower_case_table_names) { - srv_lower_case_table_names = TRUE; - } else { - srv_lower_case_table_names = FALSE; - } - trx = trx_allocate_for_mysql(); trx->mysql_thd = thd; @@ -5961,6 +5946,8 @@ ha_innobase::delete_table( /* Drop the table in InnoDB */ + srv_lower_case_table_names = lower_case_table_names; + error = row_drop_table_for_mysql(norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB); @@ -6089,12 +6076,6 @@ ha_innobase::rename_table( trx_search_latch_release_if_reserved(parent_trx); - if (lower_case_table_names) { - srv_lower_case_table_names = TRUE; - } else { - srv_lower_case_table_names = FALSE; - } - trx = trx_allocate_for_mysql(); trx->mysql_thd = thd; INNOBASE_COPY_STMT(thd, trx); @@ -6114,6 +6095,8 @@ ha_innobase::rename_table( /* Rename the table in InnoDB */ + srv_lower_case_table_names = lower_case_table_names; + error = row_rename_table_for_mysql(norm_from, norm_to, trx); /* Flush the log to reduce probability that the .frm files and @@ -8826,7 +8809,7 @@ innobase_index_name_is_reserved( /*============================*/ /* out: true if an index name matches the reserved name */ - const trx_t* trx, /* in: InnoDB transaction handle */ + THD* thd, /* in/out: MySQL connection */ const TABLE* form, /* in: information on table columns and indexes */ const char* norm_name) /* in: table name */ @@ -8840,7 +8823,7 @@ innobase_index_name_is_reserved( if (innobase_strcasecmp(key->name, innobase_index_reserve_name) == 0) { /* Push warning to mysql */ - push_warning_printf((THD*) trx->mysql_thd, + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_CANT_CREATE_TABLE, "Cannot Create Index with name " diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index d062fc7e648..0b201816819 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,8 @@ +2011-04-07 The InnoDB Team + + * handler/ha_innodb.cc, handler/ha_innodb.h, handler/handler0alter.cc: + Fix Bug #52409 Assertion failure: long semaphore wait + 2011-04-07 The InnoDB Team * handler/ha_innodb.cc, include/trx0trx.h, include/trx0undo.h, diff --git a/storage/innodb_plugin/handler/ha_innodb.cc b/storage/innodb_plugin/handler/ha_innodb.cc index 7f92d797d30..2b0dbf82b34 100644 --- a/storage/innodb_plugin/handler/ha_innodb.cc +++ b/storage/innodb_plugin/handler/ha_innodb.cc @@ -6023,10 +6023,6 @@ create_table_def( DBUG_PRINT("enter", ("table_name: %s", table_name)); ut_a(trx->mysql_thd != NULL); - if (IS_MAGIC_TABLE_AND_USER_DENIED_ACCESS(table_name, - (THD*) trx->mysql_thd)) { - DBUG_RETURN(HA_ERR_GENERIC); - } /* MySQL does the name length check. 
But we do additional check on the name length here */ @@ -6146,6 +6142,8 @@ err_col: col_len); } + srv_lower_case_table_names = lower_case_table_names; + error = row_create_table_for_mysql(table, trx); if (error == DB_DUPLICATE_KEY) { @@ -6562,42 +6560,17 @@ ha_innobase::create( DBUG_RETURN(HA_ERR_TO_BIG_ROW); } - /* Get the transaction associated with the current thd, or create one - if not yet created */ - - parent_trx = check_trx_exists(thd); - - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - trx_search_latch_release_if_reserved(parent_trx); - - trx = innobase_trx_allocate(thd); - - if (lower_case_table_names) { - srv_lower_case_table_names = TRUE; - } else { - srv_lower_case_table_names = FALSE; - } - strcpy(name2, name); normalize_table_name(norm_name, name2); - /* Latch the InnoDB data dictionary exclusively so that no deadlocks - or lock waits can happen in it during a table create operation. - Drop table etc. do this latching in row0mysql.c. */ - - row_mysql_lock_data_dictionary(trx); - /* Create the table definition in InnoDB */ flags = 0; /* Validate create options if innodb_strict_mode is set. */ if (!create_options_are_valid(thd, form, create_info)) { - error = ER_ILLEGAL_HA_CREATE_OPTION; - goto cleanup; + DBUG_RETURN(ER_ILLEGAL_HA_CREATE_OPTION); } if (create_info->key_block_size) { @@ -6739,16 +6712,37 @@ ha_innobase::create( /* Check for name conflicts (with reserved name) for any user indices to be created. */ - if (innobase_index_name_is_reserved(trx, form->key_info, + if (innobase_index_name_is_reserved(thd, form->key_info, form->s->keys)) { - error = -1; - goto cleanup; + DBUG_RETURN(-1); + } + + if (IS_MAGIC_TABLE_AND_USER_DENIED_ACCESS(norm_name, thd)) { + DBUG_RETURN(HA_ERR_GENERIC); } if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { flags |= DICT_TF2_TEMPORARY << DICT_TF2_SHIFT; } + /* Get the transaction associated with the current thd, or create one + if not yet created */ + + parent_trx = check_trx_exists(thd); + + /* In case MySQL calls this in the middle of a SELECT query, release + possible adaptive hash latch to avoid deadlocks of threads */ + + trx_search_latch_release_if_reserved(parent_trx); + + trx = innobase_trx_allocate(thd); + + /* Latch the InnoDB data dictionary exclusively so that no deadlocks + or lock waits can happen in it during a table create operation. + Drop table etc. do this latching in row0mysql.c. */ + + row_mysql_lock_data_dictionary(trx); + error = create_table_def(trx, form, norm_name, create_info->options & HA_LEX_CREATE_TMP_TABLE ? 
name2 : NULL, flags); @@ -6992,18 +6986,14 @@ ha_innobase::delete_table( trx = innobase_trx_allocate(thd); - if (lower_case_table_names) { - srv_lower_case_table_names = TRUE; - } else { - srv_lower_case_table_names = FALSE; - } - name_len = strlen(name); ut_a(name_len < 1000); /* Drop the table in InnoDB */ + srv_lower_case_table_names = lower_case_table_names; + error = row_drop_table_for_mysql(norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB); @@ -7119,12 +7109,6 @@ innobase_rename_table( char* norm_to; char* norm_from; - if (lower_case_table_names) { - srv_lower_case_table_names = TRUE; - } else { - srv_lower_case_table_names = FALSE; - } - // Magic number 64 arbitrary norm_to = (char*) my_malloc(strlen(to) + 64, MYF(0)); norm_from = (char*) my_malloc(strlen(from) + 64, MYF(0)); @@ -7139,6 +7123,8 @@ innobase_rename_table( row_mysql_lock_data_dictionary(trx); } + srv_lower_case_table_names = lower_case_table_names; + error = row_rename_table_for_mysql( norm_from, norm_to, trx, lock_and_commit); @@ -10700,19 +10686,19 @@ static int show_innodb_vars(THD *thd, SHOW_VAR *var, char *buff) return 0; } -/*********************************************************************** +/*********************************************************************//** This function checks each index name for a table against reserved -system default primary index name 'GEN_CLUST_INDEX'. If a name matches, -this function pushes an warning message to the client, and returns true. */ +system default primary index name 'GEN_CLUST_INDEX'. If a name +matches, this function pushes an warning message to the client, +and returns true. +@return true if the index name matches the reserved name */ extern "C" UNIV_INTERN bool innobase_index_name_is_reserved( /*============================*/ - /* out: true if an index name - matches the reserved name */ - const trx_t* trx, /* in: InnoDB transaction handle */ - const KEY* key_info, /* in: Indexes to be created */ - ulint num_of_keys) /* in: Number of indexes to + THD* thd, /*!< in/out: MySQL connection */ + const KEY* key_info, /*!< in: Indexes to be created */ + ulint num_of_keys) /*!< in: Number of indexes to be created. */ { const KEY* key; @@ -10724,7 +10710,7 @@ innobase_index_name_is_reserved( if (innobase_strcasecmp(key->name, innobase_index_reserve_name) == 0) { /* Push warning to mysql */ - push_warning_printf((THD*) trx->mysql_thd, + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WRONG_NAME_FOR_INDEX, "Cannot Create Index with name " diff --git a/storage/innodb_plugin/handler/ha_innodb.h b/storage/innodb_plugin/handler/ha_innodb.h index 7a8f29853de..f7a5456b1a7 100644 --- a/storage/innodb_plugin/handler/ha_innodb.h +++ b/storage/innodb_plugin/handler/ha_innodb.h @@ -317,15 +317,14 @@ innobase_trx_allocate( This function checks each index name for a table against reserved system default primary index name 'GEN_CLUST_INDEX'. If a name matches, this function pushes an warning message to the client, -and returns true. */ +and returns true. 
+@return true if the index name matches the reserved name */ extern "C" bool innobase_index_name_is_reserved( /*============================*/ - /* out: true if the index name - matches the reserved name */ - const trx_t* trx, /* in: InnoDB transaction handle */ - const KEY* key_info, /* in: Indexes to be created */ - ulint num_of_keys); /* in: Number of indexes to + THD* thd, /*!< in/out: MySQL connection */ + const KEY* key_info, /*!< in: Indexes to be created */ + ulint num_of_keys); /*!< in: Number of indexes to be created. */ diff --git a/storage/innodb_plugin/handler/handler0alter.cc b/storage/innodb_plugin/handler/handler0alter.cc index dc1317d5c5a..485e03737e3 100644 --- a/storage/innodb_plugin/handler/handler0alter.cc +++ b/storage/innodb_plugin/handler/handler0alter.cc @@ -649,44 +649,37 @@ ha_innobase::add_index( update_thd(); - heap = mem_heap_create(1024); - /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads. */ trx_search_latch_release_if_reserved(prebuilt->trx); - trx_start_if_not_started(prebuilt->trx); - /* Create a background transaction for the operations on - the data dictionary tables. */ - trx = innobase_trx_allocate(user_thd); - trx_start_if_not_started(trx); + /* Check if the index name is reserved. */ + if (innobase_index_name_is_reserved(user_thd, key_info, num_of_keys)) { + DBUG_RETURN(-1); + } innodb_table = indexed_table = dict_table_get(prebuilt->table->name, FALSE); if (UNIV_UNLIKELY(!innodb_table)) { - error = HA_ERR_NO_SUCH_TABLE; - goto err_exit; + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } - /* Check if the index name is reserved. */ - if (innobase_index_name_is_reserved(trx, key_info, num_of_keys)) { - error = -1; - } else { - /* Check that index keys are sensible */ - error = innobase_check_index_keys(key_info, num_of_keys, - innodb_table); - } + /* Check that index keys are sensible */ + error = innobase_check_index_keys(key_info, num_of_keys, innodb_table); if (UNIV_UNLIKELY(error)) { -err_exit: - mem_heap_free(heap); - trx_general_rollback_for_mysql(trx, NULL); - trx_free_for_mysql(trx); - trx_commit_for_mysql(prebuilt->trx); DBUG_RETURN(error); } + heap = mem_heap_create(1024); + trx_start_if_not_started(prebuilt->trx); + + /* Create a background transaction for the operations on + the data dictionary tables. */ + trx = innobase_trx_allocate(user_thd); + trx_start_if_not_started(trx); + /* Create table containing all indexes to be built in this alter table add index so that they are in the correct order in the table. 
*/ @@ -758,8 +751,12 @@ err_exit: ut_d(dict_table_check_for_dup_indexes(innodb_table, FALSE)); + mem_heap_free(heap); + trx_general_rollback_for_mysql(trx, NULL); row_mysql_unlock_data_dictionary(trx); - goto err_exit; + trx_free_for_mysql(trx); + trx_commit_for_mysql(prebuilt->trx); + DBUG_RETURN(error); } trx->table_id = indexed_table->id; -- cgit v1.2.1 From e108b3f69eb74490cd1ddf8b53b9b6afd0ad054b Mon Sep 17 00:00:00 2001 From: Sven Sandberg Date: Mon, 11 Apr 2011 16:01:46 +0200 Subject: corrected bug reference for experimental test --- mysql-test/collections/default.experimental | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 1e6ff625d39..703a8a18ef0 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -22,7 +22,7 @@ main.outfile_loaddata @solaris # joro : Bug #46895 ndb.* # joro : NDB tests marked as experimental as agreed with bochklin rpl.rpl_innodb_bug28430 @solaris # Bug#46029 -rpl.rpl_row_sp011 @solaris # Joro : Bug #54138 +rpl.rpl_row_sp011 @solaris # Joro : Bug #45445 rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin rpl_ndb.rpl_ndb_log # Bug#38998 -- cgit v1.2.1 From 914873674b4c08c3f4b726f3a4dba16bfb228ff9 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 12 Apr 2011 01:36:38 +0200 Subject: Bug#11867664: Fix server crashes on update with join on partitioned table. --- sql/ha_partition.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index f55c48189fe..bd8e0d397c4 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4317,7 +4317,8 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index, break; } } - m_last_part= part; + if (part <= m_part_spec.end_part) + m_last_part= part; } else { @@ -6237,7 +6238,14 @@ void ha_partition::print_error(int error, myf errflag) { /* In case m_file has not been initialized, like in bug#42438 */ if (m_file) + { + if (m_last_part >= m_tot_parts) + { + DBUG_ASSERT(0); + m_last_part= 0; + } m_file[m_last_part]->print_error(error, errflag); + } else handler::print_error(error, errflag); } -- cgit v1.2.1 From 33c2a5e7e3f5ed2dee0f50f6d02052d8bf2234b9 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Tue, 12 Apr 2011 13:51:36 +0400 Subject: Bug#11766212 59270: NOT IN (YEAR( ... ), ... ) PRODUCES MANY VALGRIND WARNINGS Valgrind warning happens due to early null values check in Item_func_in::fix_length_and_dec(before item evaluation). As result null value items with uninitialized values are placed into array and it leads to valgrind warnings during value array sorting. The fix is to check null value after item evaluation, item is evaluated in in_array::set() method. mysql-test/r/func_in.result: test case mysql-test/t/func_in.test: test case sql/item_cmpfunc.cc: The fix is to check null value after item evaluation. --- mysql-test/r/func_in.result | 6 ++++++ mysql-test/t/func_in.test | 6 ++++++ sql/item_cmpfunc.cc | 12 +++++------- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/func_in.result b/mysql-test/r/func_in.result index fdeec2755ca..0b6117581f3 100644 --- a/mysql-test/r/func_in.result +++ b/mysql-test/r/func_in.result @@ -770,4 +770,10 @@ CASE a WHEN a THEN a END NULL DROP TABLE t1; # +# Bug #11766212 59270: NOT IN (YEAR( ... ), ... 
) PRODUCES MANY VALGRIND WARNINGS +# +SELECT 1 IN (YEAR(FROM_UNIXTIME(NULL)) ,1); +1 IN (YEAR(FROM_UNIXTIME(NULL)) ,1) +1 +# End of 5.1 tests diff --git a/mysql-test/t/func_in.test b/mysql-test/t/func_in.test index 6efeb2866e6..08469b37967 100644 --- a/mysql-test/t/func_in.test +++ b/mysql-test/t/func_in.test @@ -554,6 +554,12 @@ SELECT CASE a WHEN a THEN a END FROM t1 GROUP BY a WITH ROLLUP; DROP TABLE t1; +--echo # +--echo # Bug #11766212 59270: NOT IN (YEAR( ... ), ... ) PRODUCES MANY VALGRIND WARNINGS +--echo # + +SELECT 1 IN (YEAR(FROM_UNIXTIME(NULL)) ,1); + --echo # --echo End of 5.1 tests diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 36ca5537eb5..23f081e1cc0 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -4000,13 +4000,11 @@ void Item_func_in::fix_length_and_dec() uint j=0; for (uint i=1 ; i < arg_count ; i++) { - if (!args[i]->null_value) // Skip NULL values - { - array->set(j,args[i]); - j++; - } - else - have_null= 1; + array->set(j,args[i]); + if (!args[i]->null_value) // Skip NULL values + j++; + else + have_null= 1; } if ((array->used_count= j)) array->sort(); -- cgit v1.2.1 From 7fa7a0cad95b0c8cc4f7f450f7f3411fa632b148 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Tue, 12 Apr 2011 14:01:33 +0400 Subject: Bug#11766270 59343: YEAR(4): INCORRECT RESULT AND VALGRIND WARNINGS WITH MIN/MAX, UNION When we create temporary result table for UNION incorrect max_length for YEAR field is used and it leads to incorrect field value and incorrect result string length as YEAR field value calculation depends on field length. The fix is to use underlying item max_length for Item_sum_hybrid::max_length intialization. mysql-test/r/func_group.result: test case mysql-test/t/func_group.test: test case sql/field.cc: added assert sql/item_sum.cc: init Item_sum_hybrid::max_length with use underlying item max_length for INT result type. 
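For reference, the failure mode can be sketched as follows (illustration only, distilled from the regression test added below, not an additional test file). YEAR has INT result type, so the old Item_sum_hybrid::fix_fields() code took the generic INT width of 20 for max_length instead of the underlying item's 4; the UNION temporary table then created its YEAR column with the wrong length, and Field_year's value/length handling went wrong from there.

# sketch, assuming a 5.1 server without this fix
CREATE TABLE t1(f1 YEAR(4));
INSERT INTO t1 VALUES (0000),(2001);
# MAX(f1) is materialized into a temporary table for the UNION; with the
# hybrid item's max_length left at the INT default of 20 instead of 4,
# the result metadata and value came out wrong and Valgrind complained.
(SELECT MAX(f1) FROM t1) UNION (SELECT MAX(f1) FROM t1);
DROP TABLE t1;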
--- mysql-test/r/func_group.result | 11 +++++++++++ mysql-test/t/func_group.test | 12 ++++++++++++ sql/field.cc | 2 ++ sql/item_sum.cc | 6 +----- 4 files changed, 26 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 69bce1c8bd8..b90eb2a4c0f 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -1746,4 +1746,15 @@ MAX(LENGTH(a)) LENGTH(MAX(a)) MIN(a) MAX(a) CONCAT(MIN(a)) CONCAT(MAX(a)) 20 20 18446668621106209655 18446668621106209655 18446668621106209655 18446668621106209655 DROP TABLE t1; # +# Bug #11766270 59343: YEAR(4): INCORRECT RESULT AND VALGRIND WARNINGS WITH MIN/MAX, UNION +# +CREATE TABLE t1(f1 YEAR(4)); +INSERT INTO t1 VALUES (0000),(2001); +(SELECT MAX(f1) FROM t1) UNION (SELECT MAX(f1) FROM t1); +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def MAX(f1) MAX(f1) 13 4 4 Y 32864 0 63 +MAX(f1) +2001 +DROP TABLE t1; +# End of 5.1 tests diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 600b46fcde6..177a1ca2471 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -1127,6 +1127,18 @@ INSERT INTO t1 VALUES (18446668621106209655); SELECT MAX(LENGTH(a)), LENGTH(MAX(a)), MIN(a), MAX(a), CONCAT(MIN(a)), CONCAT(MAX(a)) FROM t1; DROP TABLE t1; +--echo # +--echo # Bug #11766270 59343: YEAR(4): INCORRECT RESULT AND VALGRIND WARNINGS WITH MIN/MAX, UNION +--echo # + +CREATE TABLE t1(f1 YEAR(4)); +INSERT INTO t1 VALUES (0000),(2001); +--enable_metadata +(SELECT MAX(f1) FROM t1) UNION (SELECT MAX(f1) FROM t1); +--disable_metadata +DROP TABLE t1; + + --echo # --echo End of 5.1 tests diff --git a/sql/field.cc b/sql/field.cc index 1ad5e408e07..3707c5b056f 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5467,6 +5467,7 @@ double Field_year::val_real(void) longlong Field_year::val_int(void) { ASSERT_COLUMN_MARKED_FOR_READ; + DBUG_ASSERT(field_length == 2 || field_length == 4); int tmp= (int) ptr[0]; if (field_length != 4) tmp%=100; // Return last 2 char @@ -5479,6 +5480,7 @@ longlong Field_year::val_int(void) String *Field_year::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + DBUG_ASSERT(field_length < 5); val_buffer->alloc(5); val_buffer->length(field_length); char *to=(char*) val_buffer->ptr(); diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 2a8aea68f7a..c62738abac0 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -612,17 +612,13 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref) switch (hybrid_type= item->result_type()) { case INT_RESULT: - max_length= 20; - break; case DECIMAL_RESULT: + case STRING_RESULT: max_length= item->max_length; break; case REAL_RESULT: max_length= float_length(decimals); break; - case STRING_RESULT: - max_length= item->max_length; - break; case ROW_RESULT: default: DBUG_ASSERT(0); -- cgit v1.2.1 From da267719197397fbf0ba70fe0749788a82581267 Mon Sep 17 00:00:00 2001 From: Sven Sandberg Date: Tue, 12 Apr 2011 13:14:49 +0200 Subject: marked rpl_stop_slave experimental due to BUG#12345981 --- mysql-test/collections/default.experimental | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 703a8a18ef0..4e566436ac8 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -23,6 +23,7 @@ ndb.* # joro : NDB tests marked as experiment rpl.rpl_innodb_bug28430 @solaris # Bug#46029 
rpl.rpl_row_sp011 @solaris # Joro : Bug #45445 +rpl.rpl_stop_slave @freebsd # Sven : BUG#12345981 rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin rpl_ndb.rpl_ndb_log # Bug#38998 -- cgit v1.2.1 From 729e9a65943823b2636ea8dc3c50486a3844c02c Mon Sep 17 00:00:00 2001 From: Serge Kozlov Date: Thu, 14 Apr 2011 00:18:08 +0400 Subject: WL#5867, reorganize test cases of bugs suite --- mysql-test/collections/default.experimental | 5 + mysql-test/suite/binlog/r/binlog_bug23533.result | 19 +++ mysql-test/suite/binlog/r/binlog_bug36391.result | 10 ++ mysql-test/suite/binlog/t/binlog_bug23533.test | 38 +++++ .../suite/binlog/t/binlog_bug36391-master.opt | 1 + mysql-test/suite/binlog/t/binlog_bug36391.test | 30 ++++ mysql-test/suite/bugs/combinations | 8 - mysql-test/suite/bugs/data/rpl_bug12691.dat | 3 - mysql-test/suite/bugs/r/rpl_bug12691.result | 33 ---- mysql-test/suite/bugs/r/rpl_bug23533.result | 23 --- mysql-test/suite/bugs/r/rpl_bug31582.result | 16 -- mysql-test/suite/bugs/r/rpl_bug31583.result | 16 -- mysql-test/suite/bugs/r/rpl_bug33029.result | 15 -- mysql-test/suite/bugs/r/rpl_bug36391.result | 18 --- mysql-test/suite/bugs/r/rpl_bug37426.result | 17 --- mysql-test/suite/bugs/r/rpl_bug38205.result | 56 ------- mysql-test/suite/bugs/t/rpl_bug12691.test | 49 ------ mysql-test/suite/bugs/t/rpl_bug23533.test | 36 ----- mysql-test/suite/bugs/t/rpl_bug31582.test | 25 ---- mysql-test/suite/bugs/t/rpl_bug31583.test | 25 ---- mysql-test/suite/bugs/t/rpl_bug33029.test | 26 ---- mysql-test/suite/bugs/t/rpl_bug36391-master.opt | 1 - mysql-test/suite/bugs/t/rpl_bug36391.test | 29 ---- mysql-test/suite/bugs/t/rpl_bug37426.test | 21 --- mysql-test/suite/bugs/t/rpl_bug38205.test | 166 --------------------- mysql-test/suite/rpl/r/rpl_bug37426.result | 12 ++ mysql-test/suite/rpl/t/rpl_bug37426.test | 22 +++ 27 files changed, 137 insertions(+), 583 deletions(-) create mode 100644 mysql-test/suite/binlog/r/binlog_bug23533.result create mode 100644 mysql-test/suite/binlog/r/binlog_bug36391.result create mode 100644 mysql-test/suite/binlog/t/binlog_bug23533.test create mode 100644 mysql-test/suite/binlog/t/binlog_bug36391-master.opt create mode 100644 mysql-test/suite/binlog/t/binlog_bug36391.test delete mode 100644 mysql-test/suite/bugs/combinations delete mode 100644 mysql-test/suite/bugs/data/rpl_bug12691.dat delete mode 100644 mysql-test/suite/bugs/r/rpl_bug12691.result delete mode 100644 mysql-test/suite/bugs/r/rpl_bug23533.result delete mode 100644 mysql-test/suite/bugs/r/rpl_bug31582.result delete mode 100644 mysql-test/suite/bugs/r/rpl_bug31583.result delete mode 100644 mysql-test/suite/bugs/r/rpl_bug33029.result delete mode 100644 mysql-test/suite/bugs/r/rpl_bug36391.result delete mode 100644 mysql-test/suite/bugs/r/rpl_bug37426.result delete mode 100644 mysql-test/suite/bugs/r/rpl_bug38205.result delete mode 100644 mysql-test/suite/bugs/t/rpl_bug12691.test delete mode 100644 mysql-test/suite/bugs/t/rpl_bug23533.test delete mode 100644 mysql-test/suite/bugs/t/rpl_bug31582.test delete mode 100644 mysql-test/suite/bugs/t/rpl_bug31583.test delete mode 100644 mysql-test/suite/bugs/t/rpl_bug33029.test delete mode 100644 mysql-test/suite/bugs/t/rpl_bug36391-master.opt delete mode 100644 mysql-test/suite/bugs/t/rpl_bug36391.test delete mode 100644 mysql-test/suite/bugs/t/rpl_bug37426.test delete mode 100644 mysql-test/suite/bugs/t/rpl_bug38205.test create mode 100644 mysql-test/suite/rpl/r/rpl_bug37426.result create mode 100644 mysql-test/suite/rpl/t/rpl_bug37426.test diff --git 
a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 4e566436ac8..72e14135ef0 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -2,6 +2,9 @@ # in alphabetical order. This also helps with merge conflict resolution. binlog.binlog_multi_engine # joro : NDB tests marked as experimental as agreed with bochklin +binlog.binlog_bug23533 # WL#5867: skozlov: test case moved from unused bugs suite +binlog.binlog_bug36391 # WL#5867: skozlov: test case moved from unused bugs suite + funcs_1.charset_collation_1 # depends on compile-time decisions funcs_1.is_cml_ndb # joro : NDB tests marked as experimental as agreed with bochklin @@ -24,6 +27,8 @@ ndb.* # joro : NDB tests marked as experiment rpl.rpl_innodb_bug28430 @solaris # Bug#46029 rpl.rpl_row_sp011 @solaris # Joro : Bug #45445 rpl.rpl_stop_slave @freebsd # Sven : BUG#12345981 +rpl.rpl_bug37426 # WL#5867: skozlov: test case moved from unused bugs suite + rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin rpl_ndb.rpl_ndb_log # Bug#38998 diff --git a/mysql-test/suite/binlog/r/binlog_bug23533.result b/mysql-test/suite/binlog/r/binlog_bug23533.result new file mode 100644 index 00000000000..8a28867afb4 --- /dev/null +++ b/mysql-test/suite/binlog/r/binlog_bug23533.result @@ -0,0 +1,19 @@ +SET AUTOCOMMIT=0; +CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=InnoDB; +SELECT COUNT(*) FROM t1; +COUNT(*) +1000 +SHOW VARIABLES LIKE 'max_binlog_cache_size'; +Variable_name Value +max_binlog_cache_size 4294963200 +SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; +SET GLOBAL max_binlog_cache_size=4096; +START TRANSACTION; +CREATE TABLE t2 SELECT * FROM t1; +ERROR HY000: Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again +COMMIT; +SHOW TABLES LIKE 't%'; +Tables_in_test (t%) +t1 +SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; +DROP TABLE t1; diff --git a/mysql-test/suite/binlog/r/binlog_bug36391.result b/mysql-test/suite/binlog/r/binlog_bug36391.result new file mode 100644 index 00000000000..551bfb9924d --- /dev/null +++ b/mysql-test/suite/binlog/r/binlog_bug36391.result @@ -0,0 +1,10 @@ +CREATE TABLE t1(id INT); +SHOW TABLES; +Tables_in_test +t1 +FLUSH LOGS; +DROP TABLE t1; +SHOW TABLES; +Tables_in_test +t1 +DROP TABLE t1; diff --git a/mysql-test/suite/binlog/t/binlog_bug23533.test b/mysql-test/suite/binlog/t/binlog_bug23533.test new file mode 100644 index 00000000000..3c9a7ab5896 --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_bug23533.test @@ -0,0 +1,38 @@ +############################################################# +# Bug#23533: CREATE SELECT max_binlog_cache_size test +# case needed +############################################################# + +--source include/have_innodb.inc +--source include/have_log_bin.inc +--source include/have_binlog_format_row.inc + +SET AUTOCOMMIT=0; + +# Create 1st table +CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=InnoDB; +--disable_query_log +let $i= 1000; +while ($i) +{ + eval INSERT INTO t1 VALUES($i, REPEAT('x', 4096)); + dec $i; +} +--enable_query_log +SELECT COUNT(*) FROM t1; + +# Set small value for max_binlog_cache_size +SHOW VARIABLES LIKE 'max_binlog_cache_size'; +SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; +SET GLOBAL max_binlog_cache_size=4096; + +# Copied data from t1 into t2 large than 
max_binlog_cache_size +START TRANSACTION; +--error 1197 +CREATE TABLE t2 SELECT * FROM t1; +COMMIT; +SHOW TABLES LIKE 't%'; + +# 5.1 End of Test +SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; +DROP TABLE t1; diff --git a/mysql-test/suite/binlog/t/binlog_bug36391-master.opt b/mysql-test/suite/binlog/t/binlog_bug36391-master.opt new file mode 100644 index 00000000000..56273241f14 --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_bug36391-master.opt @@ -0,0 +1 @@ +--sql_mode=NO_BACKSLASH_ESCAPES diff --git a/mysql-test/suite/binlog/t/binlog_bug36391.test b/mysql-test/suite/binlog/t/binlog_bug36391.test new file mode 100644 index 00000000000..64d91dfafd9 --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_bug36391.test @@ -0,0 +1,30 @@ +# +# BUG#36391 and BUG#38731 +# +# The fix for BUG#20103 "Escaping with backslash does not work as expected" +# was implemented too greedy though in that it not only changes the behavior +# of backslashes within strings but in general, so disabling command shortcuts +# like \G or \C (which in turn leads to BUG#36391: "mysqlbinlog creates invalid charset statements". +# +# The test executes simple commands that are stored in the binary log and +# re-execute them through the mysql client which should have to process +# some command shortcuts. The backslashes within strings is disabled in the file +# rpl_bug36391-master.opt by the option --sql_mode=NO_BACKSLASH_ESCAPES. +# +# + +--source include/have_log_bin.inc +--source include/have_binlog_format_mixed.inc + +CREATE TABLE t1(id INT); +let $binlog= query_get_value(SHOW MASTER STATUS, File, 1); +let $binlog_path= `SELECT CONCAT(@@DATADIR, '$binlog')`; +SHOW TABLES; +FLUSH LOGS; +DROP TABLE t1; + +--exec $MYSQL_BINLOG $binlog_path | $MYSQL test +SHOW TABLES; + +# Clean up +DROP TABLE t1; diff --git a/mysql-test/suite/bugs/combinations b/mysql-test/suite/bugs/combinations deleted file mode 100644 index 07042c2cbec..00000000000 --- a/mysql-test/suite/bugs/combinations +++ /dev/null @@ -1,8 +0,0 @@ -[row] -binlog-format=row - -[stmt] -binlog-format=statement - -[mix] -binlog-format=mixed diff --git a/mysql-test/suite/bugs/data/rpl_bug12691.dat b/mysql-test/suite/bugs/data/rpl_bug12691.dat deleted file mode 100644 index de980441c3a..00000000000 --- a/mysql-test/suite/bugs/data/rpl_bug12691.dat +++ /dev/null @@ -1,3 +0,0 @@ -a -b -c diff --git a/mysql-test/suite/bugs/r/rpl_bug12691.result b/mysql-test/suite/bugs/r/rpl_bug12691.result deleted file mode 100644 index 8feeb0effc3..00000000000 --- a/mysql-test/suite/bugs/r/rpl_bug12691.result +++ /dev/null @@ -1,33 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; - -**** On Master **** -CREATE TABLE t1 (b CHAR(10)); - -**** On Slave **** -STOP SLAVE; - -**** On Master **** -LOAD DATA INFILE FILENAME -SELECT COUNT(*) FROM t1; -COUNT(*) -3 -show binlog events from ; -Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (b CHAR(10)) -master-bin.000001 # Begin_load_query # # ;file_id=#;block_len=# -master-bin.000001 # Execute_load_query # # use `test`; LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/rpl_bug12691.dat' INTO TABLE `t1` FIELDS TERMINATED BY '|' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' (`b`) ;file_id=# - -**** On Slave **** -SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; -START SLAVE; -SELECT COUNT(*) FROM t1; -COUNT(*) -0 - -**** On Master **** -DROP TABLE t1; diff --git 
a/mysql-test/suite/bugs/r/rpl_bug23533.result b/mysql-test/suite/bugs/r/rpl_bug23533.result deleted file mode 100644 index 1dda75a69b0..00000000000 --- a/mysql-test/suite/bugs/r/rpl_bug23533.result +++ /dev/null @@ -1,23 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; -DROP TABLE IF EXISTS t1,t2; -SET AUTOCOMMIT=0; -SET GLOBAL max_binlog_cache_size=4096; -SHOW VARIABLES LIKE 'max_binlog_cache_size'; -Variable_name Value -max_binlog_cache_size 4096 -CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=InnoDB; -SELECT COUNT(*) FROM t1; -COUNT(*) -1000 -START TRANSACTION; -CREATE TABLE t2 SELECT * FROM t1; -ERROR HY000: Writing one row to the row-based binary log failed -COMMIT; -SHOW TABLES LIKE 't%'; -Tables_in_test (t%) -t1 diff --git a/mysql-test/suite/bugs/r/rpl_bug31582.result b/mysql-test/suite/bugs/r/rpl_bug31582.result deleted file mode 100644 index 1f71fbf8fe7..00000000000 --- a/mysql-test/suite/bugs/r/rpl_bug31582.result +++ /dev/null @@ -1,16 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; -CREATE TABLE t1 (a VARCHAR(10) PRIMARY KEY) ENGINE=MyISAM; -INSERT INTO t1 VALUES ('a'); -UPDATE t1 SET a = 'MyISAM'; -SELECT * FROM t1 ORDER BY a; -a -MyISAM -SELECT * FROM t1 ORDER BY a; -a -MyISAM -DROP TABLE t1; diff --git a/mysql-test/suite/bugs/r/rpl_bug31583.result b/mysql-test/suite/bugs/r/rpl_bug31583.result deleted file mode 100644 index 74846607313..00000000000 --- a/mysql-test/suite/bugs/r/rpl_bug31583.result +++ /dev/null @@ -1,16 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; -CREATE TABLE t1 ( a INT, b INT DEFAULT -3 ); -INSERT INTO t1 VALUES (1, DEFAULT); -UPDATE t1 SET a = 3; -SELECT * FROM t1 ORDER BY a; -a b -3 -3 -SELECT * FROM t1 ORDER BY a; -a b -3 -3 -DROP TABLE t1; diff --git a/mysql-test/suite/bugs/r/rpl_bug33029.result b/mysql-test/suite/bugs/r/rpl_bug33029.result deleted file mode 100644 index d11ae1cc0be..00000000000 --- a/mysql-test/suite/bugs/r/rpl_bug33029.result +++ /dev/null @@ -1,15 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; -create table `t1` (`id` int not null auto_increment primary key); -create trigger `trg` before insert on `t1` for each row begin end; -set @@global.debug="+d,simulate_bug33029"; -stop slave; -start slave; -insert into `t1` values (); -select * from t1; -id -1 diff --git a/mysql-test/suite/bugs/r/rpl_bug36391.result b/mysql-test/suite/bugs/r/rpl_bug36391.result deleted file mode 100644 index 33175d89d30..00000000000 --- a/mysql-test/suite/bugs/r/rpl_bug36391.result +++ /dev/null @@ -1,18 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; -drop table if exists t1; -Warnings: -Note 1051 Unknown table 't1' -create table t1(id int); -show tables; -Tables_in_test -t1 -show master status; -File Position Binlog_Do_DB Binlog_Ignore_DB -master-bin.000001 # -flush logs; -drop table t1; diff --git a/mysql-test/suite/bugs/r/rpl_bug37426.result b/mysql-test/suite/bugs/r/rpl_bug37426.result deleted file mode 100644 index 24dfd27ca01..00000000000 --- 
a/mysql-test/suite/bugs/r/rpl_bug37426.result +++ /dev/null @@ -1,17 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; -CREATE TABLE char128_utf8 ( -i1 INT NOT NULL, -c CHAR(128) CHARACTER SET utf8 NOT NULL, -i2 INT NOT NULL); -INSERT INTO char128_utf8 VALUES ( 1, "123", 1 ); -SELECT * FROM char128_utf8; -i1 c i2 -1 123 1 -SELECT * FROM char128_utf8; -i1 c i2 -1 123 1 diff --git a/mysql-test/suite/bugs/r/rpl_bug38205.result b/mysql-test/suite/bugs/r/rpl_bug38205.result deleted file mode 100644 index 8f1dee344fa..00000000000 --- a/mysql-test/suite/bugs/r/rpl_bug38205.result +++ /dev/null @@ -1,56 +0,0 @@ -stop slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -reset master; -reset slave; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -start slave; -create table t1i(n int primary key) engine=innodb; -create table t2m(n int primary key) engine=myisam; -begin; -insert into t1i values (1); -insert into t1i values (2); -insert into t1i values (3); -commit; -begin; -insert into t1i values (5); -begin; -insert into t1i values (4); -insert into t2m values (1); -update t1i set n = 5 where n = 4; -commit; -zero -0 -*** kill sql thread *** -rollback; -*** sql thread is *not* running: No *** -*** the prove: the killed slave has not finished the current transaction *** -three -3 -one -1 -zero -0 -delete from t2m; -start slave sql_thread; -delete from t1i; -delete from t2m; -begin; -insert into t1i values (5); -begin; -insert into t1i values (4); -update t1i set n = 5 where n = 4; -commit; -zero -0 -stop slave sql_thread; -rollback; -*** sql thread is *not* running: No *** -*** the prove: the stopped slave has rolled back the current transaction *** -zero -0 -zero -0 -one -1 -start slave sql_thread; -drop table t1i, t2m; diff --git a/mysql-test/suite/bugs/t/rpl_bug12691.test b/mysql-test/suite/bugs/t/rpl_bug12691.test deleted file mode 100644 index 038f3e57b75..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug12691.test +++ /dev/null @@ -1,49 +0,0 @@ -# Bug#12691: Exec_master_log_pos corrupted with SQL_SLAVE_SKIP_COUNTER - ---source include/master-slave.inc ---connection master ---source include/have_binlog_format_mixed_or_statement.inc - ---echo ---echo **** On Master **** -CREATE TABLE t1 (b CHAR(10)); ---echo ---echo **** On Slave **** ---sync_slave_with_master -STOP SLAVE; ---source include/wait_for_slave_to_stop.inc - ---connection master - ---echo ---echo **** On Master **** ---exec cp $MYSQL_TEST_DIR/suite/bugs/data/rpl_bug12691.dat $MYSQLTEST_VARDIR/tmp/ ---echo LOAD DATA INFILE FILENAME ---disable_query_log ---eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/rpl_bug12691.dat' INTO TABLE t1 FIELDS TERMINATED BY '|' ---enable_query_log ---remove_file $MYSQLTEST_VARDIR/tmp/rpl_bug12691.dat - -SELECT COUNT(*) FROM t1; - -source include/show_binlog_events.inc; - ---save_master_pos - ---connection slave ---echo ---echo **** On Slave **** -SET GLOBAL SQL_SLAVE_SKIP_COUNTER=1; -START SLAVE; ---source include/wait_for_slave_to_start.inc ---sync_with_master - -SELECT COUNT(*) FROM t1; - -# Clean up ---connection master ---echo ---echo **** On Master **** -DROP TABLE t1; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/bugs/t/rpl_bug23533.test b/mysql-test/suite/bugs/t/rpl_bug23533.test deleted file mode 100644 index 337dddcef3d..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug23533.test +++ /dev/null @@ -1,36 +0,0 @@ 
-############################################################# -# Bug#23533: CREATE SELECT max_binlog_cache_size test -# case needed -############################################################# - ---source include/have_innodb.inc ---source include/have_binlog_format_row.inc ---source include/master-slave.inc - -SET AUTOCOMMIT=0; -SET GLOBAL max_binlog_cache_size=4096; -SHOW VARIABLES LIKE 'max_binlog_cache_size'; - -CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=InnoDB; - ---disable_query_log -let $i= 1000; -while ($i) -{ - eval INSERT INTO t1 VALUES($i, REPEAT('x', 4096)); - dec $i; -} ---enable_query_log - -SELECT COUNT(*) FROM t1; - -# Copied data from t1 into t2 large than max_binlog_cache_size -START TRANSACTION; ---error 1534 -CREATE TABLE t2 SELECT * FROM t1; -COMMIT; -SHOW TABLES LIKE 't%'; - - -# 5.1 End of Test ---source include/rpl_end.inc diff --git a/mysql-test/suite/bugs/t/rpl_bug31582.test b/mysql-test/suite/bugs/t/rpl_bug31582.test deleted file mode 100644 index 6bff8ef4172..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug31582.test +++ /dev/null @@ -1,25 +0,0 @@ - -# BUG#31582: 5.1-telco-6.1 -> 5.1.22. Slave crashes when reading -# UPDATE for VARCHAR - -# This is a problem for any update statement replicating from an old -# server to a new server. The bug consisted of a new slave trying to -# read two column bitmaps, but there is only one available in the old -# format. - -# This test case should be executed replicating from an old server to -# a new server, so make sure you have one handy. - -source include/master-slave.inc; - -CREATE TABLE t1 (a VARCHAR(10) PRIMARY KEY) ENGINE=MyISAM; -INSERT INTO t1 VALUES ('a'); -UPDATE t1 SET a = 'MyISAM'; -SELECT * FROM t1 ORDER BY a; -sync_slave_with_master; -SELECT * FROM t1 ORDER BY a; - -connection master; -DROP TABLE t1; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/bugs/t/rpl_bug31583.test b/mysql-test/suite/bugs/t/rpl_bug31583.test deleted file mode 100644 index ee5b7698016..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug31583.test +++ /dev/null @@ -1,25 +0,0 @@ -# -# BUG#31583: 5.1-telco-6.1 -> 5.1.22. Slave returns Error in unknown event - -# This is a problem for any update statement replicating from an old -# server to a new server. The bug consisted of a new slave trying to -# read two column bitmaps, but there is only one available in the old -# format. - -# This test case should be executed replicating from an old server to -# a new server, so make sure you have one handy. - -source include/master-slave.inc; - -CREATE TABLE t1 ( a INT, b INT DEFAULT -3 ); - -INSERT INTO t1 VALUES (1, DEFAULT); -UPDATE t1 SET a = 3; -SELECT * FROM t1 ORDER BY a; -sync_slave_with_master; -SELECT * FROM t1 ORDER BY a; - -connection master; -DROP TABLE t1; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/bugs/t/rpl_bug33029.test b/mysql-test/suite/bugs/t/rpl_bug33029.test deleted file mode 100644 index f5aad4de8df..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug33029.test +++ /dev/null @@ -1,26 +0,0 @@ -# -# Bug #36443 Server crashes when executing insert when insert trigger on table -# -# Emulating the former bug#33029 situation to see that there is no crash anymore. 
-# - - -source include/master-slave.inc; - -create table `t1` (`id` int not null auto_increment primary key); -create trigger `trg` before insert on `t1` for each row begin end; - -sync_slave_with_master; -set @@global.debug="+d,simulate_bug33029"; - -stop slave; -start slave; - -connection master; - -insert into `t1` values (); - -sync_slave_with_master; -select * from t1; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/bugs/t/rpl_bug36391-master.opt b/mysql-test/suite/bugs/t/rpl_bug36391-master.opt deleted file mode 100644 index 56273241f14..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug36391-master.opt +++ /dev/null @@ -1 +0,0 @@ ---sql_mode=NO_BACKSLASH_ESCAPES diff --git a/mysql-test/suite/bugs/t/rpl_bug36391.test b/mysql-test/suite/bugs/t/rpl_bug36391.test deleted file mode 100644 index 3961082273d..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug36391.test +++ /dev/null @@ -1,29 +0,0 @@ -# -# BUG#36391 and BUG#38731 -# -# The fix for BUG#20103 "Escaping with backslash does not work as expected" -# was implemented too greedy though in that it not only changes the behavior -# of backslashes within strings but in general, so disabling command shortcuts -# like \G or \C (which in turn leads to BUG#36391: "mysqlbinlog creates invalid charset statements". -# -# The test executes simple commands that are stored in the binary log and -# re-execute them through the mysql client which should have to process -# some command shortcuts. The backslashes within strings is disabled in the file -# rpl_bug36391-master.opt by the option --sql_mode=NO_BACKSLASH_ESCAPES. -# -# - ---source include/master-slave.inc - -create table t1(id int); - -show tables; - ---source include/show_master_status.inc - -flush logs; - ---exec $MYSQL_BINLOG $MYSQL_TEST_DIR/var/log/master-bin.000001 | $MYSQL test - -drop table t1; ---source include/rpl_end.inc diff --git a/mysql-test/suite/bugs/t/rpl_bug37426.test b/mysql-test/suite/bugs/t/rpl_bug37426.test deleted file mode 100644 index 4c7729ab837..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug37426.test +++ /dev/null @@ -1,21 +0,0 @@ -############################################################# -# Purpose: Test for BUG#37426 -# RBR breaks for CHAR() UTF8 fields > 85 chars -############################################################# - -source include/master-slave.inc; -source include/have_binlog_format_row.inc; - -connection master; -CREATE TABLE char128_utf8 ( - i1 INT NOT NULL, - c CHAR(128) CHARACTER SET utf8 NOT NULL, - i2 INT NOT NULL); - -INSERT INTO char128_utf8 VALUES ( 1, "123", 1 ); - -SELECT * FROM char128_utf8; -sync_slave_with_master; - -SELECT * FROM char128_utf8; ---source include/rpl_end.inc diff --git a/mysql-test/suite/bugs/t/rpl_bug38205.test b/mysql-test/suite/bugs/t/rpl_bug38205.test deleted file mode 100644 index 550746719f4..00000000000 --- a/mysql-test/suite/bugs/t/rpl_bug38205.test +++ /dev/null @@ -1,166 +0,0 @@ -# -# Bug #38205 Row-based Replication (RBR) causes inconsistencies: HA_ERR_FOUND_DUPP_KEY -# Bug#319 if while a non-transactional slave is replicating a transaction possible problem -# -# Verifying the fact that STOP SLAVE in the middle of a group execution waits -# for the end of the group before the slave sql thread will stop. -# The patch refines STOP SLAVE to not interrupt a transaction or other type of -# the replication events group (the part I). -# Killing the sql thread continues to provide a "hard" stop (the part II). 
-# -# Non-deterministic tests -# - -source include/master-slave.inc; -source include/have_innodb.inc; - - -# -# Part II, killed sql slave leaves instantly -# - -# A. multi-statement transaction as the replication group - -connection master; - -create table t1i(n int primary key) engine=innodb; -create table t2m(n int primary key) engine=myisam; - -sync_slave_with_master; - -connection master; - -begin; -insert into t1i values (1); -insert into t1i values (2); -insert into t1i values (3); -commit; - -sync_slave_with_master; - -# -# todo: first challenge is to find out the SQL thread id -# the following is not fully reliable -# - -let $id=`SELECT id from information_schema.processlist where user like 'system user' and state like '%Has read all relay log%' or user like 'system user' and state like '%Reading event from the relay log%'`; -connection slave; -begin; -insert into t1i values (5); - -connection master; -let $pos0_master= query_get_value(SHOW MASTER STATUS, Position, 1); -begin; -insert into t1i values (4); -insert into t2m values (1); # non-ta update -update t1i set n = 5 where n = 4; # to block at. can't be played with killed -commit; -let $pos1_master= query_get_value(SHOW MASTER STATUS, Position, 1); - -connection slave; -# slave sql thread must be locked out by the conn `slave' explicit lock -let $pos0_slave= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); ---disable_query_log -eval select $pos0_master - $pos0_slave as zero; ---enable_query_log - -connection slave1; - -let $count= 1; -let $table= t2m; -source include/wait_until_rows_count.inc; -# -# todo: may fail as said above -# ---echo *** kill sql thread *** ---disable_query_log -eval kill connection $id; ---enable_query_log - -connection slave; -rollback; # release the sql thread - -connection slave1; - -source include/wait_for_slave_sql_to_stop.inc; -let $sql_status= query_get_value(SHOW SLAVE STATUS, Slave_SQL_Running, 1); ---echo *** sql thread is *not* running: $sql_status *** -let $pos1_slave= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); - -connection slave; ---echo *** the prove: the killed slave has not finished the current transaction *** - ---disable_query_log -select count(*) as three from t1i; -eval select $pos1_master > $pos1_slave as one; -eval select $pos1_slave - $pos0_slave as zero; ---enable_query_log - -delete from t2m; # remove the row to be able to replay -start slave sql_thread; - -# -# Part I: B The homogenous transaction remains interuptable in between -# - -connection master; -delete from t1i; -delete from t2m; - -sync_slave_with_master; -begin; -insert into t1i values (5); - -connection master; -let $pos0_master= query_get_value(SHOW MASTER STATUS, Position, 1); -begin; -insert into t1i values (4); -update t1i set n = 5 where n = 4; # to block at. not to be played -commit; -let $pos1_master= query_get_value(SHOW MASTER STATUS, Position, 1); - - -connection slave1; -# slave sql can't advance as must be locked by the conn `slave' trans -let $pos0_slave= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); ---disable_query_log -eval select $pos0_master - $pos0_slave as zero; ---enable_query_log - -# -# the replicated trans is blocked by the slave's local. -# However, it's not easy to catch the exact moment when it happens. -# The test issues sleep which makes the test either non-deterministic or -# wasting too much time. 
-# ---sleep 3 - -send stop slave sql_thread; - -connection slave; -rollback; # release the sql thread - -connection slave1; -reap; -source include/wait_for_slave_sql_to_stop.inc; -let $sql_status= query_get_value(SHOW SLAVE STATUS, Slave_SQL_Running, 1); ---echo *** sql thread is *not* running: $sql_status *** - -let $pos1_slave= query_get_value(SHOW SLAVE STATUS, Exec_Master_Log_Pos, 1); - ---echo *** the prove: the stopped slave has rolled back the current transaction *** - ---disable_query_log -select count(*) as zero from t1i; -eval select $pos0_master - $pos0_slave as zero; -eval select $pos1_master > $pos0_slave as one; ---enable_query_log - -start slave sql_thread; - -# clean-up - -connection master; -drop table t1i, t2m; - ---source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_bug37426.result b/mysql-test/suite/rpl/r/rpl_bug37426.result new file mode 100644 index 00000000000..bf96255c7b4 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_bug37426.result @@ -0,0 +1,12 @@ +include/master-slave.inc +[connection master] +CREATE TABLE char128_utf8 (i1 INT NOT NULL, c CHAR(128) CHARACTER SET utf8 NOT NULL, i2 INT NOT NULL); +INSERT INTO char128_utf8 VALUES ( 1, "123", 1 ); +SELECT * FROM char128_utf8; +i1 c i2 +1 123 1 +SELECT * FROM char128_utf8; +i1 c i2 +1 123 1 +DROP TABLE char128_utf8; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_bug37426.test b/mysql-test/suite/rpl/t/rpl_bug37426.test new file mode 100644 index 00000000000..d0a60524fef --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_bug37426.test @@ -0,0 +1,22 @@ +############################################################# +# Purpose: Test for BUG#37426 +# RBR breaks for CHAR() UTF8 fields > 85 chars +############################################################# + +source include/master-slave.inc; +source include/have_binlog_format_row.inc; + +connection master; +CREATE TABLE char128_utf8 (i1 INT NOT NULL, c CHAR(128) CHARACTER SET utf8 NOT NULL, i2 INT NOT NULL); +INSERT INTO char128_utf8 VALUES ( 1, "123", 1 ); + +SELECT * FROM char128_utf8; +sync_slave_with_master; + +SELECT * FROM char128_utf8; + +# Clean up +connection master; +DROP TABLE char128_utf8; +sync_slave_with_master; +--source include/rpl_end.inc -- cgit v1.2.1 From 3abe56f31d90f2cc84399e042b5f105b87b2b01a Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Thu, 14 Apr 2011 12:11:57 +0400 Subject: Bug#11756242 48137: PROCEDURE ANALYSE() LEAKS MEMORY WHEN RETURNING NULL There are two problems with ANALYSE(): 1. Memory leak it happens because do_select() can overwrite JOIN::procedure field(with zero value in our case) and JOIN destructor don't free the memory allocated for JOIN::procedure. The fix is to save original JOIN::procedure before do_select() call and restore it after do_select execution. 2. Wrong result If ANALYSE() procedure is used for the statement with LIMIT clause it could retrun empty result set. It happens because of missing analyse::end_of_records() call. First end_send() function call returns NESTED_LOOP_QUERY_LIMIT and second call of end_send() with end_of_records flag enabled does not happen. The fix is to return NESTED_LOOP_OK from end_send() if procedure is active. mysql-test/r/analyse.result: test case mysql-test/t/analyse.test: test case sql/sql_select.cc: --save original JOIN::procedure before do_select() call and restore it after do_select execution. 
--return NESTED_LOOP_OK from end_send() if procedure is active --- mysql-test/r/analyse.result | 13 +++++++++++++ mysql-test/t/analyse.test | 12 ++++++++++++ sql/sql_select.cc | 16 ++++++++++++---- 3 files changed, 37 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/analyse.result b/mysql-test/r/analyse.result index 92fc26e7ba3..f82439090f6 100644 --- a/mysql-test/r/analyse.result +++ b/mysql-test/r/analyse.result @@ -135,4 +135,17 @@ SELECT * FROM t1 PROCEDURE ANALYSE(); Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype test.t1.a e e- 1 2 0 0 1.3333 NULL ENUM('e','e-') NOT NULL DROP TABLE t1; +# +# Bug#11756242 48137: PROCEDURE ANALYSE() LEAKS MEMORY WHEN RETURNING NULL +# +CREATE TABLE t1(f1 INT) ENGINE=MYISAM; +CREATE TABLE t2(f2 INT) ENGINE=INNODB; +INSERT INTO t2 VALUES (1); +SELECT DISTINCTROW f1 FROM t1 NATURAL RIGHT OUTER JOIN t2 PROCEDURE ANALYSE(); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t1.f1 NULL NULL 0 0 0 1 0.0 0.0 CHAR(0) +SELECT * FROM t2 LIMIT 1 PROCEDURE ANALYSE(); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t2.f2 1 1 1 1 0 0 1.0000 0.0000 ENUM('1') NOT NULL +DROP TABLE t1, t2; End of 5.1 tests diff --git a/mysql-test/t/analyse.test b/mysql-test/t/analyse.test index 63929d8766b..c77967a0cc9 100644 --- a/mysql-test/t/analyse.test +++ b/mysql-test/t/analyse.test @@ -1,6 +1,7 @@ # # Test of procedure analyse # +-- source include/have_innodb.inc --disable_warnings drop table if exists t1,t2; @@ -144,4 +145,15 @@ INSERT INTO t1 VALUES ('e'),('e'),('e-'); SELECT * FROM t1 PROCEDURE ANALYSE(); DROP TABLE t1; +--echo # +--echo # Bug#11756242 48137: PROCEDURE ANALYSE() LEAKS MEMORY WHEN RETURNING NULL +--echo # + +CREATE TABLE t1(f1 INT) ENGINE=MYISAM; +CREATE TABLE t2(f2 INT) ENGINE=INNODB; +INSERT INTO t2 VALUES (1); +SELECT DISTINCTROW f1 FROM t1 NATURAL RIGHT OUTER JOIN t2 PROCEDURE ANALYSE(); +SELECT * FROM t2 LIMIT 1 PROCEDURE ANALYSE(); +DROP TABLE t1, t2; + --echo End of 5.1 tests diff --git a/sql/sql_select.cc b/sql/sql_select.cc index eb2559fc600..84a09fbc7e6 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1929,7 +1929,11 @@ JOIN::exec() if (!curr_join->sort_and_group && curr_join->const_tables != curr_join->tables) curr_join->join_tab[curr_join->const_tables].sorted= 0; - if ((tmp_error= do_select(curr_join, (List *) 0, curr_tmp_table, 0))) + + Procedure *save_proc= curr_join->procedure; + tmp_error= do_select(curr_join, (List *) 0, curr_tmp_table, 0); + curr_join->procedure= save_proc; + if (tmp_error) { error= tmp_error; DBUG_VOID_RETURN; @@ -12354,10 +12358,14 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), int error; if (join->having && join->having->val_int() == 0) DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having - error=0; if (join->procedure) - error=join->procedure->send_row(join->procedure_fields_list); - else if (join->do_send_rows) + { + if (join->procedure->send_row(join->procedure_fields_list)) + DBUG_RETURN(NESTED_LOOP_ERROR); + DBUG_RETURN(NESTED_LOOP_OK); + } + error=0; + if (join->do_send_rows) error=join->result->send_data(*join->fields); if (error) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ -- cgit v1.2.1 From 7634e724e786a91aeb9b450818230629beb66228 Mon Sep 17 00:00:00 2001 From: Serge Kozlov Date: Thu, 14 Apr 2011 15:24:11 +0400 Subject: WL#5867, postfix 
for binlog_bug23533 --- mysql-test/suite/binlog/r/binlog_bug23533.result | 3 --- mysql-test/suite/binlog/t/binlog_bug23533.test | 1 - 2 files changed, 4 deletions(-) diff --git a/mysql-test/suite/binlog/r/binlog_bug23533.result b/mysql-test/suite/binlog/r/binlog_bug23533.result index 8a28867afb4..07b124793d1 100644 --- a/mysql-test/suite/binlog/r/binlog_bug23533.result +++ b/mysql-test/suite/binlog/r/binlog_bug23533.result @@ -3,9 +3,6 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=I SELECT COUNT(*) FROM t1; COUNT(*) 1000 -SHOW VARIABLES LIKE 'max_binlog_cache_size'; -Variable_name Value -max_binlog_cache_size 4294963200 SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; SET GLOBAL max_binlog_cache_size=4096; START TRANSACTION; diff --git a/mysql-test/suite/binlog/t/binlog_bug23533.test b/mysql-test/suite/binlog/t/binlog_bug23533.test index 3c9a7ab5896..fb2fc808b7b 100644 --- a/mysql-test/suite/binlog/t/binlog_bug23533.test +++ b/mysql-test/suite/binlog/t/binlog_bug23533.test @@ -22,7 +22,6 @@ while ($i) SELECT COUNT(*) FROM t1; # Set small value for max_binlog_cache_size -SHOW VARIABLES LIKE 'max_binlog_cache_size'; SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; SET GLOBAL max_binlog_cache_size=4096; -- cgit v1.2.1 From e675ed063e890c4f442d61eaa6837119505449eb Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Thu, 14 Apr 2011 16:17:58 +0200 Subject: Bug #12351213 MTR --VS-CONFIG DOES NOT WORK LIKE MTR_VS_CONFIG Fix for --vs-config applied Find.pm incorrectly tested an unitialized local variable instead of the global, corrected. Find.pm is also wrong in 5.5: uses a non-existent global variable. Fix when merging up. --- mysql-test/lib/My/Find.pm | 6 ++---- mysql-test/mysql-test-run.pl | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/mysql-test/lib/My/Find.pm b/mysql-test/lib/My/Find.pm index 9c89a7e4e2a..8cbd6db3201 100644 --- a/mysql-test/lib/My/Find.pm +++ b/mysql-test/lib/My/Find.pm @@ -1,5 +1,5 @@ # -*- cperl -*- -# Copyright (C) 2008 MySQL AB +# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -28,8 +28,6 @@ use My::Platform; use base qw(Exporter); our @EXPORT= qw(my_find_bin my_find_dir my_find_file NOT_REQUIRED); -our $vs_config_dir; - my $bin_extension= ".exe" if IS_WINDOWS; # Helper function to be used for fourth parameter to find functions @@ -158,7 +156,7 @@ sub my_find_paths { # User can select to look in a special build dir # which is a subdirectory of any of the paths my @extra_dirs; - my $build_dir= $vs_config_dir || $ENV{MTR_VS_CONFIG} || $ENV{MTR_BUILD_DIR}; + my $build_dir= $::opt_vs_config || $ENV{MTR_VS_CONFIG} || $ENV{MTR_BUILD_DIR}; push(@extra_dirs, $build_dir) if defined $build_dir; if (defined $extension){ diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 2301b2444d3..9b8b1dc67cf 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1,7 +1,7 @@ #!/usr/bin/perl # -*- cperl -*- -# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. 
# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Library General Public @@ -893,7 +893,7 @@ sub command_line_setup { 'ssl|with-openssl' => \$opt_ssl, 'skip-ssl' => \$opt_skip_ssl, 'compress' => \$opt_compress, - 'vs-config' => \$opt_vs_config, + 'vs-config=s' => \$opt_vs_config, # Max number of parallel threads to use 'parallel=s' => \$opt_parallel, -- cgit v1.2.1 From dd3d9477b25b546407e18b4b474e766db1709aa7 Mon Sep 17 00:00:00 2001 From: Tor Didriksen Date: Thu, 14 Apr 2011 16:35:24 +0200 Subject: Bug#11765713 58705: OPTIMIZER LET ENGINE DEPEND ON UNINITIALIZED VALUES CREATED BY OPT_SUM_QU Valgrind warnings were caused by comparing index values to an un-initialized field. mysql-test/r/subselect.result: New test cases. mysql-test/t/subselect.test: New test cases. sql/opt_sum.cc: Add thd to opt_sum_query enabling it to test for errors. If we have a non-nullable index, we cannot use it to match null values, since set_null() will be ignored, and we might compare uninitialized data. sql/sql_select.cc: Add thd to opt_sum_query, enabling it to test for errors. sql/sql_select.h: Add thd to opt_sum_query, enabling it to test for errors. --- mysql-test/r/subselect.result | 18 ++++++++++++++++++ mysql-test/t/subselect.test | 22 ++++++++++++++++++++++ sql/opt_sum.cc | 43 +++++++++++++++++++++++++++++++------------ sql/sql_select.cc | 2 +- sql/sql_select.h | 3 ++- 5 files changed, 74 insertions(+), 14 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index dc40e42275b..5f86b0db132 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -4734,3 +4734,21 @@ SELECT * FROM t2 UNION SELECT * FROM t2 ORDER BY (SELECT * FROM t1 WHERE MATCH(a) AGAINST ('+abc' IN BOOLEAN MODE)); DROP TABLE t1,t2; End of 5.1 tests +# +# Bug #11765713 58705: +# OPTIMIZER LET ENGINE DEPEND ON UNINITIALIZED VALUES +# CREATED BY OPT_SUM_QUERY +# +CREATE TABLE t1(a INT NOT NULL, KEY (a)); +INSERT INTO t1 VALUES (0), (1); +SELECT 1 as foo FROM t1 WHERE a < SOME +(SELECT a FROM t1 WHERE a <=> +(SELECT a FROM t1) +); +ERROR 21000: Subquery returns more than 1 row +SELECT 1 as foo FROM t1 WHERE a < SOME +(SELECT a FROM t1 WHERE a <=> +(SELECT a FROM t1 where a is null) +); +foo +DROP TABLE t1; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 1f471b46c4e..94a3df21998 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -3726,3 +3726,25 @@ DROP TABLE t1,t2; --enable_result_log --echo End of 5.1 tests + +--echo # +--echo # Bug #11765713 58705: +--echo # OPTIMIZER LET ENGINE DEPEND ON UNINITIALIZED VALUES +--echo # CREATED BY OPT_SUM_QUERY +--echo # + +CREATE TABLE t1(a INT NOT NULL, KEY (a)); +INSERT INTO t1 VALUES (0), (1); + +--error ER_SUBQUERY_NO_1_ROW +SELECT 1 as foo FROM t1 WHERE a < SOME + (SELECT a FROM t1 WHERE a <=> + (SELECT a FROM t1) + ); + +SELECT 1 as foo FROM t1 WHERE a < SOME + (SELECT a FROM t1 WHERE a <=> + (SELECT a FROM t1 where a is null) + ); + +DROP TABLE t1; diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index b20a0c4fcbe..1eef3798908 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -211,6 +211,7 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) /** Substitutes constants for some COUNT(), MIN() and MAX() functions. 
+ @param thd thread handler @param tables list of leaves of join table tree @param all_fields All fields to be returned @param conds WHERE clause @@ -228,9 +229,12 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) HA_ERR_KEY_NOT_FOUND on impossible conditions @retval HA_ERR_... if a deadlock or a lock wait timeout happens, for example + @retval + ER_... e.g. ER_SUBQUERY_NO_1_ROW */ -int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) +int opt_sum_query(THD *thd, + TABLE_LIST *tables, List &all_fields, COND *conds) { List_iterator_fast it(all_fields); int const_result= 1; @@ -242,6 +246,8 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) Item *item; int error; + DBUG_ENTER("opt_sum_query"); + if (conds) where_tables= conds->used_tables(); @@ -269,7 +275,7 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) WHERE t2.field IS NULL; */ if (tl->table->map & where_tables) - return 0; + DBUG_RETURN(0); } else used_tables|= tl->table->map; @@ -297,7 +303,7 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) { tl->table->file->print_error(error, MYF(0)); tl->table->in_use->fatal_error(); - return error; + DBUG_RETURN(error); } count*= tl->table->file->stats.records; } @@ -390,10 +396,10 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) if (error) { if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // No rows matching WHERE /* HA_ERR_LOCK_DEADLOCK or some other error */ table->file->print_error(error, MYF(0)); - return(error); + DBUG_RETURN(error); } removed_tables|= table->map; } @@ -437,6 +443,10 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) const_result= 0; } } + + if (thd->is_error()) + DBUG_RETURN(thd->main_da.sql_errno()); + /* If we have a where clause, we can only ignore searching in the tables if MIN/MAX optimisation replaced all used tables @@ -446,7 +456,7 @@ int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds) */ if (removed_tables && used_tables != removed_tables) const_result= 0; // We didn't remove all tables - return const_result; + DBUG_RETURN(const_result); } @@ -732,6 +742,12 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, if (is_null || (is_null_safe_eq && args[1]->is_null())) { + /* + If we have a non-nullable index, we cannot use it, + since set_null will be ignored, and we will compare uninitialized data. + */ + if (!part->field->real_maybe_null()) + DBUG_RETURN(false); part->field->set_null(); *key_ptr= (uchar) 1; } @@ -802,8 +818,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, @param[out] prefix_len Length of prefix for the search range @note - This function may set table->key_read to 1, which must be reset after - index is used! (This can only happen when function returns 1) + This function may set field->table->key_read to true, + which must be reset after index is used! 
+ (This can only happen when function returns 1) @retval 0 Index can not be used to optimize MIN(field)/MAX(field) @@ -818,7 +835,9 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, uint *range_fl, uint *prefix_len) { if (!(field->flags & PART_KEY_FLAG)) - return 0; // Not key field + return false; // Not key field + + DBUG_ENTER("find_key_for_maxmin"); TABLE *table= field->table; uint idx= 0; @@ -843,7 +862,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, part++, jdx++, key_part_to_use= (key_part_to_use << 1) | 1) { if (!(table->file->index_flags(idx, jdx, 0) & HA_READ_ORDER)) - return 0; + DBUG_RETURN(false); /* Check whether the index component is partial */ Field *part_field= table->field[part->fieldnr-1]; @@ -892,12 +911,12 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, */ if (field->part_of_key.is_set(idx)) table->set_keyread(TRUE); - return 1; + DBUG_RETURN(true); } } } } - return 0; + DBUG_RETURN(false); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 84a09fbc7e6..ab287e57aa1 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -961,7 +961,7 @@ JOIN::optimize() If all items were resolved by opt_sum_query, there is no need to open any tables. */ - if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) + if ((res=opt_sum_query(thd, select_lex->leaf_tables, all_fields, conds))) { if (res == HA_ERR_KEY_NOT_FOUND) { diff --git a/sql/sql_select.h b/sql/sql_select.h index 5350e28d8ff..dd810ae5156 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -612,7 +612,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field, /* functions from opt_sum.cc */ bool simple_pred(Item_func *func_item, Item **args, bool *inv_order); -int opt_sum_query(TABLE_LIST *tables, List &all_fields,COND *conds); +int opt_sum_query(THD* thd, + TABLE_LIST *tables, List &all_fields, COND *conds); /* from sql_delete.cc, used by opt_range.cc */ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); -- cgit v1.2.1 From c95227ca54c291d372e1136d52a0ecc9eb0294cf Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Fri, 15 Apr 2011 10:30:52 +0200 Subject: Bug #12360195 MTR DOES NOT IGNORE TABS IN EXPERIMENTAL FILE Instead of just filtering space, filter white space (\s) I left the default.experimental file as is, with tabs. --- mysql-test/mysql-test-run.pl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 9b8b1dc67cf..2897ae3142a 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1123,7 +1123,7 @@ sub command_line_setup { chomp; # remove comments (# foo) at the beginning of the line, or after a # blank at the end of the line - s/( +|^)#.*$//; + s/(\s+|^)#.*$//; # If @ platform specifier given, use this entry only if it contains # @ or @! where xxx != platform if (/\@.*/) @@ -1134,8 +1134,8 @@ sub command_line_setup { s/\@.*$//; } # remove whitespace - s/^ +//; - s/ +$//; + s/^\s+//; + s/\s+$//; # if nothing left, don't need to remember this line if ( $_ eq "" ) { next; -- cgit v1.2.1 From bba7b9ca0c96a1c140e725776b5e0382a4f62152 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Fri, 15 Apr 2011 12:51:34 +0400 Subject: Bug#11765139 58069: LOAD DATA INFILE: VALGRIND REPORTS INVALID MEMORY READS AND WRITES WITH U Some multibyte sequences could be considered by my_mbcharlen() functions as multibyte character but more exact my_ismbchar() does not think so. 
In such a case this multibyte sequences is pushed into 'stack' buffer which is too small to accommodate the sequence. The fix is to allocate stack buffer in compliance with max character length. mysql-test/r/loaddata.result: test case mysql-test/t/loaddata.test: test case sql/sql_load.cc: allocate stack buffer in compliance with max character length. --- mysql-test/r/loaddata.result | 7 +++++++ mysql-test/t/loaddata.test | 13 +++++++++++++ sql/sql_load.cc | 2 +- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result index 40c278380b1..3a421b3ea3f 100644 --- a/mysql-test/r/loaddata.result +++ b/mysql-test/r/loaddata.result @@ -532,4 +532,11 @@ a 0 1 DROP TABLE t1; +# +# Bug#11765139 58069: LOAD DATA INFILE: VALGRIND REPORTS INVALID MEMORY READS AND WRITES WITH U +# +CREATE TABLE t1(f1 INT); +SELECT 0xE1BB30 INTO OUTFILE 't1.dat'; +LOAD DATA INFILE 't1.dat' IGNORE INTO TABLE t1 CHARACTER SET utf8; +DROP TABLE t1; End of 5.1 tests diff --git a/mysql-test/t/loaddata.test b/mysql-test/t/loaddata.test index 821453777f5..e0764b67ec0 100644 --- a/mysql-test/t/loaddata.test +++ b/mysql-test/t/loaddata.test @@ -611,5 +611,18 @@ DROP TABLE t1; let $MYSQLD_DATADIR= `select @@datadir`; remove_file $MYSQLD_DATADIR/test/tmpp2.txt; +--echo # +--echo # Bug#11765139 58069: LOAD DATA INFILE: VALGRIND REPORTS INVALID MEMORY READS AND WRITES WITH U +--echo # + +CREATE TABLE t1(f1 INT); +EVAL SELECT 0xE1BB30 INTO OUTFILE 't1.dat'; +--disable_warnings +LOAD DATA INFILE 't1.dat' IGNORE INTO TABLE t1 CHARACTER SET utf8; +--enable_warnings + +DROP TABLE t1; +let $MYSQLD_DATADIR= `select @@datadir`; +remove_file $MYSQLD_DATADIR/test/t1.dat; --echo End of 5.1 tests diff --git a/sql/sql_load.cc b/sql/sql_load.cc index c227fe69b62..513cd62b510 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1109,7 +1109,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, /* Set of a stack for unget if long terminators */ - uint length=max(field_term_length,line_term_length)+1; + uint length= max(cs->mbmaxlen, max(field_term_length, line_term_length)) + 1; set_if_bigger(length,line_start.length()); stack=stack_pos=(int*) sql_alloc(sizeof(int)*length); -- cgit v1.2.1 From 8dabe8aa92bb825d2bd2a78d2cb5ca30782576be Mon Sep 17 00:00:00 2001 From: Martin Hansson Date: Mon, 18 Apr 2011 10:44:41 +0200 Subject: Bug 11758558 - 50774: WRONG RESULTSET WHEN TIMESTAMP VALUES ARE APPENDED WITH .0 The bug was fixed by the patch for bug number BUG 11763109 - 55779: SELECT DOES NOT WORK PROPERLY IN MYSQL SERVER VERSION "5.1.42 SUSE MYSQL (Exact same fix as was proposed for this bug.) Since the motivation for the two bug reports was completely different, however, it still makes sense to push the test case. This patch contains only the test case. 
--- mysql-test/r/type_timestamp.result | 63 ++++++++++++++++++++++++++++++++++++++ mysql-test/t/type_timestamp.test | 47 ++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result index e88d3462466..3176879343c 100644 --- a/mysql-test/r/type_timestamp.result +++ b/mysql-test/r/type_timestamp.result @@ -547,4 +547,67 @@ a 2000-01-01 00:00:01 2000-01-01 00:00:01 DROP TABLE t1; +# +# Bug#50774: failed to get the correct resultset when timestamp values +# are appended with .0 +# +CREATE TABLE t1 ( a TIMESTAMP, KEY ( a ) ); +INSERT INTO t1 VALUES( '2010-02-01 09:31:01' ); +INSERT INTO t1 VALUES( '2010-02-01 09:31:02' ); +INSERT INTO t1 VALUES( '2010-02-01 09:31:03' ); +INSERT INTO t1 VALUES( '2010-02-01 09:31:04' ); +SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0'; +a +2010-02-01 09:31:02 +2010-02-01 09:31:03 +2010-02-01 09:31:04 +SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' <= a; +a +2010-02-01 09:31:02 +2010-02-01 09:31:03 +2010-02-01 09:31:04 +SELECT * FROM t1 WHERE a <= '2010-02-01 09:31:02.0'; +a +2010-02-01 09:31:01 +2010-02-01 09:31:02 +SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' >= a; +a +2010-02-01 09:31:01 +2010-02-01 09:31:02 +EXPLAIN +SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0'; +id select_type table type possible_keys key key_len ref rows Extra +x x x range x x x x x x +SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0'; +a +2010-02-01 09:31:02 +2010-02-01 09:31:03 +2010-02-01 09:31:04 +CREATE TABLE t2 ( a TIMESTAMP, KEY ( a DESC ) ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:01' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:02' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:03' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:04' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:05' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:06' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:07' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:08' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:09' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:10' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:11' ); +# The bug would cause the range optimizer's comparison to use an open +# interval here. This reveals itself only in the number of reads +# performed. 
+FLUSH STATUS; +EXPLAIN +SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0'; +id select_type table type possible_keys key key_len ref rows Extra +x x x range x x x x x x +SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0'; +a +2010-02-01 09:31:01 +SHOW STATUS LIKE 'Handler_read_next'; +Variable_name Value +Handler_read_next 1 +DROP TABLE t1, t2; End of 5.1 tests diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test index 602f6f089c2..53b45fc6732 100644 --- a/mysql-test/t/type_timestamp.test +++ b/mysql-test/t/type_timestamp.test @@ -373,4 +373,51 @@ SELECT a FROM t1 WHERE a >= '20000101000000'; DROP TABLE t1; +--echo # +--echo # Bug#50774: failed to get the correct resultset when timestamp values +--echo # are appended with .0 +--echo # +CREATE TABLE t1 ( a TIMESTAMP, KEY ( a ) ); + +INSERT INTO t1 VALUES( '2010-02-01 09:31:01' ); +INSERT INTO t1 VALUES( '2010-02-01 09:31:02' ); +INSERT INTO t1 VALUES( '2010-02-01 09:31:03' ); +INSERT INTO t1 VALUES( '2010-02-01 09:31:04' ); + +SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0'; +SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' <= a; +SELECT * FROM t1 WHERE a <= '2010-02-01 09:31:02.0'; +SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' >= a; + +--replace_column 1 x 2 x 3 x 5 x 6 x 7 x 8 x 9 x 10 x +EXPLAIN +SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0'; +SELECT * FROM t1 WHERE a >= '2010-02-01 09:31:02.0'; + +CREATE TABLE t2 ( a TIMESTAMP, KEY ( a DESC ) ); + +INSERT INTO t2 VALUES( '2010-02-01 09:31:01' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:02' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:03' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:04' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:05' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:06' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:07' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:08' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:09' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:10' ); +INSERT INTO t2 VALUES( '2010-02-01 09:31:11' ); + +--echo # The bug would cause the range optimizer's comparison to use an open +--echo # interval here. This reveals itself only in the number of reads +--echo # performed. +FLUSH STATUS; +--replace_column 1 x 2 x 3 x 5 x 6 x 7 x 8 x 9 x 10 x +EXPLAIN +SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0'; +SELECT * FROM t2 WHERE a < '2010-02-01 09:31:02.0'; +SHOW STATUS LIKE 'Handler_read_next'; + +DROP TABLE t1, t2; + --echo End of 5.1 tests -- cgit v1.2.1 From 7b1967ad4e4eff38e3f7119ff53db49b5bdc3fa8 Mon Sep 17 00:00:00 2001 From: Sven Sandberg Date: Mon, 18 Apr 2011 14:42:14 +0200 Subject: test fails on more platforms, removed @freebsd from default.experimental. 
--- mysql-test/collections/default.experimental | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 72e14135ef0..9e74fa9bc30 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -26,7 +26,7 @@ ndb.* # joro : NDB tests marked as experiment rpl.rpl_innodb_bug28430 @solaris # Bug#46029 rpl.rpl_row_sp011 @solaris # Joro : Bug #45445 -rpl.rpl_stop_slave @freebsd # Sven : BUG#12345981 +rpl.rpl_stop_slave # Sven : BUG#12345981 rpl.rpl_bug37426 # WL#5867: skozlov: test case moved from unused bugs suite -- cgit v1.2.1 From 060df92b2ff9afae08f0da9da807777e07d404c3 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Mon, 18 Apr 2011 15:35:14 +0200 Subject: Bug #12365486 MTR FAILS TO FIND WARNINGS IN SERVER LOG WITH --VALGRIND COMBINED WITH --DEBUG With this combination, outoput was directed to .trace but not all parts of MTR was aware of this. Replace .err with .trace at the earliest possible place --- mysql-test/lib/My/ConfigFactory.pm | 8 ++++++-- mysql-test/mysql-test-run.pl | 9 +-------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/mysql-test/lib/My/ConfigFactory.pm b/mysql-test/lib/My/ConfigFactory.pm index bb990a9f8d2..7688283fdc1 100644 --- a/mysql-test/lib/My/ConfigFactory.pm +++ b/mysql-test/lib/My/ConfigFactory.pm @@ -1,5 +1,5 @@ # -*- cperl -*- -# Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Library General Public @@ -141,7 +141,11 @@ sub fix_tmpdir { sub fix_log_error { my ($self, $config, $group_name, $group)= @_; my $dir= $self->{ARGS}->{vardir}; - return "$dir/log/$group_name.err"; + if ( $::opt_valgrind and $::opt_debug ) { + return "$dir/log/$group_name.trace"; + } else { + return "$dir/log/$group_name.err"; + } } sub fix_log { diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 2897ae3142a..1c7efccc69d 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -256,7 +256,7 @@ my $opt_strace_client; our $opt_user = "root"; -my $opt_valgrind= 0; +our $opt_valgrind= 0; my $opt_valgrind_mysqld= 0; my $opt_valgrind_mysqltest= 0; my @default_valgrind_args= ("--show-reachable=yes"); @@ -4544,13 +4544,6 @@ sub mysqld_start ($$) { unlink($mysqld->value('pid-file')); my $output= $mysqld->value('#log-error'); - if ( $opt_valgrind and $opt_debug ) - { - # When both --valgrind and --debug is selected, send - # all output to the trace file, making it possible to - # see the exact location where valgrind complains - $output= "$opt_vardir/log/".$mysqld->name().".trace"; - } # Remember this log file for valgrind error report search $mysqld_logs{$output}= 1 if $opt_valgrind; # Remember data dir for gmon.out files if using gprof -- cgit v1.2.1 From dba184237a2504c880ed08e34e91e40a76f738e7 Mon Sep 17 00:00:00 2001 From: Serge Kozlov Date: Mon, 18 Apr 2011 23:59:15 +0400 Subject: BUG#12371924 Update test case --- mysql-test/collections/default.experimental | 5 +---- mysql-test/suite/binlog/r/binlog_bug23533.result | 3 +++ mysql-test/suite/binlog/t/binlog_bug23533.test | 5 +++++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 9e74fa9bc30..fb8c6845a5f 100644 
--- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -2,8 +2,7 @@ # in alphabetical order. This also helps with merge conflict resolution. binlog.binlog_multi_engine # joro : NDB tests marked as experimental as agreed with bochklin -binlog.binlog_bug23533 # WL#5867: skozlov: test case moved from unused bugs suite -binlog.binlog_bug36391 # WL#5867: skozlov: test case moved from unused bugs suite +binlog.binlog_bug23533 # skozlov: BUG#12371924 funcs_1.charset_collation_1 # depends on compile-time decisions @@ -27,8 +26,6 @@ ndb.* # joro : NDB tests marked as experiment rpl.rpl_innodb_bug28430 @solaris # Bug#46029 rpl.rpl_row_sp011 @solaris # Joro : Bug #45445 rpl.rpl_stop_slave # Sven : BUG#12345981 -rpl.rpl_bug37426 # WL#5867: skozlov: test case moved from unused bugs suite - rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin rpl_ndb.rpl_ndb_log # Bug#38998 diff --git a/mysql-test/suite/binlog/r/binlog_bug23533.result b/mysql-test/suite/binlog/r/binlog_bug23533.result index 07b124793d1..02605839ab0 100644 --- a/mysql-test/suite/binlog/r/binlog_bug23533.result +++ b/mysql-test/suite/binlog/r/binlog_bug23533.result @@ -3,7 +3,9 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=I SELECT COUNT(*) FROM t1; COUNT(*) 1000 +SET @saved_binlog_cache_size=@@binlog_cache_size; SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; +SET GLOBAL binlog_cache_size=4096; SET GLOBAL max_binlog_cache_size=4096; START TRANSACTION; CREATE TABLE t2 SELECT * FROM t1; @@ -13,4 +15,5 @@ SHOW TABLES LIKE 't%'; Tables_in_test (t%) t1 SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; +SET GLOBAL binlog_cache_size=@saved_binlog_cache_size; DROP TABLE t1; diff --git a/mysql-test/suite/binlog/t/binlog_bug23533.test b/mysql-test/suite/binlog/t/binlog_bug23533.test index fb2fc808b7b..05fe9fd9523 100644 --- a/mysql-test/suite/binlog/t/binlog_bug23533.test +++ b/mysql-test/suite/binlog/t/binlog_bug23533.test @@ -15,14 +15,18 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=I let $i= 1000; while ($i) { + BEGIN; eval INSERT INTO t1 VALUES($i, REPEAT('x', 4096)); + COMMIT; dec $i; } --enable_query_log SELECT COUNT(*) FROM t1; # Set small value for max_binlog_cache_size +SET @saved_binlog_cache_size=@@binlog_cache_size; SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; +SET GLOBAL binlog_cache_size=4096; SET GLOBAL max_binlog_cache_size=4096; # Copied data from t1 into t2 large than max_binlog_cache_size @@ -34,4 +38,5 @@ SHOW TABLES LIKE 't%'; # 5.1 End of Test SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; +SET GLOBAL binlog_cache_size=@saved_binlog_cache_size; DROP TABLE t1; -- cgit v1.2.1 From 90bbf9d615a592c31464c1a689040a9758581fdd Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Wed, 20 Apr 2011 11:39:20 +0400 Subject: Bug#11765923 58937: MANY VALGRIND ERRORS AFTER GROUPING BY RESULT OF DECIMAL COLUMN FUNCTION Bug#11764671 57533: UNINITIALISED VALUES IN COPY_AND_CONVERT (SQL_STRING.CC) WITH CERTAIN CHA When ROUND evaluates decimal result it uses Item::decimal value as fraction value for the result. In some cases Item::decimal is greater than real result fraction value and uninitialised memory of result(decimal) buffer can be used in further calculations. Issue is introduced by Bug33143 fix. The fix is to remove erroneous assignment. 
mysql-test/r/func_math.result: test case mysql-test/t/func_math.test: test case sql/item_func.cc: remove erroneous assignment --- mysql-test/r/func_math.result | 22 ++++++++++++++++++++++ mysql-test/t/func_math.test | 16 ++++++++++++++++ sql/item_func.cc | 3 --- 3 files changed, 38 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result index ad0b872145b..b9118feab1a 100644 --- a/mysql-test/r/func_math.result +++ b/mysql-test/r/func_math.result @@ -518,4 +518,26 @@ CREATE TABLE t1 SELECT CEIL(LINESTRINGFROMWKB(1) DIV NULL); DROP TABLE t1; CREATE TABLE t1 SELECT FLOOR(LINESTRINGFROMWKB(1) DIV NULL); DROP TABLE t1; +# +# Bug#11765923 58937: MANY VALGRIND ERRORS AFTER GROUPING BY RESULT OF DECIMAL COLUMN FUNCTION +# +CREATE TABLE t1(f1 DECIMAL(22,1)); +INSERT INTO t1 VALUES (0),(1); +SELECT ROUND(f1, f1) FROM t1; +ROUND(f1, f1) +0.0 +1.0 +SELECT ROUND(f1, f1) FROM t1 GROUP BY 1; +ROUND(f1, f1) +0.0 +1.0 +DROP TABLE t1; +# +# Bug#11764671 57533: UNINITIALISED VALUES IN COPY_AND_CONVERT (SQL_STRING.CC) WITH CERTAIN CHA +# +SELECT ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a')); +ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a')) +-4939092.0000 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'a' End of 5.1 tests diff --git a/mysql-test/t/func_math.test b/mysql-test/t/func_math.test index 64b6a3a4ea6..9d51a5c94f9 100644 --- a/mysql-test/t/func_math.test +++ b/mysql-test/t/func_math.test @@ -333,4 +333,20 @@ DROP TABLE t1; CREATE TABLE t1 SELECT FLOOR(LINESTRINGFROMWKB(1) DIV NULL); DROP TABLE t1; +--echo # +--echo # Bug#11765923 58937: MANY VALGRIND ERRORS AFTER GROUPING BY RESULT OF DECIMAL COLUMN FUNCTION +--echo # + +CREATE TABLE t1(f1 DECIMAL(22,1)); +INSERT INTO t1 VALUES (0),(1); +SELECT ROUND(f1, f1) FROM t1; +SELECT ROUND(f1, f1) FROM t1 GROUP BY 1; +DROP TABLE t1; + +--echo # +--echo # Bug#11764671 57533: UNINITIALISED VALUES IN COPY_AND_CONVERT (SQL_STRING.CC) WITH CERTAIN CHA +--echo # + +SELECT ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a')); + --echo End of 5.1 tests diff --git a/sql/item_func.cc b/sql/item_func.cc index 595629b51be..6a9c47954b7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2122,10 +2122,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) if (!(null_value= (args[0]->null_value || args[1]->null_value || my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec, truncate, decimal_value) > 1))) - { - decimal_value->frac= decimals; return decimal_value; - } return 0; } -- cgit v1.2.1 From f3b024cafa4c316774c8122031a1bbbc08a83379 Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Wed, 20 Apr 2011 11:32:28 +0200 Subject: BUG#12377872 ASSERTION FAILED: !_ENTERED WHEN GETHOSTBYADDR_R FAILS ON SOLARIS This assertion was triggered if gethostbyaddr_r cannot do a reverse lookup on an ip address. The reason was a missing DBUG_RETURN macro. The problem affected only debug versions of the server. This patch fixes the problem by replacing return with DBUG_RETURN. No test case added. --- sql/hostname.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/hostname.cc b/sql/hostname.cc index c8cf46383a9..9796755e9fb 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -183,7 +183,7 @@ char * ip_to_hostname(struct in_addr *in, uint *errors) &tmp_hostent,buff,sizeof(buff),&tmp_errno))) { DBUG_PRINT("error",("gethostbyaddr_r returned %d",tmp_errno)); - return 0; + DBUG_RETURN(0); } if (!(check=my_gethostbyname_r(hp->h_name,&tmp_hostent2,buff2,sizeof(buff2), &tmp_errno))) -- cgit v1.2.1 From 91eebaaef47e3e49b0c0666d5c42321419d709f1 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Wed, 20 Apr 2011 14:58:53 +0200 Subject: Bug #12379923 60907: MYSQL-TEST/LIB/MY/SAFEPROCESS/SAFE_PROCESS.PL USES HARDCODED SIGNAL NUMBE Replaced the hardcoded 9 with 'KILL' --- mysql-test/lib/My/SafeProcess/safe_process.pl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/lib/My/SafeProcess/safe_process.pl b/mysql-test/lib/My/SafeProcess/safe_process.pl index e3114a749d3..54b0073f8df 100644 --- a/mysql-test/lib/My/SafeProcess/safe_process.pl +++ b/mysql-test/lib/My/SafeProcess/safe_process.pl @@ -94,7 +94,7 @@ eval { local $SIG{INT}= \&handle_signal; local $SIG{CHLD}= sub { message("Got signal @_"); - kill(9, -$child_pid); + kill('KILL', -$child_pid); my $ret= waitpid($child_pid, 0); if ($? & 127){ exit(65); # Killed by signal @@ -134,7 +134,7 @@ if ( $@ ) { # Use negative pid in order to kill the whole # process group # -my $ret= kill(9, -$child_pid); +my $ret= kill('KILL', -$child_pid); message("Killed child: $child_pid, ret: $ret"); if ($ret > 0) { message("Killed child: $child_pid"); -- cgit v1.2.1 From bd92ea43116b8ce606de5e6fc825e1a8b87a7740 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Wed, 20 Apr 2011 17:52:33 +0200 Subject: Bug#11766249 bug#59316: PARTITIONING AND INDEX_MERGE MEMORY LEAK Update for previous patch according to reviewers comments. Updated the constructors for ha_partitions to use the common init_handler_variables functions Added use of defines for size and offset to get better readability for the code that reads and writes the .par file. Also refactored the get_from_handler_file function. 
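For illustration, a minimal standalone sketch of how these header words can be read back with the new PAR_* offsets; it is not the server's reader. The field layout and the rule that the XOR of all 32-bit words must come out zero follow the create_handler_file()/read_par_file() code in the diff below, and the little-endian byte order mirrors int4store()/uint4korr(); the file name handling and error paths are assumptions made only for the example.

#include <stdio.h>
#include <stdlib.h>

#define PAR_WORD_SIZE        4   /* the .par file is a sequence of 32-bit words */
#define PAR_CHECKSUM_OFFSET  4
#define PAR_NUM_PARTS_OFFSET 8
#define PAR_ENGINES_OFFSET   12  /* one engine byte per partition, word padded */

/* little-endian 32-bit read, matching what int4store()/uint4korr() produce/consume */
static unsigned long word_at(const unsigned char *buf, unsigned long off)
{
  return (unsigned long) buf[off]
       | ((unsigned long) buf[off + 1] << 8)
       | ((unsigned long) buf[off + 2] << 16)
       | ((unsigned long) buf[off + 3] << 24);
}

int main(int argc, char **argv)
{
  FILE *f;
  unsigned char head[PAR_WORD_SIZE];
  unsigned char *buf;
  unsigned long tot_words, chksum= 0, i;

  if (argc < 2 || !(f= fopen(argv[1], "rb")))
    return 1;
  if (fread(head, 1, PAR_WORD_SIZE, f) != PAR_WORD_SIZE)
    return 1;
  tot_words= word_at(head, 0);              /* word 0: total length in words */
  if (!(buf= malloc(tot_words * PAR_WORD_SIZE)))
    return 1;
  rewind(f);
  if (fread(buf, PAR_WORD_SIZE, tot_words, f) != tot_words)
    return 1;
  for (i= 0; i < tot_words; i++)            /* stored checksum makes the XOR 0 */
    chksum^= word_at(buf, i * PAR_WORD_SIZE);
  printf("partitions: %lu, checksum %s\n",
         word_at(buf, PAR_NUM_PARTS_OFFSET), chksum ? "BAD" : "ok");
  free(buf);
  fclose(f);
  return 0;
}

Run against a table's .par file (for example test/t1.par under the datadir), it prints the stored partition count and whether the header checksum holds.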
--- sql/ha_partition.cc | 306 ++++++++++++++++++++++++++++++---------------------- sql/ha_partition.h | 17 ++- sql/handler.cc | 23 ++-- 3 files changed, 207 insertions(+), 139 deletions(-) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 946ecc652ef..7685b3a8384 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -166,11 +166,6 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(table)"); - m_part_info= NULL; - m_create_handler= FALSE; - m_is_sub_partitioned= 0; - m_is_clone_of= NULL; - m_clone_mem_root= NULL; init_handler_variables(); DBUG_VOID_RETURN; } @@ -192,21 +187,21 @@ ha_partition::ha_partition(handlerton *hton, partition_info *part_info) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); DBUG_ASSERT(part_info); + init_handler_variables(); m_part_info= part_info; m_create_handler= TRUE; m_is_sub_partitioned= m_part_info->is_sub_partitioned(); - init_handler_variables(); DBUG_VOID_RETURN; } /** ha_partition constructor method used by ha_partition::clone() - @param hton Handlerton (partition_hton) - @param share Table share object - @param part_info_arg partition_info to use - @param clone_arg ha_partition to clone - @param clme_mem_root_arg MEM_ROOT to use + @param hton Handlerton (partition_hton) + @param share Table share object + @param part_info_arg partition_info to use + @param clone_arg ha_partition to clone + @param clme_mem_root_arg MEM_ROOT to use @return New partition handler */ @@ -218,14 +213,12 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(clone)"); + init_handler_variables(); m_part_info= part_info_arg; m_create_handler= TRUE; m_is_sub_partitioned= m_part_info->is_sub_partitioned(); m_is_clone_of= clone_arg; m_clone_mem_root= clone_mem_root_arg; - init_handler_variables(); - m_tot_parts= clone_arg->m_tot_parts; - DBUG_ASSERT(m_tot_parts); DBUG_VOID_RETURN; } @@ -286,6 +279,11 @@ void ha_partition::init_handler_variables() this allows blackhole to work properly */ m_no_locks= 0; + m_part_info= NULL; + m_create_handler= FALSE; + m_is_sub_partitioned= 0; + m_is_clone_of= NULL; + m_clone_mem_root= NULL; #ifdef DONT_HAVE_TO_BE_INITALIZED m_start_key.flag= 0; @@ -2099,18 +2097,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name) } -/* +/** Create the special .par file - SYNOPSIS - create_handler_file() - name Full path of table name + @param name Full path of table name - RETURN VALUE - >0 Error code - 0 Success + @return Operation status + @retval FALSE Error code + @retval TRUE Success - DESCRIPTION + @note Method used to create handler file with names of partitions, their engine types and the number of partitions. 
*/ @@ -2174,19 +2170,22 @@ bool ha_partition::create_handler_file(const char *name) Array of engine types n * 4 bytes where n = (m_tot_parts + 3)/4 Length of name part in bytes 4 bytes + (Names in filename format) Name part m * 4 bytes where m = ((length_name_part + 3)/4)*4 All padding bytes are zeroed */ - tot_partition_words= (tot_parts + 3) / 4; - tot_name_words= (tot_name_len + 3) / 4; + tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + /* 4 static words (tot words, checksum, tot partitions, name length) */ tot_len_words= 4 + tot_partition_words + tot_name_words; - tot_len_byte= 4 * tot_len_words; + tot_len_byte= PAR_WORD_SIZE * tot_len_words; if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL)))) DBUG_RETURN(TRUE); - engine_array= (file_buffer + 12); - name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); + engine_array= (file_buffer + PAR_ENGINES_OFFSET); + name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE + + PAR_WORD_SIZE); part_it.rewind(); for (i= 0; i < no_parts; i++) { @@ -2224,13 +2223,15 @@ bool ha_partition::create_handler_file(const char *name) } chksum= 0; int4store(file_buffer, tot_len_words); - int4store(file_buffer + 8, tot_parts); - int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len); + int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts); + int4store(file_buffer + PAR_ENGINES_OFFSET + + (tot_partition_words * PAR_WORD_SIZE), + tot_name_len); for (i= 0; i < tot_len_words; i++) - chksum^= uint4korr(file_buffer + 4 * i); - int4store(file_buffer + 4, chksum); + chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i); + int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum); /* - Remove .frm extension and replace with .par + Add .par extension to the file name. Create and write and close file to be used at open, delete_table and rename_table */ @@ -2248,14 +2249,9 @@ bool ha_partition::create_handler_file(const char *name) DBUG_RETURN(result); } -/* - Clear handler variables and free some memory - SYNOPSIS - clear_handler_file() - - RETURN VALUE - NONE +/** + Clear handler variables and free some memory */ void ha_partition::clear_handler_file() @@ -2268,16 +2264,15 @@ void ha_partition::clear_handler_file() m_engine_array= NULL; } -/* + +/** Create underlying handler objects - SYNOPSIS - create_handlers() - mem_root Allocate memory through this + @para mem_root Allocate memory through this - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool ha_partition::create_handlers(MEM_ROOT *mem_root) @@ -2315,6 +2310,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_RETURN(FALSE); } + /* Create underlying handler objects from partition info @@ -2386,108 +2382,164 @@ error_end: } -/* - Get info about partition engines and their names from the .par file +/** + Read the .par file to get the partitions engines and names - SYNOPSIS - get_from_handler_file() - name Full path of table name - mem_root Allocate memory through this + @param name Name of table file (without extention) - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval true Failure + @retval false Success - DESCRIPTION - Open handler file to get partition names, engine types and number of - partitions. + @note On success, m_file_buffer is allocated and must be + freed by the caller. m_name_buffer_ptr and m_tot_parts is also set. 
*/ -bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, - bool clone) +bool ha_partition::read_par_file(const char *name) { - char buff[FN_REFLEN], *address_tot_name_len; + char buff[FN_REFLEN], *tot_name_len_offset; File file; - char *file_buffer, *name_buffer_ptr; - handlerton **engine_array; + char *file_buffer; uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum; - DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_ENTER("ha_partition::read_par_file"); DBUG_PRINT("enter", ("table name: '%s'", name)); if (m_file_buffer) - DBUG_RETURN(FALSE); + DBUG_RETURN(false); fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT); /* Following could be done with my_stat to read in whole file */ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0) - DBUG_RETURN(TRUE); - if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP))) + DBUG_RETURN(true); + if (my_read(file, (uchar *) & buff[0], PAR_WORD_SIZE, MYF(MY_NABP))) goto err1; len_words= uint4korr(buff); - len_bytes= 4 * len_words; + len_bytes= PAR_WORD_SIZE * len_words; + if (my_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + goto err1; if (!(file_buffer= (char*) my_malloc(len_bytes, MYF(0)))) goto err1; - VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0))); if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP))) goto err2; chksum= 0; for (i= 0; i < len_words; i++) - chksum ^= uint4korr((file_buffer) + 4 * i); + chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i); if (chksum) goto err2; - m_tot_parts= uint4korr((file_buffer) + 8); + m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET); DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); - tot_partition_words= (m_tot_parts + 3) / 4; - if (!clone) - { - engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); - for (i= 0; i < m_tot_parts; i++) - { - engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), - (enum legacy_db_type) - *(uchar *) ((file_buffer) + - 12 + i)); - if (!engine_array[i]) - goto err3; - } - } - address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words; - tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4; + tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + + tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET + + PAR_WORD_SIZE * tot_partition_words; + tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) / + PAR_WORD_SIZE; + /* + Verify the total length = tot size word, checksum word, num parts word + + engines array + name length word + name array. 
+ */ if (len_words != (tot_partition_words + tot_name_words + 4)) - goto err3; - name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words; + goto err2; VOID(my_close(file, MYF(0))); m_file_buffer= file_buffer; // Will be freed in clear_handler_file() - m_name_buffer_ptr= name_buffer_ptr; - - if (!clone) - { - if (!(m_engine_array= (plugin_ref*) - my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME)))) - goto err3; + m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE; - for (i= 0; i < m_tot_parts; i++) - m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); + DBUG_RETURN(false); + +err2: + my_free(file_buffer, MYF(0)); +err1: + VOID(my_close(file, MYF(0))); + DBUG_RETURN(true); +} + + +/** + Setup m_engine_array + + @param mem_root MEM_ROOT to use for allocating new handlers + + @return Operation status + @retval false Success + @retval true Failure +*/ + +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +{ + uint i; + uchar *buff; + handlerton **engine_array; + + DBUG_ASSERT(!m_file); + DBUG_ENTER("ha_partition::setup_engine_array"); + engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); + if (!engine_array) + DBUG_RETURN(true); - my_afree((gptr) engine_array); + buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); + for (i= 0; i < m_tot_parts; i++) + { + engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), + (enum legacy_db_type) + *(buff + i)); + if (!engine_array[i]) + goto err; } + if (!(m_engine_array= (plugin_ref*) + my_malloc(m_tot_parts * sizeof(plugin_ref), MYF(MY_WME)))) + goto err; + + for (i= 0; i < m_tot_parts; i++) + m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); + + my_afree((gptr) engine_array); - if (!clone && !m_file && create_handlers(mem_root)) + if (create_handlers(mem_root)) { clear_handler_file(); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } - DBUG_RETURN(FALSE); -err3: - if (!clone) - my_afree((gptr) engine_array); -err2: - my_free(file_buffer, MYF(0)); -err1: - VOID(my_close(file, MYF(0))); - DBUG_RETURN(TRUE); + DBUG_RETURN(false); + +err: + my_afree((gptr) engine_array); + DBUG_RETURN(true); +} + + +/** + Get info about partition engines and their names from the .par file + + @param name Full path of table name + @param mem_root Allocate memory through this + @param is_clone If it is a clone, don't create new handlers + + @return Operation status + @retval true Error + @retval false Success + + @note Open handler file to get partition names, engine types and number of + partitions. 
+*/ + +bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone) +{ + DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_PRINT("enter", ("table name: '%s'", name)); + + if (m_file_buffer) + DBUG_RETURN(false); + + if (read_par_file(name)) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root)) + DBUG_RETURN(true); + + DBUG_RETURN(false); } @@ -2615,8 +2667,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, FALSE); - if (!(m_file[i]= file[i]->clone((const char*) name_buff, - m_clone_mem_root))) + if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) { error= HA_ERR_INITIALIZATION; file= &m_file[i]; @@ -2632,8 +2683,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, FALSE); - if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, - test_if_locked))) + if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked))) goto err_handler; m_no_locks+= (*file)->lock_count(); name_buffer_ptr+= strlen(name_buffer_ptr) + 1; @@ -2645,8 +2695,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) check_table_flags= (((*file)->ha_table_flags() & ~(PARTITION_DISABLED_TABLE_FLAGS)) | (PARTITION_ENABLED_TABLE_FLAGS)); - file++; - do + while (*(++file)) { DBUG_ASSERT(ref_length >= (*file)->ref_length); set_if_bigger(ref_length, ((*file)->ref_length)); @@ -2663,7 +2712,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) file = &m_file[m_tot_parts - 1]; goto err_handler; } - } while (*(++file)); + } key_used_on_scan= m_file[0]->key_used_on_scan; implicit_emptied= m_file[0]->implicit_emptied; /* @@ -2742,7 +2791,7 @@ err_alloc: /** Clone the open and locked partitioning handler. - @param mem_root MEM_ROOT to use. + @param mem_root MEM_ROOT to use. @return Pointer to the successfully created clone or NULL @@ -2750,33 +2799,32 @@ err_alloc: This function creates a new ha_partition handler as a clone/copy. The original (this) must already be opened and locked. The clone will use the originals m_part_info. - It also allocates memory to ref + ref_dup. + It also allocates memory for ref + ref_dup. In ha_partition::open() it will clone its original handlers partitions - which will allocate then om the correct MEM_ROOT and also open them. + which will allocate then on the correct MEM_ROOT and also open them. */ handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) { ha_partition *new_handler; - + DBUG_ENTER("ha_partition::clone"); new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, this, mem_root); - if (!new_handler) - DBUG_RETURN(NULL); - /* Allocate new_handler->ref here because otherwise ha_open will allocate it on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. 
*/ - new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(m_ref_length)*2); - if (!new_handler->ref) - DBUG_RETURN(NULL); + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(m_ref_length)*2))) + new_handler= NULL; - if (new_handler->ha_open(table, name, + if (new_handler && + new_handler->ha_open(table, name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) - DBUG_RETURN(NULL); + new_handler= NULL; DBUG_RETURN((handler*) new_handler); } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index a38d56af8ff..cd90c4cc1d5 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -55,6 +55,16 @@ typedef struct st_ha_data_partition HA_DUPLICATE_POS | \ HA_CAN_SQL_HANDLER | \ HA_CAN_INSERT_DELAYED) + +/* First 4 bytes in the .par file is the number of 32-bit words in the file */ +#define PAR_WORD_SIZE 4 +/* offset to the .par file checksum */ +#define PAR_CHECKSUM_OFFSET 4 +/* offset to the total number of partitions */ +#define PAR_NUM_PARTS_OFFSET 8 +/* offset to the engines array */ +#define PAR_ENGINES_OFFSET 12 + class ha_partition :public handler { private: @@ -71,7 +81,7 @@ private: /* Data for the partition handler */ int m_mode; // Open mode uint m_open_test_lock; // Open test_if_locked - char *m_file_buffer; // Buffer with names + char *m_file_buffer; // Content of the .par file char *m_name_buffer_ptr; // Pointer to first partition name plugin_ref *m_engine_array; // Array of types of the handlers handler **m_file; // Array of references to handler inst. @@ -281,7 +291,10 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, bool clone); + bool setup_engine_array(MEM_ROOT *mem_root); + bool read_par_file(const char *name); + bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); bool create_handlers(MEM_ROOT *mem_root); void clear_handler_file(); diff --git a/sql/handler.cc b/sql/handler.cc index 8adb8e061a3..718529fa5fc 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2045,14 +2045,21 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root) on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. */ - if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) - return NULL; - if (new_handler && !new_handler->ha_open(table, - name, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(ref_length)*2))) + new_handler= NULL; + /* + TODO: Implement a more efficient way to have more than one index open for + the same table instance. The ha_open call is not cachable for clone. + */ + if (new_handler && new_handler->ha_open(table, + name, + table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + return new_handler; } -- cgit v1.2.1 From a5e8d9029b1340762bc88226c0a9344f241a044c Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Fri, 22 Apr 2011 11:20:55 +0400 Subject: Bug#11756928 48916: SERVER INCORRECTLY PROCESSING HAVING CLAUSES WITH AN ORDER BY CLAUSE Before sorting HAVING condition is split into two parts, first part is a table related condition and the rest of is HAVING part. 
Extraction of HAVING part does not take into account the fact that some of conditions might be non-const but have 'used_tables' == 0 (independent subqueries) and because of that these conditions are cut off by make_cond_for_table() function. The fix is to use (table_map) 0 instead of used_tables in third argument for make_cond_for_table() function. It allows to extract elements which belong to sorted table and in addition elements which are independend subqueries. mysql-test/r/having.result: test case mysql-test/t/having.test: test case sql/sql_select.cc: The fix is to use (table_map) 0 instead of used_tables in third argument for make_cond_for_table() function. It allows to extract elements which belong to sorted table and in addition elements which are independend subqueries. --- mysql-test/r/having.result | 22 ++++++++++++++++++++++ mysql-test/t/having.test | 26 ++++++++++++++++++++++++++ sql/sql_select.cc | 38 +++++++++++++++++++++++++++++++++++++- 3 files changed, 85 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/having.result b/mysql-test/r/having.result index cd1b4ae0218..4253ac7e5c3 100644 --- a/mysql-test/r/having.result +++ b/mysql-test/r/having.result @@ -545,4 +545,26 @@ FROM t1 JOIN t2 ON t2.f2 LIKE 'x' HAVING field1 < 7; field1 DROP TABLE t1,t2; +# +# Bug#48916 Server incorrectly processing HAVING clauses with an ORDER BY clause +# +CREATE TABLE t1 (f1 INT, f2 INT); +INSERT INTO t1 VALUES (1, 0), (2, 1), (3, 2); +CREATE TABLE t2 (f1 INT, f2 INT); +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT f1, f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; +f1 +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT 4, 2) AND t1.f1 >= 0 +ORDER BY t1.f1; +f1 +SELECT t1.f1 +FROM t1 +HAVING 2 IN (SELECT f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; +f1 +DROP TABLE t1,t2; End of 5.1 tests diff --git a/mysql-test/t/having.test b/mysql-test/t/having.test index c808e747523..2ed8b40b858 100644 --- a/mysql-test/t/having.test +++ b/mysql-test/t/having.test @@ -564,4 +564,30 @@ HAVING field1 < 7; DROP TABLE t1,t2; +--echo # +--echo # Bug#48916 Server incorrectly processing HAVING clauses with an ORDER BY clause +--echo # + +CREATE TABLE t1 (f1 INT, f2 INT); +INSERT INTO t1 VALUES (1, 0), (2, 1), (3, 2); +CREATE TABLE t2 (f1 INT, f2 INT); + +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT f1, f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; + +SELECT t1.f1 +FROM t1 +HAVING (3, 2) IN (SELECT 4, 2) AND t1.f1 >= 0 +ORDER BY t1.f1; + +SELECT t1.f1 +FROM t1 +HAVING 2 IN (SELECT f2 FROM t2) AND t1.f1 >= 0 +ORDER BY t1.f1; + +DROP TABLE t1,t2; + + --echo End of 5.1 tests diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ab287e57aa1..46e9ad242b3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -2215,7 +2215,7 @@ JOIN::exec() Item* sort_table_cond= make_cond_for_table(curr_join->tmp_having, used_tables, - used_tables); + (table_map) 0); if (sort_table_cond) { if (!curr_table->select) @@ -12852,6 +12852,42 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) return 0; // keep test } +/** + Extract a condition that can be checked after reading given table + + @param cond Condition to analyze + @param tables Tables for which "current field values" are available + @param used_table Table that we're extracting the condition for (may + also include PSEUDO_TABLE_BITS, and may be zero) + @param exclude_expensive_cond Do not push expensive conditions + + @retval <>NULL Generated condition + @retval =NULL Already checked, OR error + + @details + Extract the condition that can be checked after 
reading the table + specified in 'used_table', given that current-field values for tables + specified in 'tables' bitmap are available. + If 'used_table' is 0 + - extract conditions for all tables in 'tables'. + - extract conditions are unrelated to any tables + in the same query block/level(i.e. conditions + which have used_tables == 0). + + The function assumes that + - Constant parts of the condition has already been checked. + - Condition that could be checked for tables in 'tables' has already + been checked. + + The function takes into account that some parts of the condition are + guaranteed to be true by employed 'ref' access methods (the code that + does this is located at the end, search down for "EQ_FUNC"). + + @note + Make sure to keep the implementations of make_cond_for_table() and + make_cond_after_sjm() synchronized. + make_cond_for_info_schema() uses similar algorithm as well. +*/ static COND * make_cond_for_table(COND *cond, table_map tables, table_map used_table) -- cgit v1.2.1 From c68a034e8382c03118f8c6708dd029a89aae30a7 Mon Sep 17 00:00:00 2001 From: Serge Kozlov Date: Mon, 25 Apr 2011 23:49:56 +0400 Subject: BUG#12371924. Fxi test case --- mysql-test/suite/binlog/r/binlog_bug23533.result | 4 ---- mysql-test/suite/binlog/t/binlog_bug23533.test | 16 ++++++++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/binlog/r/binlog_bug23533.result b/mysql-test/suite/binlog/r/binlog_bug23533.result index 02605839ab0..d5cd93284a2 100644 --- a/mysql-test/suite/binlog/r/binlog_bug23533.result +++ b/mysql-test/suite/binlog/r/binlog_bug23533.result @@ -3,8 +3,6 @@ CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT, b TEXT, PRIMARY KEY(a)) ENGINE=I SELECT COUNT(*) FROM t1; COUNT(*) 1000 -SET @saved_binlog_cache_size=@@binlog_cache_size; -SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; SET GLOBAL binlog_cache_size=4096; SET GLOBAL max_binlog_cache_size=4096; START TRANSACTION; @@ -14,6 +12,4 @@ COMMIT; SHOW TABLES LIKE 't%'; Tables_in_test (t%) t1 -SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; -SET GLOBAL binlog_cache_size=@saved_binlog_cache_size; DROP TABLE t1; diff --git a/mysql-test/suite/binlog/t/binlog_bug23533.test b/mysql-test/suite/binlog/t/binlog_bug23533.test index 05fe9fd9523..c05abe788c6 100644 --- a/mysql-test/suite/binlog/t/binlog_bug23533.test +++ b/mysql-test/suite/binlog/t/binlog_bug23533.test @@ -24,11 +24,15 @@ while ($i) SELECT COUNT(*) FROM t1; # Set small value for max_binlog_cache_size -SET @saved_binlog_cache_size=@@binlog_cache_size; -SET @saved_max_binlog_cache_size=@@max_binlog_cache_size; +let $saved_binlog_cache_size= query_get_value(SELECT @@binlog_cache_size AS Value, Value, 1); +let $saved_max_binlog_cache_size= query_get_value(SELECT @@max_binlog_cache_size AS Value, Value, 1); SET GLOBAL binlog_cache_size=4096; SET GLOBAL max_binlog_cache_size=4096; +# New value of max_binlog_cache_size will apply to new session +disconnect default; +connect(default,localhost,root,,test); + # Copied data from t1 into t2 large than max_binlog_cache_size START TRANSACTION; --error 1197 @@ -37,6 +41,10 @@ COMMIT; SHOW TABLES LIKE 't%'; # 5.1 End of Test -SET GLOBAL max_binlog_cache_size=@saved_max_binlog_cache_size; -SET GLOBAL binlog_cache_size=@saved_binlog_cache_size; +--disable_query_log +eval SET GLOBAL max_binlog_cache_size=$saved_max_binlog_cache_size; +eval SET GLOBAL binlog_cache_size=$saved_binlog_cache_size; +--enable_query_log DROP TABLE t1; +disconnect default; 
+connect(default,localhost,root,,test); -- cgit v1.2.1 From bdaaee5d0495ad66e95bcb42e06866855cf417b8 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Tue, 26 Apr 2011 10:21:09 +0200 Subject: post fix for werror build for bug#11766249. --- sql/ha_partition.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index a0f346f7a64..460d5826a91 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -2587,7 +2587,7 @@ void ha_data_partition_destroy(void *ha_data) int ha_partition::open(const char *name, int mode, uint test_if_locked) { char *name_buffer_ptr; - int error; + int error= HA_ERR_INITIALIZATION; uint alloc_len; handler **file; char name_buff[FN_REFLEN]; @@ -2601,7 +2601,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) - DBUG_RETURN(1); + DBUG_RETURN(error); name_buffer_ptr= m_name_buffer_ptr; m_start_key.length= 0; m_rec0= table->record[0]; @@ -2612,7 +2612,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) { - DBUG_RETURN(1); + DBUG_RETURN(error); } { /* @@ -2635,7 +2635,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) - DBUG_RETURN(1); + DBUG_RETURN(error); bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ if (!m_is_clone_of) @@ -2644,7 +2644,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) { bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(1); + DBUG_RETURN(error); } bitmap_set_all(&(m_part_info->used_partitions)); } -- cgit v1.2.1 From a60c39a2ffc7ec0c0b4ae8bbadf733773ec7557f Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Wed, 27 Apr 2011 11:35:57 +0400 Subject: Bug#11889186 60503: CRASH IN MAKE_DATE_TIME WITH DATE_FORMAT / STR_TO_DATE COMBINATION calc_daynr() function returns negative result if malformed date with zero year and month is used. Attempt to calculate week day on negative value leads to crash. The fix is return NULL for 'W', 'a', 'w' specifiers if zero year and month is used. Additional fix for calc_daynr(): --added assertion that result can not be negative --return 0 if zero year and month is used mysql-test/r/func_time.result: test case mysql-test/t/func_time.test: test case sql-common/my_time.c: --added assertion that result can not be negative --return 0 if zero year and month is used sql/item_timefunc.cc: eturn NULL for 'W', 'a', 'w' specifiers if zero year and month is used. 
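For illustration, a worked example of the negative result: with '0000-00-11' the formula in the sql-common/my_time.c hunk below gives delsum = 365*0 + 31*(0 - 1) + 11 = -20, and with year 0 the leap/century terms add nothing, so -20 comes back and the weekday lookup then indexes with a negative value. The sketch below reproduces only that arithmetic (month <= 2 path, no DBUG machinery) and is not the server function.

#include <stdio.h>

/* calc_daynr() arithmetic for month <= 2, without the DBUG machinery and
   without the correction that only applies to months after February. */
static long daynr_sketch(int y, int month, int day)
{
  long delsum= (long) (365 * y + 31 * (month - 1) + day);
  int temp;

  y--;                              /* month <= 2 borrows from the previous year */
  temp= (int) ((y / 100 + 1) * 3) / 4;
  return delsum + y / 4 - temp;
}

int main(void)
{
  /* Before the fix, year == 0 && month == 0 fell through to this arithmetic
     whenever day != 0, e.g. for DATE_FORMAT('0000-00-11', '%W'). */
  printf("%ld\n", daynr_sketch(0, 0, 11));  /* prints -20 */
  return 0;
}

With the fix, calc_daynr() returns 0 for any zero year-and-month, and make_date_time() returns NULL for the %W, %a and %w specifiers instead.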
--- mysql-test/r/func_time.result | 12 ++++++++++++ mysql-test/t/func_time.test | 8 ++++++++ sql-common/my_time.c | 3 ++- sql/item_timefunc.cc | 6 +++--- 4 files changed, 25 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index f67171af99f..1e05443d8ac 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1405,4 +1405,16 @@ NULL SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR); ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR) NULL +# +# Bug#11889186 60503: CRASH IN MAKE_DATE_TIME WITH DATE_FORMAT / STR_TO_DATE COMBINATION +# +SELECT DATE_FORMAT('0000-00-11', '%W'); +DATE_FORMAT('0000-00-11', '%W') +NULL +SELECT DATE_FORMAT('0000-00-11', '%a'); +DATE_FORMAT('0000-00-11', '%a') +NULL +SELECT DATE_FORMAT('0000-00-11', '%w'); +DATE_FORMAT('0000-00-11', '%w') +NULL End of 5.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 938359f8c11..2000d81f80d 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -913,4 +913,12 @@ SELECT CAST((MONTH(FROM_UNIXTIME(@@GLOBAL.SQL_MODE))) AS BINARY(1025)); SELECT ADDDATE(MONTH(FROM_UNIXTIME(NULL)),INTERVAL 1 HOUR); +--echo # +--echo # Bug#11889186 60503: CRASH IN MAKE_DATE_TIME WITH DATE_FORMAT / STR_TO_DATE COMBINATION +--echo # + +SELECT DATE_FORMAT('0000-00-11', '%W'); +SELECT DATE_FORMAT('0000-00-11', '%a'); +SELECT DATE_FORMAT('0000-00-11', '%w'); + --echo End of 5.1 tests diff --git a/sql-common/my_time.c b/sql-common/my_time.c index ca11c54a999..80a7e0daa2c 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -772,7 +772,7 @@ long calc_daynr(uint year,uint month,uint day) int y= year; /* may be < 0 temporarily */ DBUG_ENTER("calc_daynr"); - if (y == 0 && month == 0 && day == 0) + if (y == 0 && month == 0) DBUG_RETURN(0); /* Skip errors */ /* Cast to int to be able to handle month == 0 */ delsum= (long) (365 * y + 31 *((int) month - 1) + (int) day); @@ -783,6 +783,7 @@ long calc_daynr(uint year,uint month,uint day) temp=(int) ((y/100+1)*3)/4; DBUG_PRINT("exit",("year: %d month: %d day: %d -> daynr: %ld", y+(month <= 2),month,day,delsum+y/4-temp)); + DBUG_ASSERT(delsum+(int) y/4-temp > 0); DBUG_RETURN(delsum+(int) y/4-temp); } /* calc_daynr */ diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index ecf790cc061..1044b4682ef 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -648,7 +648,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'W': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -657,7 +657,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'a': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -816,7 +816,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, } break; case 'w': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),1); -- cgit v1.2.1 From a1f7ceb281f9d87c9baea125ebab26f99a0370f8 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 27 Apr 2011 17:24:10 +0530 Subject: BUG#12329909 - BUILDING 
MYSQL WITH DEBUG SUPPORT FAILS WITH LIBEDIT Fixed by checking the return value of the write() function calls and handling the open files and fd appropriately. cmd-line-utils/libedit/vi.c: BUG#12329909 - BUILDING MYSQL WITH DEBUG SUPPORT FAILS WITH LIBEDIT Added a check on the return value of the write() function calls. --- cmd-line-utils/libedit/vi.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cmd-line-utils/libedit/vi.c b/cmd-line-utils/libedit/vi.c index d628f076a1d..beffc7b40b5 100644 --- a/cmd-line-utils/libedit/vi.c +++ b/cmd-line-utils/libedit/vi.c @@ -1012,8 +1012,10 @@ vi_histedit(EditLine *el, int c __attribute__((__unused__))) if (fd < 0) return CC_ERROR; cp = el->el_line.buffer; - write(fd, cp, el->el_line.lastchar - cp +0u); - write(fd, "\n", 1); + if (write(fd, cp, el->el_line.lastchar - cp +0u) == -1) + goto error; + if (write(fd, "\n", 1) == -1) + goto error; pid = fork(); switch (pid) { case -1: @@ -1041,6 +1043,12 @@ vi_histedit(EditLine *el, int c __attribute__((__unused__))) unlink(tempfile); /* return CC_REFRESH; */ return ed_newline(el, 0); + +/* XXXMYSQL: Avoid compiler warnings. */ +error: + close(fd); + unlink(tempfile); + return CC_ERROR; } /* vi_history_word(): -- cgit v1.2.1 From 401941c25898300462b1adbd9886c8d55e92f7f2 Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Wed, 27 Apr 2011 17:51:06 +0200 Subject: Post push fix for bug#11766249 bug#59316 Partitions can have different ref_length (position data length). Removed DBUG_ASSERT which crashed debug builds when using MAX_ROWS on some partitions. --- mysql-test/r/partition_myisam.result | 90 ++++++++++++++++++++++++++++++ mysql-test/r/partition_not_embedded.result | 81 --------------------------- mysql-test/t/partition_myisam.test | 62 ++++++++++++++++++++ mysql-test/t/partition_not_embedded.test | 53 ------------------ sql/ha_partition.cc | 9 ++- 5 files changed, 158 insertions(+), 137 deletions(-) create mode 100644 mysql-test/r/partition_myisam.result delete mode 100644 mysql-test/r/partition_not_embedded.result create mode 100644 mysql-test/t/partition_myisam.test delete mode 100644 mysql-test/t/partition_not_embedded.test diff --git a/mysql-test/r/partition_myisam.result b/mysql-test/r/partition_myisam.result new file mode 100644 index 00000000000..1995c87eff2 --- /dev/null +++ b/mysql-test/r/partition_myisam.result @@ -0,0 +1,90 @@ +DROP TABLE IF EXISTS t1, t2; +# Bug#30102 test +CREATE TABLE t1 (a INT) +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (6), +PARTITION `p1....................` VALUES LESS THAN (9), +PARTITION p2 VALUES LESS THAN MAXVALUE); +# List of files in database `test`, all original t1-files here +t1#P#p0.MYD +t1#P#p0.MYI +t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD +t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI +t1#P#p2.MYD +t1#P#p2.MYI +t1.frm +t1.par +INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); +# Renaming to a file name where the first partition is 250 chars +# and the second partition is 350 chars +RENAME TABLE t1 TO `t2_new..............................................end`; +Got one of the listed errors +# List of files in database `test`, should not be any t2-files here +# List of files in database `test`, should be all t1-files here +t1#P#p0.MYD +t1#P#p0.MYI +t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD 
+t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI +t1#P#p2.MYD +t1#P#p2.MYI +t1.frm +t1.par +SELECT * FROM t1; +a +1 +10 +2 +3 +4 +5 +6 +7 +8 +9 +# List of files in database `test`, should be all t1-files here +t1#P#p0.MYD +t1#P#p0.MYI +t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD +t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI +t1#P#p2.MYD +t1#P#p2.MYI +t1.frm +t1.par +# Renaming to a file name where the first partition is 156 chars +# and the second partition is 256 chars +RENAME TABLE t1 TO `t2_............................_end`; +Got one of the listed errors +# List of files in database `test`, should not be any t2-files here +# List of files in database `test`, should be all t1-files here +t1#P#p0.MYD +t1#P#p0.MYI +t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD +t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI +t1#P#p2.MYD +t1#P#p2.MYI +t1.frm +t1.par +SELECT * FROM t1; +a +1 +10 +2 +3 +4 +5 +6 +7 +8 +9 +DROP TABLE t1; +# Should not be any files left here +# End of bug#30102 test. +# Test of post-push fix for bug#11766249/59316 +CREATE TABLE t1 (a INT, b VARCHAR(255), PRIMARY KEY (a)) +ENGINE = MyISAM +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (0) MAX_ROWS=100, +PARTITION p1 VALUES LESS THAN (100) MAX_ROWS=100, +PARTITION pMax VALUES LESS THAN MAXVALUE); +INSERT INTO t1 VALUES (1, "Partition p1, first row"); +DROP TABLE t1; diff --git a/mysql-test/r/partition_not_embedded.result b/mysql-test/r/partition_not_embedded.result deleted file mode 100644 index c942189a956..00000000000 --- a/mysql-test/r/partition_not_embedded.result +++ /dev/null @@ -1,81 +0,0 @@ -DROP TABLE IF EXISTS t1, t2; -# Bug#30102 test -CREATE TABLE t1 (a INT) -PARTITION BY RANGE (a) -(PARTITION p0 VALUES LESS THAN (6), -PARTITION `p1....................` VALUES LESS THAN (9), -PARTITION p2 VALUES LESS THAN MAXVALUE); -# List of files in database `test`, all original t1-files here -t1#P#p0.MYD -t1#P#p0.MYI -t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD -t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI -t1#P#p2.MYD -t1#P#p2.MYI -t1.frm -t1.par -INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); -# Renaming to a file name where the first partition is 250 chars -# and the second partition is 350 chars -RENAME TABLE t1 TO `t2_new..............................................end`; -Got one of the listed errors -# List of files in database `test`, should not be any t2-files here -# List of files in database `test`, should be all t1-files here -t1#P#p0.MYD -t1#P#p0.MYI -t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD -t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI -t1#P#p2.MYD -t1#P#p2.MYI -t1.frm -t1.par -SELECT * FROM t1; -a -1 -10 -2 -3 -4 -5 -6 -7 -8 -9 -# List of files in database `test`, should be all t1-files here -t1#P#p0.MYD -t1#P#p0.MYI -t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD 
-t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI -t1#P#p2.MYD -t1#P#p2.MYI -t1.frm -t1.par -# Renaming to a file name where the first partition is 156 chars -# and the second partition is 256 chars -RENAME TABLE t1 TO `t2_............................_end`; -Got one of the listed errors -# List of files in database `test`, should not be any t2-files here -# List of files in database `test`, should be all t1-files here -t1#P#p0.MYD -t1#P#p0.MYI -t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYD -t1#P#p1@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e@002e.MYI -t1#P#p2.MYD -t1#P#p2.MYI -t1.frm -t1.par -SELECT * FROM t1; -a -1 -10 -2 -3 -4 -5 -6 -7 -8 -9 -DROP TABLE t1; -# Should not be any files left here -# End of bug#30102 test. diff --git a/mysql-test/t/partition_myisam.test b/mysql-test/t/partition_myisam.test new file mode 100644 index 00000000000..51f46aa71be --- /dev/null +++ b/mysql-test/t/partition_myisam.test @@ -0,0 +1,62 @@ +-- source include/have_partition.inc +--disable_warnings +DROP TABLE IF EXISTS t1, t2; +--enable_warnings +let $MYSQLD_DATADIR= `SELECT @@datadir`; + +# +# Bug#30102: rename table does corrupt tables with partition files on failure +# +--echo # Bug#30102 test +CREATE TABLE t1 (a INT) +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (6), + PARTITION `p1....................` VALUES LESS THAN (9), + PARTITION p2 VALUES LESS THAN MAXVALUE); +# partition p1 is 't1#P#p1' + @002e * 20 = 107 characters + file ending +# total path lenght of './test/t1#P#p1@002e@002e<...>@002e.MY[ID]' is 118 chars +--echo # List of files in database `test`, all original t1-files here +--list_files $MYSQLD_DATADIR/test t1* +INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); +--echo # Renaming to a file name where the first partition is 250 chars +--echo # and the second partition is 350 chars +# 7,7 avoids the error message, which is not deterministic. +--error 7,7 +RENAME TABLE t1 TO `t2_new..............................................end`; +# 1234567890123456789012345678901234567890123456 +--echo # List of files in database `test`, should not be any t2-files here +--list_files $MYSQLD_DATADIR/test t2* +--echo # List of files in database `test`, should be all t1-files here +--list_files $MYSQLD_DATADIR/test t1* +--sorted_result +SELECT * FROM t1; +--echo # List of files in database `test`, should be all t1-files here +--list_files $MYSQLD_DATADIR/test t1* +--echo # Renaming to a file name where the first partition is 156 chars +--echo # and the second partition is 256 chars +# 7,7 avoids the error message, which is not deterministic. +--error 7,7 +RENAME TABLE t1 TO `t2_............................_end`; +# 1234567890123456789012345678 +# 7 + 4 + 5 + 28 * 5 = 16 + 140 = 156 +--echo # List of files in database `test`, should not be any t2-files here +--list_files $MYSQLD_DATADIR/test t2* +--echo # List of files in database `test`, should be all t1-files here +--list_files $MYSQLD_DATADIR/test t1* +--sorted_result +SELECT * FROM t1; +DROP TABLE t1; +--echo # Should not be any files left here +--list_files $MYSQLD_DATADIR/test t1* +--list_files $MYSQLD_DATADIR/test t2* +--echo # End of bug#30102 test. 
+ +--echo # Test of post-push fix for bug#11766249/59316 +CREATE TABLE t1 (a INT, b VARCHAR(255), PRIMARY KEY (a)) +ENGINE = MyISAM +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (0) MAX_ROWS=100, + PARTITION p1 VALUES LESS THAN (100) MAX_ROWS=100, + PARTITION pMax VALUES LESS THAN MAXVALUE); +INSERT INTO t1 VALUES (1, "Partition p1, first row"); +DROP TABLE t1; diff --git a/mysql-test/t/partition_not_embedded.test b/mysql-test/t/partition_not_embedded.test deleted file mode 100644 index 5c512085a9e..00000000000 --- a/mysql-test/t/partition_not_embedded.test +++ /dev/null @@ -1,53 +0,0 @@ --- source include/have_partition.inc --- source include/not_embedded.inc ---disable_warnings -DROP TABLE IF EXISTS t1, t2; ---enable_warnings -let $MYSQLD_DATADIR= `SELECT @@datadir`; - -# -# Bug#30102: rename table does corrupt tables with partition files on failure -# ---echo # Bug#30102 test -CREATE TABLE t1 (a INT) -PARTITION BY RANGE (a) -(PARTITION p0 VALUES LESS THAN (6), - PARTITION `p1....................` VALUES LESS THAN (9), - PARTITION p2 VALUES LESS THAN MAXVALUE); -# partition p1 is 't1#P#p1' + @002e * 20 = 107 characters + file ending -# total path lenght of './test/t1#P#p1@002e@002e<...>@002e.MY[ID]' is 118 chars ---echo # List of files in database `test`, all original t1-files here ---list_files $MYSQLD_DATADIR/test t1* -INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); ---echo # Renaming to a file name where the first partition is 250 chars ---echo # and the second partition is 350 chars -# 7,7 avoids the error message, which is not deterministic. ---error 7,7 -RENAME TABLE t1 TO `t2_new..............................................end`; -# 1234567890123456789012345678901234567890123456 ---echo # List of files in database `test`, should not be any t2-files here ---list_files $MYSQLD_DATADIR/test t2* ---echo # List of files in database `test`, should be all t1-files here ---list_files $MYSQLD_DATADIR/test t1* ---sorted_result -SELECT * FROM t1; ---echo # List of files in database `test`, should be all t1-files here ---list_files $MYSQLD_DATADIR/test t1* ---echo # Renaming to a file name where the first partition is 156 chars ---echo # and the second partition is 256 chars -# 7,7 avoids the error message, which is not deterministic. ---error 7,7 -RENAME TABLE t1 TO `t2_............................_end`; -# 1234567890123456789012345678 -# 7 + 4 + 5 + 28 * 5 = 16 + 140 = 156 ---echo # List of files in database `test`, should not be any t2-files here ---list_files $MYSQLD_DATADIR/test t2* ---echo # List of files in database `test`, should be all t1-files here ---list_files $MYSQLD_DATADIR/test t1* ---sorted_result -SELECT * FROM t1; -DROP TABLE t1; ---echo # Should not be any files left here ---list_files $MYSQLD_DATADIR/test t1* ---list_files $MYSQLD_DATADIR/test t2* ---echo # End of bug#30102 test. diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 460d5826a91..4883e0a0571 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -2697,7 +2697,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) (PARTITION_ENABLED_TABLE_FLAGS)); while (*(++file)) { - DBUG_ASSERT(ref_length >= (*file)->ref_length); + /* MyISAM can have smaller ref_length for partitions with MAX_ROWS set */ set_if_bigger(ref_length, ((*file)->ref_length)); /* Verify that all partitions have the same set of table flags. 
@@ -3957,12 +3957,15 @@ end_dont_reset_start_part: void ha_partition::position(const uchar *record) { handler *file= m_file[m_last_part]; + uint pad_length; DBUG_ENTER("ha_partition::position"); file->position(record); int2store(ref, m_last_part); - memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, - (ref_length - PARTITION_BYTES_IN_POS)); + memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, file->ref_length); + pad_length= m_ref_length - PARTITION_BYTES_IN_POS - file->ref_length; + if (pad_length) + memset((ref + PARTITION_BYTES_IN_POS + file->ref_length), 0, pad_length); #ifdef SUPPORTING_PARTITION_OVER_DIFFERENT_ENGINES #ifdef HAVE_purify -- cgit v1.2.1 From 54c1da00ee2e6603366d87667c45eda784ba216f Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Fri, 29 Apr 2011 09:48:26 +0200 Subject: removed dead obsolete code --- sql/ha_partition.cc | 6 ------ 1 file changed, 6 deletions(-) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 4883e0a0571..d09181822ee 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3967,12 +3967,6 @@ void ha_partition::position(const uchar *record) if (pad_length) memset((ref + PARTITION_BYTES_IN_POS + file->ref_length), 0, pad_length); -#ifdef SUPPORTING_PARTITION_OVER_DIFFERENT_ENGINES -#ifdef HAVE_purify - bzero(ref + PARTITION_BYTES_IN_POS + ref_length, - max_ref_length-ref_length); -#endif /* HAVE_purify */ -#endif DBUG_VOID_RETURN; } -- cgit v1.2.1 From 044bf3b6b3235cf2e87dbe2f500ccfab9612f5dd Mon Sep 17 00:00:00 2001 From: Mattias Jonsson Date: Fri, 29 Apr 2011 13:00:16 +0200 Subject: bug#11765667: bug#58655: ASSERTION FAILED, SERVER CRASHES WITH MYSQLD GOT SIGNAL 6 The partitioning engine checked the auto_increment column even if it was not to be written, triggering a DBUG_ASSERT. Fixed by checking if table->write_set for that column was set. 
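The core of the change is a single extra membership test against the statement's write set. A toy C++ sketch of that guard follows; WriteSet, ToyPartitionHandler and the field names are invented for the example (the server uses MY_BITMAP, table->write_set and bitmap_is_set()), so treat it as an illustration of the idea rather than the real handler code.

#include <bitset>
#include <cstddef>

/* Toy stand-ins for table->write_set and the partition handler state. */
struct WriteSet
{
  std::bitset<64> bits;                      /* one bit per column */
  bool is_set(std::size_t field) const { return bits.test(field); }
};

struct ToyPartitionHandler
{
  bool          has_auto_inc;                /* table has an AUTO_INCREMENT column */
  std::size_t   auto_inc_field_index;        /* its position in the row            */
  unsigned long next_auto_inc_val;           /* cached shared counter              */

  void update_row(const WriteSet &write_set, unsigned long new_auto_inc)
  {
    /*
      Before the fix the write_set test was missing, so an UPDATE that never
      touched the AUTO_INCREMENT column still made the handler read it and
      tripped a debug assertion.  With the test, the cached counter is only
      refreshed when the column really is being written.
    */
    if (has_auto_inc &&
        write_set.is_set(auto_inc_field_index) &&
        new_auto_inc >= next_auto_inc_val)
      next_auto_inc_val= new_auto_inc + 1;
  }
};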
--- mysql-test/r/partition.result | 29 +++++++++++++++++++++++++++++ mysql-test/t/partition.test | 31 +++++++++++++++++++++++++++++++ sql/ha_partition.cc | 11 ++++++++--- 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index 138264fd4e1..379f7499e11 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -1,5 +1,34 @@ drop table if exists t1, t2; # +# Bug#11765667: bug#58655: ASSERTION FAILED, +# SERVER CRASHES WITH MYSQLD GOT SIGNAL 6 +# +CREATE TABLE t1 ( +id MEDIUMINT NOT NULL AUTO_INCREMENT, +dt DATE, st VARCHAR(255), uid INT, +id2nd LONGBLOB, filler VARCHAR(255), PRIMARY KEY(id, dt) +); +INSERT INTO t1 (dt, st, uid, id2nd, filler) VALUES +('1991-03-14', 'Initial Insert', 200, 1234567, 'No Data'), +('1991-02-26', 'Initial Insert', 201, 1234567, 'No Data'), +('1992-03-16', 'Initial Insert', 234, 1234567, 'No Data'), +('1992-07-02', 'Initial Insert', 287, 1234567, 'No Data'), +('1991-05-26', 'Initial Insert', 256, 1234567, 'No Data'), +('1991-04-25', 'Initial Insert', 222, 1234567, 'No Data'), +('1993-03-12', 'Initial Insert', 267, 1234567, 'No Data'), +('1993-03-14', 'Initial Insert', 291, 1234567, 'No Data'), +('1991-12-20', 'Initial Insert', 298, 1234567, 'No Data'), +('1994-10-31', 'Initial Insert', 220, 1234567, 'No Data'); +ALTER TABLE t1 PARTITION BY LIST (YEAR(dt)) ( +PARTITION d1 VALUES IN (1991, 1994), +PARTITION d2 VALUES IN (1993), +PARTITION d3 VALUES IN (1992, 1995, 1996) +); +INSERT INTO t1 (dt, st, uid, id2nd, filler) VALUES +('1991-07-14', 'After Partitioning Insert', 299, 1234567, 'Insert row'); +UPDATE t1 SET filler='Updating the row' WHERE uid=298; +DROP TABLE t1; +# # Bug#59297: Can't find record in 'tablename' on update inner join # CREATE TABLE t1 ( diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 7a0a5558d32..341e8780b2b 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -14,6 +14,37 @@ drop table if exists t1, t2; --enable_warnings +--echo # +--echo # Bug#11765667: bug#58655: ASSERTION FAILED, +--echo # SERVER CRASHES WITH MYSQLD GOT SIGNAL 6 +--echo # +CREATE TABLE t1 ( + id MEDIUMINT NOT NULL AUTO_INCREMENT, + dt DATE, st VARCHAR(255), uid INT, + id2nd LONGBLOB, filler VARCHAR(255), PRIMARY KEY(id, dt) +); +INSERT INTO t1 (dt, st, uid, id2nd, filler) VALUES + ('1991-03-14', 'Initial Insert', 200, 1234567, 'No Data'), + ('1991-02-26', 'Initial Insert', 201, 1234567, 'No Data'), + ('1992-03-16', 'Initial Insert', 234, 1234567, 'No Data'), + ('1992-07-02', 'Initial Insert', 287, 1234567, 'No Data'), + ('1991-05-26', 'Initial Insert', 256, 1234567, 'No Data'), + ('1991-04-25', 'Initial Insert', 222, 1234567, 'No Data'), + ('1993-03-12', 'Initial Insert', 267, 1234567, 'No Data'), + ('1993-03-14', 'Initial Insert', 291, 1234567, 'No Data'), + ('1991-12-20', 'Initial Insert', 298, 1234567, 'No Data'), + ('1994-10-31', 'Initial Insert', 220, 1234567, 'No Data'); +ALTER TABLE t1 PARTITION BY LIST (YEAR(dt)) ( + PARTITION d1 VALUES IN (1991, 1994), + PARTITION d2 VALUES IN (1993), + PARTITION d3 VALUES IN (1992, 1995, 1996) +); +INSERT INTO t1 (dt, st, uid, id2nd, filler) VALUES + ('1991-07-14', 'After Partitioning Insert', 299, 1234567, 'Insert row'); +UPDATE t1 SET filler='Updating the row' WHERE uid=298; + +DROP TABLE t1; + --echo # --echo # Bug#59297: Can't find record in 'tablename' on update inner join --echo # diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index d09181822ee..82e38be2238 100644 --- 
a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -3403,15 +3403,19 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data) exit: /* - if updating an auto_increment column, update + If updating an auto_increment column, update table_share->ha_data->next_auto_inc_val if needed. (not to be used if auto_increment on secondary field in a multi-column index) mysql_update does not set table->next_number_field, so we use table->found_next_number_field instead. + Also checking that the field is marked in the write set. */ - if (table->found_next_number_field && new_data == table->record[0] && - !table->s->next_number_keypart) + if (table->found_next_number_field && + new_data == table->record[0] && + !table->s->next_number_keypart && + bitmap_is_set(table->write_set, + table->found_next_number_field->field_index)) { HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data; if (!ha_data->auto_inc_initialized) @@ -3974,6 +3978,7 @@ void ha_partition::position(const uchar *record) void ha_partition::column_bitmaps_signal() { handler::column_bitmaps_signal(); + /* Must read all partition fields to make position() call possible */ bitmap_union(table->read_set, &m_part_info->full_part_field_set); } -- cgit v1.2.1 From 6f7d0f182d06fcebbae4af09cad030a5ea7331ed Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Fri, 29 Apr 2011 14:04:28 +0300 Subject: Sync 5.1 .inc file with 5.5 due to a missing changeset Add extra codes to wait_until_disconnected.inc that are present in 5.5, but not in 5.1. The missing codes cause innodb_bug59641 to fail in 5.1 on Windows PB2 runs. The addition of those codes in 5.5 was done in luis.soares@sun.com-20090930233215-aup3kxy4j6ltvjfp --- mysql-test/include/wait_until_disconnected.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/include/wait_until_disconnected.inc b/mysql-test/include/wait_until_disconnected.inc index a4362e52d01..8a989becc18 100644 --- a/mysql-test/include/wait_until_disconnected.inc +++ b/mysql-test/include/wait_until_disconnected.inc @@ -7,7 +7,7 @@ let $counter= 500; let $mysql_errno= 0; while (!$mysql_errno) { - --error 0,1053,2002,2006,2013 + --error 0,1040,1053,2002,2003,2006,2013 show status; dec $counter; -- cgit v1.2.1 From 8843aea78a6ddb99598ad77818e5f71fd993ed54 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Fri, 29 Apr 2011 18:52:46 +0530 Subject: Bug#11757855 - 49967: built-in libedit doesn't read .editrc on linux. MySQL client when build with libedit support ignores .editrc at startup. The reason for this regression was the incluison of a safety check, issetugid(), which is not available on some linux platforms. Fixed by adding an equivalent check for platforms which have get[e][u|g]id() set of functions. cmd-line-utils/libedit/el.c: Bug#11757855 - 49967: built-in libedit doesn't read .editrc on linux. Added function calls to check user/group IDs on linux systems which does not have issetugid() function. configure.in: Bug#11757855 - 49967: built-in libedit doesn't read .editrc on linux. Added check for getuid, geteuid, getgid, getegid functions. 
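The portable check itself is small enough to show on its own. The sketch below is not the libedit code; it only restates the #ifdef logic, with HAVE_ISSETUGID and the HAVE_GET*ID macros standing in for the values that configure defines after the AC_CHECK_FUNCS additions.

#include <unistd.h>

/*
  Illustrative restatement of the guard in el_source(): refuse to read
  $HOME/.editrc when the process may be running with elevated privileges.
*/
static bool privileges_may_be_elevated(void)
{
#if defined(HAVE_ISSETUGID)
  return issetugid() != 0;           /* BSD: remembers earlier setuid state */
#elif defined(HAVE_GETUID) && defined(HAVE_GETEUID) && \
      defined(HAVE_GETGID) && defined(HAVE_GETEGID)
  return getuid() != geteuid() ||    /* setuid binary?                      */
         getgid() != getegid();      /* setgid binary?                      */
#else
  return true;                       /* cannot tell: err on the safe side   */
#endif
}

el_source() keeps its old behaviour around this predicate: when it yields true and no explicit file name was passed, the function bails out with -1 instead of touching the user's .editrc.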
--- cmd-line-utils/libedit/el.c | 21 ++++++++++++++++----- configure.in | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/cmd-line-utils/libedit/el.c b/cmd-line-utils/libedit/el.c index d99946eb68f..c7f8386773d 100644 --- a/cmd-line-utils/libedit/el.c +++ b/cmd-line-utils/libedit/el.c @@ -478,7 +478,13 @@ el_source(EditLine *el, const char *fname) fp = NULL; if (fname == NULL) { -#ifdef HAVE_ISSETUGID +/* XXXMYSQL: Bug#49967 */ +#if defined(HAVE_GETUID) && defined(HAVE_GETEUID) && \ + defined(HAVE_GETGID) && defined(HAVE_GETEGID) +#define HAVE_IDENTITY_FUNCS 1 +#endif + +#if (defined(HAVE_ISSETUGID) || defined(HAVE_IDENTITY_FUNCS)) static const char elpath[] = "/.editrc"; /* XXXMYSQL: Portability fix (for which platforms?) */ #ifdef MAXPATHLEN @@ -486,9 +492,13 @@ el_source(EditLine *el, const char *fname) #else char path[4096]; #endif - +#ifdef HAVE_ISSETUGID if (issetugid()) return (-1); +#elif defined(HAVE_IDENTITY_FUNCS) + if (getuid() != geteuid() || getgid() != getegid()) + return (-1); +#endif if ((ptr = getenv("HOME")) == NULL) return (-1); if (strlcpy(path, ptr, sizeof(path)) >= sizeof(path)) @@ -498,9 +508,10 @@ el_source(EditLine *el, const char *fname) fname = path; #else /* - * If issetugid() is missing, always return an error, in order - * to keep from inadvertently opening up the user to a security - * hole. + * If issetugid() or the above mentioned get[e][u|g]id() + * functions are missing, always return an error, in order + * to keep from inadvertently opening up the user to a + * security hole. */ return (-1); #endif diff --git a/configure.in b/configure.in index 5bd823ab879..8ba208b1ef5 100644 --- a/configure.in +++ b/configure.in @@ -1963,7 +1963,7 @@ AC_CHECK_HEADER(vis.h, [AC_DEFINE([HAVE_VIS_H], [1],[Found vis.h and the strvis() function])])]) AC_CHECK_FUNCS(strlcat strlcpy) -AC_CHECK_FUNCS(issetugid) +AC_CHECK_FUNCS(issetugid getuid geteuid getgid getegid) AC_CHECK_FUNCS(fgetln) AC_CHECK_FUNCS(getline flockfile) -- cgit v1.2.1 From f9abd1ab314d3f36ad6d2fe708e6a0ba6a7cb058 Mon Sep 17 00:00:00 2001 From: Kent Boortz Date: Tue, 3 May 2011 16:02:31 +0200 Subject: Remove soft links in the build directory, not the source directory (Bug#43312) --- client/Makefile.am | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/Makefile.am b/client/Makefile.am index ccd0d8aada0..b455a34a58b 100644 --- a/client/Makefile.am +++ b/client/Makefile.am @@ -116,10 +116,10 @@ link_sources: @LN_CP_F@ $(top_srcdir)/sql/$$f $$f; \ done; \ for f in $(strings_src) ; do \ - rm -f $(srcdir)/$$f; \ + rm -f $$f; \ @LN_CP_F@ $(top_srcdir)/strings/$$f $$f; \ done; \ - rm -f $(srcdir)/my_user.c; \ + rm -f my_user.c; \ @LN_CP_F@ $(top_srcdir)/sql-common/my_user.c my_user.c; echo timestamp > link_sources; -- cgit v1.2.1 From 659a5469c3baad4f7f7e76c69ba5f317cf7112b6 Mon Sep 17 00:00:00 2001 From: Bjorn Munch Date: Tue, 3 May 2011 16:08:25 +0200 Subject: Bug #11752142 43247: SUITE//INCLUDE: NO SUCH FILE OR DIRECTORY The originally reported dirs have been removed But found suite/bugs, removed from mysql-test/Makefile.am --- mysql-test/Makefile.am | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am index f3b4343f9a0..aa4c8bad141 100644 --- a/mysql-test/Makefile.am +++ b/mysql-test/Makefile.am @@ -80,7 +80,6 @@ TEST_DIRS = t r include std_data std_data/parts collections \ std_data/funcs_1 \ extra/binlog_tests/ extra/rpl_tests \ suite/binlog suite/binlog/t suite/binlog/r suite/binlog/std_data \ - suite/bugs suite/bugs/data 
suite/bugs/t suite/bugs/r \ suite/federated \ suite/funcs_1 suite/funcs_1/bitdata \ suite/funcs_1/include suite/funcs_1/lib suite/funcs_1/r \ -- cgit v1.2.1 From 3fa4aaac719d41cd8dafe1bc51f33e4c819eaca4 Mon Sep 17 00:00:00 2001 From: Jimmy Yang Date: Wed, 4 May 2011 03:06:21 -0700 Subject: Fix bug #11796673 address backward compatibility on index with large prefix (>=768). Table with such large prefix index will not be loaded into memory (for its metadata), unless innodb_force_recovery is on. rb://604 Approved by Marko --- storage/innobase/dict/dict0load.c | 64 ++++++++++++++++++++++++++++++---- storage/innodb_plugin/dict/dict0load.c | 63 +++++++++++++++++++++++++++++---- 2 files changed, 115 insertions(+), 12 deletions(-) diff --git a/storage/innobase/dict/dict0load.c b/storage/innobase/dict/dict0load.c index c505bfbd6c4..7e820cfb08d 100644 --- a/storage/innobase/dict/dict0load.c +++ b/storage/innobase/dict/dict0load.c @@ -454,9 +454,11 @@ dict_load_report_deleted_index( /************************************************************************ Loads definitions for index fields. */ static -void +ulint dict_load_fields( /*=============*/ + /* out: DB_SUCCESS if ok, DB_CORRUPTION + if failed */ dict_table_t* table, /* in: table */ dict_index_t* index, /* in: index whose fields to load */ mem_heap_t* heap) /* in: memory heap for temporary storage */ @@ -474,6 +476,7 @@ dict_load_fields( byte* buf; ulint i; mtr_t mtr; + ulint error = DB_SUCCESS; ut_ad(mutex_own(&(dict_sys->mutex))); @@ -535,6 +538,26 @@ dict_load_fields( field = rec_get_nth_field_old(rec, 4, &len); + if (prefix_len >= DICT_MAX_INDEX_COL_LEN) { + fprintf(stderr, "InnoDB: Error: load index" + " '%s' failed.\n" + "InnoDB: index field '%s' has a prefix" + " length of %lu bytes,\n" + "InnoDB: which exceeds the" + " maximum limit of %lu bytes.\n" + "InnoDB: Please use server that" + " supports long index prefix\n" + "InnoDB: or turn on" + " innodb_force_recovery to load" + " the table\n", + index->name, mem_heap_strdupl( + heap, (char*) field, len), + (ulong) prefix_len, + (ulong) (DICT_MAX_INDEX_COL_LEN - 1)); + error = DB_CORRUPTION; + goto func_exit; + } + dict_mem_index_add_field(index, mem_heap_strdupl(heap, (char*) field, len), @@ -543,8 +566,10 @@ dict_load_fields( btr_pcur_move_to_next_user_rec(&pcur, &mtr); } +func_exit: btr_pcur_close(&pcur); mtr_commit(&mtr); + return(error); } /************************************************************************ @@ -701,10 +726,28 @@ dict_load_indexes( space, type, n_fields); index->id = id; - dict_load_fields(table, index, heap); + error = dict_load_fields(table, index, heap); + + if (error != DB_SUCCESS) { + fprintf(stderr, "InnoDB: Error: load index '%s'" + " for table '%s' failed\n", + index->name, table->name); + + /* If the force recovery flag is set, and + if the failed index is not the primary index, we + will continue and open other indexes */ + if (srv_force_recovery + && !(index->type & DICT_CLUSTERED)) { + error = DB_SUCCESS; + goto next_rec; + } else { + goto func_exit; + } + } + dict_index_add_to_cache(table, index, page_no); } - +next_rec: btr_pcur_move_to_next_user_rec(&pcur, &mtr); } @@ -881,9 +924,18 @@ err_exit: } else { table->fk_max_recusive_level = 0; } - } else if (!srv_force_recovery) { - dict_table_remove_from_cache(table); - table = NULL; + } else { + dict_index_t* index; + + /* Make sure that at least the clustered index was loaded. 
+ Otherwise refuse to load the table */ + index = dict_table_get_first_index(table); + + if (!srv_force_recovery || !index + || !(index->type & DICT_CLUSTERED)) { + dict_table_remove_from_cache(table); + table = NULL; + } } #if 0 if (err != DB_SUCCESS && table != NULL) { diff --git a/storage/innodb_plugin/dict/dict0load.c b/storage/innodb_plugin/dict/dict0load.c index c3825902536..7a0b6edcb08 100644 --- a/storage/innodb_plugin/dict/dict0load.c +++ b/storage/innodb_plugin/dict/dict0load.c @@ -553,9 +553,10 @@ dict_load_columns( } /********************************************************************//** -Loads definitions for index fields. */ +Loads definitions for index fields. +@return DB_SUCCESS if ok, DB_CORRUPTION if failed */ static -void +ulint dict_load_fields( /*=============*/ dict_index_t* index, /*!< in: index whose fields to load */ @@ -574,6 +575,7 @@ dict_load_fields( byte* buf; ulint i; mtr_t mtr; + ulint error = DB_SUCCESS; ut_ad(mutex_own(&(dict_sys->mutex))); @@ -640,6 +642,26 @@ dict_load_fields( field = rec_get_nth_field_old(rec, 4, &len); + if (prefix_len >= DICT_MAX_INDEX_COL_LEN) { + fprintf(stderr, "InnoDB: Error: load index" + " '%s' failed.\n" + "InnoDB: index field '%s' has a prefix" + " length of %lu bytes,\n" + "InnoDB: which exceeds the" + " maximum limit of %lu bytes.\n" + "InnoDB: Please use server that" + " supports long index prefix\n" + "InnoDB: or turn on" + " innodb_force_recovery to load" + " the table\n", + index->name, mem_heap_strdupl( + heap, (char*) field, len), + (ulong) prefix_len, + (ulong) (DICT_MAX_INDEX_COL_LEN - 1)); + error = DB_CORRUPTION; + goto func_exit; + } + dict_mem_index_add_field(index, mem_heap_strdupl(heap, (char*) field, len), @@ -649,8 +671,10 @@ next_rec: btr_pcur_move_to_next_user_rec(&pcur, &mtr); } +func_exit: btr_pcur_close(&pcur); mtr_commit(&mtr); + return(error); } /********************************************************************//** @@ -801,7 +825,25 @@ dict_load_indexes( space, type, n_fields); index->id = id; - dict_load_fields(index, heap); + error = dict_load_fields(index, heap); + + if (error != DB_SUCCESS) { + fprintf(stderr, "InnoDB: Error: load index '%s'" + " for table '%s' failed\n", + index->name, table->name); + + /* If the force recovery flag is set, and + if the failed index is not the primary index, we + will continue and open other indexes */ + if (srv_force_recovery + && !(index->type & DICT_CLUSTERED)) { + error = DB_SUCCESS; + goto next_rec; + } else { + goto func_exit; + } + } + error = dict_index_add_to_cache(table, index, page_no, FALSE); /* The data dictionary tables should never contain @@ -1027,9 +1069,18 @@ err_exit: } else { table->fk_max_recusive_level = 0; } - } else if (!srv_force_recovery) { - dict_table_remove_from_cache(table); - table = NULL; + } else { + dict_index_t* index; + + /* Make sure that at least the clustered index was loaded. + Otherwise refuse to load the table */ + index = dict_table_get_first_index(table); + + if (!srv_force_recovery || !index + || !(index->type & DICT_CLUSTERED)) { + dict_table_remove_from_cache(table); + table = NULL; + } } #if 0 if (err != DB_SUCCESS && table != NULL) { -- cgit v1.2.1 From 16f26d2aaf65c2d69e24b7d644cc48a628a55862 Mon Sep 17 00:00:00 2001 From: Alexander Nozdrin Date: Wed, 4 May 2011 16:59:24 +0400 Subject: Patch for Bug#12394306: the sever may crash if mysql.event is corrupted. The problem was that wrong structure of mysql.event was not detected and the server continued to use wrongly-structured data. 
The fix is to check the structure of mysql.event after opening before any use. That makes operations with events more strict -- some operations that might work before throw errors now. That seems to be Ok. Another side-effect of the patch is that if mysql.event is corrupted, unrelated DROP DATABASE statements issue an SQL warning about inability to open mysql.event table. --- mysql-test/r/events_1.result | 106 ++++++++++++++++++++++++++---------- mysql-test/r/events_restart.result | 3 + mysql-test/t/events_1.test | 109 ++++++++++++++++++++++++++++--------- mysql-test/t/events_restart.test | 2 + sql/event_db_repository.cc | 8 +++ 5 files changed, 172 insertions(+), 56 deletions(-) diff --git a/mysql-test/r/events_1.result b/mysql-test/r/events_1.result index e7b645f5556..e0c137ea877 100644 --- a/mysql-test/r/events_1.result +++ b/mysql-test/r/events_1.result @@ -1,3 +1,4 @@ +call mtr.add_suppression("Column count of mysql.event is wrong. Expected .*, found .*\. The table is probably corrupted"); drop database if exists events_test; drop database if exists db_x; drop database if exists mysqltest_db2; @@ -259,33 +260,36 @@ events_test intact_check root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLE Try to alter mysql.event: the server should fail to load event information after mysql.event was tampered with. -First, let's add a column to the end and make sure everything -works as before +First, let's add a column to the end and check the error is emitted. ALTER TABLE mysql.event ADD dummy INT; SHOW EVENTS; -Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation -events_test intact_check root@localhost SYSTEM RECURRING NULL 10 # # NULL ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci +ERROR HY000: Failed to open mysql.event SELECT event_name FROM INFORMATION_SCHEMA.events; -event_name -intact_check +ERROR HY000: Failed to open mysql.event SHOW CREATE EVENT intact_check; -Event sql_mode time_zone Create Event character_set_client collation_connection Database Collation -intact_check SYSTEM CREATE EVENT `intact_check` ON SCHEDULE EVERY 10 HOUR STARTS '#' ON COMPLETION NOT PRESERVE ENABLE DO SELECT "nothing" latin1 latin1_swedish_ci latin1_swedish_ci +ERROR HY000: Failed to open mysql.event DROP EVENT no_such_event; -ERROR HY000: Unknown event 'no_such_event' +ERROR HY000: Failed to open mysql.event CREATE EVENT intact_check_1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; +ERROR HY000: Failed to open mysql.event ALTER EVENT intact_check_1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; +ERROR HY000: Failed to open mysql.event ALTER EVENT intact_check_1 RENAME TO intact_check_2; +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check_1; -ERROR HY000: Unknown event 'intact_check_1' +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check_2; +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check; +ERROR HY000: Failed to open mysql.event DROP DATABASE IF EXISTS mysqltest_no_such_database; Warnings: Note 1008 Can't drop database 'mysqltest_no_such_database'; database doesn't exist CREATE DATABASE mysqltest_db2; DROP DATABASE mysqltest_db2; +Warnings: +Error 1545 Failed to open mysql.event SELECT @@event_scheduler; @@event_scheduler OFF @@ -294,6 +298,7 @@ Variable_name Value event_scheduler OFF SET GLOBAL event_scheduler=OFF; ALTER TABLE mysql.event DROP dummy; +DROP EVENT intact_check; CREATE EVENT intact_check ON SCHEDULE EVERY 10 HOUR DO SELECT "nothing"; Now let's add a 
column to the first position: the server @@ -301,30 +306,32 @@ expects to see event schema name there ALTER TABLE mysql.event ADD dummy INT FIRST; SHOW EVENTS; -ERROR HY000: Cannot load from mysql.event. The table is probably corrupted +ERROR HY000: Failed to open mysql.event SELECT event_name FROM INFORMATION_SCHEMA.events; -ERROR HY000: Cannot load from mysql.event. The table is probably corrupted +ERROR HY000: Failed to open mysql.event SHOW CREATE EVENT intact_check; -ERROR HY000: Unknown event 'intact_check' +ERROR HY000: Failed to open mysql.event DROP EVENT no_such_event; -ERROR HY000: Unknown event 'no_such_event' +ERROR HY000: Failed to open mysql.event CREATE EVENT intact_check_1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; -ERROR HY000: Failed to store event name. Error code 2 from storage engine. +ERROR HY000: Failed to open mysql.event ALTER EVENT intact_check_1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; -ERROR HY000: Unknown event 'intact_check_1' +ERROR HY000: Failed to open mysql.event ALTER EVENT intact_check_1 RENAME TO intact_check_2; -ERROR HY000: Unknown event 'intact_check_1' +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check_1; -ERROR HY000: Unknown event 'intact_check_1' +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check_2; -ERROR HY000: Unknown event 'intact_check_2' +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check; -ERROR HY000: Unknown event 'intact_check' +ERROR HY000: Failed to open mysql.event DROP DATABASE IF EXISTS mysqltest_no_such_database; Warnings: Note 1008 Can't drop database 'mysqltest_no_such_database'; database doesn't exist CREATE DATABASE mysqltest_db2; DROP DATABASE mysqltest_db2; +Warnings: +Error 1545 Failed to open mysql.event SELECT @@event_scheduler; @@event_scheduler OFF @@ -345,29 +352,32 @@ Drop some columns and try more checks. ALTER TABLE mysql.event DROP comment, DROP starts; SHOW EVENTS; -ERROR HY000: Cannot load from mysql.event. The table is probably corrupted +ERROR HY000: Failed to open mysql.event SELECT event_name FROM INFORMATION_SCHEMA.EVENTS; -ERROR HY000: Cannot load from mysql.event. The table is probably corrupted +ERROR HY000: Failed to open mysql.event SHOW CREATE EVENT intact_check; -ERROR HY000: Cannot load from mysql.event. The table is probably corrupted +ERROR HY000: Failed to open mysql.event DROP EVENT no_such_event; -ERROR HY000: Unknown event 'no_such_event' +ERROR HY000: Failed to open mysql.event CREATE EVENT intact_check_1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; -ERROR HY000: Column count of mysql.event is wrong. Expected 22, found 20. 
The table is probably corrupted +ERROR HY000: Failed to open mysql.event ALTER EVENT intact_check_1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; -ERROR HY000: Unknown event 'intact_check_1' +ERROR HY000: Failed to open mysql.event ALTER EVENT intact_check_1 RENAME TO intact_check_2; -ERROR HY000: Unknown event 'intact_check_1' +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check_1; -ERROR HY000: Unknown event 'intact_check_1' +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check_2; -ERROR HY000: Unknown event 'intact_check_2' +ERROR HY000: Failed to open mysql.event DROP EVENT intact_check; +ERROR HY000: Failed to open mysql.event DROP DATABASE IF EXISTS mysqltest_no_such_database; Warnings: Note 1008 Can't drop database 'mysqltest_no_such_database'; database doesn't exist CREATE DATABASE mysqltest_db2; DROP DATABASE mysqltest_db2; +Warnings: +Error 1545 Failed to open mysql.event SELECT @@event_scheduler; @@event_scheduler OFF @@ -425,4 +435,42 @@ CREATE TABLE mysql.event like event_like; DROP TABLE event_like; SHOW EVENTS; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation + +# +# Bug#12394306: the sever may crash if mysql.event is corrupted +# + +CREATE EVENT ev1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; +ALTER EVENT ev1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; + +CREATE TABLE event_original LIKE mysql.event; +INSERT INTO event_original SELECT * FROM mysql.event; + +ALTER TABLE mysql.event MODIFY modified CHAR(1); +Warnings: +Warning 1265 Data truncated for column 'modified' at row 1 + +SHOW EVENTS; +ERROR HY000: Failed to open mysql.event + +SELECT event_name, created, last_altered FROM information_schema.events; +ERROR HY000: Failed to open mysql.event + +CREATE EVENT ev2 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; +ERROR HY000: Failed to open mysql.event + +ALTER EVENT ev1 ON SCHEDULE EVERY 9 HOUR DO SELECT 9; +ERROR HY000: Failed to open mysql.event + +DROP TABLE mysql.event; +RENAME TABLE event_original TO mysql.event; + +DROP EVENT ev1; + +SHOW EVENTS; +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation + +# +# End of tests +# drop database events_test; diff --git a/mysql-test/r/events_restart.result b/mysql-test/r/events_restart.result index 4db61d357ce..6a751fa29f8 100644 --- a/mysql-test/r/events_restart.result +++ b/mysql-test/r/events_restart.result @@ -1,3 +1,4 @@ +call mtr.add_suppression("Column count of mysql.event is wrong. Expected .*, found .*\. The table is probably corrupted"); set global event_scheduler=off; drop database if exists events_test; create database events_test; @@ -52,6 +53,8 @@ Warnings: Note 1008 Can't drop database 'mysqltest_database_not_exists'; database doesn't exist create database mysqltest_db1; drop database mysqltest_db1; +Warnings: +Error 1545 Failed to open mysql.event Restore the original mysql.event table drop table mysql.event; rename table event_like to mysql.event; diff --git a/mysql-test/t/events_1.test b/mysql-test/t/events_1.test index ccdeb70d291..7f31e3fc881 100644 --- a/mysql-test/t/events_1.test +++ b/mysql-test/t/events_1.test @@ -4,6 +4,8 @@ # Can't test with embedded server that doesn't support grants -- source include/not_embedded.inc +call mtr.add_suppression("Column count of mysql.event is wrong. Expected .*, found .*\. 
The table is probably corrupted"); + --disable_warnings drop database if exists events_test; drop database if exists db_x; @@ -270,23 +272,28 @@ SHOW EVENTS; --echo Try to alter mysql.event: the server should fail to load --echo event information after mysql.event was tampered with. --echo ---echo First, let's add a column to the end and make sure everything ---echo works as before +--echo First, let's add a column to the end and check the error is emitted. --echo ALTER TABLE mysql.event ADD dummy INT; ---replace_column 8 # 9 # +--error ER_EVENT_OPEN_TABLE_FAILED SHOW EVENTS; +--error ER_EVENT_OPEN_TABLE_FAILED SELECT event_name FROM INFORMATION_SCHEMA.events; ---replace_regex /STARTS '[^']+'/STARTS '#'/ +--error ER_EVENT_OPEN_TABLE_FAILED SHOW CREATE EVENT intact_check; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT no_such_event; +--error ER_EVENT_OPEN_TABLE_FAILED CREATE EVENT intact_check_1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; +--error ER_EVENT_OPEN_TABLE_FAILED ALTER EVENT intact_check_1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; +--error ER_EVENT_OPEN_TABLE_FAILED ALTER EVENT intact_check_1 RENAME TO intact_check_2; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check_1; +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check_2; +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check; DROP DATABASE IF EXISTS mysqltest_no_such_database; CREATE DATABASE mysqltest_db2; @@ -296,6 +303,7 @@ SHOW VARIABLES LIKE 'event_scheduler'; SET GLOBAL event_scheduler=OFF; # Clean up ALTER TABLE mysql.event DROP dummy; +DROP EVENT intact_check; CREATE EVENT intact_check ON SCHEDULE EVERY 10 HOUR DO SELECT "nothing"; --echo --echo Now let's add a column to the first position: the server @@ -303,24 +311,26 @@ CREATE EVENT intact_check ON SCHEDULE EVERY 10 HOUR DO SELECT "nothing"; --echo ALTER TABLE mysql.event ADD dummy INT FIRST; --error ER_CANNOT_LOAD_FROM_TABLE +--error ER_EVENT_OPEN_TABLE_FAILED SHOW EVENTS; --error ER_CANNOT_LOAD_FROM_TABLE +--error ER_EVENT_OPEN_TABLE_FAILED SELECT event_name FROM INFORMATION_SCHEMA.events; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED SHOW CREATE EVENT intact_check; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT no_such_event; ---error ER_EVENT_STORE_FAILED +--error ER_EVENT_OPEN_TABLE_FAILED CREATE EVENT intact_check_1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED ALTER EVENT intact_check_1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED ALTER EVENT intact_check_1 RENAME TO intact_check_2; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check_1; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check_2; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check; # Should work OK DROP DATABASE IF EXISTS mysqltest_no_such_database; @@ -341,25 +351,25 @@ INSERT INTO event_like SELECT * FROM mysql.event; --echo --echo ALTER TABLE mysql.event DROP comment, DROP starts; ---error ER_CANNOT_LOAD_FROM_TABLE +--error ER_EVENT_OPEN_TABLE_FAILED SHOW EVENTS; ---error ER_CANNOT_LOAD_FROM_TABLE +--error ER_EVENT_OPEN_TABLE_FAILED SELECT event_name FROM INFORMATION_SCHEMA.EVENTS; ---error ER_CANNOT_LOAD_FROM_TABLE +--error ER_EVENT_OPEN_TABLE_FAILED SHOW CREATE EVENT intact_check; ---error ER_EVENT_DOES_NOT_EXIST 
+--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT no_such_event; ---error ER_COL_COUNT_DOESNT_MATCH_CORRUPTED +--error ER_EVENT_OPEN_TABLE_FAILED CREATE EVENT intact_check_1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED ALTER EVENT intact_check_1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED ALTER EVENT intact_check_1 RENAME TO intact_check_2; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check_1; ---error ER_EVENT_DOES_NOT_EXIST +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check_2; -# Should succeed +--error ER_EVENT_OPEN_TABLE_FAILED DROP EVENT intact_check; DROP DATABASE IF EXISTS mysqltest_no_such_database; CREATE DATABASE mysqltest_db2; @@ -407,9 +417,54 @@ CREATE TABLE mysql.event like event_like; DROP TABLE event_like; --replace_column 8 # 9 # SHOW EVENTS; -# -# End of tests -# + +--echo +--echo # +--echo # Bug#12394306: the sever may crash if mysql.event is corrupted +--echo # + +--echo +CREATE EVENT ev1 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; +ALTER EVENT ev1 ON SCHEDULE EVERY 8 HOUR DO SELECT 8; + +--echo +CREATE TABLE event_original LIKE mysql.event; +INSERT INTO event_original SELECT * FROM mysql.event; + +--echo +ALTER TABLE mysql.event MODIFY modified CHAR(1); + +--echo +--error ER_EVENT_OPEN_TABLE_FAILED +SHOW EVENTS; + +--echo +--error ER_EVENT_OPEN_TABLE_FAILED +SELECT event_name, created, last_altered FROM information_schema.events; + +--echo +--error ER_EVENT_OPEN_TABLE_FAILED +CREATE EVENT ev2 ON SCHEDULE EVERY 5 HOUR DO SELECT 5; + +--echo +--error ER_EVENT_OPEN_TABLE_FAILED +ALTER EVENT ev1 ON SCHEDULE EVERY 9 HOUR DO SELECT 9; + +--echo +DROP TABLE mysql.event; +RENAME TABLE event_original TO mysql.event; + +--echo +DROP EVENT ev1; + +--echo +SHOW EVENTS; + + +--echo +--echo # +--echo # End of tests +--echo # let $wait_condition= select count(*) = 0 from information_schema.processlist diff --git a/mysql-test/t/events_restart.test b/mysql-test/t/events_restart.test index e155fe2ea16..facf2912087 100644 --- a/mysql-test/t/events_restart.test +++ b/mysql-test/t/events_restart.test @@ -1,6 +1,8 @@ # Can't test with embedded server that doesn't support grants -- source include/not_embedded.inc +call mtr.add_suppression("Column count of mysql.event is wrong. Expected .*, found .*\. The table is probably corrupted"); + # # Test that when the server is restarted, it checks mysql.event table, # and disables the scheduler if it's not up to date. diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 7473cf47188..a0765dc6d15 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -582,6 +582,14 @@ Event_db_repository::open_event_table(THD *thd, enum thr_lock_type lock_type, *table= tables.table; tables.table->use_all_columns(); + + if (table_intact.check(*table, &event_table_def)) + { + close_thread_tables(thd); + my_error(ER_EVENT_OPEN_TABLE_FAILED, MYF(0)); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); } -- cgit v1.2.1 From 0efb452e5e3c201274755731d1867b309a34ae37 Mon Sep 17 00:00:00 2001 From: Luis Soares Date: Thu, 5 May 2011 23:48:15 +0100 Subject: BUG#12354268: MYSQLBINLOG --BASE64-OUTPUT=DECODE-ROWS DOES NOT WORK WITH --START-POSITION If setting --start-position to start after the FD event, mysqlbinlog will output an error stating that it has not found an FD event. 
However, its not that mysqlbinlog does not find it but rather that it does not processes it in the regular way (i.e., it does not print it). Given that one is using --base64-output=DECODE-ROWS then not printing it is actually fine. To fix this, we make mysqlbinlog not to complain when it has not printed the FD event, is outputing in base64, but is decoding the rows. --- client/mysqlbinlog.cc | 3 ++- mysql-test/r/mysqlbinlog_base64.result | 10 ++++++++++ mysql-test/t/mysqlbinlog_base64.test | 29 +++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 30a8bddc17c..f451e28de86 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -951,7 +951,8 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev, passed --short-form, because --short-form disables printing row events. */ - if (!print_event_info->printed_fd_event && !short_form) + if (!print_event_info->printed_fd_event && !short_form && + opt_base64_output_mode != BASE64_OUTPUT_DECODE_ROWS) { const char* type_str= ev->get_type_str(); if (opt_base64_output_mode == BASE64_OUTPUT_NEVER) diff --git a/mysql-test/r/mysqlbinlog_base64.result b/mysql-test/r/mysqlbinlog_base64.result index c5e1e2f8ca1..72d49c16cc8 100644 --- a/mysql-test/r/mysqlbinlog_base64.result +++ b/mysql-test/r/mysqlbinlog_base64.result @@ -109,3 +109,13 @@ count(*) 35840 drop table t1; drop table t2; +RESET MASTER; +USE test; +SET @old_binlog_format= @@binlog_format; +SET SESSION binlog_format=ROW; +CREATE TABLE t1(c1 INT); +INSERT INTO t1 VALUES (1); +FLUSH LOGS; +DROP TABLE t1; +SET SESSION binlog_format= @old_binlog_format; +RESET MASTER; diff --git a/mysql-test/t/mysqlbinlog_base64.test b/mysql-test/t/mysqlbinlog_base64.test index fb21e28fdcb..3d3444cea1c 100644 --- a/mysql-test/t/mysqlbinlog_base64.test +++ b/mysql-test/t/mysqlbinlog_base64.test @@ -71,3 +71,32 @@ select count(*) from t2; --remove_file $MYSQLTEST_VARDIR/tmp/mysqlbinlog_base64.sql drop table t1; drop table t2; + +# +# BUG#12354268 +# +# This test verifies that using --start-position with DECODE-ROWS +# does not make mysqlbinlog to output an error stating that it +# does not contain any FD event. +# + +RESET MASTER; +USE test; +SET @old_binlog_format= @@binlog_format; +SET SESSION binlog_format=ROW; +CREATE TABLE t1(c1 INT); +--let $master_binlog= query_get_value(SHOW MASTER STATUS, File, 1) +--let $master_pos= query_get_value(SHOW MASTER STATUS, Position, 1) +--let $MYSQLD_DATADIR= `SELECT @@datadir` + +INSERT INTO t1 VALUES (1); + +FLUSH LOGS; + +--disable_result_log +--exec $MYSQL_BINLOG --base64-output=DECODE-ROWS --start-position=$master_pos -v $MYSQLD_DATADIR/$master_binlog +--enable_result_log + +DROP TABLE t1; +SET SESSION binlog_format= @old_binlog_format; +RESET MASTER; -- cgit v1.2.1 From 8a08fd43411725545a61f16c5c78994d845f9352 Mon Sep 17 00:00:00 2001 From: Luis Soares Date: Fri, 6 May 2011 00:46:53 +0100 Subject: BUG#11762616: BUG#55229: 'POSTION' Fix for all "postion" in Oracle files (s/postion/position). Updated the copyright notices where needed. 
--- client/mysqltest.cc | 4 ++-- extra/replace.c | 18 +++++++++-------- mysql-test/suite/rpl/r/rpl_server_id2.result | 2 +- mysql-test/suite/rpl/t/rpl_row_until.test | 10 +++++----- mysql-test/suite/rpl/t/rpl_server_id2.test | 2 +- sql/handler.h | 17 ++++++++-------- sql/slave.cc | 6 +++--- storage/archive/ha_archive.cc | 26 +++++++++++++----------- storage/ndb/src/kernel/blocks/lgman.cpp | 16 ++++++++------- vio/viosocket.c | 30 +++++++++++++++------------- 10 files changed, 70 insertions(+), 61 deletions(-) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index a1813838a24..c2410b14c19 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -9739,7 +9739,7 @@ int find_set(REP_SETS *sets,REP_SET *find) return i; } } - return i; /* return new postion */ + return i; /* return new position */ } /* find if there is a found_set with same table_offset & found_offset @@ -9759,7 +9759,7 @@ int find_found(FOUND_SET *found_set,uint table_offset, int found_offset) found_set[i].table_offset=table_offset; found_set[i].found_offset=found_offset; found_sets++; - return -i-2; /* return new postion */ + return -i-2; /* return new position */ } /* Return 1 if regexp starts with \b or ends with \b*/ diff --git a/extra/replace.c b/extra/replace.c index fd2d860c212..2df8a58e16a 100644 --- a/extra/replace.c +++ b/extra/replace.c @@ -1,17 +1,19 @@ -/* Copyright (C) 2000 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; version 2 of + the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + 02110-1301 USA */ /* Replace strings in textfile @@ -819,7 +821,7 @@ static short find_set(REP_SETS *sets,REP_SET *find) return (short) i; } } - return (short) i; /* return new postion */ + return (short) i; /* return new position */ } @@ -842,7 +844,7 @@ static short find_found(FOUND_SET *found_set,uint table_offset, found_set[i].table_offset=table_offset; found_set[i].found_offset=found_offset; found_sets++; - return (short) (-i-2); /* return new postion */ + return (short) (-i-2); /* return new position */ } /* Return 1 if regexp starts with \b or ends with \b*/ diff --git a/mysql-test/suite/rpl/r/rpl_server_id2.result b/mysql-test/suite/rpl/r/rpl_server_id2.result index dacb69bc7cb..4f299a1b23b 100644 --- a/mysql-test/suite/rpl/r/rpl_server_id2.result +++ b/mysql-test/suite/rpl/r/rpl_server_id2.result @@ -19,7 +19,7 @@ change master to master_port=MASTER_PORT; start slave until master_log_file='master-bin.000001', master_log_pos=UNTIL_POS; include/wait_for_slave_io_to_start.inc include/wait_for_slave_sql_to_stop.inc -*** checking until postion execution: must be only t1 in the list *** +*** checking until position execution: must be only t1 in the list *** show tables; Tables_in_test t1 diff --git a/mysql-test/suite/rpl/t/rpl_row_until.test b/mysql-test/suite/rpl/t/rpl_row_until.test index afd964ca81a..bf38bd487ea 100644 --- a/mysql-test/suite/rpl/t/rpl_row_until.test +++ b/mysql-test/suite/rpl/t/rpl_row_until.test @@ -9,29 +9,29 @@ connection master; CREATE TABLE t1(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY); INSERT INTO t1 VALUES (1),(2),(3),(4); DROP TABLE t1; -# Save master log postion for query DROP TABLE t1 +# Save master log position for query DROP TABLE t1 save_master_pos; let $master_pos_drop_t1= query_get_value(SHOW BINLOG EVENTS, Pos, 7); let $master_log_file= query_get_value(SHOW BINLOG EVENTS, Log_name, 7); CREATE TABLE t2(n INT NOT NULL AUTO_INCREMENT PRIMARY KEY); -# Save master log postion for query CREATE TABLE t2 +# Save master log position for query CREATE TABLE t2 save_master_pos; let $master_pos_create_t2= query_get_value(SHOW BINLOG EVENTS, Pos, 8); INSERT INTO t2 VALUES (1),(2); save_master_pos; -# Save master log postion for query INSERT INTO t2 VALUES (1),(2); +# Save master log position for query INSERT INTO t2 VALUES (1),(2); let $master_pos_insert1_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 12); sync_slave_with_master; -# Save relay log postion for query INSERT INTO t2 VALUES (1),(2); +# Save relay log position for query INSERT INTO t2 VALUES (1),(2); let $relay_pos_insert1_t2= query_get_value(show slave status, Relay_Log_Pos, 1); connection master; INSERT INTO t2 VALUES (3),(4); DROP TABLE t2; -# Save master log postion for query INSERT INTO t2 VALUES (1),(2); +# Save master log position for query INSERT INTO t2 VALUES (1),(2); let $master_pos_drop_t2= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 17); sync_slave_with_master; diff --git a/mysql-test/suite/rpl/t/rpl_server_id2.test b/mysql-test/suite/rpl/t/rpl_server_id2.test index 32d5e1ec8f2..aeb7292ed17 100644 --- a/mysql-test/suite/rpl/t/rpl_server_id2.test +++ b/mysql-test/suite/rpl/t/rpl_server_id2.test @@ -47,7 +47,7 @@ eval start slave until master_log_file='master-bin.000001', master_log_pos=$unti --source 
include/wait_for_slave_io_to_start.inc --source include/wait_for_slave_sql_to_stop.inc ---echo *** checking until postion execution: must be only t1 in the list *** +--echo *** checking until position execution: must be only t1 in the list *** show tables; # cleanup diff --git a/sql/handler.h b/sql/handler.h index 9acdac700cd..03b0555ae86 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1,18 +1,19 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; version 2 of + the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + 02110-1301 USA */ /* Definitions for parameters to do with handler-routines */ @@ -56,7 +57,7 @@ a table with rnd_next() - We will see all rows (including deleted ones) - Row positions are 'table->s->db_record_offset' apart - If this flag is not set, filesort will do a postion() call for each matched + If this flag is not set, filesort will do a position() call for each matched row to be able to find the row later. 
*/ #define HA_REC_NOT_IN_SEQ (1 << 3) diff --git a/sql/slave.cc b/sql/slave.cc index 6d266245460..dd578064f24 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -97,7 +97,7 @@ static const char *reconnect_messages[SLAVE_RECON_ACT_MAX][SLAVE_RECON_MSG_MAX]= registration on master", "Reconnecting after a failed registration on master", "failed registering on master, reconnecting to try again, \ -log '%s' at postion %s", +log '%s' at position %s", "COM_REGISTER_SLAVE", "Slave I/O thread killed during or after reconnect" }, @@ -105,7 +105,7 @@ log '%s' at postion %s", "Waiting to reconnect after a failed binlog dump request", "Slave I/O thread killed while retrying master dump", "Reconnecting after a failed binlog dump request", - "failed dump request, reconnecting to try again, log '%s' at postion %s", + "failed dump request, reconnecting to try again, log '%s' at position %s", "COM_BINLOG_DUMP", "Slave I/O thread killed during or after reconnect" }, @@ -114,7 +114,7 @@ log '%s' at postion %s", "Slave I/O thread killed while waiting to reconnect after a failed read", "Reconnecting after a failed master event read", "Slave I/O thread: Failed reading log event, reconnecting to retry, \ -log '%s' at postion %s", +log '%s' at position %s", "", "Slave I/O thread killed during or after a reconnect done to recover from \ failed read" diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 988337ec50e..764ed16e931 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -1,17 +1,19 @@ -/* Copyright (C) 2003 MySQL AB +/* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; version 2 of + the License. - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + 02110-1301 USA */ #ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation @@ -864,7 +866,7 @@ int ha_archive::write_row(uchar *buf) */ azflush(&(share->archive_write), Z_SYNC_FLUSH); /* - Set the position of the local read thread to the beginning postion. + Set the position of the local read thread to the beginning position. 
*/ if (read_data_header(&archive)) { diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp index 53cb1e113e1..7dc71e7399a 100644 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ b/storage/ndb/src/kernel/blocks/lgman.cpp @@ -1,17 +1,19 @@ -/* Copyright (C) 2003 MySQL AB +/* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; version 2 of + the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + 02110-1301 USA */ #include "lgman.hpp" #include "diskpage.hpp" @@ -2501,7 +2503,7 @@ Lgman::init_run_undo_log(Signal* signal) sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); /** - * Insert in correct postion in list of logfile_group's + * Insert in correct position in list of logfile_group's */ Ptr pos; for(tmp.first(pos); !pos.isNull(); tmp.next(pos)) diff --git a/vio/viosocket.c b/vio/viosocket.c index f73b890c697..15942fb3e31 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -1,17 +1,19 @@ -/* Copyright (C) 2000 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; version 2 of + the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + 02110-1301 USA */ /* Note that we can't have assertion on file descriptors; The reason for @@ -548,7 +550,7 @@ size_t vio_read_shared_memory(Vio * vio, uchar* buf, size_t size) { size_t length; size_t remain_local; - char *current_postion; + char *current_position; HANDLE events[2]; DBUG_ENTER("vio_read_shared_memory"); @@ -556,7 +558,7 @@ size_t vio_read_shared_memory(Vio * vio, uchar* buf, size_t size) size)); remain_local = size; - current_postion=buf; + current_position=buf; events[0]= vio->event_server_wrote; events[1]= vio->event_conn_closed; @@ -590,11 +592,11 @@ size_t vio_read_shared_memory(Vio * vio, uchar* buf, size_t size) if (length > remain_local) length = remain_local; - memcpy(current_postion,vio->shared_memory_pos,length); + memcpy(current_position,vio->shared_memory_pos,length); vio->shared_memory_remain-=length; vio->shared_memory_pos+=length; - current_postion+=length; + current_position+=length; remain_local-=length; if (!vio->shared_memory_remain) @@ -614,7 +616,7 @@ size_t vio_write_shared_memory(Vio * vio, const uchar* buf, size_t size) { size_t length, remain, sz; HANDLE pos; - const uchar *current_postion; + const uchar *current_position; HANDLE events[2]; DBUG_ENTER("vio_write_shared_memory"); @@ -622,7 +624,7 @@ size_t vio_write_shared_memory(Vio * vio, const uchar* buf, size_t size) size)); remain = size; - current_postion = buf; + current_position = buf; events[0]= vio->event_server_read; events[1]= vio->event_conn_closed; @@ -640,9 +642,9 @@ size_t vio_write_shared_memory(Vio * vio, const uchar* buf, size_t size) int4store(vio->handle_map,sz); pos = vio->handle_map + 4; - memcpy(pos,current_postion,sz); + memcpy(pos,current_position,sz); remain-=sz; - current_postion+=sz; + current_position+=sz; if (!SetEvent(vio->event_client_wrote)) DBUG_RETURN((size_t) -1); } -- cgit v1.2.1 From 79e4b561b721aa78b6d0840ef76529cf4cf31d1c Mon Sep 17 00:00:00 2001 From: Serge Kozlov Date: Mon, 9 May 2011 23:14:24 +0400 Subject: WL#5867 Replaced the error code by error name --- mysql-test/suite/binlog/t/binlog_bug23533.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/binlog/t/binlog_bug23533.test b/mysql-test/suite/binlog/t/binlog_bug23533.test index c05abe788c6..ca610e399e4 100644 --- a/mysql-test/suite/binlog/t/binlog_bug23533.test +++ b/mysql-test/suite/binlog/t/binlog_bug23533.test @@ -35,7 +35,7 @@ connect(default,localhost,root,,test); # Copied data from t1 into t2 large than max_binlog_cache_size START TRANSACTION; ---error 1197 +--error ER_TRANS_CACHE_FULL CREATE TABLE t2 SELECT * FROM t1; COMMIT; SHOW TABLES LIKE 't%'; -- cgit v1.2.1 From f4c75353a726f6e0f2b57775b403529464f88e85 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Tue, 10 May 2011 15:43:30 +0300 Subject: Increment InnoDB Plugin version from 1.0.16 to 1.0.17. InnoDB Plugin 1.0.16 has been released with MySQL 5.1.57. 
--- storage/innodb_plugin/include/univ.i | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innodb_plugin/include/univ.i b/storage/innodb_plugin/include/univ.i index dc75879dbad..6ac227a59a6 100644 --- a/storage/innodb_plugin/include/univ.i +++ b/storage/innodb_plugin/include/univ.i @@ -46,7 +46,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 1 #define INNODB_VERSION_MINOR 0 -#define INNODB_VERSION_BUGFIX 16 +#define INNODB_VERSION_BUGFIX 17 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; -- cgit v1.2.1 From 91133804d1a800bed4e43c07e8f26d9345c78420 Mon Sep 17 00:00:00 2001 From: Vinay Fisrekar Date: Sat, 14 May 2011 21:44:49 +0530 Subject: Adding bug scenario for data types in main suite Impementing Test Review Comment. Bug test scenario: SELECT is not returning result set for "equal" (=) and "NULL safe equal operator" (<=>) on BIT data type. Extending this scenario for all data types --- .../r/implicit_char_to_num_conversion.result | 366 +++++++++++++++++++++ mysql-test/t/implicit_char_to_num_conversion.test | 174 ++++++++++ 2 files changed, 540 insertions(+) create mode 100644 mysql-test/r/implicit_char_to_num_conversion.result create mode 100644 mysql-test/t/implicit_char_to_num_conversion.test diff --git a/mysql-test/r/implicit_char_to_num_conversion.result b/mysql-test/r/implicit_char_to_num_conversion.result new file mode 100644 index 00000000000..8f24a6b293c --- /dev/null +++ b/mysql-test/r/implicit_char_to_num_conversion.result @@ -0,0 +1,366 @@ +DROP TABLE IF EXISTS t5; +CREATE TABLE t5(c1 BIT(2) PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (0), (1), (2); +SELECT HEX(c1) FROM t5 ORDER BY c1; +HEX(c1) +0 +1 +2 +SELECT HEX(c1) FROM t5 WHERE c1 = b'1' ORDER BY c1; +HEX(c1) +1 +SELECT HEX(c1) FROM t5 WHERE c1 <=> b'1' ORDER BY c1; +HEX(c1) +1 +SELECT HEX(c1) FROM t5 WHERE c1 != b'1' ORDER BY c1; +HEX(c1) +0 +2 +SELECT HEX(c1) FROM t5 WHERE c1 >= '1' ORDER BY c1; +HEX(c1) +1 +2 +SELECT HEX(c1) FROM t5 WHERE c1 <= '1' ORDER BY c1; +HEX(c1) +0 +1 +SELECT HEX(c1) FROM t5 WHERE c1 < '1' ORDER BY c1; +HEX(c1) +0 +SELECT HEX(c1) FROM t5 WHERE c1 > '0' ORDER BY c1; +HEX(c1) +1 +2 +DROP TABLE t5; +CREATE TABLE t5(c1 FLOAT(5,2) PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (95.95), (-10.10), (1), (0); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-10.10 +0.00 +1.00 +95.95 +SELECT c1 FROM t5 WHERE c1 >= '95' ORDER BY c1; +c1 +95.95 +SELECT c1 FROM t5 WHERE c1 <= '10.10' ORDER BY c1; +c1 +-10.10 +0.00 +1.00 +SELECT c1 FROM t5 WHERE c1 != '1' ORDER BY c1; +c1 +-10.10 +0.00 +95.95 +SELECT c1 FROM t5 WHERE c1 < '1' ORDER BY c1; +c1 +-10.10 +0.00 +SELECT c1 FROM t5 WHERE c1 > '0' ORDER BY c1; +c1 +1.00 +95.95 +DROP TABLE t5; +CREATE TABLE t5(c1 TINYINT PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (95), (10),(11),(-8); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-8 +10 +11 +95 +SELECT c1 FROM t5 WHERE c1 = '10' ORDER BY c1; +c1 +10 +SELECT c1 FROM t5 WHERE c1 <=> '10' ORDER BY c1; +c1 +10 +SELECT c1 FROM t5 WHERE c1 >= '95' ORDER BY c1; +c1 +95 +SELECT c1 FROM t5 WHERE c1 <= '11' ORDER BY c1; +c1 +-8 +10 +11 +SELECT c1 FROM t5 WHERE c1 != '-8' ORDER BY c1; +c1 +10 +11 +95 +SELECT c1 FROM t5 WHERE c1 < '11' ORDER BY c1; +c1 +-8 +10 +SELECT c1 FROM t5 WHERE c1 > '10' ORDER BY c1; +c1 +11 +95 +DROP TABLE t5; +CREATE TABLE t5(c1 SMALLINT PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (395), (-200), (100), (111); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-200 +100 +111 +395 +SELECT c1 FROM t5 WHERE c1 = '100' ORDER BY c1; +c1 
+100 +SELECT c1 FROM t5 WHERE c1 <=> '100' ORDER BY c1; +c1 +100 +SELECT c1 FROM t5 WHERE c1 >= '395' ORDER BY c1; +c1 +395 +SELECT c1 FROM t5 WHERE c1 <= '-200' ORDER BY c1; +c1 +-200 +SELECT c1 FROM t5 WHERE c1 != '100' ORDER BY c1; +c1 +-200 +111 +395 +SELECT c1 FROM t5 WHERE c1 < '111' ORDER BY c1; +c1 +-200 +100 +SELECT c1 FROM t5 WHERE c1 > '111' ORDER BY c1; +c1 +395 +DROP TABLE t5; +CREATE TABLE t5(c1 MEDIUMINT PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (-8388607), (311),(215),(88608); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-8388607 +215 +311 +88608 +SELECT c1 FROM t5 WHERE c1 = '311' ORDER BY c1; +c1 +311 +SELECT c1 FROM t5 WHERE c1 <=> '311' ORDER BY c1; +c1 +311 +SELECT c1 FROM t5 WHERE c1 >= '215' ORDER BY c1; +c1 +215 +311 +88608 +SELECT c1 FROM t5 WHERE c1 <= '88608' ORDER BY c1; +c1 +-8388607 +215 +311 +88608 +SELECT c1 FROM t5 WHERE c1 != '-8388607' ORDER BY c1; +c1 +215 +311 +88608 +SELECT c1 FROM t5 WHERE c1 < '215' ORDER BY c1; +c1 +-8388607 +SELECT c1 FROM t5 WHERE c1 > '215' ORDER BY c1; +c1 +311 +88608 +DROP TABLE t5; +CREATE TABLE t5(c1 INT PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (-2147483647), (1011),(15),(9388607); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-2147483647 +15 +1011 +9388607 +SELECT c1 FROM t5 WHERE c1 = '9388607' ORDER BY c1; +c1 +9388607 +SELECT c1 FROM t5 WHERE c1 <=> '9388607' ORDER BY c1; +c1 +9388607 +SELECT c1 FROM t5 WHERE c1 >= '15' ORDER BY c1; +c1 +15 +1011 +9388607 +SELECT c1 FROM t5 WHERE c1 <= '1011' ORDER BY c1; +c1 +-2147483647 +15 +1011 +SELECT c1 FROM t5 WHERE c1 != '-2147483647' ORDER BY c1; +c1 +15 +1011 +9388607 +SELECT c1 FROM t5 WHERE c1 < '15' ORDER BY c1; +c1 +-2147483647 +SELECT c1 FROM t5 WHERE c1 > '15' ORDER BY c1; +c1 +1011 +9388607 +DROP TABLE t5; +CREATE TABLE t5(c1 BIGINT PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (-9223372036854775807), (12011),(500),(3372036854775808); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-9223372036854775807 +500 +12011 +3372036854775808 +SELECT c1 FROM t5 WHERE c1 = '-9223372036854775807' ORDER BY c1; +c1 +-9223372036854775807 +SELECT c1 FROM t5 WHERE c1 <=> '-9223372036854775807' ORDER BY c1; +c1 +-9223372036854775807 +SELECT c1 FROM t5 WHERE c1 >= '12011' ORDER BY c1; +c1 +12011 +3372036854775808 +SELECT c1 FROM t5 WHERE c1 <= '500' ORDER BY c1; +c1 +-9223372036854775807 +500 +SELECT c1 FROM t5 WHERE c1 != '3372036854775808' ORDER BY c1; +c1 +-9223372036854775807 +500 +12011 +SELECT c1 FROM t5 WHERE c1 < '12011' ORDER BY c1; +c1 +-9223372036854775807 +500 +SELECT c1 FROM t5 WHERE c1 > '12011' ORDER BY c1; +c1 +3372036854775808 +DROP TABLE t5; +CREATE TABLE t5(c1 DOUBLE(5,2) PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (95.95), (11.11),(5),(-908.92); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 = '11.11' ORDER BY c1; +c1 +11.11 +SELECT c1 FROM t5 WHERE c1 <=> '11.11' ORDER BY c1; +c1 +11.11 +SELECT c1 FROM t5 WHERE c1 >= '5' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 <= '95.95' ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 != '-908.92' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 < '95.95' ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +SELECT c1 FROM t5 WHERE c1 > '-908.92' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +DROP TABLE t5; +CREATE TABLE t5(c1 NUMERIC(5,2) PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (95.95), (11.11),(5),(-908.92); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 = '11.11' ORDER BY c1; +c1 +11.11 +SELECT c1 FROM t5 
WHERE c1 <=> '11.11' ORDER BY c1; +c1 +11.11 +SELECT c1 FROM t5 WHERE c1 >= '5' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 <= '95.95' ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 != '-908.92' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 < '95.95' ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +SELECT c1 FROM t5 WHERE c1 > '-908.92' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +DROP TABLE t5; +CREATE TABLE t5(c1 DECIMAL(5,2) PRIMARY KEY) ENGINE = ; +INSERT INTO t5 VALUES (95.95), (11.11),(5),(-908.92); +SELECT c1 FROM t5 ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 = '11.11' ORDER BY c1; +c1 +11.11 +SELECT c1 FROM t5 WHERE c1 <=> '11.11' ORDER BY c1; +c1 +11.11 +SELECT c1 FROM t5 WHERE c1 >= '5' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 <= '95.95' ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 != '-908.92' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +SELECT c1 FROM t5 WHERE c1 < '95.95' ORDER BY c1; +c1 +-908.92 +5.00 +11.11 +SELECT c1 FROM t5 WHERE c1 > '-908.92' ORDER BY c1; +c1 +5.00 +11.11 +95.95 +DROP TABLE t5; diff --git a/mysql-test/t/implicit_char_to_num_conversion.test b/mysql-test/t/implicit_char_to_num_conversion.test new file mode 100644 index 00000000000..b4948fbc69f --- /dev/null +++ b/mysql-test/t/implicit_char_to_num_conversion.test @@ -0,0 +1,174 @@ +########### implicit_char_to_num_conversion.test ####################### +# # +# This test aims at using string/char literal in comparison operators # +# without explicit type-cast. This is a bug test for Bug#11766521 # +# - Incorrect result is returned if string/char literal is used with # +# comparision operator and bit data type column. Test is extended to # +# include numeric data type comparison with string/char literal # +# # +# # +# Creation: # +# 2011-05-10 vfisrekar Implement this test as part of Bug#11766521 # +# # +######################################################################## + +--disable_warnings +DROP TABLE IF EXISTS t5; +--enable_warnings + +let $default_engine = `select @@SESSION.storage_engine`; + +# Bug#11766521 - BIT Datatype comparison in where clause return incorrect +# result for '=' , '<=>' operators +--replace_result $default_engine +eval CREATE TABLE t5(c1 BIT(2) PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (0), (1), (2); +SELECT HEX(c1) FROM t5 ORDER BY c1; +# Enable Following two select after Bug#11766521 fix +# SELECT HEX(c1) FROM t5 WHERE c1 = '1' ORDER BY c1; +# SELECT HEX(c1) FROM t5 WHERE c1 <=> '1' ORDER BY c1; +SELECT HEX(c1) FROM t5 WHERE c1 = b'1' ORDER BY c1; +SELECT HEX(c1) FROM t5 WHERE c1 <=> b'1' ORDER BY c1; +SELECT HEX(c1) FROM t5 WHERE c1 != b'1' ORDER BY c1; +SELECT HEX(c1) FROM t5 WHERE c1 >= '1' ORDER BY c1; +SELECT HEX(c1) FROM t5 WHERE c1 <= '1' ORDER BY c1; +SELECT HEX(c1) FROM t5 WHERE c1 < '1' ORDER BY c1; +SELECT HEX(c1) FROM t5 WHERE c1 > '0' ORDER BY c1; +DROP TABLE t5; + +# FLOAT Data-type +--replace_result $default_engine +eval CREATE TABLE t5(c1 FLOAT(5,2) PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (95.95), (-10.10), (1), (0); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +# Following two queries does not return result may be due to Bug#11766521. +# Enable them after Bug#11766521 fix. 
+# SELECT c1 FROM t5 WHERE c1 = '10.10' ORDER BY c1; +# SELECT c1 FROM t5 WHERE c2 <=> '11.11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '10.10' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '1' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '1' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '0' ORDER BY c1; +DROP TABLE t5; + +# TINYINT Datatype +--replace_result $default_engine +eval CREATE TABLE t5(c1 TINYINT PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (95), (10),(11),(-8); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '10' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '10' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '-8' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '10' ORDER BY c1; +DROP TABLE t5; + +# SMALLINT Datatype +--replace_result $default_engine +eval CREATE TABLE t5(c1 SMALLINT PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (395), (-200), (100), (111); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '100' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '100' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '395' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '-200' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '100' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '111' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '111' ORDER BY c1; +DROP TABLE t5; + +# MEDIUMINT Datatype +--replace_result $default_engine +eval CREATE TABLE t5(c1 MEDIUMINT PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (-8388607), (311),(215),(88608); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '311' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '311' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '215' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '88608' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '-8388607' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '215' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '215' ORDER BY c1; +DROP TABLE t5; + +# INT Datatype +--replace_result $default_engine +eval CREATE TABLE t5(c1 INT PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (-2147483647), (1011),(15),(9388607); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '9388607' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '9388607' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '15' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '1011' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '-2147483647' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '15' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '15' ORDER BY c1; +DROP TABLE t5; + +# BIGINT Data-type +--replace_result $default_engine +eval CREATE TABLE t5(c1 BIGINT PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (-9223372036854775807), (12011),(500),(3372036854775808); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '-9223372036854775807' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '-9223372036854775807' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '12011' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '500' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '3372036854775808' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '12011' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '12011' ORDER BY c1; +DROP TABLE t5; + +# DOUBLE Datatype +--replace_result 
$default_engine +eval CREATE TABLE t5(c1 DOUBLE(5,2) PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (95.95), (11.11),(5),(-908.92); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '11.11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '11.11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '5' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '95.95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '-908.92' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '95.95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '-908.92' ORDER BY c1; +DROP TABLE t5; + +# NUMERIC Datatype +--replace_result $default_engine +eval CREATE TABLE t5(c1 NUMERIC(5,2) PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (95.95), (11.11),(5),(-908.92); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '11.11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '11.11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '5' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '95.95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '-908.92' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '95.95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '-908.92' ORDER BY c1; +DROP TABLE t5; + +# DECIMAL Datatype +--replace_result $default_engine +eval CREATE TABLE t5(c1 DECIMAL(5,2) PRIMARY KEY) ENGINE = $default_engine; +INSERT INTO t5 VALUES (95.95), (11.11),(5),(-908.92); +SELECT c1 FROM t5 ORDER BY c1; +# Compare with string literal +SELECT c1 FROM t5 WHERE c1 = '11.11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <=> '11.11' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 >= '5' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 <= '95.95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 != '-908.92' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 < '95.95' ORDER BY c1; +SELECT c1 FROM t5 WHERE c1 > '-908.92' ORDER BY c1; +DROP TABLE t5; -- cgit v1.2.1 From 12c42b980aa94105a9108d4f58c5dc75d96ec824 Mon Sep 17 00:00:00 2001 From: Guilhem Bichot Date: Mon, 16 May 2011 22:04:01 +0200 Subject: Fix for BUG#11755168 '46895: test "outfile_loaddata" fails (reproducible)'. In sql_class.cc, 'row_count', of type 'ha_rows', was used as last argument for ER_TRUNCATED_WRONG_VALUE_FOR_FIELD which is "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld". So 'ha_rows' was used as 'long'. On SPARC32 Solaris builds, 'long' is 4 bytes and 'ha_rows' is 'longlong' i.e. 8 bytes. So the printf-like code was reading only the first 4 bytes. Because the CPU is big-endian, 1LL is 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x01 so the first four bytes yield 0. So the warning message had "row 0" instead of "row 1" in test outfile_loaddata.test: -Warning 1366 Incorrect string value: '\xE1\xE2\xF7' for column 'b' at row 1 +Warning 1366 Incorrect string value: '\xE1\xE2\xF7' for column 'b' at row 0 All error-messaging functions which internally invoke some printf-life function are potential candidate for such mistakes. One apparently easy way to catch such mistakes is to use ATTRIBUTE_FORMAT (from my_attribute.h). But this works only when call site has both: a) the format as a string literal b) the types of arguments. So: func(ER(ER_BLAH), 10); will silently not be checked, because ER(ER_BLAH) is not known at compile time (it is known at run-time, and depends on the chosen language). And func("%s", a va_list argument); has the same problem, as the *real* type of arguments is not known at this site at compile time (it's known in some caller). 
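To make this concrete, here is a minimal standalone C sketch (not taken from the MySQL sources; the reporter and lookup names are invented for illustration). Built with gcc -Wall, the first call is diagnosed because the format is a string literal and the mismatched long long argument is visible at the call site; the second call is silently accepted because the format is only known at run time, which is exactly the ER(ER_xxx) situation described above.

  #include <stdarg.h>
  #include <stdio.h>

  /* Hypothetical reporter, declared like my_error_reporter: parameter 1 is a
     printf-style format, the checked arguments start at parameter 2. */
  static void report(const char *fmt, ...)
      __attribute__((format(printf, 1, 2)));

  static void report(const char *fmt, ...)
  {
    va_list args;
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);  /* a va_list itself can never be checked */
    va_end(args);
  }

  /* Stand-in for ER(ER_xxx): the format string is chosen at run time. */
  static const char *lookup_message(int code)
  {
    return code ? "Incorrect value: '%s' for column 'b' at row %ld\n"
                : "Unknown error\n";
  }

  int main(void)
  {
    long long row_count= 1;  /* plays the role of 'ha_rows' */

    /* gcc -Wall warns here: "%ld" expects 'long int', but the argument
       passed is a 'long long int'. */
    report("Incorrect value: '%s' for column 'b' at row %ld\n", "x", row_count);

    /* No warning here: the format is not a string literal, so the attribute
       cannot verify the arguments -- they are "silently not checked". */
    report(lookup_message(1), "x", row_count);
    return 0;
  }
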
Moreover, func(ER(ER_BLAH)); though possibly correct (if ER(ER_BLAH) has no '%' markers), will not compile (gcc says "error: format not a string literal and no format arguments"). Consequences: 1) ATTRIBUTE_FORMAT is here added only to functions which in practice take "string literal" formats: "my_error_reporter" and "print_admin_msg". 2) it cannot be added to the other functions: my_error(), push_warning_printf(), Table_check_intact::report_error(), general_log_print(). To do a one-time check of functions listed in (2), the following "static code analysis" has been done: 1) replace my_error(ER_xxx, arguments for substitution in format) with the equivalent my_printf_error(ER_xxx,ER(ER_xxx), arguments for substitution in format), so that we have ER(ER_xxx) and the arguments *in the same call site* 2) add ATTRIBUTE_FORMAT to push_warning_printf(), Table_check_intact::report_error(), general_log_print() 3) replace ER(xxx) with the hard-coded English text found in errmsg.txt (like: ER(ER_UNKNOWN_ERROR) is replaced with "Unknown error"), so that a call site has the format as string literal 4) this way, ATTRIBUTE_FORMAT can effectively do its job 5) compile, fix errors detected by ATTRIBUTE_FORMAT 6) revert steps 1-2-3. The present patch has no compiler error when submitted again to the static code analysis above. It cannot catch all problems though: see Field::set_warning(), in which a call to push_warning_printf() has a variable error (thus, not replacable by a string literal); I checked set_warning() calls by hand though. See also WL 5883 for one proposal to avoid such bugs from appearing again in the future. The issues fixed in the patch are: a) mismatch in types (like 'int' passed to '%ld') b) more arguments passed than specified in the format. This patch resolves mismatches by changing the type/number of arguments, not by changing error messages of sql/share/errmsg.txt. The latter would be wrong, per the following old rule: errmsg.txt must be as stable as possible; no insertions or deletions of messages, no changes of type or number of printf-like format specifiers, are allowed, as long as the change impacts a message already released in a GA version. If this rule is not followed: - Connectors, which use error message numbers, will be confused (by insertions/deletions of messages) - using errmsg.sys of MySQL 5.1.n with mysqld of MySQL 5.1.(n+1) could produce wrong messages or crash; such usage can easily happen if installing 5.1.(n+1) while /etc/my.cnf still has --language=/path/to/5.1.n/xxx; or if copying mysqld from 5.1.(n+1) into a 5.1.n installation. When fixing b), I have verified that the superfluous arguments were not used in the format in the first 5.1 GA (5.1.30 'bteam@astra04-20081114162938-z8mctjp6st27uobm'). Had they been used, then passing them today, even if the message doesn't use them anymore, would have been necessary, as explained above. include/my_getopt.h: this function pointer is used only with "string literal" formats, so we can add ATTRIBUTE_FORMAT. mysql-test/collections/default.experimental: test should pass now sql/derror.cc: by having a format as string literal, ATTRIBUTE_FORMAT check becomes effective. sql/events.cc: Change justified by the following excerpt from sql/share/errmsg.txt: ER_EVENT_SAME_NAME eng "Same old and new event name" ER_EVENT_SET_VAR_ERROR eng "Error during starting/stopping of the scheduler. Error code %u" sql/field.cc: ER_TOO_BIG_SCALE 42000 S1009 eng "Too big scale %d specified for column '%-.192s'. Maximum is %lu." 
ER_TOO_BIG_PRECISION 42000 S1009 eng "Too big precision %d specified for column '%-.192s'. Maximum is %lu." ER_TOO_BIG_DISPLAYWIDTH 42000 S1009 eng "Display width out of range for column '%-.192s' (max = %lu)" sql/ha_ndbcluster.cc: ER_OUTOFMEMORY HY001 S1001 eng "Out of memory; restart server and try again (needed %d bytes)" (sizeof() returns size_t) sql/ha_ndbcluster_binlog.cc: Too many arguments for: ER_GET_ERRMSG eng "Got error %d '%-.100s' from %s" Patch by Jonas Oreland. sql/ha_partition.cc: print_admin_msg() is used only with a literal as format, so ATTRIBUTE_FORMAT works. sql/handler.cc: ER_OUTOFMEMORY HY001 S1001 eng "Out of memory; restart server and try again (needed %d bytes)" (sizeof() returns size_t) sql/item_create.cc: ER_TOO_BIG_SCALE 42000 S1009 eng "Too big scale %d specified for column '%-.192s'. Maximum is %lu." ER_TOO_BIG_PRECISION 42000 S1009 eng "Too big precision %d specified for column '%-.192s'. Maximum is %lu." 'c_len' and 'c_dec' are char*, passed as %d !! We don't know their value (as strtoul() failed), but they are likely big, so we use INT_MAX. 'len' is ulong. sql/item_func.cc: ER_WARN_DATA_OUT_OF_RANGE 22003 eng "Out of range value for column '%s' at row %ld" ER_CANT_FIND_UDF eng "Can't load function '%-.192s'" sql/item_strfunc.cc: ER_TOO_BIG_FOR_UNCOMPRESS eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)" max_allowed_packet is ulong. sql/mysql_priv.h: sql_print_message_func is a function _pointer_. sql/sp_head.cc: ER_SP_RECURSION_LIMIT eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.192s" max_sp_recursion_depth is ulong sql/sql_acl.cc: ER_PASSWORD_NO_MATCH 42000 eng "Can't find any matching row in the user table" ER_CANT_CREATE_USER_WITH_GRANT 42000 eng "You are not allowed to create a user with GRANT" sql/sql_base.cc: ER_NOT_KEYFILE eng "Incorrect key file for table '%-.200s'; try to repair it" ER_TOO_MANY_TABLES eng "Too many tables; MySQL can only use %d tables in a join" MAX_TABLES is size_t. sql/sql_binlog.cc: ER_UNKNOWN_ERROR eng "Unknown error" sql/sql_class.cc: ER_TRUNCATED_WRONG_VALUE_FOR_FIELD eng "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld" WARN_DATA_TRUNCATED 01000 eng "Data truncated for column '%s' at row %ld" sql/sql_connect.cc: ER_HANDSHAKE_ERROR 08S01 eng "Bad handshake" ER_BAD_HOST_ERROR 08S01 eng "Can't get hostname for your address" sql/sql_insert.cc: ER_WRONG_VALUE_COUNT_ON_ROW 21S01 eng "Column count doesn't match value count at row %ld" sql/sql_parse.cc: ER_WARN_HOSTNAME_WONT_WORK eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work" ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT eng "Too high level of nesting for select" ER_UNKNOWN_ERROR eng "Unknown error" sql/sql_partition.cc: ER_OUTOFMEMORY HY001 S1001 eng "Out of memory; restart server and try again (needed %d bytes)" sql/sql_plugin.cc: ER_OUTOFMEMORY HY001 S1001 eng "Out of memory; restart server and try again (needed %d bytes)" sql/sql_prepare.cc: ER_OUTOFMEMORY HY001 S1001 eng "Out of memory; restart server and try again (needed %d bytes)" ER_UNKNOWN_STMT_HANDLER eng "Unknown prepared statement handler (%.*s) given to %s" length value (for '%.*s') must be 'int', per the doc of printf() and the code of my_vsnprintf(). 
sql/sql_show.cc: ER_OUTOFMEMORY HY001 S1001 eng "Out of memory; restart server and try again (needed %d bytes)" sql/sql_table.cc: ER_TOO_BIG_FIELDLENGTH 42000 S1009 eng "Column length too big for column '%-.192s' (max = %lu); use BLOB or TEXT instead" sql/table.cc: ER_NOT_FORM_FILE eng "Incorrect information in file: '%-.200s'" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error." table->s->mysql_version is ulong. sql/unireg.cc: ER_TOO_LONG_TABLE_COMMENT eng "Comment for table '%-.64s' is too long (max = %lu)" ER_TOO_LONG_FIELD_COMMENT eng "Comment for field '%-.64s' is too long (max = %lu)" ER_TOO_BIG_ROWSIZE 42000 eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs" --- include/my_getopt.h | 8 +++++--- mysql-test/collections/default.experimental | 1 - sql/derror.cc | 21 +++++---------------- sql/events.cc | 8 ++++---- sql/field.cc | 13 ++++++------- sql/ha_ndbcluster.cc | 7 +++---- sql/ha_ndbcluster_binlog.cc | 15 ++++++++------- sql/ha_partition.cc | 8 ++++++-- sql/handler.cc | 5 +++-- sql/item_create.cc | 18 +++++++++--------- sql/item_func.cc | 9 ++++----- sql/item_strfunc.cc | 8 ++++---- sql/mysql_priv.h | 6 +++--- sql/sp_head.cc | 6 +++--- sql/sql_acl.cc | 5 ++--- sql/sql_base.cc | 4 ++-- sql/sql_binlog.cc | 4 ++-- sql/sql_class.cc | 9 ++++----- sql/sql_connect.cc | 28 +++++++++++++--------------- sql/sql_insert.cc | 7 +++---- sql/sql_parse.cc | 11 +++++------ sql/sql_partition.cc | 2 +- sql/sql_plugin.cc | 21 +++++++++++++-------- sql/sql_prepare.cc | 16 ++++++++-------- sql/sql_show.cc | 2 +- sql/sql_table.cc | 6 ++++-- sql/table.cc | 5 +++-- sql/unireg.cc | 17 +++++++++-------- 28 files changed, 133 insertions(+), 137 deletions(-) diff --git a/include/my_getopt.h b/include/my_getopt.h index d7c996302fd..a379e8c4716 100644 --- a/include/my_getopt.h +++ b/include/my_getopt.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2002-2004 MySQL AB +/* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _my_getopt_h #define _my_getopt_h @@ -59,7 +59,9 @@ struct my_option }; typedef my_bool (*my_get_one_option)(int, const struct my_option *, char *); -typedef void (*my_error_reporter)(enum loglevel level, const char *format, ...); +typedef void (*my_error_reporter)(enum loglevel level, const char *format, ...) + ATTRIBUTE_FORMAT_FPTR(printf, 2, 3); + /** Used to retrieve a reference to the object (variable) that holds the value for the given option. 
For example, if var_type is GET_UINT, the function diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index fb8c6845a5f..da2df7a72e8 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -19,7 +19,6 @@ innodb_plugin.* @solaris # Bug#56063 InnoDB Plugin mysql-tests f main.ctype_gbk_binlog @solaris # Bug#46010: main.ctype_gbk_binlog fails sporadically : Table 't2' already exists main.func_str @solaris # joro: Bug#40928 main.sp @solaris # joro : Bug#54138 -main.outfile_loaddata @solaris # joro : Bug #46895 ndb.* # joro : NDB tests marked as experimental as agreed with bochklin diff --git a/sql/derror.cc b/sql/derror.cc index a8cfa00ad1d..fa10a22dca4 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2005 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -96,7 +95,6 @@ static bool read_texts(const char *file_name,const char ***point, char name[FN_REFLEN]; uchar *buff; uchar head[32],*pos; - const char *errmsg; DBUG_ENTER("read_texts"); LINT_INIT(buff); @@ -168,18 +166,9 @@ Check that the above file is the right version for this program!", DBUG_RETURN(0); err: - switch (funktpos) { - case 2: - errmsg= "Not enough memory for messagefile '%s'"; - break; - case 1: - errmsg= "Can't read from messagefile '%s'"; - break; - default: - errmsg= "Can't find messagefile '%s'"; - break; - } - sql_print_error(errmsg, name); + sql_print_error((funktpos == 2) ? "Not enough memory for messagefile '%s'" : + ((funktpos == 1) ? "Can't read from messagefile '%s'" : + "Can't find messagefile '%s'"), name); err1: if (file != FERR) VOID(my_close(file,MYF(MY_WME))); diff --git a/sql/events.cc b/sql/events.cc index afae512c61d..7edd863ac41 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2004-2006 MySQL AB +/* Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mysql_priv.h" #include "events.h" @@ -547,7 +547,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, !sortcmp_lex_string(parse_data->name, *new_name, system_charset_info)) { - my_error(ER_EVENT_SAME_NAME, MYF(0), parse_data->name.str); + my_error(ER_EVENT_SAME_NAME, MYF(0)); DBUG_RETURN(TRUE); } @@ -1150,7 +1150,7 @@ Events::switch_event_scheduler_state(enum_opt_event_scheduler new_state) if (ret) { - my_error(ER_EVENT_SET_VAR_ERROR, MYF(0)); + my_error(ER_EVENT_SET_VAR_ERROR, MYF(0), 0); goto end; } diff --git a/sql/field.cc b/sql/field.cc index a5f946d600c..6bd4e4beda1 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -9585,7 +9584,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, if (decimals >= NOT_FIXED_DEC) { my_error(ER_TOO_BIG_SCALE, MYF(0), decimals, fld_name, - NOT_FIXED_DEC-1); + static_cast(NOT_FIXED_DEC - 1)); DBUG_RETURN(TRUE); } @@ -9655,8 +9654,8 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, my_decimal_trim(&length, &decimals); if (length > DECIMAL_MAX_PRECISION) { - my_error(ER_TOO_BIG_PRECISION, MYF(0), length, fld_name, - DECIMAL_MAX_PRECISION); + my_error(ER_TOO_BIG_PRECISION, MYF(0), static_cast(length), + fld_name, static_cast(DECIMAL_MAX_PRECISION)); DBUG_RETURN(TRUE); } if (length < decimals) @@ -9881,7 +9880,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, if (length > MAX_BIT_FIELD_LENGTH) { my_error(ER_TOO_BIG_DISPLAYWIDTH, MYF(0), fld_name, - MAX_BIT_FIELD_LENGTH); + static_cast(MAX_BIT_FIELD_LENGTH)); DBUG_RETURN(TRUE); } pack_length= (length + 7) / 8; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 4f99d354754..eefdbd3b01b 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -8411,7 +8410,7 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, DBUG_PRINT("error", ("get_share: failed to alloc share")); if (!have_lock) pthread_mutex_unlock(&ndbcluster_mutex); - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(*share)); + my_error(ER_OUTOFMEMORY, MYF(0), static_cast(sizeof(*share))); DBUG_RETURN(0); } } diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 27af3f2cf2f..631391e7408 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mysql_priv.h" #include "sql_show.h" @@ -1198,12 +1197,14 @@ ndbcluster_update_slock(THD *thd, } if (ndb_error) + { + char buf[1024]; + my_snprintf(buf, sizeof(buf), "Could not release lock on '%s.%s'", + db, table_name); push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - ndb_error->code, - ndb_error->message, - "Could not release lock on '%s.%s'", - db, table_name); + ndb_error->code, ndb_error->message, buf); + } if (trans) ndb->closeTransaction(trans); ndb->setDatabaseName(save_db); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index d3858eae0d4..b5363f8235c 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1,4 +1,4 @@ -/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* This handler was developed by Mikael Ronstrom for version 5.1 of MySQL. @@ -1023,6 +1023,10 @@ static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, (modelled after mi_check_print_msg) TODO: move this into the handler, or rewrite mysql_admin_table. */ +static bool print_admin_msg(THD* thd, const char* msg_type, + const char* db_name, const char* table_name, + const char* op_name, const char *fmt, ...) + ATTRIBUTE_FORMAT(printf, 6, 7); static bool print_admin_msg(THD* thd, const char* msg_type, const char* db_name, const char* table_name, const char* op_name, const char *fmt, ...) 
diff --git a/sql/handler.cc b/sql/handler.cc index 718529fa5fc..82f5f2ee841 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -275,7 +275,7 @@ handler *get_ha_partition(partition_info *part_info) } else { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(ha_partition)); + my_error(ER_OUTOFMEMORY, MYF(0), static_cast(sizeof(ha_partition))); } DBUG_RETURN(((handler*) partition)); } @@ -1604,7 +1604,8 @@ int ha_recover(HASH *commit_list) } if (!info.list) { - sql_print_error(ER(ER_OUTOFMEMORY), info.len*sizeof(XID)); + sql_print_error(ER(ER_OUTOFMEMORY), + static_cast(info.len*sizeof(XID))); DBUG_RETURN(1); } diff --git a/sql/item_create.cc b/sql/item_create.cc index 5726e987ef6..d1938abf264 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -5083,8 +5083,8 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type, decoded_size= strtoul(c_len, NULL, 10); if (errno != 0) { - my_error(ER_TOO_BIG_PRECISION, MYF(0), c_len, a->name, - DECIMAL_MAX_PRECISION); + my_error(ER_TOO_BIG_PRECISION, MYF(0), INT_MAX, a->name, + static_cast(DECIMAL_MAX_PRECISION)); return NULL; } len= decoded_size; @@ -5097,8 +5097,8 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type, decoded_size= strtoul(c_dec, NULL, 10); if ((errno != 0) || (decoded_size > UINT_MAX)) { - my_error(ER_TOO_BIG_SCALE, MYF(0), c_dec, a->name, - DECIMAL_MAX_SCALE); + my_error(ER_TOO_BIG_SCALE, MYF(0), INT_MAX, a->name, + static_cast(DECIMAL_MAX_SCALE)); return NULL; } dec= decoded_size; @@ -5111,14 +5111,14 @@ create_func_cast(THD *thd, Item *a, Cast_target cast_type, } if (len > DECIMAL_MAX_PRECISION) { - my_error(ER_TOO_BIG_PRECISION, MYF(0), len, a->name, - DECIMAL_MAX_PRECISION); + my_error(ER_TOO_BIG_PRECISION, MYF(0), static_cast(len), a->name, + static_cast(DECIMAL_MAX_PRECISION)); return 0; } if (dec > DECIMAL_MAX_SCALE) { my_error(ER_TOO_BIG_SCALE, MYF(0), dec, a->name, - DECIMAL_MAX_SCALE); + static_cast(DECIMAL_MAX_SCALE)); return 0; } res= new (thd->mem_root) Item_decimal_typecast(a, len, dec); diff --git a/sql/item_func.cc b/sql/item_func.cc index 6a9c47954b7..b1398b78b84 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -1066,7 +1065,7 @@ err: push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_WARN_DATA_OUT_OF_RANGE, ER(ER_WARN_DATA_OUT_OF_RANGE), - name, 1); + name, 1L); return dec; } @@ -2851,7 +2850,7 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func, if (!tmp_udf) { - my_error(ER_CANT_FIND_UDF, MYF(0), u_d->name.str, errno); + my_error(ER_CANT_FIND_UDF, MYF(0), u_d->name.str); DBUG_RETURN(TRUE); } u_d=tmp_udf; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index affe0f8e17d..d72e15c3636 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -3465,7 +3464,8 @@ String *Item_func_uncompress::val_str(String *str) push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR, ER_TOO_BIG_FOR_UNCOMPRESS, ER(ER_TOO_BIG_FOR_UNCOMPRESS), - current_thd->variables.max_allowed_packet); + static_cast(current_thd->variables. + max_allowed_packet)); goto err; } if (buffer.realloc((uint32)new_size)) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 8f9a9080d12..664092adbc1 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -829,7 +829,7 @@ void sql_print_warning(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2); void sql_print_information(const char *format, ...) ATTRIBUTE_FORMAT(printf, 1, 2); typedef void (*sql_print_message_func)(const char *format, ...) - ATTRIBUTE_FORMAT(printf, 1, 2); + ATTRIBUTE_FORMAT_FPTR(printf, 1, 2); extern sql_print_message_func sql_print_message_handlers[]; int error_log_print(enum loglevel level, const char *format, diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 2473abea3c7..a4dd51d8a4a 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -1,4 +1,4 @@ -/* Copyright 2002-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mysql_priv.h" #ifdef USE_PRAGMA_IMPLEMENTATION @@ -1060,7 +1060,7 @@ void sp_head::recursion_level_error(THD *thd) if (m_type == TYPE_ENUM_PROCEDURE) { my_error(ER_SP_RECURSION_LIMIT, MYF(0), - thd->variables.max_sp_recursion_depth, + static_cast(thd->variables.max_sp_recursion_depth), m_name.str); } else diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 718da07bc86..6d5d34d0602 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1970,13 +1970,12 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, */ else if (!password_len && no_auto_create) { - my_error(ER_PASSWORD_NO_MATCH, MYF(0), combo.user.str, combo.host.str); + my_error(ER_PASSWORD_NO_MATCH, MYF(0)); goto end; } else if (!can_create_user) { - my_error(ER_CANT_CREATE_USER_WITH_GRANT, MYF(0), - thd->security_ctx->user, thd->security_ctx->host_or_ip); + my_error(ER_CANT_CREATE_USER_WITH_GRANT, MYF(0)); goto end; } old_row_exists = 0; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index dc78f3b84c6..87d28402e01 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -4008,7 +4008,7 @@ retry: { /* Give right error message */ thd->clear_error(); - my_error(ER_NOT_KEYFILE, MYF(0), share->table_name.str, my_errno); + my_error(ER_NOT_KEYFILE, MYF(0), share->table_name.str); sql_print_error("Couldn't repair table: %s.%s", share->db.str, share->table_name.str); if (entry->file) @@ -7686,7 +7686,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context, } if (tablenr > MAX_TABLES) { - my_error(ER_TOO_MANY_TABLES,MYF(0),MAX_TABLES); + my_error(ER_TOO_MANY_TABLES, MYF(0), static_cast(MAX_TABLES)); DBUG_RETURN(1); } for (table_list= tables; diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc index 31fd2de3722..6cd747de492 100644 --- a/sql/sql_binlog.cc +++ b/sql/sql_binlog.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2005-2006 MySQL AB +/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -232,7 +232,7 @@ void mysql_client_binlog_statement(THD* thd) TODO: Maybe a better error message since the BINLOG statement now contains several events. */ - my_error(ER_UNKNOWN_ERROR, MYF(0), "Error executing BINLOG statement"); + my_error(ER_UNKNOWN_ERROR, MYF(0)); goto end; } } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index ae21a5335fd..100ccc46371 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /***************************************************************************** ** @@ -2040,7 +2039,7 @@ bool select_export::send_data(List &items) ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), "string", printable_buff, - item->name, row_count); + item->name, static_cast(row_count)); } else if (from_end_pos < res->ptr() + res->length()) { @@ -2049,7 +2048,7 @@ bool select_export::send_data(List &items) */ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED), - item->full_name(), row_count); + item->full_name(), static_cast(row_count)); } cvt_str.length(bytes); res= &cvt_str; diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 406998537e4..1b27efdd39a 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2007 MySQL AB +/* Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Functions to autenticate and handle reqests for a connection @@ -342,7 +341,7 @@ check_user(THD *thd, enum enum_server_command command, passwd_len != SCRAMBLE_LENGTH && passwd_len != SCRAMBLE_LENGTH_323) { - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); DBUG_RETURN(1); } @@ -373,7 +372,7 @@ check_user(THD *thd, enum enum_server_command command, my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) { inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); DBUG_RETURN(1); } /* Final attempt to check the user based on reply */ @@ -773,7 +772,7 @@ static int check_connection(THD *thd) if (vio_peer_addr(net->vio, ip, &thd->peer_port)) { - my_error(ER_BAD_HOST_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_BAD_HOST_ERROR, MYF(0)); return 1; } if (!(thd->main_security_ctx.ip= my_strdup(ip,MYF(MY_WME)))) @@ -873,8 +872,7 @@ static int check_connection(THD *thd) pkt_len < MIN_HANDSHAKE_SIZE) { inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), - thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } } @@ -918,7 +916,7 @@ static int check_connection(THD *thd) if (!ssl_acceptor_fd) { inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } DBUG_PRINT("info", ("IO layer change in progress...")); @@ -926,7 +924,7 @@ static int check_connection(THD *thd) { DBUG_PRINT("error", ("Failed to accept new SSL connection")); inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, 
MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } DBUG_PRINT("info", ("Reading user information over SSL layer")); @@ -936,7 +934,7 @@ static int check_connection(THD *thd) DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)", pkt_len)); inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } } @@ -945,7 +943,7 @@ static int check_connection(THD *thd) if (end > (char *)net->read_pos + pkt_len) { inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } @@ -968,7 +966,7 @@ static int check_connection(THD *thd) if (user == NULL) { inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } @@ -1000,7 +998,7 @@ static int check_connection(THD *thd) if (passwd == NULL) { inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } @@ -1014,7 +1012,7 @@ static int check_connection(THD *thd) if (db == NULL) { inc_host_errors(&thd->remote.sin_addr); - my_error(ER_HANDSHAKE_ERROR, MYF(0), thd->main_security_ctx.host_or_ip); + my_error(ER_HANDSHAKE_ERROR, MYF(0)); return 1; } } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index e2f93ee4de5..19f3255184e 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1,4 +1,4 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Insert of records */ @@ -3780,7 +3779,7 @@ select_create::prepare(List &values, SELECT_LEX_UNIT *u) if (table->s->fields < values.elements) { - my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1L); DBUG_RETURN(-1); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 8ef23806d91..ecc43f54fa5 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #define MYSQL_LEX 1 #include "mysql_priv.h" @@ -3981,8 +3981,7 @@ end_with_restore_list: hostname_requires_resolving(user->host.str)) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_HOSTNAME_WONT_WORK, - ER(ER_WARN_HOSTNAME_WONT_WORK), - user->host.str); + ER(ER_WARN_HOSTNAME_WONT_WORK)); // Are we trying to change a password of another user DBUG_ASSERT(user->host.str != 0); if (strcmp(thd->security_ctx->user, user->user.str) || @@ -5889,7 +5888,7 @@ mysql_new_select(LEX *lex, bool move_down) lex->nest_level++; if (lex->nest_level > (int) MAX_SELECT_NESTING) { - my_error(ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT,MYF(0),MAX_SELECT_NESTING); + my_error(ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT, MYF(0)); DBUG_RETURN(1); } select_lex->nest_level= lex->nest_level; @@ -6936,7 +6935,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, When an error is returned, my_message may have not been called and the client will hang waiting for a response. */ - my_error(ER_UNKNOWN_ERROR, MYF(0), "FLUSH PRIVILEGES failed"); + my_error(ER_UNKNOWN_ERROR, MYF(0)); } } diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index d10f695f535..d743c5908ca 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -6571,7 +6571,7 @@ void set_key_field_ptr(KEY *key_info, const uchar *new_buf, void mem_alloc_error(size_t size) { - my_error(ER_OUTOFMEMORY, MYF(0), size); + my_error(ER_OUTOFMEMORY, MYF(0), static_cast(size)); } #ifdef WITH_PARTITION_STORAGE_ENGINE diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 03a729258ca..15e2f2494b7 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2005 MySQL AB +/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mysql_priv.h" #include @@ -495,9 +495,11 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report) { free_plugin_mem(&plugin_dl); if (report & REPORT_TO_USER) - my_error(ER_OUTOFMEMORY, MYF(0), plugin_dl.dl.length); + my_error(ER_OUTOFMEMORY, MYF(0), + static_cast(plugin_dl.dl.length)); if (report & REPORT_TO_LOG) - sql_print_error(ER(ER_OUTOFMEMORY), plugin_dl.dl.length); + sql_print_error(ER(ER_OUTOFMEMORY), + static_cast(plugin_dl.dl.length)); DBUG_RETURN(0); } /* @@ -520,9 +522,10 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report) { free_plugin_mem(&plugin_dl); if (report & REPORT_TO_USER) - my_error(ER_OUTOFMEMORY, MYF(0), plugin_dl.dl.length); + my_error(ER_OUTOFMEMORY, MYF(0), static_cast(plugin_dl.dl.length)); if (report & REPORT_TO_LOG) - sql_print_error(ER(ER_OUTOFMEMORY), plugin_dl.dl.length); + sql_print_error(ER(ER_OUTOFMEMORY), + static_cast(plugin_dl.dl.length)); DBUG_RETURN(0); } plugin_dl.dl.length= copy_and_convert(plugin_dl.dl.str, plugin_dl.dl.length, @@ -534,9 +537,11 @@ static st_plugin_dl *plugin_dl_add(const LEX_STRING *dl, int report) { free_plugin_mem(&plugin_dl); if (report & REPORT_TO_USER) - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(struct st_plugin_dl)); + my_error(ER_OUTOFMEMORY, MYF(0), + static_cast(sizeof(struct st_plugin_dl))); if (report & REPORT_TO_LOG) - sql_print_error(ER(ER_OUTOFMEMORY), sizeof(struct st_plugin_dl)); + sql_print_error(ER(ER_OUTOFMEMORY), + static_cast(sizeof(struct st_plugin_dl))); DBUG_RETURN(0); } DBUG_RETURN(tmp); diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index a94d1e519db..b296eb22cdb 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file @@ -1375,7 +1375,7 @@ static int mysql_test_select(Prepared_statement *stmt, if (!lex->result && !(lex->result= new (stmt->mem_root) select_send)) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(select_send)); + my_error(ER_OUTOFMEMORY, MYF(0), static_cast(sizeof(select_send))); goto error; } @@ -2478,7 +2478,7 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length) if (!(stmt= find_prepared_statement(thd, stmt_id))) { char llbuf[22]; - my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), sizeof(llbuf), + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), static_cast(sizeof(llbuf)), llstr(stmt_id, llbuf), "mysqld_stmt_execute"); DBUG_VOID_RETURN; } @@ -2536,7 +2536,7 @@ void mysql_sql_stmt_execute(THD *thd) if (!(stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name))) { my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), - name->length, name->str, "EXECUTE"); + static_cast(name->length), name->str, "EXECUTE"); DBUG_VOID_RETURN; } @@ -2578,7 +2578,7 @@ void mysqld_stmt_fetch(THD *thd, char *packet, uint packet_length) if (!(stmt= find_prepared_statement(thd, stmt_id))) { char llbuf[22]; - my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), sizeof(llbuf), + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), static_cast(sizeof(llbuf)), llstr(stmt_id, llbuf), "mysqld_stmt_fetch"); DBUG_VOID_RETURN; } @@ -2645,7 +2645,7 @@ void mysqld_stmt_reset(THD *thd, char *packet) if (!(stmt= find_prepared_statement(thd, stmt_id))) { char llbuf[22]; - my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), sizeof(llbuf), + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), static_cast(sizeof(llbuf)), llstr(stmt_id, llbuf), "mysqld_stmt_reset"); DBUG_VOID_RETURN; } @@ -2720,7 +2720,7 @@ void mysql_sql_stmt_close(THD *thd) if (! 
(stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name))) my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), - name->length, name->str, "DEALLOCATE PREPARE"); + static_cast(name->length), name->str, "DEALLOCATE PREPARE"); else if (stmt->is_in_use()) my_error(ER_PS_NO_RECURSION, MYF(0)); else diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 5b835096042..3c185e4c088 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -7150,7 +7150,7 @@ static TABLE_LIST *get_trigger_table_impl( if (!(table= (TABLE_LIST *)thd->calloc(sizeof(TABLE_LIST)))) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(TABLE_LIST)); + my_error(ER_OUTOFMEMORY, MYF(0), static_cast(sizeof(TABLE_LIST))); return NULL; } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index c5fc037a49e..58e2684e5b7 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2452,7 +2452,8 @@ int prepare_create_field(Create_field *sql_field, MAX_FIELD_CHARLENGTH) { my_printf_error(ER_TOO_BIG_FIELDLENGTH, ER(ER_TOO_BIG_FIELDLENGTH), - MYF(0), sql_field->field_name, MAX_FIELD_CHARLENGTH); + MYF(0), sql_field->field_name, + static_cast(MAX_FIELD_CHARLENGTH)); DBUG_RETURN(1); } } @@ -3504,7 +3505,8 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field) MODE_STRICT_ALL_TABLES))) { my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name, - MAX_FIELD_VARCHARLENGTH / sql_field->charset->mbmaxlen); + static_cast(MAX_FIELD_VARCHARLENGTH / + sql_field->charset->mbmaxlen)); DBUG_RETURN(1); } sql_field->sql_type= MYSQL_TYPE_BLOB; diff --git a/sql/table.cc b/sql/table.cc index 7dbf02027fa..22333a2b76b 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2265,7 +2265,7 @@ void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg) default: /* Better wrong error than none */ case 4: strxmov(buff, share->normalized_path.str, reg_ext, NullS); - my_error(ER_NOT_FORM_FILE, errortype, buff, 0); + my_error(ER_NOT_FORM_FILE, errortype, buff); break; } DBUG_VOID_RETURN; @@ -2835,7 +2835,8 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) report_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, ER(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE), table->alias, table_def->count, table->s->fields, - table->s->mysql_version, MYSQL_VERSION_ID); + static_cast(table->s->mysql_version), + MYSQL_VERSION_ID); DBUG_RETURN(TRUE); } else if (MYSQL_VERSION_ID == table->s->mysql_version) diff --git a/sql/unireg.cc b/sql/unireg.cc index 84160da9d77..e4fdf2af713 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -11,8 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Functions to create a unireg form-file from a FIELD and a fieldname-fieldinfo @@ -237,13 +236,14 @@ bool mysql_create_frm(THD *thd, const char *file_name, if ((thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))) { - my_error(ER_TOO_LONG_TABLE_COMMENT, MYF(0), table, tmp_len); + my_error(ER_TOO_LONG_TABLE_COMMENT, MYF(0), table, + static_cast(tmp_len)); goto err; } push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_TOO_LONG_TABLE_COMMENT, ER(ER_TOO_LONG_TABLE_COMMENT), - table, tmp_len); + table, static_cast(tmp_len)); create_info->comment.length= tmp_len; } @@ -621,13 +621,14 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type, if ((current_thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))) { - my_error(ER_TOO_LONG_FIELD_COMMENT, MYF(0), field->field_name, tmp_len); + my_error(ER_TOO_LONG_FIELD_COMMENT, MYF(0), field->field_name, + static_cast(tmp_len)); DBUG_RETURN(1); } push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_TOO_LONG_FIELD_COMMENT, ER(ER_TOO_LONG_FIELD_COMMENT), - field->field_name, tmp_len); + field->field_name, static_cast(tmp_len)); field->comment.length= tmp_len; } @@ -711,7 +712,7 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type, if (reclength > (ulong) file->max_record_length()) { - my_error(ER_TOO_BIG_ROWSIZE, MYF(0), (uint) file->max_record_length()); + my_error(ER_TOO_BIG_ROWSIZE, MYF(0), static_cast(file->max_record_length())); DBUG_RETURN(1); } /* Hack to avoid bugs with small static rows in MySQL */ -- cgit v1.2.1 From 3201f92cb3a8e564387d656f7a7bb566d4776bb5 Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Wed, 18 May 2011 10:47:43 +0400 Subject: Bug#12403504 AFTER FIX FOR #11889186 : ASSERTION FAILED: DELSUM+(INT) Y/4-TEMP > 0 There are two problems: 1. There is a missing check for 'year' parameter(year can not be greater than 9999) in makedate function. fix: added check that year can not be greater than 9999. 2. There is a missing check for zero date in from_days() function. fix: added zero date check into Item_func_from_days::get_date() function. 
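For illustration only, the two guards described above as a minimal standalone sketch (plain C; the helper names are simplified stand-ins, not the actual Item_func_makedate / Item_func_from_days::get_date() code):

#include <stdio.h>

/* MAKEDATE(year, daynr): reject years outside 0..9999 and non-positive
   day numbers before any day-number arithmetic is attempted. */
static int makedate_args_ok(long year, long daynr)
{
  return year >= 0 && year <= 9999 && daynr > 0;
}

/* FROM_DAYS()/get_date(): when the caller disallows zero dates, a result
   with a zero year, month or day must be turned into NULL. */
static int reject_zero_date(unsigned year, unsigned month, unsigned day,
                            int no_zero_date)
{
  return no_zero_date && (year == 0 || month == 0 || day == 0);
}

int main(void)
{
  /* Prints 0 (MAKEDATE(11111111,1) rejected) and 1 (zero date rejected). */
  printf("%d %d\n", makedate_args_ok(11111111, 1),
         reject_zero_date(0, 0, 0, 1));
  return 0;
}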
mysql-test/r/func_time.result: test case mysql-test/t/func_time.test: test case sql/item_timefunc.cc: --added check that year can not be greater than 9999 for makedate() function --added zero date check into Item_func_from_days::get_date() function --- mysql-test/r/func_time.result | 9 +++++++++ mysql-test/t/func_time.test | 7 +++++++ sql/item_timefunc.cc | 9 +++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 1e05443d8ac..63744d4ef29 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1417,4 +1417,13 @@ NULL SELECT DATE_FORMAT('0000-00-11', '%w'); DATE_FORMAT('0000-00-11', '%w') NULL +# +# Bug#12403504 AFTER FIX FOR #11889186 : ASSERTION FAILED: DELSUM+(INT) Y/4-TEMP > 0 +# +SELECT MAKEDATE(11111111,1); +MAKEDATE(11111111,1) +NULL +SELECT WEEK(DATE_ADD(FROM_DAYS(1),INTERVAL 1 MONTH), 1); +WEEK(DATE_ADD(FROM_DAYS(1),INTERVAL 1 MONTH), 1) +NULL End of 5.1 tests diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 2000d81f80d..de92f313992 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -921,4 +921,11 @@ SELECT DATE_FORMAT('0000-00-11', '%W'); SELECT DATE_FORMAT('0000-00-11', '%a'); SELECT DATE_FORMAT('0000-00-11', '%w'); +--echo # +--echo # Bug#12403504 AFTER FIX FOR #11889186 : ASSERTION FAILED: DELSUM+(INT) Y/4-TEMP > 0 +--echo # + +SELECT MAKEDATE(11111111,1); +SELECT WEEK(DATE_ADD(FROM_DAYS(1),INTERVAL 1 MONTH), 1); + --echo End of 5.1 tests diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index a96922b94a1..9b312247017 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1519,6 +1519,11 @@ bool Item_func_from_days::get_date(MYSQL_TIME *ltime, uint fuzzy_date) return 1; bzero(ltime, sizeof(MYSQL_TIME)); get_date_from_daynr((long) value, <ime->year, <ime->month, <ime->day); + + if ((null_value= (fuzzy_date & TIME_NO_ZERO_DATE) && + (ltime->year == 0 || ltime->month == 0 || ltime->day == 0))) + return TRUE; + ltime->time_type= MYSQL_TIMESTAMP_DATE; return 0; } @@ -2697,7 +2702,7 @@ String *Item_func_makedate::val_str(String *str) long days; if (args[0]->null_value || args[1]->null_value || - year < 0 || daynr <= 0) + year < 0 || year > 9999 || daynr <= 0) goto err; if (year < 100) @@ -2740,7 +2745,7 @@ longlong Item_func_makedate::val_int() long days; if (args[0]->null_value || args[1]->null_value || - year < 0 || daynr <= 0) + year < 0 || year > 9999 || daynr <= 0) goto err; if (year < 100) -- cgit v1.2.1 From c5dd72b50660701cdba62df8b3c6e32bd1f66872 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 18 May 2011 14:01:43 +0400 Subject: BUG#12402794 - 60976: CRASH, VALGRIND WARNING AND MEMORY LEAK WITH PARTITIONED ARCHIVE TABLES CHECK TABLE against archive table, when file descriptors are exhausted, caused server crash. Archive didn't handle errors when opening data file for CHECK TABLE. mysql-test/r/archive_debug.result: A test case for BUG#12402794. mysql-test/t/archive_debug.test: A test case for BUG#12402794. storage/archive/azio.c: A test case for BUG#12402794. storage/archive/ha_archive.cc: Handle init_archive_reader() failure. 
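For context, a minimal self-contained sketch of the error-handling pattern the fix applies (hypothetical names, not the real storage/archive API; only the init-reader step and the "corrupt" admin status correspond to the commit): CHECK TABLE must fail cleanly when the reader cannot be initialized instead of rewinding and scanning anyway.

#include <stdio.h>

#define ADMIN_OK      0
#define ADMIN_CORRUPT 1

/* Stand-in for init_archive_reader(): returns non-zero on failure, e.g.
   when no file descriptors are left to open the data file. */
static int init_reader(int simulate_open_failure)
{
  return simulate_open_failure ? -1 : 0;
}

static int check_table(int simulate_open_failure)
{
  if (init_reader(simulate_open_failure))
    return ADMIN_CORRUPT;   /* report corruption, never touch the reader */
  /* ... rewind the data file and scan the rows here ... */
  return ADMIN_OK;
}

int main(void)
{
  printf("normal: %d, fd exhausted: %d\n", check_table(0), check_table(1));
  return 0;
}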
--- mysql-test/r/archive_debug.result | 12 ++++++++++++ mysql-test/t/archive_debug.test | 13 +++++++++++++ storage/archive/azio.c | 9 +++++++++ storage/archive/ha_archive.cc | 3 ++- 4 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 mysql-test/r/archive_debug.result create mode 100644 mysql-test/t/archive_debug.test diff --git a/mysql-test/r/archive_debug.result b/mysql-test/r/archive_debug.result new file mode 100644 index 00000000000..cc5a3761a99 --- /dev/null +++ b/mysql-test/r/archive_debug.result @@ -0,0 +1,12 @@ +# +# BUG#12402794 - 60976: CRASH, VALGRIND WARNING AND MEMORY LEAK +# WITH PARTITIONED ARCHIVE TABLES +# +CREATE TABLE t1(a INT) ENGINE=ARCHIVE; +INSERT INTO t1 VALUES(1); +SET SESSION debug='d,simulate_archive_open_failure'; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check error Corrupt +SET SESSION debug=DEFAULT; +DROP TABLE t1; diff --git a/mysql-test/t/archive_debug.test b/mysql-test/t/archive_debug.test new file mode 100644 index 00000000000..9cece254140 --- /dev/null +++ b/mysql-test/t/archive_debug.test @@ -0,0 +1,13 @@ +--source include/have_archive.inc +--source include/have_debug.inc + +--echo # +--echo # BUG#12402794 - 60976: CRASH, VALGRIND WARNING AND MEMORY LEAK +--echo # WITH PARTITIONED ARCHIVE TABLES +--echo # +CREATE TABLE t1(a INT) ENGINE=ARCHIVE; +INSERT INTO t1 VALUES(1); +SET SESSION debug='d,simulate_archive_open_failure'; +CHECK TABLE t1; +SET SESSION debug=DEFAULT; +DROP TABLE t1; diff --git a/storage/archive/azio.c b/storage/archive/azio.c index c1dd6e6f38c..aaf8233a30c 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -114,6 +114,15 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) errno = 0; s->file = fd < 0 ? my_open(path, Flags, MYF(0)) : fd; + DBUG_EXECUTE_IF("simulate_archive_open_failure", + { + if (s->file >= 0) + { + my_close(s->file, MYF(0)); + s->file= -1; + my_errno= EMFILE; + } + }); if (s->file < 0 ) { diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index e5c483daac5..4da98507dcf 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -1586,11 +1586,12 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt) azflush(&(share->archive_write), Z_SYNC_FLUSH); pthread_mutex_unlock(&share->mutex); + if (init_archive_reader()) + DBUG_RETURN(HA_ADMIN_CORRUPT); /* Now we will rewind the archive file so that we are positioned at the start of the file. */ - init_archive_reader(); read_data_header(&archive); while (!(rc= get_row(&archive, table->record[0]))) count--; -- cgit v1.2.1 From d608ad2dd6ea45e3ef96a98f41702971a5edd215 Mon Sep 17 00:00:00 2001 From: Mayank Prasad Date: Wed, 18 May 2011 20:10:01 +0530 Subject: Bug#11764633 : 57491: THD->MAIN_DA.IS_OK() ASSERT IN EMBEDDED Issue: While running embedded server, if client issues TEE command (\T foo/bar) and "foo/bar" directory doesn't exist, it is suppose to give error. But it was aborting. This was happening because wrong error handler was being called. Solution: Modified calls to correct error handler. In embedded server case, there are two error handler (client and server) which are supposed to be called based on which context code is in. If it is in client context, client error handler should be called otherwise server. Test case: Test case automation is not possible as current (following) code doesn't allow '\T' to be executed from command line (OR command read from a file): [client/mysql.cc] ... 
static int com_tee(String *buffer __attribute__((unused)), char *line __attribute__((unused))) { char file_name[FN_REFLEN], *end, *param; if (status.batch) << THIS IS TRUE WHILE EXECUTING FROM COMMAND LINE. return 0; ... So, not adding test case in GA. WIll add a test case in mysql-trunk after removing above code so that this could be properly tested before GA. libmysqld/lib_sql.cc: Added code to call client/server error handler based on in control is in client/server code respectively. sql/mysql_priv.h: Added comments for THR_THD, THR_MALLOC keys. sql/sql_class.cc: Function definition of new function restore_global to removes thread specific data from stack (which was stored in store global). sql/sql_class.h: Function declaration of new function restore_global. --- libmysqld/lib_sql.cc | 28 ++++++++++++++++++++++++++-- sql/mysql_priv.h | 8 ++++++++ sql/sql_class.cc | 19 +++++++++++++++++++ sql/sql_class.h | 1 + 4 files changed, 54 insertions(+), 2 deletions(-) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 778d4874ad4..b6da6172039 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -51,6 +51,23 @@ extern "C" void unireg_clear(int exit_code) DBUG_VOID_RETURN; } +/* + Wrapper error handler for embedded server to call client/server error + handler based on whether thread is in client/server context +*/ + +static int embedded_error_handler(uint error, const char *str, myf MyFlags) +{ + DBUG_ENTER("embedded_error_handler"); + + /* + If current_thd is NULL, it means restore_global has been called and + thread is in client context, then call client error handler else call + server error handler. + */ + DBUG_RETURN(current_thd ? my_message_sql(error, str, MyFlags): + my_message_no_curses(error, str, MyFlags)); +} /* Reads error information from the MYSQL_DATA and puts @@ -107,7 +124,8 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, if (mysql->status != MYSQL_STATUS_READY) { set_mysql_error(mysql, CR_COMMANDS_OUT_OF_SYNC, unknown_sqlstate); - return 1; + result= 1; + goto end; } /* Clear result variables */ @@ -147,6 +165,9 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, #if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER) thd->profiling.finish_current_query(); #endif + +end: + thd->restore_globals(); return result; } @@ -525,7 +546,10 @@ int init_embedded_server(int argc, char **argv, char **groups) return 1; } - error_handler_hook = my_message_sql; + /* + set error_handler_hook to embedded_error_handler wrapper. + */ + error_handler_hook= embedded_error_handler; acl_error= 0; #ifndef NO_EMBEDDED_ACCESS_CHECKS diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 8f9a9080d12..3ce73291206 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -670,6 +670,10 @@ enum enum_check_fields extern "C" THD *_current_thd_noinline(); #define _current_thd() _current_thd_noinline() #else +/* + THR_THD is a key which will be used to set/get THD* for a thread, + using my_pthread_setspecific_ptr()/my_thread_getspecific_ptr(). +*/ extern pthread_key(THD*, THR_THD); inline THD *_current_thd(void) { @@ -2022,6 +2026,10 @@ extern TABLE_LIST general_log, slow_log; extern FILE *bootstrap_file; extern int bootstrap_error; extern FILE *stderror_file; +/* + THR_MALLOC is a key which will be used to set/get MEM_ROOT** for a thread, + using my_pthread_setspecific_ptr()/my_thread_getspecific_ptr(). 
+*/ extern pthread_key(MEM_ROOT**,THR_MALLOC); extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_lock_db, LOCK_mapped_file,LOCK_user_locks, LOCK_status, diff --git a/sql/sql_class.cc b/sql/sql_class.cc index ae21a5335fd..04f981c6d6a 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1196,6 +1196,25 @@ bool THD::store_globals() return 0; } +/* + Remove the thread specific info (THD and mem_root pointer) stored during + store_global call for this thread. +*/ +bool THD::restore_globals() +{ + /* + Assert that thread_stack is initialized: it's necessary to be able + to track stack overrun. + */ + DBUG_ASSERT(thread_stack); + + /* Undocking the thread specific data. */ + my_pthread_setspecific_ptr(THR_THD, NULL); + my_pthread_setspecific_ptr(THR_MALLOC, NULL); + + return 0; +} + /* Cleanup after query. diff --git a/sql/sql_class.h b/sql/sql_class.h index b3e8fde8cda..6b82512677a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1943,6 +1943,7 @@ public: void cleanup(void); void cleanup_after_query(); bool store_globals(); + bool restore_globals(); #ifdef SIGNAL_WITH_VIO_CLOSE inline void set_active_vio(Vio* vio) { -- cgit v1.2.1 From 4f03d60096a599adfc8d7cf83aac480260c3348f Mon Sep 17 00:00:00 2001 From: Luis Soares Date: Thu, 19 May 2011 16:45:45 +0100 Subject: BUG#11746302: 25228: RPL_RELAYSPACE.TEST FAILS ON POWERMACG5, VM-WIN2003-32-A, SLES10-IA64-A The test case waits for master_pos_wait not to timeout, which means that the deadlock between SQL and IO threads was succesfully and automatically dealt with. However, very rarely, master_pos_wait reports a timeout. This happens because the time set for master_pos_wait to wait was too small (6 seconds). On slow test env this could be a problem. We fix this by setting the timeout inline with the one used in sync_slave_with_master (300 seconds). In addition we refactored the test case and refined some comments. --- mysql-test/suite/rpl/r/rpl_relayspace.result | 10 +++---- mysql-test/suite/rpl/t/rpl_relayspace.test | 42 ++++++++++++++++++---------- 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/mysql-test/suite/rpl/r/rpl_relayspace.result b/mysql-test/suite/rpl/r/rpl_relayspace.result index f12f177ff7c..fb21540aa31 100644 --- a/mysql-test/suite/rpl/r/rpl_relayspace.result +++ b/mysql-test/suite/rpl/r/rpl_relayspace.result @@ -1,6 +1,6 @@ include/master-slave.inc [connection master] -stop slave; +include/stop_slave.inc create table t1 (a int); drop table t1; create table t1 (a int); @@ -8,10 +8,8 @@ drop table t1; reset slave; start slave io_thread; include/wait_for_slave_param.inc [Slave_IO_State] -stop slave io_thread; +include/stop_slave_io.inc reset slave; -start slave; -select master_pos_wait('master-bin.001',200,6)=-1; -master_pos_wait('master-bin.001',200,6)=-1 -0 +include/start_slave.inc +include/assert.inc [Assert that master_pos_wait does not timeout nor it returns NULL] include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_relayspace.test b/mysql-test/suite/rpl/t/rpl_relayspace.test index bb34ec25bcd..fc33d6bc0ba 100644 --- a/mysql-test/suite/rpl/t/rpl_relayspace.test +++ b/mysql-test/suite/rpl/t/rpl_relayspace.test @@ -2,8 +2,9 @@ # to force the deadlock after one event. 
source include/master-slave.inc; +--let $master_log_file= query_get_value(SHOW MASTER STATUS, File, 1) connection slave; -stop slave; +--source include/stop_slave.inc connection master; # This will generate a master's binlog > 10 bytes create table t1 (a int); @@ -19,20 +20,33 @@ let $slave_param_value= Waiting for the slave SQL thread to free enough relay lo source include/wait_for_slave_param.inc; # A bug caused the I/O thread to refuse stopping. -stop slave io_thread; +--source include/stop_slave_io.inc reset slave; -start slave; -# The I/O thread stops filling the relay log when -# it's >10b. And the SQL thread cannot purge this relay log -# as purge is done only when the SQL thread switches to another -# relay log, which does not exist here. -# So we should have a deadlock. -# if it is not resolved automatically we'll detect -# it with master_pos_wait that waits for farther than 1Ob; -# it will timeout after 10 seconds; -# also the slave will probably not cooperate to shutdown -# (as 2 threads are locked) -select master_pos_wait('master-bin.001',200,6)=-1; +--source include/start_slave.inc + +# The I/O thread stops filling the relay log when it's >10b. And the +# SQL thread cannot purge this relay log as purge is done only when +# the SQL thread switches to another relay log, which does not exist +# here. So we should have a deadlock. If it is not resolved +# automatically we'll detect it with master_pos_wait that waits for +# farther than 1Ob; it will timeout after 300 seconds (which is inline +# with the default used for sync_slave_with_master and will protect us +# against slow test envs); also the slave will probably not cooperate +# to shutdown (as 2 threads are locked) +--let $outcome= `SELECT MASTER_POS_WAIT('$master_log_file',200,300) AS mpw;` + +# master_pos_wait returns: +# +# * >= 0, the number of events the slave had to wait to advance to the +# position +# +# * -1, if there was a timeout +# +# * NULL, if an error occurred, or the SQL thread was not started, +# slave master info is not initialized, the arguments are incorrect +--let $assert_text= Assert that master_pos_wait does not timeout nor it returns NULL +--let $assert_cond= $outcome IS NOT NULL AND $outcome <> -1 +--source include/assert.inc # End of 4.1 tests --source include/rpl_end.inc -- cgit v1.2.1 From 733893ba502cfd1378ece9c1e0dbae98bf7ba9df Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 20 May 2011 23:52:52 +0700 Subject: Fixed bug#11749345 (formerly bug#38813) - increasing memory consumption when selecting from I_S and views exist, in SP. Symptoms: re-execution of prepared statement (or statement in a stored routine) which read from one of I_S tables and which in order to fill this I_S table had to open a view led to increasing memory consumption. What happened in this situation was that during the process of view opening for purpose of I_S filling view-related structures (like its LEX) were allocated on persistent MEM_ROOT of prepared statement (or stored routine). Since this MEM_ROOT is not freed until prepared statement deallocation (or expulsion of stored routine from the cache) and code responsible for filling I_S is not able to re-use results of view opening from previous executions this allocation ended up in memory hogging. This patch solves the problem by ensuring that when a view opened for the purpose of I_S filling all its structures are allocated on non-persistent runtime MEM_ROOT. This is achieved by activating a temporary Query_arena bound to this MEM_ROOT. 
Since this step makes impossible linking of view structures into LEX of our prepared statement (or stored routine statement) this patch also changes code filling I_S table to install a proxy LEX before trying to open a view or a table. Consequently some code which was responsible for backing-up/restoring parts of LEX when view/table was opened during filling of I_S table became redundant and was removed. This patch doesn't contain test case for this bug as it is hard to test memory hogging in our test suite. --- sql/sql_show.cc | 320 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 180 insertions(+), 140 deletions(-) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 5b835096042..310ec3b8e4f 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2428,12 +2428,11 @@ bool schema_table_store_record(THD *thd, TABLE *table) } -int make_table_list(THD *thd, SELECT_LEX *sel, - LEX_STRING *db_name, LEX_STRING *table_name) +static int make_table_list(THD *thd, SELECT_LEX *sel, + LEX_STRING *db_name, LEX_STRING *table_name) { Table_ident *table_ident; table_ident= new Table_ident(thd, *db_name, *table_name, 1); - sel->init_query(); if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ)) return 1; return 0; @@ -3003,79 +3002,179 @@ make_table_name_list(THD *thd, List *table_names, LEX *lex, /** - @brief Fill I_S table for SHOW COLUMNS|INDEX commands + Fill I_S table with data obtained by performing full-blown table open. + + @param thd Thread handler. + @param is_show_fields_or_keys Indicates whether it is a legacy SHOW + COLUMNS or SHOW KEYS statement. + @param table TABLE object for I_S table to be filled. + @param schema_table I_S table description structure. + @param orig_db_name Database name. + @param orig_table_name Table name. + @param open_tables_state_backup Open_tables_state object which is used + to save/restore original status of + variables related to open tables state. + + @retval FALSE - Success. + @retval TRUE - Failure. +*/ - @param[in] thd thread handler - @param[in] tables TABLE_LIST for I_S table - @param[in] schema_table pointer to I_S structure - @param[in] open_tables_state_backup pointer to Open_tables_state object - which is used to save|restore original - status of variables related to - open tables state +static bool +fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys, + TABLE *table, ST_SCHEMA_TABLE *schema_table, + LEX_STRING *orig_db_name, + LEX_STRING *orig_table_name, + Open_tables_state *open_tables_state_backup) +{ + Query_arena i_s_arena(thd->mem_root, + Query_arena::CONVENTIONAL_EXECUTION), + backup_arena, *old_arena; + LEX *old_lex= thd->lex, temp_lex, *lex; + LEX_STRING db_name, table_name; + TABLE_LIST *table_list; + bool result= true; - @return Operation status - @retval 0 success - @retval 1 error -*/ + /* + When a view is opened its structures are allocated on a permanent + statement arena and linked into the LEX tree for the current statement + (this happens even in cases when view is handled through TEMPTABLE + algorithm). 
-static int -fill_schema_show_cols_or_idxs(THD *thd, TABLE_LIST *tables, - ST_SCHEMA_TABLE *schema_table, - Open_tables_state *open_tables_state_backup) -{ - LEX *lex= thd->lex; - bool res; - LEX_STRING tmp_lex_string, tmp_lex_string1, *db_name, *table_name; - enum_sql_command save_sql_command= lex->sql_command; - TABLE_LIST *show_table_list= tables->schema_select_lex->table_list.first; - TABLE *table= tables->table; - int error= 1; - DBUG_ENTER("fill_schema_show"); + To prevent this process from unnecessary hogging of memory in the permanent + arena of our I_S query and to avoid damaging its LEX we use temporary + arena and LEX for table/view opening. + + Use temporary arena instead of statement permanent arena. Also make + it active arena and save original one for successive restoring. + */ + old_arena= thd->stmt_arena; + thd->stmt_arena= &i_s_arena; + thd->set_n_backup_active_arena(&i_s_arena, &backup_arena); + + /* Prepare temporary LEX. */ + thd->lex= lex= &temp_lex; + lex_start(thd); + + /* Disable constant subquery evaluation as we won't be locking tables. */ + lex->context_analysis_only= CONTEXT_ANALYSIS_ONLY_VIEW; - lex->all_selects_list= tables->schema_select_lex; /* - Restore thd->temporary_tables to be able to process - temporary tables(only for 'show index' & 'show columns'). - This should be changed when processing of temporary tables for - I_S tables will be done. + Some of process_table() functions rely on wildcard being passed from + old LEX (or at least being initialized). */ - thd->temporary_tables= open_tables_state_backup->temporary_tables; + lex->wild= old_lex->wild; + + /* + Since make_table_list() might change database and table name passed + to it we create copies of orig_db_name and orig_table_name here. + These copies are used for make_table_list() while unaltered values + are passed to process_table() functions. + */ + if (!thd->make_lex_string(&db_name, orig_db_name->str, + orig_db_name->length, FALSE) || + !thd->make_lex_string(&table_name, orig_table_name->str, + orig_table_name->length, FALSE)) + goto end; + + /* + Create table list element for table to be open. Link it with the + temporary LEX. The latter is required to correctly open views and + produce table describing their structure. + */ + if (make_table_list(thd, &lex->select_lex, &db_name, &table_name)) + goto end; + + table_list= lex->select_lex.table_list.first; + + if (is_show_fields_or_keys) + { + /* + Restore thd->temporary_tables to be able to process + temporary tables (only for 'show index' & 'show columns'). + This should be changed when processing of temporary tables for + I_S tables will be done. + */ + thd->temporary_tables= open_tables_state_backup->temporary_tables; + } + else + { + /* + Apply optimization flags for table opening which are relevant for + this I_S table. We can't do this for SHOW COLUMNS/KEYS because of + backward compatibility. + */ + table_list->i_s_requested_object= schema_table->i_s_requested_object; + } + /* Let us set fake sql_command so views won't try to merge themselves into main statement. If we don't do this, SELECT * from information_schema.xxxx will cause problems. - SQLCOM_SHOW_FIELDS is used because it satisfies 'only_view_structure()' + SQLCOM_SHOW_FIELDS is used because it satisfies + 'only_view_structure()'. 
*/ lex->sql_command= SQLCOM_SHOW_FIELDS; - res= open_normal_and_derived_tables(thd, show_table_list, - MYSQL_LOCK_IGNORE_FLUSH); - lex->sql_command= save_sql_command; + + result= open_normal_and_derived_tables(thd, table_list, + MYSQL_LOCK_IGNORE_FLUSH); + /* - get_all_tables() returns 1 on failure and 0 on success thus - return only these and not the result code of ::process_table() - - We should use show_table_list->alias instead of - show_table_list->table_name because table_name - could be changed during opening of I_S tables. It's safe - to use alias because alias contains original table name - in this case(this part of code is used only for - 'show columns' & 'show statistics' commands). + Restore old value of sql_command back as it is being looked at in + process_table() function. */ - table_name= thd->make_lex_string(&tmp_lex_string1, show_table_list->alias, - strlen(show_table_list->alias), FALSE); - if (!show_table_list->view) - db_name= thd->make_lex_string(&tmp_lex_string, show_table_list->db, - show_table_list->db_length, FALSE); - else - db_name= &show_table_list->view_db; - - - error= test(schema_table->process_table(thd, show_table_list, - table, res, db_name, - table_name)); - thd->temporary_tables= 0; - close_tables_for_reopen(thd, &show_table_list); - DBUG_RETURN(error); + lex->sql_command= old_lex->sql_command; + + /* + XXX: show_table_list has a flag i_is_requested, + and when it's set, open_normal_and_derived_tables() + can return an error without setting an error message + in THD, which is a hack. This is why we have to + check for res, then for thd->is_error() and only then + for thd->main_da.sql_errno(). + + Again we don't do this for SHOW COLUMNS/KEYS because + of backward compatibility. + */ + if (!is_show_fields_or_keys && result && thd->is_error() && + thd->main_da.sql_errno() == ER_NO_SUCH_TABLE) + { + /* + Hide error for a non-existing table. + For example, this error can occur when we use a where condition + with a db name and table, but the table does not exist. + */ + result= 0; + thd->clear_error(); + } + else + { + result= schema_table->process_table(thd, table_list, + table, result, + orig_db_name, + orig_table_name); + } + +end: + lex->unit.cleanup(); + + /* Restore original LEX value, statement's arena and THD arena values. */ + lex_end(thd->lex); + + if (i_s_arena.free_list) + i_s_arena.free_items(); + + /* + For safety reset list of open temporary tables before closing + all tables open within this Open_tables_state. 
+ */ + thd->temporary_tables= NULL; + close_thread_tables(thd); + thd->lex= old_lex; + + thd->stmt_arena= old_arena; + thd->restore_active_arena(&i_s_arena, &backup_arena); + + return result; } @@ -3300,11 +3399,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) { LEX *lex= thd->lex; TABLE *table= tables->table; - SELECT_LEX *old_all_select_lex= lex->all_selects_list; - enum_sql_command save_sql_command= lex->sql_command; SELECT_LEX *lsel= tables->schema_select_lex; ST_SCHEMA_TABLE *schema_table= tables->schema_table; - SELECT_LEX sel; LOOKUP_FIELD_VALUES lookup_field_vals; LEX_STRING *db_name, *table_name; bool with_i_schema; @@ -3312,20 +3408,14 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) List db_names; List_iterator_fast it(db_names); COND *partial_cond= 0; - uint derived_tables= lex->derived_tables; int error= 1; Open_tables_state open_tables_state_backup; - uint8 save_context_analysis_only= lex->context_analysis_only; - Query_tables_list query_tables_list_backup; #ifndef NO_EMBEDDED_ACCESS_CHECKS Security_context *sctx= thd->security_ctx; #endif uint table_open_method; DBUG_ENTER("get_all_tables"); - lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_VIEW; - lex->reset_n_backup_query_tables_list(&query_tables_list_backup); - /* We should not introduce deadlocks even if we already have some tables open and locked, since we won't lock tables which we will @@ -3340,8 +3430,18 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) */ if (lsel && lsel->table_list.first) { - error= fill_schema_show_cols_or_idxs(thd, tables, schema_table, - &open_tables_state_backup); + LEX_STRING db_name, table_name; + + db_name.str= lsel->table_list.first->db; + db_name.length= lsel->table_list.first->db_length; + + table_name.str= lsel->table_list.first->table_name; + table_name.length= lsel->table_list.first->table_name_length; + + error= fill_schema_table_by_open(thd, TRUE, + table, schema_table, + &db_name, &table_name, + &open_tables_state_backup); goto err; } @@ -3399,12 +3499,6 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) it.rewind(); /* To get access to new elements in basis list */ while ((db_name= it++)) { - LEX_STRING orig_db_name; - - /* db_name can be changed in make_table_list() func */ - if (!thd->make_lex_string(&orig_db_name, db_name->str, - db_name->length, FALSE)) - goto err; #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!(check_access(thd,SELECT_ACL, db_name->str, &thd->col_access, 0, 1, with_i_schema) || @@ -3466,64 +3560,14 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) continue; } - int res; - LEX_STRING tmp_lex_string; - /* - Set the parent lex of 'sel' because it is needed by - sel.init_query() which is called inside make_table_list. - */ thd->no_warnings_for_error= 1; - sel.parent_lex= lex; - if (make_table_list(thd, &sel, db_name, table_name)) - goto err; - TABLE_LIST *show_table_list= sel.table_list.first; - lex->all_selects_list= &sel; - lex->derived_tables= 0; - lex->sql_command= SQLCOM_SHOW_FIELDS; - show_table_list->i_s_requested_object= - schema_table->i_s_requested_object; + DEBUG_SYNC(thd, "before_open_in_get_all_tables"); - res= open_normal_and_derived_tables(thd, show_table_list, - MYSQL_LOCK_IGNORE_FLUSH); - lex->sql_command= save_sql_command; - /* - XXX: show_table_list has a flag i_is_requested, - and when it's set, open_normal_and_derived_tables() - can return an error without setting an error message - in THD, which is a hack. 
This is why we have to - check for res, then for thd->is_error() only then - for thd->main_da.sql_errno(). - */ - if (res && thd->is_error() && - thd->main_da.sql_errno() == ER_NO_SUCH_TABLE) - { - /* - Hide error for not existing table. - This error can occur for example when we use - where condition with db name and table name and this - table does not exist. - */ - res= 0; - thd->clear_error(); - } - else - { - /* - We should use show_table_list->alias instead of - show_table_list->table_name because table_name - could be changed during opening of I_S tables. It's safe - to use alias because alias contains original table name - in this case. - */ - thd->make_lex_string(&tmp_lex_string, show_table_list->alias, - strlen(show_table_list->alias), FALSE); - res= schema_table->process_table(thd, show_table_list, table, - res, &orig_db_name, - &tmp_lex_string); - close_tables_for_reopen(thd, &show_table_list); - } - DBUG_ASSERT(!lex->query_tables_own_last); - if (res) + + if (fill_schema_table_by_open(thd, FALSE, + table, schema_table, + db_name, table_name, + &open_tables_state_backup)) goto err; } } @@ -3539,11 +3583,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) error= 0; err: thd->restore_backup_open_tables_state(&open_tables_state_backup); - lex->restore_backup_query_tables_list(&query_tables_list_backup); - lex->derived_tables= derived_tables; - lex->all_selects_list= old_all_select_lex; - lex->context_analysis_only= save_context_analysis_only; - lex->sql_command= save_sql_command; + DBUG_RETURN(error); } -- cgit v1.2.1 From faf747bf52529c03c0d8818eaa7655be897f4629 Mon Sep 17 00:00:00 2001 From: Anitha Gopi Date: Tue, 24 May 2011 09:56:24 +0530 Subject: Bug#12584161 - Moved test from disabled to experimental group --- mysql-test/collections/default.experimental | 2 ++ mysql-test/t/disabled.def | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index da2df7a72e8..d8acbfd03c3 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -19,6 +19,8 @@ innodb_plugin.* @solaris # Bug#56063 InnoDB Plugin mysql-tests f main.ctype_gbk_binlog @solaris # Bug#46010: main.ctype_gbk_binlog fails sporadically : Table 't2' already exists main.func_str @solaris # joro: Bug#40928 main.sp @solaris # joro : Bug#54138 +query_cache_28249 # Bug#12584161 2009-03-25 main.query_cache_28249 fails sporadically + ndb.* # joro : NDB tests marked as experimental as agreed with bochklin diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index c244d08e308..2fe8f5c32ae 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -10,6 +10,5 @@ # ############################################################################## kill : Bug#37780 2008-12-03 HHunger need some changes to be robust enough for pushbuild. 
-query_cache_28249 : Bug#43861 2009-03-25 main.query_cache_28249 fails sporadically log_tables-big : Bug#48646 2010-11-15 mattiasj report already exists read_many_rows_innodb : Bug#37635 2010-11-15 mattiasj report already exists -- cgit v1.2.1 From 0464f964adc65e34708b5e792bf292ec07343d7b Mon Sep 17 00:00:00 2001 From: Anitha Gopi Date: Tue, 24 May 2011 10:22:00 +0530 Subject: Bug#11756699: Move test from disabled to experimental group --- mysql-test/collections/default.experimental | 3 ++- mysql-test/t/disabled.def | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index d8acbfd03c3..680f05e9f24 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -19,7 +19,8 @@ innodb_plugin.* @solaris # Bug#56063 InnoDB Plugin mysql-tests f main.ctype_gbk_binlog @solaris # Bug#46010: main.ctype_gbk_binlog fails sporadically : Table 't2' already exists main.func_str @solaris # joro: Bug#40928 main.sp @solaris # joro : Bug#54138 -query_cache_28249 # Bug#12584161 2009-03-25 main.query_cache_28249 fails sporadically +main.query_cache_28249 # Bug#12584161 2009-03-25 main.query_cache_28249 fails sporadically +main.log_tables-big # Bug#11756699 2010-11-15 mattiasj report already exists ndb.* # joro : NDB tests marked as experimental as agreed with bochklin diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 2fe8f5c32ae..4d26b80c0f8 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -10,5 +10,4 @@ # ############################################################################## kill : Bug#37780 2008-12-03 HHunger need some changes to be robust enough for pushbuild. -log_tables-big : Bug#48646 2010-11-15 mattiasj report already exists read_many_rows_innodb : Bug#37635 2010-11-15 mattiasj report already exists -- cgit v1.2.1 From 099fbeb93dd8c8610e3a7ac2d60e0c542dfbe4bb Mon Sep 17 00:00:00 2001 From: Anitha Gopi Date: Tue, 24 May 2011 12:08:13 +0530 Subject: Changed to Oracle bug numbers --- mysql-test/collections/default.experimental | 20 ++++++++++---------- mysql-test/suite/binlog/t/disabled.def | 4 ++-- mysql-test/suite/federated/disabled.def | 2 +- mysql-test/suite/funcs_1/t/disabled.def | 2 +- mysql-test/suite/funcs_2/t/disabled.def | 8 ++++---- mysql-test/suite/ndb/t/disabled.def | 2 +- mysql-test/suite/ndb_team/t/disabled.def | 4 ++-- mysql-test/suite/parts/t/disabled.def | 4 ++-- mysql-test/suite/rpl/t/disabled.def | 4 ++-- mysql-test/suite/rpl_ndb/t/disabled.def | 2 +- mysql-test/suite/sys_vars/t/disabled.def | 4 ++-- mysql-test/t/disabled.def | 4 ++-- 12 files changed, 30 insertions(+), 30 deletions(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 680f05e9f24..9996da1dfb4 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -14,23 +14,23 @@ funcs_1.ndb* # joro : NDB tests marked as experiment funcs_2.ndb_charset # joro : NDB tests marked as experimental as agreed with bochklin -innodb_plugin.* @solaris # Bug#56063 InnoDB Plugin mysql-tests fail on Solaris +innodb_plugin.* @solaris # Bug#11763366 InnoDB Plugin mysql-tests fail on Solaris -main.ctype_gbk_binlog @solaris # Bug#46010: main.ctype_gbk_binlog fails sporadically : Table 't2' already exists -main.func_str @solaris # joro: Bug#40928 -main.sp @solaris # joro : Bug#54138 +main.ctype_gbk_binlog @solaris # Bug#11754407: 
main.ctype_gbk_binlog fails sporadically : Table 't2' already exists +main.func_str @solaris # joro: Bug#11750406 +main.sp @solaris # joro : Bug#11761625 main.query_cache_28249 # Bug#12584161 2009-03-25 main.query_cache_28249 fails sporadically main.log_tables-big # Bug#11756699 2010-11-15 mattiasj report already exists ndb.* # joro : NDB tests marked as experimental as agreed with bochklin -rpl.rpl_innodb_bug28430 @solaris # Bug#46029 -rpl.rpl_row_sp011 @solaris # Joro : Bug #45445 +rpl.rpl_innodb_bug28430 @solaris # Bug#11754425 +rpl.rpl_row_sp011 @solaris # Joro : Bug #11753919 rpl.rpl_stop_slave # Sven : BUG#12345981 rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin -rpl_ndb.rpl_ndb_log # Bug#38998 +rpl_ndb.rpl_ndb_log # Bug#11749433 stress.ddl_ndb # joro : NDB tests marked as experimental as agreed with bochklin @@ -48,6 +48,6 @@ parts.partition_mgm_lc1_ndb # joro : NDB tests marked as experiment parts.partition_mgm_lc2_ndb # joro : NDB tests marked as experimental as agreed with bochklin parts.partition_syntax_ndb # joro : NDB tests marked as experimental as agreed with bochklin parts.partition_value_ndb # joro : NDB tests marked as experimental as agreed with bochklin -main.gis-rtree # svoj: due to BUG#38965 -main.type_float # svoj: due to BUG#38965 -main.type_newdecimal # svoj: due to BUG#38965 +main.gis-rtree # svoj: due to BUG#11749418 +main.type_float # svoj: due to BUG#11749418 +main.type_newdecimal # svoj: due to BUG#11749418 diff --git a/mysql-test/suite/binlog/t/disabled.def b/mysql-test/suite/binlog/t/disabled.def index 85313982869..0c8884488cc 100644 --- a/mysql-test/suite/binlog/t/disabled.def +++ b/mysql-test/suite/binlog/t/disabled.def @@ -9,5 +9,5 @@ # Do not use any TAB characters for whitespace. # ############################################################################## -binlog_truncate_innodb : BUG#57291 2010-10-20 anitha Originally disabled due to BUG#42643. Product bug fixed, but test changes needed -binlog_row_failure_mixing_engines : BUG#58416 2010-11-23 ramil Fails on win x86 debug_max +binlog_truncate_innodb : BUG#11764459 2010-10-20 anitha Originally disabled due to BUG#42643. Product bug fixed, but test changes needed +binlog_row_failure_mixing_engines : BUG#11765446 2010-11-23 ramil Fails on win x86 debug_max diff --git a/mysql-test/suite/federated/disabled.def b/mysql-test/suite/federated/disabled.def index 9a9149ec80a..3b114aa380b 100644 --- a/mysql-test/suite/federated/disabled.def +++ b/mysql-test/suite/federated/disabled.def @@ -9,4 +9,4 @@ # Do not use any TAB characters for whitespace. # ############################################################################## -federated_transactions : Bug#29523 Transactions do not work +federated_transactions : Bug#11746899 Transactions do not work diff --git a/mysql-test/suite/funcs_1/t/disabled.def b/mysql-test/suite/funcs_1/t/disabled.def index 3f260ca49ba..032a8f59d2e 100644 --- a/mysql-test/suite/funcs_1/t/disabled.def +++ b/mysql-test/suite/funcs_1/t/disabled.def @@ -10,4 +10,4 @@ # ############################################################################## -ndb_trig_1011ext: Bug#32656 NDB: Duplicate key error aborts transaction in handler. Doesn't talk back to SQL +ndb_trig_1011ext: Bug#11747493 NDB: Duplicate key error aborts transaction in handler. 
Doesn't talk back to SQL diff --git a/mysql-test/suite/funcs_2/t/disabled.def b/mysql-test/suite/funcs_2/t/disabled.def index da6230bd7ed..50c65c30ed0 100644 --- a/mysql-test/suite/funcs_2/t/disabled.def +++ b/mysql-test/suite/funcs_2/t/disabled.def @@ -1,6 +1,6 @@ # Disabled by hhunger (2008-03-03) due to WL4204 -innodb_charset : Bug#20447 Problem with prefix keys with contractions and expansions -myisam_charset : Bug#20447 Problem with prefix keys with contractions and expansions -memory_charset : Bug#20447 Problem with prefix keys with contractions and expansions -ndb_charset : Bug#20447 Problem with prefix keys with contractions and expansions +innodb_charset : Bug#11745840 Problem with prefix keys with contractions and expansions +myisam_charset : Bug#11745840 Problem with prefix keys with contractions and expansions +memory_charset : Bug#11745840 Problem with prefix keys with contractions and expansions +ndb_charset : Bug#11745840 Problem with prefix keys with contractions and expansions diff --git a/mysql-test/suite/ndb/t/disabled.def b/mysql-test/suite/ndb/t/disabled.def index 0fc9a5d3ad6..1b730642235 100644 --- a/mysql-test/suite/ndb/t/disabled.def +++ b/mysql-test/suite/ndb/t/disabled.def @@ -10,6 +10,6 @@ # ############################################################################## -ndb_partition_error2 : Bug#40989 ndb_partition_error2 needs maintenance +ndb_partition_error2 : Bug#11750448 ndb_partition_error2 needs maintenance # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open diff --git a/mysql-test/suite/ndb_team/t/disabled.def b/mysql-test/suite/ndb_team/t/disabled.def index 714f1014a10..e70e23b8303 100644 --- a/mysql-test/suite/ndb_team/t/disabled.def +++ b/mysql-test/suite/ndb_team/t/disabled.def @@ -9,8 +9,8 @@ # Do not use any TAB characters for whitespace. 
# ############################################################################## -ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog -ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog +ndb_autodiscover : BUG#11745709 2006-02-16 jmiller Needs to be fixed w.r.t binlog +ndb_autodiscover2 : BUG#11745709 2006-02-16 jmiller Needs to be fixed w.r.t binlog # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open #ndb_autodiscover3 : bug#21806 diff --git a/mysql-test/suite/parts/t/disabled.def b/mysql-test/suite/parts/t/disabled.def index 518a3c90422..113fe6fdd44 100644 --- a/mysql-test/suite/parts/t/disabled.def +++ b/mysql-test/suite/parts/t/disabled.def @@ -1,3 +1,3 @@ -partition_basic_ndb : Bug#19899 Crashing the server +partition_basic_ndb : Bug#11745782 Crashing the server # http://dev.mysql.com/doc/refman/5.1/en/mysql-cluster-limitations-syntax.html -partition_syntax_ndb : Bug#36735 Not supported +partition_syntax_ndb : Bug#11748568 Not supported diff --git a/mysql-test/suite/rpl/t/disabled.def b/mysql-test/suite/rpl/t/disabled.def index 33f65ff3ecc..0a8f3cb2a50 100644 --- a/mysql-test/suite/rpl/t/disabled.def +++ b/mysql-test/suite/rpl/t/disabled.def @@ -10,7 +10,7 @@ # ############################################################################## -rpl_row_create_table : Bug#51574 Feb 27 2010 andrei failed different way than earlier with bug#45576 -rpl_get_master_version_and_clock : Bug#59178 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock +rpl_row_create_table : Bug#11759274 Feb 27 2010 andrei failed different way than earlier with bug#45576 +rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock rpl_row_until : BUG#59543 Jan 26 2011 alfranio Replication test from eits suite rpl_row_until times out rpl_stm_until : BUG#59543 Jan 26 2011 alfranio Replication test from eits suite rpl_row_until times out diff --git a/mysql-test/suite/rpl_ndb/t/disabled.def b/mysql-test/suite/rpl_ndb/t/disabled.def index 2f15112515e..8fa6e9c099e 100644 --- a/mysql-test/suite/rpl_ndb/t/disabled.def +++ b/mysql-test/suite/rpl_ndb/t/disabled.def @@ -11,4 +11,4 @@ ############################################################################## # the below testcase have been reworked to avoid the bug, test contains comment, keep bug open -rpl_ndb_2ndb : Bug#45974: rpl_ndb_2ndb fails sporadically +rpl_ndb_2ndb : Bug#11754374 : rpl_ndb_2ndb fails sporadically diff --git a/mysql-test/suite/sys_vars/t/disabled.def b/mysql-test/suite/sys_vars/t/disabled.def index 915ea7a6f5c..866172fb51a 100644 --- a/mysql-test/suite/sys_vars/t/disabled.def +++ b/mysql-test/suite/sys_vars/t/disabled.def @@ -9,5 +9,5 @@ # Do not use any TAB characters for whitespace. # ############################################################################## -sys_vars.max_binlog_cache_size_basic_64 : bug#56408 2010-08-31 Horst -sys_vars.max_binlog_cache_size_basic_32 : bug#56408 2010-08-31 Horst +sys_vars.max_binlog_cache_size_basic_64 : bug#11763668 2010-08-31 Horst +sys_vars.max_binlog_cache_size_basic_32 : bug#11763668 2010-08-31 Horst diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 4d26b80c0f8..f33fd0352a0 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -9,5 +9,5 @@ # Do not use any TAB characters for whitespace. 
# ############################################################################## -kill : Bug#37780 2008-12-03 HHunger need some changes to be robust enough for pushbuild. -read_many_rows_innodb : Bug#37635 2010-11-15 mattiasj report already exists +kill : Bug#11748945 2008-12-03 HHunger need some changes to be robust enough for pushbuild. +read_many_rows_innodb : Bug#11748886 2010-11-15 mattiasj report already exists -- cgit v1.2.1 From cce8a658436c2ae9052d38e7ca8c2fdf4d212b7d Mon Sep 17 00:00:00 2001 From: Anitha Gopi Date: Tue, 24 May 2011 15:46:14 +0530 Subject: BUG#12371924 # Bug is fixed. Move test out of experimental group --- mysql-test/collections/default.experimental | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 9996da1dfb4..cae23aa94d3 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -2,7 +2,6 @@ # in alphabetical order. This also helps with merge conflict resolution. binlog.binlog_multi_engine # joro : NDB tests marked as experimental as agreed with bochklin -binlog.binlog_bug23533 # skozlov: BUG#12371924 funcs_1.charset_collation_1 # depends on compile-time decisions -- cgit v1.2.1 From 3efbf30457d76a3e47371f6d4bd4b2ebd7141ffd Mon Sep 17 00:00:00 2001 From: Sergey Glukhov Date: Thu, 26 May 2011 14:06:39 +0400 Subject: Bug#12392636 ASSERTION FAILED: SCALE >= 0 && PRECISION > 0 && SCALE <= PRECISION Assertion happens due to missing NULL value check in Item_func_round::fix_length_and_dec() function. The fix: added NULL value check for second parameter. mysql-test/r/func_math.result: test case mysql-test/t/func_math.test: test case sql/item_func.cc: added NULL value check for second parameter. --- mysql-test/r/func_math.result | 6 ++++++ mysql-test/t/func_math.test | 6 ++++++ sql/item_func.cc | 3 +++ 3 files changed, 15 insertions(+) diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result index b9118feab1a..4e3608240d4 100644 --- a/mysql-test/r/func_math.result +++ b/mysql-test/r/func_math.result @@ -540,4 +540,10 @@ ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a')) -4939092.0000 Warnings: Warning 1292 Truncated incorrect DOUBLE value: 'a' +# +# Bug#12392636 ASSERTION FAILED: SCALE >= 0 && PRECISION > 0 && SCALE <= PRECISION +# +SELECT SUM(DISTINCT (TRUNCATE((.1), NULL))); +SUM(DISTINCT (TRUNCATE((.1), NULL))) +NULL End of 5.1 tests diff --git a/mysql-test/t/func_math.test b/mysql-test/t/func_math.test index 9d51a5c94f9..1906d2d347a 100644 --- a/mysql-test/t/func_math.test +++ b/mysql-test/t/func_math.test @@ -349,4 +349,10 @@ DROP TABLE t1; SELECT ROUND(LEAST(15, -4939092, 0.2704), STDDEV('a')); +--echo # +--echo # Bug#12392636 ASSERTION FAILED: SCALE >= 0 && PRECISION > 0 && SCALE <= PRECISION +--echo # + +SELECT SUM(DISTINCT (TRUNCATE((.1), NULL))); + --echo End of 5.1 tests diff --git a/sql/item_func.cc b/sql/item_func.cc index b1398b78b84..feb87fe5fd7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1972,6 +1972,9 @@ void Item_func_round::fix_length_and_dec() } val1= args[1]->val_int(); + if ((null_value= args[1]->is_null())) + return; + val1_unsigned= args[1]->unsigned_flag; if (val1 < 0) decimals_to_set= val1_unsigned ? 
INT_MAX : 0; -- cgit v1.2.1 From de3776819c2163a8974af7e4e77e5a648e72f5ab Mon Sep 17 00:00:00 2001 From: Sven Sandberg Date: Thu, 26 May 2011 12:50:43 +0200 Subject: BUG#12574820: binlog.binlog_tmp_table timing out in daily and weekly trunk run Problem: MYSQL_BIN_LOG::reset_logs acquires mutexes in wrong order. The correct order is first LOCK_thread_count and then LOCK_log. This function does it the other way around. This leads to deadlock when run in parallel with a thread that takes the two locks in correct order. For example, a thread that disconnects will take the locks in the correct order. Fix: change order of the locks in MYSQL_BIN_LOG::reset_logs: first LOCK_thread_count and then LOCK_log. mysql-test/suite/binlog/r/binlog_reset_master.result: added result file mysql-test/suite/binlog/t/binlog_reset_master.test: Added test case that demonstrates deadlock because of wrong mutex order. The deadlock is between two threads: - RESET MASTER acquires mutexes in wrong order. - client thread shutdown code acquires mutexes in right order. Actually, this test case does not produce deadlock in 5.1, probably the client thread shutdown code does not hold both mutexes at the same time. However, the bug existed in 5.1 (mutexes are taken in the wrong order) so we push the test case to 5.1 too, to prevent future regressions. sql/log.cc: Change mutex acquisition to the correct order: first LOCK_thread_count, then LOCK_log. sql/mysqld.cc: Add debug code to synchronize test case. --- .../suite/binlog/r/binlog_reset_master.result | 1 + mysql-test/suite/binlog/t/binlog_reset_master.test | 26 ++++++++++++++++++++++ sql/log.cc | 15 +++++++------ sql/mysqld.cc | 6 +++++ 4 files changed, 41 insertions(+), 7 deletions(-) create mode 100644 mysql-test/suite/binlog/r/binlog_reset_master.result create mode 100644 mysql-test/suite/binlog/t/binlog_reset_master.test diff --git a/mysql-test/suite/binlog/r/binlog_reset_master.result b/mysql-test/suite/binlog/r/binlog_reset_master.result new file mode 100644 index 00000000000..b3d605560ff --- /dev/null +++ b/mysql-test/suite/binlog/r/binlog_reset_master.result @@ -0,0 +1 @@ +RESET MASTER; diff --git a/mysql-test/suite/binlog/t/binlog_reset_master.test b/mysql-test/suite/binlog/t/binlog_reset_master.test new file mode 100644 index 00000000000..b7ad69da3ea --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_reset_master.test @@ -0,0 +1,26 @@ +# ==== Purpose ==== +# +# Test bugs in RESET MASTER. + +--source include/have_debug.inc +--source include/have_log_bin.inc + +####################################################################### +# BUG#12574820: binlog.binlog_tmp_table timing out in daily and weekly trunk run +# Problem: MYSQL_BIN_LOG::reset_logs acquired LOCK_thread_count and +# LOCK_log in the wrong order. This could cause a deadlock when +# RESET MASTER was run concurrently with a disconnecting thread. +####################################################################### + +# We use sleep, not debug_sync, because the sync point needs to be in +# the thread shut down code after the debug sync facility has been +# shut down. 
+--let $write_var= SET DEBUG="+d,sleep_after_lock_thread_count_before_delete_thd"; CREATE TEMPORARY TABLE test.t1 (a INT); +--let $write_to_file= GENERATE +--disable_query_log +--source include/write_var_to_file.inc +--enable_query_log + +--exec $MYSQL < $write_to_file +RESET MASTER; +--remove_file $write_to_file diff --git a/sql/log.cc b/sql/log.cc index 17642696e7d..77d12641442 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2989,12 +2989,6 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd) DBUG_ENTER("reset_logs"); ha_reset_logs(thd); - /* - We need to get both locks to be sure that no one is trying to - write to the index log file. - */ - pthread_mutex_lock(&LOCK_log); - pthread_mutex_lock(&LOCK_index); /* The following mutex is needed to ensure that no threads call @@ -3002,7 +2996,14 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd) thread. If the transaction involved MyISAM tables, it should go into binlog even on rollback. */ - VOID(pthread_mutex_lock(&LOCK_thread_count)); + pthread_mutex_lock(&LOCK_thread_count); + + /* + We need to get both locks to be sure that no one is trying to + write to the index log file. + */ + pthread_mutex_lock(&LOCK_log); + pthread_mutex_lock(&LOCK_index); /* Save variables so that we can reopen the log */ save_name=name; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 54850f36d10..2d1290bf88a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1903,6 +1903,12 @@ void unlink_thd(THD *thd) pthread_mutex_unlock(&LOCK_connection_count); (void) pthread_mutex_lock(&LOCK_thread_count); + /* + Used by binlog_reset_master. It would be cleaner to use + DEBUG_SYNC here, but that's not possible because the THD's debug + sync feature has been shut down at this point. + */ + DBUG_EXECUTE_IF("sleep_after_lock_thread_count_before_delete_thd", sleep(5);); thread_count--; delete thd; DBUG_VOID_RETURN; -- cgit v1.2.1 From 861291f1ab26266b167cd644f189883528c66943 Mon Sep 17 00:00:00 2001 From: Dmitry Lenev Date: Thu, 26 May 2011 17:14:47 +0400 Subject: Fix for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE::UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". Attempt to update an InnoDB temporary table under LOCK TABLES led to assertion failure in both debug and production builds if this temporary table was explicitly locked for READ. The same scenario works fine for MyISAM temporary tables. The assertion failure was caused by discrepancy between lock that was requested on the rows of temporary table at LOCK TABLES time and by update operation. Since SQL-layer requested a read-lock at LOCK TABLES time InnoDB engine assumed that upcoming statements which are going to be executed under LOCK TABLES will only read table and therefore should acquire only S-lock. An update operation broken this assumption by requesting X-lock. Possible approaches to fixing this problem are: 1) Skip locking of temporary tables as locking doesn't make any sense for connection-local objects. 2) Prohibit changing of temporary table locked by LOCK TABLES ... READ. Unfortunately both of these approaches have drawbacks which make them unviable for stable versions of server. So this patch takes another approach and changes code in such way that LOCK TABLES for a temporary table will always request write lock. In 5.1 version of this patch switch from read lock to write lock is done inside of InnoDBs handler methods as doing it on SQL-layer causes compatibility troubles with FLUSH TABLES WITH READ LOCK. 
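The handler-level rule can be sketched in isolation as follows; the names are simplified stand-ins (no real InnoDB types), and the authoritative change is the ha_innodb.cc hunk later in this patch.

/* Illustrative only: the lock-selection rule described above, with InnoDB
   internals stubbed out.  is_tmp_table and in_lock_tables stand in for
   table->s->tmp_table and thd_sql_command(thd) == SQLCOM_LOCK_TABLES. */
enum row_lock_mode { LOCK_S_MODE, LOCK_X_MODE };

static enum row_lock_mode pick_row_lock(int want_write_lock,
                                        int is_tmp_table,
                                        int in_lock_tables)
{
  /* Writes always need an exclusive lock.  A temporary table locked by
     LOCK TABLES may still be updated later, so it gets X as well. */
  if (want_write_lock || (is_tmp_table && in_lock_tables))
    return LOCK_X_MODE;
  return LOCK_S_MODE;
}
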
mysql-test/suite/innodb/r/innodb_mysql.result: Added test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE::UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". mysql-test/suite/innodb/t/innodb_mysql.test: Added test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE::UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". mysql-test/suite/innodb_plugin/r/innodb_mysql.result: Added test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE::UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". mysql-test/suite/innodb_plugin/t/innodb_mysql.test: Added test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE::UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". storage/innobase/handler/ha_innodb.cc: Assume that a temporary table locked by LOCK TABLES can be updated even if it was only locked for read and therefore an X-lock should be always requested for such tables. storage/innodb_plugin/handler/ha_innodb.cc: Assume that a temporary table locked by LOCK TABLES can be updated even if it was only locked for read and therefore an X-lock should be always requested for such tables. --- mysql-test/suite/innodb/r/innodb_mysql.result | 18 +++++++++++++++- mysql-test/suite/innodb/t/innodb_mysql.test | 25 +++++++++++++++++++--- .../suite/innodb_plugin/r/innodb_mysql.result | 16 ++++++++++++++ mysql-test/suite/innodb_plugin/t/innodb_mysql.test | 20 +++++++++++++++++ storage/innobase/handler/ha_innodb.cc | 12 +++++++++-- storage/innodb_plugin/handler/ha_innodb.cc | 12 +++++++++-- 6 files changed, 95 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/innodb/r/innodb_mysql.result b/mysql-test/suite/innodb/r/innodb_mysql.result index fa26b8b1d01..a7feb692091 100644 --- a/mysql-test/suite/innodb/r/innodb_mysql.result +++ b/mysql-test/suite/innodb/r/innodb_mysql.result @@ -2639,7 +2639,6 @@ COUNT(*) 1537 SET SESSION sort_buffer_size = DEFAULT; DROP TABLE t1; -End of 5.1 tests # # Test for bug #39932 "create table fails if column for FK is in different # case than in corr index". @@ -2668,3 +2667,20 @@ DROP TABLE IF EXISTS t1, t2; CREATE TABLE t1 (a INT, INDEX(a)) engine=innodb; ALTER TABLE t1 RENAME TO t2, DISABLE KEYS; DROP TABLE IF EXISTS t1, t2; +# +# Test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE:: +# UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". +# +DROP TABLE IF EXISTS t1; +CREATE TEMPORARY TABLE t1 (c int) ENGINE = InnoDB; +INSERT INTO t1 VALUES (1); +LOCK TABLES t1 READ; +# Even though temporary table was locked for READ we +# still allow writes to it to be compatible with MyISAM. +# This is possible since due to fact that temporary tables +# are specific to connection and therefore locking for them +# is irrelevant. +UPDATE t1 SET c = 5; +UNLOCK TABLES; +DROP TEMPORARY TABLE t1; +End of 5.1 tests diff --git a/mysql-test/suite/innodb/t/innodb_mysql.test b/mysql-test/suite/innodb/t/innodb_mysql.test index cb6082be0d0..12bd2801556 100644 --- a/mysql-test/suite/innodb/t/innodb_mysql.test +++ b/mysql-test/suite/innodb/t/innodb_mysql.test @@ -868,9 +868,6 @@ SET SESSION sort_buffer_size = DEFAULT; DROP TABLE t1; ---echo End of 5.1 tests - - --echo # --echo # Test for bug #39932 "create table fails if column for FK is in different --echo # case than in corr index". @@ -900,3 +897,25 @@ CREATE TABLE t1 (a INT, INDEX(a)) engine=innodb; ALTER TABLE t1 RENAME TO t2, DISABLE KEYS; DROP TABLE IF EXISTS t1, t2; --enable_warnings + + +--echo # +--echo # Test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE:: +--echo # UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". 
+--echo # +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +CREATE TEMPORARY TABLE t1 (c int) ENGINE = InnoDB; +INSERT INTO t1 VALUES (1); +LOCK TABLES t1 READ; +--echo # Even though temporary table was locked for READ we +--echo # still allow writes to it to be compatible with MyISAM. +--echo # This is possible since due to fact that temporary tables +--echo # are specific to connection and therefore locking for them +--echo # is irrelevant. +UPDATE t1 SET c = 5; +UNLOCK TABLES; +DROP TEMPORARY TABLE t1; + +--echo End of 5.1 tests diff --git a/mysql-test/suite/innodb_plugin/r/innodb_mysql.result b/mysql-test/suite/innodb_plugin/r/innodb_mysql.result index 448bf54ce51..ebaa52d3b4d 100644 --- a/mysql-test/suite/innodb_plugin/r/innodb_mysql.result +++ b/mysql-test/suite/innodb_plugin/r/innodb_mysql.result @@ -2438,4 +2438,20 @@ COUNT(*) 1537 SET SESSION sort_buffer_size = DEFAULT; DROP TABLE t1; +# +# Test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE:: +# UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". +# +DROP TABLE IF EXISTS t1; +CREATE TEMPORARY TABLE t1 (c int) ENGINE = InnoDB; +INSERT INTO t1 VALUES (1); +LOCK TABLES t1 READ; +# Even though temporary table was locked for READ we +# still allow writes to it to be compatible with MyISAM. +# This is possible since due to fact that temporary tables +# are specific to connection and therefore locking for them +# is irrelevant. +UPDATE t1 SET c = 5; +UNLOCK TABLES; +DROP TEMPORARY TABLE t1; End of 5.1 tests diff --git a/mysql-test/suite/innodb_plugin/t/innodb_mysql.test b/mysql-test/suite/innodb_plugin/t/innodb_mysql.test index 08d77ba448a..24beaef2fe7 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb_mysql.test +++ b/mysql-test/suite/innodb_plugin/t/innodb_mysql.test @@ -689,4 +689,24 @@ SET SESSION sort_buffer_size = DEFAULT; DROP TABLE t1; + +--echo # +--echo # Test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE:: +--echo # UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK". +--echo # +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +CREATE TEMPORARY TABLE t1 (c int) ENGINE = InnoDB; +INSERT INTO t1 VALUES (1); +LOCK TABLES t1 READ; +--echo # Even though temporary table was locked for READ we +--echo # still allow writes to it to be compatible with MyISAM. +--echo # This is possible since due to fact that temporary tables +--echo # are specific to connection and therefore locking for them +--echo # is irrelevant. +UPDATE t1 SET c = 5; +UNLOCK TABLES; +DROP TEMPORARY TABLE t1; + --echo End of 5.1 tests diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 2afacf6d2a8..dfe13ccbbfe 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -7325,10 +7325,18 @@ ha_innobase::external_lock( reset_template(prebuilt); - if (lock_type == F_WRLCK) { + if (lock_type == F_WRLCK + || (table->s->tmp_table + && thd_sql_command(thd) == SQLCOM_LOCK_TABLES)) { /* If this is a SELECT, then it is in UPDATE TABLE ... - or SELECT ... FOR UPDATE */ + or SELECT ... FOR UPDATE + + For temporary tables which are locked for READ by LOCK TABLES + updates are still allowed by SQL-layer. In order to accomodate + for such a situation we always request X-lock for such table + at LOCK TABLES time. 
+ */ prebuilt->select_lock_type = LOCK_X; prebuilt->stored_select_lock_type = LOCK_X; } diff --git a/storage/innodb_plugin/handler/ha_innodb.cc b/storage/innodb_plugin/handler/ha_innodb.cc index 2b0dbf82b34..a9c9f379528 100644 --- a/storage/innodb_plugin/handler/ha_innodb.cc +++ b/storage/innodb_plugin/handler/ha_innodb.cc @@ -8642,10 +8642,18 @@ ha_innobase::external_lock( reset_template(prebuilt); - if (lock_type == F_WRLCK) { + if (lock_type == F_WRLCK + || (table->s->tmp_table + && thd_sql_command(thd) == SQLCOM_LOCK_TABLES)) { /* If this is a SELECT, then it is in UPDATE TABLE ... - or SELECT ... FOR UPDATE */ + or SELECT ... FOR UPDATE + + For temporary tables which are locked for READ by LOCK TABLES + updates are still allowed by SQL-layer. In order to accomodate + for such a situation we always request X-lock for such table + at LOCK TABLES time. + */ prebuilt->select_lock_type = LOCK_X; prebuilt->stored_select_lock_type = LOCK_X; } -- cgit v1.2.1 From 56a735b78226f6de336c520936df3485d929d685 Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 27 May 2011 16:23:08 +0700 Subject: Fixed bug#12546938 (formerly known as 61005) - CREATE IF NOT EXIST EVENT will create multiple running events. A CREATE IF NOT EXIST on an event that existed and was enabled caused multiple instances of the event to run. Disabling the event didn't help. If the event was dropped, the event stopped running, but when created again, multiple instances of the event were still running. The only way to get out of this situation was to restart the server. The problem was that Event_db_repository::create_event() didn't return enough information to discriminate between situation when event didn't exist and was created and when event did exist and was not created (but a warning was emitted). As result in the latter case event was added to in-memory queue of events second time. And this led to unwarranted multiple executions of the same event. The solution is to add out-parameter to Event_db_repository::create_event() method which will signal that event was not created because it already exists and so it should not be added to the in-memory queue. mysql-test/r/events_bugs.result: Added results for test for Bug#12546938. mysql-test/t/events_bugs.test: Added test for Bug#12546938. sql/event_db_repository.cc: Event_db_repository::create_event was modified: set newly added out-parameter event_already_exists to true value if event wasn't created because event already existed and IF NOT EXIST clause was present. sql/event_db_repository.h: Added out-parameter 'event_already_exists' to create_event() method. sql/events.cc: Events::create_event was modified: insert new element into event queue only if event was actually created. 
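The shape of the fix can be shown with a small self-contained sketch; the function below is illustrative and does not use the server's real signatures, but it keeps the MySQL convention that returning false means success.

/* Illustrative only: the creator reports through an out-parameter whether it
   merely found an existing event, so the caller queues only genuinely new
   ones. */
#include <stdbool.h>
#include <stdio.h>

static bool create_event(bool if_not_exists, bool *event_already_exists)
{
  bool found_on_disk = true;            /* pretend the event already exists */

  if (found_on_disk)
  {
    *event_already_exists = true;
    return if_not_exists ? false        /* IF NOT EXISTS: warn and succeed */
                         : true;        /* otherwise a duplicate is an error */
  }
  *event_already_exists = false;
  /* ... write the new event record to disk here ... */
  return false;
}

int main(void)
{
  bool already_exists;
  if (!create_event(true, &already_exists) && !already_exists)
    puts("add the event to the in-memory scheduler queue");
  else
    puts("do not queue: creation failed or the event already existed");
  return 0;
}
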
--- mysql-test/r/events_bugs.result | 40 +++++++++++++++++++++++++++++ mysql-test/t/events_bugs.test | 50 ++++++++++++++++++++++++++++++++++++ sql/event_db_repository.cc | 20 +++++++++------ sql/event_db_repository.h | 3 ++- sql/events.cc | 56 +++++++++++++++++++++++------------------ 5 files changed, 136 insertions(+), 33 deletions(-) diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result index dfb8f008c5a..740f94fb061 100644 --- a/mysql-test/r/events_bugs.result +++ b/mysql-test/r/events_bugs.result @@ -535,6 +535,7 @@ DROP EVENT e3; DROP EVENT e2; DROP EVENT e1; SET TIME_ZONE=@save_time_zone; +SET TIMESTAMP=DEFAULT; drop event if exists new_event; CREATE EVENT new_event ON SCHEDULE EVERY 0 SECOND DO SELECT 1; ERROR HY000: INTERVAL is either not positive or too big @@ -756,6 +757,45 @@ SHOW EVENTS; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation DROP DATABASE event_test1; DROP DATABASE event_test12; +# +# Bug#12546938 (formerly known as bug#61005): +# CREATE IF NOT EXIST EVENT WILL CREATE MULTIPLE RUNNING EVENTS +# +USE events_test; +SET GLOBAL event_scheduler = ON; +DROP TABLE IF EXISTS table_bug12546938; +DROP EVENT IF EXISTS event_Bug12546938; +CREATE TABLE table_bug12546938 (i INT); +# Create an event which will be executed with a small delay +# and won't be automatically dropped after that. +CREATE EVENT event_Bug12546938 +ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 SECOND ON COMPLETION PRESERVE +ENABLE DO +BEGIN +INSERT INTO table_bug12546938 VALUES(1); +END +| +# Now try to create the same event using CREATE EVENT IF NOT EXISTS. +# A warning should be emitted. A new event should not be created nor +# the old event should be re-executed. +CREATE EVENT IF NOT EXISTS event_bug12546938 +ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 SECOND ON COMPLETION PRESERVE +ENABLE DO +BEGIN +INSERT INTO table_bug12546938 VALUES (1); +END +| +Warnings: +Note 1537 Event 'event_bug12546938' already exists +# Wait until at least one instance of event is executed. +# Check that only one instance of our event was executed. +SELECT COUNT(*) FROM table_bug12546938; +COUNT(*) +1 +# Clean-up. +DROP EVENT IF EXISTS event_Bug12546938; +DROP TABLE table_bug12546938; +SET GLOBAL EVENT_SCHEDULER = OFF; DROP DATABASE events_test; SET GLOBAL event_scheduler= 'ON'; SET @@global.concurrent_insert= @concurrent_insert; diff --git a/mysql-test/t/events_bugs.test b/mysql-test/t/events_bugs.test index 420e7183621..4601448763c 100644 --- a/mysql-test/t/events_bugs.test +++ b/mysql-test/t/events_bugs.test @@ -857,6 +857,7 @@ DROP EVENT e2; DROP EVENT e1; SET TIME_ZONE=@save_time_zone; +SET TIMESTAMP=DEFAULT; # # START - BUG#28666 CREATE EVENT ... EVERY 0 SECOND let server crash @@ -1235,6 +1236,55 @@ SHOW EVENTS; DROP DATABASE event_test1; DROP DATABASE event_test12; +--echo # +--echo # Bug#12546938 (formerly known as bug#61005): +--echo # CREATE IF NOT EXIST EVENT WILL CREATE MULTIPLE RUNNING EVENTS +--echo # +USE events_test; +SET GLOBAL event_scheduler = ON; + +--disable_warnings +DROP TABLE IF EXISTS table_bug12546938; +DROP EVENT IF EXISTS event_Bug12546938; +--enable_warnings +CREATE TABLE table_bug12546938 (i INT); + +delimiter |; + +--echo # Create an event which will be executed with a small delay +--echo # and won't be automatically dropped after that. 
+CREATE EVENT event_Bug12546938 +ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 SECOND ON COMPLETION PRESERVE +ENABLE DO +BEGIN + INSERT INTO table_bug12546938 VALUES(1); +END +| + +--echo # Now try to create the same event using CREATE EVENT IF NOT EXISTS. +--echo # A warning should be emitted. A new event should not be created nor +--echo # the old event should be re-executed. +CREATE EVENT IF NOT EXISTS event_bug12546938 +ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 SECOND ON COMPLETION PRESERVE +ENABLE DO +BEGIN + INSERT INTO table_bug12546938 VALUES (1); +END +| + +delimiter ;| + +--echo # Wait until at least one instance of event is executed. +let $wait_condition= SELECT COUNT(*) FROM table_bug12546938; +--source include/wait_condition.inc + +--echo # Check that only one instance of our event was executed. +SELECT COUNT(*) FROM table_bug12546938; + +--echo # Clean-up. +DROP EVENT IF EXISTS event_Bug12546938; +DROP TABLE table_bug12546938; +SET GLOBAL EVENT_SCHEDULER = OFF; ########################################################################### # diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index a0765dc6d15..9efd3bca675 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -604,18 +604,21 @@ Event_db_repository::open_event_table(THD *thd, enum thr_lock_type lock_type, only creates a record on disk. @pre The thread handle has no open tables. - @param[in,out] thd THD - @param[in] parse_data Parsed event definition - @param[in] create_if_not TRUE if IF NOT EXISTS clause was provided - to CREATE EVENT statement - + @param[in,out] thd THD + @param[in] parse_data Parsed event definition + @param[in] create_if_not TRUE if IF NOT EXISTS clause was provided + to CREATE EVENT statement + @param[out] event_already_exists When method is completed successfully + set to true if event already exists else + set to false @retval FALSE success @retval TRUE error */ bool Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, - my_bool create_if_not) + bool create_if_not, + bool *event_already_exists) { int ret= 1; TABLE *table= NULL; @@ -641,6 +644,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, { if (create_if_not) { + *event_already_exists= true; push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_EVENT_ALREADY_EXISTS, ER(ER_EVENT_ALREADY_EXISTS), parse_data->name.str); @@ -648,8 +652,10 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, } else my_error(ER_EVENT_ALREADY_EXISTS, MYF(0), parse_data->name.str); + goto end; - } + } else + *event_already_exists= false; DBUG_PRINT("info", ("non-existent, go forward")); diff --git a/sql/event_db_repository.h b/sql/event_db_repository.h index ef778407d1e..a7d7b97ae3c 100644 --- a/sql/event_db_repository.h +++ b/sql/event_db_repository.h @@ -73,7 +73,8 @@ public: Event_db_repository(){} bool - create_event(THD *thd, Event_parse_data *parse_data, my_bool create_if_not); + create_event(THD *thd, Event_parse_data *parse_data, bool create_if_not, + bool *event_already_exists); bool update_event(THD *thd, Event_parse_data *parse_data, LEX_STRING *new_dbname, diff --git a/sql/events.cc b/sql/events.cc index 7edd863ac41..08360ee5f04 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -370,6 +370,7 @@ create_query_string(THD *thd, String *buf) return 0; } + /** Create a new event. 
@@ -390,8 +391,8 @@ bool Events::create_event(THD *thd, Event_parse_data *parse_data, bool if_not_exists) { - int ret; - bool save_binlog_row_based; + bool ret; + bool save_binlog_row_based, event_already_exists; DBUG_ENTER("Events::create_event"); /* @@ -440,28 +441,32 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, pthread_mutex_lock(&LOCK_event_metadata); /* On error conditions my_error() is called so no need to handle here */ - if (!(ret= db_repository->create_event(thd, parse_data, if_not_exists))) + if (!(ret= db_repository->create_event(thd, parse_data, if_not_exists, + &event_already_exists))) { Event_queue_element *new_element; bool dropped= 0; - if (!(new_element= new Event_queue_element())) - ret= TRUE; // OOM - else if ((ret= db_repository->load_named_event(thd, parse_data->dbname, - parse_data->name, - new_element))) - { - if (!db_repository->drop_event(thd, parse_data->dbname, parse_data->name, - TRUE)) - dropped= 1; - delete new_element; - } - else + if (!event_already_exists) { - /* TODO: do not ignore the out parameter and a possible OOM error! */ - bool created; - if (event_queue) - event_queue->create_event(thd, new_element, &created); + if (!(new_element= new Event_queue_element())) + ret= TRUE; // OOM + else if ((ret= db_repository->load_named_event(thd, parse_data->dbname, + parse_data->name, + new_element))) + { + if (!db_repository->drop_event(thd, parse_data->dbname, parse_data->name, + TRUE)) + dropped= 1; + delete new_element; + } + else + { + /* TODO: do not ignore the out parameter and a possible OOM error! */ + bool created; + if (event_queue) + event_queue->create_event(thd, new_element, &created); + } } /* binlog the create event unless it's been successfully dropped @@ -475,13 +480,14 @@ Events::create_event(THD *thd, Event_parse_data *parse_data, { sql_print_error("Event Error: An error occurred while creating query string, " "before writing it into binary log."); - /* Restore the state of binlog format */ - thd->current_stmt_binlog_row_based= save_binlog_row_based; - DBUG_RETURN(TRUE); + ret= true; } - /* If the definer is not set or set to CURRENT_USER, the value of CURRENT_USER - will be written into the binary log as the definer for the SQL thread. */ - ret= write_bin_log(thd, TRUE, log_query.c_ptr(), log_query.length()); + else + /* + If the definer is not set or set to CURRENT_USER, the value of CURRENT_USER + will be written into the binary log as the definer for the SQL thread. + */ + ret= write_bin_log(thd, TRUE, log_query.c_ptr(), log_query.length()); } } pthread_mutex_unlock(&LOCK_event_metadata); -- cgit v1.2.1 From 05098831606c3267fc6f80f6af65a8069e82b56a Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Fri, 27 May 2011 08:09:25 -0300 Subject: BUG 11763056 - 55721: AIX 5.1.50 build failing, cannot locate bzero The problem is that although AIX implements bzero, its prototype is not declared by default. Since AC_CHECK_FUNC(bzero) succeeds even though a prototype is not declared, this breaks compilation in C++ files where a prototype is required. The solution is to only use bzero if a prototype is also declared. configure.in: Check if bzero is declared. No need to specify the includes, unisted.h and strings.h are already part of AC_INCLUDES_DEFAULT. 
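The resulting m_string.h guard boils down to the pattern in this stand-alone illustration; the HAVE_* macros merely stand for the configure-generated defines and are left undefined here, so the fallback branch is taken.

/* Illustrative only: use the library bzero() solely when the build system
   found both the function and a declared prototype; otherwise fall back to
   memset(), which is always declared via string.h. */
#include <string.h>

#if !defined(bzero) && (!defined(HAVE_BZERO) || !defined(HAVE_DECL_BZERO))
# define bzero(a, b) memset((a), 0, (b))
#endif

int main(void)
{
  char buf[8];
  bzero(buf, sizeof(buf));              /* expands to memset() in this sketch */
  return buf[0];
}
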
--- configure.in | 7 +------ include/m_string.h | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/configure.in b/configure.in index 8ba208b1ef5..3603512f3f1 100644 --- a/configure.in +++ b/configure.in @@ -2042,12 +2042,7 @@ MYSQL_TYPE_QSORT AC_FUNC_UTIME_NULL AC_FUNC_VPRINTF -AC_CHECK_DECLS([fdatasync],,, -[ -#ifdef HAVE_UNISTD_H -# include -#endif -]) +AC_CHECK_DECLS([fdatasync, bzero]) AC_CHECK_FUNCS(alarm bfill bmove bsearch bzero \ chsize cuserid fchmod fcntl \ diff --git a/include/m_string.h b/include/m_string.h index 0d248ea0ad3..53572a4ac9e 100644 --- a/include/m_string.h +++ b/include/m_string.h @@ -58,7 +58,7 @@ # define bfill(A,B,C) memset((A),(C),(B)) #endif -#if !defined(bzero) && !defined(HAVE_BZERO) +#if !defined(bzero) && (!defined(HAVE_BZERO) || !defined(HAVE_DECL_BZERO)) # define bzero(A,B) memset((A),0,(B)) #endif -- cgit v1.2.1 From 9b68760fd61869626808bf47ac75a9024ea662b7 Mon Sep 17 00:00:00 2001 From: Davi Arnaut Date: Mon, 30 May 2011 07:42:30 -0300 Subject: Bug#12563279: REGRESSION IN HANDLING PRE-4.1 AUTHENTICATION PACKET The problem is that clients implementing the 4.0 version of the protocol (that is, mysql-4.0) do not null terminate a string at the end of the authentication packet. These clients denote the end of the string with the end of the packet. Although this goes against the documented (see MySQL Internals ClientServer Protocol wiki) description of the protocol, these old clients still need to be supported. The solution is to support the documented and actual behavior of the clients. If a client is using the pre-4.1 version of the protocol, the end of a string in the authentication packet can either be denoted with a null character or by the end of the packet. This restores backwards compatibility with old clients implementing either the documented or actual behavior. sql/password.c: The scrambled message, as provided by the user, might not be properly null terminated. If this is the case, uninitialized memory past the end of the buffer could theoretically be accessed. To ensure that this is never the case, copy the scrambled message over to a null terminated auxiliar buffer. sql/sql_connect.cc: Use different execution paths to read strings depending on the protocol being used. If version 4.0 of the protocol is used, end of string can be denoted with a NUL character or by the end of the packet. If there are not enough bytes left after the current position of the buffer to satisfy the current string, the string is considered to be empty. This is required because old clients do not send the password string field if the password is empty. --- sql/password.c | 32 +++++++------- sql/sql_connect.cc | 121 +++++++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 115 insertions(+), 38 deletions(-) diff --git a/sql/password.c b/sql/password.c index 9204c660b77..29a501986f4 100644 --- a/sql/password.c +++ b/sql/password.c @@ -204,21 +204,16 @@ void scramble_323(char *to, const char *message, const char *password) } -/* - Check scrambled message - Used in pre 4.1 password handling - SYNOPSIS - check_scramble_323() - scrambled scrambled message to check. - message original random message which was used for scrambling; must - be exactly SCRAMBLED_LENGTH_323 bytes long and - NULL-terminated. - hash_pass password which should be used for scrambling - All params are IN. +/** + Check scrambled message. Used in pre 4.1 password handling. 
- RETURN VALUE - 0 - password correct - !0 - password invalid + @param scrambled Scrambled message to check. + @param message Original random message which was used for scrambling. + @param hash_pass Password which should be used for scrambling. + + @remark scrambled and message must be SCRAMBLED_LENGTH_323 bytes long. + + @return FALSE if password is correct, TRUE otherwise. */ my_bool @@ -227,9 +222,16 @@ check_scramble_323(const char *scrambled, const char *message, { struct rand_struct rand_st; ulong hash_message[2]; - char buff[16],*to,extra; /* Big enough for check */ + /* Big enough for checks. */ + char buff[16], scrambled_buff[SCRAMBLE_LENGTH_323 + 1]; + char *to, extra; const char *pos; + /* Ensure that the scrambled message is null-terminated. */ + memcpy(scrambled_buff, scrambled, SCRAMBLE_LENGTH_323); + scrambled_buff[SCRAMBLE_LENGTH_323]= '\0'; + scrambled= scrambled_buff; + hash_password(hash_message, message, SCRAMBLE_LENGTH_323); randominit(&rand_st,hash_pass[0] ^ hash_message[0], hash_pass[1] ^ hash_message[1]); diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 1b27efdd39a..26600584256 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -653,14 +653,21 @@ bool init_new_connection_handler_thread() } #ifndef EMBEDDED_LIBRARY + +/** Get a string according to the protocol of the underlying buffer. */ +typedef char * (*get_proto_string_func_t) (char **, size_t *, size_t *); + /** - Get a null character terminated string from a user-supplied buffer. + Get a string formatted according to the 4.1 version of the MySQL protocol. - @param buffer[in, out] Pointer to the buffer to be scanned. + @param buffer[in, out] Pointer to the user-supplied buffer to be scanned. @param max_bytes_available[in, out] Limit the bytes to scan. @param string_length[out] The number of characters scanned not including the null character. + @remark Strings are always null character terminated in this version of the + protocol. + @remark The string_length does not include the terminating null character. However, after the call, the buffer is increased by string_length+1 bytes, beyond the null character if there still available bytes to @@ -671,9 +678,9 @@ bool init_new_connection_handler_thread() */ static -char *get_null_terminated_string(char **buffer, - size_t *max_bytes_available, - size_t *string_length) +char *get_41_protocol_string(char **buffer, + size_t *max_bytes_available, + size_t *string_length) { char *str= (char *)memchr(*buffer, '\0', *max_bytes_available); @@ -683,7 +690,60 @@ char *get_null_terminated_string(char **buffer, *string_length= (size_t)(str - *buffer); *max_bytes_available-= *string_length + 1; str= *buffer; - *buffer += *string_length + 1; + *buffer += *string_length + 1; + + return str; +} + + +/** + Get a string formatted according to the 4.0 version of the MySQL protocol. + + @param buffer[in, out] Pointer to the user-supplied buffer to be scanned. + @param max_bytes_available[in, out] Limit the bytes to scan. + @param string_length[out] The number of characters scanned not including + the null character. + + @remark If there are not enough bytes left after the current position of + the buffer to satisfy the current string, the string is considered + to be empty and a pointer to empty_c_string is returned. + + @remark A string at the end of the packet is not null terminated. + + @return Pointer to beginning of the string scanned, or a pointer to a empty + string. 
+*/ +static +char *get_40_protocol_string(char **buffer, + size_t *max_bytes_available, + size_t *string_length) +{ + char *str; + size_t len; + + /* No bytes to scan left, treat string as empty. */ + if ((*max_bytes_available) == 0) + { + *string_length= 0; + return empty_c_string; + } + + str= (char *) memchr(*buffer, '\0', *max_bytes_available); + + /* + If the string was not null terminated by the client, + the remainder of the packet is the string. Otherwise, + advance the buffer past the end of the null terminated + string. + */ + if (str == NULL) + len= *string_length= *max_bytes_available; + else + len= (*string_length= (size_t)(str - *buffer)) + 1; + + str= *buffer; + *buffer+= len; + *max_bytes_available-= len; return str; } @@ -695,7 +755,7 @@ char *get_null_terminated_string(char **buffer, @param buffer[in, out] The buffer to scan; updates position after scan. @param max_bytes_available[in, out] Limit the number of bytes to scan @param string_length[out] Number of characters scanned - + @remark In case the length is zero, then the total size of the string is considered to be 1 byte; the size byte. @@ -854,7 +914,7 @@ static int check_connection(THD *thd) part at the end of packet. */ end= strmake(end, thd->scramble, SCRAMBLE_LENGTH_323) + 1; - + int2store(end, server_capabilites); /* write server characteristics: up to 16 bytes allowed */ end[2]=(char) default_charset_info->number; @@ -952,7 +1012,20 @@ static int check_connection(THD *thd) if ((thd->client_capabilities & CLIENT_TRANSACTIONS) && opt_using_transactions) net->return_status= &thd->server_status; - + + /* + The 4.0 and 4.1 versions of the protocol differ on how strings + are terminated. In the 4.0 version, if a string is at the end + of the packet, the string is not null terminated. Do not assume + that the returned string is always null terminated. + */ + get_proto_string_func_t get_string; + + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + get_string= get_41_protocol_string; + else + get_string= get_40_protocol_string; + /* In order to safely scan a head for '\0' string terminators we must keep track of how many bytes remain in the allocated @@ -961,8 +1034,7 @@ static int check_connection(THD *thd) size_t bytes_remaining_in_packet= pkt_len - (end - (char *)net->read_pos); size_t user_len; - char *user= get_null_terminated_string(&end, &bytes_remaining_in_packet, - &user_len); + char *user= get_string(&end, &bytes_remaining_in_packet, &user_len); if (user == NULL) { inc_host_errors(&thd->remote.sin_addr); @@ -991,8 +1063,7 @@ static int check_connection(THD *thd) /* Old passwords are zero terminated strings. */ - passwd= get_null_terminated_string(&end, &bytes_remaining_in_packet, - &passwd_len); + passwd= get_string(&end, &bytes_remaining_in_packet, &passwd_len); } if (passwd == NULL) @@ -1007,8 +1078,7 @@ static int check_connection(THD *thd) if (thd->client_capabilities & CLIENT_CONNECT_WITH_DB) { - db= get_null_terminated_string(&end, &bytes_remaining_in_packet, - &db_len); + db= get_string(&end, &bytes_remaining_in_packet, &db_len); if (db == NULL) { inc_host_errors(&thd->remote.sin_addr); @@ -1021,19 +1091,24 @@ static int check_connection(THD *thd) char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8 uint dummy_errors; - /* Since 4.1 all database names are stored in utf8 */ + /* + Copy and convert the user and database names to the character set used + by the server. Since 4.1 all database names are stored in UTF-8. 
Also, + ensure that the names are properly null-terminated as this is relied + upon later. + */ if (db) { - db_buff[copy_and_convert(db_buff, sizeof(db_buff)-1, - system_charset_info, - db, db_len, - thd->charset(), &dummy_errors)]= 0; + db_len= copy_and_convert(db_buff, sizeof(db_buff)-1, system_charset_info, + db, db_len, thd->charset(), &dummy_errors); + db_buff[db_len]= '\0'; db= db_buff; } - user_buff[user_len= copy_and_convert(user_buff, sizeof(user_buff)-1, - system_charset_info, user, user_len, - thd->charset(), &dummy_errors)]= '\0'; + user_len= copy_and_convert(user_buff, sizeof(user_buff)-1, + system_charset_info, user, user_len, + thd->charset(), &dummy_errors); + user_buff[user_len]= '\0'; user= user_buff; /* If username starts and ends in "'", chop them off */ -- cgit v1.2.1 From 6444fa5327623dacd1030ae0e49cb8bcda8945d9 Mon Sep 17 00:00:00 2001 From: Anitha Gopi Date: Fri, 3 Jun 2011 14:13:10 +0530 Subject: Bug#11756699 : Move test to disabled group --- mysql-test/collections/default.experimental | 1 - mysql-test/t/disabled.def | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index cae23aa94d3..941606a37a5 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -19,7 +19,6 @@ main.ctype_gbk_binlog @solaris # Bug#11754407: main.ctype_gbk_binlog f main.func_str @solaris # joro: Bug#11750406 main.sp @solaris # joro : Bug#11761625 main.query_cache_28249 # Bug#12584161 2009-03-25 main.query_cache_28249 fails sporadically -main.log_tables-big # Bug#11756699 2010-11-15 mattiasj report already exists ndb.* # joro : NDB tests marked as experimental as agreed with bochklin diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index f33fd0352a0..84c981c08c1 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -11,3 +11,5 @@ ############################################################################## kill : Bug#11748945 2008-12-03 HHunger need some changes to be robust enough for pushbuild. read_many_rows_innodb : Bug#11748886 2010-11-15 mattiasj report already exists +main.log_tables-big : Bug#11756699 2010-11-15 mattiasj report already exists + -- cgit v1.2.1 From b502a64bba3143a77632042f02876086ab7dff7b Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Mon, 6 Jun 2011 13:13:54 +0300 Subject: Bug #11749418: 38965: TEST CASES GIS-RTREE, TYPE_FLOAT, TYPE_NEWDECIMAL FAIL IN EMBEDDED SERVER FreeBSD 64 bit needs the FP_X_DNML to fpsetmask() to prevent exceptions from propagating into mysql (as a threaded application). However fpsetmask() itself is deprecated in favor of fedisableexcept(). 1. Fixed the #ifdef to check for FP_X_DNML instead of i386. 2. Added a configure.in check for fedisableexcept() and, if present, this function is called insted of the fpsetmask(). No need for new tests, as the existing tests cover this already. Removed the affected tests from the experimental list. 
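The run-time side of the change reduces to the preference order sketched below; HAVE_FEDISABLEEXCEPT and HAVE_IEEEFP_H stand for the configure probes (none are defined in this stand-alone sketch), and fedisableexcept() is a glibc/BSD extension rather than part of plain C99.

/* Illustrative only: prefer fedisableexcept() when the probe found it,
   otherwise fall back to the older fpsetmask() interface, masking the
   denormal exception only where FP_X_DNML exists. */
#include <fenv.h>
#if !defined(HAVE_FEDISABLEEXCEPT) && defined(HAVE_IEEEFP_H)
# include <ieeefp.h>
#endif

static void mask_fp_exceptions(void)
{
#if defined(HAVE_FEDISABLEEXCEPT)
  fedisableexcept(FE_ALL_EXCEPT);
#elif defined(HAVE_IEEEFP_H) && defined(FP_X_DNML)
  fpsetmask(~(FP_X_INV | FP_X_DNML | FP_X_OFL | FP_X_UFL | FP_X_DZ | FP_X_IMP));
#elif defined(HAVE_IEEEFP_H)
  fpsetmask(~(FP_X_INV | FP_X_OFL | FP_X_UFL | FP_X_DZ | FP_X_IMP));
#endif
}

int main(void)
{
  mask_fp_exceptions();
  return 0;
}
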
--- configure.in | 3 ++- mysql-test/collections/default.experimental | 3 --- sql/mysqld.cc | 19 ++++++++++++------- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/configure.in b/configure.in index 3603512f3f1..901515fecb6 100644 --- a/configure.in +++ b/configure.in @@ -2061,7 +2061,8 @@ AC_CHECK_FUNCS(alarm bfill bmove bsearch bzero \ sighold sigset sigthreadmask port_create sleep \ snprintf socket stpcpy strcasecmp strerror strsignal strnlen strpbrk strstr \ strtol strtoll strtoul strtoull tell tempnam thr_setconcurrency vidattr \ - posix_fallocate backtrace backtrace_symbols backtrace_symbols_fd printstack) + posix_fallocate backtrace backtrace_symbols backtrace_symbols_fd printstack \ + fedisableexcept) # # diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 941606a37a5..8883acfa606 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -46,6 +46,3 @@ parts.partition_mgm_lc1_ndb # joro : NDB tests marked as experiment parts.partition_mgm_lc2_ndb # joro : NDB tests marked as experimental as agreed with bochklin parts.partition_syntax_ndb # joro : NDB tests marked as experimental as agreed with bochklin parts.partition_value_ndb # joro : NDB tests marked as experimental as agreed with bochklin -main.gis-rtree # svoj: due to BUG#11749418 -main.type_float # svoj: due to BUG#11749418 -main.type_newdecimal # svoj: due to BUG#11749418 diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 2d1290bf88a..36f195e6232 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -171,12 +171,12 @@ static void getvolumeID(BYTE *volumeName); int initgroups(const char *,unsigned int); #endif -#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H) +#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H) && !defined(HAVE_FEDISABLEEXCEPT) #include #ifdef HAVE_FP_EXCEPT // Fix type conflict typedef fp_except fp_except_t; #endif -#endif /* __FreeBSD__ && HAVE_IEEEFP_H */ +#endif /* __FreeBSD__ && HAVE_IEEEFP_H && !HAVE_FEDISABLEEXCEPT */ #ifdef HAVE_SYS_FPU_H /* for IRIX to use set_fpc_csr() */ #include @@ -202,19 +202,24 @@ extern "C" my_bool reopen_fstreams(const char *filename, inline void setup_fpu() { -#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H) +#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H) && !defined(HAVE_FEDISABLEEXCEPT) /* We can't handle floating point exceptions with threads, so disable this on freebsd - Don't fall for overflow, underflow,divide-by-zero or loss of precision + Don't fall for overflow, underflow,divide-by-zero or loss of precision. + fpsetmask() is deprecated in favor of fedisableexcept() in C99. */ -#if defined(__i386__) +#if defined(FP_X_DNML) fpsetmask(~(FP_X_INV | FP_X_DNML | FP_X_OFL | FP_X_UFL | FP_X_DZ | FP_X_IMP)); #else fpsetmask(~(FP_X_INV | FP_X_OFL | FP_X_UFL | FP_X_DZ | FP_X_IMP)); -#endif /* __i386__ */ -#endif /* __FreeBSD__ && HAVE_IEEEFP_H */ +#endif /* FP_X_DNML */ +#endif /* __FreeBSD__ && HAVE_IEEEFP_H && !HAVE_FEDISABLEEXCEPT */ + +#ifdef HAVE_FEDISABLEEXCEPT + fedisableexcept(FE_ALL_EXCEPT); +#endif #ifdef HAVE_FESETROUND /* Set FPU rounding mode to "round-to-nearest" */ -- cgit v1.2.1 From 768b9a0ef68b9cf1ff29e46a5a9e0e295da8b8d8 Mon Sep 17 00:00:00 2001 From: Ramil Kalimullin Date: Tue, 7 Jun 2011 19:30:43 +0400 Subject: Bug#11764487: myisam corruption with insert ignore and invalid spatial data Problem: in case of wrong data insert into indexed GEOMETRY fields (e.g. 
NULL value for a not NULL field) MyISAM reported "ERROR 126 (HY000): Incorrect key file for table; try to repair it" due to misuse of the key deletion function. Fix: always use R-tree key functions for R-tree based indexes and B-tree key functions for B-tree based indexes. mysql-test/r/gis-rtree.result: Bug#11764487: myisam corruption with insert ignore and invalid spatial data - test result. mysql-test/t/gis-rtree.test: Bug#11764487: myisam corruption with insert ignore and invalid spatial data - test case. storage/myisam/mi_update.c: Bug#11764487: myisam corruption with insert ignore and invalid spatial data - handling update errors check for HA_ERR_NULL_IN_SPATIAL as well to be consistent with mi_write(); - always use keyinfo->ck_delete()/ck_insert() instead of _mi_ck_delete()/_mi_ck_write() to handle index properly, as it may be of B-tree or R-tree type. storage/myisam/mi_write.c: Bug#11764487: myisam corruption with insert ignore and invalid spatial data - always use keyinfo->ck_delete() instead of _mi_ck_delete() to handle index properly, as it may be of B-tree or R-tree type. --- mysql-test/r/gis-rtree.result | 27 +++++++++++++++++++++++++++ mysql-test/t/gis-rtree.test | 28 ++++++++++++++++++++++++++++ storage/myisam/mi_update.c | 9 +++++---- storage/myisam/mi_write.c | 2 +- 4 files changed, 61 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result index eb9c350f589..1f6d6fa851e 100644 --- a/mysql-test/r/gis-rtree.result +++ b/mysql-test/r/gis-rtree.result @@ -1549,3 +1549,30 @@ HANDLER t1 READ a NEXT; HANDLER t1 CLOSE; DROP TABLE t1; End of 5.0 tests. +# +# Bug #57323/11764487: myisam corruption with insert ignore +# and invalid spatial data +# +CREATE TABLE t1(a LINESTRING NOT NULL, b GEOMETRY NOT NULL, +SPATIAL KEY(a), SPATIAL KEY(b)) ENGINE=MyISAM; +INSERT INTO t1 VALUES(GEOMFROMTEXT("point (0 0)"), GEOMFROMTEXT("point (1 1)")); +INSERT IGNORE INTO t1 SET a=GEOMFROMTEXT("point (-6 0)"), b=GEOMFROMTEXT("error"); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +INSERT IGNORE INTO t1 SET a=GEOMFROMTEXT("point (-6 0)"), b=NULL; +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +SELECT ASTEXT(a), ASTEXT(b) FROM t1; +ASTEXT(a) ASTEXT(b) +POINT(0 0) POINT(1 1) +DROP TABLE t1; +CREATE TABLE t1(a INT NOT NULL, b GEOMETRY NOT NULL, +KEY(a), SPATIAL KEY(b)) ENGINE=MyISAM; +INSERT INTO t1 VALUES(0, GEOMFROMTEXT("point (1 1)")); +INSERT IGNORE INTO t1 SET a=0, b=GEOMFROMTEXT("error"); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +INSERT IGNORE INTO t1 SET a=1, b=NULL; +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +SELECT a, ASTEXT(b) FROM t1; +a ASTEXT(b) +0 POINT(1 1) +DROP TABLE t1; +End of 5.1 tests diff --git a/mysql-test/t/gis-rtree.test b/mysql-test/t/gis-rtree.test index b006096528e..e7e6fa59df0 100644 --- a/mysql-test/t/gis-rtree.test +++ b/mysql-test/t/gis-rtree.test @@ -928,3 +928,31 @@ DROP TABLE t1; --echo End of 5.0 tests. 
+ + +--echo # +--echo # Bug #57323/11764487: myisam corruption with insert ignore +--echo # and invalid spatial data +--echo # + +CREATE TABLE t1(a LINESTRING NOT NULL, b GEOMETRY NOT NULL, + SPATIAL KEY(a), SPATIAL KEY(b)) ENGINE=MyISAM; +INSERT INTO t1 VALUES(GEOMFROMTEXT("point (0 0)"), GEOMFROMTEXT("point (1 1)")); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +INSERT IGNORE INTO t1 SET a=GEOMFROMTEXT("point (-6 0)"), b=GEOMFROMTEXT("error"); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +INSERT IGNORE INTO t1 SET a=GEOMFROMTEXT("point (-6 0)"), b=NULL; +SELECT ASTEXT(a), ASTEXT(b) FROM t1; +DROP TABLE t1; + +CREATE TABLE t1(a INT NOT NULL, b GEOMETRY NOT NULL, + KEY(a), SPATIAL KEY(b)) ENGINE=MyISAM; +INSERT INTO t1 VALUES(0, GEOMFROMTEXT("point (1 1)")); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +INSERT IGNORE INTO t1 SET a=0, b=GEOMFROMTEXT("error"); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +INSERT IGNORE INTO t1 SET a=1, b=NULL; +SELECT a, ASTEXT(b) FROM t1; +DROP TABLE t1; + +--echo End of 5.1 tests diff --git a/storage/myisam/mi_update.c b/storage/myisam/mi_update.c index a18bb5f1443..c767fa44ed5 100644 --- a/storage/myisam/mi_update.c +++ b/storage/myisam/mi_update.c @@ -193,8 +193,8 @@ err: save_errno=my_errno; if (changed) key_changed|= HA_STATE_CHANGED; - if (my_errno == HA_ERR_FOUND_DUPP_KEY || my_errno == HA_ERR_OUT_OF_MEM || - my_errno == HA_ERR_RECORD_FILE_FULL) + if (my_errno == HA_ERR_FOUND_DUPP_KEY || my_errno == HA_ERR_RECORD_FILE_FULL || + my_errno == HA_ERR_NULL_IN_SPATIAL || my_errno == HA_ERR_OUT_OF_MEM) { info->errkey= (int) i; flag=0; @@ -212,8 +212,9 @@ err: { uint new_length=_mi_make_key(info,i,new_key,newrec,pos); uint old_length= _mi_make_key(info,i,old_key,oldrec,pos); - if ((flag++ && _mi_ck_delete(info,i,new_key,new_length)) || - _mi_ck_write(info,i,old_key,old_length)) + if ((flag++ && + share->keyinfo[i].ck_delete(info, i, new_key, new_length)) || + share->keyinfo[i].ck_insert(info, i, old_key, old_length)) break; } } diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index 3c8ebe5dbd8..51ae06b117d 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -211,7 +211,7 @@ err: else { uint key_length=_mi_make_key(info,i,buff,record,filepos); - if (_mi_ck_delete(info,i,buff,key_length)) + if (share->keyinfo[i].ck_delete(info, i, buff, key_length)) { if (local_lock_tree) rw_unlock(&share->key_root_lock[i]); -- cgit v1.2.1 From 6348b7375a506bfe340a390ed6ac832d1e224333 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 9 Jun 2011 13:31:15 +0300 Subject: BLOB instrumentation for Bug#12612184 Race condition in row_upd_clust_rec() If UNIV_DEBUG or UNIV_BLOB_LIGHT_DEBUG is enabled, add !rec_offs_any_null_extern() assertions, ensuring that records do not contain null pointers to externally stored columns in inappropriate places. btr_cur_optimistic_update(): Assert !rec_offs_any_null_extern(). Incomplete records must never be updated or deleted. This assertion will cover also the pessimistic route. row_build(): Assert !rec_offs_any_null_extern(). Search tuples must never be built from incomplete index entries. row_rec_to_index_entry(): Assert !rec_offs_any_null_extern() unless ROW_COPY_DATA is requested. ROW_COPY_DATA is used for multi-versioning, and therefore it might be valid to copy the most recent (uncommitted) version while it contains a null pointer to off-page columns. 
row_vers_build_for_consistent_read(), row_vers_build_for_semi_consistent_read(): Assert !rec_offs_any_null_extern() on all versions except the most recent one. trx_undo_prev_version_build(): Assert !rec_offs_any_null_extern() on the previous version. rb:682 approved by Sunny Bains --- storage/innobase/btr/btr0cur.c | 10 ++++++++ storage/innobase/include/btr0types.h | 5 ++++ storage/innobase/include/rem0rec.h | 13 ++++++++++ storage/innobase/include/rem0rec.ic | 37 ++++++++++++++++++++++++++++ storage/innobase/include/univ.i | 2 ++ storage/innobase/row/row0row.c | 8 +++++++ storage/innobase/row/row0vers.c | 16 +++++++++++++ storage/innobase/trx/trx0rec.c | 4 ++++ storage/innodb_plugin/ChangeLog | 5 ++++ storage/innodb_plugin/btr/btr0cur.c | 5 +++- storage/innodb_plugin/include/rem0rec.h | 14 ++++++++++- storage/innodb_plugin/include/rem0rec.ic | 41 +++++++++++++++++++++++++++++++- storage/innodb_plugin/row/row0row.c | 10 +++++++- storage/innodb_plugin/row/row0vers.c | 18 +++++++++++++- storage/innodb_plugin/trx/trx0rec.c | 6 ++++- 15 files changed, 188 insertions(+), 6 deletions(-) diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c index 6c0497cbd41..1d17aa998f6 100644 --- a/storage/innobase/btr/btr0cur.c +++ b/storage/innobase/btr/btr0cur.c @@ -73,6 +73,13 @@ this many index pages */ + not_empty) \ / (BTR_KEY_VAL_ESTIMATE_N_PAGES + ext_size)) +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/* A BLOB field reference full of zero, for use in assertions and tests. +Initially, BLOB field references are set to zero, in +dtuple_convert_big_rec(). */ +const byte field_ref_zero[BTR_EXTERN_FIELD_REF_SIZE]; +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + /*********************************************************************** Marks all extern fields in a record as owned by the record. This function should be called if the delete mark of a record is removed: a not delete @@ -1585,6 +1592,9 @@ btr_cur_optimistic_update( heap = mem_heap_create(1024); offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ #ifdef UNIV_DEBUG if (btr_cur_print_record_ops && thr) { diff --git a/storage/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h index 8fa0bf0602d..eaa1f36e781 100644 --- a/storage/innobase/include/btr0types.h +++ b/storage/innobase/include/btr0types.h @@ -18,4 +18,9 @@ typedef struct btr_pcur_struct btr_pcur_t; typedef struct btr_cur_struct btr_cur_t; typedef struct btr_search_struct btr_search_t; +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#define BTR_EXTERN_FIELD_REF_SIZE 20 +extern const byte field_ref_zero[BTR_EXTERN_FIELD_REF_SIZE]; +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + #endif diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index 58762fc3111..67baeb7d8d2 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -339,6 +339,19 @@ rec_offs_any_extern( /*================*/ /* out: TRUE if a field is stored externally */ const ulint* offsets);/* in: array returned by rec_get_offsets() */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/******************************************************** +Determine if the offsets are for a record containing null BLOB pointers. 
*/ +UNIV_INLINE +const byte* +rec_offs_any_null_extern( +/*=====================*/ + /* out: first field containing + a null BLOB pointer, + or NULL if none found */ + rec_t* rec, /*!< in: record */ + const ulint* offsets); /*!< in: rec_get_offsets(rec) */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /*************************************************************** Sets the value of the ith field extern storage bit. */ UNIV_INLINE diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic index df66bb13aeb..566c62e30f2 100644 --- a/storage/innobase/include/rem0rec.ic +++ b/storage/innobase/include/rem0rec.ic @@ -9,6 +9,7 @@ Created 5/30/1994 Heikki Tuuri #include "mach0data.h" #include "ut0byte.h" #include "dict0dict.h" +#include "btr0types.h" /* Compact flag ORed to the extra size returned by rec_get_offsets() */ #define REC_OFFS_COMPACT ((ulint) 1 << 31) @@ -1020,6 +1021,42 @@ rec_offs_any_extern( return(FALSE); } +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/******************************************************** +Determine if the offsets are for a record containing null BLOB pointers. */ +UNIV_INLINE +const byte* +rec_offs_any_null_extern( +/*=====================*/ + /* out: first field containing + a null BLOB pointer, + or NULL if none found */ + rec_t* rec, /*!< in: record */ + const ulint* offsets) /*!< in: rec_get_offsets(rec) */ +{ + ulint i; + ut_ad(rec_offs_validate(rec, NULL, offsets)); + + for (i = 0; i < rec_offs_n_fields(offsets); i++) { + if (rec_offs_nth_extern(offsets, i)) { + ulint len; + const byte* field + = rec_get_nth_field(rec, offsets, i, &len); + + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + if (!memcmp(field + len + - BTR_EXTERN_FIELD_REF_SIZE, + field_ref_zero, + BTR_EXTERN_FIELD_REF_SIZE)) { + return(field); + } + } + } + + return(NULL); +} +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + /*************************************************************** Sets the value of the ith field extern storage bit. */ UNIV_INLINE diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index ce5d8a092bf..a67b1b3895e 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -88,6 +88,8 @@ memory is read outside the allocated blocks. */ #if 0 #define UNIV_DEBUG_VALGRIND /* Enable extra Valgrind instrumentation */ +#define UNIV_BLOB_LIGHT_DEBUG /* Enable off-page column + debugging without UNIV_DEBUG */ #define UNIV_DEBUG /* Enable ut_ad() assertions */ #define UNIV_LIST_DEBUG /* debug UT_LIST_ macros */ #define UNIV_MEM_DEBUG /* detect memory leaks etc */ diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index 08e50817db9..f30965d32bb 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -210,6 +210,10 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + if (type != ROW_COPY_POINTERS) { /* Take a copy of rec to heap */ buf = mem_heap_alloc(heap, rec_offs_size(offsets)); @@ -302,6 +306,10 @@ row_rec_to_index_entry( rec = rec_copy(buf, rec, offsets); /* Avoid a debug assertion in rec_offs_validate(). 
*/ rec_offs_make_valid(rec, index, offsets); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + } else { + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ } rec_len = rec_offs_n_fields(offsets); diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c index 23aca8c3f2e..906b46fb51b 100644 --- a/storage/innobase/row/row0vers.c +++ b/storage/innobase/row/row0vers.c @@ -473,6 +473,11 @@ row_vers_build_for_consistent_read( /* The view already sees this version: we can copy it to in_heap and return */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern( + version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets)); *old_vers = rec_copy(buf, version, *offsets); @@ -506,6 +511,10 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets(prev_version, index, *offsets, ULINT_UNDEFINED, offset_heap); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + trx_id = row_get_rec_trx_id(prev_version, index, *offsets); if (read_view_sees_trx_id(view, trx_id)) { @@ -606,6 +615,10 @@ row_vers_build_for_semi_consistent_read( /* We found a version that belongs to a committed transaction: return it. */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + if (rec == version) { *old_vers = rec; err = DB_SUCCESS; @@ -663,6 +676,9 @@ row_vers_build_for_semi_consistent_read( version = prev_version; *offsets = rec_get_offsets(version, index, *offsets, ULINT_UNDEFINED, offset_heap); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ }/* for (;;) */ if (heap) { diff --git a/storage/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c index 38ad53fcfb0..730ac6a6f60 100644 --- a/storage/innobase/trx/trx0rec.c +++ b/storage/innobase/trx/trx0rec.c @@ -1397,6 +1397,10 @@ trx_undo_prev_version_build( return(DB_ERROR); } +# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets)); +# endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + if (row_upd_changes_field_size_or_external(index, offsets, update)) { ulint* ext_vect; ulint n_ext_vect; diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index 0b201816819..fa8b4727cb1 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,8 @@ +2011-06-09 The InnoDB Team + * btr/btr0cur.c, include/rem0rec.h, include/rem0rec.ic, + * row/row0row.c, row/row0vers.c, trx/trx0rec.c: + Instrumentation for Bug#12612184 Race condition in row_upd_clust_rec() + 2011-04-07 The InnoDB Team * handler/ha_innodb.cc, handler/ha_innodb.h, handler/handler0alter.cc: diff --git a/storage/innodb_plugin/btr/btr0cur.c b/storage/innodb_plugin/btr/btr0cur.c index d7b5ed0d135..ea59fe9d025 100644 --- a/storage/innodb_plugin/btr/btr0cur.c +++ b/storage/innodb_plugin/btr/btr0cur.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -1871,6 +1871,9 @@ btr_cur_optimistic_update( heap = mem_heap_create(1024); offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ #ifdef UNIV_DEBUG if (btr_cur_print_record_ops && thr) { diff --git a/storage/innodb_plugin/include/rem0rec.h b/storage/innodb_plugin/include/rem0rec.h index 17d08afabb9..06de23be757 100644 --- a/storage/innodb_plugin/include/rem0rec.h +++ b/storage/innodb_plugin/include/rem0rec.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -480,6 +480,18 @@ ulint rec_offs_any_extern( /*================*/ const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/******************************************************//** +Determine if the offsets are for a record containing null BLOB pointers. +@return first field containing a null BLOB pointer, or NULL if none found */ +UNIV_INLINE +const byte* +rec_offs_any_null_extern( +/*=====================*/ + const rec_t* rec, /*!< in: record */ + const ulint* offsets) /*!< in: rec_get_offsets(rec) */ + __attribute__((nonnull, warn_unused_result)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. @return nonzero if externally stored */ diff --git a/storage/innodb_plugin/include/rem0rec.ic b/storage/innodb_plugin/include/rem0rec.ic index 8e5bd9a7fcd..7cff36fee6c 100644 --- a/storage/innodb_plugin/include/rem0rec.ic +++ b/storage/innodb_plugin/include/rem0rec.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,6 +26,7 @@ Created 5/30/1994 Heikki Tuuri #include "mach0data.h" #include "ut0byte.h" #include "dict0dict.h" +#include "btr0types.h" /* Compact flag ORed to the extra size returned by rec_get_offsets() */ #define REC_OFFS_COMPACT ((ulint) 1 << 31) @@ -1087,6 +1088,44 @@ rec_offs_any_extern( return(UNIV_UNLIKELY(*rec_offs_base(offsets) & REC_OFFS_EXTERNAL)); } +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/******************************************************//** +Determine if the offsets are for a record containing null BLOB pointers. 
+@return first field containing a null BLOB pointer, or NULL if none found */ +UNIV_INLINE +const byte* +rec_offs_any_null_extern( +/*=====================*/ + const rec_t* rec, /*!< in: record */ + const ulint* offsets) /*!< in: rec_get_offsets(rec) */ +{ + ulint i; + ut_ad(rec_offs_validate(rec, NULL, offsets)); + + if (!rec_offs_any_extern(offsets)) { + return(NULL); + } + + for (i = 0; i < rec_offs_n_fields(offsets); i++) { + if (rec_offs_nth_extern(offsets, i)) { + ulint len; + const byte* field + = rec_get_nth_field(rec, offsets, i, &len); + + ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); + if (!memcmp(field + len + - BTR_EXTERN_FIELD_REF_SIZE, + field_ref_zero, + BTR_EXTERN_FIELD_REF_SIZE)) { + return(field); + } + } + } + + return(NULL); +} +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. @return nonzero if externally stored */ diff --git a/storage/innodb_plugin/row/row0row.c b/storage/innodb_plugin/row/row0row.c index 8e806a14a98..7260855858f 100644 --- a/storage/innodb_plugin/row/row0row.c +++ b/storage/innodb_plugin/row/row0row.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -231,6 +231,10 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + if (type != ROW_COPY_POINTERS) { /* Take a copy of rec to heap */ buf = mem_heap_alloc(heap, rec_offs_size(offsets)); @@ -415,6 +419,10 @@ row_rec_to_index_entry( rec = rec_copy(buf, rec, offsets); /* Avoid a debug assertion in rec_offs_validate(). */ rec_offs_make_valid(rec, index, offsets); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + } else { + ut_a(!rec_offs_any_null_extern(rec, offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ } entry = row_rec_to_index_entry_low(rec, index, offsets, n_ext, heap); diff --git a/storage/innodb_plugin/row/row0vers.c b/storage/innodb_plugin/row/row0vers.c index d4fde0b939b..8a7bb842293 100644 --- a/storage/innodb_plugin/row/row0vers.c +++ b/storage/innodb_plugin/row/row0vers.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -550,6 +550,11 @@ row_vers_build_for_consistent_read( /* The view already sees this version: we can copy it to in_heap and return */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern( + version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets)); *old_vers = rec_copy(buf, version, *offsets); @@ -583,6 +588,10 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets(prev_version, index, *offsets, ULINT_UNDEFINED, offset_heap); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + trx_id = row_get_rec_trx_id(prev_version, index, *offsets); if (read_view_sees_trx_id(view, trx_id)) { @@ -682,6 +691,10 @@ row_vers_build_for_semi_consistent_read( /* We found a version that belongs to a committed transaction: return it. */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + if (rec == version) { *old_vers = rec; err = DB_SUCCESS; @@ -739,6 +752,9 @@ row_vers_build_for_semi_consistent_read( version = prev_version; *offsets = rec_get_offsets(version, index, *offsets, ULINT_UNDEFINED, offset_heap); +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(version, *offsets)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ }/* for (;;) */ if (heap) { diff --git a/storage/innodb_plugin/trx/trx0rec.c b/storage/innodb_plugin/trx/trx0rec.c index f50e10ed756..9f2fd59d82b 100644 --- a/storage/innodb_plugin/trx/trx0rec.c +++ b/storage/innodb_plugin/trx/trx0rec.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1577,6 +1577,10 @@ trx_undo_prev_version_build( return(DB_ERROR); } +# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets)); +# endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + if (row_upd_changes_field_size_or_external(index, offsets, update)) { ulint n_ext; -- cgit v1.2.1 From fa913a0b987d94164449511cf0cef5baf1fb9a6f Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 10 Jun 2011 00:03:17 +0700 Subject: Fixed bug#11764334 (formerly bug#57156): ALTER EVENT CHANGES THE EVENT STATUS. Any ALTER EVENT statement on a disabled event enabled it back (unless this ALTER EVENT statement explicitly disabled the event). The problem was that during processing of an ALTER EVENT statement value of status field was overwritten unconditionally even if new value was not specified explicitly. As a consequence this field was set to default value for status which corresponds to ENABLE. The solution is to check if status field was explicitly specified in ALTER EVENT statement before assigning new value to status field. mysql-test/r/events_bugs.result: test's result for Bug#11764334 was added. mysql-test/t/events_bugs.test: new test for Bug#11764334 was added. 
sql/event_db_repository.cc: mysql_event_fill_row() was modified: set value for status field in events tables only in case if statement CREATE EVENT is being processed or if this value was set in ALTER EVENT statement. Event_db_repository::create_event was modified: removed redundant setting of status field after return from call to mysql_event_fill_row(). sql/event_parse_data.h: Event_parse_data structure was modified: added flag status_changed that is set to true if status's value was changed in ALTER EVENT statement. sql/sql_yacc.yy: Set flag status_changed if status was set in ALTER EVENT statement. --- mysql-test/r/events_bugs.result | 16 +++++++++++++++- mysql-test/t/events_bugs.test | 15 +++++++++++++++ sql/event_db_repository.cc | 13 +++++++++---- sql/event_parse_data.cc | 11 +++++++---- sql/event_parse_data.h | 1 + sql/sql_yacc.yy | 3 +++ 6 files changed, 50 insertions(+), 9 deletions(-) diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result index 740f94fb061..73cb4d58cdc 100644 --- a/mysql-test/r/events_bugs.result +++ b/mysql-test/r/events_bugs.result @@ -419,7 +419,7 @@ SET TIME_ZONE= '+04:00'; ALTER EVENT e1 DO SELECT 2; SHOW EVENTS; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation -events_test e1 root@localhost -03:00 RECURRING NULL 1 DAY 2005-12-31 20:58:59 2030-01-03 00:00:00 ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci +events_test e1 root@localhost -03:00 RECURRING NULL 1 DAY 2005-12-31 20:58:59 2030-01-03 00:00:00 DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci DROP EVENT e1; SET TIME_ZONE='+05:00'; CREATE EVENT e1 ON SCHEDULE EVERY 1 DAY STARTS '2006-01-01 00:00:00' DO @@ -796,6 +796,20 @@ COUNT(*) DROP EVENT IF EXISTS event_Bug12546938; DROP TABLE table_bug12546938; SET GLOBAL EVENT_SCHEDULER = OFF; +DROP DATABASE IF EXISTS event_test11764334; +CREATE DATABASE event_test11764334; +USE event_test11764334; +CREATE EVENT ev1 ON SCHEDULE EVERY 3 SECOND DISABLE DO SELECT 1; +SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation +event_test11764334 ev1 root@localhost SYSTEM RECURRING NULL 3 SECOND 2011-06-09 19:59:01 NULL DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci +ALTER EVENT ev1 ON SCHEDULE EVERY 4 SECOND; +SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; +Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation +event_test11764334 ev1 root@localhost SYSTEM RECURRING NULL 4 SECOND 2011-06-09 19:59:01 NULL DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci +DROP EVENT ev1; +DROP DATABASE event_test11764334; +USE test; DROP DATABASE events_test; SET GLOBAL event_scheduler= 'ON'; SET @@global.concurrent_insert= @concurrent_insert; diff --git a/mysql-test/t/events_bugs.test b/mysql-test/t/events_bugs.test index 4601448763c..3617a96c008 100644 --- a/mysql-test/t/events_bugs.test +++ b/mysql-test/t/events_bugs.test @@ -1286,6 +1286,21 @@ DROP EVENT IF EXISTS event_Bug12546938; DROP TABLE table_bug12546938; SET GLOBAL EVENT_SCHEDULER = OFF; +# +# Bug#11764334 - 57156: ALTER EVENT CHANGES THE EVENT STATUS +# +--disable_warnings +DROP DATABASE IF EXISTS event_test11764334; +--enable_warnings +CREATE DATABASE event_test11764334; +USE event_test11764334; 
+CREATE EVENT ev1 ON SCHEDULE EVERY 3 SECOND DISABLE DO SELECT 1; +SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; +ALTER EVENT ev1 ON SCHEDULE EVERY 4 SECOND; +SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; +DROP EVENT ev1; +DROP DATABASE event_test11764334; +USE test; ########################################################################### # # End of tests diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 9efd3bca675..50339165b20 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -226,9 +226,16 @@ mysql_event_fill_row(THD *thd, if (fields[f_num= ET_FIELD_NAME]->store(et->name.str, et->name.length, scs)) goto err_truncate; - /* both ON_COMPLETION and STATUS are NOT NULL thus not calling set_notnull()*/ + /* ON_COMPLETION field is NOT NULL thus not calling set_notnull()*/ rs|= fields[ET_FIELD_ON_COMPLETION]->store((longlong)et->on_completion, TRUE); - rs|= fields[ET_FIELD_STATUS]->store((longlong)et->status, TRUE); + + /* + Set STATUS value unconditionally in case of CREATE EVENT. + For ALTER EVENT set it only if value of this field was changed. + Since STATUS field is NOT NULL call to set_notnull() is not needed. + */ + if (!is_update || et->status_changed) + rs|= fields[ET_FIELD_STATUS]->store((longlong)et->status, TRUE); rs|= fields[ET_FIELD_ORIGINATOR]->store((longlong)et->originator, TRUE); /* @@ -694,8 +701,6 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, if (mysql_event_fill_row(thd, table, parse_data, sp, saved_mode, FALSE)) goto end; - table->field[ET_FIELD_STATUS]->store((longlong)parse_data->status, TRUE); - if ((ret= table->file->ha_write_row(table->record[0]))) { table->file->print_error(ret, MYF(0)); diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc index 86905b38627..fb5c69ea2c5 100644 --- a/sql/event_parse_data.cc +++ b/sql/event_parse_data.cc @@ -46,9 +46,8 @@ Event_parse_data::new_instance(THD *thd) Event_parse_data::Event_parse_data() :on_completion(Event_parse_data::ON_COMPLETION_DEFAULT), - status(Event_parse_data::ENABLED), - do_not_create(FALSE), - body_changed(FALSE), + status(Event_parse_data::ENABLED), status_changed(false), + do_not_create(FALSE), body_changed(FALSE), item_starts(NULL), item_ends(NULL), item_execute_at(NULL), starts_null(TRUE), ends_null(TRUE), execute_at_null(TRUE), item_expression(NULL), expression(0) @@ -140,6 +139,7 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc) else if (status == Event_parse_data::ENABLED) { status= Event_parse_data::DISABLED; + status_changed= true; push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_EVENT_EXEC_TIME_IN_THE_PAST, ER(ER_EVENT_EXEC_TIME_IN_THE_PAST)); @@ -569,7 +569,10 @@ void Event_parse_data::check_originator_id(THD *thd) DBUG_PRINT("info", ("Invoked object status set to SLAVESIDE_DISABLED.")); if ((status == Event_parse_data::ENABLED) || (status == Event_parse_data::DISABLED)) - status = Event_parse_data::SLAVESIDE_DISABLED; + { + status= Event_parse_data::SLAVESIDE_DISABLED; + status_changed= true; + } originator = thd->server_id; } else diff --git a/sql/event_parse_data.h b/sql/event_parse_data.h index 8b42eb23937..6ab2812f8b9 100644 --- a/sql/event_parse_data.h +++ b/sql/event_parse_data.h @@ -49,6 +49,7 @@ public: int on_completion; int status; + bool status_changed; longlong originator; /* do_not_create will be set if STARTS time is in the past and diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 4e24e69af42..3cd36be944f 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy 
@@ -2024,16 +2024,19 @@ opt_ev_status: | ENABLE_SYM { Lex->event_parse_data->status= Event_parse_data::ENABLED; + Lex->event_parse_data->status_changed= true; $$= 1; } | DISABLE_SYM ON SLAVE { Lex->event_parse_data->status= Event_parse_data::SLAVESIDE_DISABLED; + Lex->event_parse_data->status_changed= true; $$= 1; } | DISABLE_SYM { Lex->event_parse_data->status= Event_parse_data::DISABLED; + Lex->event_parse_data->status_changed= true; $$= 1; } ; -- cgit v1.2.1 From 53e4a8520493a33916c7c8079dcc4c09898ee4e4 Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 10 Jun 2011 01:05:10 +0700 Subject: Follow-up for patch of bug#11764334. --- mysql-test/r/events_bugs.result | 4 ++-- mysql-test/t/events_bugs.test | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result index 73cb4d58cdc..8a34a1366b9 100644 --- a/mysql-test/r/events_bugs.result +++ b/mysql-test/r/events_bugs.result @@ -802,11 +802,11 @@ USE event_test11764334; CREATE EVENT ev1 ON SCHEDULE EVERY 3 SECOND DISABLE DO SELECT 1; SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation -event_test11764334 ev1 root@localhost SYSTEM RECURRING NULL 3 SECOND 2011-06-09 19:59:01 NULL DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci +event_test11764334 ev1 root@localhost SYSTEM RECURRING NULL 3 SECOND # # DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci ALTER EVENT ev1 ON SCHEDULE EVERY 4 SECOND; SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation -event_test11764334 ev1 root@localhost SYSTEM RECURRING NULL 4 SECOND 2011-06-09 19:59:01 NULL DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci +event_test11764334 ev1 root@localhost SYSTEM RECURRING NULL 4 SECOND # # DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci DROP EVENT ev1; DROP DATABASE event_test11764334; USE test; diff --git a/mysql-test/t/events_bugs.test b/mysql-test/t/events_bugs.test index 3617a96c008..a57235d744b 100644 --- a/mysql-test/t/events_bugs.test +++ b/mysql-test/t/events_bugs.test @@ -1295,8 +1295,10 @@ DROP DATABASE IF EXISTS event_test11764334; CREATE DATABASE event_test11764334; USE event_test11764334; CREATE EVENT ev1 ON SCHEDULE EVERY 3 SECOND DISABLE DO SELECT 1; +--replace_column 9 # 10 # SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; ALTER EVENT ev1 ON SCHEDULE EVERY 4 SECOND; +--replace_column 9 # 10 # SHOW EVENTS IN event_test11764334 WHERE NAME='ev1'; DROP EVENT ev1; DROP DATABASE event_test11764334; -- cgit v1.2.1 From 4412b5dab64be1c1c69ec6f5941809189545787b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 9 Jun 2011 21:50:41 +0300 Subject: Disable a debug assertion that was added to track down Bug#12612184. row_build(): The record may contain null BLOB pointers when the server is rolling back an insert that was interrupted by a server crash. 
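
For illustration only (this paragraph and the snippet are not part of the original commit): the assertions added for Bug#12612184 flag a clustered index record whose externally stored column still carries an all-zero BLOB reference, which is exactly the state left behind when such a rollback happens. A minimal standalone C sketch of that check follows, assuming only that the reference occupies the last 20 bytes (BTR_EXTERN_FIELD_REF_SIZE) of the stored field; the function name field_ref_is_null and the FIELD_REF_SIZE macro are illustrative and do not exist in the source tree.

  #include <string.h>

  #define FIELD_REF_SIZE 20  /* stands in for BTR_EXTERN_FIELD_REF_SIZE */

  /* All-zero reference pattern; static storage is zero-initialized. */
  static const unsigned char field_ref_zero[FIELD_REF_SIZE];

  /* Returns 1 if the trailing BLOB reference of an externally stored
     field is still all zero, i.e. the pointer was never written. */
  static int field_ref_is_null(const unsigned char *field, size_t len)
  {
      if (len < FIELD_REF_SIZE) {
          return 0; /* the real code asserts len >= BTR_EXTERN_FIELD_REF_SIZE */
      }
      return memcmp(field + len - FIELD_REF_SIZE,
                    field_ref_zero, FIELD_REF_SIZE) == 0;
  }

In the patch itself this comparison lives in rec_offs_any_null_extern() and is wrapped in ut_a() assertions at the call sites listed in the diffstat below; here the one in row_build() is commented out because the rollback case makes it fire on legitimate data.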
--- storage/innobase/row/row0row.c | 6 +++++- storage/innodb_plugin/row/row0row.c | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index f30965d32bb..247f19e097c 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -210,7 +210,11 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#if 0/* defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG*/ + /* This one can fail in trx_rollback_or_clean_all_without_sess() + if the server crashed during an insert before the + btr_store_big_rec_extern_fields() did mtr_commit() + all BLOB pointers to the clustered index record. */ ut_a(!rec_offs_any_null_extern(rec, offsets)); #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ diff --git a/storage/innodb_plugin/row/row0row.c b/storage/innodb_plugin/row/row0row.c index 7260855858f..80b449a416b 100644 --- a/storage/innodb_plugin/row/row0row.c +++ b/storage/innodb_plugin/row/row0row.c @@ -231,7 +231,11 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#if 0 /* defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG */ + /* This one can fail in trx_rollback_active() if + the server crashed during an insert before the + btr_store_big_rec_extern_fields() did mtr_commit() + all BLOB pointers to the clustered index record. */ ut_a(!rec_offs_any_null_extern(rec, offsets)); #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ -- cgit v1.2.1 From 1fea8c1b9070018be12f9c748fec781322944d7c Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 10 Jun 2011 10:52:39 +0700 Subject: Fixed bug#11753738 (formely known as bug#45235) - 5.1 DOES NOT SUPPORT 5.0-ONLY SYNTAX TRIGGERS IN ANY WAY Table with triggers which were using deprecated (5.0-only) syntax became unavailable for any DML and DDL after upgrade to 5.1 version of server. Attempt to execute any statement on such a table resulted in parsing error reported. Since this included DROP TRIGGER and DROP TABLE statements (actually, the latter was allowed but was not functioning properly for such tables) it was impossible to fix the problem without manual operations on .TRG and .TRN files in data directory. The problem was that failure to parse trigger body (due to 5.0-only syntax) when opening trigger file for a table prevented the table from being open. This made all operations on the table impossible (except DROP TABLE which due to peculiarity in its implementation dropped the table but left trigger files around). This patch solves this problem by silencing error which occurs when we parse trigger body during table open. Error message is preserved for the future use and table is marked as having a broken trigger. We also try to analyze parse tree to recover trigger name, which will be needed in order to drop the broken trigger. DML statements which invoke triggers on the table marked as having broken trigger are prohibited and emit saved error message. The same happens for DDL which change triggers except DROP TRIGGER and DROP TABLE which try their best to do what was requested. Table becomes no longer marked as having broken trigger when last such trigger is dropped. mysql-test/r/trigger-compat.result: Add results for test case for bug#45235 mysql-test/t/trigger-compat.test: Add test case for bug#45235. sql/sp_head.cc: Added protection against MEM_ROOT double restoring to sp_head::restore_thd_mem_root() method. 
Since this method can be sometimes called twice during parsing of stored routine (the first time during normal flow of parsing, and the second time when a syntax error is detected) we need to shortcut execution of the method to avoid damaging MEM_ROOT by the second consecutive call to this method. sql/sql_trigger.cc: Added error handler Deprecated_trigger_syntax_handler to catch non-OOM errors during parsing of trigger body. Added handling of parse errors into method Table_triggers_list::check_n_load(). sql/sql_trigger.h: Added new members to handle broken triggers and error messages. --- mysql-test/r/trigger-compat.result | 98 +++++++++++++++++++++ mysql-test/r/trigger.result | 4 +- mysql-test/t/trigger-compat.test | 174 +++++++++++++++++++++++++++++++++++++ sql/sp_head.cc | 15 ++++ sql/sql_parse.cc | 8 +- sql/sql_trigger.cc | 148 ++++++++++++++++++++++++++++--- sql/sql_trigger.h | 35 +++++++- 7 files changed, 464 insertions(+), 18 deletions(-) diff --git a/mysql-test/r/trigger-compat.result b/mysql-test/r/trigger-compat.result index 2bcd919e0db..d456ce8253f 100644 --- a/mysql-test/r/trigger-compat.result +++ b/mysql-test/r/trigger-compat.result @@ -43,3 +43,101 @@ DROP TABLE t2; DROP USER mysqltest_dfn@localhost; DROP USER mysqltest_inv@localhost; DROP DATABASE mysqltest_db1; +USE test; +# +# Bug#45235: 5.1 does not support 5.0-only syntax triggers in any way +# +DROP TABLE IF EXISTS t1, t2, t3; +CREATE TABLE t1 ( a INT ); +CREATE TABLE t2 ( a INT ); +CREATE TABLE t3 ( a INT ); +INSERT INTO t1 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (1), (2), (3); +INSERT INTO t3 VALUES (1), (2), (3); +# We simulate importing a trigger from 5.0 by writing a .TRN file for +# each trigger plus a .TRG file the way MySQL 5.0 would have done it, +# with syntax allowed in 5.0 only. +# +# Note that in 5.0 the following lines are missing from t1.TRG: +# +# client_cs_names='latin1' +# connection_cl_names='latin1_swedish_ci' +# db_cl_names='latin1_swedish_ci' +# We will get parse errors for most DDL and DML statements when the table +# has broken triggers. The parse error refers to the first broken +# trigger. +CREATE TRIGGER tr16 AFTER UPDATE ON t1 FOR EACH ROW INSERT INTO t1 VALUES (1); +ERROR 42000: Trigger 'tr13' has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'a USING t1 a' at line 1' +CREATE TRIGGER tr22 BEFORE INSERT ON t2 FOR EACH ROW DELETE FROM non_existing_table; +ERROR 42000: Unknown trigger has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'Not allowed syntax here, and trigger name cant be extracted either.' 
at line 1' +SHOW TRIGGERS; +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation +tr11 INSERT t1 DELETE FROM t3 BEFORE NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci +tr12 INSERT t1 DELETE FROM t3 AFTER NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci +tr14 DELETE t1 DELETE FROM non_existing_table AFTER NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci +Warnings: +Warning 1603 Triggers for table `test`.`t1` have no creation context +Warning 1603 Triggers for table `test`.`t2` have no creation context +INSERT INTO t1 VALUES (1); +ERROR 42000: Trigger 'tr13' has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'a USING t1 a' at line 1' +INSERT INTO t2 VALUES (1); +ERROR 42000: Unknown trigger has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'Not allowed syntax here, and trigger name cant be extracted either.' at line 1' +DELETE FROM t1; +ERROR 42000: Trigger 'tr13' has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'a USING t1 a' at line 1' +UPDATE t1 SET a = 1 WHERE a = 1; +ERROR 42000: Trigger 'tr13' has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'a USING t1 a' at line 1' +SELECT * FROM t1; +a +1 +2 +3 +RENAME TABLE t1 TO t1_2; +ERROR 42000: Trigger 'tr13' has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'a USING t1 a' at line 1' +SHOW TRIGGERS; +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation +tr11 INSERT t1 DELETE FROM t3 BEFORE NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci +tr12 INSERT t1 DELETE FROM t3 AFTER NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci +tr14 DELETE t1 DELETE FROM non_existing_table AFTER NULL root@localhost latin1 latin1_swedish_ci latin1_swedish_ci +Warnings: +Warning 1603 Triggers for table `test`.`t1` have no creation context +DROP TRIGGER tr11; +Warnings: +Warning 1603 Triggers for table `test`.`t1` have no creation context +DROP TRIGGER tr12; +DROP TRIGGER tr13; +DROP TRIGGER tr14; +DROP TRIGGER tr15; +SHOW TRIGGERS; +Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation +# Make sure there is no trigger file left. +# We write the same trigger files one more time to test DROP TABLE. +DROP TABLE t1; +Warnings: +Warning 1603 Triggers for table `test`.`t1` have no creation context +DROP TABLE t2; +Warnings: +Warning 1603 Triggers for table `test`.`t2` have no creation context +DROP TABLE t3; +# Make sure there is no trigger file left. +CREATE TABLE t1 ( a INT ); +CREATE TABLE t2 ( a INT ); +INSERT INTO t1 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (1), (2), (3); +# We write three trigger files. First trigger is syntaxically incorrect, next trigger is correct +# and last trigger is broken. +# Next we try to execute SHOW CREATE TRGGIR command for broken trigger and then try to drop one. 
+FLUSH TABLE t1; +SHOW CREATE TRIGGER tr12; +Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation +tr12 CREATE DEFINER=`root`@`localhost` TRIGGER tr12 BEFORE INSERT ON t1 FOR EACH ROW DELETE FROM t2 latin1 latin1_swedish_ci latin1_swedish_ci +Warnings: +Warning 1603 Triggers for table `test`.`t1` have no creation context +SHOW CREATE TRIGGER tr11; +Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation +tr11 CREATE DEFINER=`root`@`localhost` TRIGGER tr11 BEFORE DELETE ON t1 FOR EACH ROW DELETE FROM t1 a USING t1 a latin1 latin1_swedish_ci latin1_swedish_ci +DROP TRIGGER tr12; +Warnings: +Warning 1603 Triggers for table `test`.`t1` have no creation context +DROP TRIGGER tr11; +DROP TABLE t1; +DROP TABLE t2; diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index 16b165cdc11..3d7ef5f34bd 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -2134,10 +2134,8 @@ CREATE TRIGGER trg1 BEFORE INSERT ON t2 FOR EACH ROW INSERT/*!INTO*/t1 VALUES (1 # Used to crash SHOW TRIGGERS IN db1; Trigger Event Table Statement Timing Created sql_mode Definer character_set_client collation_connection Database Collation -Warnings: -Warning 1064 You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'VALUES (1)' at line 1 INSERT INTO t2 VALUES (1); -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'VALUES (1)' at line 1 +ERROR 42000: Trigger 'trg1' has an error in its body: 'You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'VALUES (1)' at line 1' SELECT * FROM t1; b # Work around Bug#45235 diff --git a/mysql-test/t/trigger-compat.test b/mysql-test/t/trigger-compat.test index 2cc86692d0d..216e64f8543 100644 --- a/mysql-test/t/trigger-compat.test +++ b/mysql-test/t/trigger-compat.test @@ -106,4 +106,178 @@ DROP TABLE t2; DROP USER mysqltest_dfn@localhost; DROP USER mysqltest_inv@localhost; DROP DATABASE mysqltest_db1; +USE test; + +--echo # +--echo # Bug#45235: 5.1 does not support 5.0-only syntax triggers in any way +--echo # +let $MYSQLD_DATADIR=`SELECT @@datadir`; + +--disable_warnings +DROP TABLE IF EXISTS t1, t2, t3; +--enable_warnings + +CREATE TABLE t1 ( a INT ); +CREATE TABLE t2 ( a INT ); +CREATE TABLE t3 ( a INT ); +INSERT INTO t1 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (1), (2), (3); +INSERT INTO t3 VALUES (1), (2), (3); + +--echo # We simulate importing a trigger from 5.0 by writing a .TRN file for +--echo # each trigger plus a .TRG file the way MySQL 5.0 would have done it, +--echo # with syntax allowed in 5.0 only. 
+--echo # +--echo # Note that in 5.0 the following lines are missing from t1.TRG: +--echo # +--echo # client_cs_names='latin1' +--echo # connection_cl_names='latin1_swedish_ci' +--echo # db_cl_names='latin1_swedish_ci' + +--write_file $MYSQLD_DATADIR/test/tr11.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr12.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr13.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr14.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr15.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/t1.TRG +TYPE=TRIGGERS +triggers='CREATE DEFINER=`root`@`localhost` TRIGGER tr11 BEFORE INSERT ON t1 FOR EACH ROW DELETE FROM t3' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr12 AFTER INSERT ON t1 FOR EACH ROW DELETE FROM t3' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr13 BEFORE DELETE ON t1 FOR EACH ROW DELETE FROM t1 a USING t1 a' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr14 AFTER DELETE ON t1 FOR EACH ROW DELETE FROM non_existing_table' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr15 BEFORE UPDATE ON t1 FOR EACH ROW DELETE FROM non_existing_table a USING non_existing_table a' +sql_modes=0 0 0 0 0 +definers='root@localhost' 'root@localhost' 'root@localhost' 'root@localhost' 'root@localhost' +EOF + +--write_file $MYSQLD_DATADIR/test/t2.TRG +TYPE=TRIGGERS +triggers='Not allowed syntax here, and trigger name cant be extracted either.' +sql_modes=0 +definers='root@localhost' +EOF + +--echo # We will get parse errors for most DDL and DML statements when the table +--echo # has broken triggers. The parse error refers to the first broken +--echo # trigger. +--error ER_PARSE_ERROR +CREATE TRIGGER tr16 AFTER UPDATE ON t1 FOR EACH ROW INSERT INTO t1 VALUES (1); +--error ER_PARSE_ERROR +CREATE TRIGGER tr22 BEFORE INSERT ON t2 FOR EACH ROW DELETE FROM non_existing_table; +SHOW TRIGGERS; +--error ER_PARSE_ERROR +INSERT INTO t1 VALUES (1); +--error ER_PARSE_ERROR +INSERT INTO t2 VALUES (1); +--error ER_PARSE_ERROR +DELETE FROM t1; +--error ER_PARSE_ERROR +UPDATE t1 SET a = 1 WHERE a = 1; +SELECT * FROM t1; +--error ER_PARSE_ERROR +RENAME TABLE t1 TO t1_2; +SHOW TRIGGERS; + +DROP TRIGGER tr11; +DROP TRIGGER tr12; +DROP TRIGGER tr13; +DROP TRIGGER tr14; +DROP TRIGGER tr15; + +SHOW TRIGGERS; + +--echo # Make sure there is no trigger file left. +--list_files $MYSQLD_DATADIR/test/ tr* + +--echo # We write the same trigger files one more time to test DROP TABLE. 
+--write_file $MYSQLD_DATADIR/test/tr11.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr12.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr13.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr14.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr15.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/t1.TRG +TYPE=TRIGGERS +triggers='CREATE DEFINER=`root`@`localhost` TRIGGER tr11 BEFORE INSERT ON t1 FOR EACH ROW DELETE FROM t3' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr12 AFTER INSERT ON t1 FOR EACH ROW DELETE FROM t3' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr13 BEFORE DELETE ON t1 FOR EACH ROW DELETE FROM t1 a USING t1 a' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr14 AFTER DELETE ON t1 FOR EACH ROW DELETE FROM non_existing_table' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr15 BEFORE UPDATE ON t1 FOR EACH ROW DELETE FROM non_existing_table a USING non_existing_table a' +sql_modes=0 0 0 0 0 +definers='root@localhost' 'root@localhost' 'root@localhost' 'root@localhost' 'root@localhost' +EOF + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; + +--echo # Make sure there is no trigger file left. + +--list_files $MYSQLD_DATADIR/test/ tr* + +CREATE TABLE t1 ( a INT ); +CREATE TABLE t2 ( a INT ); +INSERT INTO t1 VALUES (1), (2), (3); +INSERT INTO t2 VALUES (1), (2), (3); + +--echo # We write three trigger files. First trigger is syntaxically incorrect, next trigger is correct +--echo # and last trigger is broken. +--echo # Next we try to execute SHOW CREATE TRGGIR command for broken trigger and then try to drop one. +--write_file $MYSQLD_DATADIR/test/tr11.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/tr12.TRN +TYPE=TRIGGERNAME +trigger_table=t1 +EOF + +--write_file $MYSQLD_DATADIR/test/t1.TRG +TYPE=TRIGGERS +triggers='CREATE the wrongest trigger_in_the_world' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr11 BEFORE DELETE ON t1 FOR EACH ROW DELETE FROM t1 a USING t1 a' 'CREATE DEFINER=`root`@`localhost` TRIGGER tr12 BEFORE INSERT ON t1 FOR EACH ROW DELETE FROM t2' +sql_modes=0 0 0 +definers='root@localhost' 'root@localhost' 'root@localhost' +EOF + +FLUSH TABLE t1; + +SHOW CREATE TRIGGER tr12; +SHOW CREATE TRIGGER tr11; +DROP TRIGGER tr12; +DROP TRIGGER tr11; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/sql/sp_head.cc b/sql/sp_head.cc index a4dd51d8a4a..e5a06566a7d 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2354,6 +2354,21 @@ void sp_head::restore_thd_mem_root(THD *thd) { DBUG_ENTER("sp_head::restore_thd_mem_root"); + + /* + In some cases our parser detects a syntax error and calls + LEX::cleanup_lex_after_parse_error() method only after + finishing parsing the whole routine. In such a situation + sp_head::restore_thd_mem_root() will be called twice - the + first time as part of normal parsing process and the second + time by cleanup_lex_after_parse_error(). + To avoid ruining active arena/mem_root state in this case we + skip restoration of old arena/mem_root if this method has been + already called for this routine. 
+ */ + if (!m_thd) + DBUG_VOID_RETURN; + Item *flist= free_list; // The old list set_query_arena(thd); // Get new free_list and mem_root state= INITIALIZED_FOR_SP; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ecc43f54fa5..b336a06e519 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -7972,10 +7972,14 @@ bool parse_sql(THD *thd, bool mysql_parse_status= MYSQLparse(thd) != 0; - /* Check that if MYSQLparse() failed, thd->is_error() is set. */ + /* + Check that if MYSQLparse() failed, thd->is_error() is set (unless + we have an error handler installed, which might have silenced error). + */ DBUG_ASSERT(!mysql_parse_status || - (mysql_parse_status && thd->is_error())); + (mysql_parse_status && thd->is_error()) || + (mysql_parse_status && thd->get_internal_handler())); /* Reset parser state. */ diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 9348feaa22a..89ee411c8da 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -19,6 +19,7 @@ #include "sp_head.h" #include "sql_trigger.h" #include "parse_file.h" +#include /*************************************************************************/ @@ -292,6 +293,52 @@ private: }; +/** + An error handler that catches all non-OOM errors which can occur during + parsing of trigger body. Such errors are ignored and corresponding error + message is used to construct a more verbose error message which contains + name of problematic trigger. This error message is later emitted when + one tries to perform DML or some of DDL on this table. + Also, if possible, grabs name of the trigger being parsed so it can be + used to correctly drop problematic trigger. +*/ +class Deprecated_trigger_syntax_handler : public Internal_error_handler +{ +private: + + char m_message[MYSQL_ERRMSG_SIZE]; + LEX_STRING *m_trigger_name; + +public: + + Deprecated_trigger_syntax_handler() : m_trigger_name(NULL) {} + + virtual bool handle_error(uint sql_errno, const char *message, + MYSQL_ERROR::enum_warning_level level, THD *thd) + { + if (sql_errno != EE_OUTOFMEMORY && + sql_errno != ER_OUT_OF_RESOURCES) + { + if(thd->lex->spname) + m_trigger_name= &thd->lex->spname->m_name; + if (m_trigger_name) + my_snprintf(m_message, sizeof(m_message), + "Trigger '%s' has an error in its body: '%s'", + m_trigger_name->str, message); + else + my_snprintf(m_message, sizeof(m_message), + "Unknown trigger has an error in its body: '%s'", + message); + return true; + } + return false; + } + + LEX_STRING *get_trigger_name() { return m_trigger_name; } + char *get_error_message() { return m_message; } +}; + + /** Create or drop trigger for table. @@ -575,6 +622,8 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, LEX_STRING *trg_connection_cl_name; LEX_STRING *trg_db_cl_name; + if (check_for_broken_triggers()) + return true; /* Trigger must be in the same schema as target table. */ if (my_strcasecmp(table_alias_charset, table->s->db.str, @@ -848,7 +897,7 @@ static bool rm_trigger_file(char *path, const char *db, @param path char buffer of size FN_REFLEN to be used for constructing path to .TRN file. 
@param db trigger's database name - @param table_name trigger's name + @param trigger_name trigger's name @retval False success @@ -1312,12 +1361,11 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, lex_start(thd); thd->spcont= NULL; - if (parse_sql(thd, & parser_state, creation_ctx)) - { - /* Currently sphead is always deleted in case of a parse error */ - DBUG_ASSERT(lex.sphead == 0); - goto err_with_lex_cleanup; - } + Deprecated_trigger_syntax_handler error_handler; + thd->push_internal_handler(&error_handler); + bool parse_error= parse_sql(thd, & parser_state, creation_ctx); + thd->pop_internal_handler(); + /* Not strictly necessary to invoke this method here, since we know that we've parsed CREATE TRIGGER and not an @@ -1328,6 +1376,52 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, */ lex.set_trg_event_type_for_tables(); + if (parse_error) + { + if (!triggers->m_has_unparseable_trigger) + triggers->set_parse_error_message(error_handler.get_error_message()); + /* Currently sphead is always set to NULL in case of a parse error */ + DBUG_ASSERT(lex.sphead == 0); + if (error_handler.get_trigger_name()) + { + LEX_STRING *trigger_name; + const LEX_STRING *orig_trigger_name= error_handler.get_trigger_name(); + + if (!(trigger_name= alloc_lex_string(&table->mem_root)) || + !(trigger_name->str= strmake_root(&table->mem_root, + orig_trigger_name->str, + orig_trigger_name->length))) + goto err_with_lex_cleanup; + + trigger_name->length= orig_trigger_name->length; + + if (triggers->names_list.push_back(trigger_name, + &table->mem_root)) + goto err_with_lex_cleanup; + } + else + { + /* + The Table_triggers_list is not constructed as a list of + trigger objects as one would expect, but rather of lists of + properties of equal length. Thus, even if we don't get the + trigger name, we still fill all in all the lists with + placeholders as we might otherwise create a skew in the + lists. Obviously, this has to be refactored. 
+ */ + LEX_STRING *empty= alloc_lex_string(&table->mem_root); + if (!empty) + goto err_with_lex_cleanup; + + empty->str= const_cast(""); + empty->length= 0; + if (triggers->names_list.push_back(empty, &table->mem_root)) + goto err_with_lex_cleanup; + } + lex_end(&lex); + continue; + } + lex.sphead->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode); int event= lex.trg_chistics.event; @@ -1368,8 +1462,8 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, if (triggers->names_list.push_back(&lex.sphead->m_name, &table->mem_root)) - goto err_with_lex_cleanup; - + goto err_with_lex_cleanup; + if (!(on_table_name= alloc_lex_string(&table->mem_root))) goto err_with_lex_cleanup; @@ -1394,9 +1488,8 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, char fname[NAME_LEN + 1]; DBUG_ASSERT((!my_strcasecmp(table_alias_charset, lex.query_tables->db, db) || (check_n_cut_mysql50_prefix(db, fname, sizeof(fname)) && - !my_strcasecmp(table_alias_charset, lex.query_tables->db, fname))) && - (!my_strcasecmp(table_alias_charset, lex.query_tables->table_name, - table_name) || + !my_strcasecmp(table_alias_charset, lex.query_tables->db, fname)))); + DBUG_ASSERT((!my_strcasecmp(table_alias_charset, lex.query_tables->table_name, table_name) || (check_n_cut_mysql50_prefix(table_name, fname, sizeof(fname)) && !my_strcasecmp(table_alias_charset, lex.query_tables->table_name, fname)))); #endif @@ -1680,6 +1773,13 @@ bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name) while ((trigger= it_name++)) { + /* + Trigger, which body we failed to parse during call + Table_triggers_list::check_n_load(), might be missing name. + Such triggers have zero-length name and are skipped here. + */ + if (trigger->length == 0) + continue; if (rm_trigname_file(path, db, trigger->str)) { /* @@ -1903,6 +2003,11 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db, } if (table.triggers) { + if (table.triggers->check_for_broken_triggers()) + { + result= 1; + goto end; + } LEX_STRING old_table_name= { (char *) old_table, strlen(old_table) }; LEX_STRING new_table_name= { (char *) new_table, strlen(new_table) }; /* @@ -1991,6 +2096,9 @@ bool Table_triggers_list::process_triggers(THD *thd, sp_head *sp_trigger= bodies[event][time_type]; SELECT_LEX *save_current_select; + if (check_for_broken_triggers()) + return true; + if (sp_trigger == NULL) return FALSE; @@ -2069,6 +2177,22 @@ void Table_triggers_list::mark_fields_used(trg_event_type event) } +/** + Signals to the Table_triggers_list that a parse error has occured when + reading a trigger from file. This makes the Table_triggers_list enter an + error state flagged by m_has_unparseable_trigger == true. The error message + will be used whenever a statement invoking or manipulating triggers is + issued against the Table_triggers_list's table. + + @param error_message The error message thrown by the parser. + */ +void Table_triggers_list::set_parse_error_message(char *error_message) +{ + m_has_unparseable_trigger= true; + strcpy(m_parse_error_message, error_message); +} + + /** Trigger BUG#14090 compatibility hook. 
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index f6754a75284..c077d9567f8 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -62,6 +62,27 @@ class Table_triggers_list: public Sql_alloc */ GRANT_INFO subject_table_grants[TRG_EVENT_MAX][TRG_ACTION_MAX]; + /** + This flag indicates that one of the triggers was not parsed successfully, + and as a precaution the object has entered a state where all trigger + access results in errors until all such triggers are dropped. It is not + safe to add triggers since we don't know if the broken trigger has the + same name or event type. Nor is it safe to invoke any trigger for the + aforementioned reasons. The only safe operations are drop_trigger and + drop_all_triggers. + + @see Table_triggers_list::set_parse_error + */ + bool m_has_unparseable_trigger; + + /** + This error will be displayed when the user tries to manipulate or invoke + triggers on a table that has broken triggers. It will get set only once + per statement and thus will contain the first parse error encountered in + the trigger file. + */ + char m_parse_error_message[MYSQL_ERRMSG_SIZE]; + public: /** Field responsible for storing triggers definitions in file. @@ -84,7 +105,7 @@ public: /* End of character ser context. */ Table_triggers_list(TABLE *table_arg): - record1_field(0), trigger_table(table_arg) + record1_field(0), trigger_table(table_arg), m_has_unparseable_trigger(false) { bzero((char *)bodies, sizeof(bodies)); bzero((char *)trigger_fields, sizeof(trigger_fields)); @@ -140,6 +161,8 @@ public: void mark_fields_used(trg_event_type event); + void set_parse_error_message(char *error_message); + friend class Item_trigger_field; friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, TABLE_LIST *table); @@ -155,6 +178,16 @@ private: const char *new_db_name, LEX_STRING *old_table_name, LEX_STRING *new_table_name); + + bool check_for_broken_triggers() + { + if (m_has_unparseable_trigger) + { + my_message(ER_PARSE_ERROR, m_parse_error_message, MYF(0)); + return true; + } + return false; + } }; extern const LEX_STRING trg_action_time_type_names[]; -- cgit v1.2.1 From 9cfba6a7e129d398150d4f700946477ecb165530 Mon Sep 17 00:00:00 2001 From: Karen Langford Date: Fri, 10 Jun 2011 16:37:11 +0200 Subject: Raise version number after cloning 5.1.58 --- configure.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.in b/configure.in index 901515fecb6..0948fe1349d 100644 --- a/configure.in +++ b/configure.in @@ -12,7 +12,7 @@ dnl dnl When changing the major version number please also check the switch dnl statement in mysqlbinlog::check_master_version(). You may also need dnl to update version.c in ndb. -AC_INIT([MySQL Server], [5.1.58], [], [mysql]) +AC_INIT([MySQL Server], [5.1.59], [], [mysql]) AC_CONFIG_SRCDIR([sql/mysqld.cc]) AC_CANONICAL_SYSTEM -- cgit v1.2.1 From 93823c525020e02109746dcdb5c2449658fa82e9 Mon Sep 17 00:00:00 2001 From: Karen Langford Date: Fri, 10 Jun 2011 18:12:18 +0200 Subject: increase version number to 5.0.95 --- configure.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.in b/configure.in index fdfb7eae871..a06f726c738 100644 --- a/configure.in +++ b/configure.in @@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc) AC_CANONICAL_SYSTEM # The Docs Makefile.am parses this line! 
# remember to also change ndb version below and update version.c in ndb -AM_INIT_AUTOMAKE(mysql, 5.0.94) +AM_INIT_AUTOMAKE(mysql, 5.0.95) AM_CONFIG_HEADER([include/config.h:config.h.in]) PROTOCOL_VERSION=10 @@ -23,7 +23,7 @@ NDB_SHARED_LIB_VERSION=$NDB_SHARED_LIB_MAJOR_VERSION:0:0 # ndb version NDB_VERSION_MAJOR=5 NDB_VERSION_MINOR=0 -NDB_VERSION_BUILD=94 +NDB_VERSION_BUILD=95 NDB_VERSION_STATUS="" # Set all version vars based on $VERSION. How do we do this more elegant ? -- cgit v1.2.1 From 98d527d3cb89b5e7f8e14d4e5ccf43b03b6f6e25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 14 Jun 2011 08:40:32 +0300 Subject: Merge a fix from mysql-5.5 to mysql-5.1: revno 2995.37.209 revision id marko.makela@oracle.com-20110518120508-qhn7vz814vn77v5k parent marko.makela@oracle.com-20110517121555-lmple24qzxqkzep4 timestamp: Wed 2011-05-18 15:05:08 +0300 message: Fix a bogus UNIV_SYNC_DEBUG failure in the fix of Bug #59641 or Oracle Bug #11766513. trx_undo_free_prepared(): Do not acquire or release trx->rseg->mutex. This code is invoked in the single-threaded part of shutdown, therefore a mutex is not needed. --- storage/innodb_plugin/trx/trx0undo.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/storage/innodb_plugin/trx/trx0undo.c b/storage/innodb_plugin/trx/trx0undo.c index 68ff82f618c..7f03b68fb55 100644 --- a/storage/innodb_plugin/trx/trx0undo.c +++ b/storage/innodb_plugin/trx/trx0undo.c @@ -1986,8 +1986,6 @@ trx_undo_free_prepared( /*===================*/ trx_t* trx) /*!< in/out: PREPARED transaction */ { - mutex_enter(&trx->rseg->mutex); - ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS); if (trx->update_undo) { @@ -2002,6 +2000,5 @@ trx_undo_free_prepared( trx->insert_undo); trx_undo_mem_free(trx->insert_undo); } - mutex_exit(&trx->rseg->mutex); } #endif /* !UNIV_HOTBACKUP */ -- cgit v1.2.1 From a8629376994c1365923ac9144354d7cd99d8288a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 15 Jun 2011 10:16:59 +0300 Subject: Introduce UNIV_BLOB_NULL_DEBUG for temporarily hiding Bug#12650861. Some ut_a(!rec_offs_any_null_extern()) assertion failures are indicating genuine BLOB bugs, others are bogus failures when rolling back incomplete transactions at crash recovery. This needs more work, and until I get a chance to work on it, other testing must not be disrupted by this. 
--- storage/innobase/btr/btr0cur.c | 4 ++-- storage/innobase/include/rem0rec.h | 4 ++-- storage/innobase/include/rem0rec.ic | 4 ++-- storage/innobase/include/univ.i | 4 ++-- storage/innobase/row/row0row.c | 8 ++++---- storage/innobase/row/row0vers.c | 16 ++++++++-------- storage/innobase/trx/trx0rec.c | 4 ++-- storage/innodb_plugin/btr/btr0cur.c | 4 ++-- storage/innodb_plugin/include/rem0rec.h | 4 ++-- storage/innodb_plugin/include/rem0rec.ic | 4 ++-- storage/innodb_plugin/include/univ.i | 2 ++ storage/innodb_plugin/row/row0row.c | 8 ++++---- storage/innodb_plugin/row/row0vers.c | 16 ++++++++-------- storage/innodb_plugin/trx/trx0rec.c | 4 ++-- 14 files changed, 44 insertions(+), 42 deletions(-) diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c index 1d17aa998f6..d5bed3bec99 100644 --- a/storage/innobase/btr/btr0cur.c +++ b/storage/innobase/btr/btr0cur.c @@ -1592,9 +1592,9 @@ btr_cur_optimistic_update( heap = mem_heap_create(1024); offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ #ifdef UNIV_DEBUG if (btr_cur_print_record_ops && thr) { diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index 67baeb7d8d2..a1a206c3281 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -339,7 +339,7 @@ rec_offs_any_extern( /*================*/ /* out: TRUE if a field is stored externally */ const ulint* offsets);/* in: array returned by rec_get_offsets() */ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG /******************************************************** Determine if the offsets are for a record containing null BLOB pointers. */ UNIV_INLINE @@ -351,7 +351,7 @@ rec_offs_any_null_extern( or NULL if none found */ rec_t* rec, /*!< in: record */ const ulint* offsets); /*!< in: rec_get_offsets(rec) */ -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ /*************************************************************** Sets the value of the ith field extern storage bit. */ UNIV_INLINE diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic index 566c62e30f2..9e659f12881 100644 --- a/storage/innobase/include/rem0rec.ic +++ b/storage/innobase/include/rem0rec.ic @@ -1021,7 +1021,7 @@ rec_offs_any_extern( return(FALSE); } -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG /******************************************************** Determine if the offsets are for a record containing null BLOB pointers. */ UNIV_INLINE @@ -1055,7 +1055,7 @@ rec_offs_any_null_extern( return(NULL); } -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ /*************************************************************** Sets the value of the ith field extern storage bit. */ diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index a67b1b3895e..8eb78491b04 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -88,8 +88,8 @@ memory is read outside the allocated blocks. 
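
As a sketch of the gating pattern this commit applies (illustrative only: the wrapper macro blob_null_check below does not appear in the patch, while ut_a() and rec_offs_any_null_extern() are the real InnoDB names), the strict check is compiled in only when the dedicated macro is defined, so it costs nothing in ordinary builds:

  #ifdef UNIV_BLOB_NULL_DEBUG
  # define blob_null_check(rec, offsets) \
      ut_a(!rec_offs_any_null_extern(rec, offsets))
  #else
  # define blob_null_check(rec, offsets) ((void) 0)
  #endif

Concretely, the commit replaces the earlier "defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG" guards with the single UNIV_BLOB_NULL_DEBUG macro (off by default in univ.i), so the assertions remain available for targeted debugging without tripping up regular UNIV_DEBUG testing.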
*/ #if 0 #define UNIV_DEBUG_VALGRIND /* Enable extra Valgrind instrumentation */ -#define UNIV_BLOB_LIGHT_DEBUG /* Enable off-page column - debugging without UNIV_DEBUG */ +#define UNIV_BLOB_NULL_DEBUG /* Enable deep off-page + column debugging */ #define UNIV_DEBUG /* Enable ut_ad() assertions */ #define UNIV_LIST_DEBUG /* debug UT_LIST_ macros */ #define UNIV_MEM_DEBUG /* detect memory leaks etc */ diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index 247f19e097c..b9efdcfbfdd 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -210,13 +210,13 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } -#if 0/* defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG*/ +#if 0 && defined UNIV_BLOB_NULL_DEBUG /* This one can fail in trx_rollback_or_clean_all_without_sess() if the server crashed during an insert before the btr_store_big_rec_extern_fields() did mtr_commit() all BLOB pointers to the clustered index record. */ ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* 0 && UNIV_BLOB_NULL_DEBUG */ if (type != ROW_COPY_POINTERS) { /* Take a copy of rec to heap */ @@ -310,10 +310,10 @@ row_rec_to_index_entry( rec = rec_copy(buf, rec, offsets); /* Avoid a debug assertion in rec_offs_validate(). */ rec_offs_make_valid(rec, index, offsets); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG } else { ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ } rec_len = rec_offs_n_fields(offsets); diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c index 906b46fb51b..a52ef3cc083 100644 --- a/storage/innobase/row/row0vers.c +++ b/storage/innobase/row/row0vers.c @@ -473,10 +473,10 @@ row_vers_build_for_consistent_read( /* The view already sees this version: we can copy it to in_heap and return */ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#if defined UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern( version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets)); @@ -511,9 +511,9 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets(prev_version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ trx_id = row_get_rec_trx_id(prev_version, index, *offsets); @@ -615,9 +615,9 @@ row_vers_build_for_semi_consistent_read( /* We found a version that belongs to a committed transaction: return it. 
*/ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ if (rec == version) { *old_vers = rec; @@ -676,9 +676,9 @@ row_vers_build_for_semi_consistent_read( version = prev_version; *offsets = rec_get_offsets(version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ }/* for (;;) */ if (heap) { diff --git a/storage/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c index 730ac6a6f60..2a9224b0a72 100644 --- a/storage/innobase/trx/trx0rec.c +++ b/storage/innobase/trx/trx0rec.c @@ -1397,9 +1397,9 @@ trx_undo_prev_version_build( return(DB_ERROR); } -# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +# ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(rec, offsets)); -# endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +# endif /* UNIV_BLOB_NULL_DEBUG */ if (row_upd_changes_field_size_or_external(index, offsets, update)) { ulint* ext_vect; diff --git a/storage/innodb_plugin/btr/btr0cur.c b/storage/innodb_plugin/btr/btr0cur.c index ea59fe9d025..84ba0b99e58 100644 --- a/storage/innodb_plugin/btr/btr0cur.c +++ b/storage/innodb_plugin/btr/btr0cur.c @@ -1871,9 +1871,9 @@ btr_cur_optimistic_update( heap = mem_heap_create(1024); offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ #ifdef UNIV_DEBUG if (btr_cur_print_record_ops && thr) { diff --git a/storage/innodb_plugin/include/rem0rec.h b/storage/innodb_plugin/include/rem0rec.h index 06de23be757..fff44eecb00 100644 --- a/storage/innodb_plugin/include/rem0rec.h +++ b/storage/innodb_plugin/include/rem0rec.h @@ -480,7 +480,7 @@ ulint rec_offs_any_extern( /*================*/ const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG /******************************************************//** Determine if the offsets are for a record containing null BLOB pointers. @return first field containing a null BLOB pointer, or NULL if none found */ @@ -491,7 +491,7 @@ rec_offs_any_null_extern( const rec_t* rec, /*!< in: record */ const ulint* offsets) /*!< in: rec_get_offsets(rec) */ __attribute__((nonnull, warn_unused_result)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. @return nonzero if externally stored */ diff --git a/storage/innodb_plugin/include/rem0rec.ic b/storage/innodb_plugin/include/rem0rec.ic index 7cff36fee6c..252484a7433 100644 --- a/storage/innodb_plugin/include/rem0rec.ic +++ b/storage/innodb_plugin/include/rem0rec.ic @@ -1088,7 +1088,7 @@ rec_offs_any_extern( return(UNIV_UNLIKELY(*rec_offs_base(offsets) & REC_OFFS_EXTERNAL)); } -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG /******************************************************//** Determine if the offsets are for a record containing null BLOB pointers. 
@return first field containing a null BLOB pointer, or NULL if none found */ @@ -1124,7 +1124,7 @@ rec_offs_any_null_extern( return(NULL); } -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. diff --git a/storage/innodb_plugin/include/univ.i b/storage/innodb_plugin/include/univ.i index 6ac227a59a6..63d8e7140b5 100644 --- a/storage/innodb_plugin/include/univ.i +++ b/storage/innodb_plugin/include/univ.i @@ -179,6 +179,8 @@ command. Not tested on Windows. */ debugging without UNIV_DEBUG */ #define UNIV_BLOB_LIGHT_DEBUG /* Enable off-page column debugging without UNIV_DEBUG */ +#define UNIV_BLOB_NULL_DEBUG /* Enable deep off-page + column debugging */ #define UNIV_DEBUG /* Enable ut_ad() assertions and disable UNIV_INLINE */ #define UNIV_DEBUG_LOCK_VALIDATE /* Enable diff --git a/storage/innodb_plugin/row/row0row.c b/storage/innodb_plugin/row/row0row.c index 80b449a416b..8aba375d046 100644 --- a/storage/innodb_plugin/row/row0row.c +++ b/storage/innodb_plugin/row/row0row.c @@ -231,13 +231,13 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } -#if 0 /* defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG */ +#if 0 && defined UNIV_BLOB_NULL_DEBUG /* This one can fail in trx_rollback_active() if the server crashed during an insert before the btr_store_big_rec_extern_fields() did mtr_commit() all BLOB pointers to the clustered index record. */ ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* 0 && UNIV_BLOB_NULL_DEBUG */ if (type != ROW_COPY_POINTERS) { /* Take a copy of rec to heap */ @@ -423,10 +423,10 @@ row_rec_to_index_entry( rec = rec_copy(buf, rec, offsets); /* Avoid a debug assertion in rec_offs_validate(). */ rec_offs_make_valid(rec, index, offsets); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG } else { ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ } entry = row_rec_to_index_entry_low(rec, index, offsets, n_ext, heap); diff --git a/storage/innodb_plugin/row/row0vers.c b/storage/innodb_plugin/row/row0vers.c index 8a7bb842293..2d39f92d18f 100644 --- a/storage/innodb_plugin/row/row0vers.c +++ b/storage/innodb_plugin/row/row0vers.c @@ -550,10 +550,10 @@ row_vers_build_for_consistent_read( /* The view already sees this version: we can copy it to in_heap and return */ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern( version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets)); @@ -588,9 +588,9 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets(prev_version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ trx_id = row_get_rec_trx_id(prev_version, index, *offsets); @@ -691,9 +691,9 @@ row_vers_build_for_semi_consistent_read( /* We found a version that belongs to a committed transaction: return it. 
*/ -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ if (rec == version) { *old_vers = rec; @@ -752,9 +752,9 @@ row_vers_build_for_semi_consistent_read( version = prev_version; *offsets = rec_get_offsets(version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +#ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +#endif /* UNIV_BLOB_NULL_DEBUG */ }/* for (;;) */ if (heap) { diff --git a/storage/innodb_plugin/trx/trx0rec.c b/storage/innodb_plugin/trx/trx0rec.c index 9f2fd59d82b..297838365d5 100644 --- a/storage/innodb_plugin/trx/trx0rec.c +++ b/storage/innodb_plugin/trx/trx0rec.c @@ -1577,9 +1577,9 @@ trx_undo_prev_version_build( return(DB_ERROR); } -# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +# ifdef UNIV_BLOB_NULL_DEBUG ut_a(!rec_offs_any_null_extern(rec, offsets)); -# endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ +# endif /* UNIV_BLOB_NULL_DEBUG */ if (row_upd_changes_field_size_or_external(index, offsets, update)) { ulint n_ext; -- cgit v1.2.1 From 5b4ceba58d1c9c35e0cba1f126290009bd7643ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 16 Jun 2011 10:27:21 +0300 Subject: Bug#12612184 Race condition after btr_cur_pessimistic_update() btr_cur_compress_if_useful(), btr_compress(): Add the parameter ibool adjust. If adjust=TRUE, adjust the cursor position after compressing the page. btr_lift_page_up(): Return a pointer to the father page. BTR_KEEP_POS_FLAG: A new flag for btr_cur_pessimistic_update(). btr_cur_pessimistic_update(): If *big_rec != NULL and flags & BTR_KEEP_POS_FLAG, keep the cursor positioned on the updated record. Also, do not release the index tree x-lock if *big_rec != NULL. btr_cur_mtr_commit_and_start(): Commits and restarts a mini-transaction so that it will retain an x-lock on index->lock and the page of the cursor. This is invoked when btr_cur_pessimistic_update() returns *big_rec != NULL. In all callers of btr_cur_pessimistic_update() that do not pass BTR_KEEP_POS_FLAG, assert that *big_rec == NULL. btr_cur_compress(): Unused function [in the built-in MySQL 5.1], remove. page_rec_get_nth(): Return the nth record on the page (an inverse function of page_rec_get_n_recs_before()). Refactored from page_get_middle_rec(). page_get_middle_rec(): Invoke page_rec_get_nth(). page_cur_insert_rec_zip_reorg(): Make use of the page directory shortcuts in page_rec_get_nth() instead of scanning the whole list of records. row_ins_clust_index_entry_by_modify(): Pass BTR_KEEP_POS_FLAG to btr_cur_pessimistic_update(). row_ins_index_entry_low(): If row_ins_clust_index_entry_by_modify() returns a big_rec, invoke btr_cur_mtr_commit_and_start() in order to commit and start the mini-transaction without releasing the x-locks on index->lock and the cursor page, and write the big_rec. Releasing the page latch in mtr_commit() caused a race condition. row_upd_clust_rec(): Pass BTR_KEEP_POS_FLAG to btr_cur_pessimistic_update(). If it returns a big_rec, invoke btr_cur_mtr_commit_and_start() in order to commit and start the mini-transaction without releasing the x-locks on index->lock and the cursor page, and write the big_rec. Releasing the page latch in mtr_commit() caused a race condition. sync_thread_add_level(): Add the parameter ibool relock. 
When TRUE, bypass the latching order rules. rw_lock_add_debug_info(): For nested X-lock requests, pass relock=TRUE to sync_thread_add_level(). rb:678 approved by Jimmy Yang --- storage/innobase/btr/btr0btr.c | 40 ++++++++--- storage/innobase/btr/btr0cur.c | 106 ++++++++++++++++++++--------- storage/innobase/include/btr0btr.h | 12 ++-- storage/innobase/include/btr0cur.h | 39 ++++++----- storage/innobase/include/buf0buf.h | 19 ++++++ storage/innobase/include/buf0buf.ic | 2 +- storage/innobase/include/page0page.h | 20 ++++-- storage/innobase/include/page0page.ic | 16 +++++ storage/innobase/include/sync0sync.h | 3 +- storage/innobase/page/page0page.c | 31 ++++----- storage/innobase/row/row0ins.c | 44 ++++++++++-- storage/innobase/row/row0umod.c | 2 + storage/innobase/row/row0upd.c | 34 ++++++--- storage/innobase/sync/sync0rw.c | 4 +- storage/innobase/sync/sync0sync.c | 10 ++- storage/innodb_plugin/ChangeLog | 10 +++ storage/innodb_plugin/btr/btr0btr.c | 52 ++++++++++---- storage/innodb_plugin/btr/btr0cur.c | 80 +++++++++++++++++++--- storage/innodb_plugin/include/btr0btr.h | 15 ++-- storage/innodb_plugin/include/btr0cur.h | 26 +++++-- storage/innodb_plugin/include/btr0cur.ic | 4 +- storage/innodb_plugin/include/buf0buf.h | 27 +++++++- storage/innodb_plugin/include/buf0buf.ic | 17 +---- storage/innodb_plugin/include/page0cur.ic | 5 +- storage/innodb_plugin/include/page0page.h | 39 +++++++++-- storage/innodb_plugin/include/page0page.ic | 32 ++++++++- storage/innodb_plugin/include/sync0rw.ic | 10 +-- storage/innodb_plugin/include/sync0sync.h | 6 +- storage/innodb_plugin/page/page0cur.c | 17 +++-- storage/innodb_plugin/page/page0page.c | 52 +++++++------- storage/innodb_plugin/row/row0ins.c | 48 +++++++++++-- storage/innodb_plugin/row/row0upd.c | 35 +++++++--- storage/innodb_plugin/sync/sync0rw.c | 4 +- storage/innodb_plugin/sync/sync0sync.c | 12 +++- 34 files changed, 650 insertions(+), 223 deletions(-) diff --git a/storage/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c index 9438277050d..41e0bf6e067 100644 --- a/storage/innobase/btr/btr0btr.c +++ b/storage/innobase/btr/btr0btr.c @@ -1937,7 +1937,7 @@ btr_node_ptr_delete( ut_a(err == DB_SUCCESS); if (!compressed) { - btr_cur_compress_if_useful(&cursor, mtr); + btr_cur_compress_if_useful(&cursor, FALSE, mtr); } } @@ -1945,9 +1945,10 @@ btr_node_ptr_delete( If page is the only on its level, this function moves its records to the father page, thus reducing the tree height. */ static -void +page_t* btr_lift_page_up( /*=============*/ + /* out: father page */ dict_index_t* index, /* in: index tree */ page_t* page, /* in: page which is the only on its level; must not be empty: use @@ -2023,6 +2024,8 @@ btr_lift_page_up( ibuf_reset_free_bits(index, father_page); ut_ad(page_validate(father_page, index)); ut_ad(btr_check_node_ptr(index, father_page, mtr)); + + return(father_page); } /***************************************************************** @@ -2039,11 +2042,13 @@ enough free extents so that the compression will always succeed if done! 
*/ void btr_compress( /*=========*/ - btr_cur_t* cursor, /* in: cursor on the page to merge or lift; - the page must not be empty: in record delete - use btr_discard_page if the page would become - empty */ - mtr_t* mtr) /* in: mtr */ + btr_cur_t* cursor, /* in/out: cursor on the page to merge + or lift; the page must not be empty: + when deleting records, use btr_discard_page() + if the page would become empty */ + ibool adjust, /* in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr) /* in/out: mini-transaction */ { dict_index_t* index; ulint space; @@ -2058,6 +2063,7 @@ btr_compress( rec_t* node_ptr; ulint data_size; ulint n_recs; + ulint nth_rec; ulint max_ins_size; ulint max_ins_size_reorg; ulint comp; @@ -2065,6 +2071,7 @@ btr_compress( page = btr_cur_get_page(cursor); index = btr_cur_get_index(cursor); comp = page_is_comp(page); + ut_a((ibool)!!comp == dict_table_is_comp(index->table)); ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), @@ -2086,6 +2093,10 @@ btr_compress( father_page = buf_frame_align(node_ptr); ut_a(comp == page_is_comp(father_page)); + if (adjust) { + nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor)); + } + /* Decide the page to which we try to merge and which will inherit the locks */ @@ -2110,9 +2121,8 @@ btr_compress( } else { /* The page is the only one on the level, lift the records to the father */ - btr_lift_page_up(index, page, mtr); - - return; + merge_page = btr_lift_page_up(index, page, mtr); + goto func_exit; } n_recs = page_get_n_recs(page); @@ -2188,6 +2198,10 @@ btr_compress( index, mtr); lock_update_merge_left(merge_page, orig_pred, page); + + if (adjust) { + nth_rec += page_rec_get_n_recs_before(orig_pred); + } } else { orig_succ = page_rec_get_next( page_get_infimum_rec(merge_page)); @@ -2208,6 +2222,12 @@ btr_compress( btr_page_free(index, page, mtr); ut_ad(btr_check_node_ptr(index, merge_page, mtr)); + +func_exit: + if (adjust) { + btr_cur_position(index, page_rec_get_nth(merge_page, nth_rec), + cursor); + } } /***************************************************************** diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c index d5bed3bec99..7bdf87f2793 100644 --- a/storage/innobase/btr/btr0cur.c +++ b/storage/innobase/btr/btr0cur.c @@ -1791,7 +1791,9 @@ btr_cur_pessimistic_update( /* out: DB_SUCCESS or error code */ ulint flags, /* in: undo logging, locking, and rollback flags */ - btr_cur_t* cursor, /* in: cursor on the record to update */ + btr_cur_t* cursor, /* in/out: cursor on the record to update; + cursor may become invalid if *big_rec == NULL + || !(flags & BTR_KEEP_POS_FLAG) */ big_rec_t** big_rec,/* out: big rec vector whose fields have to be stored externally by the caller, or NULL */ upd_t* update, /* in: update vector; this is allowed also @@ -1926,6 +1928,10 @@ btr_cur_pessimistic_update( err = DB_TOO_BIG_RECORD; goto return_after_reservations; } + + ut_ad(index->type & DICT_CLUSTERED); + ut_ad(btr_page_get_level(page, mtr) == 0); + ut_ad(flags & BTR_KEEP_POS_FLAG); } page_cursor = btr_cur_get_page_cur(cursor); @@ -1952,6 +1958,8 @@ btr_cur_pessimistic_update( ut_a(rec || optim_err != DB_UNDERFLOW); if (rec) { + page_cursor->rec = rec; + lock_rec_restore_from_page_infimum(rec, page); rec_set_field_extern_bits(rec, index, ext_vect, n_ext_vect, mtr); @@ -1965,12 +1973,30 @@ btr_cur_pessimistic_update( btr_cur_unmark_extern_fields(rec, mtr, offsets); } - btr_cur_compress_if_useful(cursor, mtr); + btr_cur_compress_if_useful( + cursor, + big_rec_vec 
!= NULL && (flags & BTR_KEEP_POS_FLAG), + mtr); err = DB_SUCCESS; goto return_after_reservations; } + if (big_rec_vec) { + ut_ad(index->type & DICT_CLUSTERED); + ut_ad(btr_page_get_level(page, mtr) == 0); + ut_ad(flags & BTR_KEEP_POS_FLAG); + + /* btr_page_split_and_insert() in + btr_cur_pessimistic_insert() invokes + mtr_memo_release(mtr, index->lock, MTR_MEMO_X_LOCK). + We must keep the index->lock when we created a + big_rec, so that row_upd_clust_rec() can store the + big_rec in the same mini-transaction. */ + + mtr_x_lock(dict_index_get_lock(index), mtr); + } + if (page_cur_is_before_first(page_cursor)) { /* The record to be updated was positioned as the first user record on its page */ @@ -1991,6 +2017,7 @@ btr_cur_pessimistic_update( ut_a(rec); ut_a(err == DB_SUCCESS); ut_a(dummy_big_rec == NULL); + page_cursor->rec = rec; rec_set_field_extern_bits(rec, index, ext_vect, n_ext_vect, mtr); offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); @@ -2025,6 +2052,43 @@ return_after_reservations: return(err); } +/***************************************************************** +Commits and restarts a mini-transaction so that it will retain an +x-lock on index->lock and the cursor page. */ + +void +btr_cur_mtr_commit_and_start( +/*=========================*/ + btr_cur_t* cursor, /* in: cursor */ + mtr_t* mtr) /* in/out: mini-transaction */ +{ + buf_block_t* block; + + block = buf_block_align(btr_cur_get_rec(cursor)); + + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + /* Keep the locks across the mtr_commit(mtr). */ + rw_lock_x_lock(dict_index_get_lock(cursor->index)); + rw_lock_x_lock(&block->lock); + mutex_enter(&block->mutex); +#ifdef UNIV_SYNC_DEBUG + buf_block_buf_fix_inc_debug(block, __FILE__, __LINE__); +#else + buf_block_buf_fix_inc(block); +#endif + mutex_exit(&block->mutex); + /* Write out the redo log. */ + mtr_commit(mtr); + mtr_start(mtr); + /* Reassociate the locks with the mini-transaction. + They will be released on mtr_commit(mtr). */ + mtr_memo_push(mtr, dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK); + mtr_memo_push(mtr, block, MTR_MEMO_PAGE_X_FIX); +} + /*==================== B-TREE DELETE MARK AND UNMARK ===============*/ /******************************************************************** @@ -2392,30 +2456,6 @@ btr_cur_del_unmark_for_ibuf( /*==================== B-TREE RECORD REMOVE =========================*/ -/***************************************************************** -Tries to compress a page of the tree on the leaf level. It is assumed -that mtr holds an x-latch on the tree and on the cursor page. To avoid -deadlocks, mtr must also own x-latches to brothers of page, if those -brothers exist. NOTE: it is assumed that the caller has reserved enough -free extents so that the compression will always succeed if done! */ - -void -btr_cur_compress( -/*=============*/ - btr_cur_t* cursor, /* in: cursor on the page to compress; - cursor does not stay valid */ - mtr_t* mtr) /* in: mtr */ -{ - ut_ad(mtr_memo_contains(mtr, - dict_index_get_lock(btr_cur_get_index(cursor)), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, buf_block_align(btr_cur_get_rec(cursor)), - MTR_MEMO_PAGE_X_FIX)); - ut_ad(btr_page_get_level(btr_cur_get_page(cursor), mtr) == 0); - - btr_compress(cursor, mtr); -} - /***************************************************************** Tries to compress a page of the tree if it seems useful. 
It is assumed that mtr holds an x-latch on the tree and on the cursor page. To avoid @@ -2427,10 +2467,12 @@ ibool btr_cur_compress_if_useful( /*=======================*/ /* out: TRUE if compression occurred */ - btr_cur_t* cursor, /* in: cursor on the page to compress; - cursor does not stay valid if compression - occurs */ - mtr_t* mtr) /* in: mtr */ + btr_cur_t* cursor, /* in/out: cursor on the page to compress; + cursor does not stay valid if !adjust and + compression occurs */ + ibool adjust, /* in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr) /* in/out: mini-transaction */ { ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(btr_cur_get_index(cursor)), @@ -2440,7 +2482,7 @@ btr_cur_compress_if_useful( if (btr_cur_compress_recommendation(cursor, mtr)) { - btr_compress(cursor, mtr); + btr_compress(cursor, adjust, mtr); return(TRUE); } @@ -2653,7 +2695,7 @@ return_after_reservations: mem_heap_free(heap); if (ret == FALSE) { - ret = btr_cur_compress_if_useful(cursor, mtr); + ret = btr_cur_compress_if_useful(cursor, FALSE, mtr); } if (n_extents > 0) { diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h index 1573de7e818..269fa355558 100644 --- a/storage/innobase/include/btr0btr.h +++ b/storage/innobase/include/btr0btr.h @@ -312,11 +312,13 @@ enough free extents so that the compression will always succeed if done! */ void btr_compress( /*=========*/ - btr_cur_t* cursor, /* in: cursor on the page to merge or lift; - the page must not be empty: in record delete - use btr_discard_page if the page would become - empty */ - mtr_t* mtr); /* in: mtr */ + btr_cur_t* cursor, /* in/out: cursor on the page to merge + or lift; the page must not be empty: + when deleting records, use btr_discard_page() + if the page would become empty */ + ibool adjust, /* in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr); /* in/out: mini-transaction */ /***************************************************************** Discards a page from a B-tree. This is used to remove the last record from a B-tree page: the whole page must be removed at the same time. 
This cannot diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 20235c55f22..c068d8d3318 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -23,6 +23,9 @@ Created 10/16/1994 Heikki Tuuri #define BTR_NO_LOCKING_FLAG 2 /* do no record lock checking */ #define BTR_KEEP_SYS_FLAG 4 /* sys fields will be found from the update vector or inserted entry */ +#define BTR_KEEP_POS_FLAG 8 /* btr_cur_pessimistic_update() + must keep cursor position when + moving columns to big_rec */ #define BTR_CUR_ADAPT #define BTR_CUR_HASH_ADAPT @@ -237,7 +240,9 @@ btr_cur_pessimistic_update( /* out: DB_SUCCESS or error code */ ulint flags, /* in: undo logging, locking, and rollback flags */ - btr_cur_t* cursor, /* in: cursor on the record to update */ + btr_cur_t* cursor, /* in/out: cursor on the record to update; + cursor may become invalid if *big_rec == NULL + || !(flags & BTR_KEEP_POS_FLAG) */ big_rec_t** big_rec,/* out: big rec vector whose fields have to be stored externally by the caller, or NULL */ upd_t* update, /* in: update vector; this is allowed also @@ -247,6 +252,15 @@ btr_cur_pessimistic_update( updates */ que_thr_t* thr, /* in: query thread */ mtr_t* mtr); /* in: mtr */ +/***************************************************************** +Commits and restarts a mini-transaction so that it will retain an +x-lock on index->lock and the cursor page. */ + +void +btr_cur_mtr_commit_and_start( +/*=========================*/ + btr_cur_t* cursor, /* in: cursor */ + mtr_t* mtr); /* in/out: mini-transaction */ /*************************************************************** Marks a clustered index record deleted. Writes an undo log record to undo log on this delete marking. Writes in the trx id field the id @@ -286,19 +300,6 @@ btr_cur_del_unmark_for_ibuf( rec_t* rec, /* in: record to delete unmark */ mtr_t* mtr); /* in: mtr */ /***************************************************************** -Tries to compress a page of the tree on the leaf level. It is assumed -that mtr holds an x-latch on the tree and on the cursor page. To avoid -deadlocks, mtr must also own x-latches to brothers of page, if those -brothers exist. NOTE: it is assumed that the caller has reserved enough -free extents so that the compression will always succeed if done! */ - -void -btr_cur_compress( -/*=============*/ - btr_cur_t* cursor, /* in: cursor on the page to compress; - cursor does not stay valid */ - mtr_t* mtr); /* in: mtr */ -/***************************************************************** Tries to compress a page of the tree if it seems useful. It is assumed that mtr holds an x-latch on the tree and on the cursor page. To avoid deadlocks, mtr must also own x-latches to brothers of page, if those @@ -309,10 +310,12 @@ ibool btr_cur_compress_if_useful( /*=======================*/ /* out: TRUE if compression occurred */ - btr_cur_t* cursor, /* in: cursor on the page to compress; - cursor does not stay valid if compression - occurs */ - mtr_t* mtr); /* in: mtr */ + btr_cur_t* cursor, /* in/out: cursor on the page to compress; + cursor does not stay valid if !adjust and + compression occurs */ + ibool adjust, /* in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr); /* in/out: mini-transaction */ /*********************************************************** Removes the record on which the tree cursor is positioned. 
It is assumed that the mtr has an x-latch on the page where the cursor is positioned, diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 3e8972d9182..7479ce9cbf0 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -682,6 +682,25 @@ buf_page_address_fold( /* out: the folded value */ ulint space, /* in: space id */ ulint offset);/* in: offset of the page within space */ +#ifdef UNIV_SYNC_DEBUG +/*********************************************************************** +Increments the bufferfix count. */ +UNIV_INLINE +void +buf_block_buf_fix_inc_debug( +/*========================*/ + buf_block_t* block, /* in: block to bufferfix */ + const char* file __attribute__ ((unused)), /* in: file name */ + ulint line __attribute__ ((unused))); /* in: line */ +#else /* UNIV_SYNC_DEBUG */ +/*********************************************************************** +Increments the bufferfix count. */ +UNIV_INLINE +void +buf_block_buf_fix_inc( +/*==================*/ + buf_block_t* block); /* in: block to bufferfix */ +#endif /* UNIV_SYNC_DEBUG */ /********************************************************************** Returns the control block of a file page, NULL if not found. */ UNIV_INLINE diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index 58c5fd9ef3d..f4d3619f73f 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -660,6 +660,6 @@ buf_page_dbg_add_level( ulint level __attribute__((unused))) /* in: latching order level */ { - sync_thread_add_level(&(buf_block_align(frame)->lock), level); + sync_thread_add_level(&(buf_block_align(frame)->lock), level, FALSE); } #endif /* UNIV_SYNC_DEBUG */ diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h index 273007c2778..24698557e77 100644 --- a/storage/innobase/include/page0page.h +++ b/storage/innobase/include/page0page.h @@ -234,10 +234,21 @@ page_get_supremum_rec( /*==================*/ /* out: the last record in record list */ page_t* page); /* in: page which must have record(s) */ -/**************************************************************** -Returns the middle record of record list. If there are an even number -of records in the list, returns the first record of upper half-list. */ +/************************************************************//** +Returns the nth record of the record list. +This is the inverse function of page_rec_get_n_recs_before(). */ +rec_t* +page_rec_get_nth( +/*=============*/ + /* out: nth record */ + page_t* page, /* in: page */ + ulint nth); /* in: nth record */ +/***************************************************************** +Returns the middle record of the records on the page. If there is an +even number of records in the list, returns the first record of the +upper half-list. */ +UNIV_INLINE rec_t* page_get_middle_rec( /*================*/ @@ -280,7 +291,8 @@ page_get_n_recs( page_t* page); /* in: index page */ /******************************************************************* Returns the number of records before the given record in chain. -The number includes infimum and supremum records. */ +The number includes infimum and supremum records. +This is the inverse function of page_rec_get_nth(). 
*/ ulint page_rec_get_n_recs_before( diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic index d9e67f3eeeb..a019aa28515 100644 --- a/storage/innobase/include/page0page.ic +++ b/storage/innobase/include/page0page.ic @@ -340,6 +340,22 @@ page_rec_is_infimum( return(page_rec_is_infimum_low(page_offset(rec))); } +/***************************************************************** +Returns the middle record of the records on the page. If there is an +even number of records in the list, returns the first record of the +upper half-list. */ +UNIV_INLINE +rec_t* +page_get_middle_rec( +/*================*/ + /* out: middle record */ + page_t* page) /* in: page */ +{ + ulint middle = (page_get_n_recs(page) + 2) / 2; + + return(page_rec_get_nth(page, middle)); +} + /***************************************************************** Compares a data tuple to a physical record. Differs from the function cmp_dtuple_rec_with_match in the way that the record must reside on an diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h index 9430d4cb723..595dca0da6d 100644 --- a/storage/innobase/include/sync0sync.h +++ b/storage/innobase/include/sync0sync.h @@ -198,8 +198,9 @@ void sync_thread_add_level( /*==================*/ void* latch, /* in: pointer to a mutex or an rw-lock */ - ulint level); /* in: level in the latching order; if + ulint level, /* in: level in the latching order; if SYNC_LEVEL_VARYING, nothing is done */ + ibool relock);/* in: TRUE if re-entering an x-lock */ /********************************************************************** Removes a latch from the thread level array if it is found there. */ diff --git a/storage/innobase/page/page0page.c b/storage/innobase/page/page0page.c index 543cf9e34eb..6a89df7de22 100644 --- a/storage/innobase/page/page0page.c +++ b/storage/innobase/page/page0page.c @@ -1194,49 +1194,42 @@ page_dir_balance_slot( } /**************************************************************** -Returns the middle record of the record list. If there are an even number -of records in the list, returns the first record of the upper half-list. */ +Returns the nth record of the record list. 
*/ rec_t* -page_get_middle_rec( -/*================*/ - /* out: middle record */ - page_t* page) /* in: page */ +page_rec_get_nth( +/*=============*/ + /* out: nth record */ + page_t* page, /* in: page */ + ulint nth) /* in: nth record */ { page_dir_slot_t* slot; - ulint middle; ulint i; ulint n_owned; - ulint count; rec_t* rec; - /* This many records we must leave behind */ - middle = (page_get_n_recs(page) + 2) / 2; - - count = 0; + ut_ad(nth < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1)); for (i = 0;; i++) { slot = page_dir_get_nth_slot(page, i); n_owned = page_dir_slot_get_n_owned(slot); - if (count + n_owned > middle) { + if (n_owned > nth) { break; } else { - count += n_owned; + nth -= n_owned; } } ut_ad(i > 0); slot = page_dir_get_nth_slot(page, i - 1); rec = page_dir_slot_get_rec(slot); - rec = page_rec_get_next(rec); - - /* There are now count records behind rec */ - for (i = 0; i < middle - count; i++) { + do { rec = page_rec_get_next(rec); - } + ut_ad(rec); + } while (nth--); return(rec); } diff --git a/storage/innobase/row/row0ins.c b/storage/innobase/row/row0ins.c index 9786f90fd39..7ff443a11ad 100644 --- a/storage/innobase/row/row0ins.c +++ b/storage/innobase/row/row0ins.c @@ -259,6 +259,7 @@ row_ins_sec_index_entry_by_modify( err = btr_cur_pessimistic_update(BTR_KEEP_SYS_FLAG, cursor, &dummy_big_rec, update, 0, thr, mtr); + ut_a(!dummy_big_rec); } func_exit: mem_heap_free(heap); @@ -329,8 +330,9 @@ row_ins_clust_index_entry_by_modify( goto func_exit; } - err = btr_cur_pessimistic_update(0, cursor, big_rec, update, - 0, thr, mtr); + err = btr_cur_pessimistic_update( + BTR_KEEP_POS_FLAG, cursor, big_rec, update, + 0, thr, mtr); } func_exit: mem_heap_free(heap); @@ -2083,6 +2085,41 @@ row_ins_index_entry_low( err = row_ins_clust_index_entry_by_modify( mode, &cursor, &big_rec, entry, ext_vec, n_ext_vec, thr, &mtr); + + if (big_rec) { + ut_a(err == DB_SUCCESS); + /* Write out the externally stored + columns while still x-latching + index->lock and block->lock. We have + to mtr_commit(mtr) first, so that the + redo log will be written in the + correct order. Otherwise, we would run + into trouble on crash recovery if mtr + freed B-tree pages on which some of + the big_rec fields will be written. */ + btr_cur_mtr_commit_and_start(&cursor, &mtr); + + rec = btr_cur_get_rec(&cursor); + offsets = rec_get_offsets(rec, index, offsets, + ULINT_UNDEFINED, + &heap); + + err = btr_store_big_rec_extern_fields( + index, rec, offsets, big_rec, &mtr); + /* If writing big_rec fails (for + example, because of DB_OUT_OF_FILE_SPACE), + the record will be corrupted. Even if + we did not update any externally + stored columns, our update could cause + the record to grow so that a + non-updated column was selected for + external storage. This non-update + would not have been written to the + undo log, and thus the record cannot + be rolled back. 
*/ + ut_a(err == DB_SUCCESS); + goto stored_big_rec; + } } else { err = row_ins_sec_index_entry_by_modify( mode, &cursor, entry, thr, &mtr); @@ -2119,7 +2156,6 @@ function_exit: mtr_commit(&mtr); if (big_rec) { - rec_t* rec; mtr_start(&mtr); btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, @@ -2130,7 +2166,7 @@ function_exit: err = btr_store_big_rec_extern_fields(index, rec, offsets, big_rec, &mtr); - +stored_big_rec: if (modify) { dtuple_big_rec_free(big_rec); } else { diff --git a/storage/innobase/row/row0umod.c b/storage/innobase/row/row0umod.c index a3333fcc536..0b00aa2411a 100644 --- a/storage/innobase/row/row0umod.c +++ b/storage/innobase/row/row0umod.c @@ -119,6 +119,7 @@ row_undo_mod_clust_low( | BTR_KEEP_SYS_FLAG, btr_cur, &dummy_big_rec, node->update, node->cmpl_info, thr, mtr); + ut_ad(!dummy_big_rec); } return(err); @@ -471,6 +472,7 @@ row_undo_mod_del_unmark_sec_and_undo_update( BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG, btr_cur, &dummy_big_rec, update, 0, thr, &mtr); + ut_ad(!dummy_big_rec); } mem_heap_free(heap); diff --git a/storage/innobase/row/row0upd.c b/storage/innobase/row/row0upd.c index 0790cfe02e2..694b00ea265 100644 --- a/storage/innobase/row/row0upd.c +++ b/storage/innobase/row/row0upd.c @@ -1580,32 +1580,48 @@ row_upd_clust_rec( ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur), dict_table_is_comp(index->table))); - err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur, - &big_rec, node->update, - node->cmpl_info, thr, mtr); - mtr_commit(mtr); + err = btr_cur_pessimistic_update( + BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG, btr_cur, + &big_rec, node->update, node->cmpl_info, thr, mtr); - if (err == DB_SUCCESS && big_rec) { + if (big_rec) { mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; rec_t* rec; *offsets_ = (sizeof offsets_) / sizeof *offsets_; - mtr_start(mtr); + ut_a(err == DB_SUCCESS); + /* Write out the externally stored columns while still + x-latching index->lock and block->lock. We have to + mtr_commit(mtr) first, so that the redo log will be + written in the correct order. Otherwise, we would run + into trouble on crash recovery if mtr freed B-tree + pages on which some of the big_rec fields will be + written. */ + btr_cur_mtr_commit_and_start(btr_cur, mtr); - ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr)); rec = btr_cur_get_rec(btr_cur); err = btr_store_big_rec_extern_fields( index, rec, rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED, &heap), - big_rec, mtr); + big_rec, mtr); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } - mtr_commit(mtr); + /* If writing big_rec fails (for example, because of + DB_OUT_OF_FILE_SPACE), the record will be corrupted. + Even if we did not update any externally stored + columns, our update could cause the record to grow so + that a non-updated column was selected for external + storage. This non-update would not have been written + to the undo log, and thus the record cannot be rolled + back. 
*/ + ut_a(err == DB_SUCCESS); } + mtr_commit(mtr); + if (big_rec) { dtuple_big_rec_free(big_rec); } diff --git a/storage/innobase/sync/sync0rw.c b/storage/innobase/sync/sync0rw.c index ef4c07e8c26..089e87a8a5c 100644 --- a/storage/innobase/sync/sync0rw.c +++ b/storage/innobase/sync/sync0rw.c @@ -663,7 +663,9 @@ rw_lock_add_debug_info( rw_lock_debug_mutex_exit(); if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) { - sync_thread_add_level(lock, lock->level); + sync_thread_add_level(lock, lock->level, + lock_type == RW_LOCK_EX + && lock->writer_count > 1); } } diff --git a/storage/innobase/sync/sync0sync.c b/storage/innobase/sync/sync0sync.c index 944fd2a97fc..1099dff798e 100644 --- a/storage/innobase/sync/sync0sync.c +++ b/storage/innobase/sync/sync0sync.c @@ -641,7 +641,7 @@ mutex_set_debug_info( ut_ad(mutex); ut_ad(file_name); - sync_thread_add_level(mutex, mutex->level); + sync_thread_add_level(mutex, mutex->level, FALSE); mutex->file_name = file_name; mutex->line = line; @@ -1011,8 +1011,9 @@ void sync_thread_add_level( /*==================*/ void* latch, /* in: pointer to a mutex or an rw-lock */ - ulint level) /* in: level in the latching order; if + ulint level, /* in: level in the latching order; if SYNC_LEVEL_VARYING, nothing is done */ + ibool relock) /* in: TRUE if re-entering an x-lock */ { sync_level_t* array; sync_level_t* slot; @@ -1060,6 +1061,10 @@ sync_thread_add_level( array = thread_slot->levels; + if (relock) { + goto levels_ok; + } + /* NOTE that there is a problem with _NODE and _LEAF levels: if the B-tree height changes, then a leaf can change to an internal node or the other way around. We do not know at present if this can cause @@ -1209,6 +1214,7 @@ sync_thread_add_level( ut_error; } +levels_ok: for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { slot = sync_thread_levels_get_nth(array, i); diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index fa8b4727cb1..ef5f87172c6 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,13 @@ +2011-06-16 The InnoDB Team + + * btr/btr0btr.c, btr/btr0cur.c, include/btr0btr.h, include/btr0cur.h, + include/btr0cur.ic, include/buf0buf.h, include/buf0buf.ic, + include/page0cur.ic, include/page0page.h, include/page0page.ic, + include/sync0rw.ic, include/sync0sync.h, page/page0cur.c, + page/page0page.c, row/row0ins.c, row/row0upd.c, + sync/sync0rw.c, sync/sync0sync.c: + Fix Bug#12612184 Race condition after btr_cur_pessimistic_update() + 2011-06-09 The InnoDB Team * btr/btr0cur.c, include/rem0rec.h, include/rem0rec.ic, * row/row0row.c, row/row0vers.c, trx/trx0rec.c: diff --git a/storage/innodb_plugin/btr/btr0btr.c b/storage/innodb_plugin/btr/btr0btr.c index 46810c011c4..fb472062fe6 100644 --- a/storage/innodb_plugin/btr/btr0btr.c +++ b/storage/innodb_plugin/btr/btr0btr.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2272,7 +2272,7 @@ btr_attach_half_pages( /*==================*/ dict_index_t* index, /*!< in: the index tree */ buf_block_t* block, /*!< in/out: page to be split */ - rec_t* split_rec, /*!< in: first record on upper + const rec_t* split_rec, /*!< in: first record on upper half page */ buf_block_t* new_block, /*!< in/out: the new half page */ ulint direction, /*!< in: FSP_UP or FSP_DOWN */ @@ -2964,15 +2964,16 @@ btr_node_ptr_delete( ut_a(err == DB_SUCCESS); if (!compressed) { - btr_cur_compress_if_useful(&cursor, mtr); + btr_cur_compress_if_useful(&cursor, FALSE, mtr); } } /*************************************************************//** If page is the only on its level, this function moves its records to the -father page, thus reducing the tree height. */ +father page, thus reducing the tree height. +@return father block */ static -void +buf_block_t* btr_lift_page_up( /*=============*/ dict_index_t* index, /*!< in: index tree */ @@ -3089,6 +3090,8 @@ btr_lift_page_up( } ut_ad(page_validate(father_page, index)); ut_ad(btr_check_node_ptr(index, father_block, mtr)); + + return(father_block); } /*************************************************************//** @@ -3105,11 +3108,13 @@ UNIV_INTERN ibool btr_compress( /*=========*/ - btr_cur_t* cursor, /*!< in: cursor on the page to merge or lift; - the page must not be empty: in record delete - use btr_discard_page if the page would become - empty */ - mtr_t* mtr) /*!< in: mtr */ + btr_cur_t* cursor, /*!< in/out: cursor on the page to merge + or lift; the page must not be empty: + when deleting records, use btr_discard_page() + if the page would become empty */ + ibool adjust, /*!< in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr) /*!< in/out: mini-transaction */ { dict_index_t* index; ulint space; @@ -3127,12 +3132,14 @@ btr_compress( ulint* offsets; ulint data_size; ulint n_recs; + ulint nth_rec; ulint max_ins_size; ulint max_ins_size_reorg; block = btr_cur_get_block(cursor); page = btr_cur_get_page(cursor); index = btr_cur_get_index(cursor); + ut_a((ibool) !!page_is_comp(page) == dict_table_is_comp(index->table)); ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), @@ -3153,6 +3160,10 @@ btr_compress( offsets = btr_page_get_father_block(NULL, heap, index, block, mtr, &father_cursor); + if (adjust) { + nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor)); + } + /* Decide the page to which we try to merge and which will inherit the locks */ @@ -3179,9 +3190,9 @@ btr_compress( } else { /* The page is the only one on the level, lift the records to the father */ - btr_lift_page_up(index, block, mtr); - mem_heap_free(heap); - return(TRUE); + + merge_block = btr_lift_page_up(index, block, mtr); + goto func_exit; } n_recs = page_get_n_recs(page); @@ -3263,6 +3274,10 @@ err_exit: btr_node_ptr_delete(index, block, mtr); lock_update_merge_left(merge_block, orig_pred, block); + + if (adjust) { + nth_rec += page_rec_get_n_recs_before(orig_pred); + } } else { rec_t* orig_succ; #ifdef UNIV_BTR_DEBUG @@ -3327,7 +3342,6 @@ err_exit: } btr_blob_dbg_remove(page, index, "btr_compress"); - mem_heap_free(heap); if (!dict_index_is_clust(index) && page_is_leaf(merge_page)) { /* Update the free bits of the B-tree page in the @@ -3379,6 +3393,16 @@ err_exit: btr_page_free(index, block, mtr); ut_ad(btr_check_node_ptr(index, merge_block, mtr)); +func_exit: 
+ mem_heap_free(heap); + + if (adjust) { + btr_cur_position( + index, + page_rec_get_nth(merge_block->frame, nth_rec), + merge_block, cursor); + } + return(TRUE); } diff --git a/storage/innodb_plugin/btr/btr0cur.c b/storage/innodb_plugin/btr/btr0cur.c index 84ba0b99e58..a551b4dfcb9 100644 --- a/storage/innodb_plugin/btr/btr0cur.c +++ b/storage/innodb_plugin/btr/btr0cur.c @@ -2088,7 +2088,9 @@ btr_cur_pessimistic_update( /*=======================*/ ulint flags, /*!< in: undo logging, locking, and rollback flags */ - btr_cur_t* cursor, /*!< in: cursor on the record to update */ + btr_cur_t* cursor, /*!< in/out: cursor on the record to update; + cursor may become invalid if *big_rec == NULL + || !(flags & BTR_KEEP_POS_FLAG) */ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to be stored externally by the caller, or NULL */ @@ -2227,7 +2229,7 @@ btr_cur_pessimistic_update( record to be inserted: we have to remember which fields were such */ ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec)); - offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, heap); + ut_ad(rec_offs_validate(rec, index, offsets)); n_ext += btr_push_update_extern_fields(new_entry, update, *heap); if (UNIV_LIKELY_NULL(page_zip)) { @@ -2250,6 +2252,10 @@ make_external: err = DB_TOO_BIG_RECORD; goto return_after_reservations; } + + ut_ad(page_is_leaf(page)); + ut_ad(dict_index_is_clust(index)); + ut_ad(flags & BTR_KEEP_POS_FLAG); } /* Store state of explicit locks on rec on the page infimum record, @@ -2277,6 +2283,8 @@ make_external: rec = btr_cur_insert_if_possible(cursor, new_entry, n_ext, mtr); if (rec) { + page_cursor->rec = rec; + lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor), rec, block); @@ -2290,7 +2298,10 @@ make_external: rec, index, offsets, mtr); } - btr_cur_compress_if_useful(cursor, mtr); + btr_cur_compress_if_useful( + cursor, + big_rec_vec != NULL && (flags & BTR_KEEP_POS_FLAG), + mtr); if (page_zip && !dict_index_is_clust(index) && page_is_leaf(page)) { @@ -2310,6 +2321,21 @@ make_external: } } + if (big_rec_vec) { + ut_ad(page_is_leaf(page)); + ut_ad(dict_index_is_clust(index)); + ut_ad(flags & BTR_KEEP_POS_FLAG); + + /* btr_page_split_and_insert() in + btr_cur_pessimistic_insert() invokes + mtr_memo_release(mtr, index->lock, MTR_MEMO_X_LOCK). + We must keep the index->lock when we created a + big_rec, so that row_upd_clust_rec() can store the + big_rec in the same mini-transaction. */ + + mtr_x_lock(dict_index_get_lock(index), mtr); + } + /* Was the record to be updated positioned as the first user record on its page? */ was_first = page_cur_is_before_first(page_cursor); @@ -2325,6 +2351,7 @@ make_external: ut_a(rec); ut_a(err == DB_SUCCESS); ut_a(dummy_big_rec == NULL); + page_cursor->rec = rec; if (dict_index_is_sec_or_ibuf(index)) { /* Update PAGE_MAX_TRX_ID in the index page header. @@ -2383,6 +2410,39 @@ return_after_reservations: return(err); } +/**************************************************************//** +Commits and restarts a mini-transaction so that it will retain an +x-lock on index->lock and the cursor page. 
*/ +UNIV_INTERN +void +btr_cur_mtr_commit_and_start( +/*=========================*/ + btr_cur_t* cursor, /*!< in: cursor */ + mtr_t* mtr) /*!< in/out: mini-transaction */ +{ + buf_block_t* block; + + block = btr_cur_get_block(cursor); + + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK)); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + /* Keep the locks across the mtr_commit(mtr). */ + rw_lock_x_lock(dict_index_get_lock(cursor->index)); + rw_lock_x_lock(&block->lock); + mutex_enter(&block->mutex); + buf_block_buf_fix_inc(block, __FILE__, __LINE__); + mutex_exit(&block->mutex); + /* Write out the redo log. */ + mtr_commit(mtr); + mtr_start(mtr); + /* Reassociate the locks with the mini-transaction. + They will be released on mtr_commit(mtr). */ + mtr_memo_push(mtr, dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK); + mtr_memo_push(mtr, block, MTR_MEMO_PAGE_X_FIX); +} + /*==================== B-TREE DELETE MARK AND UNMARK ===============*/ /****************************************************************//** @@ -2762,10 +2822,12 @@ UNIV_INTERN ibool btr_cur_compress_if_useful( /*=======================*/ - btr_cur_t* cursor, /*!< in: cursor on the page to compress; - cursor does not stay valid if compression - occurs */ - mtr_t* mtr) /*!< in: mtr */ + btr_cur_t* cursor, /*!< in/out: cursor on the page to compress; + cursor does not stay valid if !adjust and + compression occurs */ + ibool adjust, /*!< in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr) /*!< in/out: mini-transaction */ { ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(btr_cur_get_index(cursor)), @@ -2774,7 +2836,7 @@ btr_cur_compress_if_useful( MTR_MEMO_PAGE_X_FIX)); return(btr_cur_compress_recommendation(cursor, mtr) - && btr_compress(cursor, mtr)); + && btr_compress(cursor, adjust, mtr)); } /*******************************************************//** @@ -3016,7 +3078,7 @@ return_after_reservations: mem_heap_free(heap); if (ret == FALSE) { - ret = btr_cur_compress_if_useful(cursor, mtr); + ret = btr_cur_compress_if_useful(cursor, FALSE, mtr); } if (n_extents > 0) { diff --git a/storage/innodb_plugin/include/btr0btr.h b/storage/innodb_plugin/include/btr0btr.h index 5aa02694e0e..987de03b349 100644 --- a/storage/innodb_plugin/include/btr0btr.h +++ b/storage/innodb_plugin/include/btr0btr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -470,11 +470,14 @@ UNIV_INTERN ibool btr_compress( /*=========*/ - btr_cur_t* cursor, /*!< in: cursor on the page to merge or lift; - the page must not be empty: in record delete - use btr_discard_page if the page would become - empty */ - mtr_t* mtr); /*!< in: mtr */ + btr_cur_t* cursor, /*!< in/out: cursor on the page to merge + or lift; the page must not be empty: + when deleting records, use btr_discard_page() + if the page would become empty */ + ibool adjust, /*!< in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr) /*!< in/out: mini-transaction */ + __attribute__((nonnull)); /*************************************************************//** Discards a page from a B-tree. 
This is used to remove the last record from a B-tree page: the whole page must be removed at the same time. This cannot diff --git a/storage/innodb_plugin/include/btr0cur.h b/storage/innodb_plugin/include/btr0cur.h index ece3621fa97..6094a2a6c7a 100644 --- a/storage/innodb_plugin/include/btr0cur.h +++ b/storage/innodb_plugin/include/btr0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,6 +36,9 @@ Created 10/16/1994 Heikki Tuuri #define BTR_NO_LOCKING_FLAG 2 /* do no record lock checking */ #define BTR_KEEP_SYS_FLAG 4 /* sys fields will be found from the update vector or inserted entry */ +#define BTR_KEEP_POS_FLAG 8 /* btr_cur_pessimistic_update() + must keep cursor position when + moving columns to big_rec */ #ifndef UNIV_HOTBACKUP #include "que0types.h" @@ -309,7 +312,9 @@ btr_cur_pessimistic_update( /*=======================*/ ulint flags, /*!< in: undo logging, locking, and rollback flags */ - btr_cur_t* cursor, /*!< in: cursor on the record to update */ + btr_cur_t* cursor, /*!< in/out: cursor on the record to update; + cursor may become invalid if *big_rec == NULL + || !(flags & BTR_KEEP_POS_FLAG) */ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to be stored externally by the caller, or NULL */ @@ -321,6 +326,16 @@ btr_cur_pessimistic_update( que_thr_t* thr, /*!< in: query thread */ mtr_t* mtr); /*!< in: mtr; must be committed before latching any further pages */ +/***************************************************************** +Commits and restarts a mini-transaction so that it will retain an +x-lock on index->lock and the cursor page. */ +UNIV_INTERN +void +btr_cur_mtr_commit_and_start( +/*=========================*/ + btr_cur_t* cursor, /*!< in: cursor */ + mtr_t* mtr) /*!< in/out: mini-transaction */ + __attribute__((nonnull)); /***********************************************************//** Marks a clustered index record deleted. Writes an undo log record to undo log on this delete marking. Writes in the trx id field the id @@ -376,10 +391,13 @@ UNIV_INTERN ibool btr_cur_compress_if_useful( /*=======================*/ - btr_cur_t* cursor, /*!< in: cursor on the page to compress; + btr_cur_t* cursor, /*!< in/out: cursor on the page to compress; cursor does not stay valid if compression occurs */ - mtr_t* mtr); /*!< in: mtr */ + ibool adjust, /*!< in: TRUE if should adjust the + cursor position even if compression occurs */ + mtr_t* mtr) /*!< in/out: mini-transaction */ + __attribute__((nonnull)); /*******************************************************//** Removes the record on which the tree cursor is positioned. It is assumed that the mtr has an x-latch on the page where the cursor is positioned, diff --git a/storage/innodb_plugin/include/btr0cur.ic b/storage/innodb_plugin/include/btr0cur.ic index 280583f6ccf..c833b3e8572 100644 --- a/storage/innodb_plugin/include/btr0cur.ic +++ b/storage/innodb_plugin/include/btr0cur.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. 
All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -139,7 +139,7 @@ btr_cur_compress_recommendation( btr_cur_t* cursor, /*!< in: btr cursor */ mtr_t* mtr) /*!< in: mtr */ { - page_t* page; + const page_t* page; ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), MTR_MEMO_PAGE_X_FIX)); diff --git a/storage/innodb_plugin/include/buf0buf.h b/storage/innodb_plugin/include/buf0buf.h index 05dead5ac9e..775ddc758d0 100644 --- a/storage/innodb_plugin/include/buf0buf.h +++ b/storage/innodb_plugin/include/buf0buf.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -467,6 +467,31 @@ buf_block_get_modify_clock( #else /* !UNIV_HOTBACKUP */ # define buf_block_modify_clock_inc(block) ((void) 0) #endif /* !UNIV_HOTBACKUP */ +/*******************************************************************//** +Increments the bufferfix count. */ +UNIV_INLINE +void +buf_block_buf_fix_inc_func( +/*=======================*/ +#ifdef UNIV_SYNC_DEBUG + const char* file, /*!< in: file name */ + ulint line, /*!< in: line */ +#endif /* UNIV_SYNC_DEBUG */ + buf_block_t* block) /*!< in/out: block to bufferfix */ + __attribute__((nonnull)); +#ifdef UNIV_SYNC_DEBUG +/** Increments the bufferfix count. +@param b in/out: block to bufferfix +@param f in: file name where requested +@param l in: line number where requested */ +# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(f,l,b) +#else /* UNIV_SYNC_DEBUG */ +/** Increments the bufferfix count. +@param b in/out: block to bufferfix +@param f in: file name where requested +@param l in: line number where requested */ +# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(b) +#endif /* UNIV_SYNC_DEBUG */ /********************************************************************//** Calculates a page checksum which is stored to the page when it is written to a file. Note that we must be careful to calculate the same value diff --git a/storage/innodb_plugin/include/buf0buf.ic b/storage/innodb_plugin/include/buf0buf.ic index 0025bef5aac..d4ca07a4cd8 100644 --- a/storage/innodb_plugin/include/buf0buf.ic +++ b/storage/innodb_plugin/include/buf0buf.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -871,19 +871,6 @@ buf_block_buf_fix_inc_func( block->page.buf_fix_count++; } -#ifdef UNIV_SYNC_DEBUG -/** Increments the bufferfix count. -@param b in/out: block to bufferfix -@param f in: file name where requested -@param l in: line number where requested */ -# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(f,l,b) -#else /* UNIV_SYNC_DEBUG */ -/** Increments the bufferfix count. 
-@param b in/out: block to bufferfix -@param f in: file name where requested -@param l in: line number where requested */ -# define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(b) -#endif /* UNIV_SYNC_DEBUG */ /*******************************************************************//** Decrements the bufferfix count. */ @@ -1071,7 +1058,7 @@ buf_block_dbg_add_level( where we have acquired latch */ ulint level) /*!< in: latching order level */ { - sync_thread_add_level(&block->lock, level); + sync_thread_add_level(&block->lock, level, FALSE); } #endif /* UNIV_SYNC_DEBUG */ #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innodb_plugin/include/page0cur.ic b/storage/innodb_plugin/include/page0cur.ic index 3520677dfb3..81474fa35f5 100644 --- a/storage/innodb_plugin/include/page0cur.ic +++ b/storage/innodb_plugin/include/page0cur.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,6 +27,8 @@ Created 10/4/1994 Heikki Tuuri #include "buf0types.h" #ifdef UNIV_DEBUG +# include "rem0cmp.h" + /*********************************************************//** Gets pointer to the page frame where the cursor is positioned. @return page */ @@ -268,6 +270,7 @@ page_cur_tuple_insert( index, rec, offsets, mtr); } + ut_ad(!rec || !cmp_dtuple_rec(tuple, rec, offsets)); mem_heap_free(heap); return(rec); } diff --git a/storage/innodb_plugin/include/page0page.h b/storage/innodb_plugin/include/page0page.h index 3899499fb6a..9099fd7b65d 100644 --- a/storage/innodb_plugin/include/page0page.h +++ b/storage/innodb_plugin/include/page0page.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -284,16 +284,42 @@ page_get_supremum_offset( const page_t* page); /*!< in: page which must have record(s) */ #define page_get_infimum_rec(page) ((page) + page_get_infimum_offset(page)) #define page_get_supremum_rec(page) ((page) + page_get_supremum_offset(page)) + /************************************************************//** -Returns the middle record of record list. If there are an even number -of records in the list, returns the first record of upper half-list. -@return middle record */ +Returns the nth record of the record list. +This is the inverse function of page_rec_get_n_recs_before(). +@return nth record */ UNIV_INTERN +const rec_t* +page_rec_get_nth_const( +/*===================*/ + const page_t* page, /*!< in: page */ + ulint nth) /*!< in: nth record */ + __attribute__((nonnull, warn_unused_result)); +/************************************************************//** +Returns the nth record of the record list. +This is the inverse function of page_rec_get_n_recs_before(). 
+@return nth record */ +UNIV_INLINE +rec_t* +page_rec_get_nth( +/*=============*/ + page_t* page, /*< in: page */ + ulint nth) /*!< in: nth record */ + __attribute__((nonnull, warn_unused_result)); + +#ifndef UNIV_HOTBACKUP +/************************************************************//** +Returns the middle record of the records on the page. If there is an +even number of records in the list, returns the first record of the +upper half-list. +@return middle record */ +UNIV_INLINE rec_t* page_get_middle_rec( /*================*/ - page_t* page); /*!< in: page */ -#ifndef UNIV_HOTBACKUP + page_t* page) /*!< in: page */ + __attribute__((nonnull, warn_unused_result)); /*************************************************************//** Compares a data tuple to a physical record. Differs from the function cmp_dtuple_rec_with_match in the way that the record must reside on an @@ -348,6 +374,7 @@ page_get_n_recs( /***************************************************************//** Returns the number of records before the given record in chain. The number includes infimum and supremum records. +This is the inverse function of page_rec_get_nth(). @return number of records */ UNIV_INTERN ulint diff --git a/storage/innodb_plugin/include/page0page.ic b/storage/innodb_plugin/include/page0page.ic index 8f794410f20..1450b0892b3 100644 --- a/storage/innodb_plugin/include/page0page.ic +++ b/storage/innodb_plugin/include/page0page.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -420,7 +420,37 @@ page_rec_is_infimum( return(page_rec_is_infimum_low(page_offset(rec))); } +/************************************************************//** +Returns the nth record of the record list. +This is the inverse function of page_rec_get_n_recs_before(). +@return nth record */ +UNIV_INLINE +rec_t* +page_rec_get_nth( +/*=============*/ + page_t* page, /*!< in: page */ + ulint nth) /*!< in: nth record */ +{ + return((rec_t*) page_rec_get_nth_const(page, nth)); +} + #ifndef UNIV_HOTBACKUP +/************************************************************//** +Returns the middle record of the records on the page. If there is an +even number of records in the list, returns the first record of the +upper half-list. +@return middle record */ +UNIV_INLINE +rec_t* +page_get_middle_rec( +/*================*/ + page_t* page) /*!< in: page */ +{ + ulint middle = (page_get_n_recs(page) + PAGE_HEAP_NO_USER_LOW) / 2; + + return(page_rec_get_nth(page, middle)); +} + /*************************************************************//** Compares a data tuple to a physical record. Differs from the function cmp_dtuple_rec_with_match in the way that the record must reside on an diff --git a/storage/innodb_plugin/include/sync0rw.ic b/storage/innodb_plugin/include/sync0rw.ic index 7116f1b7c9b..485a63a1b18 100644 --- a/storage/innodb_plugin/include/sync0rw.ic +++ b/storage/innodb_plugin/include/sync0rw.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -603,16 +603,16 @@ rw_lock_x_unlock_direct( ut_ad((lock->lock_word % X_LOCK_DECR) == 0); -#ifdef UNIV_SYNC_DEBUG - rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX); -#endif - if (lock->lock_word == 0) { lock->recursive = FALSE; UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread); } +#ifdef UNIV_SYNC_DEBUG + rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX); +#endif + lock->lock_word += X_LOCK_DECR; ut_ad(!lock->waiters); diff --git a/storage/innodb_plugin/include/sync0sync.h b/storage/innodb_plugin/include/sync0sync.h index 71c9920a10b..d9dcf91accb 100644 --- a/storage/innodb_plugin/include/sync0sync.h +++ b/storage/innodb_plugin/include/sync0sync.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -219,8 +219,10 @@ void sync_thread_add_level( /*==================*/ void* latch, /*!< in: pointer to a mutex or an rw-lock */ - ulint level); /*!< in: level in the latching order; if + ulint level, /*!< in: level in the latching order; if SYNC_LEVEL_VARYING, nothing is done */ + ibool relock) /*!< in: TRUE if re-entering an x-lock */ + __attribute__((nonnull)); /******************************************************************//** Removes a latch from the thread level array if it is found there. @return TRUE if found in the array; it is no error if the latch is diff --git a/storage/innodb_plugin/page/page0cur.c b/storage/innodb_plugin/page/page0cur.c index 936762b986a..b8c492328e8 100644 --- a/storage/innodb_plugin/page/page0cur.c +++ b/storage/innodb_plugin/page/page0cur.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1180,14 +1180,15 @@ page_cur_insert_rec_zip_reorg( /* Before trying to reorganize the page, store the number of preceding records on the page. */ pos = page_rec_get_n_recs_before(rec); + ut_ad(pos > 0); if (page_zip_reorganize(block, index, mtr)) { /* The page was reorganized: Find rec by seeking to pos, and update *current_rec. */ - rec = page + PAGE_NEW_INFIMUM; - - while (--pos) { - rec = page + rec_get_next_offs(rec, TRUE); + if (pos > 1) { + rec = page_rec_get_nth(page, pos - 1); + } else { + rec = page + PAGE_NEW_INFIMUM; } *current_rec = rec; @@ -1283,6 +1284,12 @@ page_cur_insert_rec_zip( insert_rec = page_cur_insert_rec_zip_reorg( current_rec, block, index, insert_rec, page, page_zip, mtr); +#ifdef UNIV_DEBUG + if (insert_rec) { + rec_offs_make_valid( + insert_rec, index, offsets); + } +#endif /* UNIV_DEBUG */ } return(insert_rec); diff --git a/storage/innodb_plugin/page/page0page.c b/storage/innodb_plugin/page/page0page.c index 6cae03e8829..a284b1480a3 100644 --- a/storage/innodb_plugin/page/page0page.c +++ b/storage/innodb_plugin/page/page0page.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. 
+Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1487,55 +1487,54 @@ page_dir_balance_slot( } } -#ifndef UNIV_HOTBACKUP /************************************************************//** -Returns the middle record of the record list. If there are an even number -of records in the list, returns the first record of the upper half-list. -@return middle record */ +Returns the nth record of the record list. +This is the inverse function of page_rec_get_n_recs_before(). +@return nth record */ UNIV_INTERN -rec_t* -page_get_middle_rec( -/*================*/ - page_t* page) /*!< in: page */ +const rec_t* +page_rec_get_nth_const( +/*===================*/ + const page_t* page, /*!< in: page */ + ulint nth) /*!< in: nth record */ { - page_dir_slot_t* slot; - ulint middle; + const page_dir_slot_t* slot; ulint i; ulint n_owned; - ulint count; - rec_t* rec; + const rec_t* rec; - /* This many records we must leave behind */ - middle = (page_get_n_recs(page) + PAGE_HEAP_NO_USER_LOW) / 2; - - count = 0; + ut_ad(nth < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1)); for (i = 0;; i++) { slot = page_dir_get_nth_slot(page, i); n_owned = page_dir_slot_get_n_owned(slot); - if (count + n_owned > middle) { + if (n_owned > nth) { break; } else { - count += n_owned; + nth -= n_owned; } } ut_ad(i > 0); slot = page_dir_get_nth_slot(page, i - 1); - rec = (rec_t*) page_dir_slot_get_rec(slot); - rec = page_rec_get_next(rec); - - /* There are now count records behind rec */ + rec = page_dir_slot_get_rec(slot); - for (i = 0; i < middle - count; i++) { - rec = page_rec_get_next(rec); + if (page_is_comp(page)) { + do { + rec = page_rec_get_next_low(rec, TRUE); + ut_ad(rec); + } while (nth--); + } else { + do { + rec = page_rec_get_next_low(rec, FALSE); + ut_ad(rec); + } while (nth--); } return(rec); } -#endif /* !UNIV_HOTBACKUP */ /***************************************************************//** Returns the number of records before the given record in chain. @@ -1597,6 +1596,7 @@ page_rec_get_n_recs_before( n--; ut_ad(n >= 0); + ut_ad(n < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1)); return((ulint) n); } diff --git a/storage/innodb_plugin/row/row0ins.c b/storage/innodb_plugin/row/row0ins.c index 8050c099751..ea43cbfb5f1 100644 --- a/storage/innodb_plugin/row/row0ins.c +++ b/storage/innodb_plugin/row/row0ins.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -345,9 +345,9 @@ row_ins_clust_index_entry_by_modify( return(DB_LOCK_TABLE_FULL); } - err = btr_cur_pessimistic_update(0, cursor, - heap, big_rec, update, - 0, thr, mtr); + err = btr_cur_pessimistic_update( + BTR_KEEP_POS_FLAG, cursor, heap, big_rec, update, + 0, thr, mtr); } return(err); @@ -1986,6 +1986,7 @@ row_ins_index_entry_low( ulint modify = 0; /* remove warning */ rec_t* insert_rec; rec_t* rec; + ulint* offsets; ulint err; ulint n_unique; big_rec_t* big_rec = NULL; @@ -2089,6 +2090,42 @@ row_ins_index_entry_low( err = row_ins_clust_index_entry_by_modify( mode, &cursor, &heap, &big_rec, entry, thr, &mtr); + + if (big_rec) { + ut_a(err == DB_SUCCESS); + /* Write out the externally stored + columns while still x-latching + index->lock and block->lock. We have + to mtr_commit(mtr) first, so that the + redo log will be written in the + correct order. Otherwise, we would run + into trouble on crash recovery if mtr + freed B-tree pages on which some of + the big_rec fields will be written. */ + btr_cur_mtr_commit_and_start(&cursor, &mtr); + + rec = btr_cur_get_rec(&cursor); + offsets = rec_get_offsets( + rec, index, NULL, + ULINT_UNDEFINED, &heap); + + err = btr_store_big_rec_extern_fields( + index, btr_cur_get_block(&cursor), + rec, offsets, &mtr, FALSE, big_rec); + /* If writing big_rec fails (for + example, because of DB_OUT_OF_FILE_SPACE), + the record will be corrupted. Even if + we did not update any externally + stored columns, our update could cause + the record to grow so that a + non-updated column was selected for + external storage. This non-update + would not have been written to the + undo log, and thus the record cannot + be rolled back. */ + ut_a(err == DB_SUCCESS); + goto stored_big_rec; + } } else { ut_ad(!n_ext); err = row_ins_sec_index_entry_by_modify( @@ -2117,8 +2154,6 @@ function_exit: mtr_commit(&mtr); if (UNIV_LIKELY_NULL(big_rec)) { - rec_t* rec; - ulint* offsets; mtr_start(&mtr); btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, @@ -2132,6 +2167,7 @@ function_exit: index, btr_cur_get_block(&cursor), rec, offsets, &mtr, FALSE, big_rec); +stored_big_rec: if (modify) { dtuple_big_rec_free(big_rec); } else { diff --git a/storage/innodb_plugin/row/row0upd.c b/storage/innodb_plugin/row/row0upd.c index 3a6de4b94a7..b5952ff0a78 100644 --- a/storage/innodb_plugin/row/row0upd.c +++ b/storage/innodb_plugin/row/row0upd.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1969,28 +1969,43 @@ row_upd_clust_rec( ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur), dict_table_is_comp(index->table))); - err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur, - &heap, &big_rec, node->update, - node->cmpl_info, thr, mtr); - mtr_commit(mtr); - - if (err == DB_SUCCESS && big_rec) { + err = btr_cur_pessimistic_update( + BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG, btr_cur, + &heap, &big_rec, node->update, node->cmpl_info, thr, mtr); + if (big_rec) { ulint offsets_[REC_OFFS_NORMAL_SIZE]; rec_t* rec; rec_offs_init(offsets_); - mtr_start(mtr); + ut_a(err == DB_SUCCESS); + /* Write out the externally stored columns while still + x-latching index->lock and block->lock. We have to + mtr_commit(mtr) first, so that the redo log will be + written in the correct order. Otherwise, we would run + into trouble on crash recovery if mtr freed B-tree + pages on which some of the big_rec fields will be + written. */ + btr_cur_mtr_commit_and_start(btr_cur, mtr); - ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr)); rec = btr_cur_get_rec(btr_cur); err = btr_store_big_rec_extern_fields( index, btr_cur_get_block(btr_cur), rec, rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED, &heap), mtr, TRUE, big_rec); - mtr_commit(mtr); + /* If writing big_rec fails (for example, because of + DB_OUT_OF_FILE_SPACE), the record will be corrupted. + Even if we did not update any externally stored + columns, our update could cause the record to grow so + that a non-updated column was selected for external + storage. This non-update would not have been written + to the undo log, and thus the record cannot be rolled + back. */ + ut_a(err == DB_SUCCESS); } + mtr_commit(mtr); + if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } diff --git a/storage/innodb_plugin/sync/sync0rw.c b/storage/innodb_plugin/sync/sync0rw.c index a5da606ad80..3df2b4e9bbd 100644 --- a/storage/innodb_plugin/sync/sync0rw.c +++ b/storage/innodb_plugin/sync/sync0rw.c @@ -766,7 +766,9 @@ rw_lock_add_debug_info( rw_lock_debug_mutex_exit(); if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) { - sync_thread_add_level(lock, lock->level); + sync_thread_add_level(lock, lock->level, + lock_type == RW_LOCK_EX + && lock->lock_word < 0); } } diff --git a/storage/innodb_plugin/sync/sync0sync.c b/storage/innodb_plugin/sync/sync0sync.c index 2be9d667705..59498386fda 100644 --- a/storage/innodb_plugin/sync/sync0sync.c +++ b/storage/innodb_plugin/sync/sync0sync.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -656,7 +656,7 @@ mutex_set_debug_info( ut_ad(mutex); ut_ad(file_name); - sync_thread_add_level(mutex, mutex->level); + sync_thread_add_level(mutex, mutex->level, FALSE); mutex->file_name = file_name; mutex->line = line; @@ -1083,8 +1083,9 @@ void sync_thread_add_level( /*==================*/ void* latch, /*!< in: pointer to a mutex or an rw-lock */ - ulint level) /*!< in: level in the latching order; if + ulint level, /*!< in: level in the latching order; if SYNC_LEVEL_VARYING, nothing is done */ + ibool relock) /*!< in: TRUE if re-entering an x-lock */ { sync_level_t* array; sync_level_t* slot; @@ -1132,6 +1133,10 @@ sync_thread_add_level( array = thread_slot->levels; + if (relock) { + goto levels_ok; + } + /* NOTE that there is a problem with _NODE and _LEAF levels: if the B-tree height changes, then a leaf can change to an internal node or the other way around. We do not know at present if this can cause @@ -1269,6 +1274,7 @@ sync_thread_add_level( ut_error; } +levels_ok: for (i = 0; i < SYNC_THREAD_N_LEVELS; i++) { slot = sync_thread_levels_get_nth(array, i); -- cgit v1.2.1 From 417a267927b9249868e8ca7bd3cb7b6e09485f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 16 Jun 2011 11:51:04 +0300 Subject: Re-enable the debug assertions for Bug#12650861. Replace UNIV_BLOB_NULL_DEBUG with UNIV_DEBUG||UNIV_BLOB_LIGHT_DEBUG. Fix known bogus failures. btr_cur_optimistic_update(): If rec_offs_any_null_extern(), assert that the current transaction is an incomplete transaction that is being rolled back in crash recovery. row_build(): If rec_offs_any_null_extern(), assert that the transaction that last updated the record was recovered during crash recovery (and will soon be rolled back). 
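
For readers unfamiliar with the assertion being re-enabled by this patch, the standalone sketch below (not part of the patch, and not InnoDB's API) mirrors what rec_offs_any_null_extern() looks for: an externally stored column whose trailing 20-byte BLOB pointer is still all zero because the server crashed before btr_store_big_rec_extern_fields() wrote it out. The names first_null_extern, struct field and FIELD_REF_SIZE are hypothetical simplifications; the real check walks the rec_get_offsets() array and uses BTR_EXTERN_FIELD_REF_SIZE.

/* Simplified, self-contained illustration of a "null BLOB pointer"
check.  An externally stored column ends in a fixed-size field
reference; if those bytes are all zero, the pointer was never
written (e.g. the crash happened before the mtr_commit()). */

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define FIELD_REF_SIZE	20	/* stands in for BTR_EXTERN_FIELD_REF_SIZE */

struct field {
	const unsigned char*	data;		/* column bytes; BLOB pointer at the end */
	size_t			len;		/* length of the column bytes */
	int			is_extern;	/* nonzero if stored externally */
};

/* Return the index of the first externally stored field whose
BLOB pointer is all zero, or -1 if every pointer has been written. */
static int
first_null_extern(const struct field* fields, size_t n_fields)
{
	static const unsigned char	zero_ref[FIELD_REF_SIZE];
	size_t				i;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].is_extern
		    && fields[i].len >= FIELD_REF_SIZE
		    && !memcmp(fields[i].data + fields[i].len
			       - FIELD_REF_SIZE,
			       zero_ref, FIELD_REF_SIZE)) {
			return((int) i);
		}
	}

	return(-1);
}

int
main(void)
{
	/* 40-byte column: 20-byte prefix plus a zero field reference,
	as a record would look after a crash mid-insert. */
	unsigned char	blob_col[40] = {0};
	struct field	rec[1] = {{blob_col, sizeof blob_col, 1}};

	printf("first field with a null BLOB pointer: %d\n",
	       first_null_extern(rec, 1));	/* prints 0 */
	return(0);
}

In the patch itself, finding such a field is only tolerated when the transaction that owns the record is one being rolled back in crash recovery (trx_roll_crash_recv_trx in the built-in InnoDB, trx_is_recv() in the plugin); in every other context the re-enabled ut_a() assertions treat it as corruption.
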
--- storage/innobase/btr/btr0cur.c | 13 +++++----- storage/innobase/include/rem0rec.h | 4 +-- storage/innobase/include/rem0rec.ic | 4 +-- storage/innobase/include/trx0roll.h | 3 +++ storage/innobase/include/univ.i | 4 +-- storage/innobase/row/row0row.c | 43 +++++++++++++++++++++++++------- storage/innobase/row/row0vers.c | 16 ++++++------ storage/innobase/trx/trx0rec.c | 4 +-- storage/innodb_plugin/btr/btr0cur.c | 12 ++++----- storage/innodb_plugin/include/rem0rec.h | 4 +-- storage/innodb_plugin/include/rem0rec.ic | 4 +-- storage/innodb_plugin/include/univ.i | 5 +--- storage/innodb_plugin/row/row0row.c | 41 +++++++++++++++++++++++------- storage/innodb_plugin/row/row0vers.c | 16 ++++++------ storage/innodb_plugin/trx/trx0rec.c | 4 +-- 15 files changed, 111 insertions(+), 66 deletions(-) diff --git a/storage/innobase/btr/btr0cur.c b/storage/innobase/btr/btr0cur.c index 7bdf87f2793..9ce09929f9a 100644 --- a/storage/innobase/btr/btr0cur.c +++ b/storage/innobase/btr/btr0cur.c @@ -31,6 +31,7 @@ Created 10/16/1994 Heikki Tuuri #include "btr0sea.h" #include "row0upd.h" #include "trx0rec.h" +#include "trx0roll.h" /* trx_roll_crash_recv_trx */ #include "que0que.h" #include "row0row.h" #include "srv0srv.h" @@ -1579,7 +1580,6 @@ btr_cur_optimistic_update( ulint old_rec_size; dtuple_t* new_entry; dulint roll_ptr; - trx_t* trx; mem_heap_t* heap; ibool reorganized = FALSE; ulint i; @@ -1592,9 +1592,10 @@ btr_cur_optimistic_update( heap = mem_heap_create(1024); offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); -#ifdef UNIV_BLOB_NULL_DEBUG - ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets) + || thr_get_trx(thr) == trx_roll_crash_recv_trx); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ #ifdef UNIV_DEBUG if (btr_cur_print_record_ops && thr) { @@ -1701,13 +1702,11 @@ btr_cur_optimistic_update( page_cur_move_to_prev(page_cursor); - trx = thr_get_trx(thr); - if (!(flags & BTR_KEEP_SYS_FLAG)) { row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR, roll_ptr); row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID, - trx->id); + thr_get_trx(thr)->id); } rec = btr_cur_insert_if_possible(cursor, new_entry, &reorganized, mtr); diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index a1a206c3281..67baeb7d8d2 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -339,7 +339,7 @@ rec_offs_any_extern( /*================*/ /* out: TRUE if a field is stored externally */ const ulint* offsets);/* in: array returned by rec_get_offsets() */ -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /******************************************************** Determine if the offsets are for a record containing null BLOB pointers. */ UNIV_INLINE @@ -351,7 +351,7 @@ rec_offs_any_null_extern( or NULL if none found */ rec_t* rec, /*!< in: record */ const ulint* offsets); /*!< in: rec_get_offsets(rec) */ -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /*************************************************************** Sets the value of the ith field extern storage bit. 
*/ UNIV_INLINE diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic index 9e659f12881..566c62e30f2 100644 --- a/storage/innobase/include/rem0rec.ic +++ b/storage/innobase/include/rem0rec.ic @@ -1021,7 +1021,7 @@ rec_offs_any_extern( return(FALSE); } -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /******************************************************** Determine if the offsets are for a record containing null BLOB pointers. */ UNIV_INLINE @@ -1055,7 +1055,7 @@ rec_offs_any_null_extern( return(NULL); } -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /*************************************************************** Sets the value of the ith field extern storage bit. */ diff --git a/storage/innobase/include/trx0roll.h b/storage/innobase/include/trx0roll.h index c1eca3d5753..4fabb83b025 100644 --- a/storage/innobase/include/trx0roll.h +++ b/storage/innobase/include/trx0roll.h @@ -15,6 +15,9 @@ Created 3/26/1996 Heikki Tuuri #include "mtr0mtr.h" #include "trx0sys.h" +/* In crash recovery, the current trx to be rolled back */ +extern trx_t* trx_roll_crash_recv_trx; + #define trx_roll_free_all_savepoints(s) trx_roll_savepoints_free((s), NULL) /*********************************************************************** diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 8eb78491b04..a67b1b3895e 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -88,8 +88,8 @@ memory is read outside the allocated blocks. */ #if 0 #define UNIV_DEBUG_VALGRIND /* Enable extra Valgrind instrumentation */ -#define UNIV_BLOB_NULL_DEBUG /* Enable deep off-page - column debugging */ +#define UNIV_BLOB_LIGHT_DEBUG /* Enable off-page column + debugging without UNIV_DEBUG */ #define UNIV_DEBUG /* Enable ut_ad() assertions */ #define UNIV_LIST_DEBUG /* debug UT_LIST_ macros */ #define UNIV_MEM_DEBUG /* detect memory leaks etc */ diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index b9efdcfbfdd..751de98deba 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -202,6 +202,7 @@ row_build( ut_ad(index && rec && heap); ut_ad(index->type & DICT_CLUSTERED); + ut_ad(!mutex_own(&kernel_mutex)); if (!offsets) { offsets = rec_get_offsets(rec, index, offsets_, @@ -210,13 +211,37 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } -#if 0 && defined UNIV_BLOB_NULL_DEBUG - /* This one can fail in trx_rollback_or_clean_all_without_sess() - if the server crashed during an insert before the - btr_store_big_rec_extern_fields() did mtr_commit() - all BLOB pointers to the clustered index record. */ - ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* 0 && UNIV_BLOB_NULL_DEBUG */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + if (UNIV_LIKELY_NULL(rec_offs_any_null_extern(rec, offsets))) { + /* This condition can occur during crash recovery before + trx_rollback_or_clean_all_without_sess() has completed + execution. + + This condition is possible if the server crashed + during an insert or update before + btr_store_big_rec_extern_fields() did mtr_commit() all + BLOB pointers to the clustered index record. + + Look up the transaction that holds the implicit lock + on this record, and assert that it was recovered (and + will soon be rolled back). 
*/ + + ulint trx_id_pos = dict_index_get_sys_col_pos( + index, DATA_TRX_ID); + ulint len; + dulint trx_id = trx_read_trx_id( + rec_get_nth_field(rec, offsets, trx_id_pos, &len)); + trx_t* trx; + ut_a(len == 6); + + mutex_enter(&kernel_mutex); + trx = trx_get_on_id(trx_id); + ut_a(trx); + /* This field does not exist in this version of InnoDB. */ + /* ut_a(trx->is_recovered); */ + mutex_exit(&kernel_mutex); + } +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (type != ROW_COPY_POINTERS) { /* Take a copy of rec to heap */ @@ -310,10 +335,10 @@ row_rec_to_index_entry( rec = rec_copy(buf, rec, offsets); /* Avoid a debug assertion in rec_offs_validate(). */ rec_offs_make_valid(rec, index, offsets); -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG } else { ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ } rec_len = rec_offs_n_fields(offsets); diff --git a/storage/innobase/row/row0vers.c b/storage/innobase/row/row0vers.c index a52ef3cc083..906b46fb51b 100644 --- a/storage/innobase/row/row0vers.c +++ b/storage/innobase/row/row0vers.c @@ -473,10 +473,10 @@ row_vers_build_for_consistent_read( /* The view already sees this version: we can copy it to in_heap and return */ -#if defined UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern( version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets)); @@ -511,9 +511,9 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets(prev_version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ trx_id = row_get_rec_trx_id(prev_version, index, *offsets); @@ -615,9 +615,9 @@ row_vers_build_for_semi_consistent_read( /* We found a version that belongs to a committed transaction: return it. 
*/ -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (rec == version) { *old_vers = rec; @@ -676,9 +676,9 @@ row_vers_build_for_semi_consistent_read( version = prev_version; *offsets = rec_get_offsets(version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ }/* for (;;) */ if (heap) { diff --git a/storage/innobase/trx/trx0rec.c b/storage/innobase/trx/trx0rec.c index 2a9224b0a72..730ac6a6f60 100644 --- a/storage/innobase/trx/trx0rec.c +++ b/storage/innobase/trx/trx0rec.c @@ -1397,9 +1397,9 @@ trx_undo_prev_version_build( return(DB_ERROR); } -# ifdef UNIV_BLOB_NULL_DEBUG +# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(rec, offsets)); -# endif /* UNIV_BLOB_NULL_DEBUG */ +# endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (row_upd_changes_field_size_or_external(index, offsets, update)) { ulint* ext_vect; diff --git a/storage/innodb_plugin/btr/btr0cur.c b/storage/innodb_plugin/btr/btr0cur.c index a551b4dfcb9..651c2c342a5 100644 --- a/storage/innodb_plugin/btr/btr0cur.c +++ b/storage/innodb_plugin/btr/btr0cur.c @@ -1854,7 +1854,6 @@ btr_cur_optimistic_update( ulint old_rec_size; dtuple_t* new_entry; roll_ptr_t roll_ptr; - trx_t* trx; mem_heap_t* heap; ulint i; ulint n_ext; @@ -1871,9 +1870,10 @@ btr_cur_optimistic_update( heap = mem_heap_create(1024); offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); -#ifdef UNIV_BLOB_NULL_DEBUG - ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + ut_a(!rec_offs_any_null_extern(rec, offsets) + || trx_is_recv(thr_get_trx(thr))); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ #ifdef UNIV_DEBUG if (btr_cur_print_record_ops && thr) { @@ -1996,13 +1996,11 @@ any_extern: page_cur_move_to_prev(page_cursor); - trx = thr_get_trx(thr); - if (!(flags & BTR_KEEP_SYS_FLAG)) { row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR, roll_ptr); row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID, - trx->id); + thr_get_trx(thr)->id); } /* There are no externally stored columns in new_entry */ diff --git a/storage/innodb_plugin/include/rem0rec.h b/storage/innodb_plugin/include/rem0rec.h index fff44eecb00..06de23be757 100644 --- a/storage/innodb_plugin/include/rem0rec.h +++ b/storage/innodb_plugin/include/rem0rec.h @@ -480,7 +480,7 @@ ulint rec_offs_any_extern( /*================*/ const ulint* offsets);/*!< in: array returned by rec_get_offsets() */ -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /******************************************************//** Determine if the offsets are for a record containing null BLOB pointers. 
@return first field containing a null BLOB pointer, or NULL if none found */ @@ -491,7 +491,7 @@ rec_offs_any_null_extern( const rec_t* rec, /*!< in: record */ const ulint* offsets) /*!< in: rec_get_offsets(rec) */ __attribute__((nonnull, warn_unused_result)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. @return nonzero if externally stored */ diff --git a/storage/innodb_plugin/include/rem0rec.ic b/storage/innodb_plugin/include/rem0rec.ic index 252484a7433..7cff36fee6c 100644 --- a/storage/innodb_plugin/include/rem0rec.ic +++ b/storage/innodb_plugin/include/rem0rec.ic @@ -1088,7 +1088,7 @@ rec_offs_any_extern( return(UNIV_UNLIKELY(*rec_offs_base(offsets) & REC_OFFS_EXTERNAL)); } -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /******************************************************//** Determine if the offsets are for a record containing null BLOB pointers. @return first field containing a null BLOB pointer, or NULL if none found */ @@ -1124,7 +1124,7 @@ rec_offs_any_null_extern( return(NULL); } -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. diff --git a/storage/innodb_plugin/include/univ.i b/storage/innodb_plugin/include/univ.i index 63d8e7140b5..5e36867f05a 100644 --- a/storage/innodb_plugin/include/univ.i +++ b/storage/innodb_plugin/include/univ.i @@ -1,8 +1,7 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2009, Sun Microsystems, Inc. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -179,8 +178,6 @@ command. Not tested on Windows. */ debugging without UNIV_DEBUG */ #define UNIV_BLOB_LIGHT_DEBUG /* Enable off-page column debugging without UNIV_DEBUG */ -#define UNIV_BLOB_NULL_DEBUG /* Enable deep off-page - column debugging */ #define UNIV_DEBUG /* Enable ut_ad() assertions and disable UNIV_INLINE */ #define UNIV_DEBUG_LOCK_VALIDATE /* Enable diff --git a/storage/innodb_plugin/row/row0row.c b/storage/innodb_plugin/row/row0row.c index 8aba375d046..3b610d735b0 100644 --- a/storage/innodb_plugin/row/row0row.c +++ b/storage/innodb_plugin/row/row0row.c @@ -223,6 +223,7 @@ row_build( ut_ad(index && rec && heap); ut_ad(dict_index_is_clust(index)); + ut_ad(!mutex_own(&kernel_mutex)); if (!offsets) { offsets = rec_get_offsets(rec, index, offsets_, @@ -231,13 +232,35 @@ row_build( ut_ad(rec_offs_validate(rec, index, offsets)); } -#if 0 && defined UNIV_BLOB_NULL_DEBUG - /* This one can fail in trx_rollback_active() if - the server crashed during an insert before the - btr_store_big_rec_extern_fields() did mtr_commit() - all BLOB pointers to the clustered index record. */ - ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* 0 && UNIV_BLOB_NULL_DEBUG */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG + if (UNIV_LIKELY_NULL(rec_offs_any_null_extern(rec, offsets))) { + /* This condition can occur during crash recovery before + trx_rollback_active() has completed execution. 
+ + This condition is possible if the server crashed + during an insert or update before + btr_store_big_rec_extern_fields() did mtr_commit() all + BLOB pointers to the clustered index record. + + Look up the transaction that holds the implicit lock + on this record, and assert that it was recovered (and + will soon be rolled back). */ + + ulint trx_id_pos = dict_index_get_sys_col_pos( + index, DATA_TRX_ID); + ulint len; + trx_id_t trx_id = trx_read_trx_id( + rec_get_nth_field(rec, offsets, trx_id_pos, &len)); + trx_t* trx; + ut_a(len == 6); + + mutex_enter(&kernel_mutex); + trx = trx_get_on_id(trx_id); + ut_a(trx); + ut_a(trx->is_recovered); + mutex_exit(&kernel_mutex); + } +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (type != ROW_COPY_POINTERS) { /* Take a copy of rec to heap */ @@ -423,10 +446,10 @@ row_rec_to_index_entry( rec = rec_copy(buf, rec, offsets); /* Avoid a debug assertion in rec_offs_validate(). */ rec_offs_make_valid(rec, index, offsets); -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG } else { ut_a(!rec_offs_any_null_extern(rec, offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ } entry = row_rec_to_index_entry_low(rec, index, offsets, n_ext, heap); diff --git a/storage/innodb_plugin/row/row0vers.c b/storage/innodb_plugin/row/row0vers.c index 2d39f92d18f..8a7bb842293 100644 --- a/storage/innodb_plugin/row/row0vers.c +++ b/storage/innodb_plugin/row/row0vers.c @@ -550,10 +550,10 @@ row_vers_build_for_consistent_read( /* The view already sees this version: we can copy it to in_heap and return */ -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern( version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ buf = mem_heap_alloc(in_heap, rec_offs_size(*offsets)); @@ -588,9 +588,9 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets(prev_version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ trx_id = row_get_rec_trx_id(prev_version, index, *offsets); @@ -691,9 +691,9 @@ row_vers_build_for_semi_consistent_read( /* We found a version that belongs to a committed transaction: return it. 
*/ -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (rec == version) { *old_vers = rec; @@ -752,9 +752,9 @@ row_vers_build_for_semi_consistent_read( version = prev_version; *offsets = rec_get_offsets(version, index, *offsets, ULINT_UNDEFINED, offset_heap); -#ifdef UNIV_BLOB_NULL_DEBUG +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); -#endif /* UNIV_BLOB_NULL_DEBUG */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ }/* for (;;) */ if (heap) { diff --git a/storage/innodb_plugin/trx/trx0rec.c b/storage/innodb_plugin/trx/trx0rec.c index 297838365d5..9f2fd59d82b 100644 --- a/storage/innodb_plugin/trx/trx0rec.c +++ b/storage/innodb_plugin/trx/trx0rec.c @@ -1577,9 +1577,9 @@ trx_undo_prev_version_build( return(DB_ERROR); } -# ifdef UNIV_BLOB_NULL_DEBUG +# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(rec, offsets)); -# endif /* UNIV_BLOB_NULL_DEBUG */ +# endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (row_upd_changes_field_size_or_external(index, offsets, update)) { ulint n_ext; -- cgit v1.2.1 From e4aa6667a1da2f769ed3af819e6eb4c519431af5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 16 Jun 2011 14:22:12 +0300 Subject: Bug#12595087 - 61191: Question about page_zip_available There is an apparent problem with page_zip_clear_rec(). In btr_cur_optimistic_update() we do this: page_cur_delete_rec(page_cursor, index, offsets, mtr); ... rec = btr_cur_insert_if_possible(cursor, new_entry, 0/*n_ext*/, mtr); ut_a(rec); /* <- We calculated above the insert would fit */ The problem is that page_cur_delete_rec() could fill the modification log while doing page_zip_clear_rec(), requiring recompression for the btr_cur_insert_if_possible(). In a pathological case, the data could fail to recompress. page_zip_clear_rec(): Leave the page modification log alone. Only clear the necessary fields. rb:673 approved by Jimmy Yang --- storage/innodb_plugin/ChangeLog | 5 ++ storage/innodb_plugin/page/page0zip.c | 116 +++++++++++++++------------------- storage/innodb_plugin/rem/rem0rec.c | 10 +-- 3 files changed, 60 insertions(+), 71 deletions(-) diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index ef5f87172c6..d747a45d434 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,8 @@ +2011-06-16 The InnoDB Team + + * page/page0zip.c, rem/rem0rec.c: + Fix Bug#61191 question about page_zip_available() + 2011-06-16 The InnoDB Team * btr/btr0btr.c, btr/btr0cur.c, include/btr0btr.h, include/btr0cur.h, diff --git a/storage/innodb_plugin/page/page0zip.c b/storage/innodb_plugin/page/page0zip.c index 6e866b3f016..2d97b9a0d64 100644 --- a/storage/innodb_plugin/page/page0zip.c +++ b/storage/innodb_plugin/page/page0zip.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 2005, 2011, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3912,17 +3912,9 @@ page_zip_write_trx_id_and_roll_ptr( UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); } -#ifdef UNIV_ZIP_DEBUG -/** Set this variable in a debugger to disable page_zip_clear_rec(). -The only observable effect should be the compression ratio due to -deleted records not being zeroed out. In rare cases, there can be -page_zip_validate() failures on the node_ptr, trx_id and roll_ptr -columns if the space is reallocated for a smaller record. */ -UNIV_INTERN ibool page_zip_clear_rec_disable; -#endif /* UNIV_ZIP_DEBUG */ - /**********************************************************************//** -Clear an area on the uncompressed and compressed page, if possible. */ +Clear an area on the uncompressed and compressed page. +Do not clear the data payload, as that would grow the modification log. */ static void page_zip_clear_rec( @@ -3934,6 +3926,9 @@ page_zip_clear_rec( { ulint heap_no; page_t* page = page_align(rec); + byte* storage; + byte* field; + ulint len; /* page_zip_validate() would fail here if a record containing externally stored columns is being deleted. */ ut_ad(rec_offs_validate(rec, index, offsets)); @@ -3949,60 +3944,46 @@ page_zip_clear_rec( UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets), rec_offs_extra_size(offsets)); - if ( -#ifdef UNIV_ZIP_DEBUG - !page_zip_clear_rec_disable && -#endif /* UNIV_ZIP_DEBUG */ - page_zip->m_end - + 1 + ((heap_no - 1) >= 64)/* size of the log entry */ - + page_zip_get_trailer_len(page_zip, - dict_index_is_clust(index), NULL) - < page_zip_get_size(page_zip)) { - byte* data; - - /* Clear only the data bytes, because the allocator and - the decompressor depend on the extra bytes. */ - memset(rec, 0, rec_offs_data_size(offsets)); - - if (!page_is_leaf(page)) { - /* Clear node_ptr on the compressed page. */ - byte* storage = page_zip->data - + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE; - - memset(storage - (heap_no - 1) * REC_NODE_PTR_SIZE, - 0, REC_NODE_PTR_SIZE); - } else if (dict_index_is_clust(index)) { - /* Clear trx_id and roll_ptr on the compressed page. */ - byte* storage = page_zip->data - + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE; - - memset(storage - (heap_no - 1) - * (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN), - 0, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); - } + if (!page_is_leaf(page)) { + /* Clear node_ptr. On the compressed page, + there is an array of node_ptr immediately before the + dense page directory, at the very end of the page. */ + storage = page_zip->data + + page_zip_get_size(page_zip) + - (page_dir_get_n_heap(page) + - PAGE_HEAP_NO_USER_LOW) + * PAGE_ZIP_DIR_SLOT_SIZE; + ut_ad(dict_index_get_n_unique_in_tree(index) == + rec_offs_n_fields(offsets) - 1); + field = rec_get_nth_field(rec, offsets, + rec_offs_n_fields(offsets) - 1, + &len); + ut_ad(len == REC_NODE_PTR_SIZE); - /* Log that the data was zeroed out. 
*/ - data = page_zip->data + page_zip->m_end; - ut_ad(!*data); - if (UNIV_UNLIKELY(heap_no - 1 >= 64)) { - *data++ = (byte) (0x80 | (heap_no - 1) >> 7); - ut_ad(!*data); - } - *data++ = (byte) ((heap_no - 1) << 1 | 1); - ut_ad(!*data); - ut_ad((ulint) (data - page_zip->data) - < page_zip_get_size(page_zip)); - page_zip->m_end = data - page_zip->data; - page_zip->m_nonempty = TRUE; - } else if (page_is_leaf(page) && dict_index_is_clust(index)) { - /* Do not clear the record, because there is not enough space - to log the operation. */ + ut_ad(!rec_offs_any_extern(offsets)); + memset(field, 0, REC_NODE_PTR_SIZE); + memset(storage - (heap_no - 1) * REC_NODE_PTR_SIZE, + 0, REC_NODE_PTR_SIZE); + } else if (dict_index_is_clust(index)) { + /* Clear trx_id and roll_ptr. On the compressed page, + there is an array of these fields immediately before the + dense page directory, at the very end of the page. */ + const ulint trx_id_pos + = dict_col_get_clust_pos( + dict_table_get_sys_col( + index->table, DATA_TRX_ID), index); + storage = page_zip->data + + page_zip_get_size(page_zip) + - (page_dir_get_n_heap(page) + - PAGE_HEAP_NO_USER_LOW) + * PAGE_ZIP_DIR_SLOT_SIZE; + field = rec_get_nth_field(rec, offsets, trx_id_pos, &len); + ut_ad(len == DATA_TRX_ID_LEN); + + memset(field, 0, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); + memset(storage - (heap_no - 1) + * (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN), + 0, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); if (rec_offs_any_extern(offsets)) { ulint i; @@ -4011,15 +3992,18 @@ page_zip_clear_rec( /* Clear all BLOB pointers in order to make page_zip_validate() pass. */ if (rec_offs_nth_extern(offsets, i)) { - ulint len; - byte* field = rec_get_nth_field( + field = rec_get_nth_field( rec, offsets, i, &len); + ut_ad(len + == BTR_EXTERN_FIELD_REF_SIZE); memset(field + len - BTR_EXTERN_FIELD_REF_SIZE, 0, BTR_EXTERN_FIELD_REF_SIZE); } } } + } else { + ut_ad(!rec_offs_any_extern(offsets)); } #ifdef UNIV_ZIP_DEBUG diff --git a/storage/innodb_plugin/rem/rem0rec.c b/storage/innodb_plugin/rem/rem0rec.c index 37ba8ca2ffe..9f90d2940dd 100644 --- a/storage/innodb_plugin/rem/rem0rec.c +++ b/storage/innodb_plugin/rem/rem0rec.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -408,7 +408,7 @@ rec_init_offsets( do { ulint len; if (UNIV_UNLIKELY(i == n_node_ptr_field)) { - len = offs += 4; + len = offs += REC_NODE_PTR_SIZE; goto resolved; } @@ -640,7 +640,7 @@ rec_get_offsets_reverse( do { ulint len; if (UNIV_UNLIKELY(i == n_node_ptr_field)) { - len = offs += 4; + len = offs += REC_NODE_PTR_SIZE; goto resolved; } @@ -1131,9 +1131,9 @@ rec_convert_dtuple_to_rec_comp( if (UNIV_UNLIKELY(i == n_node_ptr_field)) { ut_ad(dtype_get_prtype(type) & DATA_NOT_NULL); - ut_ad(len == 4); + ut_ad(len == REC_NODE_PTR_SIZE); memcpy(end, dfield_get_data(field), len); - end += 4; + end += REC_NODE_PTR_SIZE; break; } -- cgit v1.2.1 From b0fc27dc0a9a0f0a48a1f0cbbdcc0015bbad24bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 16 Jun 2011 14:55:46 +0300 Subject: Bug #61341 buf_LRU_insert_zip_clean can be O(N) on LRU length The buf_pool->zip_clean list is only needed for debugging, or for recomputing buf_pool->page_hash when resizing the buffer pool. 
Buffer pool resizing was never fully implemented. Remove the resizing code, and define buf_pool->zip_clean only in debug builds. buf_pool->zip_clean, buf_LRU_insert_zip_clean(): Enclose in #if defined UNIV_DEBUG || UNIV_BUF_DEBUG. buf_chunk_free(), buf_chunk_all_free(), buf_pool_shrink(), buf_pool_page_hash_rebuild(), buf_pool_resize(): Remove (unreachable code). rb:671 approved by Inaam Rana --- storage/innodb_plugin/ChangeLog | 6 + storage/innodb_plugin/buf/buf0buddy.c | 7 +- storage/innodb_plugin/buf/buf0buf.c | 379 +------------------------------- storage/innodb_plugin/buf/buf0flu.c | 4 +- storage/innodb_plugin/buf/buf0lru.c | 8 +- storage/innodb_plugin/include/buf0buf.h | 8 +- storage/innodb_plugin/include/buf0lru.h | 4 +- 7 files changed, 31 insertions(+), 385 deletions(-) diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index d747a45d434..cf060ed3d84 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,9 @@ +2011-06-16 The InnoDB Team + + * buf/buf0buddy.c, buf/buf0buf.c, buf/buf0flu.c, buf/buf0lru.c, + include/buf0buf.h, include/buf0lru.h: + Fix Bug#61341 buf_LRU_insert_zip_clean can be O(N) on LRU length + 2011-06-16 The InnoDB Team * page/page0zip.c, rem/rem0rec.c: diff --git a/storage/innodb_plugin/buf/buf0buddy.c b/storage/innodb_plugin/buf/buf0buddy.c index 63c99571510..9a95f2c639b 100644 --- a/storage/innodb_plugin/buf/buf0buddy.c +++ b/storage/innodb_plugin/buf/buf0buddy.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2006, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -351,7 +351,9 @@ buf_buddy_relocate_block( buf_page_t* bpage, /*!< in: block to relocate */ buf_page_t* dpage) /*!< in: free block to relocate to */ { +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG buf_page_t* b; +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ ut_ad(buf_pool_mutex_own()); @@ -380,7 +382,7 @@ buf_buddy_relocate_block( buf_relocate(bpage, dpage); ut_d(bpage->state = BUF_BLOCK_ZIP_FREE); - +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /* relocate buf_pool->zip_clean */ b = UT_LIST_GET_PREV(list, dpage); UT_LIST_REMOVE(list, buf_pool->zip_clean, dpage); @@ -390,6 +392,7 @@ buf_buddy_relocate_block( } else { UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, dpage); } +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ UNIV_MEM_INVALID(bpage, sizeof *bpage); diff --git a/storage/innodb_plugin/buf/buf0buf.c b/storage/innodb_plugin/buf/buf0buf.c index 14ec7b75911..0426d5ec872 100644 --- a/storage/innodb_plugin/buf/buf0buf.c +++ b/storage/innodb_plugin/buf/buf0buf.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -873,72 +873,6 @@ buf_chunk_not_freed( return(NULL); } -/*********************************************************************//** -Checks that all blocks in the buffer chunk are in BUF_BLOCK_NOT_USED state. 
-@return TRUE if all freed */ -static -ibool -buf_chunk_all_free( -/*===============*/ - const buf_chunk_t* chunk) /*!< in: chunk being checked */ -{ - const buf_block_t* block; - ulint i; - - ut_ad(buf_pool); - ut_ad(buf_pool_mutex_own()); - - block = chunk->blocks; - - for (i = chunk->size; i--; block++) { - - if (buf_block_get_state(block) != BUF_BLOCK_NOT_USED) { - - return(FALSE); - } - } - - return(TRUE); -} - -/********************************************************************//** -Frees a chunk of buffer frames. */ -static -void -buf_chunk_free( -/*===========*/ - buf_chunk_t* chunk) /*!< out: chunk of buffers */ -{ - buf_block_t* block; - const buf_block_t* block_end; - - ut_ad(buf_pool_mutex_own()); - - block_end = chunk->blocks + chunk->size; - - for (block = chunk->blocks; block < block_end; block++) { - ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED); - ut_a(!block->page.zip.data); - - ut_ad(!block->page.in_LRU_list); - ut_ad(!block->in_unzip_LRU_list); - ut_ad(!block->page.in_flush_list); - /* Remove the block from the free list. */ - ut_ad(block->page.in_free_list); - UT_LIST_REMOVE(list, buf_pool->free, (&block->page)); - - /* Free the latches. */ - mutex_free(&block->mutex); - rw_lock_free(&block->lock); -#ifdef UNIV_SYNC_DEBUG - rw_lock_free(&block->debug_latch); -#endif /* UNIV_SYNC_DEBUG */ - UNIV_MEM_UNDESC(block); - } - - os_mem_free_large(chunk->mem, chunk->mem_size); -} - /********************************************************************//** Creates the buffer pool. @return own: buf_pool object, NULL if not enough memory or error */ @@ -1017,8 +951,6 @@ buf_pool_free(void) chunk = chunks + buf_pool->n_chunks; while (--chunk >= chunks) { - /* Bypass the checks of buf_chunk_free(), since they - would fail at shutdown. */ os_mem_free_large(chunk->mem, chunk->mem_size); } @@ -1193,311 +1125,6 @@ buf_relocate( HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, dpage); } -/********************************************************************//** -Shrinks the buffer pool. */ -static -void -buf_pool_shrink( -/*============*/ - ulint chunk_size) /*!< in: number of pages to remove */ -{ - buf_chunk_t* chunks; - buf_chunk_t* chunk; - ulint max_size; - ulint max_free_size; - buf_chunk_t* max_chunk; - buf_chunk_t* max_free_chunk; - - ut_ad(!buf_pool_mutex_own()); - -try_again: - btr_search_disable(); /* Empty the adaptive hash index again */ - buf_pool_mutex_enter(); - -shrink_again: - if (buf_pool->n_chunks <= 1) { - - /* Cannot shrink if there is only one chunk */ - goto func_done; - } - - /* Search for the largest free chunk - not larger than the size difference */ - chunks = buf_pool->chunks; - chunk = chunks + buf_pool->n_chunks; - max_size = max_free_size = 0; - max_chunk = max_free_chunk = NULL; - - while (--chunk >= chunks) { - if (chunk->size <= chunk_size - && chunk->size > max_free_size) { - if (chunk->size > max_size) { - max_size = chunk->size; - max_chunk = chunk; - } - - if (buf_chunk_all_free(chunk)) { - max_free_size = chunk->size; - max_free_chunk = chunk; - } - } - } - - if (!max_free_size) { - - ulint dirty = 0; - ulint nonfree = 0; - buf_block_t* block; - buf_block_t* bend; - - /* Cannot shrink: try again later - (do not assign srv_buf_pool_old_size) */ - if (!max_chunk) { - - goto func_exit; - } - - block = max_chunk->blocks; - bend = block + max_chunk->size; - - /* Move the blocks of chunk to the end of the - LRU list and try to flush them. 
*/ - for (; block < bend; block++) { - switch (buf_block_get_state(block)) { - case BUF_BLOCK_NOT_USED: - continue; - case BUF_BLOCK_FILE_PAGE: - break; - default: - nonfree++; - continue; - } - - mutex_enter(&block->mutex); - /* The following calls will temporarily - release block->mutex and buf_pool_mutex. - Therefore, we have to always retry, - even if !dirty && !nonfree. */ - - if (!buf_flush_ready_for_replace(&block->page)) { - - buf_LRU_make_block_old(&block->page); - dirty++; - } else if (buf_LRU_free_block(&block->page, TRUE) - != BUF_LRU_FREED) { - nonfree++; - } - - mutex_exit(&block->mutex); - } - - buf_pool_mutex_exit(); - - /* Request for a flush of the chunk if it helps. - Do not flush if there are non-free blocks, since - flushing will not make the chunk freeable. */ - if (nonfree) { - /* Avoid busy-waiting. */ - os_thread_sleep(100000); - } else if (dirty - && buf_flush_batch(BUF_FLUSH_LRU, dirty, 0) - == ULINT_UNDEFINED) { - - buf_flush_wait_batch_end(BUF_FLUSH_LRU); - } - - goto try_again; - } - - max_size = max_free_size; - max_chunk = max_free_chunk; - - srv_buf_pool_old_size = srv_buf_pool_size; - - /* Rewrite buf_pool->chunks. Copy everything but max_chunk. */ - chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks); - memcpy(chunks, buf_pool->chunks, - (max_chunk - buf_pool->chunks) * sizeof *chunks); - memcpy(chunks + (max_chunk - buf_pool->chunks), - max_chunk + 1, - buf_pool->chunks + buf_pool->n_chunks - - (max_chunk + 1)); - ut_a(buf_pool->curr_size > max_chunk->size); - buf_pool->curr_size -= max_chunk->size; - srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE; - chunk_size -= max_chunk->size; - buf_chunk_free(max_chunk); - mem_free(buf_pool->chunks); - buf_pool->chunks = chunks; - buf_pool->n_chunks--; - - /* Allow a slack of one megabyte. */ - if (chunk_size > 1048576 / UNIV_PAGE_SIZE) { - - goto shrink_again; - } - -func_done: - srv_buf_pool_old_size = srv_buf_pool_size; -func_exit: - buf_pool_mutex_exit(); - btr_search_enable(); -} - -/********************************************************************//** -Rebuild buf_pool->page_hash. */ -static -void -buf_pool_page_hash_rebuild(void) -/*============================*/ -{ - ulint i; - ulint n_chunks; - buf_chunk_t* chunk; - hash_table_t* page_hash; - hash_table_t* zip_hash; - buf_page_t* b; - - buf_pool_mutex_enter(); - - /* Free, create, and populate the hash table. */ - hash_table_free(buf_pool->page_hash); - buf_pool->page_hash = page_hash = hash_create(2 * buf_pool->curr_size); - zip_hash = hash_create(2 * buf_pool->curr_size); - - HASH_MIGRATE(buf_pool->zip_hash, zip_hash, buf_page_t, hash, - BUF_POOL_ZIP_FOLD_BPAGE); - - hash_table_free(buf_pool->zip_hash); - buf_pool->zip_hash = zip_hash; - - /* Insert the uncompressed file pages to buf_pool->page_hash. */ - - chunk = buf_pool->chunks; - n_chunks = buf_pool->n_chunks; - - for (i = 0; i < n_chunks; i++, chunk++) { - ulint j; - buf_block_t* block = chunk->blocks; - - for (j = 0; j < chunk->size; j++, block++) { - if (buf_block_get_state(block) - == BUF_BLOCK_FILE_PAGE) { - ut_ad(!block->page.in_zip_hash); - ut_ad(block->page.in_page_hash); - - HASH_INSERT(buf_page_t, hash, page_hash, - buf_page_address_fold( - block->page.space, - block->page.offset), - &block->page); - } - } - } - - /* Insert the compressed-only pages to buf_pool->page_hash. - All such blocks are either in buf_pool->zip_clean or - in buf_pool->flush_list. 
*/ - - for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b; - b = UT_LIST_GET_NEXT(list, b)) { - ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE); - ut_ad(!b->in_flush_list); - ut_ad(b->in_LRU_list); - ut_ad(b->in_page_hash); - ut_ad(!b->in_zip_hash); - - HASH_INSERT(buf_page_t, hash, page_hash, - buf_page_address_fold(b->space, b->offset), b); - } - - for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b; - b = UT_LIST_GET_NEXT(list, b)) { - ut_ad(b->in_flush_list); - ut_ad(b->in_LRU_list); - ut_ad(b->in_page_hash); - ut_ad(!b->in_zip_hash); - - switch (buf_page_get_state(b)) { - case BUF_BLOCK_ZIP_DIRTY: - HASH_INSERT(buf_page_t, hash, page_hash, - buf_page_address_fold(b->space, - b->offset), b); - break; - case BUF_BLOCK_FILE_PAGE: - /* uncompressed page */ - break; - case BUF_BLOCK_ZIP_FREE: - case BUF_BLOCK_ZIP_PAGE: - case BUF_BLOCK_NOT_USED: - case BUF_BLOCK_READY_FOR_USE: - case BUF_BLOCK_MEMORY: - case BUF_BLOCK_REMOVE_HASH: - ut_error; - break; - } - } - - buf_pool_mutex_exit(); -} - -/********************************************************************//** -Resizes the buffer pool. */ -UNIV_INTERN -void -buf_pool_resize(void) -/*=================*/ -{ - buf_pool_mutex_enter(); - - if (srv_buf_pool_old_size == srv_buf_pool_size) { - - buf_pool_mutex_exit(); - return; - } - - if (srv_buf_pool_curr_size + 1048576 > srv_buf_pool_size) { - - buf_pool_mutex_exit(); - - /* Disable adaptive hash indexes and empty the index - in order to free up memory in the buffer pool chunks. */ - buf_pool_shrink((srv_buf_pool_curr_size - srv_buf_pool_size) - / UNIV_PAGE_SIZE); - } else if (srv_buf_pool_curr_size + 1048576 < srv_buf_pool_size) { - - /* Enlarge the buffer pool by at least one megabyte */ - - ulint mem_size - = srv_buf_pool_size - srv_buf_pool_curr_size; - buf_chunk_t* chunks; - buf_chunk_t* chunk; - - chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks); - - memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks - * sizeof *chunks); - - chunk = &chunks[buf_pool->n_chunks]; - - if (!buf_chunk_init(chunk, mem_size)) { - mem_free(chunks); - } else { - buf_pool->curr_size += chunk->size; - srv_buf_pool_curr_size = buf_pool->curr_size - * UNIV_PAGE_SIZE; - mem_free(buf_pool->chunks); - buf_pool->chunks = chunks; - buf_pool->n_chunks++; - } - - srv_buf_pool_old_size = srv_buf_pool_size; - buf_pool_mutex_exit(); - } - - buf_pool_page_hash_rebuild(); -} - /********************************************************************//** Moves a page to the start of the buffer pool LRU list. This high-level function can be used to prevent an important page from slipping out of @@ -2233,8 +1860,10 @@ wait_until_unfixed: if (buf_page_get_state(&block->page) == BUF_BLOCK_ZIP_PAGE) { +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG UT_LIST_REMOVE(list, buf_pool->zip_clean, &block->page); +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ ut_ad(!block->page.in_flush_list); } else { /* Relocate buf_pool->flush_list. 
*/ @@ -2975,7 +2604,9 @@ err_exit: /* The block must be put to the LRU list, to the old blocks */ buf_LRU_add_block(bpage, TRUE/* to old blocks */); +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG buf_LRU_insert_zip_clean(bpage); +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ buf_page_set_io_fix(bpage, BUF_IO_READ); diff --git a/storage/innodb_plugin/buf/buf0flu.c b/storage/innodb_plugin/buf/buf0flu.c index 3a9975ce4b7..60cf8d58bc7 100644 --- a/storage/innodb_plugin/buf/buf0flu.c +++ b/storage/innodb_plugin/buf/buf0flu.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -435,7 +435,9 @@ buf_flush_remove( case BUF_BLOCK_ZIP_DIRTY: buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE); UT_LIST_REMOVE(list, buf_pool->flush_list, bpage); +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG buf_LRU_insert_zip_clean(bpage); +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ break; case BUF_BLOCK_FILE_PAGE: UT_LIST_REMOVE(list, buf_pool->flush_list, bpage); diff --git a/storage/innodb_plugin/buf/buf0lru.c b/storage/innodb_plugin/buf/buf0lru.c index a69b2658c51..01e7e9a5f69 100644 --- a/storage/innodb_plugin/buf/buf0lru.c +++ b/storage/innodb_plugin/buf/buf0lru.c @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -501,6 +501,7 @@ next_page_no_mutex: } } +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /********************************************************************//** Insert a compressed block into buf_pool->zip_clean in the LRU order. */ UNIV_INTERN @@ -532,6 +533,7 @@ buf_LRU_insert_zip_clean( UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, bpage); } } +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ /******************************************************************//** Try to free an uncompressed page of a compressed block from the unzip @@ -1518,7 +1520,9 @@ alloc: } if (b->state == BUF_BLOCK_ZIP_PAGE) { +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG buf_LRU_insert_zip_clean(b); +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ } else { /* Relocate on buf_pool->flush_list. */ buf_flush_relocate_on_flush_list(bpage, b); @@ -1797,7 +1801,9 @@ buf_LRU_block_remove_hashed_page( ut_a(bpage->zip.data); ut_a(buf_page_get_zip_size(bpage)); +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage); +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ mutex_exit(&buf_pool_zip_mutex); buf_pool_mutex_exit_forbid(); diff --git a/storage/innodb_plugin/include/buf0buf.h b/storage/innodb_plugin/include/buf0buf.h index 775ddc758d0..2dfb821e199 100644 --- a/storage/innodb_plugin/include/buf0buf.h +++ b/storage/innodb_plugin/include/buf0buf.h @@ -141,12 +141,6 @@ buf_relocate( BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */ buf_page_t* dpage) /*!< in/out: destination control block */ __attribute__((nonnull)); -/********************************************************************//** -Resizes the buffer pool. 
*/ -UNIV_INTERN -void -buf_pool_resize(void); -/*=================*/ /*********************************************************************//** Gets the current size of buffer buf_pool in bytes. @return size in bytes */ @@ -1446,8 +1440,10 @@ struct buf_pool_struct{ frames and buf_page_t descriptors of blocks that exist in the buffer pool only in compressed form. */ /* @{ */ +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG UT_LIST_BASE_NODE_T(buf_page_t) zip_clean; /*!< unmodified compressed pages */ +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ UT_LIST_BASE_NODE_T(buf_page_t) zip_free[BUF_BUDDY_SIZES]; /*!< buddy free lists */ #if BUF_BUDDY_HIGH != UNIV_PAGE_SIZE diff --git a/storage/innodb_plugin/include/buf0lru.h b/storage/innodb_plugin/include/buf0lru.h index d543bce53cd..bea1f7d5b1e 100644 --- a/storage/innodb_plugin/include/buf0lru.h +++ b/storage/innodb_plugin/include/buf0lru.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -84,6 +84,7 @@ void buf_LRU_invalidate_tablespace( /*==========================*/ ulint id); /*!< in: space id */ +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /********************************************************************//** Insert a compressed block into buf_pool->zip_clean in the LRU order. */ UNIV_INTERN @@ -91,6 +92,7 @@ void buf_LRU_insert_zip_clean( /*=====================*/ buf_page_t* bpage); /*!< in: pointer to the block in question */ +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ /******************************************************************//** Try to free a block. If bpage is a descriptor of a compressed-only -- cgit v1.2.1 From 0dfe86f53f1a54f0d260ac8bd3c1bd712331e34d Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Thu, 16 Jun 2011 16:11:43 +0300 Subject: Silence bogus compiler warning introduced in marko.makela@oracle.com-20110616072721-8bo92ctixq6eqavr --- storage/innobase/btr/btr0btr.c | 2 +- storage/innodb_plugin/btr/btr0btr.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/innobase/btr/btr0btr.c b/storage/innobase/btr/btr0btr.c index 41e0bf6e067..790582815a3 100644 --- a/storage/innobase/btr/btr0btr.c +++ b/storage/innobase/btr/btr0btr.c @@ -2063,7 +2063,7 @@ btr_compress( rec_t* node_ptr; ulint data_size; ulint n_recs; - ulint nth_rec; + ulint nth_rec = 0; /* remove bogus warning */ ulint max_ins_size; ulint max_ins_size_reorg; ulint comp; diff --git a/storage/innodb_plugin/btr/btr0btr.c b/storage/innodb_plugin/btr/btr0btr.c index fb472062fe6..ac188c8c2fd 100644 --- a/storage/innodb_plugin/btr/btr0btr.c +++ b/storage/innodb_plugin/btr/btr0btr.c @@ -3132,7 +3132,7 @@ btr_compress( ulint* offsets; ulint data_size; ulint n_recs; - ulint nth_rec; + ulint nth_rec = 0; /* remove bogus warning */ ulint max_ins_size; ulint max_ins_size_reorg; -- cgit v1.2.1 From edfd31a06a223620a32ea996f974d4dd3decfb80 Mon Sep 17 00:00:00 2001 From: Dmitry Lenev Date: Fri, 17 Jun 2011 02:02:52 +0400 Subject: Fix for bug #12652385 - "61493: REORDERING COLUMNS TO POSITION FIRST CAN CAUSE DATA TO BE CORRUPTED". ALTER TABLE MODIFY/CHANGE ... FIRST did nothing except renaming columns if new version of the table had exactly the same structure as the old one (i.e. 
as result of such statement, names of columns changed their order as specified but data in columns didn't). The same thing happened for ALTER TABLE DROP COLUMN/ADD COLUMN statements which were supposed to produce new version of table with exactly the same structure as the old version of table. I.e. in the latter case the result was the same as if old column was renamed instead of being dropped and new column with default as value being created. Both these problems were caused by the fact that ALTER TABLE implementation incorrectly interpreted both these situations as simple renaming of columns and assumed that in-place ALTER TABLE algorithm could have been used for them. This patch fixes this problem by ensuring that in cases when some column is moved to the first position or some column is dropped the default ALTER TABLE algorithm involving table copying is always used. This is achieved by detecting such situations in mysql_prepare_alter_table() and setting Alter_info::change_level to ALTER_TABLE_DATA_CHANGED for them. mysql-test/r/alter_table.result: Added test for bug #12652385 - "61493: REORDERING COLUMNS TO POSITION FIRST CAN CAUSE DATA TO BE CORRUPTED". mysql-test/t/alter_table.test: Added test for bug #12652385 - "61493: REORDERING COLUMNS TO POSITION FIRST CAN CAUSE DATA TO BE CORRUPTED". sql/sql_table.cc: Changed mysql_prepare_alter_table() to detect situations in which we some column moved to the first position or some column is dropped and ensure that such ALTER TABLE statements won't be carried out using in-place algorithm. The latter could have happened before this patch if new version of table had the same structure as the old one (except the column names). --- mysql-test/r/alter_table.result | 29 +++++++++++++++++++++++++++++ mysql-test/t/alter_table.test | 27 +++++++++++++++++++++++++++ sql/sql_table.cc | 17 +++++++++++++++++ 3 files changed, 73 insertions(+) diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result index 004e2031fb1..674238b14cd 100644 --- a/mysql-test/r/alter_table.result +++ b/mysql-test/r/alter_table.result @@ -1345,4 +1345,33 @@ DROP TABLE t1; CREATE TABLE t1 (a TEXT, id INT, b INT); ALTER TABLE t1 DROP COLUMN a, ADD COLUMN c TEXT FIRST; DROP TABLE t1; +# +# Test for bug #12652385 - "61493: REORDERING COLUMNS TO POSITION +# FIRST CAN CAUSE DATA TO BE CORRUPTED". +# +drop table if exists t1; +# Use MyISAM engine as the fact that InnoDB doesn't support +# in-place ALTER TABLE in cases when columns are being renamed +# hides some bugs. +create table t1 (i int, j int) engine=myisam; +insert into t1 value (1, 2); +# First, test for original problem described in the bug report. +select * from t1; +i j +1 2 +# Change of column order by the below ALTER TABLE statement should +# affect both column names and column contents. +alter table t1 modify column j int first; +select * from t1; +j i +2 1 +# Now test for similar problem with the same root. +# The below ALTER TABLE should change not only the name but +# also the value for the last column of the table. +alter table t1 drop column i, add column k int default 0; +select * from t1; +j k +2 0 +# Clean-up. 
+drop table t1; End of 5.1 tests diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test index 4989a6c380c..2a8ac2a00d6 100644 --- a/mysql-test/t/alter_table.test +++ b/mysql-test/t/alter_table.test @@ -1073,4 +1073,31 @@ ALTER TABLE t1 DROP COLUMN a, ADD COLUMN c TEXT FIRST; DROP TABLE t1; +--echo # +--echo # Test for bug #12652385 - "61493: REORDERING COLUMNS TO POSITION +--echo # FIRST CAN CAUSE DATA TO BE CORRUPTED". +--echo # +--disable_warnings +drop table if exists t1; +--enable_warnings +--echo # Use MyISAM engine as the fact that InnoDB doesn't support +--echo # in-place ALTER TABLE in cases when columns are being renamed +--echo # hides some bugs. +create table t1 (i int, j int) engine=myisam; +insert into t1 value (1, 2); +--echo # First, test for original problem described in the bug report. +select * from t1; +--echo # Change of column order by the below ALTER TABLE statement should +--echo # affect both column names and column contents. +alter table t1 modify column j int first; +select * from t1; +--echo # Now test for similar problem with the same root. +--echo # The below ALTER TABLE should change not only the name but +--echo # also the value for the last column of the table. +alter table t1 drop column i, add column k int default 0; +select * from t1; +--echo # Clean-up. +drop table t1; + + --echo End of 5.1 tests diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 58e2684e5b7..077f00f9ea8 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6170,6 +6170,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if (drop) { drop_it.remove(); + /* + ALTER TABLE DROP COLUMN always changes table data even in cases + when new version of the table has the same structure as the old + one. + */ + alter_info->change_level= ALTER_TABLE_DATA_CHANGED; continue; } /* Check if field is changed */ @@ -6247,7 +6253,14 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if (!def->after) new_create_list.push_back(def); else if (def->after == first_keyword) + { new_create_list.push_front(def); + /* + Re-ordering columns in table can't be done using in-place algorithm + as it always changes table data. + */ + alter_info->change_level= ALTER_TABLE_DATA_CHANGED; + } else { Create_field *find; @@ -6263,6 +6276,10 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, goto err; } find_it.after(def); // Put element after this + /* + Re-ordering columns in table can't be done using in-place algorithm + as it always changes table data. + */ alter_info->change_level= ALTER_TABLE_DATA_CHANGED; } } -- cgit v1.2.1 From 7de029da9a1ef8841c2f7d21299583a5e5671e46 Mon Sep 17 00:00:00 2001 From: Jon Olav Hauglid Date: Fri, 17 Jun 2011 09:51:34 +0200 Subject: Bug#12657095 YASSL ERROR MESSAGE CONTAINS TYPO This patch fixes a typo in a YaSSL error message. 
--- extra/yassl/src/yassl_error.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extra/yassl/src/yassl_error.cpp b/extra/yassl/src/yassl_error.cpp index dd30348cd93..862cd82fb3b 100644 --- a/extra/yassl/src/yassl_error.cpp +++ b/extra/yassl/src/yassl_error.cpp @@ -128,7 +128,7 @@ void SetErrorString(unsigned long error, char* buffer) break; case badVersion_error : - strncpy(buffer, "protocl version mismatch", max); + strncpy(buffer, "protocol version mismatch", max); break; case compress_error : -- cgit v1.2.1 From e60e65052f4a1310a2a725652c195ef74dee046e Mon Sep 17 00:00:00 2001 From: Inaam Rana Date: Fri, 17 Jun 2011 16:20:20 -0400 Subject: Bug 12635227 - 61188: DROP TABLE EXTREMELY SLOW approved by: Marko rb://681 Coalescing of free buf_page_t descriptors can prove to be one severe bottleneck in performance of compression. One such workload where it hurts badly is DROP TABLE. This patch removes buf_page_t allocations from buf_buddy and uses ut_malloc instead. In order to further reduce overhead of colaescing we no longer attempt to coalesce a block if the corresponding free_list is less than 16 in size. --- storage/innodb_plugin/ChangeLog | 7 + storage/innodb_plugin/btr/btr0cur.c | 2 +- storage/innodb_plugin/buf/buf0buddy.c | 410 ++++++++--------------------- storage/innodb_plugin/buf/buf0buf.c | 29 +- storage/innodb_plugin/buf/buf0lru.c | 204 +++++--------- storage/innodb_plugin/include/buf0buddy.h | 21 +- storage/innodb_plugin/include/buf0buddy.ic | 37 +-- storage/innodb_plugin/include/buf0buf.h | 17 ++ storage/innodb_plugin/include/buf0buf.ic | 29 ++ storage/innodb_plugin/include/buf0lru.h | 19 +- storage/innodb_plugin/include/buf0types.h | 17 +- 11 files changed, 276 insertions(+), 516 deletions(-) diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index cf060ed3d84..6d9e074d202 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,10 @@ +2011-06-16 The InnoDB Team + + * btr/btr0cur.c, buf/buf0buddy.c, buf/buf0buf.c, buf/buf0lru.c, + include/buf0buddy.h, include/buf0buddy.ic, include/buf0buf.h, + include/buf0buf.ic, include/buf0lru.h, include/buf0types.h: + Fix Bug#61188 DROP TABLE extremely slow + 2011-06-16 The InnoDB Team * buf/buf0buddy.c, buf/buf0buf.c, buf/buf0flu.c, buf/buf0lru.c, diff --git a/storage/innodb_plugin/btr/btr0cur.c b/storage/innodb_plugin/btr/btr0cur.c index 651c2c342a5..f378c477537 100644 --- a/storage/innodb_plugin/btr/btr0cur.c +++ b/storage/innodb_plugin/btr/btr0cur.c @@ -3864,7 +3864,7 @@ btr_blob_free( && buf_block_get_space(block) == space && buf_block_get_page_no(block) == page_no) { - if (buf_LRU_free_block(&block->page, all) != BUF_LRU_FREED + if (!buf_LRU_free_block(&block->page, all) && all && block->page.zip.data) { /* Attempt to deallocate the uncompressed page if the whole block cannot be deallocted. */ diff --git a/storage/innodb_plugin/buf/buf0buddy.c b/storage/innodb_plugin/buf/buf0buddy.c index 9a95f2c639b..de571743361 100644 --- a/storage/innodb_plugin/buf/buf0buddy.c +++ b/storage/innodb_plugin/buf/buf0buddy.c @@ -45,6 +45,14 @@ static ulint buf_buddy_n_frames; Protected by buf_pool_mutex. */ UNIV_INTERN buf_buddy_stat_t buf_buddy_stat[BUF_BUDDY_SIZES + 1]; +/** Validate a given zip_free list. 
*/ +#define BUF_BUDDY_LIST_VALIDATE(i) \ + UT_LIST_VALIDATE(list, buf_page_t, \ + buf_pool->zip_free[i], \ + ut_ad(buf_page_get_state( \ + ut_list_node_313) \ + == BUF_BLOCK_ZIP_FREE)) + /**********************************************************************//** Get the offset of the buddy of a compressed page frame. @return the buddy relative of page */ @@ -76,21 +84,10 @@ buf_buddy_add_to_free( buf_page_t* bpage, /*!< in,own: block to be freed */ ulint i) /*!< in: index of buf_pool->zip_free[] */ { -#ifdef UNIV_DEBUG_VALGRIND - buf_page_t* b = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); - - if (b) UNIV_MEM_VALID(b, BUF_BUDDY_LOW << i); -#endif /* UNIV_DEBUG_VALGRIND */ - ut_ad(buf_pool_mutex_own()); ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE); ut_ad(buf_pool->zip_free[i].start != bpage); UT_LIST_ADD_FIRST(list, buf_pool->zip_free[i], bpage); - -#ifdef UNIV_DEBUG_VALGRIND - if (b) UNIV_MEM_FREE(b, BUF_BUDDY_LOW << i); - UNIV_MEM_ASSERT_AND_FREE(bpage, BUF_BUDDY_LOW << i); -#endif /* UNIV_DEBUG_VALGRIND */ } /**********************************************************************//** @@ -102,25 +99,17 @@ buf_buddy_remove_from_free( buf_page_t* bpage, /*!< in: block to be removed */ ulint i) /*!< in: index of buf_pool->zip_free[] */ { -#ifdef UNIV_DEBUG_VALGRIND +#ifdef UNIV_DEBUG buf_page_t* prev = UT_LIST_GET_PREV(list, bpage); buf_page_t* next = UT_LIST_GET_NEXT(list, bpage); - if (prev) UNIV_MEM_VALID(prev, BUF_BUDDY_LOW << i); - if (next) UNIV_MEM_VALID(next, BUF_BUDDY_LOW << i); - ut_ad(!prev || buf_page_get_state(prev) == BUF_BLOCK_ZIP_FREE); ut_ad(!next || buf_page_get_state(next) == BUF_BLOCK_ZIP_FREE); -#endif /* UNIV_DEBUG_VALGRIND */ +#endif /* UNIV_DEBUG */ ut_ad(buf_pool_mutex_own()); ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE); UT_LIST_REMOVE(list, buf_pool->zip_free[i], bpage); - -#ifdef UNIV_DEBUG_VALGRIND - if (prev) UNIV_MEM_FREE(prev, BUF_BUDDY_LOW << i); - if (next) UNIV_MEM_FREE(next, BUF_BUDDY_LOW << i); -#endif /* UNIV_DEBUG_VALGRIND */ } /**********************************************************************//** @@ -136,17 +125,13 @@ buf_buddy_alloc_zip( ut_ad(buf_pool_mutex_own()); ut_a(i < BUF_BUDDY_SIZES); + ut_a(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE)); + + ut_d(BUF_BUDDY_LIST_VALIDATE(i)); -#ifndef UNIV_DEBUG_VALGRIND - /* Valgrind would complain about accessing free memory. */ - ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i], - ut_ad(buf_page_get_state(ut_list_node_313) - == BUF_BLOCK_ZIP_FREE))); -#endif /* !UNIV_DEBUG_VALGRIND */ bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); if (bpage) { - UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i); ut_a(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE); buf_buddy_remove_from_free(bpage, i); @@ -165,13 +150,10 @@ buf_buddy_alloc_zip( } } -#ifdef UNIV_DEBUG if (bpage) { - memset(bpage, ~i, BUF_BUDDY_LOW << i); + ut_d(memset(bpage, ~i, BUF_BUDDY_LOW << i)); + UNIV_MEM_ALLOC(bpage, BUF_BUDDY_SIZES << i); } -#endif /* UNIV_DEBUG */ - - UNIV_MEM_ALLOC(bpage, BUF_BUDDY_SIZES << i); return(bpage); } @@ -255,6 +237,7 @@ buf_buddy_alloc_from( { ulint offs = BUF_BUDDY_LOW << j; ut_ad(j <= BUF_BUDDY_SIZES); + ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE)); ut_ad(j >= i); ut_ad(!ut_align_offset(buf, offs)); @@ -268,13 +251,7 @@ buf_buddy_alloc_from( bpage = (buf_page_t*) ((byte*) buf + offs); ut_d(memset(bpage, j, BUF_BUDDY_LOW << j)); bpage->state = BUF_BLOCK_ZIP_FREE; -#ifndef UNIV_DEBUG_VALGRIND - /* Valgrind would complain about accessing free memory. 
*/ - ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i], - ut_ad(buf_page_get_state( - ut_list_node_313) - == BUF_BLOCK_ZIP_FREE))); -#endif /* !UNIV_DEBUG_VALGRIND */ + ut_d(BUF_BUDDY_LIST_VALIDATE(i)); buf_buddy_add_to_free(bpage, j); } @@ -284,8 +261,8 @@ buf_buddy_alloc_from( /**********************************************************************//** Allocate a block. The thread calling this function must hold buf_pool_mutex and must not hold buf_pool_zip_mutex or any block->mutex. -The buf_pool_mutex may only be released and reacquired if lru != NULL. -@return allocated block, possibly NULL if lru==NULL */ +The buf_pool_mutex may be released and reacquired. +@return allocated block, never NULL */ UNIV_INTERN void* buf_buddy_alloc_low( @@ -294,13 +271,14 @@ buf_buddy_alloc_low( or BUF_BUDDY_SIZES */ ibool* lru) /*!< in: pointer to a variable that will be assigned TRUE if storage was allocated from the LRU list - and buf_pool_mutex was temporarily released, - or NULL if the LRU list should not be used */ + and buf_pool_mutex was temporarily released */ { buf_block_t* block; + ut_ad(lru); ut_ad(buf_pool_mutex_own()); ut_ad(!mutex_own(&buf_pool_zip_mutex)); + ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE)); if (i < BUF_BUDDY_SIZES) { /* Try to allocate from the buddy system. */ @@ -320,11 +298,6 @@ buf_buddy_alloc_low( goto alloc_big; } - if (!lru) { - - return(NULL); - } - /* Try replacing an uncompressed page in the buffer pool. */ buf_pool_mutex_exit(); block = buf_LRU_get_free_block(); @@ -341,65 +314,6 @@ func_exit: return(block); } -/**********************************************************************//** -Try to relocate the control block of a compressed page. -@return TRUE if relocated */ -static -ibool -buf_buddy_relocate_block( -/*=====================*/ - buf_page_t* bpage, /*!< in: block to relocate */ - buf_page_t* dpage) /*!< in: free block to relocate to */ -{ -#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - buf_page_t* b; -#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ - - ut_ad(buf_pool_mutex_own()); - - switch (buf_page_get_state(bpage)) { - case BUF_BLOCK_ZIP_FREE: - case BUF_BLOCK_NOT_USED: - case BUF_BLOCK_READY_FOR_USE: - case BUF_BLOCK_FILE_PAGE: - case BUF_BLOCK_MEMORY: - case BUF_BLOCK_REMOVE_HASH: - ut_error; - case BUF_BLOCK_ZIP_DIRTY: - /* Cannot relocate dirty pages. */ - return(FALSE); - - case BUF_BLOCK_ZIP_PAGE: - break; - } - - mutex_enter(&buf_pool_zip_mutex); - - if (!buf_page_can_relocate(bpage)) { - mutex_exit(&buf_pool_zip_mutex); - return(FALSE); - } - - buf_relocate(bpage, dpage); - ut_d(bpage->state = BUF_BLOCK_ZIP_FREE); -#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - /* relocate buf_pool->zip_clean */ - b = UT_LIST_GET_PREV(list, dpage); - UT_LIST_REMOVE(list, buf_pool->zip_clean, dpage); - - if (b) { - UT_LIST_INSERT_AFTER(list, buf_pool->zip_clean, b, dpage); - } else { - UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, dpage); - } -#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ - - UNIV_MEM_INVALID(bpage, sizeof *bpage); - - mutex_exit(&buf_pool_zip_mutex); - return(TRUE); -} - /**********************************************************************//** Try to relocate a block. 
@return TRUE if relocated */ @@ -414,106 +328,89 @@ buf_buddy_relocate( buf_page_t* bpage; const ulint size = BUF_BUDDY_LOW << i; ullint usec = ut_time_us(NULL); + mutex_t* mutex; + ulint space; + ulint page_no; ut_ad(buf_pool_mutex_own()); ut_ad(!mutex_own(&buf_pool_zip_mutex)); ut_ad(!ut_align_offset(src, size)); ut_ad(!ut_align_offset(dst, size)); + ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE)); UNIV_MEM_ASSERT_W(dst, size); /* We assume that all memory from buf_buddy_alloc() - is used for either compressed pages or buf_page_t - objects covering compressed pages. */ + is used for compressed page frames. */ /* We look inside the allocated objects returned by - buf_buddy_alloc() and assume that anything of - PAGE_ZIP_MIN_SIZE or larger is a compressed page that contains - a valid space_id and page_no in the page header. Should the - fields be invalid, we will be unable to relocate the block. - We also assume that anything that fits sizeof(buf_page_t) - actually is a properly initialized buf_page_t object. */ - - if (size >= PAGE_ZIP_MIN_SIZE) { - /* This is a compressed page. */ - mutex_t* mutex; - - /* The src block may be split into smaller blocks, - some of which may be free. Thus, the - mach_read_from_4() calls below may attempt to read - from free memory. The memory is "owned" by the buddy - allocator (and it has been allocated from the buffer - pool), so there is nothing wrong about this. The - mach_read_from_4() calls here will only trigger bogus - Valgrind memcheck warnings in UNIV_DEBUG_VALGRIND builds. */ - ulint space = mach_read_from_4( - (const byte*) src + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); - ulint page_no = mach_read_from_4( - (const byte*) src + FIL_PAGE_OFFSET); - /* Suppress Valgrind warnings about conditional jump - on uninitialized value. */ - UNIV_MEM_VALID(&space, sizeof space); - UNIV_MEM_VALID(&page_no, sizeof page_no); - bpage = buf_page_hash_get(space, page_no); - - if (!bpage || bpage->zip.data != src) { - /* The block has probably been freshly - allocated by buf_LRU_get_free_block() but not - added to buf_pool->page_hash yet. Obviously, - it cannot be relocated. */ - - return(FALSE); - } + buf_buddy_alloc() and assume that each block is a compressed + page that contains a valid space_id and page_no in the page + header. Should the fields be invalid, we will be unable to + relocate the block. */ + + /* The src block may be split into smaller blocks, + some of which may be free. Thus, the + mach_read_from_4() calls below may attempt to read + from free memory. The memory is "owned" by the buddy + allocator (and it has been allocated from the buffer + pool), so there is nothing wrong about this. The + mach_read_from_4() calls here will only trigger bogus + Valgrind memcheck warnings in UNIV_DEBUG_VALGRIND builds. */ + space = mach_read_from_4((const byte *) src + + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); + page_no = mach_read_from_4((const byte *) src + + FIL_PAGE_OFFSET); + /* Suppress Valgrind warnings about conditional jump + on uninitialized value. */ + UNIV_MEM_VALID(&space, sizeof space); + UNIV_MEM_VALID(&page_no, sizeof page_no); + bpage = buf_page_hash_get(space, page_no); + + if (!bpage || bpage->zip.data != src) { + /* The block has probably been freshly + allocated by buf_LRU_get_free_block() but not + added to buf_pool->page_hash yet. Obviously, + it cannot be relocated. */ - if (page_zip_get_size(&bpage->zip) != size) { - /* The block is of different size. We would - have to relocate all blocks covered by src. - For the sake of simplicity, give up. 
*/ - ut_ad(page_zip_get_size(&bpage->zip) < size); + return(FALSE); + } - return(FALSE); - } + if (page_zip_get_size(&bpage->zip) != size) { + /* The block is of different size. We would + have to relocate all blocks covered by src. + For the sake of simplicity, give up. */ + ut_ad(page_zip_get_size(&bpage->zip) < size); - /* The block must have been allocated, but it may - contain uninitialized data. */ - UNIV_MEM_ASSERT_W(src, size); - - mutex = buf_page_get_mutex(bpage); - - mutex_enter(mutex); - - if (buf_page_can_relocate(bpage)) { - /* Relocate the compressed page. */ - ut_a(bpage->zip.data == src); - memcpy(dst, src, size); - bpage->zip.data = dst; - mutex_exit(mutex); -success: - UNIV_MEM_INVALID(src, size); - { - buf_buddy_stat_t* buddy_stat - = &buf_buddy_stat[i]; - buddy_stat->relocated++; - buddy_stat->relocated_usec - += ut_time_us(NULL) - usec; - } - return(TRUE); - } + return(FALSE); + } - mutex_exit(mutex); - } else if (i == buf_buddy_get_slot(sizeof(buf_page_t))) { - /* This must be a buf_page_t object. */ -#if UNIV_WORD_SIZE == 4 - /* On 32-bit systems, there is no padding in - buf_page_t. On other systems, Valgrind could complain - about uninitialized pad bytes. */ - UNIV_MEM_ASSERT_RW(src, size); -#endif - if (buf_buddy_relocate_block(src, dst)) { + /* The block must have been allocated, but it may + contain uninitialized data. */ + UNIV_MEM_ASSERT_W(src, size); + + mutex = buf_page_get_mutex(bpage); + + mutex_enter(mutex); - goto success; + if (buf_page_can_relocate(bpage)) { + /* Relocate the compressed page. */ + ut_a(bpage->zip.data == src); + memcpy(dst, src, size); + bpage->zip.data = dst; + mutex_exit(mutex); + UNIV_MEM_INVALID(src, size); + { + buf_buddy_stat_t* buddy_stat + = &buf_buddy_stat[i]; + buddy_stat->relocated++; + buddy_stat->relocated_usec + += ut_time_us(NULL) - usec; } + return(TRUE); } + mutex_exit(mutex); + return(FALSE); } @@ -534,12 +431,14 @@ buf_buddy_free_low( ut_ad(buf_pool_mutex_own()); ut_ad(!mutex_own(&buf_pool_zip_mutex)); ut_ad(i <= BUF_BUDDY_SIZES); + ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE)); ut_ad(buf_buddy_stat[i].used > 0); buf_buddy_stat[i].used--; + recombine: UNIV_MEM_ASSERT_AND_ALLOC(buf, BUF_BUDDY_LOW << i); - ut_d(((buf_page_t*) buf)->state = BUF_BLOCK_ZIP_FREE); + ((buf_page_t*) buf)->state = BUF_BLOCK_ZIP_FREE; if (i == BUF_BUDDY_SIZES) { buf_buddy_block_free(buf); @@ -550,32 +449,36 @@ recombine: ut_ad(buf == ut_align_down(buf, BUF_BUDDY_LOW << i)); ut_ad(!buf_pool_contains_zip(buf)); - /* Try to combine adjacent blocks. */ + /* Do not recombine blocks if there are few free blocks. + We may waste up to 15360*max_len bytes to free blocks + (1024 + 2048 + 4096 + 8192 = 15360) */ + if (UT_LIST_GET_LEN(buf_pool->zip_free[i]) < 16) { + goto func_exit; + } + /* Try to combine adjacent blocks. */ buddy = (buf_page_t*) buf_buddy_get(((byte*) buf), BUF_BUDDY_LOW << i); #ifndef UNIV_DEBUG_VALGRIND - /* Valgrind would complain about accessing free memory. */ + /* When Valgrind instrumentation is not enabled, we can read + buddy->state to quickly determine that a block is not free. + When the block is not free, buddy->state belongs to a compressed + page frame that may be flagged uninitialized in our Valgrind + instrumentation. */ if (buddy->state != BUF_BLOCK_ZIP_FREE) { goto buddy_nonfree; } - - /* The field buddy->state can only be trusted for free blocks. - If buddy->state == BUF_BLOCK_ZIP_FREE, the block is free if - it is in the free list. 
*/ #endif /* !UNIV_DEBUG_VALGRIND */ for (bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); bpage; ) { - UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i); ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE); if (bpage == buddy) { -buddy_free: /* The buddy is free: recombine */ buf_buddy_remove_from_free(bpage, i); -buddy_free2: +buddy_is_free: ut_ad(buf_page_get_state(buddy) == BUF_BLOCK_ZIP_FREE); ut_ad(!buf_pool_contains_zip(buddy)); i++; @@ -585,122 +488,43 @@ buddy_free2: } ut_a(bpage != buf); - - { - buf_page_t* next = UT_LIST_GET_NEXT(list, bpage); - UNIV_MEM_ASSERT_AND_FREE(bpage, BUF_BUDDY_LOW << i); - bpage = next; - } + UNIV_MEM_ASSERT_W(bpage, BUF_BUDDY_LOW << i); + bpage = UT_LIST_GET_NEXT(list, bpage); } #ifndef UNIV_DEBUG_VALGRIND buddy_nonfree: - /* Valgrind would complain about accessing free memory. */ - ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i], - ut_ad(buf_page_get_state(ut_list_node_313) - == BUF_BLOCK_ZIP_FREE))); -#endif /* UNIV_DEBUG_VALGRIND */ +#endif /* !UNIV_DEBUG_VALGRIND */ + + ut_d(BUF_BUDDY_LIST_VALIDATE(i)); /* The buddy is not free. Is there a free block of this size? */ bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); if (bpage) { + /* Remove the block from the free list, because a successful buf_buddy_relocate() will overwrite bpage->list. */ - - UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i); buf_buddy_remove_from_free(bpage, i); /* Try to relocate the buddy of buf to the free block. */ if (buf_buddy_relocate(buddy, bpage, i)) { - ut_d(buddy->state = BUF_BLOCK_ZIP_FREE); - goto buddy_free2; + buddy->state = BUF_BLOCK_ZIP_FREE; + goto buddy_is_free; } buf_buddy_add_to_free(bpage, i); - - /* Try to relocate the buddy of the free block to buf. */ - buddy = (buf_page_t*) buf_buddy_get(((byte*) bpage), - BUF_BUDDY_LOW << i); - -#ifndef UNIV_DEBUG_VALGRIND - /* Valgrind would complain about accessing free memory. */ - - /* The buddy must not be (completely) free, because we - always recombine adjacent free blocks. - - (Parts of the buddy can be free in - buf_pool->zip_free[j] with j < i.) */ - ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i], - ut_ad(buf_page_get_state( - ut_list_node_313) - == BUF_BLOCK_ZIP_FREE - && ut_list_node_313 != buddy))); -#endif /* !UNIV_DEBUG_VALGRIND */ - - if (buf_buddy_relocate(buddy, buf, i)) { - - buf = bpage; - UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i); - ut_d(buddy->state = BUF_BLOCK_ZIP_FREE); - goto buddy_free; - } } +func_exit: /* Free the block to the buddy list. */ bpage = buf; -#ifdef UNIV_DEBUG - if (i < buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE)) { - /* This area has most likely been allocated for at - least one compressed-only block descriptor. Check - that there are no live objects in the area. This is - not a complete check: it may yield false positives as - well as false negatives. Also, due to buddy blocks - being recombined, it is possible (although unlikely) - that this branch is never reached. */ - - char* c; - -# ifndef UNIV_DEBUG_VALGRIND - /* Valgrind would complain about accessing - uninitialized memory. Besides, Valgrind performs a - more exhaustive check, at every memory access. */ - const buf_page_t* b = buf; - const buf_page_t* const b_end = (buf_page_t*) - ((char*) b + (BUF_BUDDY_LOW << i)); - - for (; b < b_end; b++) { - /* Avoid false positives (and cause false - negatives) by checking for b->space < 1000. 
*/ - - if ((b->state == BUF_BLOCK_ZIP_PAGE - || b->state == BUF_BLOCK_ZIP_DIRTY) - && b->space > 0 && b->space < 1000) { - fprintf(stderr, - "buddy dirty %p %u (%u,%u) %p,%lu\n", - (void*) b, - b->state, b->space, b->offset, - buf, i); - } - } -# endif /* !UNIV_DEBUG_VALGRIND */ - - /* Scramble the block. This should make any pointers - invalid and trigger a segmentation violation. Because - the scrambling can be reversed, it may be possible to - track down the object pointing to the freed data by - dereferencing the unscrambled bpage->LRU or - bpage->list pointers. */ - for (c = (char*) buf + (BUF_BUDDY_LOW << i); - c-- > (char*) buf; ) { - *c = ~*c ^ i; - } - } else { - /* Fill large blocks with a constant pattern. */ - memset(bpage, i, BUF_BUDDY_LOW << i); - } -#endif /* UNIV_DEBUG */ + + /* Fill large blocks with a constant pattern. */ + ut_d(memset(bpage, i, BUF_BUDDY_LOW << i)); + UNIV_MEM_INVALID(bpage, BUF_BUDDY_LOW << i); + bpage->state = BUF_BLOCK_ZIP_FREE; buf_buddy_add_to_free(bpage, i); } diff --git a/storage/innodb_plugin/buf/buf0buf.c b/storage/innodb_plugin/buf/buf0buf.c index 0426d5ec872..c0da7dbdbe5 100644 --- a/storage/innodb_plugin/buf/buf0buf.c +++ b/storage/innodb_plugin/buf/buf0buf.c @@ -1358,7 +1358,7 @@ err_exit: mutex_enter(block_mutex); /* Discard the uncompressed page frame if possible. */ - if (buf_LRU_free_block(bpage, FALSE) == BUF_LRU_FREED) { + if (buf_LRU_free_block(bpage, FALSE)) { mutex_exit(block_mutex); goto lookup; @@ -1699,13 +1699,8 @@ loop: if (block) { /* If the guess is a compressed page descriptor that - has been allocated by buf_buddy_alloc(), it may have - been invalidated by buf_buddy_relocate(). In that - case, block could point to something that happens to - contain the expected bits in block->page. Similarly, - the guess may be pointing to a buffer pool chunk that - has been released when resizing the buffer pool. */ - + has been allocated by buf_page_alloc_descriptor(), + it may have been freed by buf_relocate(). */ if (!buf_block_is_uncompressed(block) || offset != block->page.offset || space != block->page.space @@ -1889,11 +1884,10 @@ wait_until_unfixed: mutex_exit(&buf_pool_zip_mutex); buf_pool->n_pend_unzip++; - bpage->state = BUF_BLOCK_ZIP_FREE; - buf_buddy_free(bpage, sizeof *bpage); - buf_pool_mutex_exit(); + buf_page_free_descriptor(bpage); + /* Decompress the page and apply buffered operations while not holding buf_pool_mutex or block->mutex. */ success = buf_zip_decompress(block, srv_use_checksums); @@ -1937,7 +1931,7 @@ wait_until_unfixed: /* Try to evict the block from the buffer pool, to use the insert buffer as much as possible. */ - if (buf_LRU_free_block(&block->page, TRUE) == BUF_LRU_FREED) { + if (buf_LRU_free_block(&block->page, TRUE)) { buf_pool_mutex_exit(); mutex_exit(&block->mutex); fprintf(stderr, @@ -2551,17 +2545,12 @@ err_exit: mutex_exit(&block->mutex); } else { - /* Defer buf_buddy_alloc() until after the block has - been found not to exist. The buf_buddy_alloc() and - buf_buddy_free() calls may be expensive because of - buf_buddy_relocate(). */ /* The compressed page must be allocated before the control block (bpage), in order to avoid the invocation of buf_buddy_relocate_block() on uninitialized data. */ data = buf_buddy_alloc(zip_size, &lru); - bpage = buf_buddy_alloc(sizeof *bpage, &lru); /* If buf_buddy_alloc() allocated storage from the LRU list, it released and reacquired buf_pool_mutex. 
Thus, we must @@ -2569,15 +2558,13 @@ err_exit: if (UNIV_UNLIKELY(lru) && UNIV_LIKELY_NULL(buf_page_hash_get(space, offset))) { - /* The block was added by some other thread. */ - bpage->state = BUF_BLOCK_ZIP_FREE; - buf_buddy_free(bpage, sizeof *bpage); buf_buddy_free(data, zip_size); - bpage = NULL; goto func_exit; } + bpage = buf_page_alloc_descriptor(); + page_zip_des_init(&bpage->zip); page_zip_set_size(&bpage->zip, zip_size); bpage->zip.data = data; diff --git a/storage/innodb_plugin/buf/buf0lru.c b/storage/innodb_plugin/buf/buf0lru.c index 01e7e9a5f69..ad6feef5f2f 100644 --- a/storage/innodb_plugin/buf/buf0lru.c +++ b/storage/innodb_plugin/buf/buf0lru.c @@ -355,7 +355,7 @@ scan_again: while (bpage != NULL) { buf_page_t* prev_bpage; - ibool prev_bpage_buf_fix = FALSE; + mutex_t* block_mutex = NULL; ut_a(buf_page_in_file(bpage)); @@ -368,18 +368,21 @@ scan_again: if (buf_page_get_space(bpage) != id) { /* Skip this block, as it does not belong to the space that is being invalidated. */ + goto next_page; } else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) { /* We cannot remove this page during this scan yet; maybe the system is currently reading it in, or flushing the modifications to the file */ all_freed = FALSE; + goto next_page; } else { - mutex_t* block_mutex = buf_page_get_mutex(bpage); + block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); if (bpage->buf_fix_count > 0) { + mutex_exit(block_mutex); /* We cannot remove this page during this scan yet; maybe the system is currently reading it in, or flushing @@ -389,106 +392,59 @@ scan_again: goto next_page; } + } + + ut_ad(mutex_own(block_mutex)); #ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, - "Dropping space %lu page %lu\n", - (ulong) buf_page_get_space(bpage), - (ulong) buf_page_get_page_no(bpage)); - } + if (buf_debug_prints) { + fprintf(stderr, + "Dropping space %lu page %lu\n", + (ulong) buf_page_get_space(bpage), + (ulong) buf_page_get_page_no(bpage)); + } #endif - if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { - /* This is a compressed-only block - descriptor. Ensure that prev_bpage - cannot be relocated when bpage is freed. */ - if (UNIV_LIKELY(prev_bpage != NULL)) { - switch (buf_page_get_state( - prev_bpage)) { - case BUF_BLOCK_FILE_PAGE: - /* Descriptors of uncompressed - blocks will not be relocated, - because we are holding the - buf_pool_mutex. */ - break; - case BUF_BLOCK_ZIP_PAGE: - case BUF_BLOCK_ZIP_DIRTY: - /* Descriptors of compressed- - only blocks can be relocated, - unless they are buffer-fixed. - Because both bpage and - prev_bpage are protected by - buf_pool_zip_mutex, it is - not necessary to acquire - further mutexes. */ - ut_ad(&buf_pool_zip_mutex - == block_mutex); - ut_ad(mutex_own(block_mutex)); - prev_bpage_buf_fix = TRUE; - prev_bpage->buf_fix_count++; - break; - default: - ut_error; - } - } - } else if (((buf_block_t*) bpage)->is_hashed) { - ulint page_no; - ulint zip_size; - - buf_pool_mutex_exit(); + if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { + /* This is a compressed-only block + descriptor. Do nothing. 
*/ + } else if (((buf_block_t*) bpage)->is_hashed) { + ulint page_no; + ulint zip_size; - zip_size = buf_page_get_zip_size(bpage); - page_no = buf_page_get_page_no(bpage); + buf_pool_mutex_exit(); - mutex_exit(block_mutex); + zip_size = buf_page_get_zip_size(bpage); + page_no = buf_page_get_page_no(bpage); - /* Note that the following call will acquire - an S-latch on the page */ + mutex_exit(block_mutex); - btr_search_drop_page_hash_when_freed( - id, zip_size, page_no); - goto scan_again; - } + /* Note that the following call will acquire + an S-latch on the page */ - if (bpage->oldest_modification != 0) { + btr_search_drop_page_hash_when_freed( + id, zip_size, page_no); + goto scan_again; + } - buf_flush_remove(bpage); - } + if (bpage->oldest_modification != 0) { - /* Remove from the LRU list. */ + buf_flush_remove(bpage); + } - if (buf_LRU_block_remove_hashed_page(bpage, TRUE) - != BUF_BLOCK_ZIP_FREE) { - buf_LRU_block_free_hashed_page((buf_block_t*) - bpage); - } else { - /* The block_mutex should have been - released by buf_LRU_block_remove_hashed_page() - when it returns BUF_BLOCK_ZIP_FREE. */ - ut_ad(block_mutex == &buf_pool_zip_mutex); - ut_ad(!mutex_own(block_mutex)); - - if (prev_bpage_buf_fix) { - /* We temporarily buffer-fixed - prev_bpage, so that - buf_buddy_free() could not - relocate it, in case it was a - compressed-only block - descriptor. */ - - mutex_enter(block_mutex); - ut_ad(prev_bpage->buf_fix_count > 0); - prev_bpage->buf_fix_count--; - mutex_exit(block_mutex); - } + /* Remove from the LRU list. */ - goto next_page_no_mutex; - } -next_page: + if (buf_LRU_block_remove_hashed_page(bpage, TRUE) + != BUF_BLOCK_ZIP_FREE) { + buf_LRU_block_free_hashed_page((buf_block_t*) bpage); mutex_exit(block_mutex); + } else { + /* The block_mutex should have been released + by buf_LRU_block_remove_hashed_page() when it + returns BUF_BLOCK_ZIP_FREE. */ + ut_ad(block_mutex == &buf_pool_zip_mutex); + ut_ad(!mutex_own(block_mutex)); } - -next_page_no_mutex: +next_page: bpage = prev_bpage; } @@ -574,7 +530,7 @@ buf_LRU_free_from_unzip_LRU_list( UNIV_LIKELY(block != NULL) && UNIV_LIKELY(distance > 0); block = UT_LIST_GET_PREV(unzip_LRU, block), distance--) { - enum buf_lru_free_block_status freed; + ibool freed; ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); ut_ad(block->in_unzip_LRU_list); @@ -584,24 +540,9 @@ buf_LRU_free_from_unzip_LRU_list( freed = buf_LRU_free_block(&block->page, FALSE); mutex_exit(&block->mutex); - switch (freed) { - case BUF_LRU_FREED: + if (freed) { return(TRUE); - - case BUF_LRU_CANNOT_RELOCATE: - /* If we failed to relocate, try - regular LRU eviction. */ - return(FALSE); - - case BUF_LRU_NOT_FREED: - /* The block was buffer-fixed or I/O-fixed. - Keep looking. */ - continue; } - - /* inappropriate return value from - buf_LRU_free_block() */ - ut_error; } return(FALSE); @@ -632,10 +573,9 @@ buf_LRU_free_from_common_LRU_list( UNIV_LIKELY(bpage != NULL) && UNIV_LIKELY(distance > 0); bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) { - enum buf_lru_free_block_status freed; - unsigned accessed; - mutex_t* block_mutex - = buf_page_get_mutex(bpage); + ibool freed; + unsigned accessed; + mutex_t* block_mutex = buf_page_get_mutex(bpage); ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); @@ -645,8 +585,7 @@ buf_LRU_free_from_common_LRU_list( freed = buf_LRU_free_block(bpage, TRUE); mutex_exit(block_mutex); - switch (freed) { - case BUF_LRU_FREED: + if (freed) { /* Keep track of pages that are evicted without ever being accessed. 
This gives us a measure of the effectiveness of readahead */ @@ -654,21 +593,7 @@ buf_LRU_free_from_common_LRU_list( ++buf_pool->stat.n_ra_pages_evicted; } return(TRUE); - - case BUF_LRU_NOT_FREED: - /* The block was dirty, buffer-fixed, or I/O-fixed. - Keep looking. */ - continue; - - case BUF_LRU_CANNOT_RELOCATE: - /* This should never occur, because we - want to discard the compressed page too. */ - break; } - - /* inappropriate return value from - buf_LRU_free_block() */ - ut_error; } return(FALSE); @@ -1350,17 +1275,16 @@ buf_LRU_make_block_old( Try to free a block. If bpage is a descriptor of a compressed-only page, the descriptor object will be freed as well. -NOTE: If this function returns BUF_LRU_FREED, it will temporarily +NOTE: If this function returns TRUE, it will temporarily release buf_pool_mutex. Furthermore, the page frame will no longer be accessible via bpage. The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and release these two mutexes after the call. No other buf_page_get_mutex() may be held when calling this function. -@return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or -BUF_LRU_NOT_FREED otherwise. */ +@return TRUE if freed, FALSE otherwise. */ UNIV_INTERN -enum buf_lru_free_block_status +ibool buf_LRU_free_block( /*===============*/ buf_page_t* bpage, /*!< in: block to be freed */ @@ -1385,7 +1309,7 @@ buf_LRU_free_block( if (!buf_page_can_relocate(bpage)) { /* Do not free buffer-fixed or I/O-fixed blocks. */ - return(BUF_LRU_NOT_FREED); + return(FALSE); } #ifdef UNIV_IBUF_COUNT_DEBUG @@ -1397,7 +1321,7 @@ buf_LRU_free_block( /* Do not completely free dirty blocks. */ if (bpage->oldest_modification) { - return(BUF_LRU_NOT_FREED); + return(FALSE); } } else if (bpage->oldest_modification) { /* Do not completely free dirty blocks. */ @@ -1405,7 +1329,7 @@ buf_LRU_free_block( if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY); - return(BUF_LRU_NOT_FREED); + return(FALSE); } goto alloc; @@ -1414,14 +1338,8 @@ buf_LRU_free_block( If it cannot be allocated (without freeing a block from the LRU list), refuse to free bpage. */ alloc: - buf_pool_mutex_exit_forbid(); - b = buf_buddy_alloc(sizeof *b, NULL); - buf_pool_mutex_exit_allow(); - - if (UNIV_UNLIKELY(!b)) { - return(BUF_LRU_CANNOT_RELOCATE); - } - + b = buf_page_alloc_descriptor(); + ut_a(b); memcpy(b, bpage, sizeof *b); } @@ -1589,7 +1507,7 @@ alloc: mutex_enter(block_mutex); } - return(BUF_LRU_FREED); + return(TRUE); } /******************************************************************//** @@ -1809,10 +1727,8 @@ buf_LRU_block_remove_hashed_page( buf_pool_mutex_exit_forbid(); buf_buddy_free(bpage->zip.data, page_zip_get_size(&bpage->zip)); - bpage->state = BUF_BLOCK_ZIP_FREE; - buf_buddy_free(bpage, sizeof(*bpage)); buf_pool_mutex_exit_allow(); - UNIV_MEM_UNDESC(bpage); + buf_page_free_descriptor(bpage); return(BUF_BLOCK_ZIP_FREE); case BUF_BLOCK_FILE_PAGE: diff --git a/storage/innodb_plugin/include/buf0buddy.h b/storage/innodb_plugin/include/buf0buddy.h index 7648950d5d1..d218a7112f1 100644 --- a/storage/innodb_plugin/include/buf0buddy.h +++ b/storage/innodb_plugin/include/buf0buddy.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2006, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -37,24 +37,19 @@ Created December 2006 by Marko Makela /**********************************************************************//** Allocate a block. The thread calling this function must hold buf_pool_mutex and must not hold buf_pool_zip_mutex or any -block->mutex. The buf_pool_mutex may only be released and reacquired -if lru != NULL. This function should only be used for allocating -compressed page frames or control blocks (buf_page_t). Allocated -control blocks must be properly initialized immediately after -buf_buddy_alloc() has returned the memory, before releasing -buf_pool_mutex. -@return allocated block, possibly NULL if lru == NULL */ +block->mutex. The buf_pool_mutex may be released and reacquired. +This function should only be used for allocating compressed page frames. +@return allocated block, never NULL */ UNIV_INLINE void* buf_buddy_alloc( /*============*/ - ulint size, /*!< in: block size, up to UNIV_PAGE_SIZE */ + ulint size, /*!< in: compressed page size + (between PAGE_ZIP_MIN_SIZE and UNIV_PAGE_SIZE) */ ibool* lru) /*!< in: pointer to a variable that will be assigned TRUE if storage was allocated from the LRU list - and buf_pool_mutex was temporarily released, - or NULL if the LRU list should not be used */ - __attribute__((malloc)); - + and buf_pool_mutex was temporarily released */ + __attribute__((malloc, nonnull)); /**********************************************************************//** Release a block. */ UNIV_INLINE diff --git a/storage/innodb_plugin/include/buf0buddy.ic b/storage/innodb_plugin/include/buf0buddy.ic index c419a2374d9..1087b45ee61 100644 --- a/storage/innodb_plugin/include/buf0buddy.ic +++ b/storage/innodb_plugin/include/buf0buddy.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2006, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,8 +36,8 @@ Created December 2006 by Marko Makela /**********************************************************************//** Allocate a block. The thread calling this function must hold buf_pool_mutex and must not hold buf_pool_zip_mutex or any block->mutex. -The buf_pool_mutex may only be released and reacquired if lru != NULL. -@return allocated block, possibly NULL if lru==NULL */ +The buf_pool_mutex may be released and reacquired. +@return allocated block, never NULL */ UNIV_INTERN void* buf_buddy_alloc_low( @@ -46,9 +46,8 @@ buf_buddy_alloc_low( or BUF_BUDDY_SIZES */ ibool* lru) /*!< in: pointer to a variable that will be assigned TRUE if storage was allocated from the LRU list - and buf_pool_mutex was temporarily released, - or NULL if the LRU list should not be used */ - __attribute__((malloc)); + and buf_pool_mutex was temporarily released */ + __attribute__((malloc, nonnull)); /**********************************************************************//** Deallocate a block. */ @@ -74,6 +73,8 @@ buf_buddy_get_slot( ulint i; ulint s; + ut_ad(size >= PAGE_ZIP_MIN_SIZE); + for (i = 0, s = BUF_BUDDY_LOW; s < size; i++, s <<= 1) { } @@ -84,26 +85,25 @@ buf_buddy_get_slot( /**********************************************************************//** Allocate a block. 
The thread calling this function must hold buf_pool_mutex and must not hold buf_pool_zip_mutex or any -block->mutex. The buf_pool_mutex may only be released and reacquired -if lru != NULL. This function should only be used for allocating -compressed page frames or control blocks (buf_page_t). Allocated -control blocks must be properly initialized immediately after -buf_buddy_alloc() has returned the memory, before releasing -buf_pool_mutex. -@return allocated block, possibly NULL if lru == NULL */ +block->mutex. The buf_pool_mutex may be released and reacquired. +This function should only be used for allocating compressed page frames. +@return allocated block, never NULL */ UNIV_INLINE void* buf_buddy_alloc( /*============*/ - ulint size, /*!< in: block size, up to UNIV_PAGE_SIZE */ + ulint size, /*!< in: compressed page size + (between PAGE_ZIP_MIN_SIZE and UNIV_PAGE_SIZE) */ ibool* lru) /*!< in: pointer to a variable that will be assigned TRUE if storage was allocated from the LRU list - and buf_pool_mutex was temporarily released, - or NULL if the LRU list should not be used */ + and buf_pool_mutex was temporarily released */ { ut_ad(buf_pool_mutex_own()); + ut_ad(ut_is_2pow(size)); + ut_ad(size >= PAGE_ZIP_MIN_SIZE); + ut_ad(size <= UNIV_PAGE_SIZE); - return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru)); + return((byte*) buf_buddy_alloc_low(buf_buddy_get_slot(size), lru)); } /**********************************************************************//** @@ -117,6 +117,9 @@ buf_buddy_free( ulint size) /*!< in: block size, up to UNIV_PAGE_SIZE */ { ut_ad(buf_pool_mutex_own()); + ut_ad(ut_is_2pow(size)); + ut_ad(size >= PAGE_ZIP_MIN_SIZE); + ut_ad(size <= UNIV_PAGE_SIZE); buf_buddy_free_low(buf, buf_buddy_get_slot(size)); } diff --git a/storage/innodb_plugin/include/buf0buf.h b/storage/innodb_plugin/include/buf0buf.h index 2dfb821e199..86c47a6edba 100644 --- a/storage/innodb_plugin/include/buf0buf.h +++ b/storage/innodb_plugin/include/buf0buf.h @@ -156,6 +156,23 @@ UNIV_INLINE ib_uint64_t buf_pool_get_oldest_modification(void); /*==================================*/ +/********************************************************************//** +Allocates a buf_page_t descriptor. This function must succeed. In case +of failure we assert in this function. */ +UNIV_INLINE +buf_page_t* +buf_page_alloc_descriptor(void) +/*===========================*/ + __attribute__((malloc)); +/********************************************************************//** +Free a buf_page_t descriptor. */ +UNIV_INLINE +void +buf_page_free_descriptor( +/*=====================*/ + buf_page_t* bpage) /*!< in: bpage descriptor to free. */ + __attribute__((nonnull)); + /********************************************************************//** Allocates a buffer block. @return own: the allocated block, in state BUF_BLOCK_MEMORY */ diff --git a/storage/innodb_plugin/include/buf0buf.ic b/storage/innodb_plugin/include/buf0buf.ic index d4ca07a4cd8..35c63f034f5 100644 --- a/storage/innodb_plugin/include/buf0buf.ic +++ b/storage/innodb_plugin/include/buf0buf.ic @@ -714,6 +714,35 @@ buf_block_get_lock_hash_val( return(block->lock_hash_val); } +/********************************************************************//** +Allocates a buf_page_t descriptor. This function must succeed. In case +of failure we assert in this function. +@return: the allocated descriptor. 
*/ +UNIV_INLINE +buf_page_t* +buf_page_alloc_descriptor(void) +/*===========================*/ +{ + buf_page_t* bpage; + + bpage = (buf_page_t*) ut_malloc(sizeof *bpage); + ut_d(memset(bpage, 0, sizeof *bpage)); + UNIV_MEM_ALLOC(bpage, sizeof *bpage); + + return(bpage); +} + +/********************************************************************//** +Free a buf_page_t descriptor. */ +UNIV_INLINE +void +buf_page_free_descriptor( +/*=====================*/ + buf_page_t* bpage) /*!< in: bpage descriptor to free. */ +{ + ut_free(bpage); +} + /********************************************************************//** Allocates a buffer block. @return own: the allocated block, in state BUF_BLOCK_MEMORY */ diff --git a/storage/innodb_plugin/include/buf0lru.h b/storage/innodb_plugin/include/buf0lru.h index bea1f7d5b1e..0c2102b9549 100644 --- a/storage/innodb_plugin/include/buf0lru.h +++ b/storage/innodb_plugin/include/buf0lru.h @@ -30,18 +30,6 @@ Created 11/5/1995 Heikki Tuuri #include "ut0byte.h" #include "buf0types.h" -/** The return type of buf_LRU_free_block() */ -enum buf_lru_free_block_status { - /** freed */ - BUF_LRU_FREED = 0, - /** not freed because the caller asked to remove the - uncompressed frame but the control block cannot be - relocated */ - BUF_LRU_CANNOT_RELOCATE, - /** not freed because of some other reason */ - BUF_LRU_NOT_FREED -}; - /******************************************************************//** Tries to remove LRU flushed blocks from the end of the LRU list and put them to the free list. This is beneficial for the efficiency of the insert buffer @@ -98,17 +86,16 @@ buf_LRU_insert_zip_clean( Try to free a block. If bpage is a descriptor of a compressed-only page, the descriptor object will be freed as well. -NOTE: If this function returns BUF_LRU_FREED, it will temporarily +NOTE: If this function returns TRUE, it will temporarily release buf_pool_mutex. Furthermore, the page frame will no longer be accessible via bpage. The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and release these two mutexes after the call. No other buf_page_get_mutex() may be held when calling this function. -@return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or -BUF_LRU_NOT_FREED otherwise. */ +@return TRUE if freed, FALSE otherwise. */ UNIV_INTERN -enum buf_lru_free_block_status +ibool buf_LRU_free_block( /*===============*/ buf_page_t* bpage, /*!< in: block to be freed */ diff --git a/storage/innodb_plugin/include/buf0types.h b/storage/innodb_plugin/include/buf0types.h index bfae6477135..4fe0b4483c8 100644 --- a/storage/innodb_plugin/include/buf0types.h +++ b/storage/innodb_plugin/include/buf0types.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1995, 2011, Oracle and/or its affiliates. 
All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,6 +26,8 @@ Created 11/17/1995 Heikki Tuuri #ifndef buf0types_h #define buf0types_h +#include "page0types.h" + /** Buffer page (uncompressed or compressed) */ typedef struct buf_page_struct buf_page_t; /** Buffer block for which an uncompressed page exists */ @@ -58,17 +60,10 @@ enum buf_io_fix { /** Parameters of binary buddy system for compressed pages (buf0buddy.h) */ /* @{ */ -#if UNIV_WORD_SIZE <= 4 /* 32-bit system */ -/** Base-2 logarithm of the smallest buddy block size */ -# define BUF_BUDDY_LOW_SHIFT 6 -#else /* 64-bit system */ -/** Base-2 logarithm of the smallest buddy block size */ -# define BUF_BUDDY_LOW_SHIFT 7 -#endif +#define BUF_BUDDY_LOW_SHIFT PAGE_ZIP_MIN_SIZE_SHIFT + #define BUF_BUDDY_LOW (1 << BUF_BUDDY_LOW_SHIFT) - /*!< minimum block size in the binary - buddy system; must be at least - sizeof(buf_page_t) */ + #define BUF_BUDDY_SIZES (UNIV_PAGE_SIZE_SHIFT - BUF_BUDDY_LOW_SHIFT) /*!< number of buddy sizes */ -- cgit v1.2.1 From bc7af1757939f1ec3c389da73176c7c68a8bafd5 Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Thu, 23 Jun 2011 20:41:04 +0700 Subject: Fixed Bug#11756013 (formerly known as bug#47870): BOGUS "THE TABLE MYSQL.PROC IS MISSING,..." There was a race condition between loading a stored routine (function/procedure/trigger) specified by a fully qualified name SCHEMA_NAME.PROC_NAME and dropping the database that the stored routine belongs to. The problem was a race-condition window when one server thread tries to load a stored routine being executed while another thread tries to drop the stored routine's schema. This window exists in the implementation of the function mysql_change_db(), which is called by db_load_routine() while loading the stored routine into the cache. mysql_change_db() calls check_db_dir_existence(), which might fail because the specified database was dropped by a concurrently executing DROP SCHEMA statement. db_load_routine() calls mysql_change_db() with the flag 'force_switch' set to 'true', so when the referenced database is not found, my_error() is not called and mysql_change_db() returns OK. This hides the schema-open error from db_load_routine(). db_load_routine() then attempts to parse the stored routine, which fails. The error is returned to sp_cache_routines_and_add_tables_aux(), but since my_error() was not called during error generation and hence THD::main_da was not set, sp_cache_routines_and_add_tables_aux() reports the generic "mysql.proc table corrupt" error instead. The fix is to install an error handler inside db_load_routine() for the mysql_change_db() call, and to check later whether ER_BAD_DB_ERROR was caught. sql/sql_db.cc: Added synchronization point "before_db_dir_check" to emulate a race condition during processing of CALL/DROP SCHEMA. --- mysql-test/r/sp_sync.result | 14 +++++++++++++- mysql-test/t/sp_sync.test | 31 ++++++++++++++++++++++++++++++- sql/sp.cc | 42 +++++++++++++++++++++++++++++++++++++++++- sql/sql_db.cc | 3 +++ 4 files changed, 87 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/sp_sync.result b/mysql-test/r/sp_sync.result index afa37e70531..17292f4c1a8 100644 --- a/mysql-test/r/sp_sync.result +++ b/mysql-test/r/sp_sync.result @@ -1,4 +1,4 @@ -Tests of syncronization of stored procedure execution. +Tests of synchronization of stored procedure execution.
# # Bug#48157: crash in Item_field::used_tables # @@ -20,4 +20,16 @@ SET DEBUG_SYNC = 'now SIGNAL go'; # code, this test statement will hang. DROP TABLE t1, t2; DROP PROCEDURE p1; +# +# test for bug#11756013 +# +DROP SCHEMA IF EXISTS s1; +CREATE SCHEMA s1; +CREATE PROCEDURE s1.p1() BEGIN END; +SET DEBUG_SYNC='before_db_dir_check SIGNAL check_db WAIT_FOR dropped_schema'; +CALL s1.p1; +SET DEBUG_SYNC='now WAIT_FOR check_db'; +DROP SCHEMA s1; +SET DEBUG_SYNC='now SIGNAL dropped_schema'; +ERROR 42000: Unknown database 's1' SET DEBUG_SYNC = 'RESET'; diff --git a/mysql-test/t/sp_sync.test b/mysql-test/t/sp_sync.test index f9dae17b039..d8458f69eef 100644 --- a/mysql-test/t/sp_sync.test +++ b/mysql-test/t/sp_sync.test @@ -1,7 +1,10 @@ # This test should work in embedded server after mysqltest is fixed -- source include/not_embedded.inc ---echo Tests of syncronization of stored procedure execution. +# Save the initial number of concurrent sessions +--source include/count_sessions.inc + +--echo Tests of synchronization of stored procedure execution. --source include/have_debug_sync.inc @@ -54,5 +57,31 @@ connection default; DROP TABLE t1, t2; DROP PROCEDURE p1; +--echo # +--echo # test for bug#11756013 +--echo # +--disable_warnings +DROP SCHEMA IF EXISTS s1; +--enable_warnings +CREATE SCHEMA s1; +CREATE PROCEDURE s1.p1() BEGIN END; + +connect (con3, localhost, root); +SET DEBUG_SYNC='before_db_dir_check SIGNAL check_db WAIT_FOR dropped_schema'; +--send CALL s1.p1 + +connection default; +SET DEBUG_SYNC='now WAIT_FOR check_db'; +DROP SCHEMA s1; +SET DEBUG_SYNC='now SIGNAL dropped_schema'; + +connection con3; +--error ER_BAD_DB_ERROR +--reap +connection default; +disconnect con3; + SET DEBUG_SYNC = 'RESET'; +# Wait till we reached the initial number of concurrent sessions +--source include/wait_until_count_sessions.inc diff --git a/sql/sp.cc b/sql/sp.cc index ddddaee2e10..78d1dc7e22c 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -708,6 +708,37 @@ Silence_deprecated_warning::handle_error(uint sql_errno, const char *message, } +class Bad_db_error_handler : public Internal_error_handler +{ +public: + Bad_db_error_handler() + :m_error_caught(false) + {} + + virtual bool handle_error(uint sql_errno, const char *message, + MYSQL_ERROR::enum_warning_level level, + THD *thd); + + bool error_caught() const { return m_error_caught; } + +private: + bool m_error_caught; +}; + +bool +Bad_db_error_handler::handle_error(uint sql_errno, const char *message, + MYSQL_ERROR::enum_warning_level level, + THD *thd) +{ + if (sql_errno == ER_BAD_DB_ERROR) + { + m_error_caught= true; + return true; + } + return false; +} + + static int db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, ulong sql_mode, const char *params, const char *returns, @@ -725,7 +756,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, ha_rows old_select_limit= thd->variables.select_limit; sp_rcontext *old_spcont= thd->spcont; Silence_deprecated_warning warning_handler; - + Bad_db_error_handler db_not_exists_handler; char definer_user_name_holder[USERNAME_LENGTH + 1]; LEX_STRING definer_user_name= { definer_user_name_holder, USERNAME_LENGTH }; @@ -766,6 +797,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, goto end; } + thd->push_internal_handler(&db_not_exists_handler); /* Change the current database (if needed). 
@@ -776,9 +808,17 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, &cur_db_changed)) { ret= SP_INTERNAL_ERROR; + thd->pop_internal_handler(); goto end; } + thd->pop_internal_handler(); + if (db_not_exists_handler.error_caught()) + { + ret= SP_INTERNAL_ERROR; + my_error(ER_BAD_DB_ERROR, MYF(0), name->m_db.str); + goto end; + } thd->spcont= NULL; { diff --git a/sql/sql_db.cc b/sql/sql_db.cc index d7d7f43a7aa..e0d848321a7 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -26,6 +26,7 @@ #ifdef __WIN__ #include #endif +#include "debug_sync.h" #define MAX_DROP_TABLE_Q_LEN 1024 @@ -1702,6 +1703,8 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) } #endif + DEBUG_SYNC(thd, "before_db_dir_check"); + if (check_db_dir_existence(new_db_file_name.str)) { if (force_switch) -- cgit v1.2.1 From 0c54d44fef3306143dd2133617dede11828c78e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 28 Jun 2011 11:57:09 +0300 Subject: Bug#12595087 - 61191: Question about page_zip_available (clean up page0zip.c) page_zip_dir_elems(): New function, refactored from page_zip_dir_size(). page_zip_dir_size(): Use page_zip_dir_elems() page_zip_dir_start_offs(): New function: Gets an offset to the compressed page trailer (the dense page directory), including deleted records (the free list) page_zip_dir_start_low(page_zip, n_dense): Constness-preserving wrapper macro for page_zip_dir_start_offs(). page_zip_dir_start(page_zip): Constness-preserving wrapper macro for page_zip_dir_start_offs(). page_zip_decompress_node_ptrs(), page_zip_decompress_clust(): Replace a formula with a fully equivalent page_zip_dir_start_low() call. page_zip_write_rec(), page_zip_parse_write_node_ptr(), page_zip_write_node_ptr(), page_zip_write_trx_id_and_roll_ptr(), page_zip_clear_rec(): Replace a formula with an almost equivalent page_zip_dir_start() call. It is OK to replace page_dir_get_n_heap(page) with page_dir_get_n_heap(page_zip->data), because ut_ad(page_zip_header_cmp(page_zip, page)) or page_zip_validate(page_zip, page) asserts that the page headers are identical. rb:687 approved by Jimmy Yang --- storage/innodb_plugin/page/page0zip.c | 90 +++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 31 deletions(-) diff --git a/storage/innodb_plugin/page/page0zip.c b/storage/innodb_plugin/page/page0zip.c index 2d97b9a0d64..9f00fb4d1e0 100644 --- a/storage/innodb_plugin/page/page0zip.c +++ b/storage/innodb_plugin/page/page0zip.c @@ -150,6 +150,20 @@ page_zip_empty_size( } #endif /* !UNIV_HOTBACKUP */ +/*************************************************************//** +Gets the number of elements in the dense page directory, +including deleted records (the free list). +@return number of elements in the dense page directory */ +UNIV_INLINE +ulint +page_zip_dir_elems( +/*===============*/ + const page_zip_des_t* page_zip) /*!< in: compressed page */ +{ + /* Exclude the page infimum and supremum from the record count. */ + return(page_dir_get_n_heap(page_zip->data) - PAGE_HEAP_NO_USER_LOW); +} + /*************************************************************//** Gets the size of the compressed page trailer (the dense page directory), including deleted records (the free list). @@ -160,13 +174,41 @@ page_zip_dir_size( /*==============*/ const page_zip_des_t* page_zip) /*!< in: compressed page */ { - /* Exclude the page infimum and supremum from the record count. 
*/ - ulint size = PAGE_ZIP_DIR_SLOT_SIZE - * (page_dir_get_n_heap(page_zip->data) - - PAGE_HEAP_NO_USER_LOW); - return(size); + return(PAGE_ZIP_DIR_SLOT_SIZE * page_zip_dir_elems(page_zip)); +} + +/*************************************************************//** +Gets an offset to the compressed page trailer (the dense page directory), +including deleted records (the free list). +@return offset of the dense page directory */ +UNIV_INLINE +ulint +page_zip_dir_start_offs( +/*====================*/ + const page_zip_des_t* page_zip, /*!< in: compressed page */ + ulint n_dense) /*!< in: directory size */ +{ + ut_ad(n_dense * PAGE_ZIP_DIR_SLOT_SIZE < page_zip_get_size(page_zip)); + + return(page_zip_get_size(page_zip) - n_dense * PAGE_ZIP_DIR_SLOT_SIZE); } +/*************************************************************//** +Gets a pointer to the compressed page trailer (the dense page directory), +including deleted records (the free list). +@param[in] page_zip compressed page +@param[in] n_dense number of entries in the directory +@return pointer to the dense page directory */ +#define page_zip_dir_start_low(page_zip, n_dense) \ + ((page_zip)->data + page_zip_dir_start_offs(page_zip, n_dense)) +/*************************************************************//** +Gets a pointer to the compressed page trailer (the dense page directory), +including deleted records (the free list). +@param[in] page_zip compressed page +@return pointer to the dense page directory */ +#define page_zip_dir_start(page_zip) \ + page_zip_dir_start_low(page_zip, page_zip_dir_elems(page_zip)) + /*************************************************************//** Gets the size of the compressed page trailer (the dense page directory), only including user records (excluding the free list). @@ -2242,8 +2284,7 @@ zlib_done: } /* Restore the uncompressed columns in heap_no order. */ - storage = page_zip->data + page_zip_get_size(page_zip) - - n_dense * PAGE_ZIP_DIR_SLOT_SIZE; + storage = page_zip_dir_start_low(page_zip, n_dense); for (slot = 0; slot < n_dense; slot++) { rec_t* rec = recs[slot]; @@ -2728,8 +2769,7 @@ zlib_done: return(FALSE); } - storage = page_zip->data + page_zip_get_size(page_zip) - - n_dense * PAGE_ZIP_DIR_SLOT_SIZE; + storage = page_zip_dir_start_low(page_zip, n_dense); externs = storage - n_dense * (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); @@ -3457,9 +3497,7 @@ page_zip_write_rec( } /* Write the data bytes. Store the uncompressed bytes separately. 
*/ - storage = page_zip->data + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE; + storage = page_zip_dir_start(page_zip); if (page_is_leaf(page)) { ulint len; @@ -3755,9 +3793,7 @@ corrupt: field = page + offset; storage = page_zip->data + z_offset; - storage_end = page_zip->data + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE; + storage_end = page_zip_dir_start(page_zip); heap_no = 1 + (storage_end - storage) / REC_NODE_PTR_SIZE; @@ -3793,7 +3829,9 @@ page_zip_write_node_ptr( { byte* field; byte* storage; +#ifdef UNIV_DEBUG page_t* page = page_align(rec); +#endif /* UNIV_DEBUG */ ut_ad(PAGE_ZIP_MATCH(rec, page_zip)); ut_ad(page_simple_validate_new(page)); @@ -3810,9 +3848,7 @@ page_zip_write_node_ptr( UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); UNIV_MEM_ASSERT_RW(rec, size); - storage = page_zip->data + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE + storage = page_zip_dir_start(page_zip) - (rec_get_heap_no_new(rec) - 1) * REC_NODE_PTR_SIZE; field = rec + size - REC_NODE_PTR_SIZE; @@ -3861,7 +3897,9 @@ page_zip_write_trx_id_and_roll_ptr( { byte* field; byte* storage; +#ifdef UNIV_DEBUG page_t* page = page_align(rec); +#endif /* UNIV_DEBUG */ ulint len; ut_ad(PAGE_ZIP_MATCH(rec, page_zip)); @@ -3879,9 +3917,7 @@ page_zip_write_trx_id_and_roll_ptr( UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip)); - storage = page_zip->data + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE + storage = page_zip_dir_start(page_zip) - (rec_get_heap_no_new(rec) - 1) * (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN); @@ -3948,11 +3984,7 @@ page_zip_clear_rec( /* Clear node_ptr. On the compressed page, there is an array of node_ptr immediately before the dense page directory, at the very end of the page. */ - storage = page_zip->data - + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE; + storage = page_zip_dir_start(page_zip); ut_ad(dict_index_get_n_unique_in_tree(index) == rec_offs_n_fields(offsets) - 1); field = rec_get_nth_field(rec, offsets, @@ -3972,11 +4004,7 @@ page_zip_clear_rec( = dict_col_get_clust_pos( dict_table_get_sys_col( index->table, DATA_TRX_ID), index); - storage = page_zip->data - + page_zip_get_size(page_zip) - - (page_dir_get_n_heap(page) - - PAGE_HEAP_NO_USER_LOW) - * PAGE_ZIP_DIR_SLOT_SIZE; + storage = page_zip_dir_start(page_zip); field = rec_get_nth_field(rec, offsets, trx_id_pos, &len); ut_ad(len == DATA_TRX_ID_LEN); -- cgit v1.2.1 From 0f37ccb30f9ca0855b4494a7c7c10d293e316c47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 29 Jun 2011 09:57:15 +0300 Subject: Bug #12612184 BLOB debug code cleanup: Refactor the !rec_offs_any_extern relaxation in row_build(). trx_assert_active(trx_id): Assert that the given transaction is active. (In the 5.1 built-in InnoDB, there is no trx->is_recovered field.) trx_assert_recovered(trx_id): Assert that the given transaction is active and has been recovered after a crash. row_build(): Replace a bunch of code with an assertion that invokes trx_assert_active() or trx_assert_recovered() and row_get_rec_trx_id(). row_get_trx_id_offset(): Make the function inlined. Remove the unused parameter rec, and make all parameters const. 
row_get_rec_trx_id(), row_get_rec_roll_ptr(): Make all parameters const. rb:691 approved by Jimmy Yang --- storage/innobase/include/trx0sys.h | 10 +++++ storage/innobase/include/trx0sys.ic | 21 ++++++++++ storage/innobase/row/row0row.c | 46 ++++++++------------- storage/innodb_plugin/include/row0row.h | 26 ++++++------ storage/innodb_plugin/include/row0row.ic | 45 ++++++++++++++++----- storage/innodb_plugin/include/row0upd.ic | 4 +- storage/innodb_plugin/include/trx0sys.h | 11 ++++- storage/innodb_plugin/include/trx0sys.ic | 24 ++++++++++- storage/innodb_plugin/row/row0row.c | 69 ++++++-------------------------- 9 files changed, 146 insertions(+), 110 deletions(-) diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h index bad3c9d570c..7ea981eb85c 100644 --- a/storage/innobase/include/trx0sys.h +++ b/storage/innobase/include/trx0sys.h @@ -256,6 +256,16 @@ trx_in_trx_list( /*============*/ /* out: TRUE if is in */ trx_t* in_trx);/* in: trx */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/******************************************************** +Assert that a transaction is active. */ +UNIV_INLINE +ibool +trx_assert_active( +/*==============*/ + /* out: TRUE */ + dulint trx_id); /* in: transaction identifier */ +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /********************************************************************* Updates the offset information about the end of the MySQL binlog entry which corresponds to the transaction just being committed. In a MySQL diff --git a/storage/innobase/include/trx0sys.ic b/storage/innobase/include/trx0sys.ic index 1142fb60398..f5033c5778a 100644 --- a/storage/innobase/include/trx0sys.ic +++ b/storage/innobase/include/trx0sys.ic @@ -257,6 +257,27 @@ trx_get_on_id( return(NULL); } +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/******************************************************** +Assert that a transaction is active. */ +UNIV_INLINE +ibool +trx_assert_active( +/*==============*/ + /* out: TRUE */ + dulint trx_id) /* in: transaction identifier */ +{ + trx_t* trx; + + mutex_enter(&kernel_mutex); + trx = trx_get_on_id(trx_id); + ut_a(trx); + mutex_exit(&kernel_mutex); + + return(TRUE); +} +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + /******************************************************************** Returns the minumum trx id in trx list. This is the smallest id for which the trx can possibly be active. (But, you must look at the trx->conc_state to diff --git a/storage/innobase/row/row0row.c b/storage/innobase/row/row0row.c index 751de98deba..171039e34ac 100644 --- a/storage/innobase/row/row0row.c +++ b/storage/innobase/row/row0row.c @@ -212,35 +212,23 @@ row_build( } #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG - if (UNIV_LIKELY_NULL(rec_offs_any_null_extern(rec, offsets))) { - /* This condition can occur during crash recovery before - trx_rollback_or_clean_all_without_sess() has completed - execution. - - This condition is possible if the server crashed - during an insert or update before - btr_store_big_rec_extern_fields() did mtr_commit() all - BLOB pointers to the clustered index record. - - Look up the transaction that holds the implicit lock - on this record, and assert that it was recovered (and - will soon be rolled back). 
*/ - - ulint trx_id_pos = dict_index_get_sys_col_pos( - index, DATA_TRX_ID); - ulint len; - dulint trx_id = trx_read_trx_id( - rec_get_nth_field(rec, offsets, trx_id_pos, &len)); - trx_t* trx; - ut_a(len == 6); - - mutex_enter(&kernel_mutex); - trx = trx_get_on_id(trx_id); - ut_a(trx); - /* This field does not exist in this version of InnoDB. */ - /* ut_a(trx->is_recovered); */ - mutex_exit(&kernel_mutex); - } + /* This condition can occur during crash recovery before + trx_rollback_or_clean_all_without_sess() has completed + execution. + + This condition is possible if the server crashed + during an insert or update before + btr_store_big_rec_extern_fields() did mtr_commit() all + BLOB pointers to the clustered index record. + + If the record contains a null BLOB pointer, look up the + transaction that holds the implicit lock on this record, and + assert that it is active. (In this version of InnoDB, we + cannot assert that it was recovered, because there is no + trx->is_recovered field.) */ + + ut_a(!rec_offs_any_null_extern(rec, offsets) + || trx_assert_active(row_get_rec_trx_id(rec, index, offsets))); #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (type != ROW_COPY_POINTERS) { diff --git a/storage/innodb_plugin/include/row0row.h b/storage/innodb_plugin/include/row0row.h index 723b7b53395..36fb26482ce 100644 --- a/storage/innodb_plugin/include/row0row.h +++ b/storage/innodb_plugin/include/row0row.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,16 +38,16 @@ Created 4/20/1996 Heikki Tuuri #include "btr0types.h" /*********************************************************************//** -Gets the offset of the trx id field, in bytes relative to the origin of +Gets the offset of the DB_TRX_ID field, in bytes relative to the origin of a clustered index record. @return offset of DATA_TRX_ID */ -UNIV_INTERN +UNIV_INLINE ulint row_get_trx_id_offset( /*==================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in: clustered index */ - const ulint* offsets);/*!< in: rec_get_offsets(rec, index) */ + const dict_index_t* index, /*!< in: clustered index */ + const ulint* offsets)/*!< in: record offsets */ + __attribute__((nonnull, warn_unused_result)); /*********************************************************************//** Reads the trx id field from a clustered index record. @return value of the field */ @@ -55,9 +55,10 @@ UNIV_INLINE trx_id_t row_get_rec_trx_id( /*===============*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in: clustered index */ - const ulint* offsets);/*!< in: rec_get_offsets(rec, index) */ + const rec_t* rec, /*!< in: record */ + const dict_index_t* index, /*!< in: clustered index */ + const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ + __attribute__((nonnull, warn_unused_result)); /*********************************************************************//** Reads the roll pointer field from a clustered index record. 
@return value of the field */ @@ -65,9 +66,10 @@ UNIV_INLINE roll_ptr_t row_get_rec_roll_ptr( /*=================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in: clustered index */ - const ulint* offsets);/*!< in: rec_get_offsets(rec, index) */ + const rec_t* rec, /*!< in: record */ + const dict_index_t* index, /*!< in: clustered index */ + const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ + __attribute__((nonnull, warn_unused_result)); /*****************************************************************//** When an insert or purge to a table is performed, this function builds the entry to be inserted into or purged from an index on the table. diff --git a/storage/innodb_plugin/include/row0row.ic b/storage/innodb_plugin/include/row0row.ic index 05c007641af..0b9ca982af8 100644 --- a/storage/innodb_plugin/include/row0row.ic +++ b/storage/innodb_plugin/include/row0row.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,6 +27,33 @@ Created 4/20/1996 Heikki Tuuri #include "rem0rec.h" #include "trx0undo.h" +/*********************************************************************//** +Gets the offset of the DB_TRX_ID field, in bytes relative to the origin of +a clustered index record. +@return offset of DATA_TRX_ID */ +UNIV_INLINE +ulint +row_get_trx_id_offset( +/*==================*/ + const dict_index_t* index, /*!< in: clustered index */ + const ulint* offsets)/*!< in: record offsets */ +{ + ulint pos; + ulint offset; + ulint len; + + ut_ad(dict_index_is_clust(index)); + ut_ad(rec_offs_validate(NULL, index, offsets)); + + pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID); + + offset = rec_get_nth_field_offs(offsets, pos, &len); + + ut_ad(len == DATA_TRX_ID_LEN); + + return(offset); +} + /*********************************************************************//** Reads the trx id field from a clustered index record. 
@return value of the field */ @@ -34,9 +61,9 @@ UNIV_INLINE trx_id_t row_get_rec_trx_id( /*===============*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in: clustered index */ - const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ + const rec_t* rec, /*!< in: record */ + const dict_index_t* index, /*!< in: clustered index */ + const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ { ulint offset; @@ -46,7 +73,7 @@ row_get_rec_trx_id( offset = index->trx_id_offset; if (!offset) { - offset = row_get_trx_id_offset(rec, index, offsets); + offset = row_get_trx_id_offset(index, offsets); } return(trx_read_trx_id(rec + offset)); @@ -59,9 +86,9 @@ UNIV_INLINE roll_ptr_t row_get_rec_roll_ptr( /*=================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in: clustered index */ - const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ + const rec_t* rec, /*!< in: record */ + const dict_index_t* index, /*!< in: clustered index */ + const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ { ulint offset; @@ -71,7 +98,7 @@ row_get_rec_roll_ptr( offset = index->trx_id_offset; if (!offset) { - offset = row_get_trx_id_offset(rec, index, offsets); + offset = row_get_trx_id_offset(index, offsets); } return(trx_read_roll_ptr(rec + offset + DATA_TRX_ID_LEN)); diff --git a/storage/innodb_plugin/include/row0upd.ic b/storage/innodb_plugin/include/row0upd.ic index 18e22f1eca9..0894ed373b0 100644 --- a/storage/innodb_plugin/include/row0upd.ic +++ b/storage/innodb_plugin/include/row0upd.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -171,7 +171,7 @@ row_upd_rec_sys_fields( ulint offset = index->trx_id_offset; if (!offset) { - offset = row_get_trx_id_offset(rec, index, offsets); + offset = row_get_trx_id_offset(index, offsets); } #if DATA_TRX_ID + 1 != DATA_ROLL_PTR diff --git a/storage/innodb_plugin/include/trx0sys.h b/storage/innodb_plugin/include/trx0sys.h index cbb89689748..ed62375797a 100644 --- a/storage/innodb_plugin/include/trx0sys.h +++ b/storage/innodb_plugin/include/trx0sys.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -284,6 +284,15 @@ ibool trx_in_trx_list( /*============*/ trx_t* in_trx);/*!< in: trx */ +/***********************************************************//** +Assert that a transaction has been recovered. +@return TRUE */ +UNIV_INLINE +ibool +trx_assert_recovered( +/*=================*/ + trx_id_t trx_id) /*!< in: transaction identifier */ + __attribute__((warn_unused_result)); /*****************************************************************//** Updates the offset information about the end of the MySQL binlog entry which corresponds to the transaction just being committed. 
In a MySQL diff --git a/storage/innodb_plugin/include/trx0sys.ic b/storage/innodb_plugin/include/trx0sys.ic index 820d31d0692..6246debac0a 100644 --- a/storage/innodb_plugin/include/trx0sys.ic +++ b/storage/innodb_plugin/include/trx0sys.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved. +Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -277,6 +277,28 @@ trx_get_on_id( return(NULL); } +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG +/***********************************************************//** +Assert that a transaction has been recovered. +@return TRUE */ +UNIV_INLINE +ibool +trx_assert_recovered( +/*=================*/ + trx_id_t trx_id) /*!< in: transaction identifier */ +{ + trx_t* trx; + + mutex_enter(&kernel_mutex); + trx = trx_get_on_id(trx_id); + ut_a(trx); + ut_a(trx->is_recovered); + mutex_exit(&kernel_mutex); + + return(TRUE); +} +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + /****************************************************************//** Returns the minumum trx id in trx list. This is the smallest id for which the trx can possibly be active. (But, you must look at the trx->conc_state to diff --git a/storage/innodb_plugin/row/row0row.c b/storage/innodb_plugin/row/row0row.c index 3b610d735b0..dd94132eebc 100644 --- a/storage/innodb_plugin/row/row0row.c +++ b/storage/innodb_plugin/row/row0row.c @@ -47,35 +47,6 @@ Created 4/20/1996 Heikki Tuuri #include "read0read.h" #include "ut0mem.h" -/*********************************************************************//** -Gets the offset of trx id field, in bytes relative to the origin of -a clustered index record. -@return offset of DATA_TRX_ID */ -UNIV_INTERN -ulint -row_get_trx_id_offset( -/*==================*/ - const rec_t* rec __attribute__((unused)), - /*!< in: record */ - dict_index_t* index, /*!< in: clustered index */ - const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ -{ - ulint pos; - ulint offset; - ulint len; - - ut_ad(dict_index_is_clust(index)); - ut_ad(rec_offs_validate(rec, index, offsets)); - - pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID); - - offset = rec_get_nth_field_offs(offsets, pos, &len); - - ut_ad(len == DATA_TRX_ID_LEN); - - return(offset); -} - /*****************************************************************//** When an insert or purge to a table is performed, this function builds the entry to be inserted into or purged from an index on the table. @@ -233,33 +204,19 @@ row_build( } #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG - if (UNIV_LIKELY_NULL(rec_offs_any_null_extern(rec, offsets))) { - /* This condition can occur during crash recovery before - trx_rollback_active() has completed execution. - - This condition is possible if the server crashed - during an insert or update before - btr_store_big_rec_extern_fields() did mtr_commit() all - BLOB pointers to the clustered index record. - - Look up the transaction that holds the implicit lock - on this record, and assert that it was recovered (and - will soon be rolled back). 
*/ - - ulint trx_id_pos = dict_index_get_sys_col_pos( - index, DATA_TRX_ID); - ulint len; - trx_id_t trx_id = trx_read_trx_id( - rec_get_nth_field(rec, offsets, trx_id_pos, &len)); - trx_t* trx; - ut_a(len == 6); - - mutex_enter(&kernel_mutex); - trx = trx_get_on_id(trx_id); - ut_a(trx); - ut_a(trx->is_recovered); - mutex_exit(&kernel_mutex); - } + /* This condition can occur during crash recovery before + trx_rollback_active() has completed execution. + + This condition is possible if the server crashed + during an insert or update before + btr_store_big_rec_extern_fields() did mtr_commit() all + BLOB pointers to the clustered index record. + + If the record contains a null BLOB pointer, look up the + transaction that holds the implicit lock on this record, and + assert that it was recovered (and will soon be rolled back). */ + ut_a(!rec_offs_any_null_extern(rec, offsets) + || trx_assert_recovered(row_get_rec_trx_id(rec, index, offsets))); #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ if (type != ROW_COPY_POINTERS) { -- cgit v1.2.1 From 67ea0a59e5b3c4e990c946c3df5af46ecbc4182b Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Wed, 29 Jun 2011 14:28:30 +0300 Subject: Bug #12696083 FIX OUTDATED COPYRIGHT NOTICES IN INNODB RELATED CLIENT TOOLS Update copyright comment in innochecksum. --- extra/innochecksum.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extra/innochecksum.c b/extra/innochecksum.c index 33f925a4cad..ecdf84d5181 100644 --- a/extra/innochecksum.c +++ b/extra/innochecksum.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2005 MySQL AB & Innobase Oy +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by -- cgit v1.2.1 From e25fb73be22b496a61a28b82de746f990822ae50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 29 Jun 2011 16:48:41 +0300 Subject: Bug #12612184 BLOB debug code cleanup: Forgot an #if around the declaration of trx_assert_recovered(). --- storage/innodb_plugin/include/trx0sys.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/innodb_plugin/include/trx0sys.h b/storage/innodb_plugin/include/trx0sys.h index ed62375797a..78bb6fc349b 100644 --- a/storage/innodb_plugin/include/trx0sys.h +++ b/storage/innodb_plugin/include/trx0sys.h @@ -284,6 +284,7 @@ ibool trx_in_trx_list( /*============*/ trx_t* in_trx);/*!< in: trx */ +#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /***********************************************************//** Assert that a transaction has been recovered. @return TRUE */ @@ -293,6 +294,7 @@ trx_assert_recovered( /*=================*/ trx_id_t trx_id) /*!< in: transaction identifier */ __attribute__((warn_unused_result)); +#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /*****************************************************************//** Updates the offset information about the end of the MySQL binlog entry which corresponds to the transaction just being committed. 
In a MySQL -- cgit v1.2.1 From eeb028bbc1695b8727ee9e58c173c85f1abcb29c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 30 Jun 2011 13:18:54 +0300 Subject: Bug#12637786 Wrong secondary index entries on CHAR and VARCHAR columns row_build_index_entry(): In innodb_file_format=Barracuda (ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED), a secondary index on a full column can refer to a field that is stored off-page in the clustered index record. Take that into account. rb:692 approved by Jimmy Yang --- .../suite/innodb_plugin/r/innodb-index.result | 13 +++++++ mysql-test/suite/innodb_plugin/t/innodb-index.test | 16 +++++++++ storage/innodb_plugin/ChangeLog | 6 ++++ storage/innodb_plugin/row/row0row.c | 42 ++++++++++++++++++++-- 4 files changed, 74 insertions(+), 3 deletions(-) diff --git a/mysql-test/suite/innodb_plugin/r/innodb-index.result b/mysql-test/suite/innodb_plugin/r/innodb-index.result index f86fcd4a8ef..547d253911a 100644 --- a/mysql-test/suite/innodb_plugin/r/innodb-index.result +++ b/mysql-test/suite/innodb_plugin/r/innodb-index.result @@ -967,6 +967,19 @@ ERROR HY000: Too big row alter table t1 row_format=compact; create index t1u on t1 (u(1)); drop table t1; +SET @r=REPEAT('a',500); +CREATE TABLE t1(a INT, +v1 VARCHAR(500), v2 VARCHAR(500), v3 VARCHAR(500), +v4 VARCHAR(500), v5 VARCHAR(500), v6 VARCHAR(500), +v7 VARCHAR(500), v8 VARCHAR(500), v9 VARCHAR(500), +v10 VARCHAR(500), v11 VARCHAR(500), v12 VARCHAR(500), +v13 VARCHAR(500), v14 VARCHAR(500), v15 VARCHAR(500), +v16 VARCHAR(500), v17 VARCHAR(500), v18 VARCHAR(500) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +CREATE INDEX idx1 ON t1(a,v1); +INSERT INTO t1 VALUES(9,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +UPDATE t1 SET a=1000; +DROP TABLE t1; set global innodb_file_per_table=0; set global innodb_file_format=Antelope; set global innodb_file_format_check=Antelope; diff --git a/mysql-test/suite/innodb_plugin/t/innodb-index.test b/mysql-test/suite/innodb_plugin/t/innodb-index.test index 717c7d4e032..d8a4a13edd7 100644 --- a/mysql-test/suite/innodb_plugin/t/innodb-index.test +++ b/mysql-test/suite/innodb_plugin/t/innodb-index.test @@ -404,6 +404,22 @@ alter table t1 row_format=compact; create index t1u on t1 (u(1)); drop table t1; + +# Bug#12637786 +SET @r=REPEAT('a',500); +CREATE TABLE t1(a INT, + v1 VARCHAR(500), v2 VARCHAR(500), v3 VARCHAR(500), + v4 VARCHAR(500), v5 VARCHAR(500), v6 VARCHAR(500), + v7 VARCHAR(500), v8 VARCHAR(500), v9 VARCHAR(500), + v10 VARCHAR(500), v11 VARCHAR(500), v12 VARCHAR(500), + v13 VARCHAR(500), v14 VARCHAR(500), v15 VARCHAR(500), + v16 VARCHAR(500), v17 VARCHAR(500), v18 VARCHAR(500) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +CREATE INDEX idx1 ON t1(a,v1); +INSERT INTO t1 VALUES(9,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +UPDATE t1 SET a=1000; +DROP TABLE t1; + eval set global innodb_file_per_table=$per_table; eval set global innodb_file_format=$format; eval set global innodb_file_format_check=$format; diff --git a/storage/innodb_plugin/ChangeLog b/storage/innodb_plugin/ChangeLog index 6d9e074d202..b348e19033b 100644 --- a/storage/innodb_plugin/ChangeLog +++ b/storage/innodb_plugin/ChangeLog @@ -1,3 +1,9 @@ +2011-06-30 The InnoDB Team + + * row/row0row.c: + Fix Bug#12637786 Wrong secondary index entries on CHAR and VARCHAR + columns in ROW_FORMAT=DYNAMIC and ROW_FORMAT=COMPRESSED + 2011-06-16 The InnoDB Team * btr/btr0cur.c, buf/buf0buddy.c, buf/buf0buf.c, buf/buf0lru.c, diff --git a/storage/innodb_plugin/row/row0row.c 
b/storage/innodb_plugin/row/row0row.c index dd94132eebc..4bad2cb9144 100644 --- a/storage/innodb_plugin/row/row0row.c +++ b/storage/innodb_plugin/row/row0row.c @@ -101,12 +101,27 @@ row_build_index_entry( dfield_copy(dfield, dfield2); - if (dfield_is_null(dfield) || ind_field->prefix_len == 0) { + if (dfield_is_null(dfield)) { continue; } - /* If a column prefix index, take only the prefix. - Prefix-indexed columns may be externally stored. */ + if (ind_field->prefix_len == 0 + && (!dfield_is_ext(dfield) + || dict_index_is_clust(index))) { + /* The dfield_copy() above suffices for + columns that are stored in-page, or for + clustered index record columns that are not + part of a column prefix in the PRIMARY KEY. */ + continue; + } + + /* If the column is stored externally (off-page) in + the clustered index, it must be an ordering field in + the secondary index. In the Antelope format, only + prefix-indexed columns may be stored off-page in the + clustered index record. In the Barracuda format, also + fully indexed long CHAR or VARCHAR columns may be + stored off-page. */ ut_ad(col->ord_part); if (UNIV_LIKELY_NULL(ext)) { @@ -119,13 +134,34 @@ row_build_index_entry( } dfield_set_data(dfield, buf, len); } + + if (ind_field->prefix_len == 0) { + /* In the Barracuda format + (ROW_FORMAT=DYNAMIC or + ROW_FORMAT=COMPRESSED), we can have a + secondary index on an entire column + that is stored off-page in the + clustered index. As this is not a + prefix index (prefix_len == 0), + include the entire off-page column in + the secondary index record. */ + continue; + } } else if (dfield_is_ext(dfield)) { + /* This table should be in Antelope format + (ROW_FORMAT=REDUNDANT or ROW_FORMAT=COMPACT). + In that format, the maximum column prefix + index length is 767 bytes, and the clustered + index record contains a 768-byte prefix of + each off-page column. */ ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); len -= BTR_EXTERN_FIELD_REF_SIZE; ut_a(ind_field->prefix_len <= len || dict_index_is_clust(index)); } + /* If a column prefix index, take only the prefix. */ + ut_ad(ind_field->prefix_len); len = dtype_get_at_most_n_mbchars( col->prtype, col->mbminlen, col->mbmaxlen, ind_field->prefix_len, len, dfield_get_data(dfield)); -- cgit v1.2.1 From dd4957965411c1b67ebfb1ba8650a3090d305f63 Mon Sep 17 00:00:00 2001 From: Georgi Kodinov Date: Tue, 7 Jun 2011 15:43:16 +0300 Subject: Bug #12589928: MEMORY LEAK WHEN RUNNING SYS_VARS.SECURE_FILE_PRIV This is the 5.1 version of the fix. Need to free the memory allocated by the option parsing code for empty strings when resetting the pointer to NULL. No test case needed, as the existing ones already cover this path. --- sql/mysqld.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 36f195e6232..13395cd9e07 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -9063,6 +9063,7 @@ static int fix_paths(void) { if (*opt_secure_file_priv == 0) { + my_free(opt_secure_file_priv, MYF(0)); opt_secure_file_priv= 0; } else -- cgit v1.2.1
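The last fix above is a small instance of a general pattern: an option parser that duplicates even an empty string onto the heap will leak that allocation if later code discards the value by overwriting the pointer with NULL instead of freeing it first. The sketch below is a minimal standalone illustration of that pattern and of the fix (free before resetting); it deliberately uses plain strdup()/free() rather than the server's my_malloc()/my_free() wrappers, and the variable and function names are illustrative stand-ins, not the actual identifiers from the MySQL sources.

/* Minimal sketch of the Bug#12589928 leak pattern, using libc allocation
   instead of the server's memory wrappers. Names are illustrative only. */
#include <stdlib.h>
#include <string.h>

static char *opt_secure_path;        /* stands in for opt_secure_file_priv */

/* Option parsing: the value is always duplicated, even when it is empty. */
static void parse_option(const char *value)
{
  opt_secure_path = strdup(value ? value : "");
}

/* Post-processing, as in fix_paths(): an empty value means "disabled". */
static void fix_paths(void)
{
  if (opt_secure_path && *opt_secure_path == '\0')
  {
    /* Without this free(), the empty string allocated by parse_option()
       would become unreachable (and thus leak) when the pointer is reset. */
    free(opt_secure_path);
    opt_secure_path = NULL;
  }
}

int main(void)
{
  parse_option("");                  /* e.g. --secure-file-priv= */
  fix_paths();
  return 0;
}

Running such a sketch under a leak checker (for example valgrind) with and without the free() call shows the single lost block that the patch above removes.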