author     Sergei Golubchik <serg@mariadb.org>    2019-02-21 22:42:00 +0100
committer  Sergei Golubchik <serg@mariadb.org>    2019-02-22 12:33:08 +0100
commit     bd7f7b14163d0696aa92c90deeab16d63ae6bbd6 (patch)
tree       4548b3da88d7d5231cc936a5d7f281379f1ad9e6
parent     f6000782fbcd8c0576cb89737ad66902f6113966 (diff)
download   mariadb-git-bd7f7b14163d0696aa92c90deeab16d63ae6bbd6.tar.gz
MDEV-371 Unique Index for long columns
post-merge fixes
-rw-r--r--   mysql-test/main/long_unique.result            4
-rw-r--r--   mysql-test/main/long_unique.test              6
-rw-r--r--   mysql-test/main/long_unique_debug.test        6
-rw-r--r--   mysql-test/main/long_unique_innodb.result    10
-rw-r--r--   mysql-test/main/long_unique_innodb.test      11
-rw-r--r--   mysql-test/main/long_unique_update.test       3
-rw-r--r--   mysql-test/main/long_unique_using_hash.test   3
-rw-r--r--   sql/share/errmsg-utf8.txt                     5
-rw-r--r--   sql/sql_show.cc                               8
-rw-r--r--   sql/sql_table.cc                            169
-rw-r--r--   sql/table.cc                                 42
11 files changed, 133 insertions, 134 deletions
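
This commit is the post-merge cleanup for MDEV-371, which implements unique constraints over long (BLOB) columns via a hidden hash field and a UNIQUE ... USING HASH key. Purely as orientation, not part of the patch itself, a minimal SQL sketch of the feature on a server containing this work:

create table t1 (a blob unique) engine=InnoDB;
show create table t1;                 # UNIQUE KEY `a` (`a`) USING HASH
insert into t1 values ('RUC');
insert into t1 values ('RUC');        # rejected: ER_DUP_ENTRY on key 'a'
drop table t1;
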
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
index 38ec65700a0..082eb4302ee 100644
--- a/mysql-test/main/long_unique.result
+++ b/mysql-test/main/long_unique.result
@@ -1387,13 +1387,13 @@ create table t1(a blob unique) partition by hash(a);
ERROR HY000: A BLOB field is not allowed in partition function
#key length > 2^16 -1
create table t1(a blob, unique(a(65536)));
-ERROR HY000: Max key segment length is 65535
+ERROR 42000: Specified key part was too long; max key part length is 65535 bytes
create table t1(a blob, unique(a(65535)));
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` blob DEFAULT NULL,
- UNIQUE KEY `a` (`a`) USING HASH
+ UNIQUE KEY `a` (`a`(65535)) USING HASH
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
#64 indexes
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
index 11ab5038809..6016c168c36 100644
--- a/mysql-test/main/long_unique.test
+++ b/mysql-test/main/long_unique.test
@@ -1,6 +1,10 @@
let datadir=`select @@datadir`;
--source include/have_partition.inc
+#
+# MDEV-371 Unique indexes for blobs
+#
+
--echo #Structure of tests
--echo #First we will check all option for
--echo #table containing single unique column
@@ -475,7 +479,7 @@ drop table t1;
--error ER_BLOB_FIELD_IN_PART_FUNC_ERROR
create table t1(a blob unique) partition by hash(a);
--echo #key length > 2^16 -1
---error ER_TOO_LONG_HASH_KEYSEG
+--error ER_TOO_LONG_KEYPART
create table t1(a blob, unique(a(65536)));
create table t1(a blob, unique(a(65535)));
show create table t1;
diff --git a/mysql-test/main/long_unique_debug.test b/mysql-test/main/long_unique_debug.test
index 63ebfa89b48..560f6499be6 100644
--- a/mysql-test/main/long_unique_debug.test
+++ b/mysql-test/main/long_unique_debug.test
@@ -1,5 +1,9 @@
--source include/have_debug.inc
---source include/have_innodb.inc
+
+#
+# MDEV-371 Unique indexes for blobs
+#
+
--echo #In this test case we will check what will happen in the case of hash collision
SET debug_dbug="d,same_long_unique_hash";
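
Not part of the patch: a sketch of the scenario the debug test above drives, assuming a debug build where the same_long_unique_hash injection point makes every computed hash identical, so uniqueness must fall back to comparing the actual column values:

--source include/have_debug.inc
SET debug_dbug="d,same_long_unique_hash";
create table t1 (a blob unique);
insert into t1 values ('xxx'), ('yyy');   # hashes collide but values differ: both rows accepted
insert into t1 values ('xxx');            # a real duplicate is still rejected
drop table t1;
SET debug_dbug="";
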
diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result
index efbddfb30a8..cb8c3ea4858 100644
--- a/mysql-test/main/long_unique_innodb.result
+++ b/mysql-test/main/long_unique_innodb.result
@@ -3,6 +3,16 @@ insert into t1 values('RUC');
insert into t1 values ('RUC');
ERROR 23000: Duplicate entry 'RUC' for key 'a'
drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+drop table t1;
#test for concurrent insert of long unique in innodb
create table t1(a blob unique) engine= InnoDB;
show create table t1;
diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test
index f3c7f7d952d..aac68cd2271 100644
--- a/mysql-test/main/long_unique_innodb.test
+++ b/mysql-test/main/long_unique_innodb.test
@@ -1,11 +1,19 @@
--source include/have_innodb.inc
+#
+# MDEV-371 Unique indexes for blobs
+#
+
create table t1(a blob unique) engine= InnoDB;
insert into t1 values('RUC');
--error ER_DUP_ENTRY
insert into t1 values ('RUC');
drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+drop table t1;
+
--echo #test for concurrent insert of long unique in innodb
create table t1(a blob unique) engine= InnoDB;
show create table t1;
@@ -33,7 +41,6 @@ insert into t1 values('RC');
commit;
set transaction isolation level READ COMMITTED;
start transaction;
---error ER_DUP_ENTRY
--error ER_LOCK_WAIT_TIMEOUT
insert into t1 values ('RC');
commit;
@@ -47,7 +54,6 @@ insert into t1 values('RR');
commit;
set transaction isolation level REPEATABLE READ;
start transaction;
---error ER_DUP_ENTRY
--error ER_LOCK_WAIT_TIMEOUT
insert into t1 values ('RR');
@@ -60,7 +66,6 @@ insert into t1 values('S');
commit;
set transaction isolation level SERIALIZABLE;
start transaction;
---error ER_DUP_ENTRY
--error ER_LOCK_WAIT_TIMEOUT
insert into t1 values ('S');
commit;
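
The hunks above also drop the --error ER_DUP_ENTRY expectations for the blocked concurrent inserts, leaving ER_LOCK_WAIT_TIMEOUT as the single expected outcome, and add coverage for a table that mixes a hash-based long unique with an ordinary unique key. As a sketch mirroring the new test case (not additional patch content):

create table t1 (a blob unique, c int unique) engine=InnoDB;
show create table t1;
# UNIQUE KEY `a` (`a`) USING HASH    <- long unique, backed by a hidden hash field
# UNIQUE KEY `c` (`c`)               <- ordinary B-tree unique index
drop table t1;
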
diff --git a/mysql-test/main/long_unique_update.test b/mysql-test/main/long_unique_update.test
index b160ebad9f1..98c3aaefe17 100644
--- a/mysql-test/main/long_unique_update.test
+++ b/mysql-test/main/long_unique_update.test
@@ -1,3 +1,6 @@
+#
+# MDEV-371 Unique indexes for blobs
+#
--echo #structure of tests;
--echo #1 test of table containing single unique blob column;
--echo #2 test of table containing another unique int/ varchar etc column;
diff --git a/mysql-test/main/long_unique_using_hash.test b/mysql-test/main/long_unique_using_hash.test
index 50f7a4e1920..1e19cd66b02 100644
--- a/mysql-test/main/long_unique_using_hash.test
+++ b/mysql-test/main/long_unique_using_hash.test
@@ -1,3 +1,6 @@
+#
+# MDEV-371 Unique indexes for blobs
+#
create table t1(a blob , unique(a) using hash);
--query_vertical show keys from t1;
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 35d328f11c3..9f50aea1e22 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -7950,8 +7950,7 @@ ER_PERIOD_NOT_FOUND
eng "Period %`s is not found in table"
ER_PERIOD_COLUMNS_UPDATED
eng "Column %`s used in period %`s specified in update SET list"
-
ER_PERIOD_CONSTRAINT_DROP
eng "Can't DROP CONSTRAINT `%s`. Use DROP PERIOD `%s` for this"
-ER_TOO_LONG_HASH_KEYSEG
- eng "Max key segment length is 65535"
+ER_TOO_LONG_KEYPART 42000 S1009
+ eng "Specified key part was too long; max key part length is %u bytes"
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 76c2793e8c7..c7b4fc6b2a7 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2352,9 +2352,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
if (key_part->field &&
(key_part->length !=
table->field[key_part->fieldnr-1]->key_length() &&
- !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))) &&
- (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
- key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+ !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))))
{
packet->append_parenthesized((long) key_part->length /
key_part->field->charset()->mbmaxlen);
@@ -6644,9 +6642,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
if (!(key_info->flags & HA_FULLTEXT) &&
(key_part->field &&
key_part->length !=
- show_table->s->field[key_part->fieldnr-1]->key_length()) &&
- (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
- key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+ show_table->s->field[key_part->fieldnr-1]->key_length()))
{
table->field[10]->store((longlong) key_part->length /
key_part->field->charset()->mbmaxlen, TRUE);
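
With the simplified conditions above, SHOW CREATE TABLE and INFORMATION_SCHEMA.STATISTICS report an explicit prefix length for a USING HASH key whenever it differs from the full column length, instead of special-casing HA_KEY_ALG_LONG_HASH. A sketch of the observable effect (expected output, not captured from a test run):

create table t1 (a blob, unique (a(65535)));
show create table t1;
# UNIQUE KEY `a` (`a`(65535)) USING HASH      <- prefix length is printed again
select sub_part from information_schema.statistics
where table_name = 't1' and index_name = 'a';  # 65535
drop table t1;
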
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 47a82295719..32f69d3585d 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2778,23 +2778,26 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
This will make checking for duplicated keys faster and ensure that
PRIMARY keys are prioritized.
- This will not reorder LONG_HASH indexes, because they must match the
- order of their LONG_UNIQUE_HASH_FIELD's.
*/
static int sort_keys(KEY *a, KEY *b)
{
ulong a_flags= a->flags, b_flags= b->flags;
+ /*
+ Do not reorder LONG_HASH indexes, because they must match the order
+ of their LONG_UNIQUE_HASH_FIELD's.
+ */
+ if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
+ b->algorithm == HA_KEY_ALG_LONG_HASH)
+ return a->usable_key_parts - b->usable_key_parts;
+
if (a_flags & HA_NOSAME)
{
if (!(b_flags & HA_NOSAME))
return -1;
if ((a_flags ^ b_flags) & HA_NULL_PART_KEY)
{
- if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
- b->algorithm == HA_KEY_ALG_LONG_HASH)
- return a->usable_key_parts - b->usable_key_parts;
/* Sort NOT NULL keys before other keys */
return (a_flags & HA_NULL_PART_KEY) ? 1 : -1;
}
@@ -2817,9 +2820,7 @@ static int sort_keys(KEY *a, KEY *b)
Prefer original key order. usable_key_parts contains here
the original key position.
*/
- return ((a->usable_key_parts < b->usable_key_parts) ? -1 :
- (a->usable_key_parts > b->usable_key_parts) ? 1 :
- 0);
+ return a->usable_key_parts - b->usable_key_parts;
}
/*
@@ -3302,6 +3303,7 @@ static inline void make_long_hash_field_name(LEX_CSTRING *buf, uint num)
buf->length= my_snprintf((char *)buf->str,
LONG_HASH_FIELD_NAME_LENGTH, "DB_ROW_HASH_%u", num);
}
+
/**
Add fully invisible hash field to table in case of long
unique column
@@ -3313,7 +3315,6 @@ static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
KEY *key_info)
{
List_iterator<Create_field> it(*create_list);
-// CHARSET_INFO *field_cs;
Create_field *dup_field, *cf= new (thd->mem_root) Create_field();
cf->flags|= UNSIGNED_FLAG | LONG_UNIQUE_HASH_FIELD;
cf->decimals= 0;
@@ -3336,18 +3337,6 @@ static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
it.rewind();
}
}
- /* for (uint i= 0; i < key_info->user_defined_key_parts; i++)
- {
- dup_field= create_list->elem(key_info->key_part[i].fieldnr);
- if (!i)
- field_cs= dup_field->charset;
- else if(field_cs != dup_field->charset)
- {
- my_error(ER_MULTIPLE_CS_HASH_KEY, MYF(0));
- return NULL;
- }
- }
- cf->charset= field_cs;*/
cf->field_name= field_name;
cf->set_handler(&type_handler_longlong);
key_info->algorithm= HA_KEY_ALG_LONG_HASH;
@@ -3940,27 +3929,27 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (f_is_blob(sql_field->pack_flag) ||
(f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL))
- {
- if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
- {
- my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name.str,
+ {
+ if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
+ {
+ my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name.str,
file->table_type());
- DBUG_RETURN(TRUE);
- }
+ DBUG_RETURN(TRUE);
+ }
if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type ==
Field::GEOM_POINT)
column->length= MAX_LEN_GEOM_POINT_FIELD;
- if (!column->length)
- {
- if (key->type == Key::PRIMARY)
- {
- my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
- DBUG_RETURN(TRUE);
+ if (!column->length)
+ {
+ if (key->type == Key::PRIMARY)
+ {
+ my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
+ DBUG_RETURN(TRUE);
+ }
+ else
+ is_hash_field_needed= true;
+ }
}
- else
- is_hash_field_needed= true;
- }
- }
#ifdef HAVE_SPATIAL
if (key->type == Key::SPATIAL)
{
@@ -4029,31 +4018,31 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (column->length)
{
- if (f_is_blob(sql_field->pack_flag))
- {
- key_part_length= MY_MIN(column->length,
- blob_length_by_type(sql_field->real_field_type())
- * sql_field->charset->mbmaxlen);
- if (key_part_length > max_key_length ||
- key_part_length > file->max_key_part_length())
- {
- if (key->type == Key::MULTIPLE)
- {
- key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
- /* not a critical problem */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ if (f_is_blob(sql_field->pack_flag))
+ {
+ key_part_length= MY_MIN(column->length,
+ blob_length_by_type(sql_field->real_field_type())
+ * sql_field->charset->mbmaxlen);
+ if (key_part_length > max_key_length ||
+ key_part_length > file->max_key_part_length())
+ {
+ if (key->type == Key::MULTIPLE)
+ {
+ key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
+ /* not a critical problem */
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY,
ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
- }
- else
- is_hash_field_needed= true;
- }
- }
+ }
+ else
+ is_hash_field_needed= true;
+ }
+ }
// Catch invalid use of partial keys
- else if (!f_is_geom(sql_field->pack_flag) &&
+ else if (!f_is_geom(sql_field->pack_flag) &&
// is the key partial?
column->length != key_part_length &&
// is prefix length bigger than field length?
@@ -4067,11 +4056,11 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
// and is this a 'unique' key?
(key_info->flags & HA_NOSAME))))
{
- my_message(ER_WRONG_SUB_KEY, ER_THD(thd, ER_WRONG_SUB_KEY), MYF(0));
- DBUG_RETURN(TRUE);
- }
- else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
- key_part_length= column->length;
+ my_message(ER_WRONG_SUB_KEY, ER_THD(thd, ER_WRONG_SUB_KEY), MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
+ key_part_length= column->length;
}
else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG) &&
!is_hash_field_needed)
@@ -4083,43 +4072,38 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key_part_length > file->max_key_part_length() &&
key->type != Key::FULLTEXT)
{
- if (key->type == Key::MULTIPLE)
- {
- key_part_length= file->max_key_part_length();
- /* not a critical problem */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ if (key->type == Key::MULTIPLE)
+ {
+ key_part_length= file->max_key_part_length();
+ /* not a critical problem */
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
- }
- else
- {
- if(key->type == Key::UNIQUE)
- {
- is_hash_field_needed= true;
+ }
+ else
+ {
+ if (key->type == Key::UNIQUE)
+ {
+ is_hash_field_needed= true;
+ }
+ else
+ {
+ key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
+ my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
+ DBUG_RETURN(TRUE);
+ }
+ }
}
- else
+ /* We can not store key_part_length more then 2^16 - 1 in frm */
+ if (is_hash_field_needed && column->length > UINT16_MAX)
{
- key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
- my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
- DBUG_RETURN(TRUE);
- }
- }
+ my_error(ER_TOO_LONG_KEYPART, MYF(0), UINT16_MAX);
+ DBUG_RETURN(TRUE);
}
- /* We can not store key_part_length more then 2^16 - 1 in frm
- So we will simply make it zero */
- if (is_hash_field_needed && column->length > (1<<16) - 1)
- {
- my_error(ER_TOO_LONG_HASH_KEYSEG, MYF(0));
- DBUG_RETURN(TRUE);
- }
else
key_part_info->length= (uint16) key_part_length;
- if (is_hash_field_needed &&
- (key_part_info->length == sql_field->char_length * sql_field->charset->mbmaxlen ||
- key_part_info->length == (1<<16) -1))
- key_part_info->length= 0;
/* Use packed keys for long strings on the first column */
if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
!((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
@@ -8385,13 +8369,6 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (cfield->field) // Not new field
{
/*
- if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
- {
- Field *fld= cfield->field;
- if (fld->max_display_length() == cfield->length*fld->charset()->mbmaxlen
- && fld->max_data_length() != key_part->length)
- cfield->length= cfield->char_length= key_part->length;
- }
If the field can't have only a part used in a key according to its
new type, or should not be used partially according to its
previous type, or the field length is less than the key part
diff --git a/sql/table.cc b/sql/table.cc
index 77f0cfe14ca..13f4e200e2f 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1177,10 +1177,10 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
/* Now, initialize CURRENT_TIMESTAMP and UNIQUE_INDEX_HASH_FIELD fields */
for (field_ptr= table->field; *field_ptr; field_ptr++)
- {
+ {
Field *field= *field_ptr;
if (field->flags & LONG_UNIQUE_HASH_FIELD)
- {
+ {
List<Item> *field_list= new (mem_root) List<Item>();
Item *list_item;
KEY *key;
@@ -2443,8 +2443,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
{
share->long_unique_table= 1;
- if (share->frm_version < FRM_VER_EXPRESSSIONS)
- share->frm_version= FRM_VER_EXPRESSSIONS;
hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
hash_keypart->store_length= hash_keypart->length;
@@ -2453,8 +2451,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
hash_keypart->key_type= 32834;
/* Last n fields are unique_index_hash fields*/
hash_keypart->offset= offset;
-// hash_keypart->offset= share->reclength
-// - HA_HASH_FIELD_LENGTH*(share->fields - hash_field_used_no);
hash_keypart->fieldnr= hash_field_used_no + 1;
hash_field= share->field[hash_field_used_no];
hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
@@ -2472,7 +2468,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
KEY* key_first_info= NULL;
if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME &&
- keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
{
/*
If the UNIQUE key doesn't have NULL columns and is not a part key
@@ -2507,7 +2503,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
if (share->use_ext_keys)
- {
+ {
if (primary_key >= MAX_KEY)
{
add_first_key_parts= 0;
@@ -2566,7 +2562,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
for (i= 0; i < keyinfo->user_defined_key_parts; i++)
{
uint fieldnr= keyinfo->key_part[i].fieldnr;
- field= share->field[keyinfo->key_part[i].fieldnr-1];
+ field= share->field[fieldnr-1];
if (field->null_ptr)
len_null_byte= HA_KEY_NULL_LENGTH;
@@ -2581,8 +2577,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
ext_key_length+= keyinfo->key_part[i].length + len_null_byte
+ length_bytes;
- if (share->field[fieldnr-1]->key_length() !=
- keyinfo->key_part[i].length)
+ if (field->key_length() != keyinfo->key_part[i].length)
{
add_keyparts_for_this_key= 0;
break;
@@ -4258,6 +4253,8 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
{
size_t key_comment_total_bytes= 0;
uint i;
+ uchar frm_format= create_info->expression_length ? FRM_VER_EXPRESSSIONS
+ : FRM_VER_TRUE_VARCHAR;
DBUG_ENTER("prepare_frm_header");
/* Fix this when we have new .frm files; Current limit is 4G rows (TODO) */
@@ -4266,17 +4263,6 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
if (create_info->min_rows > UINT_MAX32)
create_info->min_rows= UINT_MAX32;
- size_t key_length, tmp_key_length, tmp, csid;
- bzero((char*) fileinfo, FRM_HEADER_SIZE);
- /* header */
- fileinfo[0]=(uchar) 254;
- fileinfo[1]= 1;
- fileinfo[2]= (create_info->expression_length == 0 ? FRM_VER_TRUE_VARCHAR :
- FRM_VER_EXPRESSSIONS);
-
- DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
- fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
-
/*
Keep in sync with pack_keys() in unireg.cc
For each key:
@@ -4295,8 +4281,20 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
(key_info[i].comment.length > 0));
if (key_info[i].flags & HA_USES_COMMENT)
key_comment_total_bytes += 2 + key_info[i].comment.length;
+ if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+ frm_format= FRM_VER_EXPRESSSIONS;
}
+ size_t key_length, tmp_key_length, tmp, csid;
+ bzero((char*) fileinfo, FRM_HEADER_SIZE);
+ /* header */
+ fileinfo[0]=(uchar) 254;
+ fileinfo[1]= 1;
+ fileinfo[2]= frm_format;
+
+ DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
+ fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
+
key_length= keys * (8 + MAX_REF_PARTS * 9 + NAME_LEN + 1) + 16
+ key_comment_total_bytes;