author     Sachin <sachin.setiya@mariadb.com>    2018-12-31 10:05:19 +0530
committer  Sachin <sachin.setiya@mariadb.com>    2018-12-31 20:35:38 +0530
commit     e94320a36b07565d46dc98c36d7de9d32d6b0c4c (patch)
tree       c80b577c474e56c20e93b0704e932a3aa9b0a4ae
parent     1e9279a8bd5ecad51238e2759f317baf457f0006 (diff)
download   mariadb-git-e94320a36b07565d46dc98c36d7de9d32d6b0c4c.tar.gz
Architecture change final
-rw-r--r--  include/my_base.h                          |   5
-rw-r--r--  mysql-test/main/long_unique.result         |  32
-rw-r--r--  mysql-test/main/long_unique.test           |  12
-rw-r--r--  mysql-test/main/long_unique_update.result  |   9
-rw-r--r--  mysql-test/main/long_unique_update.test    |   1
-rw-r--r--  sql/field.h                                |   2
-rw-r--r--  sql/handler.cc                             |  10
-rw-r--r--  sql/sql_show.cc                            |  14
-rw-r--r--  sql/sql_table.cc                           |  17
-rw-r--r--  sql/table.cc                               | 297
-rw-r--r--  sql/table.h                                |   7
-rw-r--r--  sql/table_cache.cc                         |   3
-rw-r--r--  sql/unireg.cc                              |   4

13 files changed, 203 insertions(+), 210 deletions(-)
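The central change of this commit: the dedicated HA_LONG_UNIQUE_HASH flag bit is removed (first hunk below), and a long-unique key is from now on recognized by the key's algorithm value, HA_KEY_ALG_LONG_HASH. A minimal self-contained sketch of that detection idiom; the names mirror the server's, but the types and values here are illustrative stand-ins, not the real constants:

#include <cassert>

// Toy model: detection by algorithm enum instead of a flag bit.
enum key_alg { ALG_UNDEF, ALG_BTREE, ALG_LONG_HASH };

struct Key {
  unsigned flags;      // HA_NOSAME etc.; no HA_LONG_UNIQUE_HASH bit anymore
  key_alg  algorithm;  // persisted per key in the .frm image
};

static bool is_long_unique(const Key &k)
{
  return k.algorithm == ALG_LONG_HASH;   // no flag mask to keep in sync
}

int main()
{
  Key k= {/*flags=*/1 /* HA_NOSAME-like bit */, ALG_LONG_HASH};
  assert(is_long_unique(k));
  return 0;
}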
diff --git a/include/my_base.h b/include/my_base.h
index 9a16d4b624b..1d7db5a3a11 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -291,11 +291,6 @@ enum ha_base_keytype {
 #define HA_KEY_HAS_PART_KEY_SEG 65536 /* Internal Flag Can be calcaluted */
 #define HA_INVISIBLE_KEY 2<<18
-/*
-  Flag for long unique hash key
-  calculated in the init_from_binary_frm_image
-*/
-#define HA_LONG_UNIQUE_HASH 2<<19
 /* Automatic bits in key-flag */
 #define HA_SPACE_PACK_USED 4 /* Test for if SPACE_PACK used */
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
index c8b44b35cb7..994f1b4c3f9 100644
--- a/mysql-test/main/long_unique.result
+++ b/mysql-test/main/long_unique.result
@@ -2,11 +2,13 @@
 #First we will check all option for
 #table containing single unique column
 #table containing keys like unique(a,b,c,d) etc
-#then table containing 2 blob unique etc
+#then table containing 2 blob unique etc
 set @allowed_packet= @@max_allowed_packet;
-#table with single long blob column;
-create table t1(a blob unique);
+#table with single long blob column;
+create table t1(a blob unique );
 insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+insert into t1 values(2);
+ERROR 23000: Duplicate entry '2' for key 'a'
 #table structure;
 desc t1;
 Field Type Null Key Default Extra
@@ -40,7 +42,7 @@ Recordlength: 20
 table description:
 Key Start Len Index   Type
-1   12    8   multip. ulonglong NULL
+1   12    8   unique  ulonglong NULL
 select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
 def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references  NEVER NULL
@@ -128,7 +130,7 @@ Recordlength: 20
 table description:
 Key Start Len Index   Type
-1   12    8   multip. ulonglong NULL
+1   12    8   unique  ulonglong NULL
 #now some alter commands;
 alter table t1 add column b int;
 desc t1;
@@ -322,7 +324,7 @@ ERROR 23000: Duplicate entry '1' for key 'b'
 insert into t1 values(7,1);
 ERROR 23000: Duplicate entry '1' for key 'b'
 drop table t1;
-#table with multiple long blob column and varchar text column ;
+#table with multiple long blob column and varchar text column ;
 create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
 insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
 ('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
@@ -364,10 +366,10 @@ Recordlength: 3072
 table description:
 Key Start Len Index   Type
-1   3063  8   multip. ulonglong NULL
-2   3055  8   multip. ulonglong NULL
-3   3047  8   multip. ulonglong NULL
-4   3039  8   multip. ulonglong NULL
+1   3063  8   unique  ulonglong NULL
+2   3055  8   unique  ulonglong NULL
+3   3047  8   unique  ulonglong NULL
+4   3039  8   unique  ulonglong NULL
 select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
 def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI select,insert,update,references  NEVER NULL
@@ -522,7 +524,7 @@ g int(11) YES  NULL
 db_row_hash_1 int(11) YES UNI NULL
 db_row_hash_2 int(11) YES UNI NULL
 db_row_hash_5 int(11) YES  NULL
-#this show now break anything;
+#this should not break anything;
 insert into t1 values(1,2,3,4,5,6,23,5,6);
 ERROR 23000: Duplicate entry '1' for key 'a'
 #this should also drop the unique index;
@@ -721,10 +723,10 @@ Recordlength: 5092
 table description:
 Key Start Len Index   Type
-1   5081  8   multip. ulonglong NULL
-2   5073  8   multip. ulonglong NULL
-3   5065  8   multip. ulonglong NULL
-4   5057  8   multip. ulonglong NULL
+1   5081  8   unique  ulonglong NULL
+2   5073  8   unique  ulonglong NULL
+3   5065  8   unique  ulonglong NULL
+4   5057  8   unique  ulonglong NULL
 select * from information_schema.columns where table_schema = 'test' and table_name = 't1';
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
 def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob MUL select,insert,update,references  NEVER NULL
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
index 65fcdfcc9e4..52e91d1f182 100644
--- a/mysql-test/main/long_unique.test
+++ b/mysql-test/main/long_unique.test
@@ -4,13 +4,14 @@ let datadir=`select @@datadir`;
 --echo #First we will check all option for
 --echo #table containing single unique column
 --echo #table containing keys like unique(a,b,c,d) etc
---echo #then table containing 2 blob unique etc
+--echo #then table containing 2 blob unique etc
 set @allowed_packet= @@max_allowed_packet;
-
---echo #table with single long blob column;
-create table t1(a blob unique);
+--echo #table with single long blob column;
+create table t1(a blob unique );
 insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+--error ER_DUP_ENTRY
+insert into t1 values(2);
 --echo #table structure;
 desc t1;
 show create table t1;
@@ -125,7 +126,7 @@ insert into t1 values(1,1);
 insert into t1 values(7,1);
 drop table t1;
---echo #table with multiple long blob column and varchar text column ;
+--echo #table with multiple long blob column and varchar text column ;
 create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
 insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
 ('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
@@ -224,7 +225,6 @@ alter table t1 drop key clm1, drop key clm2;
 show create table t1;
 show keys from t1;
 drop table t1;
-
 --echo #now the table with key on multiple columns; the ultimate test;
 create table t1(a blob, b int , c varchar(2000) , d text , e varchar(3000) , f longblob , g int , h text ,
 unique(a,b,c), unique(c,d,e),unique(e,f,g,h), unique(b,d,g,h));
diff --git a/mysql-test/main/long_unique_update.result b/mysql-test/main/long_unique_update.result
index fd826198179..7c3a5ce1445 100644
--- a/mysql-test/main/long_unique_update.result
+++ b/mysql-test/main/long_unique_update.result
@@ -254,10 +254,11 @@ ERROR 23000: Duplicate entry '2-2' for key 'b'
 update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
 update t1 set b=35, c=35 where e=1 and g=1 ;
 update t1 set b=b+1, c=c+1 where a>0;
+ERROR 23000: Duplicate entry '3-3' for key 'b'
 update ignore t1 set b=b+1, c=c+1 where a>0;
 select * from t1 ;
 a b c d e f g
-1 35 35 1 1 1 1
+1 37 37 1 1 1 1
 2 2 2 2 2 2 2
 3 3 3 3 3 3 3
 4 4 4 4 4 4 4
@@ -265,7 +266,7 @@ a b c d e f g
 6 6 6 6 6 6 6
 7 7 7 7 7 7 7
 8 8 8 8 8 8 8
-9 9 9 9 9 9 9
+9 10 10 9 9 9 9
 truncate table t1;
 insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
 (5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
@@ -304,7 +305,7 @@ ERROR 23000: Duplicate entry '3-3' for key 'e'
 update ignore t1 set e=e+1, g=g+1 where a>0;
 select * from t1 ;
 a b c d e f g
-1 1 1 1 36 1 36
+1 1 1 1 37 1 37
 2 2 2 2 2 2 2
 3 3 3 3 3 3 3
 4 4 4 4 4 4 4
@@ -312,5 +313,5 @@ a b c d e f g
 6 6 6 6 6 6 6
 7 7 7 7 7 7 7
 8 8 8 8 8 8 8
-9 9 9 9 9 9 9
+9 9 9 9 10 9 10
 drop table t1;
diff --git a/mysql-test/main/long_unique_update.test b/mysql-test/main/long_unique_update.test
index 79cca079fe6..b160ebad9f1 100644
--- a/mysql-test/main/long_unique_update.test
+++ b/mysql-test/main/long_unique_update.test
@@ -106,6 +106,7 @@ select * from t1 limit 3;
 update t1 set b=2 ,c=2 where a=1;
 update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
 update t1 set b=35, c=35 where e=1 and g=1 ;
+--error ER_DUP_ENTRY
 update t1 set b=b+1, c=c+1 where a>0;
 update ignore t1 set b=b+1, c=c+1 where a>0;
 select * from t1 ;
diff --git a/sql/field.h b/sql/field.h
index 09c642f4137..52649562827 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -4311,7 +4311,7 @@ public:
   int key_cmp(const uchar *a, const uchar *b)
   { return cmp_binary((uchar *) a, (uchar *) b); }
   int key_cmp(const uchar *str, uint length);
-  int cmp_offset(long row_offset);
+  int cmp_offset(my_ptrdiff_t row_offset);
   bool update_min(Field *min_val, bool force_update)
   {
     longlong val= val_int();
diff --git a/sql/handler.cc b/sql/handler.cc
index 6cb812cc473..f1f47a183c4 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -3688,9 +3688,19 @@ void handler::print_error(int error, myf errflag)
     uint key_nr=get_dup_key(error);
     if ((int) key_nr >= 0 && key_nr < table->s->keys)
     {
+      KEY *long_key= NULL;
+      if (table->key_info[key_nr].algorithm
+              == HA_KEY_ALG_LONG_HASH)
+      {
+        long_key= table->key_info + key_nr;
+        re_setup_keyinfo_hash(long_key);
+      }
       print_keydup_error(table, &table->key_info[key_nr], errflag);
+      if (long_key)
+        setup_keyinfo_hash(long_key);
       DBUG_VOID_RETURN;
     }
+    setup_table_hash(table);
   }
   textno=ER_DUP_KEY;
   break;
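The handler.cc hunk above brackets the duplicate-key report: a long-hash key is temporarily expanded with re_setup_keyinfo_hash() so the error message names the user's columns rather than the hidden hash column, then collapsed again with setup_keyinfo_hash(). A self-contained toy model of why that bracket matters; all types and names here are illustrative stand-ins, not the server's:

#include <cstdio>

// Toy model: expand before reporting, collapse afterwards.
struct Key { const char **names; unsigned n; };

static void expand(Key *k, const char **cols, unsigned n) { k->names= cols; k->n= n; }
static void collapse(Key *k, const char **hash)           { k->names= hash; k->n= 1; }

static void report_dup(const Key &k)
{
  std::printf("Duplicate entry for key parts:");
  for (unsigned i= 0; i < k.n; i++)
    std::printf(" %s", k.names[i]);
  std::printf("\n");
}

int main()
{
  const char *cols[]= {"a", "b", "c"};
  const char *hash[]= {"DB_ROW_HASH_1"};
  Key k= {hash, 1};        // engine view of the key
  expand(&k, cols, 3);     // re_setup_keyinfo_hash analogue
  report_dup(k);           // message shows a, b, c, not the hash column
  collapse(&k, hash);      // setup_keyinfo_hash analogue
  return 0;
}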
packet->append(STRING_WITH_LEN("PRIMARY KEY")); } - else if (key_info->flags & HA_NOSAME || key_info->flags & HA_LONG_UNIQUE_HASH) + else if (key_info->flags & HA_NOSAME) packet->append(STRING_WITH_LEN("UNIQUE KEY ")); else if (key_info->flags & HA_FULLTEXT) packet->append(STRING_WITH_LEN("FULLTEXT KEY ")); @@ -2441,6 +2442,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, } #endif tmp_restore_column_map(table->read_set, old_map); + setup_table_hash(table); DBUG_RETURN(error); } @@ -6546,7 +6548,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, else if (!tables->view) { TABLE *show_table= tables->table; - KEY *key_info=show_table->s->key_info; + KEY *key_info=show_table->key_info; if (show_table->file) { show_table->file->info(HA_STATUS_VARIABLE | @@ -6554,6 +6556,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, HA_STATUS_TIME); set_statistics_for_table(thd, show_table); } + re_setup_table(show_table); for (uint i=0 ; i < show_table->s->keys ; i++,key_info++) { if ((key_info->flags & HA_INVISIBLE_KEY) && @@ -6593,7 +6596,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, table->field[9]->store((longlong) records, TRUE); table->field[9]->set_notnull(); } - if (key->flags & HA_LONG_UNIQUE_HASH) + if (key->algorithm == HA_KEY_ALG_LONG_HASH) table->field[13]->store(STRING_WITH_LEN("HASH"), cs); else { @@ -6627,6 +6630,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, DBUG_RETURN(1); } } + setup_table_hash(show_table); } DBUG_RETURN(res); } @@ -6855,6 +6859,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables, show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); + re_setup_table(show_table); for (uint i=0 ; i < show_table->s->keys ; i++, key_info++) { if (i != primary_key && !(key_info->flags & HA_NOSAME)) @@ -6875,6 +6880,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables, DBUG_RETURN(1); } } + setup_table_hash(show_table); // Table check constraints for ( uint i = 0; i < show_table->s->table_check_constraints; i++ ) @@ -7053,6 +7059,7 @@ static int get_schema_key_column_usage_record(THD *thd, show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); + re_setup_table(show_table); for (uint i=0 ; i < show_table->s->keys ; i++, key_info++) { if (i != primary_key && !(key_info->flags & HA_NOSAME)) @@ -7075,6 +7082,7 @@ static int get_schema_key_column_usage_record(THD *thd, } } } + setup_table_hash(show_table); show_table->file->get_foreign_key_list(thd, &f_key_list); FOREIGN_KEY_INFO *f_key_info; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 888e07c24fe..9fc95fecd60 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -8331,11 +8331,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, Collect all keys which isn't in drop list. Add only those for which some fields exists. 
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 888e07c24fe..9fc95fecd60 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -8331,11 +8331,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
     Collect all keys which isn't in drop list. Add only those
     for which some fields exists.
   */
-
   for (uint i=0 ; i < table->s->keys ; i++,key_info++)
   {
     if (key_info->flags & HA_INVISIBLE_KEY)
       continue;
+    if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+      re_setup_keyinfo_hash(key_info);
     const char *key_name= key_info->name.str;
     Alter_drop *drop;
     drop_it.rewind();
@@ -8459,11 +8460,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
       enum Key::Keytype key_type;
       LEX_CSTRING tmp_name;
       bzero((char*) &key_create_info, sizeof(key_create_info));
-      if (key_info->flags & HA_LONG_UNIQUE_HASH)
-      {
-        key_info->flags&= ~(HA_LONG_UNIQUE_HASH);
+      if (key_info->algorithm & HA_KEY_ALG_LONG_HASH)
        key_info->algorithm= HA_KEY_ALG_UNDEF;
-      }
       key_create_info.algorithm= key_info->algorithm;
       /*
         We copy block size directly as some engines, like Area, sets this
@@ -10631,14 +10629,23 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
       if ((int) key_nr >= 0)
       {
         const char *err_msg= ER_THD(thd, ER_DUP_ENTRY_WITH_KEY_NAME);
+        KEY *long_key= NULL;
         if (key_nr == 0 && to->s->keys > 0 &&
             (to->key_info[0].key_part[0].field->flags &
              AUTO_INCREMENT_FLAG))
           err_msg= ER_THD(thd, ER_DUP_ENTRY_AUTOINCREMENT_CASE);
+        if (key_nr <= to->s->keys && to->key_info[key_nr].algorithm
+                == HA_KEY_ALG_LONG_HASH)
+        {
+          long_key= to->key_info + key_nr;
+          re_setup_keyinfo_hash(long_key);
+        }
         print_keydup_error(to,
                            key_nr >= to->s->keys ? NULL :
                                &to->key_info[key_nr],
                            err_msg, MYF(0));
+        if (long_key)
+          setup_keyinfo_hash(long_key);
       }
       else
         to->file->print_error(error, MYF(0));
diff --git a/sql/table.cc b/sql/table.cc
index 30769d7b139..74309c95830 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -771,14 +771,6 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
     keyinfo->rec_per_key= rec_per_key;
     for (j=keyinfo->user_defined_key_parts ; j-- ; key_part++)
     {
-      //It will be handled later
-      //Please note we did not allocated extra n key_parts for long unique(a^1,...a^n)
-      //We have allocated just one key_part and that will point to hash
-      if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
-      {
-        strpos+=9;
-        continue;
-      }
       if (strpos + (new_frm_ver >= 1 ? 9 : 7) >= frm_image_end)
         return 1;
       *rec_per_key++=0;
@@ -809,6 +801,8 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
     {
       keyinfo->flags|= HA_NOSAME;
       keyinfo->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+      //Storing key hash
+      key_part++;
     }
 
     /*
@@ -845,8 +839,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
     }
     if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
       share->ext_key_parts++;
-    else
-      share->ext_key_parts+= keyinfo->ext_key_parts;
+    share->ext_key_parts+= keyinfo->ext_key_parts;
   }
   keynames=(char*) key_part;
   strpos+= strnmov(keynames, (char *) strpos, frm_image_end - strpos) - keynames;
@@ -1164,29 +1157,70 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
   {
     Field *field= *field_ptr;
     if (field->flags & LONG_UNIQUE_HASH_FIELD)
-    {
+    {/*
       Item *temp, **arguments= ((Item_func_hash *)field->vcol_info->expr)->arguments();
-      Item *list_item;
       uint count= ((Item_func_hash *)field->vcol_info->expr)->argument_count();
-      List<Item > *field_list= new (mem_root) List<Item >();
       for (uint i=0; i < count; i++)
       {
         temp= arguments[i];
         if (temp->type() == Item::CONST_ITEM)
         {
-          list_item= new(mem_root)Item_field(thd, table->field[temp->val_int() -1]);
         }
         else
         {
           Item **l_arguments= ((Item_func_left *)temp)->arguments();
+        }
+      }
+      */
+
+      List<Item > *field_list= new (mem_root) List<Item >();
+      Item *list_item;
+      KEY *key;
+      uint key_index;
+      for (key_index= 0; key_index < table->s->keys; key_index++)
+      {
+        key=table->key_info + key_index;
+        if (key->algorithm == HA_KEY_ALG_LONG_HASH &&
+            key->key_part[key->user_defined_key_parts].fieldnr == field->field_index+ 1)
+          break;
+      }
+      KEY_PART_INFO *keypart;
+      for (uint i=0; i < key->user_defined_key_parts; i++)
+      {
+        keypart= key->key_part + i;
+        if (!keypart->length)
+        {
+          list_item= new(mem_root)Item_field(thd, keypart->field);
+        }
+        else
+        {
           list_item= new(mem_root)Item_func_left(thd,
-                       new (mem_root)Item_field(thd, table->field[l_arguments[0]->val_int()]),
-                       new (mem_root) Item_int(thd, l_arguments[1]->val_int()));
+                       new (mem_root)Item_field(thd, keypart->field),
+                       new (mem_root) Item_int(thd, keypart->length));
+          list_item->fix_fields(thd, NULL);
         }
         field_list->push_back(list_item, mem_root);
       }
       Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);
+      Virtual_column_info *v= new (mem_root) Virtual_column_info();
+      field->vcol_info= v;
       field->vcol_info->expr= hash_item;
+      uint parts= key->user_defined_key_parts;
+      key->user_defined_key_parts= key->ext_key_parts= key->usable_key_parts= 1;
+      key->key_part+= parts;
+
+      if (key->flags & HA_NULL_PART_KEY)
+        key->key_length= HA_HASH_KEY_LENGTH_WITH_NULL;
+      else
+        key->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+      key= table->s->key_info + key_index;
+      key->user_defined_key_parts= key->ext_key_parts= key->usable_key_parts= 1;
+      key->key_part+= parts;
+
+      if (key->flags & HA_NULL_PART_KEY)
+        key->key_length= HA_HASH_KEY_LENGTH_WITH_NULL;
+      else
+        key->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
       *(vfield_ptr++)= *field_ptr;
     }
     if (field->has_default_now_unireg_check())
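In the rewritten parse_vcol_defs() above, the hash column's virtual expression is rebuilt directly from the key definition: HASH(col) for a whole-column part, HASH(LEFT(col, len)) for a prefix part, instead of re-parsing a stored expression. A self-contained sketch of that construction rule, using string building as a stand-in for the server's Item tree; the names are illustrative:

#include <cstdio>
#include <string>
#include <vector>

// Toy model: derive the hash expression from the key parts themselves.
struct KeyPart { std::string field; unsigned prefix_len; };  // 0 = whole column

static std::string build_hash_expr(const std::vector<KeyPart> &parts)
{
  std::string expr= "HASH(";
  for (size_t i= 0; i < parts.size(); i++)
  {
    if (i) expr+= ", ";
    if (parts[i].prefix_len)       // prefix part: hash LEFT(col, len)
      expr+= "LEFT(" + parts[i].field + ", " +
             std::to_string(parts[i].prefix_len) + ")";
    else                           // whole column
      expr+= parts[i].field;
  }
  return expr + ")";
}

int main()
{
  // UNIQUE(a, b(10)) -> HASH(a, LEFT(b, 10))
  std::vector<KeyPart> parts= {{"a", 0}, {"b", 10}};
  std::printf("%s\n", build_hash_expr(parts).c_str());
  return 0;
}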
@@ -1292,7 +1326,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
   uint db_create_options, keys, key_parts, n_length;
   uint com_length, null_bit_pos, UNINIT_VAR(mysql57_vcol_null_bit_pos), bitmap_count;
   uint i;
-  uint field_additional_property_length= 0;
   bool use_hash, mysql57_null_bits= 0;
   char *keynames, *names, *comment_pos;
   const uchar *forminfo, *extra2;
@@ -1323,7 +1356,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
   bool vers_can_native= false;
   const uchar *extra2_field_flags= 0;
   size_t extra2_field_flags_length= 0;
-  const uchar* key_info_ptr;
   MEM_ROOT *old_root= thd->mem_root;
   Virtual_column_info **table_check_constraints;
@@ -1331,6 +1363,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
   keyinfo= &first_keyinfo;
   share->ext_key_parts= 0;
+  share->long_unique_table= 0;
   thd->mem_root= &share->mem_root;
   if (write && write_frm_image(frm_image, frm_length))
@@ -1632,7 +1665,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
     share->set_use_ext_keys_flag(plugin_hton(se_plugin)->flags & HTON_SUPPORTS_EXTENDED_KEYS);
-    key_info_ptr= disk_buff + 6;
     if (create_key_infos(disk_buff + 6, frm_image_end, keys, keyinfo,
                          new_frm_ver, ext_key_parts,
                          share, len, &first_keyinfo, keynames))
@@ -1726,7 +1758,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
   }
   else
   {
-    key_info_ptr= disk_buff + 6;
     if (create_key_infos(disk_buff + 6, frm_image_end, keys, keyinfo,
                          new_frm_ver, ext_key_parts,
                          share, len, &first_keyinfo, keynames))
@@ -2225,14 +2256,15 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
   {
     keyinfo= share->key_info;
     uint hash_field_used_no= share->fields -1 ;
-    KEY_PART_INFO *hash_keypart, *temp_key_part;
-    Field *hash_field, *temp_field;
+    KEY_PART_INFO *hash_keypart;
+    Field *hash_field;
     for (uint i= 0; i < share->keys; i++, keyinfo++)
     {
       /*
         1. We need set value in hash key_part
         2. Set vcol_info in corresponding db_row_hash_ field
       */
+
       if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
       {
         /*
@@ -2268,7 +2300,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         //v->hash_expr.length= hash_str.length();
         hash_fld->vcol_info= v;
         */
-        hash_keypart= keyinfo->key_part;
+        share->long_unique_table= 1;
+        hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
         hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
         hash_keypart->store_length= hash_keypart->length;
         hash_keypart->type= HA_KEYTYPE_ULONGLONG;
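The @@ -2268 hunk pins down the new in-memory layout: a long-unique key keeps its user-defined parts, and the hidden 8-byte hash part now sits at index user_defined_key_parts instead of replacing the whole list. A toy model of that layout, with illustrative types:

#include <cassert>

// Toy model: user parts followed by exactly one trailing hash part.
struct KeyPart { int fieldnr; unsigned length; };

int main()
{
  const unsigned user_parts= 3;               // UNIQUE(a, b, c)
  KeyPart parts[user_parts + 1]=              // +1 slot for DB_ROW_HASH_1
    {{1, 0}, {2, 0}, {3, 0}, {0, 0}};
  // keyinfo->key_part + keyinfo->user_defined_key_parts in the real code:
  KeyPart *hash_keypart= parts + user_parts;
  hash_keypart->fieldnr= 4;                   // the hidden hash field
  hash_keypart->length= 8;                    // HA_HASH_KEY_LENGTH_WITHOUT_NULL
  assert(parts[user_parts].length == 8);      // hash part sits after user parts
  return 0;
}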
@@ -2284,7 +2317,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
           But since we do not know how many fields , and which fields are there
           We will look into frm (we have saved key_info_ptr)
         */
-        if (new_frm_ver >= 4) //Idk why frm version is 4 I thought it will >=10
+#ifdef backchodi
+        if (new_frm_ver >= 110202) //Idk why frm version is 4 I thought it will >=10
         {
           //Our goal is to get field no of long unique(a1,a2 .....)
           key_info_ptr+= keys*8;// Why ? answer in create_key_info
@@ -2324,18 +2358,17 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
             field_list->push_back(l_item, &share->mem_root);
           }
           Item_func_hash *hash_item= new(&share->mem_root)Item_func_hash(thd, *field_list);
-          Virtual_column_info *v= new (&share->mem_root) Virtual_column_info();
+          Virtual_column_info *v= new (&share->mem_root) Virtual_column_info();
           v->expr= hash_item;
           hash_field->vcol_info= v;
-          hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
           keyinfo->user_defined_key_parts= 1;
           keyinfo->usable_key_parts= 1;
           keyinfo->ext_key_parts= 1;
           thd->free_list= temp_free_list;
         }
-        else
-          assert(0);//We cant have long unique in lower frm_versions;
+#endif
+        hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
         share->virtual_fields++;
         hash_field_used_no--;
       }
@@ -2449,7 +2482,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       if ((field->type() == MYSQL_TYPE_BLOB ||
            field->real_type() == MYSQL_TYPE_VARCHAR ||
            field->type() == MYSQL_TYPE_GEOMETRY) &&
-          !(keyinfo->flags & HA_LONG_UNIQUE_HASH))
+          keyinfo->algorithm != HA_KEY_ALG_LONG_HASH )
      {
        length_bytes= HA_KEY_BLOB_LENGTH;
      }
@@ -2514,6 +2547,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       key_part= keyinfo->key_part;
       uint key_parts= share->use_ext_keys ? keyinfo->ext_key_parts :
                       keyinfo->user_defined_key_parts;
+      if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+        key_parts++;
       for (i=0; i < key_parts; key_part++, i++)
       {
         Field *field;
@@ -2595,7 +2630,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
        }
      }
      if (field->key_length() != key_part->length &&
-         !(keyinfo->flags & HA_LONG_UNIQUE_HASH))
+         keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
      {
 #ifndef TO_BE_DELETED_ON_PRODUCTION
        if (field->type() == MYSQL_TYPE_NEWDECIMAL)
@@ -2638,7 +2673,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
                             HA_BIT_PART)) &&
          key_part->type != HA_KEYTYPE_FLOAT &&
          key_part->type == HA_KEYTYPE_DOUBLE &&
-         !(keyinfo->flags & HA_LONG_UNIQUE_HASH))
+         keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
        key_part->key_part_flag|= HA_CAN_MEMCMP;
     }
     keyinfo->usable_key_parts= usable_parts; // Filesort
@@ -3332,8 +3367,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
   const char *tmp_alias;
   bool error_reported= FALSE;
   uchar *record, *bitmaps;
-  Field **field_ptr, *field;
-  KEY *key_info;
+  Field **field_ptr;
   uint8 save_context_analysis_only= thd->lex->context_analysis_only;
   TABLE_SHARE::enum_v_keys check_set_initialized= share->check_set_initialized;
   DBUG_ENTER("open_table_from_share");
@@ -3467,7 +3501,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
   /* Fix key->name and key_part->field */
   if (share->key_parts)
   {
-    KEY *key_info, *key_info_end;
+    KEY *key_info, *key_info_end, *share_keyinfo;
     KEY_PART_INFO *key_part;
     uint n_length;
     n_length= share->keys*sizeof(KEY) + share->ext_key_parts*sizeof(KEY_PART_INFO);
@@ -3477,12 +3511,13 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
     key_part= (reinterpret_cast<KEY_PART_INFO*>(key_info+share->keys));
     memcpy(key_info, share->key_info, sizeof(*key_info)*share->keys);
-    memcpy(key_part, share->key_info[0].key_part, (sizeof(*key_part) *
+    memcpy(key_part, share->key_info + share->keys, (sizeof(*key_part) *
            share->ext_key_parts));
+    uint key_no= 0;
     for (key_info_end= key_info + share->keys ;
          key_info < key_info_end ;
-         key_info++)
+         key_info++, key_no++)
     {
       KEY_PART_INFO *key_part_end;
@@ -3491,6 +3526,36 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
       key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
                                                       key_info->user_defined_key_parts) ;
+      if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+      {
+        /*
+          Either it can be first time opening the table share or it can be second time
+          of more. The difference is when it is first time key_part[0]->fieldnr points
+          to blob/long field, but when it is 2nd time there will bw always one key_part
+          and it will point to hash_field.
+          So in the case of 2nd time we will make key_info->key_part point to start of long
+          field.
+          For example we have unique(a,b,c)
+          In first share opening key_part will point to a field
+          but in parse_vcol_defs it will be changed to point to db_row_hash field
+          in Second or later opening key_part will be pointing to db_row_hash
+          We will chnage it back to point to field a, because in this way we can create
+          vcol_info for hash field in parse_vcol_defs.
+        */
+        //Second or more time share opening
+        key_info->user_defined_key_parts= 0;
+        key_part_end= key_part;
+        while(!(share->field[key_part_end->fieldnr -1 ]->flags & LONG_UNIQUE_HASH_FIELD))
+        {
+          key_part_end++;
+          key_info->user_defined_key_parts++;
+        }
+        key_info->usable_key_parts= key_info->ext_key_parts= key_info->user_defined_key_parts;
+        key_part_end++;
+        share_keyinfo= share->key_info + key_no;
+        if (share_keyinfo->key_part->field->flags & LONG_UNIQUE_HASH_FIELD)
+          share_keyinfo->key_part-= key_info->user_defined_key_parts;
+      }
       for ( ; key_part < key_part_end; key_part++)
       {
         Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
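The comment block above describes the first-versus-later share-opening problem; the fix recovers user_defined_key_parts by scanning forward until it reaches a key part that points at the hash-marked field. A self-contained sketch of that scan, where a boolean stands in for the server's LONG_UNIQUE_HASH_FIELD flag test:

#include <cassert>

// Toy model: count user parts by walking to the hash part.
struct Part { bool points_at_hash_field; };

static unsigned count_user_parts(const Part *p)
{
  unsigned n= 0;
  while (!p[n].points_at_hash_field)  // stop at DB_ROW_HASH_ part
    n++;
  return n;                           // parts that precede the hash column
}

int main()
{
  Part parts[]= {{false}, {false}, {false}, {true}};  // a, b, c, DB_ROW_HASH_1
  assert(count_user_parts(parts) == 3);               // unique(a,b,c)
  return 0;
}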
@@ -8703,15 +8768,32 @@ int find_field_pos_in_hash(Item *hash_item, const char * field_name)
 }
 
 /*
-  find total number of field in hash_str
+  find total number of field in hash expr
 */
-int fields_in_hash_str(Item * hash_item)
+int fields_in_hash_expr(Item * hash_item)
 {
   Item_func_or_sum * temp= static_cast<Item_func_or_sum *>(hash_item);
   Item_args * t_item= static_cast<Item_args *>(temp);
   return t_item->argument_count();
 }
 
+inline void setup_keyinfo_hash(KEY *key_info)
+{
+  while(!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD))
+    key_info->key_part++;
+  key_info->user_defined_key_parts= key_info->usable_key_parts=
+    key_info->ext_key_parts= 1;
+}
+
+inline void re_setup_keyinfo_hash(KEY *key_info)
+{
+
+  uint no_of_keyparts= fields_in_hash_expr(
+    key_info->key_part->field->vcol_info->expr);
+  key_info->key_part-= no_of_keyparts;
+  key_info->user_defined_key_parts= key_info->usable_key_parts=
+    key_info->ext_key_parts= no_of_keyparts;
+}
 /**
   @brief clone of current handler.
   Creates a clone of handler used in update for
@@ -8725,7 +8807,7 @@ void create_update_handler(THD *thd, TABLE *table)
   handler *update_handler= NULL;
   for (uint i= 0; i < table->s->keys; i++)
   {
-    if (table->key_info[i].flags & HA_LONG_UNIQUE_HASH)
+    if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
     {
       update_handler= table->file->clone(table->s->normalized_path.str,
                                          thd->mem_root);
@@ -8761,67 +8843,13 @@ void delete_update_handler(THD *thd, TABLE *table)
 */
 void setup_table_hash(TABLE *table)
 {
-  /*
-    Extra parts of long unique key which are used only at server level
-    for example in key unique(a, b, c) //a b c are blob
-    extra_key_part_hash is 3
-  */
-  uint extra_key_part_hash= 0;
-  uint hash_parts= 0;
-  KEY *s_keyinfo= table->s->key_info;
-  KEY *keyinfo= table->key_info;
-  /*
-    Sometime s_keyinfo or key_info can be null. So
-    two different loop for keyinfo and s_keyinfo
-    reference test case:- main.subselect_sj2
-  */
-  if (keyinfo)
-  {
-    for (uint i= 0; i < table->s->keys; i++, keyinfo++)
-    {
-      if (keyinfo->flags & HA_LONG_UNIQUE_HASH)
-      {
-        DBUG_ASSERT(keyinfo->user_defined_key_parts ==
-                    keyinfo->ext_key_parts);
-        keyinfo->flags&= ~(HA_NOSAME | HA_LONG_UNIQUE_HASH);
-        keyinfo->algorithm= HA_KEY_ALG_UNDEF;
-        extra_key_part_hash+= keyinfo->ext_key_parts;
-        hash_parts++;
-        keyinfo->key_part= keyinfo->key_part+ keyinfo->ext_key_parts;
-        keyinfo->user_defined_key_parts= keyinfo->usable_key_parts=
-          keyinfo->ext_key_parts= 1;
-        keyinfo->key_length= keyinfo->key_part->store_length;
-      }
-    }
-    table->s->key_parts-= extra_key_part_hash;
-    table->s->key_parts+= hash_parts;
-    table->s->ext_key_parts-= extra_key_part_hash;
-  }
-  if (s_keyinfo)
-  {
-    for (uint i= 0; i < table->s->keys; i++, s_keyinfo++)
-    {
-      if (s_keyinfo->flags & HA_LONG_UNIQUE_HASH)
-      {
-        DBUG_ASSERT(s_keyinfo->user_defined_key_parts ==
-                    s_keyinfo->ext_key_parts);
-        s_keyinfo->flags&= ~(HA_NOSAME | HA_LONG_UNIQUE_HASH);
-        s_keyinfo->algorithm= HA_KEY_ALG_BTREE;
-        extra_key_part_hash+= s_keyinfo->ext_key_parts;
-        s_keyinfo->key_part= s_keyinfo->key_part+ s_keyinfo->ext_key_parts;
-        s_keyinfo->user_defined_key_parts= s_keyinfo->usable_key_parts=
-          s_keyinfo->ext_key_parts= 1;
-        s_keyinfo->key_length= s_keyinfo->key_part->store_length;
-      }
-    }
-    if (!keyinfo)
-    {
-      table->s->key_parts-= extra_key_part_hash;
-      table->s->key_parts+= hash_parts;
-      table->s->ext_key_parts-= extra_key_part_hash;
-    }
-  }
+  if (!table->s->long_unique_table)
+    return;
+  KEY *keyinfo= table->key_info;
+  for (uint i= 0; i < table->s->keys; i++, keyinfo++)
+    if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+      setup_keyinfo_hash(keyinfo);
 }
 
 /**
@@ -8830,73 +8858,12 @@ void setup_table_hash(TABLE *table)
 */
 void re_setup_table(TABLE *table)
 {
-  //extra key parts excluding hash , which needs to be added in keyparts
-  uint extra_key_parts_ex_hash= 0;
-  uint extra_hash_parts= 0; // this var for share->extra_hash_parts
-  KEY *s_keyinfo= table->s->key_info;
+  if (!table->s->long_unique_table)
+    return;
   KEY *keyinfo= table->key_info;
-  /*
-    Sometime s_keyinfo can be null so
-    two different loop for keyinfo and s_keyinfo
-    ref test case:- main.subselect_sj2
-  */
-  if (keyinfo)
-  {
-    for (uint i= 0; i < table->s->keys; i++, keyinfo++)
-    {
-      if (keyinfo->user_defined_key_parts == 1 &&
-          keyinfo->key_part->field->flags & LONG_UNIQUE_HASH_FIELD)
-      {
-        keyinfo->flags|= (HA_NOSAME | HA_LONG_UNIQUE_HASH);
-        keyinfo->algorithm= HA_KEY_ALG_LONG_HASH;
-        /* Sometimes it can happen, that we does not parsed hash_str.
-           Like when this function is called in ha_create. So we will
-           Use field from table->field rather then share->field*/
-        Item *h_item= table->field[keyinfo->key_part->fieldnr - 1]->
-          vcol_info->expr;
-        uint hash_parts= fields_in_hash_str(h_item);
-        keyinfo->key_part= keyinfo->key_part- hash_parts;
-        keyinfo->user_defined_key_parts= keyinfo->usable_key_parts=
-          keyinfo->ext_key_parts= hash_parts;
-        extra_key_parts_ex_hash+= hash_parts;
-        extra_hash_parts++;
-        keyinfo->key_length= -1;
-      }
-    }
-    table->s->key_parts-= extra_hash_parts;
-    table->s->key_parts+= extra_key_parts_ex_hash;
-    table->s->ext_key_parts+= extra_key_parts_ex_hash + extra_hash_parts;
-  }
-  if (s_keyinfo)
-  {
-    for (uint i= 0; i < table->s->keys; i++, s_keyinfo++)
-    {
-      if (s_keyinfo->user_defined_key_parts == 1 &&
-          s_keyinfo->key_part->field->flags & LONG_UNIQUE_HASH_FIELD)
-      {
-        s_keyinfo->flags|= (HA_NOSAME | HA_LONG_UNIQUE_HASH);
-        s_keyinfo->algorithm= HA_KEY_ALG_LONG_HASH;
-        extra_hash_parts++;
-        /* Sometimes it can happen, that we does not parsed hash_str.
-           Like when this function is called in ha_create. So we will
-           Use field from table->field rather then share->field*/
-        Item *h_item= table->field[s_keyinfo->key_part->fieldnr - 1]->
-          vcol_info->expr;
-        uint hash_parts= fields_in_hash_str(h_item);
-        s_keyinfo->key_part= s_keyinfo->key_part- hash_parts;
-        s_keyinfo->user_defined_key_parts= s_keyinfo->usable_key_parts=
-          s_keyinfo->ext_key_parts= hash_parts;
-        extra_key_parts_ex_hash+= hash_parts;
-        s_keyinfo->key_length= -1;
-      }
-    }
-    if (!keyinfo)
-    {
-      table->s->key_parts-= extra_hash_parts;
-      table->s->key_parts+= extra_key_parts_ex_hash;
-      table->s->ext_key_parts+= extra_key_parts_ex_hash + extra_hash_parts;
-    }
-  }
+  for (uint i= 0; i < table->s->keys; i++, keyinfo++)
+    if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+      re_setup_keyinfo_hash(keyinfo);
 }
 
 LEX_CSTRING *fk_option_name(enum_fk_option opt)
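With this commit, setup_keyinfo_hash() and re_setup_keyinfo_hash() shrink to pure pointer arithmetic over one shared key_part array: collapsing slides the pointer forward to the hash part, expanding slides it back over the user parts. A runnable toy of that sliding-window invariant; the types are illustrative, not the server's KEY:

#include <cassert>

// Toy model: one array holds {user parts..., hash part}; the key only
// moves its window start and length between the two views.
struct Part { bool is_hash; };

struct Key {
  Part *key_part;   // current window start
  unsigned parts;   // current window length
};

static void collapse(Key *k)                    // engine view: hash part only
{
  while (!k->key_part->is_hash)
    k->key_part++;
  k->parts= 1;
}

static void expand(Key *k, unsigned user_parts) // user view: real columns
{
  k->key_part-= user_parts;
  k->parts= user_parts;
}

int main()
{
  Part parts[4]= {{false}, {false}, {false}, {true}};  // a, b, c, hash
  Key k= {parts, 3};
  collapse(&k);
  assert(k.key_part == parts + 3 && k.parts == 1);
  expand(&k, 3);
  assert(k.key_part == parts && k.parts == 3);   // round-trips exactly
  return 0;
}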
So we will - Use field from table->field rather then share->field*/ - Item *h_item= table->field[keyinfo->key_part->fieldnr - 1]-> - vcol_info->expr; - uint hash_parts= fields_in_hash_str(h_item); - keyinfo->key_part= keyinfo->key_part- hash_parts; - keyinfo->user_defined_key_parts= keyinfo->usable_key_parts= - keyinfo->ext_key_parts= hash_parts; - extra_key_parts_ex_hash+= hash_parts; - extra_hash_parts++; - keyinfo->key_length= -1; - } - } - table->s->key_parts-= extra_hash_parts; - table->s->key_parts+= extra_key_parts_ex_hash; - table->s->ext_key_parts+= extra_key_parts_ex_hash + extra_hash_parts; - } - if (s_keyinfo) - { - for (uint i= 0; i < table->s->keys; i++, s_keyinfo++) - { - if (s_keyinfo->user_defined_key_parts == 1 && - s_keyinfo->key_part->field->flags & LONG_UNIQUE_HASH_FIELD) - { - s_keyinfo->flags|= (HA_NOSAME | HA_LONG_UNIQUE_HASH); - s_keyinfo->algorithm= HA_KEY_ALG_LONG_HASH; - extra_hash_parts++; - /* Sometimes it can happen, that we does not parsed hash_str. - Like when this function is called in ha_create. So we will - Use field from table->field rather then share->field*/ - Item *h_item= table->field[s_keyinfo->key_part->fieldnr - 1]-> - vcol_info->expr; - uint hash_parts= fields_in_hash_str(h_item); - s_keyinfo->key_part= s_keyinfo->key_part- hash_parts; - s_keyinfo->user_defined_key_parts= s_keyinfo->usable_key_parts= - s_keyinfo->ext_key_parts= hash_parts; - extra_key_parts_ex_hash+= hash_parts; - s_keyinfo->key_length= -1; - } - } - if (!keyinfo) - { - table->s->key_parts-= extra_hash_parts; - table->s->key_parts+= extra_key_parts_ex_hash; - table->s->ext_key_parts+= extra_key_parts_ex_hash + extra_hash_parts; - } - } + for (uint i= 0; i < table->s->keys; i++, keyinfo++) + if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) + re_setup_keyinfo_hash(keyinfo); } LEX_CSTRING *fk_option_name(enum_fk_option opt) diff --git a/sql/table.h b/sql/table.h index 54dbac4a337..9ef45ebce03 100644 --- a/sql/table.h +++ b/sql/table.h @@ -359,7 +359,11 @@ const LEX_CSTRING ha_hash_str {STRING_WITH_LEN("HASH")}; int find_field_pos_in_hash(Item *hash_item, const char * field_name); -int fields_in_hash_str(Item *hash_item); +int fields_in_hash_expr(Item *hash_item); + +inline void setup_keyinfo_hash(KEY *key_info); + +inline void re_setup_keyinfo_hash(KEY *key_info); Field * field_ptr_in_hash_str(Item *hash_item, int index); @@ -766,6 +770,7 @@ struct TABLE_SHARE bool vcols_need_refixing; bool has_update_default_function; bool can_do_row_logging; /* 1 if table supports RBR */ + bool long_unique_table; ulong table_map_id; /* for row-based replication */ diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 7a555d53558..760e0e98ac2 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -822,13 +822,10 @@ retry: if (res == -1) DBUG_RETURN(0); - else if (res == 1) - continue; element= (TDC_element*) lf_hash_search_using_hash_value(&tdc_hash, thd->tdc_hash_pins, hash_value, (uchar*) key, key_length); lf_hash_search_unpin(thd->tdc_hash_pins); - DBUG_ASSERT(element); if (!(share= alloc_table_share(tl->db.str, tl->table_name.str, key, key_length))) { diff --git a/sql/unireg.cc b/sql/unireg.cc index 5230fb44081..1134d812ff1 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -296,7 +296,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table, uint e_unique_hash_extra_parts= 0; for (i= 0; i < keys; i++) if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH) - e_unique_hash_extra_parts+= key_info[i].user_defined_key_parts - 1; + e_unique_hash_extra_parts++; key_buff_length= 
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 5230fb44081..1134d812ff1 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -296,7 +296,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
   uint e_unique_hash_extra_parts= 0;
   for (i= 0; i < keys; i++)
     if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
-      e_unique_hash_extra_parts+= key_info[i].user_defined_key_parts - 1;
+      e_unique_hash_extra_parts++;
   key_buff_length= uint4korr(fileinfo+47);
 
   frm.length= FRM_HEADER_SIZE;                  // fileinfo;
@@ -592,7 +592,7 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
     }
   }
 
-  key_parts-= e_unique_hash_extra_parts;
+  key_parts+= e_unique_hash_extra_parts;
   if (key_count > 127 || key_parts > 127)
   {
     keybuff[0]= (key_count & 0x7f) | 0x80;
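The unireg.cc change flips the .frm part accounting to match the new layout: each long-hash key now serializes its user-defined parts plus exactly one hidden hash part, so the extra-part counter is incremented once per such key and added to key_parts (previously user_defined_key_parts - 1 was accumulated and subtracted). A toy check of the arithmetic, with illustrative types:

#include <cassert>

// Toy model: serialized part count = user parts + one hash part per
// long-unique key.
struct Key { bool long_hash; unsigned user_parts; };

int main()
{
  Key keys[]= {{true, 3}, {false, 2}, {true, 1}};
  unsigned key_parts= 3 + 2 + 1;      // user-defined parts of all keys
  unsigned extra= 0;
  for (const Key &k : keys)
    if (k.long_hash)
      extra++;                        // one hash key_part per long key
  key_parts+= extra;                  // was subtracted before this commit
  assert(key_parts == 8);
  return 0;
}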