summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSachin <sachin.setiya@mariadb.com>2019-01-03 16:42:12 +0530
committerSachin <sachin.setiya@mariadb.com>2019-01-03 16:42:12 +0530
commitefd87bebae953fbf955de0723b106f623ce6312d (patch)
tree7124275bbdd5e1cc62e8120c4699d241f6b97d8f
parent3e51332d439e364f98e658dd3ff6bc83b67d5eea (diff)
downloadmariadb-git-efd87bebae953fbf955de0723b106f623ce6312d.tar.gz
Random fixes
-rw-r--r--mysql-test/main/long_unique.result5
-rw-r--r--mysql-test/main/long_unique.test5
-rw-r--r--mysql-test/main/type_blob.result2
-rw-r--r--mysql-test/main/type_blob.test4
-rw-r--r--sql/handler.cc76
-rw-r--r--sql/item_func.cc1
-rw-r--r--sql/sql_update.cc2
-rw-r--r--sql/sql_yacc.yy1
-rw-r--r--sql/table.cc20
-rw-r--r--sql/table.h4
10 files changed, 49 insertions, 71 deletions
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
index 994f1b4c3f9..47d3ec988ba 100644
--- a/mysql-test/main/long_unique.result
+++ b/mysql-test/main/long_unique.result
@@ -7,6 +7,11 @@ set @allowed_packet= @@max_allowed_packet;
#table with single long blob column;
create table t1(a blob unique );
insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+#blob with primary key not allowed
+create table t2(a blob,primary key(a(10000)));
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+create table t3(a varchar(10000) primary key);
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
insert into t1 values(2);
ERROR 23000: Duplicate entry '2' for key 'a'
#table structure;
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
index 52e91d1f182..41c18178ae5 100644
--- a/mysql-test/main/long_unique.test
+++ b/mysql-test/main/long_unique.test
@@ -9,6 +9,11 @@ set @allowed_packet= @@max_allowed_packet;
--echo #table with single long blob column;
create table t1(a blob unique );
insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+--echo #blob with primary key not allowed
+--error ER_TOO_LONG_KEY
+create table t2(a blob,primary key(a(10000)));
+--error ER_TOO_LONG_KEY
+create table t3(a varchar(10000) primary key);
--error ER_DUP_ENTRY
insert into t1 values(2);
diff --git a/mysql-test/main/type_blob.result b/mysql-test/main/type_blob.result
index 3c99366168c..5f9fe0131ab 100644
--- a/mysql-test/main/type_blob.result
+++ b/mysql-test/main/type_blob.result
@@ -370,7 +370,7 @@ a 1
hello 1
drop table t1;
create table t1 (a text, unique (a(2100)));
-ERROR 42000: Specified key was too long; max key length is 1000 bytes
+drop table t1;
create table t1 (a text, key (a(2100)));
Warnings:
Note 1071 Specified key was too long; max key length is 1000 bytes
diff --git a/mysql-test/main/type_blob.test b/mysql-test/main/type_blob.test
index 2c74d4ea241..e60de526300 100644
--- a/mysql-test/main/type_blob.test
+++ b/mysql-test/main/type_blob.test
@@ -131,8 +131,10 @@ select c,count(*) from t1 group by c;
select d,count(*) from t1 group by d;
drop table t1;
--- error 1071
+#-- error 1071
+#After index on long unique this will work
create table t1 (a text, unique (a(2100))); # should give an error
+drop table t1;
create table t1 (a text, key (a(2100))); # key is auto-truncated
show create table t1;
drop table t1;
diff --git a/sql/handler.cc b/sql/handler.cc
index 1d10f2eeffb..22e9a38f268 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2710,7 +2710,6 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
test_if_locked));
table= table_arg;
- //setup_table_hash(table);
DBUG_ASSERT(table->s == table_share);
DBUG_ASSERT(m_lock_type == F_UNLCK);
DBUG_PRINT("info", ("old m_lock_type: %d F_UNLCK %d", m_lock_type, F_UNLCK));
@@ -2727,7 +2726,6 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
error=open(name,O_RDONLY,test_if_locked);
}
}
- //re_setup_table(table);
if (unlikely(error))
{
my_errno= error; /* Safeguard */
@@ -4641,9 +4639,7 @@ handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info_arg)
{
DBUG_ASSERT(m_lock_type == F_UNLCK);
mark_trx_read_write();
- //setup_table_hash(form);
int error= create(name, form, info_arg);
- //re_setup_table(form);
if (!error &&
!(info_arg->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER)))
mysql_audit_create_table(form);
@@ -6302,13 +6298,13 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_r
if (!result)
{
bool is_same;
- uint arg_count= fields_in_hash_keyinfo(key_info);
+ Field * t_field;
Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr;
Item ** arguments= temp->arguments();
+ uint arg_count= temp->argument_count();
do
{
long diff= table->check_unique_buf - new_rec;
- Field * t_field;
is_same= true;
for (uint j=0; j < arg_count; j++)
{
@@ -6325,15 +6321,8 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_r
{
Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name()));
- //item_data= fnc->val_str(&tmp1);
DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
- // field_data= t_field->val_str(&tmp2);
- // if (my_strnncoll(t_field->charset(),(const uchar *)item_data->ptr(),
- // item_data->length(),
- // (const uchar *)field_data.ptr(),
- // item_data->length()))
- // return 0;
uint length= fnc->arguments()[1]->val_int();
if (t_field->cmp_max(t_field->ptr, t_field->ptr + diff, length))
is_same= false;
@@ -6396,56 +6385,44 @@ static int check_duplicate_long_entries(TABLE *table, handler *h, uchar *new_rec
static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
{
Field **f, *field;
- Item *h_item;
+ int key_parts;
int error= 0;
+ KEY *keyinfo;
+ KEY_PART_INFO *keypart;
bool is_update_handler_null= false;
/*
Here we are comparing whether new record and old record are same
with respect to fields in hash_str
*/
long reclength= table->record[1]-table->record[0];
+ if (!table->update_handler)
+ clone_handler_for_update(current_thd, table);
for (uint i= 0; i < table->s->keys; i++)
{
- if (table->key_info[i].user_defined_key_parts == 1 &&
- table->key_info[i].key_part->field->flags & LONG_UNIQUE_HASH_FIELD)
+ keyinfo= table->key_info + i;
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
{
- /*
- Currently mysql_update is pacthed so that it will automatically set the
- Update handler and then free it but ha_update_row is used in many function (
- like in case of reinsert) Instead of patching them all here we check is
- update_handler is null then set it And then set it null again
- */
- if (!table->update_handler)
- {
- create_update_handler(current_thd, table);
- is_update_handler_null= true;
- }
- h_item= table->key_info[i].key_part->field->vcol_info->expr;
- for (f= table->field; f && (field= *f); f++)
+ key_parts= fields_in_hash_keyinfo(keyinfo);
+ keypart= keyinfo->key_part - key_parts;
+ for (uint j= 0; j < key_parts; j++, keypart++)
{
- if ( find_field_pos_in_hash(h_item, field->field_name.str) != -1)
+ field= keypart->field;
+      /* Compare the fields; if they differ, then check for duplicates */
+ if(field->cmp_binary_offset(reclength))
{
- /* Compare fields if they are different then check for duplicates*/
- if(field->cmp_binary_offset(reclength))
- {
- if((error= check_duplicate_long_entry_key(table, table->update_handler,
- new_rec, i)))
- goto exit;
- /*
- break beacuse check_duplicate_long_entrie_key will
- take care of remaning fields
- */
- break;
- }
+ if((error= check_duplicate_long_entry_key(table, table->update_handler,
+ new_rec, i)))
+ goto exit;
+          /*
+            break because check_duplicate_long_entry_key will
+            take care of the remaining fields
+          */
+ break;
}
}
}
}
exit:
- if (is_update_handler_null)
- {
- delete_update_handler(current_thd, table);
- }
return error;
}
@@ -6469,7 +6446,6 @@ int handler::ha_write_row(uchar *buf)
{ error= write_row(buf); })
MYSQL_INSERT_ROW_DONE(error);
- //re_setup_table(table);
if (likely(!error) && !row_already_logged)
{
rows_changed++;
@@ -6494,13 +6470,12 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
DBUG_ASSERT(new_data == table->record[0]);
DBUG_ASSERT(old_data == table->record[1]);
- // setup_table_hash(table);
MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
increment_statistics(&SSV::ha_update_count);
- if ((error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
+ if (table->s->long_unique_table &&
+ (error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
{
- //re_setup_table(table);
return error;
}
@@ -6513,7 +6488,6 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
rows_changed++;
error= binlog_log_row(table, old_data, new_data, log_func);
}
- //re_setup_table(table);
return error;
}
diff --git a/sql/item_func.cc b/sql/item_func.cc
index e336d6c51e1..1188ee63d85 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -1727,7 +1727,6 @@ inline void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str)
cs->coll->hash_sort(cs, l, sizeof(l), &nr1, &nr2);
cs= str->charset();
cs->coll->hash_sort(cs, (uchar *)str->ptr(), str->length(), &nr1, &nr2);
- sql_print_information("setiya %lu, %s", nr1, str->ptr());
}
longlong Item_func_hash::val_int()
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index b5395c1afa9..2bce61903ca 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -861,7 +861,6 @@ update_begin:
*/
can_compare_record= records_are_comparable(table);
explain->tracker.on_scan_init();
- create_update_handler(thd, table);
THD_STAGE_INFO(thd, stage_updating);
while (!(error=info.read_record()) && !thd->killed)
@@ -2094,7 +2093,6 @@ multi_update::initialize_tables(JOIN *join)
if (ignore)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- create_update_handler(join->thd, table);
if (table == main_table) // First table in join
{
if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 614ea582224..2177ec22351 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -7722,6 +7722,7 @@ fulltext_key_opt:
btree_or_rtree:
BTREE_SYM { $$= HA_KEY_ALG_BTREE; }
| RTREE_SYM { $$= HA_KEY_ALG_RTREE; }
+ | HASH_SYM { $$= HA_KEY_ALG_HASH; }
;
key_list:
diff --git a/sql/table.cc b/sql/table.cc
index bae9b4bb1f4..e2167d5a8c4 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1224,6 +1224,7 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
}
if (field->has_default_now_unireg_check())
{
+ expr_str.length(parse_vcol_keyword.length);
expr_str.append(STRING_WITH_LEN("current_timestamp("));
expr_str.append_ulonglong(field->decimals());
expr_str.append(')');
@@ -8799,20 +8800,15 @@ inline void re_setup_keyinfo_hash(KEY *key_info)
@param table Table Object
@return handler object
*/
-void create_update_handler(THD *thd, TABLE *table)
+void clone_handler_for_update(THD *thd, TABLE *table)
{
handler *update_handler= NULL;
- for (uint i= 0; i < table->s->keys; i++)
- {
- if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
- {
- update_handler= table->file->clone(table->s->normalized_path.str,
- thd->mem_root);
- update_handler->ha_external_lock(thd, F_RDLCK);
- table->update_handler= update_handler;
- return;
- }
- }
+ if (!table->s->long_unique_table)
+ return;
+ update_handler= table->file->clone(table->s->normalized_path.str,
+ thd->mem_root);
+ update_handler->ha_external_lock(thd, F_RDLCK);
+ table->update_handler= update_handler;
return;
}
diff --git a/sql/table.h b/sql/table.h
index f4ef7aeb8c2..7de6e113613 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -362,9 +362,7 @@ inline void re_setup_keyinfo_hash(KEY *key_info);
Field * field_ptr_in_hash_str(Item *hash_item, int index);
-inline void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str);
-
-void create_update_handler(THD *thd, TABLE *table);
+void clone_handler_for_update(THD *thd, TABLE *table);
void delete_update_handler(THD *thd, TABLE *table);