author    Sergei Golubchik <serg@mariadb.org>    2019-02-22 18:58:14 +0100
committer Sergei Golubchik <serg@mariadb.org>    2019-02-27 23:15:28 -0500
commit    387b690eab51035112e1488f82209bc22643ff2f (patch)
tree      b517bb350b3a49cb902dfc58240886d7b3e8e193 /sql
parent    72ee180512553a40f92ce17c2ed855795d166f62 (diff)
download  mariadb-git-387b690eab51035112e1488f82209bc22643ff2f.tar.gz
cleanup: cosmetic fixes
Diffstat (limited to 'sql')
-rw-r--r--  sql/handler.cc    | 30
-rw-r--r--  sql/sql_insert.cc |  6
-rw-r--r--  sql/sql_table.cc  | 25
-rw-r--r--  sql/table.cc      | 42
4 files changed, 45 insertions(+), 58 deletions(-)
diff --git a/sql/handler.cc b/sql/handler.cc
index 47cf6aab19f..418d600cb1f 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -4288,8 +4288,7 @@ err:
*/
uint handler::get_dup_key(int error)
{
- DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
- m_lock_type != F_UNLCK);
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || m_lock_type != F_UNLCK);
DBUG_ENTER("handler::get_dup_key");
if (table->s->long_unique_table && table->file->errkey < table->s->keys)
DBUG_RETURN(table->file->errkey);
@@ -6488,18 +6487,19 @@ static int wsrep_after_row(THD *thd)
}
#endif /* WITH_WSREP */
-static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_rec,
- uint key_no)
+static int check_duplicate_long_entry_key(TABLE *table, handler *h,
+ uchar *new_rec, uint key_no)
{
Field *hash_field;
int result, error= 0;
KEY *key_info= table->key_info + key_no;
hash_field= key_info->key_part->field;
- DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
- key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
- || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
+ DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
+ key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
+ || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
+
if (hash_field->is_real_null())
return 0;
@@ -6507,7 +6507,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_r
if (!table->check_unique_buf)
table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
- table->s->reclength);
+ table->s->reclength);
result= h->ha_index_init(key_no, 0);
if (result)
@@ -6551,20 +6551,14 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h, uchar *new_r
while (!is_same && !(result= table->file->ha_index_next_same(table->check_unique_buf,
ptr, key_info->key_length)));
if (is_same)
- {
- table->file->errkey= key_no;
error= HA_ERR_FOUND_DUPP_KEY;
- goto exit;
- }
- else
- goto exit;
+ goto exit;
}
if (result == HA_ERR_LOCK_WAIT_TIMEOUT)
- {
- table->file->errkey= key_no;
error= HA_ERR_LOCK_WAIT_TIMEOUT;
- }
- exit:
+exit:
+ if (error)
+ table->file->errkey= key_no;
h->ha_index_end();
return error;
}
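The handler.cc hunks above do slightly more than reflow lines: both error paths in check_duplicate_long_entry_key used to assign table->file->errkey themselves, and the refactor moves that attribution to a single "if (error)" at the shared exit label. A minimal, self-contained C++ sketch of the same pattern follows; KeyScan and find_row are hypothetical names, not MariaDB code, and the error constants are illustrative stand-ins for HA_ERR_FOUND_DUPP_KEY and HA_ERR_LOCK_WAIT_TIMEOUT.

#include <cstdio>

enum { OK = 0, ERR_DUP_KEY = 121, ERR_LOCK_TIMEOUT = 146 };

struct KeyScan {
  int errkey = -1;                 // which key caused the last error
  int find_row(bool dup, bool timeout, int key_no) {
    int error = OK;
    if (dup)
      error = ERR_DUP_KEY;         // no errkey assignment here...
    else if (timeout)
      error = ERR_LOCK_TIMEOUT;    // ...and none here either
    if (error)
      errkey = key_no;             // one assignment covers every error path
    return error;
  }
};

int main() {
  KeyScan s;
  int err = s.find_row(true, false, 2);
  std::printf("error=%d errkey=%d\n", err, s.errkey); // error=121 errkey=2
}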
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index ba01fa57670..90170f5a132 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1744,10 +1744,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
was used. This ensures that we don't get a problem when the
whole range of the key has been used.
*/
- if (info->handle_duplicates == DUP_REPLACE &&
- table->next_number_field &&
- key_nr == table->s->next_number_index &&
- (insert_id_for_cur_row > 0))
+ if (info->handle_duplicates == DUP_REPLACE && table->next_number_field &&
+ key_nr == table->s->next_number_index && insert_id_for_cur_row > 0)
goto err;
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index edd0b95fca0..412aba684d2 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -4161,29 +4161,28 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
unique_key=1;
key_info->key_length=(uint16) key_length;
if (key_length > max_key_length && key->type != Key::FULLTEXT &&
- !is_hash_field_needed)
+ !is_hash_field_needed)
{
- my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
+ my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
DBUG_RETURN(TRUE);
}
- if (is_hash_field_needed &&
- key_info->algorithm != HA_KEY_ALG_UNDEF &&
- key_info->algorithm != HA_KEY_ALG_HASH )
+ if (is_hash_field_needed && key_info->algorithm != HA_KEY_ALG_UNDEF &&
+ key_info->algorithm != HA_KEY_ALG_HASH )
{
- my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
- DBUG_RETURN(TRUE);
+ my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
+ DBUG_RETURN(TRUE);
}
if (is_hash_field_needed ||
- (key_info->algorithm == HA_KEY_ALG_HASH &&
- key_info->flags & HA_NOSAME &&
- !(file->ha_table_flags() & HA_CAN_HASH_KEYS ) &&
- file->ha_table_flags() & HA_CAN_VIRTUAL_COLUMNS))
+ (key_info->algorithm == HA_KEY_ALG_HASH &&
+ key_info->flags & HA_NOSAME &&
+ !(file->ha_table_flags() & HA_CAN_HASH_KEYS ) &&
+ file->ha_table_flags() & HA_CAN_VIRTUAL_COLUMNS))
{
Create_field *hash_fld= add_hash_field(thd, &alter_info->create_list,
- key_info);
+ key_info);
if (!hash_fld)
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(TRUE);
hash_fld->offset= record_offset;
hash_fld->charset= create_info->default_table_charset;
record_offset+= hash_fld->pack_length;
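The reindented condition above is the rule mysql_prepare_create_table applies when deciding to add a hidden hash field. Restated as a hedged, self-contained sketch (EngineFlags and needs_hash_field are hypothetical; the flag semantics follow the hunk, where the capability bits correspond to HA_CAN_HASH_KEYS and HA_CAN_VIRTUAL_COLUMNS):

#include <cstdio>

struct EngineFlags { bool can_hash_keys; bool can_virtual_columns; };

// Mirrors the condition in the hunk: a hash field is needed when the key is
// too long for a direct index, or for a unique HASH key on an engine that
// lacks native hash keys but supports virtual columns.
static bool needs_hash_field(bool too_long, bool hash_alg, bool unique,
                             EngineFlags f) {
  return too_long ||
         (hash_alg && unique && !f.can_hash_keys && f.can_virtual_columns);
}

int main() {
  EngineFlags engine{false, true};  // assumed capabilities
  std::printf("%d\n", needs_hash_field(false, true, true, engine)); // 1
}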
diff --git a/sql/table.cc b/sql/table.cc
index 3ebdf19c4ba..de8fe10fe30 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -712,9 +712,9 @@ err_not_open:
static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
uint keys, KEY *keyinfo,
- uint new_frm_ver, uint &ext_key_parts,
+ uint new_frm_ver, uint *ext_key_parts,
TABLE_SHARE *share, uint len,
- KEY *first_keyinfo, char* &keynames)
+ KEY *first_keyinfo, char** keynames)
{
uint i, j, n_length;
KEY_PART_INFO *key_part= NULL;
@@ -770,8 +770,8 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
if (i == 0)
{
- ext_key_parts+= (share->use_ext_keys ? first_keyinfo->user_defined_key_parts*(keys-1) : 0);
- n_length=keys * sizeof(KEY) + ext_key_parts * sizeof(KEY_PART_INFO);
+ (*ext_key_parts)+= (share->use_ext_keys ? first_keyinfo->user_defined_key_parts*(keys-1) : 0);
+ n_length=keys * sizeof(KEY) + *ext_key_parts * sizeof(KEY_PART_INFO);
if (!(keyinfo= (KEY*) alloc_root(&share->mem_root,
n_length + len)))
return 1;
@@ -780,7 +780,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
key_part= reinterpret_cast<KEY_PART_INFO*> (keyinfo + keys);
if (!(rec_per_key= (ulong*) alloc_root(&share->mem_root,
- sizeof(ulong) * ext_key_parts)))
+ sizeof(ulong) * *ext_key_parts)))
return 1;
first_key_part= key_part;
first_key_parts= first_keyinfo->user_defined_key_parts;
@@ -825,8 +825,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
{
keyinfo->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
- //Storing key hash
- key_part++;
+ key_part++; // reserved for the hash value
}
/*
@@ -865,8 +864,8 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
share->ext_key_parts++;
share->ext_key_parts+= keyinfo->ext_key_parts;
}
- keynames=(char*) key_part;
- strpos+= strnmov(keynames, (char *) strpos, frm_image_end - strpos) - keynames;
+ *keynames=(char*) key_part;
+ strpos+= strnmov(*keynames, (char *) strpos, frm_image_end - strpos) - *keynames;
if (*strpos++) // key names are \0-terminated
return 1;
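The signature hunk above swaps the reference out-parameters "uint &ext_key_parts" and "char* &keynames" for pointers, so every call site in init_from_binary_frm_image (see the later hunks) now spells out &ext_key_parts and &keynames. A tiny sketch of why that reads better; grow_by_ref and grow_by_ptr are hypothetical:

#include <cassert>

static void grow_by_ref(unsigned &n) { n += 4; }   // old style: uint &ext_key_parts
static void grow_by_ptr(unsigned *n) { *n += 4; }  // new style: uint *ext_key_parts

int main() {
  unsigned parts = 2;
  grow_by_ref(parts);   // the call site gives no hint that parts is written
  grow_by_ptr(&parts);  // the & flags the out-parameter to the reader
  assert(parts == 10);
}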
@@ -1189,17 +1188,18 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
{
key=table->key_info + key_index;
parts= key->user_defined_key_parts;
- if (key->algorithm == HA_KEY_ALG_LONG_HASH &&
- key->key_part[key->user_defined_key_parts].fieldnr == field->field_index+ 1)
- break;
+ if (key->key_part[parts].fieldnr == field->field_index + 1)
+ break;
}
+ if (key->algorithm != HA_KEY_ALG_LONG_HASH)
+ goto end;
KEY_PART_INFO *keypart;
for (uint i=0; i < parts; i++)
{
keypart= key->key_part + i;
if (!keypart->length)
{
- list_item= new(mem_root)Item_field(thd, keypart->field);
+ list_item= new (mem_root) Item_field(thd, keypart->field);
}
else
{
@@ -1581,7 +1581,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
DBUG_ENTER("TABLE_SHARE::init_from_binary_frm_image");
keyinfo= &first_keyinfo;
- share->ext_key_parts= 0;
thd->mem_root= &share->mem_root;
if (write && write_frm_image(frm_image, frm_length))
@@ -1821,8 +1820,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->set_use_ext_keys_flag(plugin_hton(se_plugin)->flags & HTON_SUPPORTS_EXTENDED_KEYS);
if (create_key_infos(disk_buff + 6, frm_image_end, keys, keyinfo,
- new_frm_ver, ext_key_parts,
- share, len, &first_keyinfo, keynames))
+ new_frm_ver, &ext_key_parts,
+ share, len, &first_keyinfo, &keynames))
goto err;
if (next_chunk + 5 < buff_end)
@@ -1914,14 +1913,14 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
else
{
if (create_key_infos(disk_buff + 6, frm_image_end, keys, keyinfo,
- new_frm_ver, ext_key_parts,
- share, len, &first_keyinfo, keynames))
+ new_frm_ver, &ext_key_parts,
+ share, len, &first_keyinfo, &keynames))
goto err;
}
share->key_block_size= uint2korr(frm_image+62);
keyinfo= share->key_info;
for (uint i= 0; i < share->keys; i++, keyinfo++)
- if(keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
hash_fields++;
if (share->db_plugin && !plugin_equals(share->db_plugin, se_plugin))
@@ -2436,10 +2435,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint offset= share->reclength - HA_HASH_FIELD_LENGTH * hash_fields;
for (uint i= 0; i < share->keys; i++, keyinfo++)
{
- /*
- We need set value in hash key_part
- */
-
+ /* We need set value in hash key_part */
if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
{
share->long_unique_table= 1;
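For context on the final hunk: the hidden hash fields that back HA_KEY_ALG_LONG_HASH keys sit at the tail of the record, so their offsets start at reclength - HA_HASH_FIELD_LENGTH * hash_fields and advance by one field length per hash key. A hedged sketch of that arithmetic, with all constants assumed for illustration:

#include <cstdio>

int main() {
  const unsigned reclength   = 128; // total row buffer length (illustrative)
  const unsigned field_len   = 8;   // like HA_HASH_FIELD_LENGTH (assumed)
  const unsigned hash_fields = 3;   // keys using HA_KEY_ALG_LONG_HASH
  // Hidden hash fields occupy the tail of the record, front to back.
  unsigned offset = reclength - field_len * hash_fields;
  for (unsigned i = 0; i < hash_fields; i++, offset += field_len)
    std::printf("hash field %u at offset %u\n", i, offset); // 104, 112, 120
}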