author     Michael Widenius <monty@askmonty.org>    2010-11-27 17:29:52 +0200
committer  Michael Widenius <monty@askmonty.org>    2010-11-27 17:29:52 +0200
commit     7c56b08216d5ff709d10e4ca662d0215cd823c09 (patch)
tree       c5462600865b2abc8ffcf0751dcf0167c3c8a46a
parent     ab2f3651161e8f9bbc7256a806aa4082f727d8f9 (diff)
Added TRASH() to table->record[0] to find out if we access not initialized data.
- Changed Cached_item_field to not copy data for fields with NULL value
- In key_copy() and key_restore() don't copy data for fields with NULL value
Fixed code to avoid valgrind warnings
- Use c_ptr_safe() instead of c_ptr()
Removed "QQ" from comments (QQ was meant to be used for internal comments that should be removed before pushing)
Fixed wrong alias used (from previous patch)

sql/event_db_repository.cc:
  Update testing if event table is valid (to avoid valgrind errors)
sql/ha_partition.cc:
  m_ordered_scan_ongoing was not initialized
  Reset null bits in record to avoid valgrind errors
sql/handler.h:
  Added flag for when the storage engine will write the row verbatim and the row contains varchar or null fields (in which case we must clear the row to avoid valgrind warnings)
sql/item_buff.cc:
  Changed Cached_item_field to not copy data for fields with NULL value (optimization; also avoids valgrind warnings)
sql/item_func.cc:
  c_ptr() -> c_ptr_safe()
sql/key.cc:
  In key_copy() and key_restore() don't copy data for fields with NULL value
sql/opt_range.cc:
  c_ptr() -> c_ptr_safe()
sql/sql_base.cc:
  Added TRASH() to table->record[0] to find out if we access not initialized data.
  Initialize null_bytes to:
  - Get consistent tests
  - Ensure we don't get valgrind warnings for null fields (as we may only update a couple of bits in a byte)
sql/sql_class.cc:
  Removed "QQ" from comments
sql/sql_insert.cc:
  Initialize row to default values if we are using valgrind and the row will be copied verbatim to disk by the storage engine.
sql/sql_load.cc:
  QQ -> TODO
sql/sql_parse.cc:
  Removed old unused code marked QQ and within "#ifdef REMOVED"
sql/sql_select.cc:
  QQ -> TODO
  Initialize some variables that were used uninitialized
  Added DBUG_ASSERT() to find out if thd was not properly initialized for sub queries
sql/sql_test.cc:
  Fixed format for printing to DBUG file
  Fixed wrong alias used (from previous patch)
sql/sql_trigger.h:
  QQ -> TODO
sql/table.cc:
  QQ -> TODO
storage/maria/ha_maria.cc:
  Mark table with HA_RECORD_MUST_BE_CLEAN_ON_WRITE if the row is written verbatim to disk and contains varchar or null fields.
storage/maria/ma_open.c:
  Added flags for whether the table has varchar or null fields
storage/maria/maria_def.h:
  Added flags for whether the table has varchar or null fields
storage/myisam/ha_myisam.cc:
  Mark table with HA_RECORD_MUST_BE_CLEAN_ON_WRITE if the row is written verbatim to disk and contains varchar or null fields.
storage/myisam/mi_open.c:
  Fixed memory overrun bug when using fulltext keys
storage/xtradb/row/row0sel.c:
  Removed initialization of null bits (not needed anymore)
-rw-r--r--   sql/event_db_repository.cc      6
-rw-r--r--   sql/ha_partition.cc             7
-rw-r--r--   sql/handler.h                   1
-rw-r--r--   sql/item_buff.cc               14
-rw-r--r--   sql/item_func.cc                2
-rw-r--r--   sql/key.cc                     33
-rw-r--r--   sql/opt_range.cc                2
-rw-r--r--   sql/sql_base.cc                12
-rw-r--r--   sql/sql_class.cc                1
-rw-r--r--   sql/sql_insert.cc              10
-rw-r--r--   sql/sql_load.cc                 2
-rw-r--r--   sql/sql_parse.cc               48
-rw-r--r--   sql/sql_select.cc               8
-rw-r--r--   sql/sql_test.cc                12
-rw-r--r--   sql/sql_trigger.h               2
-rw-r--r--   sql/table.cc                    2
-rw-r--r--   storage/maria/ha_maria.cc      10
-rw-r--r--   storage/maria/ma_open.c         4
-rw-r--r--   storage/maria/maria_def.h       2
-rw-r--r--   storage/myisam/ha_myisam.cc    10
-rw-r--r--   storage/myisam/mi_open.c        9
-rw-r--r--   storage/xtradb/row/row0sel.c    6
22 files changed, 112 insertions(+), 91 deletions(-)
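Before the per-file hunks, a minimal standalone sketch of the record-poisoning idea from the commit message: fill the row buffer with a recognizable garbage pattern so that any code path reading a field before storing it produces a deterministic bogus value instead of silently working by accident. The TRASH_FILL constant and poison_record() helper below are illustrative assumptions; MariaDB's real TRASH() macro lives in its own headers and may also use valgrind client requests.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    /* Illustrative poison value; the real TRASH() macro may use another pattern. */
    static const unsigned char TRASH_FILL= 0xA5;

    /*
      Poison the whole record, then force the null-indicator bytes to a known
      state (all columns "NULL") so later single-bit updates never mix defined
      and undefined bits within one byte.
    */
    static void poison_record(unsigned char *record, size_t reclength,
                              size_t null_bytes)
    {
      memset(record, TRASH_FILL, reclength);  /* like TRASH(record, reclength)        */
      memset(record, 0xFF, null_bytes);       /* like bfill(record, null_bytes, 255)  */
    }

    int main()
    {
      unsigned char record[32];
      poison_record(record, sizeof(record), 2);

      /* A field read before any store now returns the poison pattern. */
      uint32_t bogus;
      memcpy(&bogus, record + 4, sizeof(bogus));
      printf("uninitialized field reads as 0x%08x\n", bogus);
      return 0;
    }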
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index c1a64446c12..13a2f8cf7c2 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -896,7 +896,11 @@ Event_db_repository::find_named_event(LEX_STRING db, LEX_STRING name,
same fields.
*/
if (db.length > table->field[ET_FIELD_DB]->field_length ||
- name.length > table->field[ET_FIELD_NAME]->field_length)
+ name.length > table->field[ET_FIELD_NAME]->field_length ||
+ table->s->keys == 0 ||
+ table->key_info[0].key_parts != 2 ||
+ table->key_info[0].key_part[0].fieldnr != ET_FIELD_DB+1 ||
+ table->key_info[0].key_part[1].fieldnr != ET_FIELD_NAME+1)
DBUG_RETURN(TRUE);
table->field[ET_FIELD_DB]->store(db.str, db.length, &my_charset_bin);
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 78dd353c09d..fd0d65a4e73 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -3886,6 +3886,7 @@ int ha_partition::index_init(uint inx, bool sorted)
m_part_spec.start_part= NO_CURRENT_PART_ID;
m_start_key.length= 0;
m_ordered= sorted;
+ m_ordered_scan_ongoing= FALSE;
m_curr_key_info[0]= table->key_info+inx;
if (m_pkey_is_clustered && table->s->primary_key != MAX_KEY)
{
@@ -4709,6 +4710,12 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
int error;
handler *file= m_file[i];
+ /*
+ Reset null bits (to avoid valgrind warnings) and give a default
+ value to null fields that are not read.
+ */
+ bfill(rec_buf_ptr, table->s->null_bytes, 255);
+
switch (m_index_scan_type) {
case partition_index_read:
error= file->ha_index_read_map(rec_buf_ptr,
diff --git a/sql/handler.h b/sql/handler.h
index f76580b7082..00d6347eb78 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -139,6 +139,7 @@
#define HA_HAS_NEW_CHECKSUM (LL(1) << 36)
#define HA_MRR_CANT_SORT (LL(1) << 37)
+#define HA_RECORD_MUST_BE_CLEAN_ON_WRITE (LL(1) << 38)
/*
Set of all binlog flags. Currently only contain the capabilities
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index 7d025ef518b..b15083f54a2 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -121,14 +121,20 @@ bool Cached_item_int::cmp(void)
bool Cached_item_field::cmp(void)
{
- bool tmp= field->cmp(buff) != 0; // This is not a blob!
- if (tmp)
- field->get_image(buff,length,field->charset());
+ bool tmp= FALSE; // Value is identical
+ /* Note that field can't be a blob here ! */
if (null_value != field->is_null())
{
null_value= !null_value;
- tmp=TRUE;
+ tmp= TRUE; // Value has changed
}
+
+ /*
+ If the value is not null and it changed (either because the NULL flag
+ flipped or because the data itself changed), copy the new value to buffer.
+ */
+ if (! null_value && (tmp || (tmp= (field->cmp(buff) != 0))))
+ field->get_image(buff,length,field->charset());
return tmp;
}
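A hedged, self-contained restatement of the cmp() logic above: detect a flip of the NULL flag first, and only read or copy the field bytes when the current value is not NULL. The CachedValue struct and compare_and_cache() names are made up for illustration; they are not the server's Cached_item_field API.

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct CachedValue
    {
      bool null_value;          /* was the cached value NULL?        */
      unsigned char buff[16];   /* cached image of the column bytes  */
    };

    /*
      Returns true when the value changed since the last call, copying the new
      image into the cache only when the new value is not NULL, mirroring the
      patched Cached_item_field::cmp().
    */
    static bool compare_and_cache(CachedValue *cache, bool field_is_null,
                                  const unsigned char *field_image, size_t length)
    {
      bool changed= false;
      if (cache->null_value != field_is_null)
      {
        cache->null_value= !cache->null_value;          /* NULL flag flipped */
        changed= true;
      }
      /*
        Only touch the data bytes for non-NULL values; a NULL field's image may
        be uninitialized, which is exactly what triggered the valgrind warnings.
      */
      if (!cache->null_value &&
          (changed || (changed= memcmp(cache->buff, field_image, length) != 0)))
        memcpy(cache->buff, field_image, length);
      return changed;
    }

    int main()
    {
      CachedValue cache= { true, {0} };
      unsigned char v1[4]= {1, 2, 3, 4};
      printf("%d\n", compare_and_cache(&cache, false, v1, 4));  /* 1: NULL -> value */
      printf("%d\n", compare_and_cache(&cache, false, v1, 4));  /* 0: unchanged     */
      printf("%d\n", compare_and_cache(&cache, true,  v1, 4));  /* 1: value -> NULL */
      return 0;
    }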
diff --git a/sql/item_func.cc b/sql/item_func.cc
index a2c8b9f3983..872cdf0185b 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -2961,7 +2961,7 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func,
String *res= arguments[i]->val_str(&buffers[i]);
if (arguments[i]->null_value)
continue;
- f_args.args[i]= (char*) res->c_ptr();
+ f_args.args[i]= (char*) res->c_ptr_safe();
f_args.lengths[i]= res->length();
break;
}
diff --git a/sql/key.cc b/sql/key.cc
index 89423e5280e..a1793e9e5f4 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -113,13 +113,24 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
if (key_length == 0)
key_length= key_info->key_length;
- for (key_part= key_info->key_part; (int) key_length > 0; key_part++)
+ for (key_part= key_info->key_part;
+ (int) key_length > 0;
+ key_part++, to_key+= length, key_length-= length)
{
if (key_part->null_bit)
{
*to_key++= test(from_record[key_part->null_offset] &
key_part->null_bit);
key_length--;
+ if (to_key[-1])
+ {
+ /*
+ Don't copy data for null values
+ The -1 below is to subtract the null byte which is already handled
+ */
+ length= min(key_length, (uint) key_part->store_length-1);
+ continue;
+ }
}
if (key_part->key_part_flag & HA_BLOB_PART ||
key_part->key_part_flag & HA_VAR_LENGTH_PART)
@@ -138,8 +149,6 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
if (bytes < length)
cs->cset->fill(cs, (char*) to_key + bytes, length - bytes, ' ');
}
- to_key+= length;
- key_length-= length;
}
}
@@ -166,16 +175,28 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
{
key_length= key_info->key_length;
}
- for (key_part= key_info->key_part ; (int) key_length > 0 ; key_part++)
+ for (key_part= key_info->key_part ;
+ (int) key_length > 0 ;
+ key_part++, from_key+= length, key_length-= length)
{
uchar used_uneven_bits= 0;
if (key_part->null_bit)
{
- if (*from_key++)
+ bool null_value;
+ if ((null_value= *from_key++))
to_record[key_part->null_offset]|= key_part->null_bit;
else
to_record[key_part->null_offset]&= ~key_part->null_bit;
key_length--;
+ if (null_value)
+ {
+ /*
+ Don't copy data for null values
+ The -1 below is to subtract the null byte which is already handled
+ */
+ length= min(key_length, (uint) key_part->store_length-1);
+ continue;
+ }
}
if (key_part->type == HA_KEYTYPE_BIT)
{
@@ -229,8 +250,6 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
memcpy(to_record + key_part->offset, from_key + used_uneven_bits
, (size_t) length - used_uneven_bits);
}
- from_key+= length;
- key_length-= length;
}
}
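A simplified standalone version of the restructured copy loop in key_copy(): the per-part advance happens in the for-update clause, and when the leading null-indicator byte marks the value as NULL the data bytes are skipped instead of being copied from possibly uninitialized record memory. The fixed layout below (one indicator byte plus four value bytes per part) is an assumption for the example, not MyISAM/Maria's real key encoding.

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    /* Toy key layout: one null-indicator byte followed by 4 value bytes per part. */
    static const size_t PART_VALUE_LEN= 4;

    static void toy_key_copy(unsigned char *to_key,
                             const unsigned char *record_values,
                             const bool *record_is_null, size_t parts)
    {
      for (size_t part= 0; part < parts;
           part++, to_key+= PART_VALUE_LEN, record_values+= PART_VALUE_LEN)
      {
        *to_key++= record_is_null[part] ? 1 : 0;     /* null indicator byte           */
        if (record_is_null[part])
          continue;                                  /* don't copy data for NULL      */
        memcpy(to_key, record_values, PART_VALUE_LEN);
      }
    }

    int main()
    {
      unsigned char record[8]= {1, 2, 3, 4, 9, 9, 9, 9};  /* second part is NULL      */
      bool nulls[2]= {false, true};
      unsigned char key[10];
      memset(key, 0xAA, sizeof(key));                     /* visible "untouched" mark */
      toy_key_copy(key, record, nulls, 2);
      for (size_t i= 0; i < sizeof(key); i++)
        printf("%02x ", key[i]);
      printf("\n");   /* prints: 00 01 02 03 04 01 aa aa aa aa */
      return 0;
    }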
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 16dec381c8a..3818040d781 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -11131,7 +11131,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
tmp.append(STRING_WITH_LEN("(empty)"));
DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg,
- tmp.c_ptr()));
+ tmp.c_ptr_safe()));
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index a27d726f370..2ae6147a033 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3026,13 +3026,19 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
table->pos_in_table_list= table_list;
table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
table->clear_column_bitmaps();
-#if !defined(DBUG_OFF) && !defined(HAVE_valgrind)
/*
Fill record with random values to find bugs where we access fields
without first reading them.
*/
- bfill(table->record[0], table->s->reclength, 254);
-#endif
+ TRASH(table->record[0], table->s->reclength);
+ /*
+ Initialize the null marker bits, to ensure that if we are doing a read
+ of only selected columns (like in keyread), all null markers are
+ initialized.
+ */
+ bfill(table->record[0], table->s->null_bytes, 255);
+ bfill(table->record[1], table->s->null_bytes, 255);
+
DBUG_ASSERT(table->key_read == 0);
DBUG_RETURN(table);
}
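The null-byte fills above matter because null flags are updated one bit at a time: setting a single column's flag is a read-modify-write of a whole byte, and if the other bits were never written valgrind reports the |= as a use of uninitialized memory. A minimal sketch of that failure mode, with a hypothetical set_null_bit() helper, assuming memcheck semantics:

    #include <cstdio>
    #include <cstring>

    /* Setting one null flag reads and rewrites the whole byte it lives in. */
    static void set_null_bit(unsigned char *null_bytes, unsigned bit)
    {
      null_bytes[bit / 8]|= (unsigned char) (1 << (bit % 8));
    }

    int main()
    {
      unsigned char null_bytes[2];
      /*
        Without this fill, the |= below would read uninitialized bytes: the
        situation the patched open_table() now avoids by bfill()ing the null
        bytes of record[0] and record[1].
      */
      memset(null_bytes, 0xFF, sizeof(null_bytes));  /* all columns "NULL"            */

      set_null_bit(null_bytes, 3);                   /* e.g. keyread touches one bit  */
      printf("null bytes: %02x %02x\n", null_bytes[0], null_bytes[1]);
      return 0;
    }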
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 688416a61a2..62651145481 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -2363,7 +2363,6 @@ bool select_export::send_data(List<Item> &items)
{ // Fill with space
if (item->max_length > used_length)
{
- /* QQ: Fix by adding a my_b_fill() function */
if (!space_inited)
{
space_inited=1;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 9477d2ca971..7267d73a29c 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -622,8 +622,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
/*
We can't write-delayed into a table locked with LOCK TABLES:
this will lead to a deadlock, since the delayed thread will
- never be able to get a lock on the table. QQQ: why not
- upgrade the lock here instead?
+ never be able to get a lock on the table.
*/
if (table_list->lock_type == TL_WRITE_DELAYED && thd->locked_tables &&
find_locked_table(thd, table_list->db, table_list->table_name))
@@ -808,7 +807,12 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
be overwritten by fill_record() anyway (and fill_record() does not
use default values in this case).
*/
- table->record[0][0]= share->default_values[0];
+#ifdef HAVE_valgrind
+ if (table->file->ha_table_flags() & HA_RECORD_MUST_BE_CLEAN_ON_WRITE)
+ restore_record(table,s->default_values); // Get empty record
+ else
+#endif
+ table->record[0][0]= share->default_values[0];
/* Fix undefined null_bits. */
if (share->null_bytes > 1 && share->last_null_bit_pos)
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 75d9179c3bd..48574811f54 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -975,7 +975,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
((Field_timestamp*) field)->set_time();
/*
- QQ: We probably should not throw warning for each field.
+ TODO: We probably should not throw warning for each field.
But how about intention to always have the same number
of warnings in THD::cuted_fields (and get rid of cuted_fields
in the end ?)
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index bd14daa09b1..3887321f006 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1350,54 +1350,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
error=TRUE; // End server
break;
-#ifdef REMOVED
- case COM_CREATE_DB: // QQ: To be removed
- {
- LEX_STRING db, alias;
- HA_CREATE_INFO create_info;
-
- status_var_increment(thd->status_var.com_stat[SQLCOM_CREATE_DB]);
- if (thd->make_lex_string(&db, packet, packet_length, FALSE) ||
- thd->make_lex_string(&alias, db.str, db.length, FALSE) ||
- check_db_name(&db))
- {
- my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
- break;
- }
- if (check_access(thd, CREATE_ACL, db.str , 0, 1, 0,
- is_schema_db(db.str, db.length)))
- break;
- general_log_print(thd, command, "%.*s", db.length, db.str);
- bzero(&create_info, sizeof(create_info));
- mysql_create_db(thd, (lower_case_table_names == 2 ? alias.str : db.str),
- &create_info, 0);
- break;
- }
- case COM_DROP_DB: // QQ: To be removed
- {
- status_var_increment(thd->status_var.com_stat[SQLCOM_DROP_DB]);
- LEX_STRING db;
-
- if (thd->make_lex_string(&db, packet, packet_length, FALSE) ||
- check_db_name(&db))
- {
- my_error(ER_WRONG_DB_NAME, MYF(0), db.str ? db.str : "NULL");
- break;
- }
- if (check_access(thd, DROP_ACL, db.str, 0, 1, 0,
- is_schema_db(db.str, db.length)))
- break;
- if (thd->locked_tables || thd->active_transaction())
- {
- my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
- ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
- break;
- }
- general_log_write(thd, command, "%.*s", db.length, db.str);
- mysql_rm_db(thd, db.str, 0, 0);
- break;
- }
-#endif
#ifndef EMBEDDED_LIBRARY
case COM_BINLOG_DUMP:
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index ec5cc0ab705..310774a49e8 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -2953,7 +2953,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
{
start_keyuse=keyuse;
key=keyuse->key;
- s->keys.set_bit(key); // QQ: remove this ?
+ s->keys.set_bit(key); // TODO: remove this ?
refs=0;
const_ref.clear_all();
@@ -3900,6 +3900,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field)
keyuse.keypart_map= (key_part_map) 1 << part;
keyuse.used_tables=key_field->val->used_tables();
keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL;
+ keyuse.ref_table_rows= 0;
keyuse.null_rejecting= key_field->null_rejecting;
keyuse.cond_guard= key_field->cond_guard;
keyuse.sj_pred_no= key_field->sj_pred_no;
@@ -6343,8 +6344,8 @@ JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
join_tab->table=temp_table;
join_tab->cache_select= 0;
join_tab->select=0;
+ join_tab->select_cond= 0; // Avoid valgrind warning
join_tab->set_select_cond(NULL, __LINE__);
- join_tab->select_cond=0;
join_tab->quick=0;
join_tab->type= JT_ALL; /* Map through all records */
join_tab->keys.init();
@@ -19193,7 +19194,8 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
- /* QQ: thd may not be set for sub queries, but this should be fixed */
+ /* TODO: thd may not be set for sub queries, but this should be fixed */
+ DBUG_ASSERT(thd);
if (!thd)
thd= current_thd;
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index d2063b48a50..5bf9cd1f77e 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -241,11 +241,11 @@ void print_keyuse(KEYUSE *keyuse)
fieldname= keyuse->table->key_info[keyuse->key].key_part[keyuse->keypart].field->field_name;
longlong2str(keyuse->used_tables, buf2, 16, 0);
DBUG_LOCK_FILE;
- fprintf(DBUG_FILE, "KEYUSE: %s.%s=%s optimize= %d used_tables=%s "
- "ref_table_rows= %lu keypart_map= %0lx\n",
- keyuse->table->alias, fieldname, str.ptr(),
- keyuse->optimize, buf2, (ulong)keyuse->ref_table_rows,
- keyuse->keypart_map);
+ fprintf(DBUG_FILE, "KEYUSE: %s.%s=%s optimize: %u used_tables: %s "
+ "ref_table_rows: %lu keypart_map: %0lx\n",
+ keyuse->table->alias.c_ptr(), fieldname, str.ptr(),
+ (uint) keyuse->optimize, buf2, (ulong) keyuse->ref_table_rows,
+ (ulong) keyuse->keypart_map);
DBUG_UNLOCK_FILE;
//key_part_map keypart_map; --?? there can be several?
}
@@ -371,7 +371,7 @@ void print_sjm(SJ_MATERIALIZATION_INFO *sjm)
for (uint i= 0;i < sjm->tables; i++)
{
fprintf(DBUG_FILE, " %s%s\n",
- sjm->positions[i].table->table->alias,
+ sjm->positions[i].table->table->alias.c_ptr(),
(i == sjm->tables -1)? "": ",");
}
fprintf(DBUG_FILE, " }\n");
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index f6754a75284..08125f94b43 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -17,7 +17,7 @@
/**
This class holds all information about triggers of table.
- QQ: Will it be merged into TABLE in the future ?
+ TODO: Will it be merged into TABLE in the future ?
*/
class Table_triggers_list: public Sql_alloc
diff --git a/sql/table.cc b/sql/table.cc
index 556cbcc96c1..17cd9c8dc34 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -2925,7 +2925,7 @@ File create_frm(THD *thd, const char *name, const char *db,
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
create_flags|= O_EXCL | O_NOFOLLOW;
- /* Fix this when we have new .frm files; Current limit is 4G rows (QQ) */
+ /* Fix this when we have new .frm files; Current limit is 4G rows (TODO) */
if (create_info->max_rows > UINT_MAX32)
create_info->max_rows= UINT_MAX32;
if (create_info->min_rows > UINT_MAX32)
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 6463b25e440..b95a6766878 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -1036,6 +1036,16 @@ int ha_maria::open(const char *name, int mode, uint test_if_locked)
if (file->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))
int_table_flags |= HA_HAS_NEW_CHECKSUM;
+ /*
+ For static-size rows, tell MariaDB that we will access all bytes
+ in the record when writing it. This signals MariaDB to initialize
+ the full row, ensuring we don't get any errors from valgrind and
+ that all bytes in the row are properly reset.
+ */
+ if (file->s->data_file_type == STATIC_RECORD &&
+ (file->s->has_varchar_fields | file->s->has_null_fields))
+ int_table_flags|= HA_RECORD_MUST_BE_CLEAN_ON_WRITE;
+
for (i= 0; i < table->s->keys; i++)
{
plugin_ref parser= table->key_info[i].parser;
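A sketch of how the new flag is intended to flow end to end, matching the engine-side hunks here and the sql_insert.cc check earlier: the engine raises the flag at open() when static-format rows containing varchar or null fields are written verbatim, and the server then pre-initializes the record before writing. ToyShare and toy_table_flags() are simplified stand-ins, not the handler API; the bit position (bit 38) follows the sql/handler.h hunk above.

    #include <cstdint>
    #include <cstdio>

    /* Stand-in for the table flag added in sql/handler.h (bit 38). */
    static const uint64_t HA_RECORD_MUST_BE_CLEAN_ON_WRITE= UINT64_C(1) << 38;

    struct ToyShare { bool static_rows, has_varchar_fields, has_null_fields; };

    /*
      Engine side at open(): advertise the flag only when rows are written
      verbatim and contain varchar or null fields.
    */
    static uint64_t toy_table_flags(const ToyShare &s)
    {
      uint64_t flags= 0;
      if (s.static_rows && (s.has_varchar_fields || s.has_null_fields))
        flags|= HA_RECORD_MUST_BE_CLEAN_ON_WRITE;
      return flags;
    }

    int main()
    {
      ToyShare share= {true, false, true};
      uint64_t flags= toy_table_flags(share);

      /*
        The server-side test must use bitwise '&'; a logical '&&' against a
        non-zero constant would be true for every engine.
      */
      if (flags & HA_RECORD_MUST_BE_CLEAN_ON_WRITE)
        printf("engine needs a fully initialized record before write\n");
      return 0;
    }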
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 63e1801a39a..ae3a3918eee 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -763,6 +763,10 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
share->blobs[j].offset= share->columndef[i].offset;
j++;
}
+ if (share->columndef[i].type == FIELD_VARCHAR)
+ share->has_varchar_fields= 1;
+ if (share->columndef[i].null_bit)
+ share->has_null_fields= 1;
}
share->columndef[i].type= FIELD_LAST; /* End marker */
disk_pos= _ma_column_nr_read(disk_pos, share->column_nr,
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index ba97684b1aa..c39aeaabaa6 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -376,6 +376,8 @@ typedef struct st_maria_share
my_bool temporary;
/* Below flag is needed to make log tables work with concurrent insert */
my_bool is_log_table;
+ my_bool has_null_fields;
+ my_bool has_varchar_fields; /* If table has varchar fields */
my_bool changed, /* If changed since lock */
global_changed, /* If changed since open */
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 1f58d72469d..d012ac053d8 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -745,6 +745,16 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked)
int_table_flags|= HA_HAS_OLD_CHECKSUM;
}
+ /*
+ For static-size rows, tell MariaDB that we will access all bytes
+ in the record when writing it. This signals MariaDB to initialize
+ the full row, ensuring we don't get any errors from valgrind and
+ that all bytes in the row are properly reset.
+ */
+ if ((file->s->options & HA_OPTION_PACK_RECORD) &&
+ (file->s->has_varchar_fields | file->s->has_null_fields))
+ int_table_flags|= HA_RECORD_MUST_BE_CLEAN_ON_WRITE;
+
for (i= 0; i < table->s->keys; i++)
{
plugin_ref parser= table->key_info[i].parser;
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index c40f894b14d..e35dd690482 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -79,7 +79,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
{
int lock_error,kfile,open_mode,save_errno,have_rtree=0, realpath_err;
uint i,j,len,errpos,head_length,base_pos,offset,info_length,keys,
- key_parts,unique_key_parts,fulltext_keys,uniques;
+ key_parts,unique_key_parts,base_key_parts,fulltext_keys,uniques;
char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN],
data_name[FN_REFLEN];
uchar *disk_cache, *disk_pos, *end_pos;
@@ -200,7 +200,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
keys= (uint) share->state.header.keys;
uniques= (uint) share->state.header.uniques;
fulltext_keys= (uint) share->state.header.fulltext_keys;
- key_parts= mi_uint2korr(share->state.header.key_parts);
+ base_key_parts= key_parts= mi_uint2korr(share->state.header.key_parts);
unique_key_parts= mi_uint2korr(share->state.header.unique_key_parts);
if (len != MI_STATE_INFO_SIZE)
{
@@ -298,7 +298,8 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
if (!my_multi_malloc(MY_WME,
&share,sizeof(*share),
- &share->state.rec_per_key_part,sizeof(long)*key_parts,
+ &share->state.rec_per_key_part,
+ sizeof(long)*base_key_parts,
&share->keyinfo,keys*sizeof(MI_KEYDEF),
&share->uniqueinfo,uniques*sizeof(MI_UNIQUEDEF),
&share->keyparts,
@@ -322,7 +323,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
errpos=4;
*share=share_buff;
memcpy((char*) share->state.rec_per_key_part,
- (char*) rec_per_key_part, sizeof(long)*key_parts);
+ (char*) rec_per_key_part, sizeof(long)*base_key_parts);
memcpy((char*) share->state.key_root,
(char*) key_root, sizeof(my_off_t)*keys);
memcpy((char*) share->state.key_del,
diff --git a/storage/xtradb/row/row0sel.c b/storage/xtradb/row/row0sel.c
index 3c73f879b47..42580150ed8 100644
--- a/storage/xtradb/row/row0sel.c
+++ b/storage/xtradb/row/row0sel.c
@@ -3435,12 +3435,6 @@ row_search_for_mysql(
ut_error;
}
- /* init null bytes with default values as they might be
- left uninitialized in some cases and these uninited bytes
- might be copied into mysql record buffer that leads to
- valgrind warnings */
- memcpy(buf, prebuilt->default_rec, prebuilt->null_bitmap_len);
-
#if 0
/* August 19, 2005 by Heikki: temporarily disable this error
print until the cursor lock count is done correctly.