author | unknown <acurtis/antony@xiphis.org/ltamd64.xiphis.org> | 2008-01-17 15:37:18 -0800
---|---|---
committer | unknown <acurtis/antony@xiphis.org/ltamd64.xiphis.org> | 2008-01-17 15:37:18 -0800
commit | 8f3a0ea2c8e791f0ee84c3e1dd7f09ef7daf0d48 (patch) |
tree | 5d8c935946bf8e385ce551093aa41d1270a3e1d5 /storage/csv |
parent | dd40d4ea407e640d47b0f34fdb177b4d6ee4a175 (diff) |
download | mariadb-git-8f3a0ea2c8e791f0ee84c3e1dd7f09ef7daf0d48.tar.gz |
Bug#33067
"Update of CSV row incorrect for some BLOBs"
When reading in rows, move blob columns into temporary storage that is not
allocated by the Field_blob class; otherwise the row update operation alters
the original row and makes MySQL think that nothing has changed.
Also fix the incrementing of wrong statistic values.
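The failure mode is easier to see outside the server: when the "old" row image only holds a pointer into a buffer owned by the field object, storing the new value through that same field rewrites the bytes the old image points at, so a byte-for-byte comparison of old and new rows reports no difference. Below is a minimal standalone C++ sketch of that effect and of the copy-into-handler-owned-storage fix; everything in it is illustrative rather than server code, with a std::vector standing in for the MEM_ROOT used by ha_tina.

```cpp
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <vector>

// A row image stores the blob column as (length, pointer), much like
// record[0] / record[1] hold a length plus a data pointer for a blob field.
struct RowImage {
  std::size_t blob_length;
  const char *blob_ptr;
};

// One buffer owned by the "field" object and reused by every store().
static char field_buffer[64];

static void store_into_field(const char *value) {
  std::strncpy(field_buffer, value, sizeof(field_buffer) - 1);
  field_buffer[sizeof(field_buffer) - 1] = '\0';
}

static bool rows_differ(const RowImage &a, const RowImage &b) {
  return a.blob_length != b.blob_length ||
         std::memcmp(a.blob_ptr, b.blob_ptr, a.blob_length) != 0;
}

int main() {
  // Buggy pattern: the "old" row image just points into the field buffer.
  store_into_field("old blob value..");
  RowImage old_row = { std::strlen(field_buffer), field_buffer };

  store_into_field("NEW BLOB VALUE!!");      // the UPDATE stores the new value
  RowImage new_row = { std::strlen(field_buffer), field_buffer };

  // The old image now shows the new value too, so the rows look identical.
  std::printf("no copy:   rows differ? %s\n",
              rows_differ(old_row, new_row) ? "yes" : "no (bug)");

  // Fixed pattern: copy the old payload into storage the handler owns
  // (ha_tina allocates from a MEM_ROOT; a vector plays that role here).
  store_into_field("old blob value..");
  std::vector<char> copy(field_buffer, field_buffer + std::strlen(field_buffer));
  RowImage old_row_copied = { copy.size(), copy.data() };

  store_into_field("NEW BLOB VALUE!!");
  RowImage new_row2 = { std::strlen(field_buffer), field_buffer };

  std::printf("with copy: rows differ? %s\n",
              rows_differ(old_row_copied, new_row2) ? "yes (correct)" : "no");
  return 0;
}
```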
mysql-test/r/csv.result:
test for bug33067
mysql-test/t/csv.test:
test for bug33067
storage/csv/ha_tina.cc:
bug33067
When reading in rows, move blob columns into temporary storage that is not
allocated by the Field_blob class; otherwise the row update operation alters
the original row and makes MySQL think that nothing has changed.
Also fix the incrementing of wrong statistic values.
storage/csv/ha_tina.h:
bug33067
new memroot attribute for blobs
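For context, the new blobroot member follows the scan lifecycle visible in the diff below: it is initialized when a scan starts (rnd_init(), and likewise in repair() and check()), cleared for every row in find_current_row() via free_root(..., MY_MARK_BLOCKS_FREE) so the underlying block is kept for reuse, and fully released when the scan ends (rnd_end()). A rough standalone sketch of that reset-per-row arena pattern, simplified and not the real MEM_ROOT implementation, could look like this:

```cpp
#include <cstddef>
#include <cstring>
#include <vector>

// Tiny arena with the same usage pattern as ha_tina's blobroot:
// reset() marks everything free but keeps the block (cf. MY_MARK_BLOCKS_FREE),
// while destruction releases the memory (cf. free_root() in rnd_end()).
class Arena {
 public:
  explicit Arena(std::size_t block_size) : buf_(block_size), used_(0) {}

  // Hand out n bytes from the block, growing it if one row needs more.
  void *alloc(std::size_t n) {
    if (used_ + n > buf_.size())
      buf_.resize(used_ + n);
    void *p = buf_.data() + used_;
    used_ += n;
    return p;
  }

  // Per-row reset: forget all allocations, keep the storage.
  void reset() { used_ = 0; }

 private:
  std::vector<char> buf_;
  std::size_t used_;
};

struct BlobColumn { const char *ptr; std::size_t length; };

// Copy one blob payload into arena-owned storage so it outlives the
// field buffer it was parsed into.
static BlobColumn copy_blob_for_row(Arena &arena, const char *src,
                                    std::size_t len) {
  char *dst = static_cast<char *>(arena.alloc(len));
  std::memcpy(dst, src, len);
  return BlobColumn{dst, len};
}

int main() {
  Arena blobroot(8192);                  // cf. BLOB_MEMROOT_ALLOC_SIZE
  const char *rows[] = {"first blob", "second, longer blob value"};
  for (const char *row : rows) {
    blobroot.reset();                    // per-row free_root(MY_MARK_BLOCKS_FREE)
    BlobColumn col = copy_blob_for_row(blobroot, row, std::strlen(row));
    (void)col;                           // the row image would keep col.ptr
  }
  return 0;                              // arena destroyed, as after rnd_end()
}
```

A real MEM_ROOT chains extra blocks instead of reallocating, so pointers handed out earlier in the same row remain valid; the sketch keeps only the reset-and-reuse idea that matters here.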
Diffstat (limited to 'storage/csv')
-rw-r--r-- | storage/csv/ha_tina.cc | 47 |
-rw-r--r-- | storage/csv/ha_tina.h | 1 |
2 files changed, 42 insertions, 6 deletions
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index c9fab79a4c5..f8dd778045f 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -56,6 +56,7 @@ TODO:
 #define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
   + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
 #define TINA_CHECK_HEADER 254 // The number we use to determine corruption
+#define BLOB_MEMROOT_ALLOC_SIZE 8192

 /* The file extension */
 #define CSV_EXT ".CSV"               // The data file
@@ -597,6 +598,8 @@ int ha_tina::find_current_row(uchar *buf)
   bool read_all;
   DBUG_ENTER("ha_tina::find_current_row");

+  free_root(&blobroot, MYF(MY_MARK_BLOCKS_FREE));
+
   /*
     We do not read further then local_saved_data_file_length in
     order not to conflict with undergoing concurrent insert.
@@ -684,6 +687,22 @@ int ha_tina::find_current_row(uchar *buf)
       if ((*field)->store(buffer.ptr(), buffer.length(), buffer.charset(),
                           CHECK_FIELD_WARN))
         goto err;
+      if ((*field)->flags & BLOB_FLAG)
+      {
+        Field_blob *blob= *(Field_blob**) field;
+        uchar *src, *tgt;
+        uint length, packlength;
+
+        packlength= blob->pack_length_no_ptr();
+        length= blob->get_length(blob->ptr);
+        memcpy_fixed(&src, blob->ptr + packlength, sizeof(char*));
+        if (src)
+        {
+          tgt= (uchar*) alloc_root(&blobroot, length);
+          bmove(tgt, src, length);
+          memcpy_fixed(blob->ptr + packlength, &tgt, sizeof(char*));
+        }
+      }
     }
   }
   next_position= end_offset + eoln_len;
@@ -914,9 +933,10 @@ int ha_tina::open_update_temp_file_if_needed()
 int ha_tina::update_row(const uchar * old_data, uchar * new_data)
 {
   int size;
+  int rc= -1;
   DBUG_ENTER("ha_tina::update_row");

-  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
+  ha_statistic_increment(&SSV::ha_update_count);

   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
     table->timestamp_field->set_time();
@@ -931,20 +951,23 @@ int ha_tina::update_row(const uchar * old_data, uchar * new_data)
     The temp_file_length is used to calculate new data file length.
   */
   if (chain_append())
-    DBUG_RETURN(-1);
+    goto err;

   if (open_update_temp_file_if_needed())
-    DBUG_RETURN(-1);
+    goto err;

   if (my_write(update_temp_file, (uchar*)buffer.ptr(), size,
                MYF(MY_WME | MY_NABP)))
-    DBUG_RETURN(-1);
+    goto err;
   temp_file_length+= size;
+  rc= 0;

   /* UPDATE should never happen on the log tables */
   DBUG_ASSERT(!share->is_log_table);

-  DBUG_RETURN(0);
+err:
+  DBUG_PRINT("info",("rc = %d", rc));
+  DBUG_RETURN(rc);
 }


@@ -1050,6 +1073,8 @@ int ha_tina::rnd_init(bool scan)
   records_is_known= 0;
   chain_ptr= chain;

+  init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0);
+
   DBUG_RETURN(0);
 }

@@ -1115,7 +1140,7 @@ void ha_tina::position(const uchar *record)
 int ha_tina::rnd_pos(uchar * buf, uchar *pos)
 {
   DBUG_ENTER("ha_tina::rnd_pos");
-  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
+  ha_statistic_increment(&SSV::ha_read_rnd_count);
   current_position= (off_t)my_get_ptr(pos,ref_length);
   DBUG_RETURN(find_current_row(buf));
 }
@@ -1179,6 +1204,7 @@ int ha_tina::rnd_end()
   off_t file_buffer_start= 0;
   DBUG_ENTER("ha_tina::rnd_end");

+  free_root(&blobroot, MYF(0));
   records_is_known= 1;

   if ((chain_ptr - chain) > 0)
@@ -1350,6 +1376,8 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
   /* set current position to the beginning of the file */
   current_position= next_position= 0;

+  init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0);
+
   /* Read the file row-by-row. If everything is ok, repair is not needed. */
   while (!(rc= find_current_row(buf)))
   {
@@ -1358,6 +1386,8 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
     current_position= next_position;
   }

+  free_root(&blobroot, MYF(0));
+
   my_free((char*)buf, MYF(0));

   if (rc == HA_ERR_END_OF_FILE)
@@ -1535,6 +1565,9 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
   local_saved_data_file_length= share->saved_data_file_length;
   /* set current position to the beginning of the file */
   current_position= next_position= 0;
+
+  init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0);
+
   /* Read the file row-by-row. If everything is ok, repair is not needed. */
   while (!(rc= find_current_row(buf)))
   {
@@ -1542,6 +1575,8 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
     count--;
     current_position= next_position;
   }
+
+  free_root(&blobroot, MYF(0));
   my_free((char*)buf, MYF(0));

   thd_proc_info(thd, old_proc_info);
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index 5ce09783b9b..5b4381396fc 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -82,6 +82,7 @@ class ha_tina: public handler
   uint32 chain_size;
   uint local_data_file_version; /* Saved version of the data file used */
   bool records_is_known;
+  MEM_ROOT blobroot;

 private:
   bool get_write_pos(off_t *end_pos, tina_set *closest_hole);