summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Thirunarayanan Balathandayuthapani <thiru@mariadb.com>  2021-12-27 16:17:29 +0530
committer Thirunarayanan Balathandayuthapani <thiru@mariadb.com>  2022-01-12 17:37:06 +0530
commit    03ed2e9d97e61104f4c1c863afd1cc5e6909a8ff (patch)
tree      329d1dd1b2652c020b657f550f3e00244ec993c1
parent    a23f3ee84e966d967177ba0328827a2b761cc719 (diff)
download  mariadb-git-03ed2e9d97e61104f4c1c863afd1cc5e6909a8ff.tar.gz
MDEV-27318 Assertion `data_size < srv_sort_buf_size' failed in row_merge_bulk_buf_add
InnoDB fails to add a tuple whose size is greater than innodb_sort_buffer_size. InnoDB should write the fields that are greater than 2000 bytes into a temporary file and store their offset and length in the tuple field, making it a new, smaller tuple. InnoDB can then buffer the newly created tuple without any problem during bulk index creation.
-rw-r--r--  mysql-test/suite/innodb/r/insert_into_empty.result  9
-rw-r--r--  mysql-test/suite/innodb/t/insert_into_empty.test    8
-rw-r--r--  storage/innobase/row/row0merge.cc                   123
3 files changed, 110 insertions(+), 30 deletions(-)
diff --git a/mysql-test/suite/innodb/r/insert_into_empty.result b/mysql-test/suite/innodb/r/insert_into_empty.result
index 31bf91595e7..3f4d56e8245 100644
--- a/mysql-test/suite/innodb/r/insert_into_empty.result
+++ b/mysql-test/suite/innodb/r/insert_into_empty.result
@@ -231,3 +231,12 @@ SELECT COUNT(*) FROM t WHERE MBRWithin(t.c, POINT(1,1));
COUNT(*)
1
DROP TABLE t;
+#
+# MDEV-27318 Assertion data_size < srv_sort_buf_size failed in row_merge_bulk_buf_add
+#
+CREATE TABLE t1(f1 MEDIUMTEXT)ENGINE=InnoDB;
+INSERT INTO t1 VALUES(REPEAT(1, 8459264));
+SELECT length(f1) FROM t1;
+length(f1)
+8459264
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/insert_into_empty.test b/mysql-test/suite/innodb/t/insert_into_empty.test
index bfcf96d854c..1d4f79b3f44 100644
--- a/mysql-test/suite/innodb/t/insert_into_empty.test
+++ b/mysql-test/suite/innodb/t/insert_into_empty.test
@@ -242,3 +242,11 @@ CREATE TABLE t (c POINT NOT NULL, SPATIAL INDEX(c)) ENGINE=InnoDB;
INSERT INTO t VALUES (POINT(1, 1));
SELECT COUNT(*) FROM t WHERE MBRWithin(t.c, POINT(1,1));
DROP TABLE t;
+
+--echo #
+--echo # MDEV-27318 Assertion data_size < srv_sort_buf_size failed in row_merge_bulk_buf_add
+--echo #
+CREATE TABLE t1(f1 MEDIUMTEXT)ENGINE=InnoDB;
+INSERT INTO t1 VALUES(REPEAT(1, 8459264));
+SELECT length(f1) FROM t1;
+DROP TABLE t1;
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index f693a9ec96e..44ab435c8e8 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2021, MariaDB Corporation.
+Copyright (c) 2014, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -503,8 +503,6 @@ static ulint row_merge_bulk_buf_add(row_merge_buf_t* buf,
of extra_size. */
data_size += (extra_size + 1) + ((extra_size + 1) >= 0x80);
- ut_ad(data_size < srv_sort_buf_size);
-
/* Reserve bytes for the end marker of row_merge_block_t. */
if (buf->total_size + data_size >= srv_sort_buf_size)
return 0;
@@ -1045,6 +1043,76 @@ row_merge_buf_sort(
buf->tuples, buf->tmp_tuples, 0, buf->n_tuples);
}
+/** Write the blob field data to temporary file and fill the offset,
+length in the field data
+@param field tuple field
+@param blob_file file to store the blob data
+@param heap heap to store the blob offset and length
+@return DB_SUCCESS if successful */
+static dberr_t row_merge_write_blob_to_tmp_file(
+ dfield_t *field, merge_file_t *blob_file,mem_heap_t **heap)
+{
+ if (blob_file->fd == OS_FILE_CLOSED)
+ {
+ blob_file->fd= row_merge_file_create_low(nullptr);
+ if (blob_file->fd == OS_FILE_CLOSED)
+ return DB_OUT_OF_MEMORY;
+ }
+ uint64_t val= blob_file->offset;
+ uint32_t len= field->len;
+ dberr_t err= os_file_write(
+ IORequestWrite, "(bulk insert)", blob_file->fd,
+ field->data, blob_file->offset * srv_page_size, len);
+
+ if (err != DB_SUCCESS)
+ return err;
+
+ byte *data= static_cast<byte*>
+ (mem_heap_alloc(*heap, BTR_EXTERN_FIELD_REF_SIZE));
+
+ /* Write zeroes for first 8 bytes */
+ memset(data, 0, 8);
+ /* Write offset for next 8 bytes */
+ mach_write_to_8(data + 8, val);
+ /* Write length of the blob in 4 bytes */
+ mach_write_to_4(data + 16, len);
+ blob_file->offset+= field->len;
+ blob_file->n_rec++;
+ dfield_set_data(field, data, BTR_EXTERN_FIELD_REF_SIZE);
+ dfield_set_ext(field);
+ return err;
+}
+
+/** This function is invoked when tuple size is greater than
+innodb_sort_buffer_size. Basically it recreates the tuple
+by writing the blob field to the temporary file.
+@param entry index fields to be encode the blob
+@param blob_file file to store the blob data
+@param heap heap to store the blob offset and blob length
+@return tuple which fits into sort_buffer_size */
+static dtuple_t* row_merge_buf_large_tuple(const dtuple_t &entry,
+ merge_file_t *blob_file,
+ mem_heap_t **heap)
+{
+ if (!*heap)
+ *heap= mem_heap_create(DTUPLE_EST_ALLOC(entry.n_fields));
+
+ dtuple_t *tuple= dtuple_copy(&entry, *heap);
+ for (ulint i= 0; i < tuple->n_fields; i++)
+ {
+ dfield_t *field= &tuple->fields[i];
+ if (dfield_is_null(field) || field->len <= 2000)
+ continue;
+
+ dberr_t err= row_merge_write_blob_to_tmp_file(field, blob_file, heap);
+ if (err != DB_SUCCESS)
+ return nullptr;
+ }
+
+ return tuple;
+}
+
+
/** Write the field data whose length is more than 2000 bytes
into blob temporary file and write offset, length into the
tuple field
@@ -1061,35 +1129,13 @@ static dberr_t row_merge_buf_blob(const mtuple_t *entry, ulint n_fields,
for (ulint i= 0; i < n_fields; i++)
{
- if (dfield_is_null(&entry->fields[i]) || entry->fields[i].len <= 2000)
- continue;
-
- if (blob_file->fd == OS_FILE_CLOSED)
- blob_file->fd= row_merge_file_create_low(nullptr);
-
- uint64_t val= blob_file->offset;
dfield_t *field= &entry->fields[i];
- uint32_t len= field->len;
- dberr_t err= os_file_write(
- IORequestWrite, "(bulk insert)", blob_file->fd,
- field->data, blob_file->offset * srv_page_size, len);
+ if (dfield_is_null(field) || field->len <= 2000)
+ continue;
+ dberr_t err= row_merge_write_blob_to_tmp_file(field, blob_file, heap);
if (err != DB_SUCCESS)
return err;
-
- byte *data= static_cast<byte*>
- (mem_heap_alloc(*heap, BTR_EXTERN_FIELD_REF_SIZE));
-
- /* Write zeroes for first 8 bytes */
- memset(data, 0, 8);
- /* Write offset for next 8 bytes */
- mach_write_to_8(data + 8, val);
- /* Write length of the blob in 4 bytes */
- mach_write_to_4(data + 16, len);
- blob_file->offset+= field->len;
- blob_file->n_rec++;
- dfield_set_data(field, data, BTR_EXTERN_FIELD_REF_SIZE);
- dfield_set_ext(field);
}
return DB_SUCCESS;
@@ -5109,6 +5155,7 @@ dberr_t row_merge_bulk_t::bulk_insert_buffered(const dtuple_t &row,
{
dberr_t err= DB_SUCCESS;
ulint i= 0;
+ mem_heap_t *large_tuple_heap= nullptr;
for (dict_index_t *index= UT_LIST_GET_FIRST(ind.table->indexes);
index; index= UT_LIST_GET_NEXT(indexes, index))
{
@@ -5125,7 +5172,19 @@ add_to_buf:
if (row_merge_bulk_buf_add(buf, *ind.table, row))
{
i++;
- return err;
+ goto func_exit;
+ }
+
+ if (buf->n_tuples == 0)
+ {
+ /* Tuple data size is greater than srv_sort_buf_size */
+ dtuple_t *big_tuple= row_merge_buf_large_tuple(
+ row, &m_blob_file, &large_tuple_heap);
+ if (row_merge_bulk_buf_add(buf, *ind.table, *big_tuple))
+ {
+ i++;
+ goto func_exit;
+ }
}
if (index->is_unique())
@@ -5148,6 +5207,9 @@ add_to_buf:
goto add_to_buf;
}
+func_exit:
+ if (large_tuple_heap)
+ mem_heap_free(large_tuple_heap);
return err;
}
@@ -5183,7 +5245,8 @@ dberr_t row_merge_bulk_t::write_to_index(ulint index_no, trx_t *trx)
/* Data got fit in merge buffer. */
err= row_merge_insert_index_tuples(
index, table, OS_FILE_CLOSED, nullptr,
- &buf, &btr_bulk, 0, 0, 0, nullptr, table->space_id);
+ &buf, &btr_bulk, 0, 0, 0, nullptr, table->space_id, nullptr,
+ m_blob_file.fd == OS_FILE_CLOSED ? nullptr : &m_blob_file);
goto func_exit;
}
}