path: root/storage/archive/ha_archive.cc
author    Sergei Petrunia <psergey@askmonty.org>  2019-04-18 23:12:43 +0300
committer Sergei Petrunia <psergey@askmonty.org>  2019-04-18 23:12:43 +0300
commit    056b6fe1d59b515a6380e50783b3c4ad0f93959f (patch)
tree      6c38da6fdffecc53d65ffd5305aea77ebc2cd338 /storage/archive/ha_archive.cc
parent    0bb924e18c338fa2dc901041aad09998b399efc1 (diff)
download  mariadb-git-056b6fe1d59b515a6380e50783b3c4ad0f93959f.tar.gz
MDEV-17297: stats.records=0 for an Archive engine table that has rows, after running the ANALYZE command
The Archive storage engine assumed that any query attempting to read from the table would call ha_archive::info() first; ha_archive would flush un-written data in that call, making it visible to the reads. That assumption no longer holds, so stop relying on it: flush the data when the table is opened for reading instead. This way, one can run multiple write statements without causing a flush, but as soon as the data might be needed, it is flushed.
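For readers skimming the diff below, here is a minimal, self-contained sketch of the pattern the patch moves to: write_row() only marks the shared state dirty, and the read path flushes pending writes under the share mutex before row statistics are taken. The Share and Handler types are illustrative stand-ins written for this note, not the actual MariaDB classes, and the real engine flushes via azflush() rather than the placeholder comment.

// Sketch only: assumed names (Share, Handler, external_lock_read) are not
// the real ha_archive API; they just mirror the shape of the patch below.
#include <cstdio>
#include <mutex>

struct Share
{
  std::mutex mutex;
  bool dirty= false;            // un-flushed writes exist
  long long rows_recorded= 0;   // rows written so far
};

struct Handler
{
  Share *share;
  long long stats_records= 0;

  void write_row()                        // writer: record the row, mark dirty
  {
    std::lock_guard<std::mutex> lock(share->mutex);
    share->rows_recorded++;
    share->dirty= true;
  }

  void flush_and_clear_pending_writes()   // flush once, then refresh stats
  {
    std::lock_guard<std::mutex> lock(share->mutex);
    if (share->dirty)
    {
      // the real engine calls azflush(&share->archive_write, Z_SYNC_FLUSH) here
      share->dirty= false;
    }
    stats_records= share->rows_recorded;
  }

  void external_lock_read()               // taking a read lock now triggers the
  {                                       // flush, so reads no longer depend on
    flush_and_clear_pending_writes();     // info() having been called first
  }
};

int main()
{
  Share share;
  Handler h{&share};
  h.write_row();
  h.write_row();
  h.external_lock_read();                 // flush happens before any scan
  std::printf("stats.records=%lld\n", h.stats_records);  // prints 2, not 0
  return 0;
}

This preserves the property the commit message describes: several write statements in a row never force a flush; only the first subsequent read does.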
Diffstat (limited to 'storage/archive/ha_archive.cc')
-rw-r--r--  storage/archive/ha_archive.cc  50
1 file changed, 33 insertions(+), 17 deletions(-)
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index d70757e8142..e985f75d646 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -1650,7 +1650,6 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
DBUG_VOID_RETURN;
}
-
/*
Hints for optimizer, see ha_tina for more information
*/
@@ -1658,22 +1657,7 @@ int ha_archive::info(uint flag)
{
DBUG_ENTER("ha_archive::info");
- mysql_mutex_lock(&share->mutex);
- if (share->dirty)
- {
- DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
- DBUG_ASSERT(share->archive_write_open);
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->dirty= FALSE;
- }
-
- /*
- This should be an accurate number now, though bulk and delayed inserts can
- cause the number to be inaccurate.
- */
- stats.records= share->rows_recorded;
- mysql_mutex_unlock(&share->mutex);
-
+ flush_and_clear_pending_writes();
stats.deleted= 0;
DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));
@@ -1716,6 +1700,38 @@ int ha_archive::info(uint flag)
}
+int ha_archive::external_lock(THD *thd, int lock_type)
+{
+ if (lock_type == F_RDLCK)
+ {
+ // We are going to read from the table. Flush any pending writes that we
+ // may have
+ flush_and_clear_pending_writes();
+ }
+ return 0;
+}
+
+
+void ha_archive::flush_and_clear_pending_writes()
+{
+ mysql_mutex_lock(&share->mutex);
+ if (share->dirty)
+ {
+ DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
+ DBUG_ASSERT(share->archive_write_open);
+ azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->dirty= FALSE;
+ }
+
+ /*
+ This should be an accurate number now, though bulk and delayed inserts can
+ cause the number to be inaccurate.
+ */
+ stats.records= share->rows_recorded;
+ mysql_mutex_unlock(&share->mutex);
+}
+
+
/*
This method tells us that a bulk insert operation is about to occur. We set
a flag which will keep write_row from saying that its data is dirty. This in