author     Mathias Stearn <mathias@10gen.com>    2015-06-11 16:47:31 -0400
committer  Mathias Stearn <mathias@10gen.com>    2015-06-29 19:23:14 -0400
commit     40ddfd261f48ff20c649a6601ff19ed6b806332d (patch)
tree       48ccc6172ff4e0a40b64135cc732d20942f042d6
parent     6acbfbba4f2e9e33221d71f2241642b6f9d2e479 (diff)
SERVER-18912 Document and implement correct capped visibility rules
Fixes a bug introduced by SERVER-16444 changes.
-rw-r--r--  src/mongo/db/storage/SConscript |   1
-rw-r--r--  src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp |  21
-rw-r--r--  src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp |  20
-rw-r--r--  src/mongo/db/storage/record_store.h |  14
-rw-r--r--  src/mongo/db/storage/record_store_test_capped_visibility.cpp | 188
-rw-r--r--  src/mongo/db/storage/record_store_test_harness.h |  35
-rw-r--r--  src/mongo/db/storage/record_store_test_validate.h |   2
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp |  39
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h |   1
-rw-r--r--  src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp |  34
10 files changed, 316 insertions(+), 39 deletions(-)
diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript
index 3b9fc94fb35..d77d3c5351d 100644
--- a/src/mongo/db/storage/SConscript
+++ b/src/mongo/db/storage/SConscript
@@ -87,6 +87,7 @@ env.Library(
env.Library(
target='record_store_test_harness',
source=[
+ 'record_store_test_capped_visibility.cpp',
'record_store_test_datafor.cpp',
'record_store_test_datasize.cpp',
'record_store_test_deleterecord.cpp',
diff --git a/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp b/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp
index aedbb4484db..40366d6f81d 100644
--- a/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp
+++ b/src/mongo/db/storage/in_memory/in_memory_record_store_test.cpp
@@ -37,22 +37,31 @@
namespace mongo {
-class InMemoryHarnessHelper : public HarnessHelper {
+class InMemoryHarnessHelper final : public HarnessHelper {
public:
InMemoryHarnessHelper() {}
- virtual RecordStore* newNonCappedRecordStore() {
- return new InMemoryRecordStore("a.b", &data);
+ std::unique_ptr<RecordStore> newNonCappedRecordStore() final {
+ return stdx::make_unique<InMemoryRecordStore>("a.b", &data);
+ }
+ std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedSizeBytes,
+ int64_t cappedMaxDocs) final {
+ return stdx::make_unique<InMemoryRecordStore>(
+ "a.b", &data, true, cappedSizeBytes, cappedMaxDocs);
}
- virtual RecoveryUnit* newRecoveryUnit() {
+ RecoveryUnit* newRecoveryUnit() final {
return new InMemoryRecoveryUnit();
}
+ bool supportsDocLocking() final {
+ return false;
+ }
+
std::shared_ptr<void> data;
};
-HarnessHelper* newHarnessHelper() {
- return new InMemoryHarnessHelper();
+std::unique_ptr<HarnessHelper> newHarnessHelper() {
+ return stdx::make_unique<InMemoryHarnessHelper>();
}
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
index 5a00a5a9a7f..3386bb057d1 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_record_store_test.cpp
@@ -45,18 +45,18 @@ class MyHarnessHelper : public HarnessHelper {
public:
MyHarnessHelper() {}
- virtual RecordStore* newNonCappedRecordStore() {
+ virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() {
OperationContextNoop txn;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(false, 0);
md->setUserFlag(&txn, CollectionOptions::Flag_NoPadding);
- SimpleRecordStoreV1* rs = new SimpleRecordStoreV1(&txn, "a.b", md, &_em, false);
- return rs;
+ return stdx::make_unique<SimpleRecordStoreV1>(&txn, "a.b", md, &_em, false);
}
- virtual RecordStore* newCappedRecordStore(int64_t cappedMaxSize, int64_t cappedMaxDocs) {
+ std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedMaxSize,
+ int64_t cappedMaxDocs) final {
OperationContextNoop txn;
DummyRecordStoreV1MetaData* md = new DummyRecordStoreV1MetaData(true, 0);
- CappedRecordStoreV1* rs = new CappedRecordStoreV1(&txn, NULL, "a.b", md, &_em, false);
+ auto rs = stdx::make_unique<CappedRecordStoreV1>(&txn, nullptr, "a.b", md, &_em, false);
LocAndSize records[] = {{}};
LocAndSize drecs[] = {{DiskLoc(0, 1000), 1000}, {}};
@@ -64,18 +64,22 @@ public:
md->setCapFirstNewRecord(&txn, DiskLoc().setInvalid());
initializeV1RS(&txn, records, drecs, NULL, &_em, md);
- return rs;
+ return std::move(rs);
}
virtual RecoveryUnit* newRecoveryUnit() {
return new RecoveryUnitNoop();
}
+ bool supportsDocLocking() final {
+ return false;
+ }
+
private:
DummyExtentManager _em;
};
-HarnessHelper* newHarnessHelper() {
- return new MyHarnessHelper();
+std::unique_ptr<HarnessHelper> newHarnessHelper() {
+ return stdx::make_unique<MyHarnessHelper>();
}
}
diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h
index 1f5a3f27556..5b8ab470839 100644
--- a/src/mongo/db/storage/record_store.h
+++ b/src/mongo/db/storage/record_store.h
@@ -115,6 +115,20 @@ struct Record {
*
* Implementations may override any default implementation if they can provide a more
* efficient implementation.
+ *
+ * IMPORTANT NOTE FOR DOCUMENT-LOCKING ENGINES: If you implement capped collections with a
+ * "visibility" system such that documents that exist in your snapshot but were inserted after
+ * the last uncommitted document are hidden, you must obey the following rules:
+ * - next() must never return invisible documents.
+ * - If next() on a forward cursor hits an invisible document, it should behave as if it hit
+ * the end of the collection.
+ *   - When next() on a reverse cursor does its initial seek to the end of the collection, it
+ *     must return the newest visible document. It should return boost::none only if there are
+ *     no visible documents in the collection.
+ * - seekExact() must ignore the visibility filter and return the requested document even if
+ * it is supposed to be invisible.
+ * TODO SERVER-18934 Handle this above the storage engine layer so storage engines don't have to
+ * deal with capped visibility.
*/
class RecordCursor {
public:
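
The rules added to record_store.h above are easier to follow in runnable form. Below is a minimal standalone sketch, not MongoDB code: ToyCappedStore and ToyCappedCursor are invented names, and a capped collection is modeled as an ascending vector of RecordIds plus a "lowest hidden id" threshold. It shows a forward next() treating the first invisible record as the end of the collection, a reverse next() starting at the newest visible record, and seekExact() ignoring visibility.

// Standalone illustration of the capped visibility rules (hypothetical types, not MongoDB's).
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

using RecordId = std::int64_t;

struct ToyCappedStore {
    std::vector<RecordId> records;         // all records, in ascending RecordId order
    std::optional<RecordId> lowestHidden;  // records with id >= this are invisible to readers
};

class ToyCappedCursor {
public:
    ToyCappedCursor(const ToyCappedStore& store, bool forward)
        : _store(store), _forward(forward), _rpos(store.records.size()) {}

    // next() never returns an invisible record.
    std::optional<RecordId> next() {
        if (_forward) {
            if (_pos >= _store.records.size())
                return std::nullopt;
            RecordId id = _store.records[_pos];
            if (isHidden(id))
                return std::nullopt;  // behave as if we hit the end of the collection
            ++_pos;
            return id;
        }
        // Reverse: the initial seek lands on the newest *visible* record; nullopt only
        // if there are no visible records at all.
        while (_rpos > 0) {
            RecordId id = _store.records[--_rpos];
            if (!isHidden(id))
                return id;
        }
        return std::nullopt;
    }

    // seekExact() ignores the visibility filter and returns the record if it exists.
    std::optional<RecordId> seekExact(RecordId id) const {
        auto it = std::find(_store.records.begin(), _store.records.end(), id);
        if (it == _store.records.end())
            return std::nullopt;
        return *it;
    }

private:
    bool isHidden(RecordId id) const {
        return _store.lowestHidden && id >= *_store.lowestHidden;
    }

    const ToyCappedStore& _store;
    const bool _forward;
    std::size_t _pos = 0;  // forward position
    std::size_t _rpos;     // reverse position (one past the next candidate)
};

int main() {
    ToyCappedStore store{{1, 2, 3, 4}, /*lowestHidden=*/3};  // 3 and 4 are still hidden

    ToyCappedCursor fwd(store, /*forward=*/true);
    while (auto id = fwd.next())
        std::cout << "forward sees " << *id << "\n";           // 1, 2

    ToyCappedCursor rev(store, /*forward=*/false);
    std::cout << "reverse starts at " << *rev.next() << "\n";  // 2

    ToyCappedCursor seek(store, /*forward=*/true);
    std::cout << "seekExact(3) -> " << *seek.seekExact(3) << "\n";  // 3, despite being hidden
}
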
diff --git a/src/mongo/db/storage/record_store_test_capped_visibility.cpp b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
new file mode 100644
index 00000000000..1f7d7afa87c
--- /dev/null
+++ b/src/mongo/db/storage/record_store_test_capped_visibility.cpp
@@ -0,0 +1,188 @@
+/**
+ * Copyright (C) 2015 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/db/storage/record_store_test_harness.h"
+
+#include <memory>
+
+#include "mongo/db/storage/record_store.h"
+#include "mongo/unittest/unittest.h"
+#include "mongo/util/unowned_ptr.h"
+
+
+namespace mongo {
+namespace {
+
+RecordId doInsert(unowned_ptr<OperationContext> txn, unowned_ptr<RecordStore> rs) {
+ static char zeros[16];
+ return uassertStatusOK(rs->insertRecord(txn, zeros, sizeof(zeros), false));
+}
+
+// A macro rather than a helper function so that a failing ASSERT reports the caller's line number.
+#define ASSERT_ID_EQ(EXPR, ID) \
+ [](boost::optional<Record> record, RecordId id) { \
+ ASSERT(record); \
+ ASSERT_EQ(record->id, id); \
+ }((EXPR), (ID));
+
+} // namespace
+
+TEST(RecordStore_CappedVisibility, EmptyInitialState) {
+ auto harness = newHarnessHelper();
+ if (!harness->supportsDocLocking())
+ return;
+
+ auto rs = harness->newCappedRecordStore();
+
+ auto longLivedClient = harness->serviceContext()->makeClient("longLived");
+ auto longLivedOp = harness->newOperationContext(longLivedClient.get());
+ WriteUnitOfWork longLivedWuow(longLivedOp.get());
+
+ // Collection is really empty.
+ ASSERT(!rs->getCursor(longLivedOp.get(), true)->next());
+ ASSERT(!rs->getCursor(longLivedOp.get(), false)->next());
+
+ RecordId lowestHiddenId = doInsert(longLivedOp, rs);
+
+ // Collection still looks empty to iteration but not seekExact.
+ ASSERT(!rs->getCursor(longLivedOp.get(), true)->next());
+ ASSERT(!rs->getCursor(longLivedOp.get(), false)->next());
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(lowestHiddenId), lowestHiddenId);
+
+ RecordId otherId;
+ {
+ auto txn = harness->newOperationContext();
+ WriteUnitOfWork wuow(txn.get());
+
+ // Can't see uncommitted write from other operation.
+ ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+
+ ASSERT(!rs->getCursor(txn.get(), true)->next());
+ ASSERT(!rs->getCursor(txn.get(), false)->next());
+
+ otherId = doInsert(txn, rs);
+
+ ASSERT(!rs->getCursor(txn.get(), true)->next());
+ ASSERT(!rs->getCursor(txn.get(), false)->next());
+ ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+
+ wuow.commit();
+
+ ASSERT(!rs->getCursor(txn.get(), true)->next());
+ ASSERT(!rs->getCursor(txn.get(), false)->next());
+ ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ }
+
+ ASSERT(!rs->getCursor(longLivedOp.get(), true)->next());
+ ASSERT(!rs->getCursor(longLivedOp.get(), false)->next());
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(lowestHiddenId), lowestHiddenId);
+ ASSERT(!rs->getCursor(longLivedOp.get())->seekExact(otherId)); // still on old snapshot.
+
+ // This makes all documents visible and lets longLivedOp get a new snapshot.
+ longLivedWuow.commit();
+
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), true)->next(), lowestHiddenId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(lowestHiddenId), lowestHiddenId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(otherId), otherId);
+}
+
+TEST(RecordStore_CappedVisibility, NonEmptyInitialState) {
+ auto harness = newHarnessHelper();
+ if (!harness->supportsDocLocking())
+ return;
+
+ auto rs = harness->newCappedRecordStore();
+
+ auto longLivedClient = harness->serviceContext()->makeClient("longLived");
+ auto longLivedOp = harness->newOperationContext(longLivedClient.get());
+
+ RecordId initialId;
+ {
+ WriteUnitOfWork wuow(longLivedOp.get());
+ initialId = doInsert(longLivedOp, rs);
+ wuow.commit();
+ }
+
+ WriteUnitOfWork longLivedWuow(longLivedOp.get());
+
+ // Can see initial doc.
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), false)->next(), initialId);
+
+ RecordId lowestHiddenId = doInsert(longLivedOp, rs);
+
+ // Collection still looks like it only has a single doc to iteration but not seekExact.
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), false)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(initialId), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(lowestHiddenId), lowestHiddenId);
+
+ RecordId otherId;
+ {
+ auto txn = harness->newOperationContext();
+ WriteUnitOfWork wuow(txn.get());
+
+ // Can only see committed writes from other operation.
+ ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(initialId), initialId);
+ ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+
+ ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), initialId);
+
+ otherId = doInsert(txn, rs);
+
+ ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+
+ wuow.commit();
+
+ ASSERT_ID_EQ(rs->getCursor(txn.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(txn.get(), false)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(txn.get())->seekExact(otherId), otherId);
+ ASSERT(!rs->getCursor(txn.get())->seekExact(lowestHiddenId));
+ }
+
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), false)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(lowestHiddenId), lowestHiddenId);
+ ASSERT(!rs->getCursor(longLivedOp.get())->seekExact(otherId)); // still on old snapshot.
+
+ // This makes all documents visible and lets longLivedOp get a new snapshot.
+ longLivedWuow.commit();
+
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), true)->next(), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get(), false)->next(), otherId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(initialId), initialId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(lowestHiddenId), lowestHiddenId);
+ ASSERT_ID_EQ(rs->getCursor(longLivedOp.get())->seekExact(otherId), otherId);
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/storage/record_store_test_harness.h b/src/mongo/db/storage/record_store_test_harness.h
index e3cd758e545..818f29aec2c 100644
--- a/src/mongo/db/storage/record_store_test_harness.h
+++ b/src/mongo/db/storage/record_store_test_harness.h
@@ -32,6 +32,8 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/service_context_noop.h"
+#include "mongo/platform/cstdint.h"
+#include "mongo/stdx/memory.h"
namespace mongo {
@@ -43,17 +45,40 @@ public:
HarnessHelper() : _serviceContext(), _client(_serviceContext.makeClient("hh")) {}
virtual ~HarnessHelper() {}
- virtual RecordStore* newNonCappedRecordStore() = 0;
- virtual RecoveryUnit* newRecoveryUnit() = 0;
+ virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() = 0;
+
+ static const int64_t kDefaultCapedSizeBytes = 16 * 1024 * 1024;
+ virtual std::unique_ptr<RecordStore> newCappedRecordStore(
+ int64_t cappedSizeBytes = kDefaultCapedSizeBytes, int64_t cappedMaxDocs = -1) = 0;
+
+ virtual std::unique_ptr<OperationContext> newOperationContext(Client* client) {
+ return stdx::make_unique<OperationContextNoop>(client, 1, newRecoveryUnit());
+ }
+
+ std::unique_ptr<OperationContext> newOperationContext() {
+ return newOperationContext(client());
+ }
+
+ /**
+ * Currently this requires that it is possible to have two independent open write operations
+     * at the same time on the same thread (with separate Clients, OperationContexts, and
+ * RecoveryUnits).
+ */
+ virtual bool supportsDocLocking() = 0;
- virtual OperationContext* newOperationContext() {
- return new OperationContextNoop(_client.get(), 1, newRecoveryUnit());
+ Client* client() {
+ return _client.get();
+ }
+ ServiceContext* serviceContext() {
+ return &_serviceContext;
}
private:
+ virtual RecoveryUnit* newRecoveryUnit() = 0;
+
ServiceContextNoop _serviceContext;
ServiceContext::UniqueClient _client;
};
-HarnessHelper* newHarnessHelper();
+std::unique_ptr<HarnessHelper> newHarnessHelper();
}
diff --git a/src/mongo/db/storage/record_store_test_validate.h b/src/mongo/db/storage/record_store_test_validate.h
index e7b435e4738..b5a73e041fa 100644
--- a/src/mongo/db/storage/record_store_test_validate.h
+++ b/src/mongo/db/storage/record_store_test_validate.h
@@ -68,7 +68,7 @@ public:
ValidateTest()
: _harnessHelper(newHarnessHelper()), _rs(_harnessHelper->newNonCappedRecordStore()) {}
- OperationContext* newOperationContext() {
+ std::unique_ptr<OperationContext> newOperationContext() {
return _harnessHelper->newOperationContext();
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index 8e1f09d5951..64690bbd7d2 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -108,7 +108,32 @@ public:
return {};
WT_CURSOR* c = _cursor->get();
- {
+
+ bool mustAdvance = true;
+ if (_lastReturnedId.isNull() && !_forward && _rs._isCapped) {
+ // In this case we need to seek to the highest visible record.
+ const RecordId reverseCappedInitialSeekPoint =
+ _readUntilForOplog.isNull() ? _rs.lowestCappedHiddenRecord() : _readUntilForOplog;
+
+ if (!reverseCappedInitialSeekPoint.isNull()) {
+ c->set_key(c, _makeKey(reverseCappedInitialSeekPoint));
+ int cmp;
+ int seekRet = WT_OP_CHECK(c->search_near(c, &cmp));
+ if (seekRet == WT_NOTFOUND) {
+ _eof = true;
+ return {};
+ }
+ invariantWTOK(seekRet);
+
+ // If we landed at or past the lowest hidden record, we must advance to be in
+ // the visible range.
+ mustAdvance = _rs.isCappedHidden(reverseCappedInitialSeekPoint)
+ ? (cmp >= 0)
+ : (cmp > 0); // No longer hidden.
+ }
+ }
+
+ if (mustAdvance) {
// Nothing after the next line can throw WCEs.
// Note that an unpositioned (or eof) WT_CURSOR returns the first/last entry in the
// table when you call next/prev.
@@ -139,11 +164,6 @@ public:
}
boost::optional<Record> seekExact(const RecordId& id) final {
- if (!isVisible(id)) {
- _eof = true;
- return {};
- }
-
WT_CURSOR* c = _cursor->get();
c->set_key(c, _makeKey(id));
// Nothing after the next line can throw WCEs.
@@ -280,7 +300,7 @@ private:
bool _forParallelCollectionScan; // This can go away once SERVER-17364 is resolved.
std::unique_ptr<WiredTigerCursor> _cursor;
bool _eof = false;
- RecordId _lastReturnedId;
+ RecordId _lastReturnedId; // If null, need to seek to first/last record.
const RecordId _readUntilForOplog;
};
@@ -803,6 +823,11 @@ bool WiredTigerRecordStore::isCappedHidden(const RecordId& loc) const {
return _uncommittedDiskLocs.front() <= loc;
}
+RecordId WiredTigerRecordStore::lowestCappedHiddenRecord() const {
+ boost::lock_guard<boost::mutex> lk(_uncommittedDiskLocsMutex);
+ return _uncommittedDiskLocs.empty() ? RecordId() : _uncommittedDiskLocs.front();
+}
+
StatusWith<RecordId> WiredTigerRecordStore::insertRecord(OperationContext* txn,
const DocWriter* doc,
bool enforceQuota) {
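
The reverse-cursor seek above relies on WT_CURSOR::search_near, which positions the cursor on a nearby key and reports through cmp whether that key compares less than (cmp < 0), equal to (cmp == 0), or greater than (cmp > 0) the search key. The sketch below is a simplified standalone model, not the WiredTiger API: searchNear and decideMustAdvance are invented names, and this searchNear always prefers the first key at or after the target. It reproduces the mustAdvance rule from the diff: while the seek point is still hidden, landing at or past it (cmp >= 0) means the cursor sits in the invisible range and the usual advance (prev, for a reverse cursor) must still run; once the seek point is visible, only landing strictly past it (cmp > 0) requires the extra advance.

// Standalone model of search_near's cmp contract and the mustAdvance decision
// (invented helper names; this is not WiredTiger's API).
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using RecordId = std::int64_t;

// Lands on the first key >= target (or the largest key if none is), and sets cmp the way
// search_near reports it: negative if the landed key is smaller than the target, zero on an
// exact match, positive if it is larger. Returns false for an empty table (like WT_NOTFOUND).
bool searchNear(const std::vector<RecordId>& keys, RecordId target, std::size_t* idx, int* cmp) {
    if (keys.empty())
        return false;
    auto it = std::lower_bound(keys.begin(), keys.end(), target);
    if (it == keys.end())
        --it;  // nothing at or after target, land on the largest key
    *idx = static_cast<std::size_t>(it - keys.begin());
    *cmp = (*it < target) ? -1 : (*it > target) ? 1 : 0;
    return true;
}

// Mirrors the rule in the diff: if the seek point is still hidden, any landing at or past it
// (cmp >= 0) is inside the invisible range, so the cursor must still advance; if the seek
// point has since become visible, only a landing strictly past it (cmp > 0) needs the advance.
bool decideMustAdvance(bool seekPointIsHidden, int cmp) {
    return seekPointIsHidden ? (cmp >= 0) : (cmp > 0);
}

int main() {
    std::vector<RecordId> keys = {10, 20, 30, 40};
    std::size_t idx = 0;
    int cmp = 0;
    if (searchNear(keys, /*target=*/30, &idx, &cmp))
        std::cout << "landed on " << keys[idx] << ", cmp=" << cmp
                  << ", mustAdvance=" << decideMustAdvance(/*seekPointIsHidden=*/true, cmp)
                  << "\n";
}
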
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
index 1aa0dec93e7..50b0d3547ee 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h
@@ -201,6 +201,7 @@ public:
void dealtWithCappedLoc(const RecordId& loc);
bool isCappedHidden(const RecordId& loc) const;
+ RecordId lowestCappedHiddenRecord() const;
bool inShutdown() const;
int64_t cappedDeleteAsNeeded(OperationContext* txn, const RecordId& justInserted);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index aae1056bf8b..7701002c560 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -54,7 +54,7 @@ using std::unique_ptr;
using std::string;
using std::stringstream;
-class WiredTigerHarnessHelper : public HarnessHelper {
+class WiredTigerHarnessHelper final : public HarnessHelper {
public:
static WT_CONNECTION* createConnection(StringData dbpath, StringData extraStrings) {
WT_CONNECTION* conn = NULL;
@@ -86,10 +86,10 @@ public:
_conn->close(_conn, NULL);
}
- virtual RecordStore* newNonCappedRecordStore() {
+ virtual std::unique_ptr<RecordStore> newNonCappedRecordStore() {
return newNonCappedRecordStore("a.b");
}
- RecordStore* newNonCappedRecordStore(const std::string& ns) {
+ std::unique_ptr<RecordStore> newNonCappedRecordStore(const std::string& ns) {
WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
OperationContextNoop txn(ru);
string uri = "table:" + ns;
@@ -106,12 +106,17 @@ public:
uow.commit();
}
- return new WiredTigerRecordStore(&txn, ns, uri);
+ return stdx::make_unique<WiredTigerRecordStore>(&txn, ns, uri);
}
- virtual RecordStore* newCappedRecordStore(const std::string& ns,
- int64_t cappedMaxSize,
- int64_t cappedMaxDocs) {
+ std::unique_ptr<RecordStore> newCappedRecordStore(int64_t cappedSizeBytes,
+ int64_t cappedMaxDocs) final {
+ return newCappedRecordStore("a.b", cappedSizeBytes, cappedMaxDocs);
+ }
+
+ std::unique_ptr<RecordStore> newCappedRecordStore(const std::string& ns,
+ int64_t cappedMaxSize,
+ int64_t cappedMaxDocs) {
WiredTigerRecoveryUnit* ru = new WiredTigerRecoveryUnit(_sessionCache);
OperationContextNoop txn(ru);
string uri = "table:a.b";
@@ -131,13 +136,18 @@ public:
uow.commit();
}
- return new WiredTigerRecordStore(&txn, ns, uri, true, cappedMaxSize, cappedMaxDocs);
+ return stdx::make_unique<WiredTigerRecordStore>(
+ &txn, ns, uri, true, cappedMaxSize, cappedMaxDocs);
}
- virtual RecoveryUnit* newRecoveryUnit() {
+ RecoveryUnit* newRecoveryUnit() final {
return new WiredTigerRecoveryUnit(_sessionCache);
}
+ bool supportsDocLocking() final {
+ return true;
+ }
+
WT_CONNECTION* conn() const {
return _conn;
}
@@ -148,8 +158,8 @@ private:
WiredTigerSessionCache* _sessionCache;
};
-HarnessHelper* newHarnessHelper() {
- return new WiredTigerHarnessHelper();
+std::unique_ptr<HarnessHelper> newHarnessHelper() {
+ return stdx::make_unique<WiredTigerHarnessHelper>();
}
TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyDocument) {
@@ -390,7 +400,7 @@ private:
virtual void setUp() {
harnessHelper.reset(new WiredTigerHarnessHelper());
sizeStorer.reset(new WiredTigerSizeStorer(harnessHelper->conn(), "table:sizeStorer"));
- rs.reset(harnessHelper->newNonCappedRecordStore());
+ rs = harnessHelper->newNonCappedRecordStore();
WiredTigerRecordStore* wtrs = checked_cast<WiredTigerRecordStore*>(rs.get());
wtrs->setSizeStorer(sizeStorer.get());
uri = wtrs->getURI();