author     Billy Donahue <billy.donahue@mongodb.com>   2017-11-28 16:52:58 -0500
committer  Billy Donahue <billy.donahue@mongodb.com>   2017-12-01 16:37:23 -0500
commit     7dc61c0f0c0160ad6cba831a0e12bef501ef3ad5 (patch)
tree       ea043f2ccc52da4a027d8a0e7b1cb562977bc247 /src/mongo/db/repl/collection_cloner_test.cpp
parent     4a5f07ba38561db71727fe4254de5e9c24053645 (diff)
download   mongo-7dc61c0f0c0160ad6cba831a0e12bef501ef3ad5.tar.gz
SERVER-32070 migrate some easy stdx::bind to lambdas
[branch stdx_bind_1]
Diffstat (limited to 'src/mongo/db/repl/collection_cloner_test.cpp')
-rw-r--r--  src/mongo/db/repl/collection_cloner_test.cpp  | 104
1 file changed, 54 insertions(+), 50 deletions(-)
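
For context, the change below is mechanical: each stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1) callback passed to the CollectionCloner constructor is replaced by a lambda produced by a small fixture helper, setStatusCallback(). A minimal sketch of the two forms (not taken from the file; both lines assume they appear inside a CollectionClonerTest member, where `this` and setStatus are available):

    // Before: bind the member function, forwarding its single argument.
    auto before = stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1);

    // After: capture `this` and forward the argument explicitly.
    auto after = [this](const Status& s) { setStatus(s); };

Both yield a callable invocable as void(const Status&); the lambda simply makes the capture and the forwarded parameter visible at the call site.
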
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index dfe09f3ee2f..c3f2bd92cef 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -63,8 +63,13 @@ public:
BaseCloner* getCloner() const override;
protected:
+ auto setStatusCallback() {
+ return [this](const Status& s) { setStatus(s); };
+ }
+
void setUp() override;
void tearDown() override;
+
std::vector<BSONObj> makeSecondaryIndexSpecs(const NamespaceString& nss);
// A simple arbitrary value to use as the default batch size.
@@ -84,16 +89,15 @@ void CollectionClonerTest::setUp() {
BaseClonerTest::setUp();
options = {};
collectionCloner.reset(nullptr);
- collectionCloner = stdx::make_unique<CollectionCloner>(
- &getExecutor(),
- dbWorkThreadPool.get(),
- target,
- nss,
- options,
- stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1),
- storageInterface.get(),
- defaultBatchSize,
- defaultNumCloningCursors);
+ collectionCloner = stdx::make_unique<CollectionCloner>(&getExecutor(),
+ dbWorkThreadPool.get(),
+ target,
+ nss,
+ options,
+ setStatusCallback(),
+ storageInterface.get(),
+ defaultBatchSize,
+ defaultNumCloningCursors);
collectionStats = CollectionMockStats();
storageInterface->createCollectionForBulkFn =
[this](const NamespaceString& nss,
@@ -362,16 +366,15 @@ TEST_F(CollectionClonerTest,
return str::equals("listIndexes", request.cmdObj.firstElementFieldName());
});
- collectionCloner = stdx::make_unique<CollectionCloner>(
- &_executorProxy,
- dbWorkThreadPool.get(),
- target,
- nss,
- options,
- stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1),
- storageInterface.get(),
- defaultBatchSize,
- defaultNumCloningCursors);
+ collectionCloner = stdx::make_unique<CollectionCloner>(&_executorProxy,
+ dbWorkThreadPool.get(),
+ target,
+ nss,
+ options,
+ setStatusCallback(),
+ storageInterface.get(),
+ defaultBatchSize,
+ defaultNumCloningCursors);
ASSERT_OK(collectionCloner->startup());
@@ -386,16 +389,15 @@ TEST_F(CollectionClonerTest,
TEST_F(CollectionClonerTest, DoNotCreateIDIndexIfAutoIndexIdUsed) {
options = {};
options.autoIndexId = CollectionOptions::NO;
- collectionCloner.reset(new CollectionCloner(
- &getExecutor(),
- dbWorkThreadPool.get(),
- target,
- nss,
- options,
- stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1),
- storageInterface.get(),
- defaultBatchSize,
- defaultNumCloningCursors));
+ collectionCloner.reset(new CollectionCloner(&getExecutor(),
+ dbWorkThreadPool.get(),
+ target,
+ nss,
+ options,
+ setStatusCallback(),
+ storageInterface.get(),
+ defaultBatchSize,
+ defaultNumCloningCursors));
NamespaceString collNss;
CollectionOptions collOptions;
@@ -1409,16 +1411,15 @@ protected:
void startupWithUUID(int maxNumCloningCursors = 1) {
collectionCloner.reset();
options.uuid = UUID::gen();
- collectionCloner = stdx::make_unique<CollectionCloner>(
- &getExecutor(),
- dbWorkThreadPool.get(),
- target,
- alternateNss,
- options,
- stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1),
- storageInterface.get(),
- defaultBatchSize,
- maxNumCloningCursors);
+ collectionCloner = stdx::make_unique<CollectionCloner>(&getExecutor(),
+ dbWorkThreadPool.get(),
+ target,
+ alternateNss,
+ options,
+ setStatusCallback(),
+ storageInterface.get(),
+ defaultBatchSize,
+ maxNumCloningCursors);
ASSERT_OK(collectionCloner->startup());
}
@@ -1575,6 +1576,10 @@ protected:
void tearDown() override;
std::vector<BSONObj> generateDocs(std::size_t numDocs);
+ auto setStatusCallback() {
+ return [this](const Status& s) { setStatus(s); };
+ }
+
// A simple arbitrary value to use as the default batch size.
const int defaultBatchSize = 1024;
@@ -1592,16 +1597,15 @@ void ParallelCollectionClonerTest::setUp() {
BaseClonerTest::setUp();
options = {};
collectionCloner.reset(nullptr);
- collectionCloner = stdx::make_unique<CollectionCloner>(
- &getExecutor(),
- dbWorkThreadPool.get(),
- target,
- nss,
- options,
- stdx::bind(&CollectionClonerTest::setStatus, this, stdx::placeholders::_1),
- storageInterface.get(),
- defaultBatchSize,
- defaultNumCloningCursors);
+ collectionCloner = stdx::make_unique<CollectionCloner>(&getExecutor(),
+ dbWorkThreadPool.get(),
+ target,
+ nss,
+ options,
+ setStatusCallback(),
+ storageInterface.get(),
+ defaultBatchSize,
+ defaultNumCloningCursors);
collectionStats = CollectionMockStats();
storageInterface->createCollectionForBulkFn =
[this](const NamespaceString& nss,
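
For readers outside the MongoDB tree, the helper-returns-a-lambda shape introduced above can be reproduced in a standalone sketch. Everything here (the Fixture class, the simplified Status struct, runAndReport) is hypothetical scaffolding; only the setStatusCallback() body mirrors the diff, and the std::function parameter merely approximates the callable type the real CollectionCloner constructor accepts:

    #include <functional>
    #include <iostream>
    #include <string>

    // Stand-in for mongo::Status: carries only an error string for this sketch.
    struct Status {
        std::string reason;
    };

    class Fixture {
    public:
        // Helper mirroring setStatusCallback(): returns a lambda that captures
        // `this` and forwards the status to a member function.
        auto setStatusCallback() {
            return [this](const Status& s) { setStatus(s); };
        }

        void setStatus(const Status& s) { _lastStatus = s; }
        const Status& lastStatus() const { return _lastStatus; }

    private:
        Status _lastStatus;
    };

    // A component that accepts the callback through std::function, standing in
    // for the callable parameter of the CollectionCloner constructor.
    void runAndReport(const std::function<void(const Status&)>& onCompletion) {
        onCompletion(Status{"ok"});
    }

    int main() {
        Fixture fixture;
        runAndReport(fixture.setStatusCallback());
        std::cout << fixture.lastStatus().reason << std::endl;  // prints "ok"
        return 0;
    }

Returning the lambda from a helper keeps each constructor call down to one short argument and avoids repeating the capture list at every site, which is what the diff achieves in the two test fixtures.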