Diffstat (limited to 'src/mongo/dbtests')
-rw-r--r--  src/mongo/dbtests/basictests.cpp  |   4
-rw-r--r--  src/mongo/dbtests/clienttests.cpp  |   4
-rw-r--r--  src/mongo/dbtests/commandtests.cpp  |  10
-rw-r--r--  src/mongo/dbtests/counttests.cpp  |   9
-rw-r--r--  src/mongo/dbtests/dbhelper_tests.cpp  |   2
-rw-r--r--  src/mongo/dbtests/deferred_writer.cpp  |   5
-rw-r--r--  src/mongo/dbtests/directclienttests.cpp  |   5
-rw-r--r--  src/mongo/dbtests/framework.h  |   2
-rw-r--r--  src/mongo/dbtests/framework_options.cpp  |   2
-rw-r--r--  src/mongo/dbtests/framework_options.h  |   2
-rw-r--r--  src/mongo/dbtests/framework_options_init.cpp  |   2
-rw-r--r--  src/mongo/dbtests/indexcatalogtests.cpp  |   6
-rw-r--r--  src/mongo/dbtests/indexupdatetests.cpp  | 168
-rw-r--r--  src/mongo/dbtests/jsobjtests.cpp  |  85
-rw-r--r--  src/mongo/dbtests/jsontests.cpp  |   3
-rw-r--r--  src/mongo/dbtests/jstests.cpp  |  20
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_connection.cpp  |   2
-rw-r--r--  src/mongo/dbtests/mock/mock_dbclient_connection.h  |   2
-rw-r--r--  src/mongo/dbtests/mock/mock_remote_db_server.cpp  |   2
-rw-r--r--  src/mongo/dbtests/mock/mock_replica_set.cpp  |   2
-rw-r--r--  src/mongo/dbtests/mock/mock_replica_set.h  |   2
-rw-r--r--  src/mongo/dbtests/mock_dbclient_conn_test.cpp  |  12
-rw-r--r--  src/mongo/dbtests/mock_replica_set_test.cpp  |   2
-rw-r--r--  src/mongo/dbtests/multikey_paths_test.cpp  |  45
-rw-r--r--  src/mongo/dbtests/plan_executor_invalidation_test.cpp  |   6
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp  |   2
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp  |   2
-rw-r--r--  src/mongo/dbtests/query_stage_count.cpp  |   5
-rw-r--r--  src/mongo/dbtests/query_stage_ixscan.cpp  |   3
-rw-r--r--  src/mongo/dbtests/query_stage_merge_sort.cpp  |   4
-rw-r--r--  src/mongo/dbtests/query_stage_near.cpp  |   2
-rw-r--r--  src/mongo/dbtests/query_stage_sort.cpp  |   2
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp  |   3
-rw-r--r--  src/mongo/dbtests/query_stage_tests.cpp  |   2
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp  |   2
-rw-r--r--  src/mongo/dbtests/querytests.cpp  |  38
-rw-r--r--  src/mongo/dbtests/replica_set_monitor_test.cpp  |  24
-rw-r--r--  src/mongo/dbtests/repltests.cpp  |   2
-rw-r--r--  src/mongo/dbtests/rollbacktests.cpp  |   4
-rw-r--r--  src/mongo/dbtests/storage_timestamp_tests.cpp  | 500
-rw-r--r--  src/mongo/dbtests/threadedtests.cpp  |   2
-rw-r--r--  src/mongo/dbtests/updatetests.cpp  |  19
-rw-r--r--  src/mongo/dbtests/validate_tests.cpp  | 195
43 files changed, 404 insertions(+), 811 deletions(-)
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index c6a53840094..c3160ef54bd 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -42,14 +42,14 @@
namespace BasicTests {
-using std::unique_ptr;
-using std::shared_ptr;
using std::cout;
using std::dec;
using std::endl;
using std::hex;
+using std::shared_ptr;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
class RarelyTest {
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
index 1c71b2b8e84..bdb97bed3df 100644
--- a/src/mongo/dbtests/clienttests.cpp
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -40,8 +40,8 @@
namespace ClientTests {
-using std::unique_ptr;
using std::string;
+using std::unique_ptr;
using std::vector;
class Base {
@@ -407,4 +407,4 @@ public:
};
SuiteInstance<All> all;
-}
+} // namespace ClientTests
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index 8be8065bf54..f8822b0a315 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -159,7 +159,7 @@ struct Type2 : Base {
ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result["md5"].valuestr());
}
};
-}
+} // namespace FileMD5
namespace SymbolArgument {
// SERVER-16260
@@ -318,12 +318,10 @@ public:
cmd.append("indexes",
BSON_ARRAY(BSON("key" << BSON("loc"
<< "geoHaystack"
- << "z"
- << 1.0)
+ << "z" << 1.0)
<< "name"
<< "loc_geoHaystack_z_1"
- << "bucketSize"
- << static_cast<double>(0.7))));
+ << "bucketSize" << static_cast<double>(0.7))));
BSONObj result;
ASSERT(db.runCommand(nsDb(), cmd.obj(), result));
@@ -403,4 +401,4 @@ public:
};
SuiteInstance<All> all;
-}
+} // namespace CommandTests
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 36a59a07d90..9f820418793 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -58,12 +58,9 @@ public:
_collection = _database->createCollection(&_opCtx, nss());
IndexCatalog* indexCatalog = _collection->getIndexCatalog();
- auto indexSpec =
- BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion) << "ns" << ns()
- << "key"
- << BSON("a" << 1)
- << "name"
- << "a_1");
+ auto indexSpec = BSON("v" << static_cast<int>(IndexDescriptor::kLatestIndexVersion)
+ << "ns" << ns() << "key" << BSON("a" << 1) << "name"
+ << "a_1");
uassertStatusOK(indexCatalog->createIndexOnEmptyCollection(&_opCtx, indexSpec));
wunit.commit();
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index 1a3f889eb8e..427faa513de 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -45,8 +45,8 @@ namespace mongo {
namespace {
-using std::unique_ptr;
using std::set;
+using std::unique_ptr;
/**
* Unit tests related to DBHelpers
diff --git a/src/mongo/dbtests/deferred_writer.cpp b/src/mongo/dbtests/deferred_writer.cpp
index fe3122e5b54..da51278c392 100644
--- a/src/mongo/dbtests/deferred_writer.cpp
+++ b/src/mongo/dbtests/deferred_writer.cpp
@@ -33,7 +33,6 @@
#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/client.h"
-#include "mongo/db/client.h"
#include "mongo/db/concurrency/deferred_writer.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
@@ -57,7 +56,7 @@ struct BSONObjCompare {
return SimpleBSONObjComparator::kInstance.compare(lhs, rhs) < 0;
}
};
-}
+} // namespace
static const NamespaceString kTestNamespace("unittests", "deferred_writer_tests");
@@ -384,4 +383,4 @@ public:
add<DeferredWriterTestAsync>();
}
} deferredWriterTests;
-}
+} // namespace deferred_writer_tests
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index a87f38463cf..6ca684003d9 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -77,10 +77,7 @@ public:
BSONObj info;
BSONObj cmd = BSON("captrunc"
<< "b"
- << "n"
- << 1
- << "inc"
- << true);
+ << "n" << 1 << "inc" << true);
// cout << cmd.toString() << endl;
bool ok = client.runCommand("a", cmd, info);
// cout << info.toString() << endl;
diff --git a/src/mongo/dbtests/framework.h b/src/mongo/dbtests/framework.h
index a7a0f57090d..8ed12ba9faf 100644
--- a/src/mongo/dbtests/framework.h
+++ b/src/mongo/dbtests/framework.h
@@ -37,5 +37,5 @@
namespace mongo {
namespace dbtests {
int runDbTests(int argc, char** argv);
-} // dbtests
+} // namespace dbtests
} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options.cpp b/src/mongo/dbtests/framework_options.cpp
index ea4f54b65d8..e24c9dd9898 100644
--- a/src/mongo/dbtests/framework_options.cpp
+++ b/src/mongo/dbtests/framework_options.cpp
@@ -138,4 +138,4 @@ Status storeTestFrameworkOptions(const moe::Environment& params,
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options.h b/src/mongo/dbtests/framework_options.h
index 602bef0b35c..b79b4eca905 100644
--- a/src/mongo/dbtests/framework_options.h
+++ b/src/mongo/dbtests/framework_options.h
@@ -68,4 +68,4 @@ bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
Status storeTestFrameworkOptions(const moe::Environment& params,
const std::vector<std::string>& args);
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/framework_options_init.cpp b/src/mongo/dbtests/framework_options_init.cpp
index 5fb68b995c0..9ecf7993499 100644
--- a/src/mongo/dbtests/framework_options_init.cpp
+++ b/src/mongo/dbtests/framework_options_init.cpp
@@ -67,4 +67,4 @@ MONGO_INITIALIZER_GENERAL(CoreOptions_Store, MONGO_NO_PREREQUISITES, MONGO_NO_DE
(InitializerContext* context) {
return Status::OK();
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 512a3e4b087..9ac94209601 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -147,9 +147,7 @@ public:
&opCtx,
_nss.ns(),
BSON("name" << indexName << "ns" << _nss.ns() << "key" << BSON("x" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "expireAfterSeconds"
- << 5)));
+ << static_cast<int>(kIndexVersion) << "expireAfterSeconds" << 5)));
const IndexDescriptor* desc = _catalog->findIndexByName(&opCtx, indexName);
ASSERT(desc);
@@ -194,4 +192,4 @@ public:
};
SuiteInstance<IndexCatalogTests> indexCatalogTests;
-}
+} // namespace IndexCatalogTests
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index bd479539a13..63ed34e3871 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -134,16 +134,9 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "unique"
- << true
- << "background"
- << background);
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "unique" << true
+ << "background" << background);
ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });
@@ -189,16 +182,9 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "unique"
- << true
- << "background"
- << background);
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "unique" << true
+ << "background" << background);
ON_BLOCK_EXIT([&] { indexer.cleanUpAfterBuild(&_opCtx, coll); });
@@ -242,8 +228,7 @@ public:
getGlobalServiceContext()->setKillAllOperations();
BSONObj indexInfo = BSON("key" << BSON("a" << 1) << "ns" << _ns << "name"
<< "a_1"
- << "v"
- << static_cast<int>(kIndexVersion));
+ << "v" << static_cast<int>(kIndexVersion));
// The call is interrupted because mayInterrupt == true.
ASSERT_TRUE(buildIndexInterrupted(indexInfo));
// only want to interrupt the index build
@@ -286,8 +271,7 @@ public:
getGlobalServiceContext()->setKillAllOperations();
BSONObj indexInfo = BSON("key" << BSON("_id" << 1) << "ns" << _ns << "name"
<< "_id_"
- << "v"
- << static_cast<int>(kIndexVersion));
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_TRUE(buildIndexInterrupted(indexInfo));
// only want to interrupt the index build
getGlobalServiceContext()->unsetKillAllOperations();
@@ -333,11 +317,7 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -350,13 +330,8 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "unique"
- << true
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "unique" << true << "key"
+ << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -367,11 +342,7 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("x" << 1 << "y" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -384,11 +355,7 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns"
- << _ns
- << "key"
- << BSON("y" << 1 << "x" << 1)
- << "v"
+ << "ns" << _ns << "key" << BSON("y" << 1 << "x" << 1) << "v"
<< static_cast<int>(kIndexVersion))));
}
};
@@ -402,19 +369,11 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -428,19 +387,11 @@ public:
createIndex("unittests",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "expireAfterSeconds"
- << 3600
- << "sparse"
- << true
- << "unique"
- << 1
- << "key"
+ << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
+ << true << "unique" << 1 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -452,19 +403,11 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super"
- << "ns"
- << _ns
- << "expireAfterSeconds"
- << 3600
- << "sparse"
- << true
- << "unique"
- << 1
- << "key"
+ << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
+ << true << "unique" << 1 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -478,44 +421,27 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "unique"
- << false
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
+ << "ns" << _ns << "unique" << false << "sparse" << true
+ << "expireAfterSeconds" << 3600 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
class SameSpecDifferentSparse : public ComplexIndex {
public:
void run() {
- ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
- << "super2"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << false
- << "background"
- << true
- << "expireAfterSeconds"
- << 3600
- << "key"
- << BSON("superIdx"
- << "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ ASSERT_EQUALS(
+ ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "super2"
+ << "ns" << _ns << "unique" << 1 << "sparse" << false << "background"
+ << true << "expireAfterSeconds" << 3600 << "key"
+ << BSON("superIdx"
+ << "2d")
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -526,19 +452,11 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "unique"
- << 1
- << "sparse"
- << true
- << "expireAfterSeconds"
- << 2400
- << "key"
+ << "ns" << _ns << "unique" << 1 << "sparse" << true
+ << "expireAfterSeconds" << 2400 << "key"
<< BSON("superIdx"
<< "2d")
- << "v"
- << static_cast<int>(kIndexVersion))));
+ << "v" << static_cast<int>(kIndexVersion))));
}
};
@@ -585,14 +503,8 @@ protected:
BSONObj _createSpec(T storageEngineValue) {
return BSON("name"
<< "super2"
- << "ns"
- << _ns
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "storageEngine"
- << storageEngineValue);
+ << "ns" << _ns << "key" << BSON("a" << 1) << "v"
+ << static_cast<int>(kIndexVersion) << "storageEngine" << storageEngineValue);
}
};
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index aa4db803b11..56a58e6e852 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -1163,18 +1163,13 @@ class LabelShares : public LabelBase {
BSONObj expected() {
return BSON("z"
<< "q"
- << "a"
- << (BSON("$gt" << 1))
- << "x"
+ << "a" << (BSON("$gt" << 1)) << "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << "x"
+ << "a" << GT << 1 << "x"
<< "p");
}
};
@@ -1202,11 +1197,7 @@ class LabelDoubleShares : public LabelBase {
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << LTE
- << "x"
+ << "a" << GT << 1 << LTE << "x"
<< "x"
<< "p");
}
@@ -1231,27 +1222,15 @@ class LabelMulti : public LabelBase {
<< "b"
<< BSON("$ne" << 1 << "$ne"
<< "f"
- << "$ne"
- << 22.3)
+ << "$ne" << 22.3)
<< "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a"
- << GT
- << 1
- << LTE
- << "x"
- << "b"
- << NE
- << 1
- << NE
- << "f"
- << NE
- << 22.3
- << "x"
+ << "a" << GT << 1 << LTE << "x"
+ << "b" << NE << 1 << NE << "f" << NE << 22.3 << "x"
<< "p");
}
};
@@ -1261,8 +1240,7 @@ class LabelishOr : public LabelBase {
<< "x"))
<< BSON("b" << BSON("$ne" << 1 << "$ne"
<< "f"
- << "$ne"
- << 22.3))
+ << "$ne" << 22.3))
<< BSON("x"
<< "p")));
}
@@ -1614,9 +1592,7 @@ struct BSONArrayBuilderTest {
BSONObjBuilder objb;
BSONArrayBuilder arrb;
- auto fieldNameGenerator = [i = 0]() mutable {
- return std::to_string(i++);
- };
+ auto fieldNameGenerator = [i = 0]() mutable { return std::to_string(i++); };
objb << fieldNameGenerator() << 100;
arrb << 100;
@@ -1630,8 +1606,9 @@ struct BSONArrayBuilderTest {
objb << fieldNameGenerator() << string("World");
arrb << string("World");
- objb << fieldNameGenerator() << BSON("a" << 1 << "b"
- << "foo");
+ objb << fieldNameGenerator()
+ << BSON("a" << 1 << "b"
+ << "foo");
arrb << BSON("a" << 1 << "b"
<< "foo");
@@ -1685,14 +1662,13 @@ struct BSONArrayBuilderTest {
struct ArrayMacroTest {
void run() {
- BSONArray arr = BSON_ARRAY("hello" << 1 << BSON("foo" << BSON_ARRAY("bar"
- << "baz"
- << "qux")));
+ BSONArray arr = BSON_ARRAY("hello" << 1
+ << BSON("foo" << BSON_ARRAY("bar"
+ << "baz"
+ << "qux")));
BSONObj obj = BSON("0"
<< "hello"
- << "1"
- << 1
- << "2"
+ << "1" << 1 << "2"
<< BSON("foo" << BSON_ARRAY("bar"
<< "baz"
<< "qux")));
@@ -1799,38 +1775,26 @@ public:
// DBRef stuff -- json parser can't handle this yet
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1)));
+ << "$id" << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
+ << "$id" << 1 << "$db"
<< "a")));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "stuff"
- << 1)));
+ << "$id" << 1 << "stuff" << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
+ << "$id" << 1 << "$db"
<< "a"
- << "stuff"
- << 1)));
+ << "stuff" << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1 << "$db"
<< "a")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$db"
- << 1)));
+ << "$id" << 1 << "$db" << 1)));
bad(BSON("a" << BSON("$ref"
<< "coll")));
bad(BSON("a" << BSON("$ref"
@@ -1842,10 +1806,7 @@ public:
<< "coll")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id"
- << 1
- << "$hater"
- << 1)));
+ << "$id" << 1 << "$hater" << 1)));
}
};
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
index c70457b73e8..69476f19b77 100644
--- a/src/mongo/dbtests/jsontests.cpp
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -939,7 +939,8 @@ TEST(FromJsonTest, NumericTypes) {
double d;
};
const Val vals[] = {
- {123, kMaxS64, 3.14}, {-123, -kMaxS64, -3.14},
+ {123, kMaxS64, 3.14},
+ {-123, -kMaxS64, -3.14},
};
for (const Val& val : vals) {
const BSONObj obj =
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 051bc478dbc..44ccf7b6b5a 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -411,8 +411,7 @@ public:
<< "eliot"
<< "z"
<< "sara"
- << "zz"
- << BSONObj());
+ << "zz" << BSONObj());
s->setObject("blah", o, true);
BSONObj out;
@@ -1239,7 +1238,22 @@ class NovelNaN {
public:
void run() {
uint8_t bits[] = {
- 16, 0, 0, 0, 0x01, 'a', '\0', 0x61, 0x79, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
+ 16,
+ 0,
+ 0,
+ 0,
+ 0x01,
+ 'a',
+ '\0',
+ 0x61,
+ 0x79,
+ 0xfe,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0,
};
unique_ptr<Scope> s((getGlobalScriptEngine()->*scopeFactory)());
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index 41ca3fb7a14..59abe29a460 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -182,4 +182,4 @@ void MockDBClientConnection::checkConnection() {
_remoteServerInstanceID = _remoteServer->getInstanceID();
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index aaa4968d58e..afe818fb4ae 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -134,4 +134,4 @@ private:
uint64_t _sockCreationTime;
bool _autoReconnect;
};
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index f1253cc7fd9..6488e5023ff 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -239,4 +239,4 @@ void MockRemoteDBServer::checkIfUp(InstanceID id) const {
throwSocketError(mongo::SocketErrorKind::CLOSED, _hostAndPort);
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp
index fb17ad1a960..a028f8a0530 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.cpp
+++ b/src/mongo/dbtests/mock/mock_replica_set.cpp
@@ -350,4 +350,4 @@ void MockReplicaSet::mockReplSetGetStatusCmd() {
node->setCommandReply("replSetGetStatus", fullStatBuilder.done());
}
}
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock/mock_replica_set.h b/src/mongo/dbtests/mock/mock_replica_set.h
index a2f442d8beb..01929b0e203 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.h
+++ b/src/mongo/dbtests/mock/mock_replica_set.h
@@ -150,4 +150,4 @@ private:
std::string _primaryHost;
};
-}
+} // namespace mongo
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
index 555f982002b..236b80d45d4 100644
--- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp
+++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
@@ -414,16 +414,10 @@ TEST(MockDBClientConnTest, CyclingCmd) {
vector<BSONObj> isMasterSequence;
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster"
- << true
- << "ok"
- << 1));
+ << "isMaster" << true << "ok" << 1));
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster"
- << false
- << "ok"
- << 1));
+ << "isMaster" << false << "ok" << 1));
server.setCommandReply("isMaster", isMasterSequence);
}
@@ -630,4 +624,4 @@ TEST(MockDBClientConnTest, Delay) {
ASSERT_EQUALS(1U, server.getQueryCount());
ASSERT_EQUALS(1U, server.getCmdCount());
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp
index 18f3a93a444..398c6dfd85d 100644
--- a/src/mongo/dbtests/mock_replica_set_test.cpp
+++ b/src/mongo/dbtests/mock_replica_set_test.cpp
@@ -424,4 +424,4 @@ TEST(MockReplicaSetTest, KillMultipleNode) {
const string priHostName(replSet.getPrimary());
ASSERT(replSet.getNode(priHostName)->isRunning());
}
-}
+} // namespace mongo_test
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index d12ce069a54..8f0759f69b1 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -96,8 +96,7 @@ public:
const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
ASSERT_TRUE(match);
}
@@ -143,11 +142,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -177,11 +172,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -197,11 +188,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -239,11 +226,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -292,11 +275,7 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPattern
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPattern << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -337,11 +316,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPatternAB
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPatternAB << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
@@ -349,11 +324,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_c_1"
- << "ns"
- << _nss.ns()
- << "key"
- << keyPatternAC
- << "v"
+ << "ns" << _nss.ns() << "key" << keyPatternAC << "v"
<< static_cast<int>(kIndexVersion)))
.transitional_ignore();
{
diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
index 48e9a7adfb1..61eedeab72f 100644
--- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp
+++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp
@@ -354,8 +354,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnCollectionRenameWithinDatabas
ASSERT_TRUE(_client.runCommand("admin",
BSON("renameCollection" << nss.ns() << "to"
<< "unittests.new_collection_name"
- << "dropTarget"
- << true),
+ << "dropTarget" << true),
info));
ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
@@ -381,8 +380,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase)
ASSERT_TRUE(_client.runCommand("admin",
BSON("renameCollection" << nss.ns() << "to"
<< "unittests.new_collection_name"
- << "dropTarget"
- << true),
+ << "dropTarget" << true),
info));
ASSERT_THROWS_CODE(exec->restoreState(), DBException, ErrorCodes::QueryPlanKilled);
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index a9fe6623848..1433e8a2558 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -715,4 +715,4 @@ public:
SuiteInstance<All> planRankingAll;
-} // namespace PlanRankingTest
+} // namespace PlanRankingTests
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 49f6d6429cc..29677d86e8c 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -65,7 +65,7 @@ std::unique_ptr<CanonicalQuery> canonicalQueryFromFilterObj(OperationContext* op
uassertStatusOK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
-}
+} // namespace
class QueryStageCachedPlan : public unittest::Test {
public:
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index 003beb748d9..87e66a58918 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -75,10 +75,7 @@ public:
->createIndexOnEmptyCollection(&_opCtx,
BSON("key" << BSON("x" << 1) << "name"
<< "x_1"
- << "ns"
- << ns()
- << "v"
- << 1))
+ << "ns" << ns() << "v" << 1))
.status_with_transitional_ignore();
for (int i = 0; i < kDocuments; i++) {
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index d38c26eb009..3217ebd979b 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -62,8 +62,7 @@ public:
ASSERT_OK(_coll->getIndexCatalog()->createIndexOnEmptyCollection(
&_opCtx,
BSON("ns" << ns() << "key" << BSON("x" << 1) << "name"
- << DBClientBase::genIndexName(BSON("x" << 1))
- << "v"
+ << DBClientBase::genIndexName(BSON("x" << 1)) << "v"
<< static_cast<int>(kIndexVersion))));
wunit.commit();
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 5656174f7b3..e4e564aef78 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -54,10 +54,10 @@
namespace QueryStageMergeSortTests {
+using std::make_unique;
using std::set;
using std::string;
using std::unique_ptr;
-using std::make_unique;
class QueryStageMergeSortTestBase {
public:
@@ -884,4 +884,4 @@ public:
SuiteInstance<All> queryStageMergeSortTest;
-} // namespace
+} // namespace QueryStageMergeSortTests
diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp
index df400c3bd1f..40f5ba3a5c5 100644
--- a/src/mongo/dbtests/query_stage_near.cpp
+++ b/src/mongo/dbtests/query_stage_near.cpp
@@ -234,4 +234,4 @@ TEST_F(QueryStageNearTest, EmptyResults) {
ASSERT_EQUALS(results.size(), 3u);
assertAscendingAndValid(results);
}
-}
+} // namespace
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 1982273bc79..5b855933793 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -598,4 +598,4 @@ public:
SuiteInstance<All> queryStageSortTest;
-} // namespace
+} // namespace QueryStageSortTests
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index c369ce03b7f..be533ba7142 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -120,8 +120,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) {
dbtests::WriteContextForTests ctx(opCtx(), nss.ns());
addIndex(BSON("a"
<< "2d"
- << "b"
- << 1));
+ << "b" << 1));
addIndex(BSON("a"
<< "2d"));
diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp
index ffa55f0040b..f9178555ce2 100644
--- a/src/mongo/dbtests/query_stage_tests.cpp
+++ b/src/mongo/dbtests/query_stage_tests.cpp
@@ -242,4 +242,4 @@ public:
SuiteInstance<All> queryStageTestsAll;
-} // namespace
+} // namespace QueryStageTests
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index adafcaadccd..c6c67923ae5 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -66,9 +66,9 @@
namespace QueryStageUpdate {
+using std::make_unique;
using std::unique_ptr;
using std::vector;
-using std::make_unique;
static const NamespaceString nss("unittests.QueryStageUpdate");
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 0114a3563d5..6e74f6c1571 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -57,9 +57,9 @@
namespace {
namespace QueryTests {
-using std::unique_ptr;
using std::endl;
using std::string;
+using std::unique_ptr;
using std::vector;
class Base {
@@ -234,8 +234,7 @@ public:
bool ok = cl.runCommand("unittests",
BSON("godinsert"
<< "querytests"
- << "obj"
- << BSONObj()),
+ << "obj" << BSONObj()),
info);
ASSERT(ok);
@@ -650,12 +649,7 @@ public:
_client.runCommand("unittests",
BSON("create"
<< "querytests.TailableQueryOnId"
- << "capped"
- << true
- << "size"
- << 8192
- << "autoIndexId"
- << true),
+ << "capped" << true << "size" << 8192 << "autoIndexId" << true),
info);
insertA(ns, 0);
insertA(ns, 1);
@@ -1537,12 +1531,7 @@ public:
_client.runCommand("local",
BSON("create"
<< "oplog.querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
- << false),
+ << "capped" << true << "size" << 4096 << "autoIndexId" << false),
info);
// WiredTiger storage engines forbid dropping of the oplog. Evergreen reuses nodes for
// testing, so the oplog may already exist on the test node; in this case, trying to create
@@ -1611,12 +1600,7 @@ public:
_client.runCommand("local",
BSON("create"
<< "oplog.querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
- << false),
+ << "capped" << true << "size" << 4096 << "autoIndexId" << false),
info);
// WiredTiger storage engines forbid dropping of the oplog. Evergreen reuses nodes for
// testing, so the oplog may already exist on the test node; in this case, trying to create
@@ -1690,12 +1674,7 @@ public:
_client.runCommand("local",
BSON("create"
<< "oplog.querytests.findingstart"
- << "capped"
- << true
- << "size"
- << 4096
- << "autoIndexId"
- << false),
+ << "capped" << true << "size" << 4096 << "autoIndexId" << false),
info);
// WiredTiger storage engines forbid dropping of the oplog. Evergreen reuses nodes for
// testing, so the oplog may already exist on the test node; in this case, trying to create
@@ -1792,10 +1771,7 @@ public:
ASSERT(_client.runCommand("unittests",
BSON("create"
<< "querytests.exhaust"
- << "capped"
- << true
- << "size"
- << 8192),
+ << "capped" << true << "size" << 8192),
info));
_client.insert(ns(), BSON("ts" << Timestamp(1000, 0)));
Message message;
diff --git a/src/mongo/dbtests/replica_set_monitor_test.cpp b/src/mongo/dbtests/replica_set_monitor_test.cpp
index 4477d727839..81472049c19 100644
--- a/src/mongo/dbtests/replica_set_monitor_test.cpp
+++ b/src/mongo/dbtests/replica_set_monitor_test.cpp
@@ -45,10 +45,10 @@ namespace mongo {
namespace {
using std::map;
-using std::vector;
using std::set;
using std::string;
using std::unique_ptr;
+using std::vector;
using unittest::assertGet;
MONGO_INITIALIZER(DisableReplicaSetMonitorRefreshRetries)(InitializerContext*) {
@@ -216,22 +216,24 @@ protected:
const string host(_replSet->getPrimary());
const mongo::repl::MemberConfig* member =
oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON(
- "_id" << member->getId().getData() << "host" << host << "tags" << BSON("dc"
- << "ny"
- << "num"
- << "1")));
+ membersBuilder.append(BSON("_id" << member->getId().getData() << "host" << host
+ << "tags"
+ << BSON("dc"
+ << "ny"
+ << "num"
+ << "1")));
}
{
const string host(_replSet->getSecondaries().front());
const mongo::repl::MemberConfig* member =
oldConfig.findMemberByHostAndPort(HostAndPort(host));
- membersBuilder.append(BSON(
- "_id" << member->getId().getData() << "host" << host << "tags" << BSON("dc"
- << "ny"
- << "num"
- << "2")));
+ membersBuilder.append(BSON("_id" << member->getId().getData() << "host" << host
+ << "tags"
+ << BSON("dc"
+ << "ny"
+ << "num"
+ << "2")));
}
membersBuilder.done();
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index cc19591c47e..8a7edd7d7b2 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -55,10 +55,10 @@ using namespace mongo::repl;
namespace ReplTests {
-using std::unique_ptr;
using std::endl;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
/**
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index 4dfcfee6c66..317be739e89 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -42,10 +42,10 @@
#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
-using std::unique_ptr;
+using mongo::unittest::assertGet;
using std::list;
using std::string;
-using mongo::unittest::assertGet;
+using std::unique_ptr;
namespace RollbackTests {
diff --git a/src/mongo/dbtests/storage_timestamp_tests.cpp b/src/mongo/dbtests/storage_timestamp_tests.cpp
index 4a0d996cd55..a42955f4646 100644
--- a/src/mongo/dbtests/storage_timestamp_tests.cpp
+++ b/src/mongo/dbtests/storage_timestamp_tests.cpp
@@ -124,7 +124,7 @@ public:
private:
OperationContext* _opCtx;
};
-}
+} // namespace
const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2;
@@ -258,12 +258,12 @@ public:
BSONObj indexInfoObj;
{
- auto swIndexInfoObj = indexer.init(
- _opCtx,
- coll,
- {BSON("v" << 2 << "name" << indexName << "ns" << coll->ns().ns() << "key"
- << indexKey)},
- MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, coll));
+ auto swIndexInfoObj =
+ indexer.init(_opCtx,
+ coll,
+ {BSON("v" << 2 << "name" << indexName << "ns" << coll->ns().ns()
+ << "key" << indexKey)},
+ MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, coll));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
}
@@ -389,11 +389,11 @@ public:
const BSONObj& expectedDoc) {
OneOffRead oor(_opCtx, ts);
if (expectedDoc.isEmpty()) {
- ASSERT_EQ(0, itCount(coll)) << "Should not find any documents in " << coll->ns()
- << " at ts: " << ts;
+ ASSERT_EQ(0, itCount(coll))
+ << "Should not find any documents in " << coll->ns() << " at ts: " << ts;
} else {
- ASSERT_EQ(1, itCount(coll)) << "Should find one document in " << coll->ns()
- << " at ts: " << ts;
+ ASSERT_EQ(1, itCount(coll))
+ << "Should find one document in " << coll->ns() << " at ts: " << ts;
auto doc = findOne(coll);
ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, expectedDoc))
<< "Doc: " << doc.toString() << " Expected: " << expectedDoc.toString();
@@ -670,8 +670,7 @@ public:
const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: "
- << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
}
ASSERT_TRUE(match);
}
@@ -706,23 +705,16 @@ public:
nss.db().toString(),
BSON("applyOps" << BSON_ARRAY(
BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
- << "v"
- << 2
- << "op"
+ << "v" << 2 << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << idx))
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << idx))
<< BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL
<< "op"
<< "c"
<< "ns"
<< "test.$cmd"
- << "o"
- << BSON("applyOps" << BSONArrayBuilder().obj())))),
+ << "o" << BSON("applyOps" << BSONArrayBuilder().obj())))),
repl::OplogApplication::Mode::kApplyOpsCmd,
&result));
}
@@ -824,20 +816,14 @@ public:
// Delete all documents one at a time.
const LogicalTime startDeleteTime = _clock->reserveTicks(docsToInsert);
for (std::int32_t num = 0; num < docsToInsert; ++num) {
- ASSERT_OK(
- doNonAtomicApplyOps(
- nss.db().toString(),
- {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL << "v"
- << 2
- << "op"
- << "d"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << num))})
- .getStatus());
+ ASSERT_OK(doNonAtomicApplyOps(
+ nss.db().toString(),
+ {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL
+ << "v" << 2 << "op"
+ << "d"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << num))})
+ .getStatus());
}
for (std::int32_t num = 0; num <= docsToInsert; ++num) {
@@ -889,22 +875,14 @@ public:
const LogicalTime firstUpdateTime = _clock->reserveTicks(updates.size());
for (std::size_t idx = 0; idx < updates.size(); ++idx) {
- ASSERT_OK(
- doNonAtomicApplyOps(
- nss.db().toString(),
- {BSON("ts" << firstUpdateTime.addTicks(idx).asTimestamp() << "t" << 0LL << "v"
- << 2
- << "op"
- << "u"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o2"
- << BSON("_id" << 0)
- << "o"
- << updates[idx].first)})
- .getStatus());
+ ASSERT_OK(doNonAtomicApplyOps(
+ nss.db().toString(),
+ {BSON("ts" << firstUpdateTime.addTicks(idx).asTimestamp() << "t" << 0LL
+ << "v" << 2 << "op"
+ << "u"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o2" << BSON("_id" << 0) << "o" << updates[idx].first)})
+ .getStatus());
}
for (std::size_t idx = 0; idx < updates.size(); ++idx) {
@@ -941,19 +919,11 @@ public:
nss.db().toString(),
{BSON("ts" << insertTime.asTimestamp() << "t" << 1LL << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() << "o"
<< BSON("_id" << 0 << "field" << 0)),
BSON("ts" << insertTime.addTicks(1).asTimestamp() << "t" << 1LL << "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() << "o"
<< BSON("_id" << 0))}));
ASSERT_EQ(2, result.getIntField("applied"));
@@ -992,23 +962,16 @@ public:
// Reserve a timestamp before the inserts should happen.
const LogicalTime preInsertTimestamp = _clock->reserveTicks(1);
- auto swResult = doAtomicApplyOps(nss.db().toString(),
- {BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 0)),
- BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 1))});
+ auto swResult =
+ doAtomicApplyOps(nss.db().toString(),
+ {BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 0)),
+ BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 1))});
ASSERT_OK(swResult);
ASSERT_EQ(2, swResult.getValue().getIntField("applied"));
@@ -1051,23 +1014,16 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
const LogicalTime preInsertTimestamp = _clock->reserveTicks(1);
- auto swResult = doAtomicApplyOps(nss.db().toString(),
- {BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 0 << "field" << 0)),
- BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << BSON("_id" << 0))});
+ auto swResult =
+ doAtomicApplyOps(nss.db().toString(),
+ {BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 0 << "field" << 0)),
+ BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << BSON("_id" << 0))});
ASSERT_OK(swResult);
ASSERT_EQ(2, swResult.getValue().getIntField("applied"));
@@ -1105,17 +1061,14 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(nss.db().toString(),
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ nss.db().toString(),
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "o"
+ << BSON("create" << nss.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
@@ -1146,25 +1099,18 @@ public:
const Timestamp dummyTs = dummyLt.asTimestamp();
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(dbName,
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss1.getCommandNS().ns()
- << "o"
- << BSON("create" << nss1.coll())),
- BSON("ts" << futureTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss2.getCommandNS().ns()
- << "o"
- << BSON("create" << nss2.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ dbName,
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss1.getCommandNS().ns() << "o"
+ << BSON("create" << nss1.coll())),
+ BSON("ts" << futureTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss2.getCommandNS().ns() << "o"
+ << BSON("create" << nss2.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss1).getCollection()); }
@@ -1212,33 +1158,21 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss2).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(dbName,
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss1.ns()
- << "ui"
- << autoColl.getCollection()->uuid()
- << "o"
- << doc1),
- BSON("ts" << futureTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << uuid2
- << "ns"
- << nss2.getCommandNS().ns()
- << "o"
- << BSON("create" << nss2.coll())),
- BSON("ts" << insert2Ts << "t" << 1LL << "op"
- << "i"
- << "ns"
- << nss2.ns()
- << "ui"
- << uuid2
- << "o"
- << doc2),
- });
+ auto swResult = doNonAtomicApplyOps(
+ dbName,
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "i"
+ << "ns" << nss1.ns() << "ui" << autoColl.getCollection()->uuid()
+ << "o" << doc1),
+ BSON("ts" << futureTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << uuid2 << "ns" << nss2.getCommandNS().ns() << "o"
+ << BSON("create" << nss2.coll())),
+ BSON("ts" << insert2Ts << "t" << 1LL << "op"
+ << "i"
+ << "ns" << nss2.ns() << "ui" << uuid2 << "o" << doc2),
+ });
ASSERT_OK(swResult);
}
@@ -1283,17 +1217,14 @@ public:
{ ASSERT_FALSE(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
BSONObjBuilder resultBuilder;
- auto swResult = doNonAtomicApplyOps(nss.db().toString(),
- {
- BSON("ts" << presentTs << "t" << 1LL << "op"
- << "c"
- << "ui"
- << UUID::gen()
- << "ns"
- << nss.getCommandNS().ns()
- << "o"
- << BSON("create" << nss.coll())),
- });
+ auto swResult = doNonAtomicApplyOps(
+ nss.db().toString(),
+ {
+ BSON("ts" << presentTs << "t" << 1LL << "op"
+ << "c"
+ << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "o"
+ << BSON("create" << nss.coll())),
+ });
ASSERT_OK(swResult);
{ ASSERT(AutoGetCollectionForReadCommand(_opCtx, nss).getCollection()); }
@@ -1331,9 +1262,8 @@ public:
uuid = autoColl.getCollection()->uuid();
}
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
_coordinatorMock->alwaysAllowWrites(false);
@@ -1349,30 +1279,15 @@ public:
auto op0 = repl::OplogEntry(BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc0));
auto op1 = repl::OplogEntry(BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc1));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc1));
auto op2 = repl::OplogEntry(BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc2));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc2));
std::vector<repl::OplogEntry> ops = {op0, op1, op2};
DoNothingOplogApplierObserver observer;
@@ -1416,9 +1331,8 @@ public:
uuid = autoColl.getCollection()->uuid();
}
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
_coordinatorMock->alwaysAllowWrites(false);
@@ -1436,45 +1350,23 @@ public:
auto op0 = repl::OplogEntry(BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc0));
auto op1 = repl::OplogEntry(BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc1));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc1));
auto op2 = repl::OplogEntry(BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2
<< "op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << uuid
- << "o"
- << doc2));
+ << "ns" << nss.ns() << "ui" << uuid << "o" << doc2));
auto indexSpec2 = BSON("createIndexes" << nss.coll() << "ns" << nss.ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("b" << 1)
- << "name"
+ << static_cast<int>(kIndexVersion) << "key"
+ << BSON("b" << 1) << "name"
<< "b_1");
auto createIndexOp = repl::OplogEntry(
BSON("ts" << indexBuildTime.asTimestamp() << "t" << 1LL << "v" << 2 << "op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "ui"
- << uuid
- << "o"
- << indexSpec2));
+ << "ns" << nss.getCommandNS().ns() << "ui" << uuid << "o" << indexSpec2));
// We add in an index creation op to test that we restart tracking multikey path info
// after bulk index builds.
@@ -1535,9 +1427,8 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
const LogicalTime pastTime = _clock->reserveTicks(1);
@@ -1565,9 +1456,8 @@ public:
AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
ASSERT_OK(dbtests::createIndexFromSpec(_opCtx, nss.ns(), indexSpec));
const LogicalTime pastTime = _clock->reserveTicks(1);
@@ -1598,9 +1488,8 @@ public:
reset(nss);
auto indexName = "a_1";
- auto indexSpec =
- BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion));
+ auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion));
auto doc = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2));
{
@@ -1990,10 +1879,7 @@ public:
autoColl.getCollection(),
{BSON("v" << 2 << "unique" << true << "name"
<< "a_1"
- << "ns"
- << nss.ns()
- << "key"
- << BSON("a" << 1))},
+ << "ns" << nss.ns() << "key" << BSON("a" << 1))},
MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, autoColl.getCollection()));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
@@ -2101,10 +1987,7 @@ public:
autoColl.getCollection(),
{BSON("v" << 2 << "unique" << true << "name"
<< "a_1"
- << "ns"
- << nss.ns()
- << "key"
- << BSON("a" << 1))},
+ << "ns" << nss.ns() << "key" << BSON("a" << 1))},
MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, autoColl.getCollection()));
ASSERT_OK(swIndexInfoObj.getStatus());
indexInfoObj = std::move(swIndexInfoObj.getValue()[0]);
@@ -2259,8 +2142,7 @@ public:
const Timestamp indexAComplete = queryOplog(BSON("op"
<< "c"
- << "o.createIndexes"
- << nss.coll()
+ << "o.createIndexes" << nss.coll()
<< "o.name"
<< "a_1"))["ts"]
.timestamp();
@@ -2350,9 +2232,9 @@ public:
BSON("renameCollection" << nss.ns() << "to" << renamedNss.ns() << "dropTarget" << true),
renameResult);
- const auto createIndexesDocument = queryOplog(BSON("ns" << renamedNss.db() + ".$cmd"
- << "o.createIndexes"
- << BSON("$exists" << true)));
+ const auto createIndexesDocument =
+ queryOplog(BSON("ns" << renamedNss.db() + ".$cmd"
+ << "o.createIndexes" << BSON("$exists" << true)));
// Find index creation timestamps.
const auto createIndexesString =
@@ -2365,15 +2247,13 @@ public:
const Timestamp indexCreateInitTs = queryOplog(BSON("op"
<< "c"
- << "o.create"
- << tmpName.coll()))["ts"]
+ << "o.create" << tmpName.coll()))["ts"]
.timestamp();
const Timestamp indexAComplete = createIndexesDocument["ts"].timestamp();
const Timestamp indexBComplete = queryOplog(BSON("op"
<< "c"
- << "o.createIndexes"
- << tmpName.coll()
+ << "o.createIndexes" << tmpName.coll()
<< "o.name"
<< "b_1"))["ts"]
.timestamp();
@@ -2552,14 +2432,10 @@ public:
// Make a simple insert operation.
BSONObj doc0 = BSON("_id" << 0 << "a" << 0);
- auto insertOp = repl::OplogEntry(BSON("ts" << futureTs << "t" << 1LL << "v" << 2 << "op"
- << "i"
- << "ns"
- << ns.ns()
- << "ui"
- << uuid
- << "o"
- << doc0));
+ auto insertOp =
+ repl::OplogEntry(BSON("ts" << futureTs << "t" << 1LL << "v" << 2 << "op"
+ << "i"
+ << "ns" << ns.ns() << "ui" << uuid << "o" << doc0));
// Apply the operation.
auto storageInterface = repl::StorageInterface::get(_opCtx);
@@ -2642,20 +2518,14 @@ public:
}
auto indexSpec = BSON("createIndexes" << nss.coll() << "ns" << nss.ns() << "v"
- << static_cast<int>(kIndexVersion)
- << "key"
- << BSON("field" << 1)
- << "name"
+ << static_cast<int>(kIndexVersion) << "key"
+ << BSON("field" << 1) << "name"
<< "field_1");
auto createIndexOp = BSON("ts" << startBuildTs << "t" << 1LL << "v" << 2 << "op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "ui"
- << collUUID
- << "o"
- << indexSpec);
+ << "ns" << nss.getCommandNS().ns() << "ui" << collUUID
+ << "o" << indexSpec);
ASSERT_OK(doAtomicApplyOps(nss.db().toString(), {createIndexOp}));
@@ -2690,21 +2560,17 @@ public:
ASSERT_OK(createCollection(_opCtx,
viewNss.db().toString(),
BSON("create" << viewNss.coll() << "pipeline" << BSONArray()
- << "viewOn"
- << backingCollNss.coll())));
+ << "viewOn" << backingCollNss.coll())));
const Timestamp systemViewsCreateTs = queryOplog(BSON("op"
<< "c"
- << "ns"
- << (viewNss.db() + ".$cmd")
+ << "ns" << (viewNss.db() + ".$cmd")
<< "o.create"
<< "system.views"))["ts"]
.timestamp();
const Timestamp viewCreateTs = queryOplog(BSON("op"
<< "i"
- << "ns"
- << systemViewsNss.ns()
- << "o._id"
+ << "ns" << systemViewsNss.ns() << "o._id"
<< viewNss.ns()))["ts"]
.timestamp();
@@ -2721,11 +2587,11 @@ public:
AutoGetCollection autoColl(_opCtx, systemViewsNss, LockMode::MODE_IS);
assertDocumentAtTimestamp(autoColl.getCollection(), systemViewsCreateTs, BSONObj());
- assertDocumentAtTimestamp(
- autoColl.getCollection(),
- viewCreateTs,
- BSON("_id" << viewNss.ns() << "viewOn" << backingCollNss.coll() << "pipeline"
- << BSONArray()));
+ assertDocumentAtTimestamp(autoColl.getCollection(),
+ viewCreateTs,
+ BSON("_id" << viewNss.ns() << "viewOn"
+ << backingCollNss.coll() << "pipeline"
+ << BSONArray()));
}
}
};
@@ -2752,9 +2618,7 @@ public:
BSONObj result = queryOplog(BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o.create"
+ << "ns" << nss.getCommandNS().ns() << "o.create"
<< nss.coll()));
repl::OplogEntry op(result);
// The logOp() call for createCollection should have timestamp 'futureTs', which will also
@@ -2770,9 +2634,7 @@ public:
result = queryOplog(BSON("op"
<< "c"
- << "ns"
- << nss.getCommandNS().ns()
- << "o.createIndexes"
+ << "ns" << nss.getCommandNS().ns() << "o.createIndexes"
<< nss.coll()));
repl::OplogEntry indexOp(result);
ASSERT_EQ(indexOp.getObject()["name"].str(), "user_1_db_1");
@@ -2976,17 +2838,13 @@ public:
assertFilteredDocumentAtTimestamp(coll, query2, nullTs, doc2);
// Implicit commit oplog entry should exist at commitEntryTs.
- const auto commitFilter = BSON(
- "ts" << commitEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc2))
- << "count"
- << 2));
+ const auto commitFilter =
+ BSON("ts" << commitEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc2))
+ << "count" << 2));
assertOplogDocumentExistsAtTimestamp(commitFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(commitFilter, firstOplogEntryTs, false);
@@ -3006,14 +2864,9 @@ public:
BSON("ts" << firstOplogEntryTs << "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc))
- << "partialTxn"
- << true));
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc))
+ << "partialTxn" << true));
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true);
@@ -3185,14 +3038,9 @@ public:
BSON("ts" << firstOplogEntryTs << "o"
<< BSON("applyOps" << BSON_ARRAY(BSON("op"
<< "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc))
- << "partialTxn"
- << true));
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc))
+ << "partialTxn" << true));
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true);
@@ -3200,19 +3048,13 @@ public:
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, commitEntryTs, true);
assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, nullTs, true);
// The prepare oplog entry should exist at prepareEntryTs and onwards.
- const auto prepareOplogEntryFilter = BSON(
- "ts" << prepareEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << coll->uuid()
- << "o"
- << doc2))
- << "prepare"
- << true
- << "count"
- << 2));
+ const auto prepareOplogEntryFilter =
+ BSON("ts" << prepareEntryTs << "o"
+ << BSON("applyOps" << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui"
+ << coll->uuid() << "o" << doc2))
+ << "prepare" << true << "count" << 2));
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, firstOplogEntryTs, false);
@@ -3325,17 +3167,13 @@ public:
}
// The prepare oplog entry should exist at firstOplogEntryTs and onwards.
- const auto prepareOplogEntryFilter =
- BSON("ts" << prepareEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op"
- << "i"
- << "ns"
- << nss.ns()
- << "ui"
- << ui
- << "o"
- << doc))
- << "prepare"
- << true));
+ const auto prepareOplogEntryFilter = BSON(
+ "ts" << prepareEntryTs << "o"
+ << BSON("applyOps"
+ << BSON_ARRAY(BSON("op"
+ << "i"
+ << "ns" << nss.ns() << "ui" << ui << "o" << doc))
+ << "prepare" << true));
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false);
assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, prepareEntryTs, true);
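
Note: the storage_timestamp_tests.cpp hunks above are whitespace-only. The BSON stream-builder chains that assemble oplog filters (applyOps, partialTxn, prepare, count) are reflowed so that non-string operands share a line while string-literal values still break onto their own line. A minimal sketch of why this is behavior-preserving, assuming the usual "mongo/db/jsobj.h" umbrella header; the function name and the cmdNs/collName parameters are illustrative stand-ins, not code from the tests:

#include <string>

#include "mongo/db/jsobj.h"

// Both layouts are the same expression tree; only the whitespace differs.
mongo::BSONObj oplogFilterSketch(const std::string& cmdNs, const std::string& collName) {
    // Old layout: one operand per line.
    mongo::BSONObj before = BSON("op"
                                 << "c"
                                 << "ns"
                                 << cmdNs
                                 << "o.create"
                                 << collName);
    // New layout: non-string operands packed, string-literal values on their own line.
    mongo::BSONObj after = BSON("op"
                                << "c"
                                << "ns" << cmdNs << "o.create"
                                << collName);
    // The two objects compare binary-equal, so the tests' assertions are unaffected.
    return after.binaryEqual(before) ? after : mongo::BSONObj();
}
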
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 1dd468576eb..92d741bbb92 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -48,10 +48,10 @@
namespace ThreadedTests {
-using std::unique_ptr;
using std::cout;
using std::endl;
using std::string;
+using std::unique_ptr;
template <int nthreads_param = 10>
class ThreadedTest {
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 106960fb6c2..62bed088466 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -47,10 +47,10 @@
namespace UpdateTests {
-using std::unique_ptr;
using std::numeric_limits;
using std::string;
using std::stringstream;
+using std::unique_ptr;
using std::vector;
namespace dps = ::mongo::dotted_path_support;
@@ -1665,8 +1665,8 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,x:[{a:1},{a:3}]}"));
// { $push : { x : { $each : [ {a:2} ], $sort: {a:1}, $slice:-2 } } }
- BSONObj pushObj = BSON(
- "$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$slice" << -2.0);
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$slice" << -2.0);
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
BSONObj result = _client.findOne(ns(), Query());
@@ -1680,9 +1680,8 @@ public:
BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:3}]}");
_client.insert(ns(), expected);
// { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $sort: {a:1} } } }
- BSONObj pushObj =
- BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$sort"
- << BSON("a" << 1));
+ BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
+ << "$sort" << BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
ASSERT_BSONOBJ_EQ(result, expected);
@@ -1763,9 +1762,7 @@ public:
ns(), BSON("_id" << 0 << "a" << 1 << "x" << BSONObj() << "x" << BSONObj() << "z" << 5));
_client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
- << BSONObj()
- << "z"
- << 5),
+ << BSONObj() << "z" << 5),
_client.findOne(ns(), BSONObj()));
}
};
@@ -1779,9 +1776,7 @@ public:
_client.update(
ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
- << BSONObj()
- << "x"
- << BSONObj()),
+ << BSONObj() << "x" << BSONObj()),
_client.findOne(ns(), BSONObj()));
}
};
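
Note: the updatetests.cpp hunks apply the same reflow to the $push modifier objects; the test's own comment spells out the document being built. A sketch of that construction, under the same header assumption and with an illustrative function name:

#include "mongo/db/jsobj.h"

// Builds { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $slice : -2 } } },
// the update document shown in the hunk above.
mongo::BSONObj pushWithSortAndSliceSketch() {
    mongo::BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
                                          << "$slice" << -2.0);
    return BSON("$push" << BSON("x" << pushObj));
}
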
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 925d0a88b9a..39f48384421 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -229,18 +229,14 @@ public:
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -305,18 +301,14 @@ public:
wunit.commit();
}
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -467,14 +459,10 @@ public:
coll->ns().ns(),
BSON("name"
<< "multikey_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a.b" << 1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a.b" << 1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -541,20 +529,14 @@ public:
}
// Create a sparse index.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "sparse_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "sparse"
- << true));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "sparse_index"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "sparse" << true));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -614,20 +596,15 @@ public:
}
// Create a partial index.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1)
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$gt" << 1))));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$gt" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -680,38 +657,30 @@ public:
}
// Create a partial geo index that indexes the document. This should return an error.
- ASSERT_NOT_OK(dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("x"
- << "2dsphere")
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 2)))));
+ ASSERT_NOT_OK(
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("x"
+ << "2dsphere")
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$eq" << 2)))));
// Create a partial geo index that does not index the document.
- auto status = dbtests::createIndexFromSpec(&_opCtx,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("x"
- << "2dsphere")
- << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false
- << "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 1))));
+ auto status =
+ dbtests::createIndexFromSpec(&_opCtx,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("x"
+ << "2dsphere")
+ << "v" << static_cast<int>(kIndexVersion)
+ << "background" << false << "partialFilterExpression"
+ << BSON("a" << BSON("$eq" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
releaseDb();
@@ -766,28 +735,20 @@ public:
coll->ns().ns(),
BSON("name"
<< "compound_index_1"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << 1 << "b" << -1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a" << 1 << "b" << -1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
status = dbtests::createIndexFromSpec(&_opCtx,
coll->ns().ns(),
BSON("name"
<< "compound_index_2"
- << "ns"
- << coll->ns().ns()
- << "key"
- << BSON("a" << -1 << "b" << 1)
- << "v"
+ << "ns" << coll->ns().ns() << "key"
+ << BSON("a" << -1 << "b" << 1) << "v"
<< static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -846,9 +807,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -930,9 +889,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -979,9 +936,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert non-multikey documents.
@@ -1090,9 +1045,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents with indexed and not-indexed paths.
@@ -1183,9 +1136,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
@@ -1272,9 +1223,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
@@ -1380,9 +1329,7 @@ public:
&_opCtx,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << indexKey << "v"
- << static_cast<int>(kIndexVersion)
- << "background"
- << false));
+ << static_cast<int>(kIndexVersion) << "background" << false));
ASSERT_OK(status);
// Insert documents.
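
Note: the validate_tests.cpp hunks (which continue beyond this excerpt) all reshape the same createIndexFromSpec argument. A sketch of the spec object they build, with nsString and indexVersion as illustrative stand-ins for coll->ns().ns() and kIndexVersion:

#include <string>

#include "mongo/db/jsobj.h"

// Index spec in the reflowed layout; the fields match the hunks above.
mongo::BSONObj indexSpecSketch(const std::string& nsString, int indexVersion) {
    return BSON("name"
                << "a"
                << "ns" << nsString << "key" << BSON("a" << 1)
                << "v" << indexVersion
                << "background" << false);
}
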