Diffstat (limited to 'src/mongo/db')
-rw-r--r-- src/mongo/db/auth/authorization_manager.cpp | 18
-rw-r--r-- src/mongo/db/auth/authorization_session_test.cpp | 22
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_local.cpp | 5
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_mock.cpp | 2
-rw-r--r-- src/mongo/db/auth/role_graph_test.cpp | 8
-rw-r--r-- src/mongo/db/auth/sasl_commands.cpp | 2
-rw-r--r-- src/mongo/db/auth/sasl_scramsha1_test.cpp | 57
-rw-r--r-- src/mongo/db/catalog/capped_utils.cpp | 6
-rw-r--r-- src/mongo/db/catalog/coll_mod.cpp | 8
-rw-r--r-- src/mongo/db/catalog/collection_compact.cpp | 2
-rw-r--r-- src/mongo/db/catalog/collection_impl.cpp | 4
-rw-r--r-- src/mongo/db/catalog/collection_options_test.cpp | 2
-rw-r--r-- src/mongo/db/catalog/database_holder_impl.cpp | 10
-rw-r--r-- src/mongo/db/catalog/database_impl.cpp | 5
-rw-r--r-- src/mongo/db/catalog/database_test.cpp | 4
-rw-r--r-- src/mongo/db/catalog/index_catalog_impl.cpp | 6
-rw-r--r-- src/mongo/db/catalog/rename_collection.cpp | 2
-rw-r--r-- src/mongo/db/clientcursor.cpp | 4
-rw-r--r-- src/mongo/db/commands/authentication_commands.cpp | 2
-rw-r--r-- src/mongo/db/commands/index_filter_commands.cpp | 6
-rw-r--r-- src/mongo/db/commands/mr.cpp | 4
-rw-r--r-- src/mongo/db/commands/parameters.cpp | 6
-rw-r--r-- src/mongo/db/commands/plan_cache_commands_test.cpp | 20
-rw-r--r-- src/mongo/db/commands/user_management_commands.cpp | 3
-rw-r--r-- src/mongo/db/commands/user_management_commands_common.cpp | 2
-rw-r--r-- src/mongo/db/commands/write_commands/write_commands.cpp | 2
-rw-r--r-- src/mongo/db/db.cpp | 4
-rw-r--r-- src/mongo/db/exec/cached_plan.cpp | 2
-rw-r--r-- src/mongo/db/exec/geo_near.cpp | 4
-rw-r--r-- src/mongo/db/exec/multi_plan.cpp | 7
-rw-r--r-- src/mongo/db/exec/projection_exec.cpp | 4
-rw-r--r-- src/mongo/db/exec/sort_key_generator.cpp | 2
-rw-r--r-- src/mongo/db/ftdc/compressor.cpp | 3
-rw-r--r-- src/mongo/db/ftdc/file_manager.cpp | 2
-rw-r--r-- src/mongo/db/ftdc/file_manager_test.cpp | 6
-rw-r--r-- src/mongo/db/ftdc/file_writer.cpp | 2
-rw-r--r-- src/mongo/db/ftdc/file_writer_test.cpp | 6
-rw-r--r-- src/mongo/db/geo/r2_region_coverer_test.cpp | 12
-rw-r--r-- src/mongo/db/initialize_server_global_state.cpp | 2
-rw-r--r-- src/mongo/db/introspect.cpp | 4
-rw-r--r-- src/mongo/db/keys_collection_manager_test.cpp | 7
-rw-r--r-- src/mongo/db/logical_clock_test.cpp | 3
-rw-r--r-- src/mongo/db/logical_session_cache_test.cpp | 22
-rw-r--r-- src/mongo/db/matcher/expression_algo.cpp | 4
-rw-r--r-- src/mongo/db/matcher/expression_array.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_array.h | 6
-rw-r--r-- src/mongo/db/matcher/expression_array_test.cpp | 14
-rw-r--r-- src/mongo/db/matcher/expression_geo.cpp | 4
-rw-r--r-- src/mongo/db/matcher/expression_leaf.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_leaf.h | 18
-rw-r--r-- src/mongo/db/matcher/expression_leaf_test.cpp | 108
-rw-r--r-- src/mongo/db/matcher/expression_parser_geo_test.cpp | 35
-rw-r--r-- src/mongo/db/matcher/expression_test.cpp | 10
-rw-r--r-- src/mongo/db/matcher/expression_tree.h | 2
-rw-r--r-- src/mongo/db/matcher/path.cpp | 14
-rw-r--r-- src/mongo/db/mongod_options.cpp | 16
-rw-r--r-- src/mongo/db/ops/modifier_add_to_set.cpp | 2
-rw-r--r-- src/mongo/db/ops/modifier_pull.cpp | 2
-rw-r--r-- src/mongo/db/ops/modifier_pull_all.cpp | 2
-rw-r--r-- src/mongo/db/ops/modifier_push.cpp | 2
-rw-r--r-- src/mongo/db/ops/modifier_set_test.cpp | 6
-rw-r--r-- src/mongo/db/ops/write_ops_exec.cpp | 5
-rw-r--r-- src/mongo/db/pipeline/document_source_match.cpp | 4
-rw-r--r-- src/mongo/db/prefetch.cpp | 4
-rw-r--r-- src/mongo/db/query/canonical_query.cpp | 6
-rw-r--r-- src/mongo/db/query/canonical_query_test.cpp | 2
-rw-r--r-- src/mongo/db/query/planner_analysis_test.cpp | 2
-rw-r--r-- src/mongo/db/query/query_planner.cpp | 2
-rw-r--r-- src/mongo/db/repl/bgsync.cpp | 4
-rw-r--r-- src/mongo/db/repl/check_quorum_for_config_change_test.cpp | 2
-rw-r--r-- src/mongo/db/repl/collection_cloner_test.cpp | 6
-rw-r--r-- src/mongo/db/repl/database_cloner_test.cpp | 3
-rw-r--r-- src/mongo/db/repl/databases_cloner_test.cpp | 3
-rw-r--r-- src/mongo/db/repl/drop_pending_collection_reaper_test.cpp | 2
-rw-r--r-- src/mongo/db/repl/elect_cmd_runner_test.cpp | 2
-rw-r--r-- src/mongo/db/repl/freshness_checker_test.cpp | 24
-rw-r--r-- src/mongo/db/repl/initial_syncer.cpp | 2
-rw-r--r-- src/mongo/db/repl/initial_syncer_test.cpp | 11
-rw-r--r-- src/mongo/db/repl/oplog.cpp | 2
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 4
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.cpp | 24
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect.cpp | 14
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp | 20
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp | 13
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp | 26
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 59
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp | 2
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp | 40
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_test.cpp | 74
-rw-r--r-- src/mongo/db/repl/reporter.cpp | 2
-rw-r--r-- src/mongo/db/repl/rollback_test_fixture.cpp | 2
-rw-r--r-- src/mongo/db/repl/rs_rollback_test.cpp | 20
-rw-r--r-- src/mongo/db/repl/scatter_gather_test.cpp | 16
-rw-r--r-- src/mongo/db/repl/storage_interface_impl_test.cpp | 2
-rw-r--r-- src/mongo/db/repl/sync_source_resolver.cpp | 32
-rw-r--r-- src/mongo/db/repl/topology_coordinator_impl.cpp | 4
-rw-r--r-- src/mongo/db/repl/topology_coordinator_impl_test.cpp | 271
-rw-r--r-- src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp | 328
-rw-r--r-- src/mongo/db/s/balancer/balancer.cpp | 8
-rw-r--r-- src/mongo/db/s/balancer/migration_manager.cpp | 8
-rw-r--r-- src/mongo/db/s/collection_range_deleter.cpp | 2
-rw-r--r-- src/mongo/db/s/collection_range_deleter_test.cpp | 3
-rw-r--r-- src/mongo/db/s/metadata_manager.cpp | 22
-rw-r--r-- src/mongo/db/s/metadata_manager_test.cpp | 3
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 3
-rw-r--r-- src/mongo/db/s/migration_destination_manager.cpp | 2
-rw-r--r-- src/mongo/db/s/migration_source_manager.cpp | 57
-rw-r--r-- src/mongo/db/s/move_chunk_command.cpp | 3
-rw-r--r-- src/mongo/db/s/move_timing_helper.cpp | 12
-rw-r--r-- src/mongo/db/s/sharding_state_recovery.cpp | 3
-rw-r--r-- src/mongo/db/server_options_helpers.cpp | 2
-rw-r--r-- src/mongo/db/server_parameters_test.cpp | 10
-rw-r--r-- src/mongo/db/service_entry_point_mongod.cpp | 18
-rw-r--r-- src/mongo/db/service_liason_mock.cpp | 2
-rw-r--r-- src/mongo/db/storage/bson_collection_catalog_entry.cpp | 3
-rw-r--r-- src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp | 4
-rw-r--r-- src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp | 6
-rw-r--r-- src/mongo/db/storage/kv/kv_engine_test_harness.cpp | 5
-rw-r--r-- src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp | 2
-rw-r--r-- src/mongo/db/storage/kv/kv_storage_engine.cpp | 6
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp | 5
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp | 94
-rw-r--r-- src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp | 124
-rw-r--r-- src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/record_store_test_harness.cpp | 2
-rw-r--r-- src/mongo/db/storage/sorted_data_interface_test_harness.cpp | 32
-rw-r--r-- src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp | 8
-rw-r--r-- src/mongo/db/update/arithmetic_node_test.cpp | 10
-rw-r--r-- src/mongo/db/update/path_support_test.cpp | 2
-rw-r--r-- src/mongo/db/update/set_node_test.cpp | 8
-rw-r--r-- src/mongo/db/update/unset_node_test.cpp | 2
-rw-r--r-- src/mongo/db/update/update_array_node_test.cpp | 4
-rw-r--r-- src/mongo/db/update/update_driver_test.cpp | 6
-rw-r--r-- src/mongo/db/views/view_catalog_test.cpp | 3
134 files changed, 1164 insertions(+), 914 deletions(-)
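
Note on the pattern in the hunks below: each change takes a call whose Status (or StatusWith) return value was previously discarded and routes it through an explicit .transitional_ignore() (or .status_with_transitional_ignore()) marker, so the compiler's unused-result warning is satisfied while the knowingly ignored call sites stay greppable for later cleanup. The following is a minimal illustrative sketch, not MongoDB's actual mongo::Status implementation; it assumes a C++17 [[nodiscard]] class and a hypothetical doWork() helper purely to show how such a marker behaves at a call site.

// Sketch of a warn-unused-result status type with an explicit
// "I am knowingly dropping this" escape hatch. Illustrative only;
// the real mongo::Status differs.
#include <iostream>
#include <string>
#include <utility>

class [[nodiscard]] Status {
public:
    static Status OK() {
        return Status(true, "");
    }
    static Status Error(std::string reason) {
        return Status(false, std::move(reason));
    }
    bool isOK() const {
        return _ok;
    }
    const std::string& reason() const {
        return _reason;
    }
    // Explicitly discards the result: silences the unused-result warning
    // while leaving a searchable marker at the call site.
    void transitional_ignore() const noexcept {}

private:
    Status(bool ok, std::string reason) : _ok(ok), _reason(std::move(reason)) {}
    bool _ok;
    std::string _reason;
};

// Hypothetical helper, used only to demonstrate the call-site pattern.
Status doWork(bool succeed) {
    return succeed ? Status::OK() : Status::Error("boom");
}

int main() {
    if (!doWork(true).isOK()) {              // result checked: no warning
        std::cout << "failed\n";
    }
    doWork(false).transitional_ignore();     // result knowingly ignored
    // doWork(false);                        // would warn: [[nodiscard]] value discarded
    return 0;
}

The apparent motivation for a named member call, rather than a (void) cast, is that every ignored call site remains easy to find and can later be upgraded to a real status check or an invariant.
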
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index 53d8edb33da..697453cb764 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -338,7 +338,7 @@ Status AuthorizationManager::getBSONForPrivileges(const PrivilegeVector& privile
if (!ParsedPrivilege::privilegeToParsedPrivilege(*it, &privilege, &errmsg)) {
return Status(ErrorCodes::BadValue, errmsg);
}
- resultArray.appendObject("privileges", privilege.toBSON());
+ resultArray.appendObject("privileges", privilege.toBSON()).transitional_ignore();
}
return Status::OK();
}
@@ -352,14 +352,14 @@ Status AuthorizationManager::getBSONForRole(RoleGraph* graph,
<< "does not name an existing role");
}
std::string id = mongoutils::str::stream() << roleName.getDB() << "." << roleName.getRole();
- result.appendString("_id", id);
- result.appendString(ROLE_NAME_FIELD_NAME, roleName.getRole());
- result.appendString(ROLE_DB_FIELD_NAME, roleName.getDB());
+ result.appendString("_id", id).transitional_ignore();
+ result.appendString(ROLE_NAME_FIELD_NAME, roleName.getRole()).transitional_ignore();
+ result.appendString(ROLE_DB_FIELD_NAME, roleName.getDB()).transitional_ignore();
// Build privileges array
mutablebson::Element privilegesArrayElement =
result.getDocument().makeElementArray("privileges");
- result.pushBack(privilegesArrayElement);
+ result.pushBack(privilegesArrayElement).transitional_ignore();
const PrivilegeVector& privileges = graph->getDirectPrivileges(roleName);
Status status = getBSONForPrivileges(privileges, privilegesArrayElement);
if (!status.isOK()) {
@@ -368,14 +368,14 @@ Status AuthorizationManager::getBSONForRole(RoleGraph* graph,
// Build roles array
mutablebson::Element rolesArrayElement = result.getDocument().makeElementArray("roles");
- result.pushBack(rolesArrayElement);
+ result.pushBack(rolesArrayElement).transitional_ignore();
for (RoleNameIterator roles = graph->getDirectSubordinates(roleName); roles.more();
roles.next()) {
const RoleName& subRole = roles.get();
mutablebson::Element roleObj = result.getDocument().makeElementObject("");
- roleObj.appendString(ROLE_NAME_FIELD_NAME, subRole.getRole());
- roleObj.appendString(ROLE_DB_FIELD_NAME, subRole.getDB());
- rolesArrayElement.pushBack(roleObj);
+ roleObj.appendString(ROLE_NAME_FIELD_NAME, subRole.getRole()).transitional_ignore();
+ roleObj.appendString(ROLE_DB_FIELD_NAME, subRole.getDB()).transitional_ignore();
+ rolesArrayElement.pushBack(roleObj).transitional_ignore();
}
return Status::OK();
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index 71b7f00ee51..34ae2cfb8ed 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -432,8 +432,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
// Change the user to be read-only
int ignored;
- managerState->remove(
- &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ managerState
+ ->remove(
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored)
+ .transitional_ignore();
ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
@@ -461,8 +463,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
ASSERT(user->isValid());
// Delete the user.
- managerState->remove(
- &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ managerState
+ ->remove(
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored)
+ .transitional_ignore();
// Make sure that invalidating the user causes the session to reload its privileges.
authzManager->invalidateUserByName(user->getName());
authzSession->startRequest(&_opCtx); // Refreshes cached data for invalid users
@@ -502,8 +506,10 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
// Change the user to be read-only
int ignored;
managerState->setFindsShouldFail(true);
- managerState->remove(
- &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ managerState
+ ->remove(
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored)
+ .transitional_ignore();
ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
@@ -726,7 +732,9 @@ TEST_F(AuthorizationSessionTest, AddPrivilegesForStageFailsIfOutNamespaceIsNotVa
<< ""));
BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline);
ASSERT_THROWS_CODE(
- authzSession->checkAuthForAggregate(testFooNss, cmdObj, false), UserException, 17139);
+ authzSession->checkAuthForAggregate(testFooNss, cmdObj, false).transitional_ignore(),
+ UserException,
+ 17139);
}
TEST_F(AuthorizationSessionTest, CannotAggregateOutWithoutInsertAndRemoveOnTargetNamespace) {
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 9227e1d2dd2..29baf0cdf1c 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -345,8 +345,9 @@ Status AuthzManagerExternalStateLocal::_getRoleDescription_inlock(const RoleName
fassert(17323, resultDoc.root().pushBack(inheritedPrivilegesElement));
}
} else if (showPrivileges == PrivilegeFormat::kShowSeparate) {
- warningsElement.appendString(
- "", "Role graph state inconsistent; only direct privileges available.");
+ warningsElement
+ .appendString("", "Role graph state inconsistent; only direct privileges available.")
+ .transitional_ignore();
addPrivilegeObjectsOrWarningsToArrayElement(
privilegesElement, warningsElement, _roleGraph.getDirectPrivileges(roleName));
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index f4fef4fde08..933f0aff752 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -203,7 +203,7 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
return Status::OK();
} else if (status == ErrorCodes::NoMatchingDocument && upsert) {
if (query.hasField("_id")) {
- document.root().appendElement(query["_id"]);
+ document.root().appendElement(query["_id"]).transitional_ignore();
}
status = driver.populateDocumentWithQueryFields(opCtx, query, NULL, document);
if (!status.isOK()) {
diff --git a/src/mongo/db/auth/role_graph_test.cpp b/src/mongo/db/auth/role_graph_test.cpp
index 94752e36f4b..c9024b22ea0 100644
--- a/src/mongo/db/auth/role_graph_test.cpp
+++ b/src/mongo/db/auth/role_graph_test.cpp
@@ -529,7 +529,7 @@ TEST(RoleGraphTest, CopySwap) {
// Make a copy of the graph to do further modifications on.
RoleGraph tempGraph(graph);
ASSERT_OK(tempGraph.addRoleToRole(roleB, roleC));
- tempGraph.recomputePrivilegeData();
+ tempGraph.recomputePrivilegeData().transitional_ignore();
// Now swap the copy back with the original graph and make sure the original was updated
// properly.
@@ -686,10 +686,10 @@ TEST(RoleGraphTest, BuiltinRolesOnlyOnAppropriateDatabases) {
TEST(RoleGraphTest, getRolesForDatabase) {
RoleGraph graph;
- graph.createRole(RoleName("myRole", "test"));
+ graph.createRole(RoleName("myRole", "test")).transitional_ignore();
// Make sure that a role on "test2" doesn't show up in the roles list for "test"
- graph.createRole(RoleName("anotherRole", "test2"));
- graph.createRole(RoleName("myAdminRole", "admin"));
+ graph.createRole(RoleName("anotherRole", "test2")).transitional_ignore();
+ graph.createRole(RoleName("myAdminRole", "admin")).transitional_ignore();
// Non-admin DB with no user-defined roles
RoleNameIterator it = graph.getRolesForDatabase("fakedb");
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index e0fad877a25..33c74bca2dc 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -264,7 +264,7 @@ void CmdSaslStart::help(std::stringstream& os) const {
void CmdSaslStart::redactForLogging(mutablebson::Document* cmdObj) {
mutablebson::Element element = mutablebson::findFirstChildNamed(cmdObj->root(), "payload");
if (element.ok()) {
- element.setValueString("xxx");
+ element.setValueString("xxx").transitional_ignore();
}
}
diff --git a/src/mongo/db/auth/sasl_scramsha1_test.cpp b/src/mongo/db/auth/sasl_scramsha1_test.cpp
index c575d94ddc0..49d3d6a27c0 100644
--- a/src/mongo/db/auth/sasl_scramsha1_test.cpp
+++ b/src/mongo/db/auth/sasl_scramsha1_test.cpp
@@ -241,7 +241,8 @@ protected:
saslServerSession = stdx::make_unique<NativeSaslAuthenticationSession>(authzSession.get());
saslServerSession->setOpCtxt(opCtx.get());
- saslServerSession->start("test", "SCRAM-SHA-1", "mongodb", "MockServer.test", 1, false);
+ saslServerSession->start("test", "SCRAM-SHA-1", "mongodb", "MockServer.test", 1, false)
+ .transitional_ignore();
saslClientSession = stdx::make_unique<NativeSaslClientSession>();
saslClientSession->setParameter(NativeSaslClientSession::parameterMechanism, "SCRAM-SHA-1");
saslClientSession->setParameter(NativeSaslClientSession::parameterServiceName, "mongodb");
@@ -253,8 +254,10 @@ protected:
};
TEST_F(SCRAMSHA1Fixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -276,8 +279,10 @@ TEST_F(SCRAMSHA1Fixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
}
TEST_F(SCRAMSHA1Fixture, testClientStep2DoesNotIncludeNonceFromServerStep1) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -298,8 +303,10 @@ TEST_F(SCRAMSHA1Fixture, testClientStep2DoesNotIncludeNonceFromServerStep1) {
}
TEST_F(SCRAMSHA1Fixture, testClientStep2GivesBadProof) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -323,8 +330,10 @@ TEST_F(SCRAMSHA1Fixture, testClientStep2GivesBadProof) {
}
TEST_F(SCRAMSHA1Fixture, testServerStep2GivesBadVerifier) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -358,8 +367,10 @@ TEST_F(SCRAMSHA1Fixture, testServerStep2GivesBadVerifier) {
TEST_F(SCRAMSHA1Fixture, testSCRAM) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -371,8 +382,10 @@ TEST_F(SCRAMSHA1Fixture, testSCRAM) {
}
TEST_F(SCRAMSHA1Fixture, testNULLInPassword) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "saj\0ack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "saj\0ack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -385,8 +398,10 @@ TEST_F(SCRAMSHA1Fixture, testNULLInPassword) {
TEST_F(SCRAMSHA1Fixture, testCommasInUsernameAndPassword) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("s,a,jack", "s,a,jack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("s,a,jack", "s,a,jack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "s,a,jack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -410,8 +425,10 @@ TEST_F(SCRAMSHA1Fixture, testIncorrectUser) {
}
TEST_F(SCRAMSHA1Fixture, testIncorrectPassword) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -426,8 +443,10 @@ TEST_F(SCRAMSHA1Fixture, testIncorrectPassword) {
}
TEST_F(SCRAMSHA1Fixture, testMONGODBCR) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateMONGODBCRUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateMONGODBCRUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index d27d485a998..69bac1c04ff 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -214,8 +214,10 @@ mongo::Status mongo::cloneCollectionAsCapped(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
OpDebug* const nullOpDebug = nullptr;
- toCollection->insertDocument(
- opCtx, objToClone.value(), nullOpDebug, true, opCtx->writesAreReplicated());
+ toCollection
+ ->insertDocument(
+ opCtx, objToClone.value(), nullOpDebug, true, opCtx->writesAreReplicated())
+ .transitional_ignore();
wunit.commit();
// Go to the next document
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 741d5cfac86..b58b1ea96bf 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -173,7 +173,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
} else if (fieldName == "validationAction" && !isView) {
auto statusW = coll->parseValidationAction(e.String());
if (!statusW.isOK())
- statusW.getStatus();
+ return statusW.getStatus();
cmr.collValidationAction = e.String();
} else if (fieldName == "pipeline") {
@@ -371,15 +371,15 @@ mongo::Status mongo::collMod(OperationContext* opCtx,
// Validator
if (!cmr.collValidator.eoo())
- coll->setValidator(opCtx, cmr.collValidator.Obj());
+ coll->setValidator(opCtx, cmr.collValidator.Obj()).transitional_ignore();
// ValidationAction
if (!cmr.collValidationAction.empty())
- coll->setValidationAction(opCtx, cmr.collValidationAction);
+ coll->setValidationAction(opCtx, cmr.collValidationAction).transitional_ignore();
// ValidationLevel
if (!cmr.collValidationLevel.empty())
- coll->setValidationLevel(opCtx, cmr.collValidationLevel);
+ coll->setValidationLevel(opCtx, cmr.collValidationLevel).transitional_ignore();
// UsePowerof2Sizes
if (!cmr.usePowerOf2Sizes.eoo())
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index a4e1f40c603..fa94ca47f20 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -94,7 +94,7 @@ public:
}
virtual void inserted(const RecordData& recData, const RecordId& newLocation) {
- _multiIndexBlock->insert(recData.toBson(), newLocation);
+ _multiIndexBlock->insert(recData.toBson(), newLocation).transitional_ignore();
}
private:
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index d58b4ad933d..8aeb18796cb 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -193,7 +193,7 @@ CollectionImpl::CollectionImpl(Collection* _this_init,
void CollectionImpl::init(OperationContext* opCtx) {
_magic = kMagicNumber;
- _indexCatalog.init(opCtx);
+ _indexCatalog.init(opCtx).transitional_ignore();
if (isCapped())
_recordStore->setCappedCallback(this);
@@ -1236,7 +1236,7 @@ Status CollectionImpl::validate(OperationContext* opCtx,
IndexAccessMethod* iam = _indexCatalog.getIndex(descriptor);
ValidateResults curIndexResults;
int64_t numKeys;
- iam->validate(opCtx, &numKeys, &curIndexResults);
+ iam->validate(opCtx, &numKeys, &curIndexResults).transitional_ignore();
keysPerIndex.appendNumber(descriptor->indexNamespace(),
static_cast<long long>(numKeys));
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 9a22dbd270b..3c000f50243 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -39,7 +39,7 @@ namespace mongo {
void checkRoundTrip(const CollectionOptions& options1) {
CollectionOptions options2;
- options2.parse(options1.toBSON());
+ options2.parse(options1.toBSON()).transitional_ignore();
ASSERT_BSONOBJ_EQ(options1.toBSON(), options2.toBSON());
}
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index a9a7573eccc..4649fca4aa6 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -198,7 +198,10 @@ void DatabaseHolderImpl::close(OperationContext* opCtx, StringData ns, const std
delete it->second;
_dbs.erase(it);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, dbName.toString());
+ getGlobalServiceContext()
+ ->getGlobalStorageEngine()
+ ->closeDatabase(opCtx, dbName.toString())
+ .transitional_ignore();
}
bool DatabaseHolderImpl::closeAll(OperationContext* opCtx,
@@ -234,7 +237,10 @@ bool DatabaseHolderImpl::closeAll(OperationContext* opCtx,
_dbs.erase(name);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, name);
+ getGlobalServiceContext()
+ ->getGlobalStorageEngine()
+ ->closeDatabase(opCtx, name)
+ .transitional_ignore();
bb.append(name);
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index a000ce266ba..aa4eda0939e 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -740,7 +740,10 @@ void DatabaseImpl::dropDatabase(OperationContext* opCtx, Database* db) {
db = NULL; // d is now deleted
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(opCtx, name);
+ getGlobalServiceContext()
+ ->getGlobalStorageEngine()
+ ->dropDatabase(opCtx, name)
+ .transitional_ignore();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropDatabase", name);
}
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 0a71b6defbe..468c0850ba2 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -314,7 +314,9 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
ASSERT_GREATER_THAN(indexCatalog->numIndexesInProgress(opCtx), 0);
WriteUnitOfWork wuow(opCtx);
- ASSERT_THROWS_CODE(db->dropCollection(opCtx, nss.ns()), MsgAssertionException, 40461);
+ ASSERT_THROWS_CODE(db->dropCollection(opCtx, nss.ns()).transitional_ignore(),
+ MsgAssertionException,
+ 40461);
});
}
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 29183e04737..e9cfd3a4f0c 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -432,7 +432,7 @@ void IndexCatalogImpl::IndexBuildBlock::fail() {
invariant(entry == _entry);
if (entry) {
- IndexCatalogImpl::_dropIndex(_catalog, _opCtx, entry);
+ IndexCatalogImpl::_dropIndex(_catalog, _opCtx, entry).transitional_ignore();
} else {
IndexCatalog::_deleteIndexFromDisk(_catalog, _opCtx, _indexName, _indexNamespace);
}
@@ -890,7 +890,7 @@ void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
IndexCatalogEntry* entry = _entries.find(desc);
invariant(entry);
- _dropIndex(opCtx, entry);
+ _dropIndex(opCtx, entry).transitional_ignore();
if (droppedIndexes != nullptr) {
droppedIndexes->emplace(desc->indexName(), desc->infoObj());
@@ -1403,7 +1403,7 @@ void IndexCatalogImpl::unindexRecord(OperationContext* opCtx,
// If it's a background index, we DO NOT want to log anything.
bool logIfError = entry->isReady(opCtx) ? !noWarn : false;
- _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut);
+ _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut).transitional_ignore();
}
}
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index ee7b2de8743..8fdf3551b7b 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -227,7 +227,7 @@ Status renameCollection(OperationContext* opCtx,
}
indexesToCopy.push_back(newIndex.obj());
}
- indexer.init(indexesToCopy);
+ indexer.init(indexesToCopy).status_with_transitional_ignore();
}
{
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 889d9961607..4f169b2fa1f 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -140,7 +140,9 @@ void ClientCursor::updateSlaveLocation(OperationContext* opCtx) {
if (!rid.isSet())
return;
- repl::getGlobalReplicationCoordinator()->setLastOptimeForSlave(rid, _slaveReadTill);
+ repl::getGlobalReplicationCoordinator()
+ ->setLastOptimeForSlave(rid, _slaveReadTill)
+ .transitional_ignore();
}
//
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index d3800a17b1d..dfb5ca174c2 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -145,7 +145,7 @@ void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
for (mmb::Element element = mmb::findFirstChildNamed(cmdObj->root(), redactedFields[i]);
element.ok();
element = mmb::findElementNamed(element.rightSibling(), redactedFields[i])) {
- element.setValueString("xxx");
+ element.setValueString("xxx").transitional_ignore();
}
}
}
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 42e9419622c..2fabe66c892 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -267,7 +267,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
querySettings->removeAllowedIndices(planCache->computeKey(*cq));
// Remove entry from plan cache
- planCache->remove(*cq);
+ planCache->remove(*cq).transitional_ignore();
LOG(0) << "Removed index filter on " << ns << " " << redact(cq->toStringShort());
@@ -316,7 +316,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Remove plan cache entry.
- planCache->remove(*cq);
+ planCache->remove(*cq).transitional_ignore();
}
LOG(0) << "Removed all index filters for collection: " << ns;
@@ -394,7 +394,7 @@ Status SetFilter::set(OperationContext* opCtx,
querySettings->setAllowedIndices(*cq, planCache->computeKey(*cq), indexes, indexNames);
// Remove entry from plan cache.
- planCache->remove(*cq);
+ planCache->remove(*cq).transitional_ignore();
LOG(0) << "Index filter set on " << ns << " " << redact(cq->toStringShort()) << " "
<< indexesElt;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 43637227691..02eb08c1aab 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -376,7 +376,7 @@ void State::dropTempCollections() {
"no longer primary",
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
_opCtx, _config.tempNamespace));
- db->dropCollection(_opCtx, _config.tempNamespace.ns());
+ db->dropCollection(_opCtx, _config.tempNamespace.ns()).transitional_ignore();
wunit.commit();
}
}
@@ -394,7 +394,7 @@ void State::dropTempCollections() {
Lock::DBLock lk(_opCtx, _config.incLong.db(), MODE_X);
if (Database* db = dbHolder().get(_opCtx, _config.incLong.ns())) {
WriteUnitOfWork wunit(_opCtx);
- db->dropCollection(_opCtx, _config.incLong.ns());
+ db->dropCollection(_opCtx, _config.incLong.ns()).transitional_ignore();
wunit.commit();
}
}
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index ab794c8bf0e..649ad191169 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -335,15 +335,15 @@ private:
// Save LogComponent::kDefault LogSeverity at root
if (component == LogComponent::kDefault) {
- doc.root().appendInt("verbosity", severity);
+ doc.root().appendInt("verbosity", severity).transitional_ignore();
continue;
}
mutablebson::Element element = doc.makeElementObject(component.getShortName());
- element.appendInt("verbosity", severity);
+ element.appendInt("verbosity", severity).transitional_ignore();
mutablebson::Element parentElement = _getParentElement(doc, component);
- parentElement.pushBack(element);
+ parentElement.pushBack(element).transitional_ignore();
}
BSONObj result = doc.getObject();
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 87ce9dd8093..97b3884f8cd 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -153,7 +153,7 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
vector<BSONObj> shapes = getShapes(planCache);
ASSERT_EQUALS(shapes.size(), 1U);
@@ -186,7 +186,7 @@ TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
ASSERT_EQUALS(getShapes(planCache).size(), 1U);
// Clear cache and confirm number of keys afterwards.
@@ -327,8 +327,8 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cqA, solns, createDecision(1U));
- planCache.add(*cqB, solns, createDecision(1U));
+ planCache.add(*cqA, solns, createDecision(1U)).transitional_ignore();
+ planCache.add(*cqB, solns, createDecision(1U)).transitional_ignore();
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
@@ -386,8 +386,8 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
- planCache.add(*cqCollation, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
+ planCache.add(*cqCollation, solns, createDecision(1U)).transitional_ignore();
// Check keys in cache before dropping the query with collation.
vector<BSONObj> shapesBefore = getShapes(planCache);
@@ -530,7 +530,7 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
vector<BSONObj> plans = getPlans(planCache,
cq->getQueryObj(),
@@ -560,7 +560,7 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(2U));
+ planCache.add(*cq, solns, createDecision(2U)).transitional_ignore();
vector<BSONObj> plans = getPlans(planCache,
cq->getQueryObj(),
@@ -599,11 +599,11 @@ TEST(PlanCacheCommandsTest, planCacheListPlansCollation) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
std::vector<QuerySolution*> twoSolns;
twoSolns.push_back(&qs);
twoSolns.push_back(&qs);
- planCache.add(*cqCollation, twoSolns, createDecision(2U));
+ planCache.add(*cqCollation, twoSolns, createDecision(2U)).transitional_ignore();
// Normal query should have one solution.
vector<BSONObj> plans = getPlans(planCache,
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 3126a71551d..4c0a7aee499 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -1238,7 +1238,8 @@ public:
AuthorizationManager::usersCollectionNamespace,
queryBuilder.done(),
projection.done(),
- function);
+ function)
+ .transitional_ignore();
}
result.append("users", usersArrayBuilder.arr());
return true;
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 6b3fc2f4b19..608719e0341 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -57,7 +57,7 @@ void redactPasswordData(mutablebson::Element parent) {
const auto pwdFieldName = "pwd"_sd;
for (mmb::Element pwdElement = mmb::findFirstChildNamed(parent, pwdFieldName); pwdElement.ok();
pwdElement = mmb::findElementNamed(pwdElement.rightSibling(), pwdFieldName)) {
- pwdElement.setValueString("xxx");
+ pwdElement.setValueString("xxx").transitional_ignore();
}
}
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index a1b43356a8a..9bee4b173f7 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -71,7 +71,7 @@ void redactTooLongLog(mutablebson::Document* cmdObj, StringData fieldName) {
// Redact the log if there are more than one documents or operations.
if (field.countChildren() > 1) {
- field.setValueInt(field.countChildren());
+ field.setValueInt(field.countChildren()).transitional_ignore();
}
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 7d925d6d73f..a5d87bfbf55 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -644,7 +644,7 @@ ExitCode _initAndListen(int listenPort) {
uassertStatusOK(ShardingState::get(startupOpCtx.get())
->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));
if (shardingInitialized) {
- waitForShardRegistryReload(startupOpCtx.get());
+ waitForShardRegistryReload(startupOpCtx.get()).transitional_ignore();
}
if (!storageGlobalParams.readOnly) {
@@ -705,7 +705,7 @@ ExitCode _initAndListen(int listenPort) {
// Set up the periodic runner for background job execution
auto runner = makePeriodicRunner();
- runner->startup();
+ runner->startup().transitional_ignore();
globalServiceContext->setPeriodicRunner(std::move(runner));
// Set up the logical session cache
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 3106994547e..42780395037 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -230,7 +230,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
// cache entry if requested by the caller.
if (shouldCache) {
PlanCache* cache = _collection->infoCache()->getPlanCache();
- cache->remove(*_canonicalQuery);
+ cache->remove(*_canonicalQuery).transitional_ignore();
}
PlanStage* newRoot;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index b7a25313969..9bb94def100 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -506,7 +506,7 @@ class TwoDPtInAnnulusExpression : public LeafMatchExpression {
public:
TwoDPtInAnnulusExpression(const R2Annulus& annulus, StringData twoDPath)
: LeafMatchExpression(INTERNAL_2D_POINT_IN_ANNULUS), _annulus(annulus) {
- setPath(twoDPath);
+ setPath(twoDPath).transitional_ignore();
}
void serialize(BSONObjBuilder* out) const final {
@@ -726,7 +726,7 @@ StatusWith<NearStage::CoveredInterval*> //
// These parameters are stored by the index, and so must be ok
GeoHashConverter::Parameters hashParams;
- GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams);
+ GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams).transitional_ignore();
// 2D indexes support covered search over additional fields they contain
IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, _nearParams.filter);
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index a01521610ea..b113a17cc30 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -127,7 +127,7 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
// if the best solution fails. Alternatively we could try to
// defer cache insertion to be after the first produced result.
- _collection->infoCache()->getPlanCache()->remove(*_query);
+ _collection->infoCache()->getPlanCache()->remove(*_query).transitional_ignore();
_bestPlanIdx = _backupPlanIdx;
_backupPlanIdx = kNoSuchPlan;
@@ -323,7 +323,10 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
if (validSolutions) {
- _collection->infoCache()->getPlanCache()->add(*_query, solutions, ranking.release());
+ _collection->infoCache()
+ ->getPlanCache()
+ ->add(*_query, solutions, ranking.release())
+ .transitional_ignore();
}
}
diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp
index edf0386d042..230673dcc7e 100644
--- a/src/mongo/db/exec/projection_exec.cpp
+++ b/src/mongo/db/exec/projection_exec.cpp
@@ -475,7 +475,7 @@ void ProjectionExec::appendArray(BSONObjBuilder* bob, const BSONObj& array, bool
BSONObjBuilder subBob;
BSONObjIterator jt(elt.embeddedObject());
while (jt.more()) {
- append(&subBob, jt.next());
+ append(&subBob, jt.next()).transitional_ignore();
}
bob->append(bob->numStr(index++), subBob.obj());
break;
@@ -518,7 +518,7 @@ Status ProjectionExec::append(BSONObjBuilder* bob,
BSONObjBuilder subBob;
BSONObjIterator it(elt.embeddedObject());
while (it.more()) {
- subfm.append(&subBob, it.next(), details, arrayOpType);
+ subfm.append(&subBob, it.next(), details, arrayOpType).transitional_ignore();
}
bob->append(elt.fieldName(), subBob.obj());
} else {
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index bdd37af66aa..c562be7ed26 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -260,7 +260,7 @@ void SortKeyGenerator::getBoundsForSort(OperationContext* opCtx,
std::vector<QuerySolution*> solns;
LOG(5) << "Sort key generation: Planning to obtain bounds for sort.";
- QueryPlanner::plan(*queryForSort, params, &solns);
+ QueryPlanner::plan(*queryForSort, params, &solns).transitional_ignore();
// TODO: are there ever > 1 solns? If so, do we look for a specific soln?
if (1 == solns.size()) {
diff --git a/src/mongo/db/ftdc/compressor.cpp b/src/mongo/db/ftdc/compressor.cpp
index ecf9c7ece6c..984698f7daf 100644
--- a/src/mongo/db/ftdc/compressor.cpp
+++ b/src/mongo/db/ftdc/compressor.cpp
@@ -45,7 +45,8 @@ using std::swap;
StatusWith<boost::optional<std::tuple<ConstDataRange, FTDCCompressor::CompressorState, Date_t>>>
FTDCCompressor::addSample(const BSONObj& sample, Date_t date) {
if (_referenceDoc.isEmpty()) {
- FTDCBSONUtil::extractMetricsFromDocument(sample, sample, &_metrics);
+ FTDCBSONUtil::extractMetricsFromDocument(sample, sample, &_metrics)
+ .status_with_transitional_ignore();
_reset(sample, date);
return {boost::none};
}
diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp
index a2b1159de4c..8438a820d36 100644
--- a/src/mongo/db/ftdc/file_manager.cpp
+++ b/src/mongo/db/ftdc/file_manager.cpp
@@ -55,7 +55,7 @@ FTDCFileManager::FTDCFileManager(const FTDCConfig* config,
: _config(config), _writer(_config), _path(path), _rotateCollectors(collection) {}
FTDCFileManager::~FTDCFileManager() {
- close();
+ close().transitional_ignore();
}
StatusWith<std::unique_ptr<FTDCFileManager>> FTDCFileManager::create(
diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp
index 6c2e5c220a6..027ae88af62 100644
--- a/src/mongo/db/ftdc/file_manager_test.cpp
+++ b/src/mongo/db/ftdc/file_manager_test.cpp
@@ -111,7 +111,7 @@ TEST(FTDCFileManagerTest, TestFull) {
Date_t()));
}
- mgr->close();
+ mgr->close().transitional_ignore();
auto files = scanDirectory(dir);
@@ -211,7 +211,7 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
Date_t()));
}
- mgr->close();
+ mgr->close().transitional_ignore();
// Validate the interim file does not have data
ValidateInterimFileHasData(dir, false);
@@ -281,7 +281,7 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
Date_t()));
}
- mgr->close();
+ mgr->close().transitional_ignore();
auto swFile = mgr->generateArchiveFileName(dir, "0test-crash");
ASSERT_OK(swFile);
diff --git a/src/mongo/db/ftdc/file_writer.cpp b/src/mongo/db/ftdc/file_writer.cpp
index cd3ffdc45cf..d32736f626c 100644
--- a/src/mongo/db/ftdc/file_writer.cpp
+++ b/src/mongo/db/ftdc/file_writer.cpp
@@ -47,7 +47,7 @@
namespace mongo {
FTDCFileWriter::~FTDCFileWriter() {
- close();
+ close().transitional_ignore();
}
Status FTDCFileWriter::open(const boost::filesystem::path& file) {
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index 138d7c850f6..f7977e2b8b3 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -76,7 +76,7 @@ TEST(FTDCFileTest, TestFileBasicMetadata) {
ASSERT_OK(writer.writeMetadata(doc1, Date_t()));
ASSERT_OK(writer.writeMetadata(doc2, Date_t()));
- writer.close();
+ writer.close().transitional_ignore();
FTDCFileReader reader;
ASSERT_OK(reader.open(p));
@@ -127,7 +127,7 @@ TEST(FTDCFileTest, TestFileBasicCompress) {
ASSERT_OK(writer.writeSample(doc1, Date_t()));
ASSERT_OK(writer.writeSample(doc2, Date_t()));
- writer.close();
+ writer.close().transitional_ignore();
FTDCFileReader reader;
ASSERT_OK(reader.open(p));
@@ -194,7 +194,7 @@ private:
ASSERT_OK(sw);
}
- _writer.close();
+ _writer.close().transitional_ignore();
ValidateDocumentList(_path, _docs);
}
diff --git a/src/mongo/db/geo/r2_region_coverer_test.cpp b/src/mongo/db/geo/r2_region_coverer_test.cpp
index 69b6abba563..6ae997d0ee5 100644
--- a/src/mongo/db/geo/r2_region_coverer_test.cpp
+++ b/src/mongo/db/geo/r2_region_coverer_test.cpp
@@ -275,11 +275,13 @@ GeometryContainer* getRandomCircle(double radius) {
// Format: { $center : [ [-74, 40.74], 10 ] }
GeometryContainer* container = new GeometryContainer();
- container->parseFromQuery(
- BSON("$center" << BSON_ARRAY(BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
- << randDouble(radius, MAXBOUND - radius))
- << radius))
- .firstElement());
+ container
+ ->parseFromQuery(
+ BSON("$center" << BSON_ARRAY(BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
+ << randDouble(radius, MAXBOUND - radius))
+ << radius))
+ .firstElement())
+ .transitional_ignore();
return container;
}
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 95310de17c3..be9e1a26f26 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -335,7 +335,7 @@ MONGO_INITIALIZER(RegisterShortCircuitExitHandler)(InitializerContext*) {
}
bool initializeServerGlobalState() {
- Listener::globalTicketHolder.resize(serverGlobalParams.maxConns);
+ Listener::globalTicketHolder.resize(serverGlobalParams.maxConns).transitional_ignore();
#ifndef _WIN32
if (!fs::is_directory(serverGlobalParams.socket)) {
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 9ef233f11be..f3d6680b242 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -122,7 +122,7 @@ void profile(OperationContext* opCtx, NetworkOp op) {
if (acquireDbXLock) {
autoGetDb.reset(new AutoGetDb(opCtx, dbName, MODE_X));
if (autoGetDb->getDb()) {
- createProfileCollection(opCtx, autoGetDb->getDb());
+ createProfileCollection(opCtx, autoGetDb->getDb()).transitional_ignore();
}
} else {
autoGetDb.reset(new AutoGetDb(opCtx, dbName, MODE_IX));
@@ -142,7 +142,7 @@ void profile(OperationContext* opCtx, NetworkOp op) {
if (coll) {
WriteUnitOfWork wuow(opCtx);
OpDebug* const nullOpDebug = nullptr;
- coll->insertDocument(opCtx, p, nullOpDebug, false);
+ coll->insertDocument(opCtx, p, nullOpDebug, false).transitional_ignore();
wuow.commit();
break;
diff --git a/src/mongo/db/keys_collection_manager_test.cpp b/src/mongo/db/keys_collection_manager_test.cpp
index dbda76176b8..aae1db1a6a4 100644
--- a/src/mongo/db/keys_collection_manager_test.cpp
+++ b/src/mongo/db/keys_collection_manager_test.cpp
@@ -82,9 +82,10 @@ private:
TEST_F(KeysManagerTest, GetKeyForValidationTimesOutIfRefresherIsNotRunning) {
operationContext()->setDeadlineAfterNowBy(Microseconds(250 * 1000));
- ASSERT_THROWS(
- keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0))),
- DBException);
+ ASSERT_THROWS(keyManager()
+ ->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)))
+ .status_with_transitional_ignore(),
+ DBException);
}
TEST_F(KeysManagerTest, GetKeyForValidationErrorsIfKeyDoesntExist) {
diff --git a/src/mongo/db/logical_clock_test.cpp b/src/mongo/db/logical_clock_test.cpp
index 2130b5e9944..27c1f1a0907 100644
--- a/src/mongo/db/logical_clock_test.cpp
+++ b/src/mongo/db/logical_clock_test.cpp
@@ -326,7 +326,8 @@ TEST_F(LogicalClockTest, RejectsLogicalTimesGreaterThanMaxTime) {
auto almostMaxSecs =
Seconds(maxVal) - LogicalClock::kMaxAcceptableLogicalClockDriftSecs + Seconds(10);
setMockClockSourceTime(Date_t::fromDurationSinceEpoch(almostMaxSecs));
- ASSERT_THROWS(getClock()->advanceClusterTime(beyondMaxTime), std::exception);
+ ASSERT_THROWS(getClock()->advanceClusterTime(beyondMaxTime).transitional_ignore(),
+ std::exception);
ASSERT_TRUE(getClock()->getClusterTime() == LogicalTime());
}
diff --git a/src/mongo/db/logical_session_cache_test.cpp b/src/mongo/db/logical_session_cache_test.cpp
index c0f86306ef4..a9d3c5eb1c1 100644
--- a/src/mongo/db/logical_session_cache_test.cpp
+++ b/src/mongo/db/logical_session_cache_test.cpp
@@ -147,7 +147,7 @@ TEST_F(LogicalSessionCacheTest, TestCacheHitsOnly) {
ASSERT(!res.isOK());
// When the record is present, returns the owner
- cache()->getOwner(lsid);
+ cache()->getOwner(lsid).status_with_transitional_ignore();
res = cache()->getOwnerFromCache(lsid);
ASSERT(res.isOK());
auto fetched = res.getValue();
@@ -234,8 +234,8 @@ TEST_F(LogicalSessionCacheTest, CacheRefreshesOwnRecords) {
// Insert two records into the cache
auto record1 = newRecord();
auto record2 = newRecord();
- cache()->startSession(record1);
- cache()->startSession(record2);
+ cache()->startSession(record1).transitional_ignore();
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<int> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -284,8 +284,8 @@ TEST_F(LogicalSessionCacheTest, CacheDeletesRecordsThatFailToRefresh) {
// Put two sessions into the cache
auto record1 = newRecord();
auto record2 = newRecord();
- cache()->startSession(record1);
- cache()->startSession(record2);
+ cache()->startSession(record1).transitional_ignore();
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<void> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -313,9 +313,9 @@ TEST_F(LogicalSessionCacheTest, KeepActiveSessionAliveEvenIfRefreshFails) {
// Put two sessions into the cache, one into the service
auto record1 = newRecord();
auto record2 = newRecord();
- cache()->startSession(record1);
+ cache()->startSession(record1).transitional_ignore();
service()->add(record1.getLsid());
- cache()->startSession(record2);
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<void> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -342,7 +342,7 @@ TEST_F(LogicalSessionCacheTest, KeepActiveSessionAliveEvenIfRefreshFails) {
TEST_F(LogicalSessionCacheTest, BasicSessionExpiration) {
// Insert a record
auto record = newRecord();
- cache()->startSession(record);
+ cache()->startSession(record).transitional_ignore();
auto res = cache()->getOwnerFromCache(record.getLsid());
ASSERT(res.isOK());
@@ -411,7 +411,7 @@ TEST_F(LogicalSessionCacheTest, RefreshCachedAndServiceRecordsTogether) {
auto record1 = newRecord();
service()->add(record1.getLsid());
auto record2 = newRecord();
- cache()->startSession(record2);
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<void> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -433,7 +433,7 @@ TEST_F(LogicalSessionCacheTest, ManyRecordsInCacheRefresh) {
int count = LogicalSessionCache::kLogicalSessionCacheDefaultCapacity;
for (int i = 0; i < count; i++) {
auto record = newRecord();
- cache()->startSession(record);
+ cache()->startSession(record).transitional_ignore();
}
stdx::promise<void> hitRefresh;
@@ -482,7 +482,7 @@ TEST_F(LogicalSessionCacheTest, ManySessionsRefreshComboDeluxe) {
service()->add(record.getLsid());
auto record2 = newRecord();
- cache()->startSession(record2);
+ cache()->startSession(record2).transitional_ignore();
}
stdx::mutex mutex;
diff --git a/src/mongo/db/matcher/expression_algo.cpp b/src/mongo/db/matcher/expression_algo.cpp
index 12fe0224258..292bf1f590a 100644
--- a/src/mongo/db/matcher/expression_algo.cpp
+++ b/src/mongo/db/matcher/expression_algo.cpp
@@ -152,7 +152,7 @@ bool _isSubsetOf(const MatchExpression* lhs, const ComparisonMatchExpression* rh
for (BSONElement elem : ime->getEqualities()) {
// Each element in the $in-array represents an equality predicate.
EqualityMatchExpression equality;
- equality.init(lhs->path(), elem);
+ equality.init(lhs->path(), elem).transitional_ignore();
equality.setCollator(ime->getCollator());
if (!_isSubsetOf(&equality, rhs)) {
return false;
@@ -283,7 +283,7 @@ void applyRenamesToExpression(MatchExpression* expr, const StringMap<std::string
auto it = renames.find(expr->path());
if (it != renames.end()) {
LeafMatchExpression* leafExpr = checked_cast<LeafMatchExpression*>(expr);
- leafExpr->setPath(it->second);
+ leafExpr->setPath(it->second).transitional_ignore();
}
}
diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp
index 86fbc384eae..965ab75d119 100644
--- a/src/mongo/db/matcher/expression_array.cpp
+++ b/src/mongo/db/matcher/expression_array.cpp
@@ -144,7 +144,7 @@ ElemMatchValueMatchExpression::~ElemMatchValueMatchExpression() {
}
Status ElemMatchValueMatchExpression::init(StringData path, MatchExpression* sub) {
- init(path);
+ init(path).transitional_ignore();
add(sub);
return Status::OK();
}
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index 0920467c3bf..e121788a091 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -77,7 +77,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ElemMatchObjectMatchExpression> e =
stdx::make_unique<ElemMatchObjectMatchExpression>();
- e->init(path(), _sub->shallowClone().release());
+ e->init(path(), _sub->shallowClone().release()).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -122,7 +122,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ElemMatchValueMatchExpression> e =
stdx::make_unique<ElemMatchValueMatchExpression>();
- e->init(path());
+ e->init(path()).transitional_ignore();
for (size_t i = 0; i < _subs.size(); ++i) {
e->add(_subs[i]->shallowClone().release());
}
@@ -161,7 +161,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<SizeMatchExpression> e = stdx::make_unique<SizeMatchExpression>();
- e->init(path(), _size);
+ e->init(path(), _size).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
diff --git a/src/mongo/db/matcher/expression_array_test.cpp b/src/mongo/db/matcher/expression_array_test.cpp
index 6a7f36cebed..894fbbd9e49 100644
--- a/src/mongo/db/matcher/expression_array_test.cpp
+++ b/src/mongo/db/matcher/expression_array_test.cpp
@@ -305,7 +305,7 @@ TEST(AndOfElemMatch, MatchesElement) {
// and1 = { a : 1, b : 1 }
unique_ptr<ElemMatchObjectMatchExpression> elemMatch1(new ElemMatchObjectMatchExpression());
- elemMatch1->init("x", and1.release());
+ elemMatch1->init("x", and1.release()).transitional_ignore();
// elemMatch1 = { x : { $elemMatch : { a : 1, b : 1 } } }
BSONObj baseOperanda2 = BSON("a" << 2);
@@ -322,7 +322,7 @@ TEST(AndOfElemMatch, MatchesElement) {
// and2 = { a : 2, b : 2 }
unique_ptr<ElemMatchObjectMatchExpression> elemMatch2(new ElemMatchObjectMatchExpression());
- elemMatch2->init("x", and2.release());
+ elemMatch2->init("x", and2.release()).transitional_ignore();
// elemMatch2 = { x : { $elemMatch : { a : 2, b : 2 } } }
unique_ptr<AndMatchExpression> andOfEM(new AndMatchExpression());
@@ -357,7 +357,7 @@ TEST(AndOfElemMatch, Matches) {
ASSERT(lt1->init("", baseOperandlt1["$lt"]).isOK());
unique_ptr<ElemMatchValueMatchExpression> elemMatch1(new ElemMatchValueMatchExpression());
- elemMatch1->init("x");
+ elemMatch1->init("x").transitional_ignore();
elemMatch1->add(gt1.release());
elemMatch1->add(lt1.release());
// elemMatch1 = { x : { $elemMatch : { $gt : 1 , $lt : 10 } } }
@@ -371,7 +371,7 @@ TEST(AndOfElemMatch, Matches) {
ASSERT(lt2->init("", baseOperandlt2["$lt"]).isOK());
unique_ptr<ElemMatchValueMatchExpression> elemMatch2(new ElemMatchValueMatchExpression());
- elemMatch2->init("x");
+ elemMatch2->init("x").transitional_ignore();
elemMatch2->add(gt2.release());
elemMatch2->add(lt2.release());
// elemMatch2 = { x : { $elemMatch : { $gt : 101 , $lt : 110 } } }
@@ -453,9 +453,9 @@ TEST(SizeMatchExpression, Equivalent) {
SizeMatchExpression e2;
SizeMatchExpression e3;
- e1.init("a", 5);
- e2.init("a", 6);
- e3.init("v", 5);
+ e1.init("a", 5).transitional_ignore();
+ e2.init("a", 6).transitional_ignore();
+ e3.init("v", 5).transitional_ignore();
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index f229eec42c2..7129c6413e0 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -391,7 +391,7 @@ bool GeoMatchExpression::equivalent(const MatchExpression* other) const {
std::unique_ptr<MatchExpression> GeoMatchExpression::shallowClone() const {
std::unique_ptr<GeoMatchExpression> next = stdx::make_unique<GeoMatchExpression>();
- next->init(path(), NULL, _rawObj);
+ next->init(path(), NULL, _rawObj).transitional_ignore();
next->_query = _query;
next->_canSkipValidation = _canSkipValidation;
if (getTag()) {
@@ -448,7 +448,7 @@ bool GeoNearMatchExpression::equivalent(const MatchExpression* other) const {
std::unique_ptr<MatchExpression> GeoNearMatchExpression::shallowClone() const {
std::unique_ptr<GeoNearMatchExpression> next = stdx::make_unique<GeoNearMatchExpression>();
- next->init(path(), NULL, _rawObj);
+ next->init(path(), NULL, _rawObj).transitional_ignore();
next->_query = _query;
if (getTag()) {
next->setTag(getTag()->clone());
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index cd987ff9fcb..4980986f9d4 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -513,7 +513,7 @@ Status InMatchExpression::init(StringData path) {
std::unique_ptr<MatchExpression> InMatchExpression::shallowClone() const {
auto next = stdx::make_unique<InMatchExpression>();
- next->init(path());
+ next->init(path()).transitional_ignore();
next->setCollator(_collator);
if (getTag()) {
next->setTag(getTag()->clone());
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index a2c62d3c344..093ebe7edad 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -144,7 +144,7 @@ public:
EqualityMatchExpression() : ComparisonMatchExpression(EQ) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<EqualityMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -158,7 +158,7 @@ public:
LTEMatchExpression() : ComparisonMatchExpression(LTE) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<LTEMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -172,7 +172,7 @@ public:
LTMatchExpression() : ComparisonMatchExpression(LT) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<LTMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -186,7 +186,7 @@ public:
GTMatchExpression() : ComparisonMatchExpression(GT) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<GTMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -200,7 +200,7 @@ public:
GTEMatchExpression() : ComparisonMatchExpression(GTE) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<GTEMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -230,7 +230,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<RegexMatchExpression> e = stdx::make_unique<RegexMatchExpression>();
- e->init(path(), _regex, _flags);
+ e->init(path(), _regex, _flags).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -270,7 +270,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ModMatchExpression> m = stdx::make_unique<ModMatchExpression>();
- m->init(path(), _divisor, _remainder);
+ m->init(path(), _divisor, _remainder).transitional_ignore();
if (getTag()) {
m->setTag(getTag()->clone());
}
@@ -305,7 +305,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ExistsMatchExpression> e = stdx::make_unique<ExistsMatchExpression>();
- e->init(path());
+ e->init(path()).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -498,7 +498,7 @@ protected:
* ownership.
*/
void initClone(BitTestMatchExpression* clone) const {
- clone->init(path(), _bitPositions);
+ clone->init(path(), _bitPositions).transitional_ignore();
if (getTag()) {
clone->setTag(getTag()->clone());
}
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index 0bc43c24465..7ae77f66b2a 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -89,7 +89,7 @@ TEST(EqOp, MatchesElement) {
BSONObj notMatch = BSON("a" << 6);
EqualityMatchExpression eq;
- eq.init("", operand["a"]);
+ eq.init("", operand["a"]).transitional_ignore();
ASSERT(eq.matchesSingleElement(match.firstElement()));
ASSERT(!eq.matchesSingleElement(notMatch.firstElement()));
@@ -105,7 +105,7 @@ TEST(EqOp, InvalidEooOperand) {
TEST(EqOp, MatchesScalar) {
BSONObj operand = BSON("a" << 5);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << 5.0), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
}
@@ -113,7 +113,7 @@ TEST(EqOp, MatchesScalar) {
TEST(EqOp, MatchesArrayValue) {
BSONObj operand = BSON("a" << 5);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
}
@@ -121,7 +121,7 @@ TEST(EqOp, MatchesArrayValue) {
TEST(EqOp, MatchesReferencedObjectValue) {
BSONObj operand = BSON("a.b" << 5);
EqualityMatchExpression eq;
- eq.init("a.b", operand["a.b"]);
+ eq.init("a.b", operand["a.b"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << 5)), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(5))), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), NULL));
@@ -130,7 +130,7 @@ TEST(EqOp, MatchesReferencedObjectValue) {
TEST(EqOp, MatchesReferencedArrayValue) {
BSONObj operand = BSON("a.0" << 5);
EqualityMatchExpression eq;
- eq.init("a.0", operand["a.0"]);
+ eq.init("a.0", operand["a.0"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
}
@@ -138,7 +138,7 @@ TEST(EqOp, MatchesReferencedArrayValue) {
TEST(EqOp, MatchesNull) {
BSONObj operand = BSON("a" << BSONNULL);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSONObj(), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSONNULL), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
@@ -151,7 +151,7 @@ TEST(EqOp, MatchesNull) {
TEST(EqOp, MatchesNestedNull) {
BSONObj operand = BSON("a.b" << BSONNULL);
EqualityMatchExpression eq;
- eq.init("a.b", operand["a.b"]);
+ eq.init("a.b", operand["a.b"]).transitional_ignore();
// null matches any empty object that is on a subpath of a.b
ASSERT(eq.matchesBSON(BSONObj(), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSONObj()), NULL));
@@ -171,7 +171,7 @@ TEST(EqOp, MatchesNestedNull) {
TEST(EqOp, MatchesMinKey) {
BSONObj operand = BSON("a" << MinKey);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << MinKey), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << MaxKey), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
@@ -200,7 +200,7 @@ TEST(EqOp, MatchesFullArray) {
TEST(EqOp, MatchesThroughNestedArray) {
BSONObj operand = BSON("a.b.c.d" << 3);
EqualityMatchExpression eq;
- eq.init("a.b.c.d", operand["a.b.c.d"]);
+ eq.init("a.b.c.d", operand["a.b.c.d"]).transitional_ignore();
BSONObj obj = fromjson("{a:{b:[{c:[{d:1},{d:2}]},{c:[{d:3}]}]}}");
ASSERT(eq.matchesBSON(obj, NULL));
}
@@ -243,9 +243,9 @@ TEST(EqOp, Equality1) {
BSONObj operand = BSON("a" << 5 << "b" << 5 << "c" << 4);
- eq1.init("a", operand["a"]);
- eq2.init("a", operand["b"]);
- eq3.init("c", operand["c"]);
+ eq1.init("a", operand["a"]).transitional_ignore();
+ eq2.init("a", operand["b"]).transitional_ignore();
+ eq3.init("c", operand["c"]).transitional_ignore();
ASSERT(eq1.equivalent(&eq1));
ASSERT(eq1.equivalent(&eq2));
@@ -1239,10 +1239,10 @@ TEST(ModMatchExpression, Equality1) {
ModMatchExpression m3;
ModMatchExpression m4;
- m1.init("a", 1, 2);
- m2.init("a", 2, 2);
- m3.init("a", 1, 1);
- m4.init("b", 1, 2);
+ m1.init("a", 1, 2).transitional_ignore();
+ m2.init("a", 2, 2).transitional_ignore();
+ m3.init("a", 1, 1).transitional_ignore();
+ m4.init("b", 1, 2).transitional_ignore();
ASSERT(m1.equivalent(&m1));
ASSERT(!m1.equivalent(&m2));
@@ -1313,8 +1313,8 @@ TEST(ExistsMatchExpression, ElemMatchKey) {
TEST(ExistsMatchExpression, Equivalent) {
ExistsMatchExpression e1;
ExistsMatchExpression e2;
- e1.init("a");
- e2.init("b");
+ e1.init("a").transitional_ignore();
+ e2.init("b").transitional_ignore();
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
@@ -1459,9 +1459,9 @@ TEST(TypeMatchExpression, Equivalent) {
TypeMatchExpression e1;
TypeMatchExpression e2;
TypeMatchExpression e3;
- e1.initWithBSONType("a", String);
- e2.initWithBSONType("a", NumberDouble);
- e3.initWithBSONType("b", String);
+ e1.initWithBSONType("a", String).transitional_ignore();
+ e2.initWithBSONType("a", NumberDouble).transitional_ignore();
+ e3.initWithBSONType("b", String).transitional_ignore();
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
@@ -1473,14 +1473,14 @@ TEST(InMatchExpression, MatchesElementSingle) {
BSONObj match = BSON("a" << 1);
BSONObj notMatch = BSON("a" << 2);
InMatchExpression in;
- in.addEquality(operand.firstElement());
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesSingleElement(match["a"]));
ASSERT(!in.matchesSingleElement(notMatch["a"]));
}
TEST(InMatchExpression, MatchesEmpty) {
InMatchExpression in;
- in.init("a");
+ in.init("a").transitional_ignore();
BSONObj notMatch = BSON("a" << 2);
ASSERT(!in.matchesSingleElement(notMatch["a"]));
@@ -1491,10 +1491,10 @@ TEST(InMatchExpression, MatchesEmpty) {
TEST(InMatchExpression, MatchesElementMultiple) {
BSONObj operand = BSON_ARRAY(1 << "r" << true << 1);
InMatchExpression in;
- in.addEquality(operand[0]);
- in.addEquality(operand[1]);
- in.addEquality(operand[2]);
- in.addEquality(operand[3]);
+ in.addEquality(operand[0]).transitional_ignore();
+ in.addEquality(operand[1]).transitional_ignore();
+ in.addEquality(operand[2]).transitional_ignore();
+ in.addEquality(operand[3]).transitional_ignore();
BSONObj matchFirst = BSON("a" << 1);
BSONObj matchSecond = BSON("a"
@@ -1511,8 +1511,8 @@ TEST(InMatchExpression, MatchesElementMultiple) {
TEST(InMatchExpression, MatchesScalar) {
BSONObj operand = BSON_ARRAY(5);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << 5.0), NULL));
ASSERT(!in.matchesBSON(BSON("a" << 4), NULL));
@@ -1521,8 +1521,8 @@ TEST(InMatchExpression, MatchesScalar) {
TEST(InMatchExpression, MatchesArrayValue) {
BSONObj operand = BSON_ARRAY(5);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL));
ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
@@ -1533,8 +1533,8 @@ TEST(InMatchExpression, MatchesNull) {
BSONObj operand = BSON_ARRAY(BSONNULL);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSONObj(), NULL));
ASSERT(in.matchesBSON(BSON("a" << BSONNULL), NULL));
@@ -1547,7 +1547,7 @@ TEST(InMatchExpression, MatchesUndefined) {
BSONObj operand = BSON_ARRAY(BSONUndefined);
InMatchExpression in;
- in.init("a");
+ in.init("a").transitional_ignore();
Status s = in.addEquality(operand.firstElement());
ASSERT_NOT_OK(s);
}
@@ -1555,8 +1555,8 @@ TEST(InMatchExpression, MatchesUndefined) {
TEST(InMatchExpression, MatchesMinKey) {
BSONObj operand = BSON_ARRAY(MinKey);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << MinKey), NULL));
ASSERT(!in.matchesBSON(BSON("a" << MaxKey), NULL));
@@ -1566,8 +1566,8 @@ TEST(InMatchExpression, MatchesMinKey) {
TEST(InMatchExpression, MatchesMaxKey) {
BSONObj operand = BSON_ARRAY(MaxKey);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << MaxKey), NULL));
ASSERT(!in.matchesBSON(BSON("a" << MinKey), NULL));
@@ -1577,10 +1577,10 @@ TEST(InMatchExpression, MatchesMaxKey) {
TEST(InMatchExpression, MatchesFullArray) {
BSONObj operand = BSON_ARRAY(BSON_ARRAY(1 << 2) << 4 << 5);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand[0]);
- in.addEquality(operand[1]);
- in.addEquality(operand[2]);
+ in.init("a").transitional_ignore();
+ in.addEquality(operand[0]).transitional_ignore();
+ in.addEquality(operand[1]).transitional_ignore();
+ in.addEquality(operand[2]).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL));
ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), NULL));
@@ -1591,9 +1591,9 @@ TEST(InMatchExpression, MatchesFullArray) {
TEST(InMatchExpression, ElemMatchKey) {
BSONObj operand = BSON_ARRAY(5 << 2);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand[0]);
- in.addEquality(operand[1]);
+ in.init("a").transitional_ignore();
+ in.addEquality(operand[0]).transitional_ignore();
+ in.addEquality(operand[1]).transitional_ignore();
MatchDetails details;
details.requestElemMatchKey();
@@ -1611,7 +1611,7 @@ TEST(InMatchExpression, InMatchExpressionsWithDifferentNumbersOfElementsAreUnequ
<< "string");
InMatchExpression eq1;
InMatchExpression eq2;
- eq1.addEquality(obj.firstElement());
+ eq1.addEquality(obj.firstElement()).transitional_ignore();
ASSERT(!eq1.equivalent(&eq2));
}
@@ -1647,8 +1647,8 @@ TEST(InMatchExpression, InMatchExpressionsWithCollationEquivalentElementsAreEqua
InMatchExpression eq2;
eq2.setCollator(&collator2);
- eq1.addEquality(obj1.firstElement());
- eq2.addEquality(obj2.firstElement());
+ eq1.addEquality(obj1.firstElement()).transitional_ignore();
+ eq2.addEquality(obj2.firstElement()).transitional_ignore();
ASSERT(eq1.equivalent(&eq2));
}
@@ -1664,8 +1664,8 @@ TEST(InMatchExpression, InMatchExpressionsWithCollationNonEquivalentElementsAreU
InMatchExpression eq2;
eq2.setCollator(&collator2);
- eq1.addEquality(obj1.firstElement());
- eq2.addEquality(obj2.firstElement());
+ eq1.addEquality(obj1.firstElement()).transitional_ignore();
+ eq2.addEquality(obj2.firstElement()).transitional_ignore();
ASSERT(!eq1.equivalent(&eq2));
}
@@ -1674,7 +1674,7 @@ TEST(InMatchExpression, StringMatchingWithNullCollatorUsesBinaryComparison) {
BSONObj notMatch = BSON("a"
<< "string2");
InMatchExpression in;
- in.addEquality(operand.firstElement());
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(!in.matchesSingleElement(notMatch["a"]));
}
@@ -1685,7 +1685,7 @@ TEST(InMatchExpression, StringMatchingRespectsCollation) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
InMatchExpression in;
in.setCollator(&collator);
- in.addEquality(operand.firstElement());
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesSingleElement(match["a"]));
}
@@ -1698,8 +1698,8 @@ TEST(InMatchExpression, ChangingCollationAfterAddingEqualitiesPreservesEqualitie
CollatorInterfaceMock collatorReverseString(CollatorInterfaceMock::MockType::kReverseString);
InMatchExpression in;
in.setCollator(&collatorAlwaysEqual);
- in.addEquality(obj1.firstElement());
- in.addEquality(obj2.firstElement());
+ in.addEquality(obj1.firstElement()).transitional_ignore();
+ in.addEquality(obj2.firstElement()).transitional_ignore();
ASSERT(in.getEqualities().size() == 1);
in.setCollator(&collatorReverseString);
ASSERT(in.getEqualities().size() == 2);
diff --git a/src/mongo/db/matcher/expression_parser_geo_test.cpp b/src/mongo/db/matcher/expression_parser_geo_test.cpp
index b4450c41785..63008393ad6 100644
--- a/src/mongo/db/matcher/expression_parser_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_geo_test.cpp
@@ -1,5 +1,3 @@
-// expression_parser_geo_test.cpp
-
/**
* Copyright (C) 2013 10gen Inc.
*
@@ -80,7 +78,8 @@ TEST(MatchExpressionParserGeoNear, ParseNearExtraField) {
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
@@ -125,21 +124,24 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidNear) {
BSONObj query = fromjson("{loc: {$near: [0,0], $maxDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$near: [0,0], $minDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$near: [0,0], $eq: 40}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
@@ -154,7 +156,8 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidNear) {
"{loc: {$near: [0,0], $geoWithin: {$geometry: {type: \"Polygon\", coordinates: []}}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
@@ -208,21 +211,24 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidGeoNear) {
BSONObj query = fromjson("{loc: {$geoNear: [0,0], $eq: 1}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$geoNear: [0,0], $maxDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$geoNear: [0,0], $minDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
}
@@ -262,21 +268,24 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidNearSphere) {
BSONObj query = fromjson("{loc: {$nearSphere: [0,0], $maxDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$nearSphere: [0,0], $minDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$nearSphere: [0,0], $eq: 1}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
}
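
Note: in the ASSERT_THROWS hunks above, MatchExpressionParser::parse() returns a StatusWith whose value is irrelevant because the test only cares that the call throws, so the result is dropped through status_with_transitional_ignore(). The wrapper below is a hedged sketch under that assumption (names and layout are illustrative, not the real mongo::StatusWith) of how such a discard marker fits inside a throws-assertion.

#include <stdexcept>
#include <string>
#include <utility>

// Sketch of a must-use value-or-error wrapper (not MongoDB's real StatusWith).
template <typename T>
class [[nodiscard]] StatusWith {
public:
    explicit StatusWith(T value) : _ok(true), _value(std::move(value)) {}
    explicit StatusWith(std::string error) : _ok(false), _error(std::move(error)) {}

    bool isOK() const { return _ok; }
    const T& getValue() const { return _value; }

    // Marks a deliberately discarded result, e.g. inside an assertion body
    // where only the side effect (throwing) matters.
    void status_with_transitional_ignore() const noexcept {}

private:
    bool _ok;
    T _value{};
    std::string _error;
};

// Stand-in parser: throws on invalid input, like the $near cases above.
StatusWith<int> parseExpression(const std::string& query) {
    if (query.empty())
        throw std::runtime_error("invalid query");
    return StatusWith<int>(static_cast<int>(query.size()));
}

int main() {
    try {
        // Only the throw matters here; the returned value is discarded explicitly.
        parseExpression("").status_with_transitional_ignore();
    } catch (const std::runtime_error&) {
        // expected; this is what ASSERT_THROWS verifies
    }
    return 0;
}
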
diff --git a/src/mongo/db/matcher/expression_test.cpp b/src/mongo/db/matcher/expression_test.cpp
index ed135f4d46c..9b65644042d 100644
--- a/src/mongo/db/matcher/expression_test.cpp
+++ b/src/mongo/db/matcher/expression_test.cpp
@@ -43,7 +43,7 @@ namespace mongo {
TEST(LeafMatchExpressionTest, Equal1) {
BSONObj temp = BSON("x" << 5);
EqualityMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : [5] }")));
@@ -62,7 +62,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
LTEMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 6 }")));
@@ -71,7 +71,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
LTMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 6 }")));
@@ -80,7 +80,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
GTEMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 6 }")));
@@ -89,7 +89,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
GTMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 6 }")));
diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h
index d444f5b3ddc..7432e5a15f8 100644
--- a/src/mongo/db/matcher/expression_tree.h
+++ b/src/mongo/db/matcher/expression_tree.h
@@ -184,7 +184,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<NotMatchExpression> self = stdx::make_unique<NotMatchExpression>();
- self->init(_exp->shallowClone().release());
+ self->init(_exp->shallowClone().release()).transitional_ignore();
if (getTag()) {
self->setTag(getTag()->clone());
}
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index 1cd441b9b82..d5682a61f5c 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -193,8 +193,10 @@ bool BSONElementIterator::subCursorHasMore() {
}
_subCursorPath.reset(new ElementPath());
- _subCursorPath->init(_arrayIterationState.restOfPath.substr(
- _arrayIterationState.nextPieceOfPath.size() + 1));
+ _subCursorPath
+ ->init(_arrayIterationState.restOfPath.substr(
+ _arrayIterationState.nextPieceOfPath.size() + 1))
+ .transitional_ignore();
_subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
// If we're here, we must be able to traverse nonleaf arrays.
@@ -270,7 +272,7 @@ bool BSONElementIterator::more() {
// The current array element is a subdocument. See if the subdocument generates
// any elements matching the remaining subpath.
_subCursorPath.reset(new ElementPath());
- _subCursorPath->init(_arrayIterationState.restOfPath);
+ _subCursorPath->init(_arrayIterationState.restOfPath).transitional_ignore();
_subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
_subCursor.reset(new BSONElementIterator(_subCursorPath.get(), eltInArray.Obj()));
@@ -295,8 +297,10 @@ bool BSONElementIterator::more() {
// The current array element is itself an array. See if the nested array
// has any elements matching the remaining subpath.
_subCursorPath.reset(new ElementPath());
- _subCursorPath->init(_arrayIterationState.restOfPath.substr(
- _arrayIterationState.nextPieceOfPath.size() + 1));
+ _subCursorPath
+ ->init(_arrayIterationState.restOfPath.substr(
+ _arrayIterationState.nextPieceOfPath.size() + 1))
+ .transitional_ignore();
_subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
BSONElementIterator* real = new BSONElementIterator(
_subCursorPath.get(), _arrayIterationState._current.Obj());
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index f679f71b303..0f00b00d615 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -512,18 +512,18 @@ Status addMongodOptions(moe::OptionSection* options) {
.setSources(moe::SourceYAMLConfig);
- options->addSection(general_options);
+ options->addSection(general_options).transitional_ignore();
#if defined(_WIN32)
- options->addSection(windows_scm_options);
+ options->addSection(windows_scm_options).transitional_ignore();
#endif
- options->addSection(replication_options);
- options->addSection(ms_options);
- options->addSection(rs_options);
- options->addSection(sharding_options);
+ options->addSection(replication_options).transitional_ignore();
+ options->addSection(ms_options).transitional_ignore();
+ options->addSection(rs_options).transitional_ignore();
+ options->addSection(sharding_options).transitional_ignore();
#ifdef MONGO_CONFIG_SSL
- options->addSection(ssl_options);
+ options->addSection(ssl_options).transitional_ignore();
#endif
- options->addSection(storage_options);
+ options->addSection(storage_options).transitional_ignore();
// The following are legacy options that are disallowed in the JSON config file
diff --git a/src/mongo/db/ops/modifier_add_to_set.cpp b/src/mongo/db/ops/modifier_add_to_set.cpp
index 5e1d5be480d..23825863b39 100644
--- a/src/mongo/db/ops/modifier_add_to_set.cpp
+++ b/src/mongo/db/ops/modifier_add_to_set.cpp
@@ -64,7 +64,7 @@ void deduplicate(mb::Element parent, Ordering comp, Equality equal) {
std::vector<mb::Element>::iterator next = where;
++next;
while (next != end && equal(*where, *next)) {
- next->remove();
+ next->remove().transitional_ignore();
++next;
}
where = next;
diff --git a/src/mongo/db/ops/modifier_pull.cpp b/src/mongo/db/ops/modifier_pull.cpp
index 75bfaa4aa30..70d5442a716 100644
--- a/src/mongo/db/ops/modifier_pull.cpp
+++ b/src/mongo/db/ops/modifier_pull.cpp
@@ -214,7 +214,7 @@ Status ModifierPull::apply() const {
std::vector<mb::Element>::const_iterator where = _preparedState->elementsToRemove.begin();
const std::vector<mb::Element>::const_iterator end = _preparedState->elementsToRemove.end();
for (; where != end; ++where)
- const_cast<mb::Element&>(*where).remove();
+ const_cast<mb::Element&>(*where).remove().transitional_ignore();
return Status::OK();
}
diff --git a/src/mongo/db/ops/modifier_pull_all.cpp b/src/mongo/db/ops/modifier_pull_all.cpp
index 26a3d6d24ff..0659e91ef38 100644
--- a/src/mongo/db/ops/modifier_pull_all.cpp
+++ b/src/mongo/db/ops/modifier_pull_all.cpp
@@ -212,7 +212,7 @@ Status ModifierPullAll::apply() const {
vector<mutablebson::Element>::const_iterator curr = _preparedState->elementsToRemove.begin();
const vector<mutablebson::Element>::const_iterator end = _preparedState->elementsToRemove.end();
for (; curr != end; ++curr) {
- const_cast<mutablebson::Element&>(*curr).remove();
+ const_cast<mutablebson::Element&>(*curr).remove().transitional_ignore();
}
return Status::OK();
}
diff --git a/src/mongo/db/ops/modifier_push.cpp b/src/mongo/db/ops/modifier_push.cpp
index 6be330cc98f..2e8acb4de53 100644
--- a/src/mongo/db/ops/modifier_push.cpp
+++ b/src/mongo/db/ops/modifier_push.cpp
@@ -623,7 +623,7 @@ Status ModifierPush::apply() const {
// Slice 0 means to remove all
if (_slice == 0) {
while (_preparedState->elemFound.ok() && _preparedState->elemFound.rightChild().ok()) {
- _preparedState->elemFound.rightChild().remove();
+ _preparedState->elemFound.rightChild().remove().transitional_ignore();
}
}
diff --git a/src/mongo/db/ops/modifier_set_test.cpp b/src/mongo/db/ops/modifier_set_test.cpp
index 354aae01929..45c26e495a2 100644
--- a/src/mongo/db/ops/modifier_set_test.cpp
+++ b/src/mongo/db/ops/modifier_set_test.cpp
@@ -134,7 +134,7 @@ TEST(SimpleMod, PrepareIdentityOpOnDeserializedIsNotANoOp) {
Document doc(fromjson("{a: { b: NumberInt(0)}}"));
// Apply a mutation to the document that will make it non-serialized.
- doc.root()["a"]["b"].setValueInt(2);
+ doc.root()["a"]["b"].setValueInt(2).transitional_ignore();
// Apply an op that would be a no-op.
Mod setMod(fromjson("{$set: {a: {b : NumberInt(2)}}}"));
@@ -786,9 +786,9 @@ TEST(Ephemeral, ApplySetModToEphemeralDocument) {
// $set.
Document doc;
Element x = doc.makeElementObject("x");
- doc.root().pushBack(x);
+ doc.root().pushBack(x).transitional_ignore();
Element a = doc.makeElementInt("a", 100);
- x.pushBack(a);
+ x.pushBack(a).transitional_ignore();
Mod setMod(fromjson("{ $set: { x: { a: 100, b: 2 }}}"), true);
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index f8b20c2d10c..e5154ff279d 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -218,8 +218,9 @@ bool handleError(OperationContext* opCtx,
}
if (!opCtx->getClient()->isInDirectClient()) {
- ShardingState::get(opCtx)->onStaleShardVersion(
- opCtx, wholeOp.ns, staleConfigException->getVersionReceived());
+ ShardingState::get(opCtx)
+ ->onStaleShardVersion(opCtx, wholeOp.ns, staleConfigException->getVersionReceived())
+ .transitional_ignore();
}
out->staleConfigException =
stdx::make_unique<SendStaleConfigException>(*staleConfigException);
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 1b7d19f2d2f..bf37a88955e 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -437,10 +437,10 @@ boost::intrusive_ptr<DocumentSourceMatch> DocumentSourceMatch::descendMatchOnPat
if (node->isLeaf() && node->matchType() != MatchExpression::TYPE_OPERATOR &&
node->matchType() != MatchExpression::WHERE) {
auto leafNode = static_cast<LeafMatchExpression*>(node);
- leafNode->setPath(newPath);
+ leafNode->setPath(newPath).transitional_ignore();
} else if (node->isArray()) {
auto arrayNode = static_cast<ArrayMatchingMatchExpression*>(node);
- arrayNode->setPath(newPath);
+ arrayNode->setPath(newPath).transitional_ignore();
}
});
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index f9f6beb455f..cc59d01e1d2 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -86,7 +86,7 @@ void prefetchIndexPages(OperationContext* opCtx,
return;
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
invariant(iam);
- iam->touch(opCtx, obj);
+ iam->touch(opCtx, obj).transitional_ignore();
} catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
}
@@ -104,7 +104,7 @@ void prefetchIndexPages(OperationContext* opCtx,
IndexDescriptor* desc = ii.next();
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
verify(iam);
- iam->touch(opCtx, obj);
+ iam->touch(opCtx, obj).transitional_ignore();
} catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
}
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 55d1b49f5cb..ace19183ff5 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -325,7 +325,7 @@ MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
// Make a NOT to be the new root and transfer ownership of the child to it.
auto newRoot = stdx::make_unique<NotMatchExpression>();
- newRoot->init(child.release());
+ newRoot->init(child.release()).transitional_ignore();
return newRoot.release();
}
@@ -358,7 +358,7 @@ MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
// Create a new RegexMatchExpression, because 'childRe' does not have a path.
auto re = stdx::make_unique<RegexMatchExpression>();
- re->init(in->path(), childRe->getString(), childRe->getFlags());
+ re->init(in->path(), childRe->getString(), childRe->getFlags()).transitional_ignore();
if (in->getTag()) {
re->setTag(in->getTag()->clone());
}
@@ -368,7 +368,7 @@ MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
// IN of 1 equality is the equality.
if (in->getEqualities().size() == 1 && in->getRegexes().empty()) {
auto eq = stdx::make_unique<EqualityMatchExpression>();
- eq->init(in->path(), *(in->getEqualities().begin()));
+ eq->init(in->path(), *(in->getEqualities().begin())).transitional_ignore();
eq->setCollator(in->getCollator());
if (in->getTag()) {
eq->setTag(in->getTag()->clone());
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index a874f1f89e3..5428386103f 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -550,7 +550,7 @@ TEST(CanonicalQueryTest, NormalizeWithInPreservesCollator) {
BSONObj obj = fromjson("{'': 'string'}");
auto inMatchExpression = stdx::make_unique<InMatchExpression>();
inMatchExpression->setCollator(&collator);
- inMatchExpression->addEquality(obj.firstElement());
+ inMatchExpression->addEquality(obj.firstElement()).transitional_ignore();
unique_ptr<MatchExpression> matchExpression(
CanonicalQuery::normalizeTree(inMatchExpression.release()));
ASSERT(matchExpression->matchType() == MatchExpression::MatchType::EQ);
diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp
index d01f9b72dcb..783438a482c 100644
--- a/src/mongo/db/query/planner_analysis_test.cpp
+++ b/src/mongo/db/query/planner_analysis_test.cpp
@@ -180,7 +180,7 @@ TEST(QueryPlannerAnalysis, GeoSkipValidation) {
std::unique_ptr<GeoMatchExpression> exprPtr = stdx::make_unique<GeoMatchExpression>();
GeoMatchExpression* expr = exprPtr.get();
- expr->init("geometry.field", nullptr, BSONObj());
+ expr->init("geometry.field", nullptr, BSONObj()).transitional_ignore();
FetchNode* fetchNode = fetchNodePtr.get();
// Takes ownership.
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 07ba13c47e4..4e706b966ec 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -832,7 +832,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
enumParams.indices = &relevantIndices;
PlanEnumerator isp(enumParams);
- isp.init();
+ isp.init().transitional_ignore();
MatchExpression* rawTree;
while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index f09dbc6319b..a73815cbb8f 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -329,7 +329,7 @@ void BackgroundSync::_produce(OperationContext* opCtx) {
log() << "Our newest OpTime : " << lastOpTimeFetched;
log() << "Earliest OpTime available is " << syncSourceResp.earliestOpTimeSeen
<< " from " << syncSourceResp.getSyncSource();
- _replCoord->abortCatchupIfNeeded();
+ _replCoord->abortCatchupIfNeeded().transitional_ignore();
return;
}
@@ -586,7 +586,7 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
StorageInterface* storageInterface) {
if (_replCoord->getMemberState().primary()) {
warning() << "Rollback situation detected in catch-up mode. Aborting catch-up mode.";
- _replCoord->abortCatchupIfNeeded();
+ _replCoord->abortCatchupIfNeeded().transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 230a13f43ae..cdf5e7fda84 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -442,7 +442,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
rpc::ReplSetMetadata::kNoPrimary,
-1);
BSONObjBuilder metadataBuilder;
- metadata.writeToMetadata(&metadataBuilder);
+ metadata.writeToMetadata(&metadataBuilder).transitional_ignore();
getNet()->scheduleResponse(
noi,
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 31803378b07..4fb7a7fe208 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -94,7 +94,9 @@ void CollectionClonerTest::setUp() {
const CollectionOptions& options,
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs) {
- (_loader = new CollectionBulkLoaderMock(&collectionStats))->init(secondaryIndexSpecs);
+ (_loader = new CollectionBulkLoaderMock(&collectionStats))
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(_loader));
@@ -352,7 +354,7 @@ TEST_F(CollectionClonerTest, DoNotCreateIDIndexIfAutoIndexIdUsed) {
collNss = theNss;
collOptions = theOptions;
collIndexSpecs = theIndexSpecs;
- loader->init(theIndexSpecs);
+ loader->init(theIndexSpecs).transitional_ignore();
return std::unique_ptr<CollectionBulkLoader>(loader);
};
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index 364e737fba7..593090f9ae0 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -101,7 +101,8 @@ void DatabaseClonerTest::setUp() {
const std::vector<BSONObj>& secondaryIndexSpecs) {
const auto collInfo = &_collections[nss];
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index 36020cea9a8..5e7ceacae2b 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -176,7 +176,8 @@ protected:
log() << "reusing collection during test which may cause problems, ns:" << nss;
}
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
index 8bafea199b9..4dcabbcdd09 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
@@ -132,7 +132,7 @@ TEST_F(DropPendingCollectionReaperTest,
opTime[i] = OpTime({Seconds((i + 1) * 10), 0}, 1LL);
ns[i] = NamespaceString("test", str::stream() << "coll" << i);
dpns[i] = ns[i].makeDropPendingNamespace(opTime[i]);
- _storageInterface->createCollection(opCtx.get(), dpns[i], {});
+ _storageInterface->createCollection(opCtx.get(), dpns[i], {}).transitional_ignore();
}
// Add drop-pending namespaces with drop optimes out of order and check that
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index d9872693f8e..a327208c172 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -236,7 +236,7 @@ public:
int selfConfigIndex = 0;
ReplSetConfig config;
- config.initialize(configObj);
+ config.initialize(configObj).transitional_ignore();
std::vector<HostAndPort> hosts;
for (ReplSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index d9f0b9a8872..f0b86dbd0af 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -833,17 +833,19 @@ public:
Timestamp lastOpTimeApplied(100, 0);
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2"))));
+ config
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2"))))
+ .transitional_ignore();
std::vector<HostAndPort> hosts;
for (ReplSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 6ef1ad277d2..54218230581 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -232,7 +232,7 @@ InitialSyncer::InitialSyncer(
InitialSyncer::~InitialSyncer() {
DESTRUCTOR_GUARD({
- shutdown();
+ shutdown().transitional_ignore();
join();
});
}
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 1a55a5d83dc..8dc0b62f53a 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -269,7 +269,8 @@ protected:
log() << "reusing collection during test which may cause problems, ns:" << nss;
}
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
@@ -903,7 +904,7 @@ TEST_F(InitialSyncerTest, InitialSyncerRecreatesOplogAndDropsReplicatedDatabases
auto oldCreateOplogFn = _storageInterface->createOplogFn;
_storageInterface->createOplogFn = [oldCreateOplogFn](OperationContext* opCtx,
const NamespaceString& nss) {
- oldCreateOplogFn(opCtx, nss);
+ oldCreateOplogFn(opCtx, nss).transitional_ignore();
return Status(ErrorCodes::OperationFailed, "oplog creation failed");
};
@@ -1856,7 +1857,7 @@ TEST_F(InitialSyncerTest,
net->blackHole(noi);
}
- initialSyncer->shutdown();
+ initialSyncer->shutdown().transitional_ignore();
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
@@ -2086,7 +2087,7 @@ TEST_F(
OperationContext*, const NamespaceString& nss, const BSONObj& doc) {
insertDocumentNss = nss;
insertDocumentDoc = doc;
- initialSyncer->shutdown();
+ initialSyncer->shutdown().transitional_ignore();
return Status::OK();
};
@@ -3130,7 +3131,7 @@ TEST_F(
const MultiApplier::Operations& ops,
MultiApplier::ApplyOperationFn applyOperation) {
// 'OperationPtr*' is ignored by our overridden _multiInitialSyncApply().
- applyOperation(nullptr);
+ applyOperation(nullptr).transitional_ignore();
return ops.back().getOpTime();
};
bool fetchCountIncremented = false;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index bf69d4e02bd..79e22f471c3 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1267,7 +1267,7 @@ void SnapshotThread::run() {
name = replCoord->reserveSnapshotName(nullptr);
// This establishes the view that we will name.
- _manager->prepareForCreateSnapshot(opCtx.get());
+ _manager->prepareForCreateSnapshot(opCtx.get()).transitional_ignore();
}
auto opTimeOfSnapshot = OpTime();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 961926a1f35..c35fa20b046 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -859,7 +859,7 @@ void ReplicationCoordinatorExternalStateImpl::createSnapshot(OperationContext* o
SnapshotName name) {
auto manager = _service->getGlobalStorageEngine()->getSnapshotManager();
invariant(manager); // This should never be called if there is no SnapshotManager.
- manager->createSnapshot(opCtx, name);
+ manager->createSnapshot(opCtx, name).transitional_ignore();
}
void ReplicationCoordinatorExternalStateImpl::forceSnapshotCreation() {
@@ -959,7 +959,7 @@ void ReplicationCoordinatorExternalStateImpl::onDurable(const JournalListener::T
void ReplicationCoordinatorExternalStateImpl::startNoopWriter(OpTime opTime) {
invariant(_noopWriter);
- _noopWriter->startWritingPeriodicNoops(opTime);
+ _noopWriter->startWritingPeriodicNoops(opTime).transitional_ignore();
}
void ReplicationCoordinatorExternalStateImpl::stopNoopWriter() {
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index bb2c3e1f0da..2d3ba978e54 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -611,7 +611,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
// Clear maint. mode.
while (getMaintenanceMode()) {
- setMaintenanceMode(false);
+ setMaintenanceMode(false).transitional_ignore();
}
if (startCompleted) {
@@ -2208,14 +2208,16 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(
// Do not conduct an election during a reconfig, as the node may not be electable post-reconfig.
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
// Wait for the election to complete and the node's Role to be set to follower.
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
- this,
- stdx::placeholders::_1,
- newConfig,
- isForceReconfig,
- myIndex,
- finishedEvent));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ isForceReconfig,
+ myIndex,
+ finishedEvent))
+ .status_with_transitional_ignore();
return;
}
@@ -3028,12 +3030,12 @@ void ReplicationCoordinatorImpl::_prepareReplSetMetadata_inlock(const OpTime& la
OpTime lastVisibleOpTime =
std::max(lastOpTimeFromClient, _getCurrentCommittedSnapshotOpTime_inlock());
auto metadata = _topCoord->prepareReplSetMetadata(lastVisibleOpTime);
- metadata.writeToMetadata(builder);
+ metadata.writeToMetadata(builder).transitional_ignore();
}
void ReplicationCoordinatorImpl::_prepareOplogQueryMetadata_inlock(int rbid,
BSONObjBuilder* builder) const {
- _topCoord->prepareOplogQueryMetadata(rbid).writeToMetadata(builder);
+ _topCoord->prepareOplogQueryMetadata(rbid).writeToMetadata(builder).transitional_ignore();
}
bool ReplicationCoordinatorImpl::isV1ElectionProtocol() const {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index 322c84246e6..91b1c5dfc15 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -142,9 +142,10 @@ void ReplicationCoordinatorImpl::_startElectSelf_inlock() {
return;
}
fassert(18681, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
@@ -217,9 +218,10 @@ void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() {
}
fassert(18685, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index 5462732a99e..f09de3c8b80 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -391,15 +391,17 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index d227f78b76b..394878e52ec 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -149,8 +149,10 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock() {
return;
}
fassert(28685, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
@@ -244,9 +246,10 @@ void ReplicationCoordinatorImpl::_startVoteRequester_inlock(long long newTerm) {
return;
}
fassert(28643, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, newTerm));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, newTerm))
+ .status_with_transitional_ignore();
}
void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long originalTerm) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index a1c34534c3f..6d0ade99358 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -470,17 +470,19 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "protocolVersion"
- << 1));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "protocolVersion"
+ << 1))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
@@ -759,7 +761,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
simulateEnoughHeartbeatsForAllNodesUp();
simulateSuccessfulDryRun();
// update to a future term before the election completes
- getReplCoord()->updateTerm(&opCtx, 1000);
+ getReplCoord()->updateTerm(&opCtx, 1000).transitional_ignore();
NetworkInterfaceMock* net = getNet();
net->enterNetwork();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 04216de9202..379a18fb231 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -337,8 +337,12 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_stepDownStart()
return finishEvent;
}
- _replExecutor->scheduleWork(stdx::bind(
- &ReplicationCoordinatorImpl::_stepDownFinish, this, stdx::placeholders::_1, finishEvent));
+ _replExecutor
+ ->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish,
+ this,
+ stdx::placeholders::_1,
+ finishEvent))
+ .status_with_transitional_ignore();
return finishEvent;
}
@@ -398,17 +402,21 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
<< newConfig.getConfigVersion()
<< " to be processed after election is cancelled.";
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig))
+ .status_with_transitional_ignore();
return;
}
- _replExecutor->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ _replExecutor
+ ->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig))
+ .status_with_transitional_ignore();
}
void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
@@ -490,13 +498,14 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
if (MONGO_FAIL_POINT(blockHeartbeatReconfigFinish)) {
LOG_FOR_HEARTBEATS(0) << "blockHeartbeatReconfigFinish fail point enabled. Rescheduling "
"_heartbeatReconfigFinish until fail point is disabled.";
- _replExecutor->scheduleWorkAt(
- _replExecutor->now() + Milliseconds{10},
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
+ _replExecutor
+ ->scheduleWorkAt(_replExecutor->now() + Milliseconds{10},
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex))
+ .status_with_transitional_ignore();
return;
}
@@ -522,12 +531,14 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
<< "Waiting for election to complete before finishing reconfig to version "
<< newConfig.getConfigVersion();
// Wait for the election to complete and the node's Role to be set to follower.
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex))
+ .status_with_transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 59eee099c93..9a00ab76f2e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -379,7 +379,7 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
rpc::ReplSetMetadata metadata(
opTime.getTerm(), opTime, opTime, rsConfig.getConfigVersion(), unexpectedId, 1, -1);
BSONObjBuilder metadataBuilder;
- metadata.writeToMetadata(&metadataBuilder);
+ metadata.writeToMetadata(&metadataBuilder).transitional_ignore();
heartbeatResponse = makeResponseStatus(responseBuilder.obj(), metadataBuilder.obj());
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 761359552ed..e6fecc98450 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -520,15 +520,17 @@ TEST_F(
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
@@ -591,15 +593,17 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
ReplSetHeartbeatResponse hbResp;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 4
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(4);
hbResp.setSetName("mySet");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 2fd396c1218..896cea7bd5c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -979,10 +979,10 @@ TEST_F(
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// Majority satisfied but not either custom mode
- getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 1, time1);
- getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 2, time1);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 1, time1).transitional_ignore();
+ getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 2, time1).transitional_ignore();
getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
@@ -993,8 +993,8 @@ TEST_F(
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// All modes satisfied
- getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 3, time1);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 3, time1).transitional_ignore();
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
ASSERT_OK(statusAndDur.status);
@@ -1039,8 +1039,8 @@ TEST_F(
// multiDC satisfied but not majority or multiRack
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
- getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2);
- getReplCoord()->setLastDurableOptime_forTest(2, 3, time2);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 3, time2).transitional_ignore();
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, majorityWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
@@ -2333,7 +2333,7 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
// Step down from primary.
- getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1).transitional_ignore();
ASSERT_OK(getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Seconds(1)));
status = getReplCoord()->setMaintenanceMode(false);
@@ -2369,8 +2369,10 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection)
// Step down from primary.
- getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
- getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000));
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1).transitional_ignore();
+ getReplCoord()
+ ->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000))
+ .transitional_ignore();
// Can't modify maintenance mode when running for election (before and after dry run).
ASSERT_EQUALS(TopologyCoordinator::Role::follower, getTopoCoord().getRole());
@@ -3863,7 +3865,7 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
OpTime time(Timestamp(10, 0), 1);
@@ -3905,7 +3907,7 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
// higher term, should change
@@ -3984,7 +3986,7 @@ TEST_F(ReplCoordTest,
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4110,7 +4112,7 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4330,17 +4332,19 @@ TEST_F(ReplCoordTest,
// Respond to node1's heartbeat command with a config that excludes node1.
ReplSetHeartbeatResponse hbResp;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node2:12345"
- << "_id"
- << 1))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1))))
+ .transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
hbResp.setSetName("mySet");
@@ -4735,9 +4739,11 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
memberIds.insert(memberId);
OpTime appliedOpTime;
OpTime durableOpTime;
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, appliedOpTime);
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, durableOpTime);
}
ASSERT_EQUALS(2U, memberIds.size());
@@ -4751,7 +4757,8 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
long long memberId = entry[OldUpdatePositionArgs::kMemberIdFieldName].Number();
memberIds2.insert(memberId);
OpTime entryOpTime;
- bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime);
+ bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, entryOpTime);
}
ASSERT_EQUALS(2U, memberIds2.size());
@@ -4779,9 +4786,11 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
memberIds3.insert(memberId);
OpTime appliedOpTime;
OpTime durableOpTime;
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, appliedOpTime);
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, durableOpTime);
}
ASSERT_EQUALS(1U, memberIds3.size());
@@ -4795,7 +4804,8 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
long long memberId = entry[OldUpdatePositionArgs::kMemberIdFieldName].Number();
memberIds4.insert(memberId);
OpTime entryOpTime;
- bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime);
+ bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, entryOpTime);
}
ASSERT_EQUALS(1U, memberIds4.size());
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 64b7dd27e12..b22c180bb7c 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -110,7 +110,7 @@ Reporter::Reporter(executor::TaskExecutor* executor,
}
Reporter::~Reporter() {
- DESTRUCTOR_GUARD(shutdown(); join(););
+ DESTRUCTOR_GUARD(shutdown(); join().transitional_ignore(););
}
std::string Reporter::toString() const {
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index cd77a41ba95..d1e794e015d 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -82,7 +82,7 @@ void RollbackTest::setUp() {
_opCtx = cc().makeOperationContext();
_replicationProcess->getConsistencyMarkers()->setAppliedThrough(_opCtx.get(), OpTime{});
_replicationProcess->getConsistencyMarkers()->setMinValid(_opCtx.get(), OpTime{});
- _replicationProcess->initializeRollbackID(_opCtx.get());
+ _replicationProcess->initializeRollbackID(_opCtx.get()).transitional_ignore();
_threadPoolExecutorTest.launchExecutorThread();
}
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 6e3615ceee0..048aa72ab6d 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -188,7 +188,8 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdThrows) {
RollbackSourceLocal(stdx::make_unique<OplogInterfaceMock>()),
{},
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
UserException,
ErrorCodes::UnknownError);
}
@@ -211,7 +212,8 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdDiffersFromRequiredRBID) {
RollbackSourceLocal(stdx::make_unique<OplogInterfaceMock>()),
1,
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
UserException,
ErrorCodes::Error(40362));
}
@@ -243,7 +245,7 @@ Collection* _createCollection(OperationContext* opCtx,
mongo::WriteUnitOfWork wuow(opCtx);
auto db = dbHolder().openDb(opCtx, nss.db());
ASSERT_TRUE(db);
- db->dropCollection(opCtx, nss.ns());
+ db->dropCollection(opCtx, nss.ns()).transitional_ignore();
auto coll = db->createCollection(opCtx, nss.ns(), options);
ASSERT_TRUE(coll);
wuow.commit();
@@ -899,7 +901,8 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSynci
rollbackSource,
0,
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
DBException,
40365);
ASSERT(rollbackSource.copyCollectionCalled);
@@ -1160,7 +1163,8 @@ TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
const auto validOplogEntry = fromjson("{op: 'i', ns: 'test.t', o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("ns")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("ns"))
+ .transitional_ignore(),
RSFatalException);
}
@@ -1168,7 +1172,8 @@ TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
const auto validOplogEntry = fromjson("{op: 'i', ns: 'test.t', o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o"))
+ .transitional_ignore(),
RSFatalException);
}
@@ -1177,7 +1182,8 @@ TEST(RSRollbackTest, LocalEntryWithoutO2IsFatal) {
fromjson("{op: 'u', ns: 'test.t', o2: {_id: 1}, o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o2")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o2"))
+ .transitional_ignore(),
RSFatalException);
}
diff --git a/src/mongo/db/repl/scatter_gather_test.cpp b/src/mongo/db/repl/scatter_gather_test.cpp
index 294b2c84ed7..3f4d8d4d5cd 100644
--- a/src/mongo/db/repl/scatter_gather_test.cpp
+++ b/src/mongo/db/repl/scatter_gather_test.cpp
@@ -157,7 +157,9 @@ TEST_F(ScatterGatherTest, DeleteAlgorithmAfterItHasCompleted) {
ScatterGatherRunner* sgr = new ScatterGatherRunner(sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr->start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
ASSERT_FALSE(ranCompletion);
@@ -245,7 +247,9 @@ TEST_F(ScatterGatherTest, ShutdownExecutorAfterStart) {
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
shutdownExecutorThread();
sga.finish();
ASSERT_FALSE(ranCompletion);
@@ -258,7 +262,9 @@ TEST_F(ScatterGatherTest, DoNotProcessMoreThanSufficientResponses) {
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
ASSERT_FALSE(ranCompletion);
@@ -300,7 +306,9 @@ TEST_F(ScatterGatherTest, DoNotCreateCallbacksIfHasSufficientResponsesReturnsTru
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
// Wait until callback finishes.
NetworkInterfaceMock* net = getNet();
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index c01a1badff2..57eb9d4d262 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -133,7 +133,7 @@ int64_t getIndexKeyCount(OperationContext* opCtx, IndexCatalog* cat, IndexDescri
auto idx = cat->getIndex(desc);
int64_t numKeys;
ValidateResults fullRes;
- idx->validate(opCtx, &numKeys, &fullRes);
+ idx->validate(opCtx, &numKeys, &fullRes).transitional_ignore();
return numKeys;
}
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 1218ffa2cc2..77579cc24ad 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -256,12 +256,13 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
_finishCallback(Status(ErrorCodes::CallbackCanceled,
str::stream()
<< "sync source resolver shut down while probing candidate: "
- << candidate));
+ << candidate))
+ .transitional_ignore();
return;
}
if (ErrorCodes::CallbackCanceled == queryResult.getStatus()) {
- _finishCallback(queryResult.getStatus());
+ _finishCallback(queryResult.getStatus()).transitional_ignore();
return;
}
@@ -272,14 +273,14 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
<< "' for " << kFetcherErrorBlacklistDuration << " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
const auto& queryResponse = queryResult.getValue();
const auto remoteEarliestOpTime = _parseRemoteEarliestOpTime(candidate, queryResponse);
if (remoteEarliestOpTime.isNull()) {
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -306,7 +307,7 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
earliestOpTimeSeen = remoteEarliestOpTime;
}
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -323,7 +324,7 @@ void SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime earl
stdx::placeholders::_1));
if (!handle.isOK()) {
- _finishCallback(handle.getStatus());
+ _finishCallback(handle.getStatus()).transitional_ignore();
return;
}
@@ -339,7 +340,7 @@ void SyncSourceResolver::_rbidRequestCallback(
OpTime earliestOpTimeSeen,
const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) {
if (rbidReply.response.status == ErrorCodes::CallbackCanceled) {
- _finishCallback(rbidReply.response.status);
+ _finishCallback(rbidReply.response.status).transitional_ignore();
return;
}
@@ -352,7 +353,7 @@ void SyncSourceResolver::_rbidRequestCallback(
log() << "Blacklisting " << candidate << " due to error: '" << ex << "' for "
<< kFetcherErrorBlacklistDuration << " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -361,11 +362,11 @@ void SyncSourceResolver::_rbidRequestCallback(
// Unittest requires that this kind of failure be handled specially.
auto status = _scheduleFetcher(_makeRequiredOpTimeFetcher(candidate, earliestOpTimeSeen));
if (!status.isOK()) {
- _finishCallback(status);
+ _finishCallback(status).transitional_ignore();
}
return;
}
- _finishCallback(candidate);
+ _finishCallback(candidate).transitional_ignore();
}
Status SyncSourceResolver::_compareRequiredOpTimeWithQueryResponse(
@@ -405,12 +406,13 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
"required optime "
<< _requiredOpTime.toString()
<< " in candidate's oplog: "
- << candidate));
+ << candidate))
+ .transitional_ignore();
return;
}
if (ErrorCodes::CallbackCanceled == queryResult.getStatus()) {
- _finishCallback(queryResult.getStatus());
+ _finishCallback(queryResult.getStatus()).transitional_ignore();
return;
}
@@ -422,7 +424,7 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
<< " until: " << until << ". required optime: " << _requiredOpTime;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -439,11 +441,11 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
<< " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
- _finishCallback(candidate);
+ _finishCallback(candidate).transitional_ignore();
}
Status SyncSourceResolver::_chooseAndProbeNextSyncSource(OpTime earliestOpTimeSeen) {
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 7492bb4bb40..9dbed691425 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -1967,7 +1967,9 @@ StatusWith<BSONObj> TopologyCoordinatorImpl::prepareReplSetUpdatePositionCommand
// Add metadata to command. Old style parsing logic will reject the metadata.
if (commandStyle == ReplicationCoordinator::ReplSetUpdatePositionCommandStyle::kNewStyle) {
- prepareReplSetMetadata(currentCommittedSnapshotOpTime).writeToMetadata(&cmdBuilder);
+ prepareReplSetMetadata(currentCommittedSnapshotOpTime)
+ .writeToMetadata(&cmdBuilder)
+ .transitional_ignore();
}
return cmdBuilder.obj();
}
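A related helper appears where executor APIs return StatusWith<T> rather than a bare Status; those call sites use status_with_transitional_ignore(). A hedged sketch of how such a wrapper might pair with the Status sketch above, again with illustrative names rather than the real headers:

    // Sketch only: a StatusWith<T> holding either a value or an error Status,
    // with an explicit "ignore the whole thing" helper.
    #include <iostream>
    #include <string>
    #include <utility>

    struct Status {
        bool ok;
        std::string reason;
        void transitional_ignore() const noexcept {}
    };

    template <typename T>
    class [[nodiscard]] StatusWith {
    public:
        StatusWith(T value) : _status{true, ""}, _value(std::move(value)) {}
        StatusWith(Status error) : _status(std::move(error)) {}

        bool isOK() const { return _status.ok; }
        const Status& getStatus() const { return _status; }
        const T& getValue() const { return _value; }

        // Deliberately discard both the status and the value.
        void status_with_transitional_ignore() const noexcept {}

    private:
        Status _status;
        T _value{};
    };

    StatusWith<int> scheduleWork() { return StatusWith<int>(42); }

    int main() {
        scheduleWork().status_with_transitional_ignore();  // explicit discard
        auto handle = scheduleWork();
        if (handle.isOK()) {
            std::cout << "scheduled callback id " << handle.getValue() << "\n";
        }
        return 0;
    }

The separate name makes it clear at the call site that a value, not just an error code, is being thrown away.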
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index b5af086b797..ec66ca1eb5f 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -2585,19 +2585,21 @@ TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataSameConfig) {
// construct a copy of the original config for log message checking later
// see HeartbeatResponseTest for the origin of the original config
ReplSetConfig originalConfig;
- originalConfig.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 5
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)));
+ originalConfig
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)))
+ .transitional_ignore();
ReplSetHeartbeatResponse sameConfigResponse;
sameConfigResponse.noteReplSet();
@@ -4757,7 +4759,8 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4791,7 +4794,8 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -5081,7 +5085,8 @@ TEST_F(HeartbeatResponseTest, ReconfigBetweenHeartbeatRequestAndRepsonse) {
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -5124,7 +5129,8 @@ TEST_F(HeartbeatResponseTest, ReconfigNodeRemovedBetweenHeartbeatRequestAndRepso
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -5561,7 +5567,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5569,17 +5576,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -5617,7 +5625,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5626,19 +5635,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -5674,7 +5684,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5683,19 +5694,20 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -5730,7 +5742,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5764,7 +5777,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5810,7 +5824,8 @@ TEST_F(TopoCoordTest, ArbiterDoesNotGrantVoteWhenItCanSeeAHealthyPrimaryOfEqualO
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5849,7 +5864,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5885,7 +5901,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -5918,17 +5935,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -5949,7 +5967,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5977,17 +5996,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6008,7 +6028,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6036,17 +6057,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6066,7 +6088,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6094,17 +6117,18 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6125,7 +6149,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6153,17 +6178,18 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6184,7 +6210,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 80fc9c3bf52..f4e43bf3e0b 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -2044,7 +2044,8 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2078,7 +2079,8 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2442,7 +2444,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2450,17 +2453,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -2498,7 +2502,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2507,19 +2512,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -2528,19 +2534,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// real request fine
ReplSetRequestVotesArgs args3;
- args3.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args3
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response3;
getTopoCoord().processReplSetRequestVotes(args3, &response3);
@@ -2549,19 +2556,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// dry post real, fails
ReplSetRequestVotesArgs args4;
- args4.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args4
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response4;
getTopoCoord().processReplSetRequestVotes(args4, &response4);
@@ -2598,7 +2606,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2607,19 +2616,20 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -2654,7 +2664,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2688,7 +2699,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2726,7 +2738,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2762,7 +2775,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -2795,17 +2809,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2826,7 +2841,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2854,17 +2870,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2885,7 +2902,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2913,17 +2931,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2943,7 +2962,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2971,17 +2991,18 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -3002,7 +3023,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -3030,17 +3052,18 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -3061,7 +3084,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -3552,7 +3576,8 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -3598,7 +3623,8 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -3900,7 +3926,7 @@ TEST_F(HeartbeatResponseTestV1,
// Freeze node to set stepdown wait.
BSONObjBuilder response;
- getTopoCoord().prepareFreezeResponse(now()++, 20, &response);
+ getTopoCoord().prepareFreezeResponse(now()++, 20, &response).status_with_transitional_ignore();
nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0");
ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
@@ -4659,21 +4685,23 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
// construct a copy of the original config for log message checking later
// see HeartbeatResponseTest for the origin of the original config
ReplSetConfig originalConfig;
- originalConfig.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 5
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)));
+ originalConfig
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)))
+ .transitional_ignore();
ReplSetHeartbeatResponse sameConfigResponse;
sameConfigResponse.noteReplSet();
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index d23c1220874..b8ab1945627 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -289,7 +289,7 @@ Status Balancer::moveSingleChunk(OperationContext* opCtx,
void Balancer::report(OperationContext* opCtx, BSONObjBuilder* builder) {
auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
- balancerConfig->refreshAndCheck(opCtx);
+ balancerConfig->refreshAndCheck(opCtx).transitional_ignore();
const auto mode = balancerConfig->getBalancerMode();
@@ -389,7 +389,8 @@ void Balancer::_mainThread() {
_balancedLastTime);
shardingContext->catalogClient(opCtx.get())
- ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON());
+ ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
+ .transitional_ignore();
}
LOG(1) << "*** End of balancing round";
@@ -408,7 +409,8 @@ void Balancer::_mainThread() {
roundDetails.setFailed(e.what());
shardingContext->catalogClient(opCtx.get())
- ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON());
+ ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
+ .transitional_ignore();
// Sleep a fair amount before retrying because of the error
_endRound(opCtx.get(), kBalanceRoundDefaultInterval);
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index eb47bb227c8..a8108a1f540 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -80,7 +80,8 @@ Status extractMigrationStatusFromCommandResponse(const BSONObj& commandResponse)
if (!commandStatus.isOK()) {
bool chunkTooBig = false;
- bsonExtractBooleanFieldWithDefault(commandResponse, kChunkTooBig, false, &chunkTooBig);
+ bsonExtractBooleanFieldWithDefault(commandResponse, kChunkTooBig, false, &chunkTooBig)
+ .transitional_ignore();
if (chunkTooBig) {
commandStatus = {ErrorCodes::ChunkTooBig, commandStatus.reason()};
}
@@ -594,8 +595,9 @@ void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext
// Clear the config.migrations collection so that those chunks can be scheduled for migration
// again.
- catalogClient->removeConfigDocuments(
- opCtx, MigrationType::ConfigNS, BSONObj(), kMajorityWriteConcern);
+ catalogClient
+ ->removeConfigDocuments(opCtx, MigrationType::ConfigNS, BSONObj(), kMajorityWriteConcern)
+ .transitional_ignore();
_state = State::kEnabled;
_condVar.notify_all();
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index fb2197b8d0c..a567a8a171a 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -279,7 +279,7 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
WriteUnitOfWork wuow(opCtx);
if (saver) {
- saver->goingToDelete(obj);
+ saver->goingToDelete(obj).transitional_ignore();
}
collection->deleteDocument(opCtx, rloc, nullptr, true);
wuow.commit();
diff --git a/src/mongo/db/s/collection_range_deleter_test.cpp b/src/mongo/db/s/collection_range_deleter_test.cpp
index c30412ceecc..f2e4af1a090 100644
--- a/src/mongo/db/s/collection_range_deleter_test.cpp
+++ b/src/mongo/db/s/collection_range_deleter_test.cpp
@@ -111,7 +111,8 @@ void CollectionRangeDeleterTest::setUp() {
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
ShardingMongodTestFixture::setUp();
replicationCoordinator()->alwaysAllowWrites(true);
- initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost));
+ initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost))
+ .transitional_ignore();
// RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter())
// ->setConnectionStringReturnValue(kConfigConnStr);
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index de6cfcabd96..68a4797a6e5 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -360,16 +360,18 @@ void MetadataManager::append(BSONObjBuilder* builder) {
void MetadataManager::_scheduleCleanup(executor::TaskExecutor* executor,
NamespaceString nss,
CollectionRangeDeleter::Action action) {
- executor->scheduleWork([executor, nss, action](auto&) {
- const int maxToDelete = std::max(int(internalQueryExecYieldIterations.load()), 1);
- Client::initThreadIfNotAlready("Collection Range Deleter");
- auto UniqueOpCtx = Client::getCurrent()->makeOperationContext();
- auto opCtx = UniqueOpCtx.get();
- auto next = CollectionRangeDeleter::cleanUpNextRange(opCtx, nss, action, maxToDelete);
- if (next != CollectionRangeDeleter::Action::kFinished) {
- _scheduleCleanup(executor, nss, next);
- }
- });
+ executor
+ ->scheduleWork([executor, nss, action](auto&) {
+ const int maxToDelete = std::max(int(internalQueryExecYieldIterations.load()), 1);
+ Client::initThreadIfNotAlready("Collection Range Deleter");
+ auto UniqueOpCtx = Client::getCurrent()->makeOperationContext();
+ auto opCtx = UniqueOpCtx.get();
+ auto next = CollectionRangeDeleter::cleanUpNextRange(opCtx, nss, action, maxToDelete);
+ if (next != CollectionRangeDeleter::Action::kFinished) {
+ _scheduleCleanup(executor, nss, next);
+ }
+ })
+ .status_with_transitional_ignore();
}
auto MetadataManager::_pushRangeToClean(ChunkRange const& range) -> CleanupNotification {
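
In MetadataManager::_scheduleCleanup above, executor->scheduleWork(...) returns a value-carrying result (a StatusWith holding a callback handle); since neither the handle nor the status is kept, the discard uses the StatusWith flavor of the marker. A hypothetical sketch, with illustrative names rather than MongoDB's actual types:

    #include <string>
    #include <utility>

    // Hypothetical sketch only -- illustrative names, not mongo::StatusWith.
    template <typename T>
    class [[nodiscard]] ResultWith {
    public:
        explicit ResultWith(T value) : _ok(true), _value(std::move(value)) {}
        explicit ResultWith(std::string error) : _ok(false), _error(std::move(error)) {}

        bool isOK() const {
            return _ok;
        }
        const T& getValue() const {
            return _value;
        }

        // Counterpart of transitional_ignore() for value-carrying results: both the
        // status and the value are knowingly dropped.
        void status_with_transitional_ignore() const noexcept {}

    private:
        bool _ok;
        T _value{};
        std::string _error;
    };

    struct CallbackHandleStub {};

    ResultWith<CallbackHandleStub> scheduleSomething() {
        return ResultWith<CallbackHandleStub>(CallbackHandleStub{});
    }

    void fireAndForget() {
        // Neither the handle nor the status is kept; the discard is explicit.
        scheduleSomething().status_with_transitional_ignore();
    }
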
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 5ba653ac731..a17eed1c6b4 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -81,7 +81,8 @@ protected:
void setUp() override {
ShardingMongodTestFixture::setUp();
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost));
+ initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost))
+ .transitional_ignore();
configTargeter()->setFindHostReturnValue(dummyHost);
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 46bfc059de2..50c05a7f763 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -327,7 +327,8 @@ void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* opCtx) {
case kDone:
break;
case kCloning:
- _callRecipient(createRequestWithSessionId(kRecvChunkAbort, _args.getNss(), _sessionId));
+ _callRecipient(createRequestWithSessionId(kRecvChunkAbort, _args.getNss(), _sessionId))
+ .status_with_transitional_ignore();
// Intentional fall through
case kNew:
_cleanup(opCtx);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 12cf31ef2dd..0ca6d2643e2 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -892,7 +892,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
}
if (serverGlobalParams.moveParanoia) {
- rs.goingToDelete(fullObj);
+ rs.goingToDelete(fullObj).transitional_ignore();
}
deleteObjects(opCtx,
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 2b6038748e7..da1b18008cf 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -164,15 +164,16 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
invariant(_state == kCreated);
auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
- grid.catalogClient(opCtx)->logChange(
- opCtx,
- "moveChunk.start",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(opCtx)
+ ->logChange(opCtx,
+ "moveChunk.start",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
_cloneDriver = stdx::make_unique<MigrationChunkClonerSourceLegacy>(
_args, _collectionMetadata->getKeyPattern(), _donorConnStr, _recipientHost);
@@ -434,15 +435,16 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
scopedGuard.Dismiss();
_cleanup(opCtx);
- grid.catalogClient(opCtx)->logChange(
- opCtx,
- "moveChunk.commit",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(opCtx)
+ ->logChange(opCtx,
+ "moveChunk.commit",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
return Status::OK();
}
@@ -452,15 +454,16 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
return;
}
- grid.catalogClient(opCtx)->logChange(
- opCtx,
- "moveChunk.error",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(opCtx)
+ ->logChange(opCtx,
+ "moveChunk.error",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
_cleanup(opCtx);
}
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 67edd9bb5af..b457f6d5df6 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -231,7 +231,8 @@ private:
auto range = ChunkRange(moveChunkRequest.getMinKey(), moveChunkRequest.getMaxKey());
if (moveChunkRequest.getWaitForDelete()) {
CollectionShardingState::waitForClean(
- opCtx, moveChunkRequest.getNss(), moveChunkRequest.getVersionEpoch(), range);
+ opCtx, moveChunkRequest.getNss(), moveChunkRequest.getVersionEpoch(), range)
+ .transitional_ignore();
// Ensure that the wait for write concern for the chunk cleanup will include
// the deletes performed by the range deleter thread.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 89c305cda43..32b1a82b3c3 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -82,11 +82,13 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.append("errmsg", *_cmdErrmsg);
}
- grid.catalogClient(_opCtx)->logChange(_opCtx,
- str::stream() << "moveChunk." << _where,
- _ns,
- _b.obj(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(_opCtx)
+ ->logChange(_opCtx,
+ str::stream() << "moveChunk." << _where,
+ _ns,
+ _b.obj(),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
} catch (const std::exception& e) {
warning() << "couldn't record timing for moveChunk '" << _where
<< "': " << redact(e.what());
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 5f322d945ee..35f9c4e04a6 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -233,7 +233,8 @@ Status ShardingStateRecovery::startMetadataOp(OperationContext* opCtx) {
// Couldn't wait for the replication to complete, but the local write was performed. Clear
// it up fast (without any waiting for journal or replication) and still treat it as
// failure.
- modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions());
+ modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions())
+ .transitional_ignore();
}
return upsertStatus;
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index c66fdf098ab..3d968ebabd1 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -653,7 +653,7 @@ Status canonicalizeServerOptions(moe::Environment* params) {
if (params->count("verbose")) {
std::string verbosity;
- params->get("verbose", &verbosity);
+ params->get("verbose", &verbosity).transitional_ignore();
if (s == verbosity ||
// Treat a verbosity of "true" the same as a single "v". See SERVER-11471.
(s == "v" && verbosity == "true")) {
diff --git a/src/mongo/db/server_parameters_test.cpp b/src/mongo/db/server_parameters_test.cpp
index 8ce7c9fe141..83fdb3e93b5 100644
--- a/src/mongo/db/server_parameters_test.cpp
+++ b/src/mongo/db/server_parameters_test.cpp
@@ -45,13 +45,13 @@ TEST(ServerParameters, Simple1) {
ExportedServerParameter<int, ServerParameterType::kStartupAndRuntime> ff(NULL, "ff", &f);
ASSERT_EQUALS("ff", ff.name());
- ff.set(6);
+ ff.set(6).transitional_ignore();
ASSERT_EQUALS(6, f.load());
- ff.set(BSON("x" << 7).firstElement());
+ ff.set(BSON("x" << 7).firstElement()).transitional_ignore();
ASSERT_EQUALS(7, f.load());
- ff.setFromString("8");
+ ff.setFromString("8").transitional_ignore();
ASSERT_EQUALS(8, f.load());
}
@@ -63,7 +63,7 @@ TEST(ServerParameters, Vector1) {
BSONObj x = BSON("x" << BSON_ARRAY("a"
<< "b"
<< "c"));
- vv.set(x.firstElement());
+ vv.set(x.firstElement()).transitional_ignore();
ASSERT_EQUALS(3U, v.size());
ASSERT_EQUALS("a", v[0]);
@@ -79,7 +79,7 @@ TEST(ServerParameters, Vector1) {
ASSERT(x.firstElement().woCompare(y.firstElement(), false) == 0);
- vv.setFromString("d,e");
+ vv.setFromString("d,e").transitional_ignore();
ASSERT_EQUALS(2U, v.size());
ASSERT_EQUALS("d", v[0]);
ASSERT_EQUALS("e", v[1]);
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index 76234bc3b4b..1ad67a9f275 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -225,7 +225,9 @@ public:
repl::getGlobalReplicationCoordinator()->setMaintenanceMode(true).isOK()) {}
~MaintenanceModeSetter() {
if (maintenanceModeSet)
- repl::getGlobalReplicationCoordinator()->setMaintenanceMode(false);
+ repl::getGlobalReplicationCoordinator()
+ ->setMaintenanceMode(false)
+ .transitional_ignore();
}
private:
@@ -250,7 +252,8 @@ void appendReplyMetadata(OperationContext* opCtx,
// TODO: refactor out of here as part of SERVER-18236
if (isShardingAware || isConfig) {
rpc::ShardingMetadata(lastOpTimeFromClient, replCoord->getElectionId())
- .writeToMetadata(metadataBob);
+ .writeToMetadata(metadataBob)
+ .transitional_ignore();
if (LogicalTimeValidator::isAuthorizedToAdvanceClock(opCtx)) {
// No need to sign logical times for internal clients.
SignedLogicalTime currentTime(
@@ -670,8 +673,10 @@ void execCommandDatabase(OperationContext* opCtx,
invariant(sce); // do not upcast from DBException created by uassert variants.
if (!opCtx->getClient()->isInDirectClient()) {
- ShardingState::get(opCtx)->onStaleShardVersion(
- opCtx, NamespaceString(sce->getns()), sce->getVersionReceived());
+ ShardingState::get(opCtx)
+ ->onStaleShardVersion(
+ opCtx, NamespaceString(sce->getns()), sce->getVersionReceived())
+ .transitional_ignore();
}
}
@@ -864,8 +869,9 @@ DbResponse receivedQuery(OperationContext* opCtx,
// If we got a stale config, wait in case the operation is stuck in a critical section
if (!opCtx->getClient()->isInDirectClient() && e.getCode() == ErrorCodes::SendStaleConfig) {
auto& sce = static_cast<const StaleConfigException&>(e);
- ShardingState::get(opCtx)->onStaleShardVersion(
- opCtx, NamespaceString(sce.getns()), sce.getVersionReceived());
+ ShardingState::get(opCtx)
+ ->onStaleShardVersion(opCtx, NamespaceString(sce.getns()), sce.getVersionReceived())
+ .transitional_ignore();
}
dbResponse.response.reset();
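
The MaintenanceModeSetter change above shows the destructor case: a RAII guard cannot propagate a Status out of its destructor, so the best-effort cleanup call is marked as deliberately ignored. A hypothetical, self-contained sketch (Status and ReplCoordStub are stand-ins, not the real replication coordinator interface):

    // Hypothetical, self-contained sketch; Status and ReplCoordStub are stand-ins.
    struct Status {
        bool ok;
        bool isOK() const {
            return ok;
        }
        void transitional_ignore() const noexcept {}
    };

    struct ReplCoordStub {
        Status setMaintenanceMode(bool /*on*/) {
            return Status{true};
        }
    };

    // A RAII guard cannot propagate a Status out of its destructor, so the
    // best-effort exit call is marked as deliberately ignored.
    class MaintenanceGuard {
    public:
        explicit MaintenanceGuard(ReplCoordStub* coord)
            : _coord(coord), _entered(coord->setMaintenanceMode(true).isOK()) {}

        ~MaintenanceGuard() {
            if (_entered)
                _coord->setMaintenanceMode(false).transitional_ignore();
        }

    private:
        ReplCoordStub* _coord;
        bool _entered;
    };
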
diff --git a/src/mongo/db/service_liason_mock.cpp b/src/mongo/db/service_liason_mock.cpp
index b7b03ba9ceb..4c2ba699acb 100644
--- a/src/mongo/db/service_liason_mock.cpp
+++ b/src/mongo/db/service_liason_mock.cpp
@@ -39,7 +39,7 @@ MockServiceLiasonImpl::MockServiceLiasonImpl() {
auto timerFactory = stdx::make_unique<executor::AsyncTimerFactoryMock>();
_timerFactory = timerFactory.get();
_runner = stdx::make_unique<PeriodicRunnerASIO>(std::move(timerFactory));
- _runner->startup();
+ _runner->startup().transitional_ignore();
}
LogicalSessionIdSet MockServiceLiasonImpl::getActiveSessions() const {
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index 48e8c41bc50..fb47d11a7eb 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -280,7 +280,8 @@ void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) {
ns = obj["ns"].valuestrsafe();
if (obj["options"].isABSONObj()) {
- options.parse(obj["options"].Obj(), CollectionOptions::parseForStorage);
+ options.parse(obj["options"].Obj(), CollectionOptions::parseForStorage)
+ .transitional_ignore();
}
BSONElement indexList = obj["indexes"];
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 70ae05fe666..e96cddbb1a7 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -57,7 +57,7 @@ public:
virtual void commit() {}
virtual void rollback() {
// Intentionally ignoring failure.
- _cce->_engine->dropIdent(_opCtx, _ident);
+ _cce->_engine->dropIdent(_opCtx, _ident).transitional_ignore();
}
OperationContext* const _opCtx;
@@ -74,7 +74,7 @@ public:
virtual void commit() {
// Intentionally ignoring failure here. Since we've removed the metadata pointing to the
// index, we should never see it again anyway.
- _cce->_engine->dropIdent(_opCtx, _ident);
+ _cce->_engine->dropIdent(_opCtx, _ident).transitional_ignore();
}
OperationContext* const _opCtx;
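
The dropIdent changes above sit inside commit/rollback change handlers, where the surrounding comments already explain why failure is acceptable; the marker only makes that decision explicit in code. A hypothetical sketch of such a handler, using stand-in types rather than the real KV engine interfaces:

    #include <string>

    // Hypothetical sketch only. A two-phase change handler whose commit() drops a
    // storage ident and deliberately ignores the returned status: the metadata no
    // longer references the ident, so a failed drop can at worst leak unused space.
    struct Status {
        bool ok;
        void transitional_ignore() const noexcept {}
    };

    struct KVEngineStub {
        Status dropIdent(const std::string& /*ident*/) {
            return Status{true};
        }
    };

    struct RemoveIndexChange {
        KVEngineStub* engine;
        std::string ident;

        void commit() {
            // Intentionally ignoring failure: nothing points at the ident anymore.
            engine->dropIdent(ident).transitional_ignore();
        }

        void rollback() {
            // Nothing to undo here; the metadata removal is handled elsewhere.
        }
    };
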
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
index cc6b9a7829a..88f1b7b7e55 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
@@ -62,7 +62,7 @@ public:
virtual void rollback() {
if (_dropOnRollback) {
// Intentionally ignoring failure
- _dce->_engine->getEngine()->dropIdent(_opCtx, _ident);
+ _dce->_engine->getEngine()->dropIdent(_opCtx, _ident).transitional_ignore();
}
const CollectionMap::iterator it = _dce->_collections.find(_collection);
@@ -100,7 +100,7 @@ public:
// Intentionally ignoring failure here. Since we've removed the metadata pointing to the
// collection, we should never see it again anyway.
if (_dropOnCommit)
- _dce->_engine->getEngine()->dropIdent(_opCtx, _ident);
+ _dce->_engine->getEngine()->dropIdent(_opCtx, _ident).transitional_ignore();
}
virtual void rollback() {
@@ -346,7 +346,7 @@ Status KVDatabaseCatalogEntryBase::dropCollection(OperationContext* opCtx, Strin
std::vector<std::string> indexNames;
entry->getAllIndexes(opCtx, &indexNames);
for (size_t i = 0; i < indexNames.size(); i++) {
- entry->removeIndex(opCtx, indexNames[i]);
+ entry->removeIndex(opCtx, indexNames[i]).transitional_ignore();
}
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 11f3fa50c60..4661b92e7c6 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -205,8 +205,9 @@ TEST(KVCatalogTest, Coll1) {
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- catalog->dropCollection(&opCtx, "a.b");
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed);
+ catalog->dropCollection(&opCtx, "a.b").transitional_ignore();
+ catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed)
+ .transitional_ignore();
uow.commit();
}
ASSERT_NOT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
diff --git a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
index 11cad9890d1..0612973f107 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
@@ -91,7 +91,7 @@ public:
void prepareSnapshot() {
snapshotOperation = makeOperation(); // each prepare gets a new operation.
- snapshotManager->prepareForCreateSnapshot(snapshotOperation);
+ snapshotManager->prepareForCreateSnapshot(snapshotOperation).transitional_ignore();
}
SnapshotName createSnapshot() {
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 462b6214243..f1538063192 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -86,7 +86,7 @@ KVStorageEngine::KVStorageEngine(
if (options.forRepair && catalogExists) {
log() << "Repairing catalog metadata";
// TODO should also validate all BSON in the catalog.
- engine->repairIdent(&opCtx, catalogInfo);
+ engine->repairIdent(&opCtx, catalogInfo).transitional_ignore();
}
if (!catalogExists) {
@@ -161,7 +161,7 @@ KVStorageEngine::KVStorageEngine(
continue;
log() << "dropping unused ident: " << toRemove;
WriteUnitOfWork wuow(&opCtx);
- _engine->dropIdent(&opCtx, toRemove);
+ _engine->dropIdent(&opCtx, toRemove).transitional_ignore();
wuow.commit();
}
}
@@ -239,7 +239,7 @@ Status KVStorageEngine::dropDatabase(OperationContext* opCtx, StringData db) {
for (std::list<std::string>::iterator it = toDrop.begin(); it != toDrop.end(); ++it) {
string coll = *it;
- entry->dropCollection(opCtx, coll);
+ entry->dropCollection(opCtx, coll).transitional_ignore();
}
toDrop.clear();
entry->getCollectionNamespaces(&toDrop);
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index 28a822c7814..cb35765e131 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -1811,7 +1811,8 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* opCtx,
splitkey.recordLoc,
true, // dupsallowed
bucketLoc,
- rLoc);
+ rLoc)
+ .transitional_ignore();
}
int newpos = keypos;
@@ -2336,7 +2337,7 @@ DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* opCtx,
int position;
BucketType* bucket = getBucket(opCtx, bucketLoc);
// XXX: owned to not owned conversion(?)
- _find(opCtx, bucket, key, recordLoc, false, &position, foundOut);
+ _find(opCtx, bucket, key, recordLoc, false, &position, foundOut).transitional_ignore();
// Look in our current bucket.
if (*foundOut) {
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index e34a5c5a22e..d4274feaa4b 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -185,7 +185,7 @@ class SimpleCreate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
this->checkValidNumKeys(0);
}
@@ -196,10 +196,10 @@ class SimpleInsertDelete : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key = simpleKey('z');
- this->insert(key, this->_helper.dummyDiskLoc);
+ this->insert(key, this->_helper.dummyDiskLoc).transitional_ignore();
this->checkValidNumKeys(1);
this->locate(key, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
@@ -216,14 +216,14 @@ class SplitUnevenBucketBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 10; ++i) {
BSONObj shortKey = simpleKey(shortToken(i), 1);
- this->insert(shortKey, this->_helper.dummyDiskLoc);
+ this->insert(shortKey, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj longKey = simpleKey(longToken(i), 800);
- this->insert(longKey, this->_helper.dummyDiskLoc);
+ this->insert(longKey, this->_helper.dummyDiskLoc).transitional_ignore();
}
this->checkValidNumKeys(20);
@@ -280,11 +280,11 @@ class MissingLocate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 3; ++i) {
BSONObj k = simpleKey('b' + 2 * i);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&opCtx));
@@ -318,20 +318,20 @@ class MissingLocateMultiBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
- this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc).transitional_ignore();
// This causes split
- this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc).transitional_ignore();
int pos;
DiskLoc loc;
@@ -370,20 +370,20 @@ class SERVER983 : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
- this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc).transitional_ignore();
// This will cause split
- this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc).transitional_ignore();
int pos;
DiskLoc loc;
@@ -419,17 +419,17 @@ class DontReuseUnused : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
const BSONObj root = simpleKey('p', 800);
this->unindex(root);
- this->insert(root, this->_helper.dummyDiskLoc);
+ this->insert(root, this->_helper.dummyDiskLoc).transitional_ignore();
this->locate(root, 0, true, this->head()->nextChild, 1);
}
};
@@ -439,11 +439,11 @@ class MergeBucketsTestBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
// numRecords() - 1, because this->_helper.dummyDiskLoc is actually in the record store too
@@ -495,11 +495,11 @@ class MergeBucketsDontReplaceHead : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 18; ++i) {
const BSONObj k = simpleKey('a' + i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
// numRecords(NULL) - 1, because fixedDiskLoc is actually in the record store too
@@ -884,7 +884,7 @@ public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
@@ -2181,14 +2181,14 @@ class LocateEmptyForward : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key1 = simpleKey('a');
- this->insert(key1, this->_helper.dummyDiskLoc);
+ this->insert(key1, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key2 = simpleKey('b');
- this->insert(key2, this->_helper.dummyDiskLoc);
+ this->insert(key2, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key3 = simpleKey('c');
- this->insert(key3, this->_helper.dummyDiskLoc);
+ this->insert(key3, this->_helper.dummyDiskLoc).transitional_ignore();
this->checkValidNumKeys(3);
this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&opCtx), 1);
@@ -2200,14 +2200,14 @@ class LocateEmptyReverse : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key1 = simpleKey('a');
- this->insert(key1, this->_helper.dummyDiskLoc);
+ this->insert(key1, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key2 = simpleKey('b');
- this->insert(key2, this->_helper.dummyDiskLoc);
+ this->insert(key2, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key3 = simpleKey('c');
- this->insert(key3, this->_helper.dummyDiskLoc);
+ this->insert(key3, this->_helper.dummyDiskLoc).transitional_ignore();
this->checkValidNumKeys(3);
this->locate(BSONObj(), -1, false, DiskLoc(), -1);
@@ -2219,7 +2219,7 @@ class DuplicateKeys : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key1 = simpleKey('z');
ASSERT_OK(this->insert(key1, this->_helper.dummyDiskLoc, true));
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
index de02abcf76b..f12f3328e72 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
@@ -74,7 +74,7 @@ void simpleInsertTest(const char* buf, int size) {
ASSERT_NOT_OK(rs.insertRecord(&opCtx, buf, 3, 1000).getStatus());
- rs.insertRecord(&opCtx, buf, size, 10000);
+ rs.insertRecord(&opCtx, buf, size, 10000).status_with_transitional_ignore();
{
BSONObjBuilder b;
@@ -119,7 +119,8 @@ TEST(CappedRecordStoreV1, EmptySingleExtent) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100}, {}};
@@ -150,7 +151,8 @@ TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -188,7 +190,8 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -229,7 +232,8 @@ TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -266,7 +270,8 @@ TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
@@ -301,7 +306,8 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -339,7 +345,8 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -424,13 +431,16 @@ TEST(CappedRecordStoreV1Scrambler, Minimal) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(
- &opCtx, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
- rs.insertRecord(&opCtx, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
- rs.insertRecord(
- &opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 400 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore(); // won't fit at end so wraps
+ rs.insertRecord(&opCtx, zeros, 120 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore(); // fits at end
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore(); // fits in earlier hole
{
LocAndSize recs[] = {{DiskLoc(0, 1500), 300}, // 2nd insert
@@ -467,34 +477,62 @@ TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
// This list of sizes was empirically generated to achieve this outcome. Don't think too
// much about them.
- rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 304 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 56 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 104 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 36 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 64 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1148), 148},
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index e49ac7c1301..47c85e38974 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -444,7 +444,7 @@ TEST(SimpleRecordStoreV1, Truncate) {
ASSERT_EQUALS(em.getExtent(DiskLoc(0, 0))->length, em.minSize());
}
- rs.truncate(&opCtx);
+ rs.truncate(&opCtx).transitional_ignore();
{
LocAndSize recs[] = {{}};
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 12df12c7765..a257d17037a 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -384,7 +384,7 @@ TEST(RecordStoreTestHarness, Truncate1) {
ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- rs->truncate(opCtx.get());
+ rs->truncate(opCtx.get()).transitional_ignore();
uow.commit();
}
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 5c034ceedbc..f8014dfabc5 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -78,7 +78,7 @@ TEST(SortedDataInterface, InsertWithDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), true).transitional_ignore();
uow.commit();
}
}
@@ -87,7 +87,7 @@ TEST(SortedDataInterface, InsertWithDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(6, 2), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(6, 2), true).transitional_ignore();
uow.commit();
}
}
@@ -110,7 +110,7 @@ TEST(SortedDataInterface, InsertWithDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -119,7 +119,7 @@ TEST(SortedDataInterface, InsertWithDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true).transitional_ignore();
uow.commit();
}
}
@@ -138,7 +138,7 @@ TEST(SortedDataInterface, InsertWithDups3AndRollback) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -147,7 +147,7 @@ TEST(SortedDataInterface, InsertWithDups3AndRollback) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true).transitional_ignore();
// no commit
}
}
@@ -166,7 +166,8 @@ TEST(SortedDataInterface, InsertNoDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), false);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), false)
+ .transitional_ignore();
uow.commit();
}
}
@@ -175,7 +176,8 @@ TEST(SortedDataInterface, InsertNoDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 2), RecordId(5, 20), false);
+ sorted->insert(opCtx.get(), BSON("" << 2), RecordId(5, 20), false)
+ .transitional_ignore();
uow.commit();
}
}
@@ -194,7 +196,7 @@ TEST(SortedDataInterface, InsertNoDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), false);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), false).transitional_ignore();
uow.commit();
}
}
@@ -203,7 +205,7 @@ TEST(SortedDataInterface, InsertNoDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 4), false);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 4), false).transitional_ignore();
uow.commit();
}
}
@@ -222,7 +224,7 @@ TEST(SortedDataInterface, Unindex1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -287,7 +289,7 @@ TEST(SortedDataInterface, Unindex2Rollback) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -349,7 +351,8 @@ TEST(SortedDataInterface, CursorIterate1WithSaveRestore) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << i), RecordId(5, i * 2), true);
+ sorted->insert(opCtx.get(), BSON("" << i), RecordId(5, i * 2), true)
+ .transitional_ignore();
uow.commit();
}
}
@@ -378,7 +381,8 @@ TEST(SortedDataInterface, CursorIterateAllDupKeysWithSaveRestore) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 5), RecordId(5, i * 2), true);
+ sorted->insert(opCtx.get(), BSON("" << 5), RecordId(5, i * 2), true)
+ .transitional_ignore();
uow.commit();
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 47241f8bbf2..0d15a514950 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -142,7 +142,7 @@ TEST(WiredTigerRecordStoreTest, Isolation1) {
try {
// this should fail
- rs->updateRecord(t2.get(), id1, "c", 2, false, NULL);
+ rs->updateRecord(t2.get(), id1, "c", 2, false, NULL).transitional_ignore();
ASSERT(0);
} catch (WriteConflictException& dle) {
w2.reset(NULL);
@@ -197,7 +197,7 @@ TEST(WiredTigerRecordStoreTest, Isolation2) {
ASSERT_EQUALS(string("a"), rs->dataFor(t2.get(), id1).data());
try {
// this should fail as our version of id1 is too old
- rs->updateRecord(t2.get(), id1, "c", 2, false, NULL);
+ rs->updateRecord(t2.get(), id1, "c", 2, false, NULL).transitional_ignore();
ASSERT(0);
} catch (WriteConflictException& dle) {
}
@@ -358,7 +358,7 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) {
// we make sure we can't find the 2nd until the first is committed
ServiceContext::UniqueOperationContext t1(harnessHelper->newOperationContext());
unique_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get()));
- rs->insertRecord(t1.get(), "b", 2, false);
+ rs->insertRecord(t1.get(), "b", 2, false).status_with_transitional_ignore();
// do not commit yet
{ // create 2nd doc
@@ -366,7 +366,7 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) {
auto t2 = harnessHelper->newOperationContext(client2.get());
{
WriteUnitOfWork w2(t2.get());
- rs->insertRecord(t2.get(), "c", 2, false);
+ rs->insertRecord(t2.get(), "c", 2, false).status_with_transitional_ignore();
w2.commit();
}
}
diff --git a/src/mongo/db/update/arithmetic_node_test.cpp b/src/mongo/db/update/arithmetic_node_test.cpp
index 815c45cc70c..924d66a8076 100644
--- a/src/mongo/db/update/arithmetic_node_test.cpp
+++ b/src/mongo/db/update/arithmetic_node_test.cpp
@@ -47,7 +47,7 @@ DEATH_TEST(ArithmeticNodeTest, InitFailsForEmptyElement, "Invariant failure modE
auto update = fromjson("{$inc: {}}");
const CollatorInterface* collator = nullptr;
ArithmeticNode node(ArithmeticNode::ArithmeticOp::kAdd);
- node.init(update["$inc"].embeddedObject().firstElement(), collator);
+ node.init(update["$inc"].embeddedObject().firstElement(), collator).transitional_ignore();
}
TEST(ArithmeticNodeTest, InitSucceedsForNumberIntElement) {
@@ -1742,7 +1742,7 @@ TEST(ArithmeticNodeTest, ApplyDeserializedDocNotNoOp) {
Document doc(fromjson("{a: 1}"));
// De-serialize the int.
- doc.root()["a"].setValueInt(1);
+ doc.root()["a"].setValueInt(1).transitional_ignore();
FieldRef pathToCreate("b");
FieldRef pathTaken("");
@@ -1777,7 +1777,7 @@ TEST(ArithmeticNodeTest, ApplyToDeserializedDocNoOp) {
Document doc(fromjson("{a: 1}"));
// De-serialize the int.
- doc.root()["a"].setValueInt(2);
+ doc.root()["a"].setValueInt(2).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a");
@@ -1812,7 +1812,7 @@ TEST(ArithmeticNodeTest, ApplyToDeserializedDocNestedNoop) {
Document doc{BSONObj()};
// De-serialize the int.
- doc.root().appendObject("a", BSON("b" << static_cast<int>(1)));
+ doc.root().appendObject("a", BSON("b" << static_cast<int>(1))).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a.b");
@@ -1847,7 +1847,7 @@ TEST(ArithmeticNodeTest, ApplyToDeserializedDocNestedNotNoop) {
Document doc{BSONObj()};
// De-serialize the int.
- doc.root().appendObject("a", BSON("b" << static_cast<int>(1)));
+ doc.root().appendObject("a", BSON("b" << static_cast<int>(1))).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a.b");
diff --git a/src/mongo/db/update/path_support_test.cpp b/src/mongo/db/update/path_support_test.cpp
index 5ea820bbf7b..49675d09ac3 100644
--- a/src/mongo/db/update/path_support_test.cpp
+++ b/src/mongo/db/update/path_support_test.cpp
@@ -500,7 +500,7 @@ TEST_F(ArrayDoc, ExcessivePaddingNotRequestedIfArrayAlreadyPadded) {
Element arrayA = doc().root().leftChild();
ASSERT_EQ(arrayA.getFieldName(), "a");
ASSERT_EQ(arrayA.getType(), mongo::Array);
- arrayA.appendInt("", 1);
+ arrayA.appendInt("", 1).transitional_ignore();
}
size_t idxFound;
diff --git a/src/mongo/db/update/set_node_test.cpp b/src/mongo/db/update/set_node_test.cpp
index 8c7850f6000..5b0b8b29398 100644
--- a/src/mongo/db/update/set_node_test.cpp
+++ b/src/mongo/db/update/set_node_test.cpp
@@ -47,7 +47,7 @@ DEATH_TEST(SetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok(
auto update = fromjson("{$set: {}}");
const CollatorInterface* collator = nullptr;
SetNode node;
- node.init(update["$set"].embeddedObject().firstElement(), collator);
+ node.init(update["$set"].embeddedObject().firstElement(), collator).transitional_ignore();
}
TEST(SetNodeTest, InitSucceedsForNonemptyElement) {
@@ -387,7 +387,7 @@ TEST(SetNodeTest, IdentityOpOnDeserializedIsNotANoOp) {
Document doc(fromjson("{a: { b: NumberInt(0)}}"));
// Apply a mutation to the document that will make it non-serialized.
- doc.root()["a"]["b"].setValueInt(2);
+ doc.root()["a"]["b"].setValueInt(2).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a");
@@ -1724,9 +1724,9 @@ TEST(SetNodeTest, ApplySetModToEphemeralDocument) {
Document doc;
Element x = doc.makeElementObject("x");
- doc.root().pushBack(x);
+ doc.root().pushBack(x).transitional_ignore();
Element a = doc.makeElementInt("a", 100);
- x.pushBack(a);
+ x.pushBack(a).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("x");
diff --git a/src/mongo/db/update/unset_node_test.cpp b/src/mongo/db/update/unset_node_test.cpp
index b62a7501e2d..0dcbf3f4776 100644
--- a/src/mongo/db/update/unset_node_test.cpp
+++ b/src/mongo/db/update/unset_node_test.cpp
@@ -47,7 +47,7 @@ DEATH_TEST(UnsetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.o
auto update = fromjson("{$unset: {}}");
const CollatorInterface* collator = nullptr;
UnsetNode node;
- node.init(update["$unset"].embeddedObject().firstElement(), collator);
+ node.init(update["$unset"].embeddedObject().firstElement(), collator).transitional_ignore();
}
DEATH_TEST(UnsetNodeTest, ApplyToRootFails, "Invariant failure parent.ok()") {
diff --git a/src/mongo/db/update/update_array_node_test.cpp b/src/mongo/db/update/update_array_node_test.cpp
index 4bb95967113..1eb5d657ae1 100644
--- a/src/mongo/db/update/update_array_node_test.cpp
+++ b/src/mongo/db/update/update_array_node_test.cpp
@@ -189,8 +189,8 @@ DEATH_TEST(UpdateArrayNodeTest,
foundIdentifiers));
Document doc(fromjson("{a: [{c: 0}, {c: 0}, {c: 1}]}"));
- doc.root()["a"]["1"]["c"].setValueInt(1);
- doc.root()["a"]["2"]["c"].setValueInt(0);
+ doc.root()["a"]["1"]["c"].setValueInt(1).transitional_ignore();
+ doc.root()["a"]["2"]["c"].setValueInt(0).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("");
StringData matchedField;
diff --git a/src/mongo/db/update/update_driver_test.cpp b/src/mongo/db/update/update_driver_test.cpp
index b2e98556d16..e9b92433142 100644
--- a/src/mongo/db/update/update_driver_test.cpp
+++ b/src/mongo/db/update/update_driver_test.cpp
@@ -147,7 +147,7 @@ TEST(Collator, SetCollationUpdatesModifierInterfaces) {
bool modified = false;
Document doc(fromjson("{a: 'cba'}"));
driver.setCollator(&collator);
- driver.update(StringData(), &doc, nullptr, nullptr, &modified);
+ driver.update(StringData(), &doc, nullptr, nullptr, &modified).transitional_ignore();
ASSERT_TRUE(modified);
}
@@ -164,8 +164,8 @@ public:
CreateFromQueryFixture()
: _driverOps(new UpdateDriver(UpdateDriver::Options())),
_driverRepl(new UpdateDriver(UpdateDriver::Options())) {
- _driverOps->parse(fromjson("{$set:{'_':1}}"));
- _driverRepl->parse(fromjson("{}"));
+ _driverOps->parse(fromjson("{$set:{'_':1}}")).transitional_ignore();
+ _driverRepl->parse(fromjson("{}")).transitional_ignore();
_opCtx = _serviceContext.makeOperationContext();
}
diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp
index 3e284372735..ca25984533a 100644
--- a/src/mongo/db/views/view_catalog_test.cpp
+++ b/src/mongo/db/views/view_catalog_test.cpp
@@ -141,7 +141,8 @@ TEST_F(ViewCatalogFixture, CreateViewWithPipelineFailsOnInvalidStageName) {
auto invalidPipeline = BSON_ARRAY(BSON("INVALID_STAGE_NAME" << 1));
ASSERT_THROWS(
- viewCatalog.createView(opCtx.get(), viewName, viewOn, invalidPipeline, emptyCollation),
+ viewCatalog.createView(opCtx.get(), viewName, viewOn, invalidPipeline, emptyCollation)
+ .transitional_ignore(),
UserException);
}