summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorADAM David Alan Martin <adam.martin@10gen.com>2017-06-18 23:22:02 -0400
committerADAM David Alan Martin <adam.martin@10gen.com>2017-06-18 23:46:57 -0400
commit9abef6f25aadfd04309cb2219068097f93dc961d (patch)
treef88c7f183f201813f363d5d68c1a4a76781ca7ef /src
parenta5f0a84c79b6ce41fef33da920c62be0ecc8f07b (diff)
downloadmongo-9abef6f25aadfd04309cb2219068097f93dc961d.tar.gz
SERVER-27244 Status usage compile-time facilities.
There are numerous places in the codebase where `mongo::Status` or `mongo::StatusWith< T >` objects are returned and never checked. Many of these are innocuous, but many of them are potentially severe bugs. This change introduces facilities to permit compile-time warning of unchecked `Status` and `StatusWith` usage on clang compilers. It introduces an `ignore` function which is useful to state that a specific "ignored status" case was intentional. It is not presently an error, in clang builds, to forget to check a `Status` -- this will come in a later commit. This also introduces a `transitional_ignore` function, which allows for easy continual auditing of the codebase for current "whitelisted" unchecked-status instances. All present "ignored status" cases have been marked `transitional_ignore`.
Diffstat (limited to 'src')
-rw-r--r--src/mongo/base/data_builder.h3
-rw-r--r--src/mongo/base/status.h28
-rw-r--r--src/mongo/base/status_with.h22
-rw-r--r--src/mongo/bson/json.cpp2
-rw-r--r--src/mongo/bson/mutable/algorithm.h6
-rw-r--r--src/mongo/bson/mutable/document.cpp2
-rw-r--r--src/mongo/bson/mutable/mutable_bson_test.cpp153
-rw-r--r--src/mongo/client/fetcher_test.cpp2
-rw-r--r--src/mongo/client/scoped_db_conn_test.cpp8
-rw-r--r--src/mongo/db/auth/authorization_manager.cpp18
-rw-r--r--src/mongo/db/auth/authorization_session_test.cpp22
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_local.cpp5
-rw-r--r--src/mongo/db/auth/authz_manager_external_state_mock.cpp2
-rw-r--r--src/mongo/db/auth/role_graph_test.cpp8
-rw-r--r--src/mongo/db/auth/sasl_commands.cpp2
-rw-r--r--src/mongo/db/auth/sasl_scramsha1_test.cpp57
-rw-r--r--src/mongo/db/catalog/capped_utils.cpp6
-rw-r--r--src/mongo/db/catalog/coll_mod.cpp8
-rw-r--r--src/mongo/db/catalog/collection_compact.cpp2
-rw-r--r--src/mongo/db/catalog/collection_impl.cpp4
-rw-r--r--src/mongo/db/catalog/collection_options_test.cpp2
-rw-r--r--src/mongo/db/catalog/database_holder_impl.cpp10
-rw-r--r--src/mongo/db/catalog/database_impl.cpp5
-rw-r--r--src/mongo/db/catalog/database_test.cpp4
-rw-r--r--src/mongo/db/catalog/index_catalog_impl.cpp6
-rw-r--r--src/mongo/db/catalog/rename_collection.cpp2
-rw-r--r--src/mongo/db/clientcursor.cpp4
-rw-r--r--src/mongo/db/commands/authentication_commands.cpp2
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp6
-rw-r--r--src/mongo/db/commands/mr.cpp4
-rw-r--r--src/mongo/db/commands/parameters.cpp6
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp20
-rw-r--r--src/mongo/db/commands/user_management_commands.cpp3
-rw-r--r--src/mongo/db/commands/user_management_commands_common.cpp2
-rw-r--r--src/mongo/db/commands/write_commands/write_commands.cpp2
-rw-r--r--src/mongo/db/db.cpp4
-rw-r--r--src/mongo/db/exec/cached_plan.cpp2
-rw-r--r--src/mongo/db/exec/geo_near.cpp4
-rw-r--r--src/mongo/db/exec/multi_plan.cpp7
-rw-r--r--src/mongo/db/exec/projection_exec.cpp4
-rw-r--r--src/mongo/db/exec/sort_key_generator.cpp2
-rw-r--r--src/mongo/db/ftdc/compressor.cpp3
-rw-r--r--src/mongo/db/ftdc/file_manager.cpp2
-rw-r--r--src/mongo/db/ftdc/file_manager_test.cpp6
-rw-r--r--src/mongo/db/ftdc/file_writer.cpp2
-rw-r--r--src/mongo/db/ftdc/file_writer_test.cpp6
-rw-r--r--src/mongo/db/geo/r2_region_coverer_test.cpp12
-rw-r--r--src/mongo/db/initialize_server_global_state.cpp2
-rw-r--r--src/mongo/db/introspect.cpp4
-rw-r--r--src/mongo/db/keys_collection_manager_test.cpp7
-rw-r--r--src/mongo/db/logical_clock_test.cpp3
-rw-r--r--src/mongo/db/logical_session_cache_test.cpp22
-rw-r--r--src/mongo/db/matcher/expression_algo.cpp4
-rw-r--r--src/mongo/db/matcher/expression_array.cpp2
-rw-r--r--src/mongo/db/matcher/expression_array.h6
-rw-r--r--src/mongo/db/matcher/expression_array_test.cpp14
-rw-r--r--src/mongo/db/matcher/expression_geo.cpp4
-rw-r--r--src/mongo/db/matcher/expression_leaf.cpp2
-rw-r--r--src/mongo/db/matcher/expression_leaf.h18
-rw-r--r--src/mongo/db/matcher/expression_leaf_test.cpp108
-rw-r--r--src/mongo/db/matcher/expression_parser_geo_test.cpp35
-rw-r--r--src/mongo/db/matcher/expression_test.cpp10
-rw-r--r--src/mongo/db/matcher/expression_tree.h2
-rw-r--r--src/mongo/db/matcher/path.cpp14
-rw-r--r--src/mongo/db/mongod_options.cpp16
-rw-r--r--src/mongo/db/ops/modifier_add_to_set.cpp2
-rw-r--r--src/mongo/db/ops/modifier_pull.cpp2
-rw-r--r--src/mongo/db/ops/modifier_pull_all.cpp2
-rw-r--r--src/mongo/db/ops/modifier_push.cpp2
-rw-r--r--src/mongo/db/ops/modifier_set_test.cpp6
-rw-r--r--src/mongo/db/ops/write_ops_exec.cpp5
-rw-r--r--src/mongo/db/pipeline/document_source_match.cpp4
-rw-r--r--src/mongo/db/prefetch.cpp4
-rw-r--r--src/mongo/db/query/canonical_query.cpp6
-rw-r--r--src/mongo/db/query/canonical_query_test.cpp2
-rw-r--r--src/mongo/db/query/planner_analysis_test.cpp2
-rw-r--r--src/mongo/db/query/query_planner.cpp2
-rw-r--r--src/mongo/db/repl/bgsync.cpp4
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp2
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp6
-rw-r--r--src/mongo/db/repl/database_cloner_test.cpp3
-rw-r--r--src/mongo/db/repl/databases_cloner_test.cpp3
-rw-r--r--src/mongo/db/repl/drop_pending_collection_reaper_test.cpp2
-rw-r--r--src/mongo/db/repl/elect_cmd_runner_test.cpp2
-rw-r--r--src/mongo/db/repl/freshness_checker_test.cpp24
-rw-r--r--src/mongo/db/repl/initial_syncer.cpp2
-rw-r--r--src/mongo/db/repl/initial_syncer_test.cpp11
-rw-r--r--src/mongo/db/repl/oplog.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_external_state_impl.cpp4
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp24
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect.cpp14
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp20
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp13
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp26
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp59
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp40
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp74
-rw-r--r--src/mongo/db/repl/reporter.cpp2
-rw-r--r--src/mongo/db/repl/rollback_test_fixture.cpp2
-rw-r--r--src/mongo/db/repl/rs_rollback_test.cpp20
-rw-r--r--src/mongo/db/repl/scatter_gather_test.cpp16
-rw-r--r--src/mongo/db/repl/storage_interface_impl_test.cpp2
-rw-r--r--src/mongo/db/repl/sync_source_resolver.cpp32
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.cpp4
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_test.cpp271
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp328
-rw-r--r--src/mongo/db/s/balancer/balancer.cpp8
-rw-r--r--src/mongo/db/s/balancer/migration_manager.cpp8
-rw-r--r--src/mongo/db/s/collection_range_deleter.cpp2
-rw-r--r--src/mongo/db/s/collection_range_deleter_test.cpp3
-rw-r--r--src/mongo/db/s/metadata_manager.cpp22
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp3
-rw-r--r--src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp3
-rw-r--r--src/mongo/db/s/migration_destination_manager.cpp2
-rw-r--r--src/mongo/db/s/migration_source_manager.cpp57
-rw-r--r--src/mongo/db/s/move_chunk_command.cpp3
-rw-r--r--src/mongo/db/s/move_timing_helper.cpp12
-rw-r--r--src/mongo/db/s/sharding_state_recovery.cpp3
-rw-r--r--src/mongo/db/server_options_helpers.cpp2
-rw-r--r--src/mongo/db/server_parameters_test.cpp10
-rw-r--r--src/mongo/db/service_entry_point_mongod.cpp18
-rw-r--r--src/mongo/db/service_liason_mock.cpp2
-rw-r--r--src/mongo/db/storage/bson_collection_catalog_entry.cpp3
-rw-r--r--src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp4
-rw-r--r--src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp6
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_harness.cpp5
-rw-r--r--src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp2
-rw-r--r--src/mongo/db/storage/kv/kv_storage_engine.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp5
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp94
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp124
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp2
-rw-r--r--src/mongo/db/storage/record_store_test_harness.cpp2
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.cpp32
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp8
-rw-r--r--src/mongo/db/update/arithmetic_node_test.cpp10
-rw-r--r--src/mongo/db/update/path_support_test.cpp2
-rw-r--r--src/mongo/db/update/set_node_test.cpp8
-rw-r--r--src/mongo/db/update/unset_node_test.cpp2
-rw-r--r--src/mongo/db/update/update_array_node_test.cpp4
-rw-r--r--src/mongo/db/update/update_driver_test.cpp6
-rw-r--r--src/mongo/db/views/view_catalog_test.cpp3
-rw-r--r--src/mongo/dbtests/counttests.cpp6
-rw-r--r--src/mongo/dbtests/cursor_manager_test.cpp2
-rw-r--r--src/mongo/dbtests/indexcatalogtests.cpp8
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp38
-rw-r--r--src/mongo/dbtests/multikey_paths_test.cpp40
-rw-r--r--src/mongo/dbtests/namespacetests.cpp4
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp2
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_cached_plan.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp39
-rw-r--r--src/mongo/dbtests/query_stage_ixscan.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_multiplan.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_sort.cpp6
-rw-r--r--src/mongo/dbtests/querytests.cpp8
-rw-r--r--src/mongo/dbtests/replica_set_tests.cpp18
-rw-r--r--src/mongo/dbtests/repltests.cpp4
-rw-r--r--src/mongo/dbtests/validate_tests.cpp3
-rw-r--r--src/mongo/executor/connection_pool_asio_integration_test.cpp50
-rw-r--r--src/mongo/executor/network_interface_asio_integration_fixture.cpp11
-rw-r--r--src/mongo/executor/network_interface_mock.cpp3
-rw-r--r--src/mongo/executor/network_interface_perf_test.cpp2
-rw-r--r--src/mongo/executor/network_interface_thread_pool.cpp12
-rw-r--r--src/mongo/executor/task_executor_test_common.cpp6
-rw-r--r--src/mongo/executor/thread_pool_task_executor.cpp59
-rw-r--r--src/mongo/logger/log_test.cpp9
-rw-r--r--src/mongo/logger/logstream_builder.cpp2
-rw-r--r--src/mongo/platform/compiler.h8
-rw-r--r--src/mongo/platform/compiler_gcc.h3
-rw-r--r--src/mongo/platform/compiler_msvc.h2
-rw-r--r--src/mongo/rpc/legacy_reply_builder.cpp2
-rw-r--r--src/mongo/rpc/metadata/oplog_query_metadata_test.cpp4
-rw-r--r--src/mongo/rpc/metadata/repl_set_metadata_test.cpp4
-rw-r--r--src/mongo/rpc/metadata/sharding_metadata.cpp4
-rw-r--r--src/mongo/rpc/object_check_test.cpp2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp8
-rw-r--r--src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp2
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client_impl.cpp18
-rw-r--r--src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp24
-rw-r--r--src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp12
-rw-r--r--src/mongo/s/catalog/sharding_catalog_log_change_test.cpp33
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp18
-rw-r--r--src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp7
-rw-r--r--src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp16
-rw-r--r--src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp18
-rw-r--r--src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp18
-rw-r--r--src/mongo/s/catalog/sharding_catalog_test.cpp20
-rw-r--r--src/mongo/s/cluster_last_error_info_test.cpp26
-rw-r--r--src/mongo/s/commands/cluster_drop_database_cmd.cpp21
-rw-r--r--src/mongo/s/commands/cluster_move_primary_cmd.cpp27
-rw-r--r--src/mongo/s/mongos_options.cpp8
-rw-r--r--src/mongo/s/query/async_results_merger.cpp8
-rw-r--r--src/mongo/s/query/cluster_cursor_manager_test.cpp120
-rw-r--r--src/mongo/s/query/establish_cursors.cpp6
-rw-r--r--src/mongo/s/server.cpp7
-rw-r--r--src/mongo/s/shard_key_pattern.cpp2
-rw-r--r--src/mongo/s/sharding_mongod_test_fixture.cpp11
-rw-r--r--src/mongo/s/sharding_test_fixture.cpp4
-rw-r--r--src/mongo/s/sharding_uptime_reporter.cpp16
-rw-r--r--src/mongo/scripting/mozjs/mongo.cpp2
-rw-r--r--src/mongo/shell/shell_options.cpp2
-rw-r--r--src/mongo/transport/message_compressor_manager.cpp6
-rw-r--r--src/mongo/transport/message_compressor_manager_test.cpp4
-rw-r--r--src/mongo/transport/message_compressor_noop.h4
-rw-r--r--src/mongo/transport/message_compressor_registry_test.cpp2
-rw-r--r--src/mongo/transport/service_state_machine_test.cpp2
-rw-r--r--src/mongo/transport/transport_layer_asio.cpp8
-rw-r--r--src/mongo/transport/transport_layer_legacy_test.cpp4
-rw-r--r--src/mongo/util/background.cpp2
-rw-r--r--src/mongo/util/net/listen.cpp2
-rw-r--r--src/mongo/util/options_parser/environment.cpp2
-rw-r--r--src/mongo/util/options_parser/environment_test.cpp18
-rw-r--r--src/mongo/util/options_parser/option_section.cpp8
-rw-r--r--src/mongo/util/options_parser/options_parser.cpp4
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp4
-rw-r--r--src/mongo/util/signal_handlers_synchronous.cpp11
-rw-r--r--src/third_party/gperftools-2.5/SConscript2
219 files changed, 1831 insertions, 1383 deletions
diff --git a/src/mongo/base/data_builder.h b/src/mongo/base/data_builder.h
index 53b09263403..dd5155bb35b 100644
--- a/src/mongo/base/data_builder.h
+++ b/src/mongo/base/data_builder.h
@@ -234,7 +234,8 @@ private:
template <typename T>
std::size_t _getSerializedSize(const T& value) {
std::size_t advance = 0;
- DataType::store(value, nullptr, std::numeric_limits<std::size_t>::max(), &advance, 0);
+ DataType::store(value, nullptr, std::numeric_limits<std::size_t>::max(), &advance, 0)
+ .transitional_ignore();
return advance;
}
diff --git a/src/mongo/base/status.h b/src/mongo/base/status.h
index 19b74a5a8cb..d8009ecc96e 100644
--- a/src/mongo/base/status.h
+++ b/src/mongo/base/status.h
@@ -32,12 +32,13 @@
#include "mongo/base/error_codes.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/compiler.h"
namespace mongoutils {
namespace str {
class stream;
-}
-}
+} // namespace str
+} // namespace mongoutils
namespace mongo {
@@ -64,7 +65,7 @@ namespace mongo {
* TODO: generate base/error_codes.h out of a description file
* TODO: check 'location' duplicates against assert numbers
*/
-class Status {
+class MONGO_WARN_UNUSED_RESULT_CLASS Status {
public:
// Short-hand for returning an OK status.
static inline Status OK();
@@ -127,6 +128,27 @@ public:
std::string toString() const;
+ /**
+ * Call this method to indicate that it is your intention to ignore a returned status. Ignoring
+ * is only possible if the value being ignored is an xvalue -- it is not appropriate to create a
+ * status variable and then ignore it.
+ */
+ inline void ignore() && noexcept {}
+ inline void ignore() const& noexcept = delete;
+
+ /**
+ * This method is a transitional tool, to facilitate transition to compile-time enforced status
+ * checking.
+ *
+ * NOTE: DO NOT ADD NEW CALLS TO THIS METHOD. This method serves the same purpose as
+ * `.ignore()`; however, it indicates a situation where the code that presently ignores a status
+ * code has not been audited for correctness. This method will be removed at some point. If you
+ * encounter a compiler error from ignoring the result of a status-returning function be sure to
+ * check the return value, or deliberately ignore the return value.
+ */
+ inline void transitional_ignore() && noexcept {};
+ inline void transitional_ignore() const& noexcept = delete;
+
//
// Below interface used for testing code only.
//
diff --git a/src/mongo/base/status_with.h b/src/mongo/base/status_with.h
index 29323861199..9287e45d938 100644
--- a/src/mongo/base/status_with.h
+++ b/src/mongo/base/status_with.h
@@ -1,5 +1,3 @@
-// status_with.h
-
/* Copyright 2013 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
@@ -36,11 +34,13 @@
#include "mongo/base/static_assert.h"
#include "mongo/base/status.h"
+#include "mongo/platform/compiler.h"
#define MONGO_INCLUDE_INVARIANT_H_WHITELISTED
#include "mongo/util/invariant.h"
#undef MONGO_INCLUDE_INVARIANT_H_WHITELISTED
+
namespace mongo {
/**
@@ -61,7 +61,7 @@ namespace mongo {
* }
*/
template <typename T>
-class StatusWith {
+class MONGO_WARN_UNUSED_RESULT_CLASS StatusWith {
MONGO_STATIC_ASSERT_MSG(!(std::is_same<T, mongo::Status>::value),
"StatusWith<Status> is banned.");
@@ -112,6 +112,22 @@ public:
return _status.isOK();
}
+
+ /**
+ * This method is a transitional tool, to facilitate transition to compile-time enforced status
+ * checking.
+ *
+ * NOTE: DO NOT ADD NEW CALLS TO THIS METHOD. This method serves the same purpose as
+ * `.getStatus().ignore()`; however, it indicates a situation where the code that presently
+ * ignores a status code has not been audited for correctness. This method will be removed at
+ * some point. If you encounter a compiler error from ignoring the result of a `StatusWith`
+ * returning function be sure to check the return value, or deliberately ignore the return
+ * value. The function is named to be auditable independently from unaudited `Status` ignore
+ * cases.
+ */
+ inline void status_with_transitional_ignore() && noexcept {};
+ inline void status_with_transitional_ignore() const& noexcept = delete;
+
private:
Status _status;
boost::optional<T> _t;
diff --git a/src/mongo/bson/json.cpp b/src/mongo/bson/json.cpp
index fffbd042328..d6a94ea08eb 100644
--- a/src/mongo/bson/json.cpp
+++ b/src/mongo/bson/json.cpp
@@ -739,7 +739,7 @@ Status JParse::array(StringData fieldName, BSONObjBuilder& builder, bool subObje
* have the same behavior. XXX: this may not be desired. */
Status JParse::constructor(StringData fieldName, BSONObjBuilder& builder) {
if (readToken("Date")) {
- date(fieldName, builder);
+ date(fieldName, builder).transitional_ignore();
} else {
return parseError("\"new\" keyword not followed by Date constructor");
}
diff --git a/src/mongo/bson/mutable/algorithm.h b/src/mongo/bson/mutable/algorithm.h
index 768469deff4..9e4ffd278e4 100644
--- a/src/mongo/bson/mutable/algorithm.h
+++ b/src/mongo/bson/mutable/algorithm.h
@@ -140,9 +140,9 @@ void sortChildren(Element parent, Comparator comp) {
const std::vector<Element>::iterator end = children.end();
for (; where != end; ++where) {
// Detach from its current location.
- where->remove();
+ where->remove().transitional_ignore();
// Make it the new rightmost element.
- parent.pushBack(*where);
+ parent.pushBack(*where).transitional_ignore();
}
}
@@ -156,7 +156,7 @@ void deduplicateChildren(Element parent, EqualityComparator equal) {
while (current.ok()) {
Element next = current.rightSibling();
if (next.ok() && equal(current, next)) {
- next.remove();
+ next.remove().transitional_ignore();
} else {
current = next;
}
diff --git a/src/mongo/bson/mutable/document.cpp b/src/mongo/bson/mutable/document.cpp
index 93ca13cc751..d939bc90b9b 100644
--- a/src/mongo/bson/mutable/document.cpp
+++ b/src/mongo/bson/mutable/document.cpp
@@ -1294,7 +1294,7 @@ Status Element::rename(StringData newName) {
// For leaf elements we just create a new Element with the current value and
// replace. Note that the 'setValue' call below will invalidate thisRep.
Element replacement = _doc->makeElementWithNewFieldName(newName, *this);
- setValue(replacement._repIdx);
+ setValue(replacement._repIdx).transitional_ignore();
} else {
// The easy case: just update what our field name offset refers to.
impl.insertFieldName(*thisRep, newName);
diff --git a/src/mongo/bson/mutable/mutable_bson_test.cpp b/src/mongo/bson/mutable/mutable_bson_test.cpp
index 26930dfa2c8..c1369d31b38 100644
--- a/src/mongo/bson/mutable/mutable_bson_test.cpp
+++ b/src/mongo/bson/mutable/mutable_bson_test.cpp
@@ -546,34 +546,34 @@ TEST(Element, setters) {
mmb::Element t0 = doc.makeElementNull("t0");
- t0.setValueBool(true);
+ t0.setValueBool(true).transitional_ignore();
ASSERT_EQUALS(mongo::Bool, t0.getType());
- t0.setValueInt(12345);
+ t0.setValueInt(12345).transitional_ignore();
ASSERT_EQUALS(mongo::NumberInt, t0.getType());
- t0.setValueLong(12345LL);
+ t0.setValueLong(12345LL).transitional_ignore();
ASSERT_EQUALS(mongo::NumberLong, t0.getType());
- t0.setValueTimestamp(mongo::Timestamp());
+ t0.setValueTimestamp(mongo::Timestamp()).transitional_ignore();
ASSERT_EQUALS(mongo::bsonTimestamp, t0.getType());
- t0.setValueDate(mongo::Date_t::fromMillisSinceEpoch(12345LL));
+ t0.setValueDate(mongo::Date_t::fromMillisSinceEpoch(12345LL)).transitional_ignore();
ASSERT_EQUALS(mongo::Date, t0.getType());
- t0.setValueDouble(123.45);
+ t0.setValueDouble(123.45).transitional_ignore();
ASSERT_EQUALS(mongo::NumberDouble, t0.getType());
- t0.setValueDecimal(mongo::Decimal128("123.45E1234"));
+ t0.setValueDecimal(mongo::Decimal128("123.45E1234")).transitional_ignore();
ASSERT_EQUALS(mongo::NumberDecimal, t0.getType());
- t0.setValueOID(mongo::OID("47cc67093475061e3d95369d"));
+ t0.setValueOID(mongo::OID("47cc67093475061e3d95369d")).transitional_ignore();
ASSERT_EQUALS(mongo::jstOID, t0.getType());
- t0.setValueRegex("[a-zA-Z]?", "");
+ t0.setValueRegex("[a-zA-Z]?", "").transitional_ignore();
ASSERT_EQUALS(mongo::RegEx, t0.getType());
- t0.setValueString("foo bar baz");
+ t0.setValueString("foo bar baz").transitional_ignore();
ASSERT_EQUALS(mongo::String, t0.getType());
}
@@ -622,12 +622,12 @@ TEST(DecimalType, setElement) {
mmb::Document doc;
mmb::Element d0 = doc.makeElementDecimal("d0", mongo::Decimal128("128"));
- d0.setValueDecimal(mongo::Decimal128("123456"));
+ d0.setValueDecimal(mongo::Decimal128("123456")).transitional_ignore();
ASSERT_TRUE(mongo::Decimal128("123456").isEqual(d0.getValueDecimal()));
- d0.setValueDouble(0.1);
+ d0.setValueDouble(0.1).transitional_ignore();
ASSERT_EQUALS(0.1, d0.getValueDouble());
- d0.setValueDecimal(mongo::Decimal128("23"));
+ d0.setValueDecimal(mongo::Decimal128("23")).transitional_ignore();
ASSERT_TRUE(mongo::Decimal128("23").isEqual(d0.getValueDecimal()));
}
@@ -635,7 +635,7 @@ TEST(DecimalType, appendElement) {
mmb::Document doc;
mmb::Element d0 = doc.makeElementObject("e0");
- d0.appendDecimal("precision", mongo::Decimal128(34));
+ d0.appendDecimal("precision", mongo::Decimal128(34)).transitional_ignore();
mmb::Element it = mmb::findFirstChildNamed(d0, "precision");
ASSERT_TRUE(it.ok());
@@ -656,18 +656,18 @@ TEST(TimestampType, setElement) {
mmb::Document doc;
mmb::Element t0 = doc.makeElementTimestamp("t0", mongo::Timestamp());
- t0.setValueTimestamp(mongo::Timestamp(123, 456));
+ t0.setValueTimestamp(mongo::Timestamp(123, 456)).transitional_ignore();
ASSERT(mongo::Timestamp(123, 456) == t0.getValueTimestamp());
// Try setting to other types and back to Timestamp
- t0.setValueLong(1234567890);
+ t0.setValueLong(1234567890).transitional_ignore();
ASSERT_EQUALS(1234567890LL, t0.getValueLong());
- t0.setValueTimestamp(mongo::Timestamp(789, 321));
+ t0.setValueTimestamp(mongo::Timestamp(789, 321)).transitional_ignore();
ASSERT(mongo::Timestamp(789, 321) == t0.getValueTimestamp());
- t0.setValueString("foo bar baz");
+ t0.setValueString("foo bar baz").transitional_ignore();
ASSERT_EQUALS("foo bar baz", t0.getValueString());
- t0.setValueTimestamp(mongo::Timestamp(9876, 5432));
+ t0.setValueTimestamp(mongo::Timestamp(9876, 5432)).transitional_ignore();
ASSERT(mongo::Timestamp(9876, 5432) == t0.getValueTimestamp());
}
@@ -675,7 +675,8 @@ TEST(TimestampType, appendElement) {
mmb::Document doc;
mmb::Element t0 = doc.makeElementObject("e0");
- t0.appendTimestamp("a timestamp field", mongo::Timestamp(1352151971, 471));
+ t0.appendTimestamp("a timestamp field", mongo::Timestamp(1352151971, 471))
+ .transitional_ignore();
mmb::Element it = mmb::findFirstChildNamed(t0, "a timestamp field");
ASSERT_TRUE(it.ok());
@@ -698,17 +699,17 @@ TEST(SafeNumType, getSafeNum) {
mongo::SafeNum num = t0.getValueSafeNum();
ASSERT_EQUALS(num, static_cast<int64_t>(1234567890));
- t0.setValueLong(1234567890LL);
+ t0.setValueLong(1234567890LL).transitional_ignore();
ASSERT_EQUALS(1234567890LL, t0.getValueLong());
num = t0.getValueSafeNum();
ASSERT_EQUALS(num, static_cast<int64_t>(1234567890LL));
- t0.setValueDouble(123.456789);
+ t0.setValueDouble(123.456789).transitional_ignore();
ASSERT_EQUALS(123.456789, t0.getValueDouble());
num = t0.getValueSafeNum();
ASSERT_EQUALS(num, 123.456789);
- t0.setValueDecimal(mongo::Decimal128("12345678.1234"));
+ t0.setValueDecimal(mongo::Decimal128("12345678.1234")).transitional_ignore();
ASSERT_TRUE(mongo::Decimal128("12345678.1234").isEqual(t0.getValueDecimal()));
num = t0.getValueSafeNum();
ASSERT_EQUALS(num, mongo::Decimal128("12345678.1234"));
@@ -718,23 +719,23 @@ TEST(SafeNumType, setSafeNum) {
mmb::Document doc;
mmb::Element t0 = doc.makeElementSafeNum("t0", mongo::SafeNum(123456));
- t0.setValueSafeNum(mongo::SafeNum(654321));
+ t0.setValueSafeNum(mongo::SafeNum(654321)).transitional_ignore();
ASSERT_EQUALS(mongo::SafeNum(654321), t0.getValueSafeNum());
// Try setting to other types and back to SafeNum
- t0.setValueLong(1234567890);
+ t0.setValueLong(1234567890).transitional_ignore();
ASSERT_EQUALS(1234567890LL, t0.getValueLong());
- t0.setValueSafeNum(mongo::SafeNum(1234567890));
+ t0.setValueSafeNum(mongo::SafeNum(1234567890)).transitional_ignore();
ASSERT_EQUALS(mongo::SafeNum(1234567890), t0.getValueSafeNum());
- t0.setValueString("foo bar baz");
+ t0.setValueString("foo bar baz").transitional_ignore();
mongo::StringData left = "foo bar baz";
mongo::StringData right = t0.getValueString();
ASSERT_EQUALS(left, right);
ASSERT_EQUALS(mongo::StringData("foo bar baz"), t0.getValueString());
- t0.setValueSafeNum(mongo::SafeNum(12345));
+ t0.setValueSafeNum(mongo::SafeNum(12345)).transitional_ignore();
ASSERT_EQUALS(mongo::SafeNum(12345), t0.getValueSafeNum());
}
@@ -742,7 +743,8 @@ TEST(SafeNumType, appendElement) {
mmb::Document doc;
mmb::Element t0 = doc.makeElementObject("e0");
- t0.appendSafeNum("a timestamp field", mongo::SafeNum(static_cast<int64_t>(1352151971LL)));
+ t0.appendSafeNum("a timestamp field", mongo::SafeNum(static_cast<int64_t>(1352151971LL)))
+ .transitional_ignore();
mmb::Element it = findFirstChildNamed(t0, "a timestamp field");
ASSERT_TRUE(it.ok());
@@ -753,7 +755,7 @@ TEST(OIDType, getOidValue) {
mmb::Document doc;
mmb::Element t0 = doc.makeElementObject("e0");
const mongo::OID generated = mongo::OID::gen();
- t0.appendOID("myOid", generated);
+ t0.appendOID("myOid", generated).transitional_ignore();
mmb::Element it = findFirstChildNamed(t0, "myOid");
const mongo::OID recovered = mongo::OID(it.getValueOID());
ASSERT_EQUALS(generated, recovered);
@@ -763,7 +765,7 @@ TEST(OIDType, nullOID) {
mmb::Document doc;
mmb::Element t0 = doc.makeElementObject("e0");
const mongo::OID withNull("50a9c82263e413ad0028faad");
- t0.appendOID("myOid", withNull);
+ t0.appendOID("myOid", withNull).transitional_ignore();
mmb::Element it = findFirstChildNamed(t0, "myOid");
const mongo::OID recovered = mongo::OID(it.getValueOID());
ASSERT_EQUALS(withNull, recovered);
@@ -1033,7 +1035,7 @@ TEST(Documentation, Example3) {
ASSERT_OK(ys.pushBack(ycaps));
mmb::Element pun = doc.makeElementArray("why");
ASSERT_OK(ys.pushBack(pun));
- pun.appendString("na", "not");
+ pun.appendString("na", "not").transitional_ignore();
mongo::BSONObj outObj = doc.getObject();
static const char outJson[] =
@@ -1153,7 +1155,7 @@ TEST(Document, RenameDeserialization) {
ASSERT_TRUE(b.ok());
mmb::Element c = b.leftChild();
ASSERT_TRUE(c.ok());
- c.rename("C");
+ c.rename("C").transitional_ignore();
mongo::BSONObj outObj = doc.getObject();
static const char outJson[] =
"{"
@@ -1182,7 +1184,7 @@ TEST(Document, RemoveElementWithOpaqueRightSibling) {
mmb::Element a = doc.root().leftChild();
ASSERT_TRUE(a.ok());
- a.remove();
+ a.remove().transitional_ignore();
static const char outJson[] =
"{"
@@ -1413,8 +1415,8 @@ TEST(Document, ArraySerialization) {
mmb::Element b = a.leftChild();
mmb::Element new_array = doc.makeElementArray("XXX");
mmb::Element e = doc.makeElementString("e", "e");
- new_array.pushBack(e);
- b.pushBack(new_array);
+ new_array.pushBack(e).transitional_ignore();
+ b.pushBack(new_array).transitional_ignore();
static const char outJson[] =
"{ "
@@ -1438,7 +1440,7 @@ TEST(Document, SetValueBSONElementFieldNameHandling) {
const mongo::BSONElement b = iterator.next();
mmb::Element a = doc.root().leftChild();
- a.setValueBSONElement(b);
+ a.setValueBSONElement(b).transitional_ignore();
static const char outJson[] = "{ a : 5 }";
ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), doc.getObject());
@@ -1662,7 +1664,9 @@ TEST(Document, ElementCloningToDifferentDocument) {
mmb::Document source(mongo::fromjson(initial));
// Dirty the 'd' node and parents.
- source.root()["d"].pushBack(source.makeElementInt(mongo::StringData(), 7));
+ source.root()["d"]
+ .pushBack(source.makeElementInt(mongo::StringData(), 7))
+ .transitional_ignore();
mmb::Document target;
@@ -1694,7 +1698,7 @@ TEST(Document, ElementCloningToSameDocument) {
mmb::Document doc(mongo::fromjson(initial));
// Dirty the 'd' node and parents.
- doc.root()["d"].pushBack(doc.makeElementInt(mongo::StringData(), 7));
+ doc.root()["d"].pushBack(doc.makeElementInt(mongo::StringData(), 7)).transitional_ignore();
mmb::Element newElement = doc.makeElement(doc.root()["d"]);
ASSERT_TRUE(newElement.ok());
@@ -1739,7 +1743,9 @@ TEST(Document, RootCloningToDifferentDocument) {
mmb::Document source(mongo::fromjson(initial));
// Dirty the 'd' node and parents.
- source.root()["d"].pushBack(source.makeElementInt(mongo::StringData(), 7));
+ source.root()["d"]
+ .pushBack(source.makeElementInt(mongo::StringData(), 7))
+ .transitional_ignore();
mmb::Document target;
@@ -1758,7 +1764,7 @@ TEST(Document, RootCloningToSameDocument) {
mmb::Document doc(mongo::fromjson(initial));
// Dirty the 'd' node and parents.
- doc.root()["d"].pushBack(doc.makeElementInt(mongo::StringData(), 7));
+ doc.root()["d"].pushBack(doc.makeElementInt(mongo::StringData(), 7)).transitional_ignore();
mmb::Element newElement = doc.makeElementWithNewFieldName("X", doc.root());
mongo::Status status = doc.root().pushBack(newElement);
@@ -1854,7 +1860,7 @@ TEST(TypeSupport, EncodingEquivalenceDouble) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueDouble(value1);
+ c.setValueDouble(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::NumberDouble);
ASSERT_TRUE(c.hasValue());
@@ -1894,7 +1900,7 @@ TEST(TypeSupport, EncodingEquivalenceString) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueString(value1);
+ c.setValueString(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::String);
ASSERT_TRUE(c.hasValue());
@@ -1934,7 +1940,7 @@ TEST(TypeSupport, EncodingEquivalenceObject) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueObject(value1);
+ c.setValueObject(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::Object);
ASSERT_TRUE(c.hasValue());
@@ -1975,7 +1981,7 @@ TEST(TypeSupport, EncodingEquivalenceArray) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueArray(value1);
+ c.setValueArray(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::Array);
ASSERT_TRUE(c.hasValue());
@@ -2030,7 +2036,7 @@ TEST(TypeSupport, EncodingEquivalenceBinary) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueBinary(sizeof(value2), value1, &value2[0]);
+ c.setValueBinary(sizeof(value2), value1, &value2[0]).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::BinData);
ASSERT_TRUE(c.hasValue());
@@ -2069,7 +2075,7 @@ TEST(TypeSupport, EncodingEquivalenceUndefined) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueUndefined();
+ c.setValueUndefined().transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::Undefined);
ASSERT_TRUE(c.hasValue());
@@ -2109,7 +2115,7 @@ TEST(TypeSupport, EncodingEquivalenceOID) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueOID(value1);
+ c.setValueOID(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::jstOID);
ASSERT_TRUE(c.hasValue());
@@ -2149,7 +2155,7 @@ TEST(TypeSupport, EncodingEquivalenceBoolean) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueBool(value1);
+ c.setValueBool(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::Bool);
ASSERT_TRUE(c.hasValue());
@@ -2189,7 +2195,7 @@ TEST(TypeSupport, EncodingEquivalenceDate) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueDate(value1);
+ c.setValueDate(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::Date);
ASSERT_TRUE(c.hasValue());
@@ -2228,7 +2234,7 @@ TEST(TypeSupport, EncodingEquivalenceNull) {
ASSERT_OK(doc.root().appendUndefined(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueNull();
+ c.setValueNull().transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::jstNULL);
ASSERT_TRUE(c.hasValue());
@@ -2268,7 +2274,7 @@ TEST(TypeSupport, EncodingEquivalenceRegex) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueRegex(value1, value2);
+ c.setValueRegex(value1, value2).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::RegEx);
ASSERT_TRUE(c.hasValue());
@@ -2308,7 +2314,7 @@ TEST(TypeSupport, EncodingEquivalenceDBRef) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueDBRef(value1, value2);
+ c.setValueDBRef(value1, value2).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::DBRef);
ASSERT_TRUE(c.hasValue());
@@ -2347,7 +2353,7 @@ TEST(TypeSupport, EncodingEquivalenceCode) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueCode(value1);
+ c.setValueCode(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::Code);
ASSERT_TRUE(c.hasValue());
@@ -2387,7 +2393,7 @@ TEST(TypeSupport, EncodingEquivalenceSymbol) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueSymbol(value1);
+ c.setValueSymbol(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::Symbol);
ASSERT_TRUE(c.hasValue());
@@ -2427,7 +2433,7 @@ TEST(TypeSupport, EncodingEquivalenceCodeWithScope) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueCodeWithScope(value1, value2);
+ c.setValueCodeWithScope(value1, value2).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::CodeWScope);
ASSERT_TRUE(c.hasValue());
@@ -2467,7 +2473,7 @@ TEST(TypeSupport, EncodingEquivalenceInt) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueInt(value1);
+ c.setValueInt(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::NumberInt);
ASSERT_TRUE(c.hasValue());
@@ -2507,7 +2513,7 @@ TEST(TypeSupport, EncodingEquivalenceTimestamp) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueTimestamp(value1);
+ c.setValueTimestamp(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::bsonTimestamp);
ASSERT_TRUE(c.hasValue());
@@ -2547,7 +2553,7 @@ TEST(TypeSupport, EncodingEquivalenceLong) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueLong(value1);
+ c.setValueLong(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::NumberLong);
ASSERT_TRUE(c.hasValue());
@@ -2587,7 +2593,7 @@ TEST(TypeSupport, EncodingEquivalenceDecimal) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueDecimal(value1);
+ c.setValueDecimal(value1).transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::NumberDecimal);
ASSERT_TRUE(c.hasValue());
@@ -2626,7 +2632,7 @@ TEST(TypeSupport, EncodingEquivalenceMinKey) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueMinKey();
+ c.setValueMinKey().transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::MinKey);
ASSERT_TRUE(c.hasValue());
@@ -2665,7 +2671,7 @@ TEST(TypeSupport, EncodingEquivalenceMaxKey) {
ASSERT_OK(doc.root().appendNull(name));
mmb::Element c = doc.root().rightChild();
ASSERT_TRUE(c.ok());
- c.setValueMaxKey();
+ c.setValueMaxKey().transitional_ignore();
ASSERT_EQUALS(c.getType(), mongo::MaxKey);
ASSERT_TRUE(c.hasValue());
@@ -2901,7 +2907,7 @@ TEST(DocumentInPlace, StringLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueString("bar");
+ x.setValueString("bar").transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -2927,7 +2933,7 @@ TEST(DocumentInPlace, BinDataLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueBinary(binData2.length, binData2.type, binData2.data);
+ x.setValueBinary(binData2.length, binData2.type, binData2.data).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -2957,7 +2963,7 @@ TEST(DocumentInPlace, OIDLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueOID(oid2);
+ x.setValueOID(oid2).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -2977,7 +2983,7 @@ TEST(DocumentInPlace, BooleanLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueBool(false);
+ x.setValueBool(false).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -3003,7 +3009,7 @@ TEST(DocumentInPlace, DateLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueDate(mongo::Date_t::fromMillisSinceEpoch(20000));
+ x.setValueDate(mongo::Date_t::fromMillisSinceEpoch(20000)).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -3025,7 +3031,7 @@ TEST(DocumentInPlace, NumberIntLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueInt(value2);
+ x.setValueInt(value2).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -3051,7 +3057,8 @@ TEST(DocumentInPlace, TimestampLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueTimestamp(mongo::Timestamp(mongo::Date_t::fromMillisSinceEpoch(20000)));
+ x.setValueTimestamp(mongo::Timestamp(mongo::Date_t::fromMillisSinceEpoch(20000)))
+ .transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -3074,7 +3081,7 @@ TEST(DocumentInPlace, NumberLongLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueLong(value2);
+ x.setValueLong(value2).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -3103,7 +3110,7 @@ TEST(DocumentInPlace, NumberDoubleLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueDouble(value2);
+ x.setValueDouble(value2).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -3132,7 +3139,7 @@ TEST(DocumentInPlace, NumberDecimalLifecycle) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueDecimal(value2);
+ x.setValueDecimal(value2).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
ASSERT_EQUALS(1U, damages.size());
apply(&obj, damages, source);
@@ -3163,7 +3170,7 @@ TEST(DocumentInPlace, DoubleToLongAndBack) {
mmb::DamageVector damages;
const char* source = NULL;
- x.setValueLong(value2);
+ x.setValueLong(value2).transitional_ignore();
ASSERT_TRUE(doc.getInPlaceUpdates(&damages, &source));
// We changed the type, so we get an extra damage event.
ASSERT_EQUALS(2U, damages.size());
diff --git a/src/mongo/client/fetcher_test.cpp b/src/mongo/client/fetcher_test.cpp
index 4e17d55b346..a10baecd052 100644
--- a/src/mongo/client/fetcher_test.cpp
+++ b/src/mongo/client/fetcher_test.cpp
@@ -758,7 +758,7 @@ TEST_F(FetcherTest, CancelDuringCallbackPutsFetcherInShutdown) {
fetchStatus1 = fetchResult.getStatus();
fetcher->shutdown();
};
- fetcher->schedule();
+ fetcher->schedule().transitional_ignore();
const BSONObj doc = BSON("_id" << 1);
processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
<< "db.coll"
diff --git a/src/mongo/client/scoped_db_conn_test.cpp b/src/mongo/client/scoped_db_conn_test.cpp
index dd3c33db497..bda09d75263 100644
--- a/src/mongo/client/scoped_db_conn_test.cpp
+++ b/src/mongo/client/scoped_db_conn_test.cpp
@@ -216,8 +216,8 @@ public:
* Helper method for running the server on a separate thread.
*/
static void runServer(transport::TransportLayerLegacy* server) {
- server->setup();
- server->start();
+ server->setup().transitional_ignore();
+ server->start().transitional_ignore();
}
private:
@@ -352,8 +352,8 @@ protected:
private:
static void runServer(transport::TransportLayerLegacy* server) {
- server->setup();
- server->start();
+ server->setup().transitional_ignore();
+ server->start().transitional_ignore();
}
/**
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index 53d8edb33da..697453cb764 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -338,7 +338,7 @@ Status AuthorizationManager::getBSONForPrivileges(const PrivilegeVector& privile
if (!ParsedPrivilege::privilegeToParsedPrivilege(*it, &privilege, &errmsg)) {
return Status(ErrorCodes::BadValue, errmsg);
}
- resultArray.appendObject("privileges", privilege.toBSON());
+ resultArray.appendObject("privileges", privilege.toBSON()).transitional_ignore();
}
return Status::OK();
}
@@ -352,14 +352,14 @@ Status AuthorizationManager::getBSONForRole(RoleGraph* graph,
<< "does not name an existing role");
}
std::string id = mongoutils::str::stream() << roleName.getDB() << "." << roleName.getRole();
- result.appendString("_id", id);
- result.appendString(ROLE_NAME_FIELD_NAME, roleName.getRole());
- result.appendString(ROLE_DB_FIELD_NAME, roleName.getDB());
+ result.appendString("_id", id).transitional_ignore();
+ result.appendString(ROLE_NAME_FIELD_NAME, roleName.getRole()).transitional_ignore();
+ result.appendString(ROLE_DB_FIELD_NAME, roleName.getDB()).transitional_ignore();
// Build privileges array
mutablebson::Element privilegesArrayElement =
result.getDocument().makeElementArray("privileges");
- result.pushBack(privilegesArrayElement);
+ result.pushBack(privilegesArrayElement).transitional_ignore();
const PrivilegeVector& privileges = graph->getDirectPrivileges(roleName);
Status status = getBSONForPrivileges(privileges, privilegesArrayElement);
if (!status.isOK()) {
@@ -368,14 +368,14 @@ Status AuthorizationManager::getBSONForRole(RoleGraph* graph,
// Build roles array
mutablebson::Element rolesArrayElement = result.getDocument().makeElementArray("roles");
- result.pushBack(rolesArrayElement);
+ result.pushBack(rolesArrayElement).transitional_ignore();
for (RoleNameIterator roles = graph->getDirectSubordinates(roleName); roles.more();
roles.next()) {
const RoleName& subRole = roles.get();
mutablebson::Element roleObj = result.getDocument().makeElementObject("");
- roleObj.appendString(ROLE_NAME_FIELD_NAME, subRole.getRole());
- roleObj.appendString(ROLE_DB_FIELD_NAME, subRole.getDB());
- rolesArrayElement.pushBack(roleObj);
+ roleObj.appendString(ROLE_NAME_FIELD_NAME, subRole.getRole()).transitional_ignore();
+ roleObj.appendString(ROLE_DB_FIELD_NAME, subRole.getDB()).transitional_ignore();
+ rolesArrayElement.pushBack(roleObj).transitional_ignore();
}
return Status::OK();
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index 71b7f00ee51..34ae2cfb8ed 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -432,8 +432,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
// Change the user to be read-only
int ignored;
- managerState->remove(
- &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ managerState
+ ->remove(
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored)
+ .transitional_ignore();
ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
@@ -461,8 +463,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
ASSERT(user->isValid());
// Delete the user.
- managerState->remove(
- &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ managerState
+ ->remove(
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored)
+ .transitional_ignore();
// Make sure that invalidating the user causes the session to reload its privileges.
authzManager->invalidateUserByName(user->getName());
authzSession->startRequest(&_opCtx); // Refreshes cached data for invalid users
@@ -502,8 +506,10 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
// Change the user to be read-only
int ignored;
managerState->setFindsShouldFail(true);
- managerState->remove(
- &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored);
+ managerState
+ ->remove(
+ &_opCtx, AuthorizationManager::usersCollectionNamespace, BSONObj(), BSONObj(), &ignored)
+ .transitional_ignore();
ASSERT_OK(managerState->insertPrivilegeDocument(&_opCtx,
BSON("user"
<< "spencer"
@@ -726,7 +732,9 @@ TEST_F(AuthorizationSessionTest, AddPrivilegesForStageFailsIfOutNamespaceIsNotVa
<< ""));
BSONObj cmdObj = BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline);
ASSERT_THROWS_CODE(
- authzSession->checkAuthForAggregate(testFooNss, cmdObj, false), UserException, 17139);
+ authzSession->checkAuthForAggregate(testFooNss, cmdObj, false).transitional_ignore(),
+ UserException,
+ 17139);
}
TEST_F(AuthorizationSessionTest, CannotAggregateOutWithoutInsertAndRemoveOnTargetNamespace) {
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 9227e1d2dd2..29baf0cdf1c 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -345,8 +345,9 @@ Status AuthzManagerExternalStateLocal::_getRoleDescription_inlock(const RoleName
fassert(17323, resultDoc.root().pushBack(inheritedPrivilegesElement));
}
} else if (showPrivileges == PrivilegeFormat::kShowSeparate) {
- warningsElement.appendString(
- "", "Role graph state inconsistent; only direct privileges available.");
+ warningsElement
+ .appendString("", "Role graph state inconsistent; only direct privileges available.")
+ .transitional_ignore();
addPrivilegeObjectsOrWarningsToArrayElement(
privilegesElement, warningsElement, _roleGraph.getDirectPrivileges(roleName));
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index f4fef4fde08..933f0aff752 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -203,7 +203,7 @@ Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
return Status::OK();
} else if (status == ErrorCodes::NoMatchingDocument && upsert) {
if (query.hasField("_id")) {
- document.root().appendElement(query["_id"]);
+ document.root().appendElement(query["_id"]).transitional_ignore();
}
status = driver.populateDocumentWithQueryFields(opCtx, query, NULL, document);
if (!status.isOK()) {
diff --git a/src/mongo/db/auth/role_graph_test.cpp b/src/mongo/db/auth/role_graph_test.cpp
index 94752e36f4b..c9024b22ea0 100644
--- a/src/mongo/db/auth/role_graph_test.cpp
+++ b/src/mongo/db/auth/role_graph_test.cpp
@@ -529,7 +529,7 @@ TEST(RoleGraphTest, CopySwap) {
// Make a copy of the graph to do further modifications on.
RoleGraph tempGraph(graph);
ASSERT_OK(tempGraph.addRoleToRole(roleB, roleC));
- tempGraph.recomputePrivilegeData();
+ tempGraph.recomputePrivilegeData().transitional_ignore();
// Now swap the copy back with the original graph and make sure the original was updated
// properly.
@@ -686,10 +686,10 @@ TEST(RoleGraphTest, BuiltinRolesOnlyOnAppropriateDatabases) {
TEST(RoleGraphTest, getRolesForDatabase) {
RoleGraph graph;
- graph.createRole(RoleName("myRole", "test"));
+ graph.createRole(RoleName("myRole", "test")).transitional_ignore();
// Make sure that a role on "test2" doesn't show up in the roles list for "test"
- graph.createRole(RoleName("anotherRole", "test2"));
- graph.createRole(RoleName("myAdminRole", "admin"));
+ graph.createRole(RoleName("anotherRole", "test2")).transitional_ignore();
+ graph.createRole(RoleName("myAdminRole", "admin")).transitional_ignore();
// Non-admin DB with no user-defined roles
RoleNameIterator it = graph.getRolesForDatabase("fakedb");
diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp
index e0fad877a25..33c74bca2dc 100644
--- a/src/mongo/db/auth/sasl_commands.cpp
+++ b/src/mongo/db/auth/sasl_commands.cpp
@@ -264,7 +264,7 @@ void CmdSaslStart::help(std::stringstream& os) const {
void CmdSaslStart::redactForLogging(mutablebson::Document* cmdObj) {
mutablebson::Element element = mutablebson::findFirstChildNamed(cmdObj->root(), "payload");
if (element.ok()) {
- element.setValueString("xxx");
+ element.setValueString("xxx").transitional_ignore();
}
}
diff --git a/src/mongo/db/auth/sasl_scramsha1_test.cpp b/src/mongo/db/auth/sasl_scramsha1_test.cpp
index c575d94ddc0..49d3d6a27c0 100644
--- a/src/mongo/db/auth/sasl_scramsha1_test.cpp
+++ b/src/mongo/db/auth/sasl_scramsha1_test.cpp
@@ -241,7 +241,8 @@ protected:
saslServerSession = stdx::make_unique<NativeSaslAuthenticationSession>(authzSession.get());
saslServerSession->setOpCtxt(opCtx.get());
- saslServerSession->start("test", "SCRAM-SHA-1", "mongodb", "MockServer.test", 1, false);
+ saslServerSession->start("test", "SCRAM-SHA-1", "mongodb", "MockServer.test", 1, false)
+ .transitional_ignore();
saslClientSession = stdx::make_unique<NativeSaslClientSession>();
saslClientSession->setParameter(NativeSaslClientSession::parameterMechanism, "SCRAM-SHA-1");
saslClientSession->setParameter(NativeSaslClientSession::parameterServiceName, "mongodb");
@@ -253,8 +254,10 @@ protected:
};
TEST_F(SCRAMSHA1Fixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -276,8 +279,10 @@ TEST_F(SCRAMSHA1Fixture, testServerStep1DoesNotIncludeNonceFromClientStep1) {
}
TEST_F(SCRAMSHA1Fixture, testClientStep2DoesNotIncludeNonceFromServerStep1) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -298,8 +303,10 @@ TEST_F(SCRAMSHA1Fixture, testClientStep2DoesNotIncludeNonceFromServerStep1) {
}
TEST_F(SCRAMSHA1Fixture, testClientStep2GivesBadProof) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -323,8 +330,10 @@ TEST_F(SCRAMSHA1Fixture, testClientStep2GivesBadProof) {
}
TEST_F(SCRAMSHA1Fixture, testServerStep2GivesBadVerifier) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -358,8 +367,10 @@ TEST_F(SCRAMSHA1Fixture, testServerStep2GivesBadVerifier) {
TEST_F(SCRAMSHA1Fixture, testSCRAM) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -371,8 +382,10 @@ TEST_F(SCRAMSHA1Fixture, testSCRAM) {
}
TEST_F(SCRAMSHA1Fixture, testNULLInPassword) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "saj\0ack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "saj\0ack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -385,8 +398,10 @@ TEST_F(SCRAMSHA1Fixture, testNULLInPassword) {
TEST_F(SCRAMSHA1Fixture, testCommasInUsernameAndPassword) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("s,a,jack", "s,a,jack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("s,a,jack", "s,a,jack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "s,a,jack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -410,8 +425,10 @@ TEST_F(SCRAMSHA1Fixture, testIncorrectUser) {
}
TEST_F(SCRAMSHA1Fixture, testIncorrectPassword) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateSCRAMUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
@@ -426,8 +443,10 @@ TEST_F(SCRAMSHA1Fixture, testIncorrectPassword) {
}
TEST_F(SCRAMSHA1Fixture, testMONGODBCR) {
- authzManagerExternalState->insertPrivilegeDocument(
- opCtx.get(), generateMONGODBCRUserDocument("sajack", "sajack"), BSONObj());
+ authzManagerExternalState
+ ->insertPrivilegeDocument(
+ opCtx.get(), generateMONGODBCRUserDocument("sajack", "sajack"), BSONObj())
+ .transitional_ignore();
saslClientSession->setParameter(NativeSaslClientSession::parameterUser, "sajack");
saslClientSession->setParameter(NativeSaslClientSession::parameterPassword,
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index d27d485a998..69bac1c04ff 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -214,8 +214,10 @@ mongo::Status mongo::cloneCollectionAsCapped(OperationContext* opCtx,
WriteUnitOfWork wunit(opCtx);
OpDebug* const nullOpDebug = nullptr;
- toCollection->insertDocument(
- opCtx, objToClone.value(), nullOpDebug, true, opCtx->writesAreReplicated());
+ toCollection
+ ->insertDocument(
+ opCtx, objToClone.value(), nullOpDebug, true, opCtx->writesAreReplicated())
+ .transitional_ignore();
wunit.commit();
// Go to the next document
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 741d5cfac86..b58b1ea96bf 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -173,7 +173,7 @@ StatusWith<CollModRequest> parseCollModRequest(OperationContext* opCtx,
} else if (fieldName == "validationAction" && !isView) {
auto statusW = coll->parseValidationAction(e.String());
if (!statusW.isOK())
- statusW.getStatus();
+ return statusW.getStatus();
cmr.collValidationAction = e.String();
} else if (fieldName == "pipeline") {
@@ -371,15 +371,15 @@ mongo::Status mongo::collMod(OperationContext* opCtx,
// Validator
if (!cmr.collValidator.eoo())
- coll->setValidator(opCtx, cmr.collValidator.Obj());
+ coll->setValidator(opCtx, cmr.collValidator.Obj()).transitional_ignore();
// ValidationAction
if (!cmr.collValidationAction.empty())
- coll->setValidationAction(opCtx, cmr.collValidationAction);
+ coll->setValidationAction(opCtx, cmr.collValidationAction).transitional_ignore();
// ValidationLevel
if (!cmr.collValidationLevel.empty())
- coll->setValidationLevel(opCtx, cmr.collValidationLevel);
+ coll->setValidationLevel(opCtx, cmr.collValidationLevel).transitional_ignore();
// UsePowerof2Sizes
if (!cmr.usePowerOf2Sizes.eoo())
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index a4e1f40c603..fa94ca47f20 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -94,7 +94,7 @@ public:
}
virtual void inserted(const RecordData& recData, const RecordId& newLocation) {
- _multiIndexBlock->insert(recData.toBson(), newLocation);
+ _multiIndexBlock->insert(recData.toBson(), newLocation).transitional_ignore();
}
private:
diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp
index d58b4ad933d..8aeb18796cb 100644
--- a/src/mongo/db/catalog/collection_impl.cpp
+++ b/src/mongo/db/catalog/collection_impl.cpp
@@ -193,7 +193,7 @@ CollectionImpl::CollectionImpl(Collection* _this_init,
void CollectionImpl::init(OperationContext* opCtx) {
_magic = kMagicNumber;
- _indexCatalog.init(opCtx);
+ _indexCatalog.init(opCtx).transitional_ignore();
if (isCapped())
_recordStore->setCappedCallback(this);
@@ -1236,7 +1236,7 @@ Status CollectionImpl::validate(OperationContext* opCtx,
IndexAccessMethod* iam = _indexCatalog.getIndex(descriptor);
ValidateResults curIndexResults;
int64_t numKeys;
- iam->validate(opCtx, &numKeys, &curIndexResults);
+ iam->validate(opCtx, &numKeys, &curIndexResults).transitional_ignore();
keysPerIndex.appendNumber(descriptor->indexNamespace(),
static_cast<long long>(numKeys));
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 9a22dbd270b..3c000f50243 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -39,7 +39,7 @@ namespace mongo {
void checkRoundTrip(const CollectionOptions& options1) {
CollectionOptions options2;
- options2.parse(options1.toBSON());
+ options2.parse(options1.toBSON()).transitional_ignore();
ASSERT_BSONOBJ_EQ(options1.toBSON(), options2.toBSON());
}
diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp
index a9a7573eccc..4649fca4aa6 100644
--- a/src/mongo/db/catalog/database_holder_impl.cpp
+++ b/src/mongo/db/catalog/database_holder_impl.cpp
@@ -198,7 +198,10 @@ void DatabaseHolderImpl::close(OperationContext* opCtx, StringData ns, const std
delete it->second;
_dbs.erase(it);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, dbName.toString());
+ getGlobalServiceContext()
+ ->getGlobalStorageEngine()
+ ->closeDatabase(opCtx, dbName.toString())
+ .transitional_ignore();
}
bool DatabaseHolderImpl::closeAll(OperationContext* opCtx,
@@ -234,7 +237,10 @@ bool DatabaseHolderImpl::closeAll(OperationContext* opCtx,
_dbs.erase(name);
- getGlobalServiceContext()->getGlobalStorageEngine()->closeDatabase(opCtx, name);
+ getGlobalServiceContext()
+ ->getGlobalStorageEngine()
+ ->closeDatabase(opCtx, name)
+ .transitional_ignore();
bb.append(name);
}
diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp
index a000ce266ba..aa4eda0939e 100644
--- a/src/mongo/db/catalog/database_impl.cpp
+++ b/src/mongo/db/catalog/database_impl.cpp
@@ -740,7 +740,10 @@ void DatabaseImpl::dropDatabase(OperationContext* opCtx, Database* db) {
db = NULL; // d is now deleted
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- getGlobalServiceContext()->getGlobalStorageEngine()->dropDatabase(opCtx, name);
+ getGlobalServiceContext()
+ ->getGlobalStorageEngine()
+ ->dropDatabase(opCtx, name)
+ .transitional_ignore();
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "dropDatabase", name);
}
diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp
index 0a71b6defbe..468c0850ba2 100644
--- a/src/mongo/db/catalog/database_test.cpp
+++ b/src/mongo/db/catalog/database_test.cpp
@@ -314,7 +314,9 @@ void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationCont
ASSERT_GREATER_THAN(indexCatalog->numIndexesInProgress(opCtx), 0);
WriteUnitOfWork wuow(opCtx);
- ASSERT_THROWS_CODE(db->dropCollection(opCtx, nss.ns()), MsgAssertionException, 40461);
+ ASSERT_THROWS_CODE(db->dropCollection(opCtx, nss.ns()).transitional_ignore(),
+ MsgAssertionException,
+ 40461);
});
}
diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp
index 29183e04737..e9cfd3a4f0c 100644
--- a/src/mongo/db/catalog/index_catalog_impl.cpp
+++ b/src/mongo/db/catalog/index_catalog_impl.cpp
@@ -432,7 +432,7 @@ void IndexCatalogImpl::IndexBuildBlock::fail() {
invariant(entry == _entry);
if (entry) {
- IndexCatalogImpl::_dropIndex(_catalog, _opCtx, entry);
+ IndexCatalogImpl::_dropIndex(_catalog, _opCtx, entry).transitional_ignore();
} else {
IndexCatalog::_deleteIndexFromDisk(_catalog, _opCtx, _indexName, _indexNamespace);
}
@@ -890,7 +890,7 @@ void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx,
LOG(1) << "\t dropAllIndexes dropping: " << desc->toString();
IndexCatalogEntry* entry = _entries.find(desc);
invariant(entry);
- _dropIndex(opCtx, entry);
+ _dropIndex(opCtx, entry).transitional_ignore();
if (droppedIndexes != nullptr) {
droppedIndexes->emplace(desc->indexName(), desc->infoObj());
@@ -1403,7 +1403,7 @@ void IndexCatalogImpl::unindexRecord(OperationContext* opCtx,
// If it's a background index, we DO NOT want to log anything.
bool logIfError = entry->isReady(opCtx) ? !noWarn : false;
- _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut);
+ _unindexRecord(opCtx, entry, obj, loc, logIfError, keysDeletedOut).transitional_ignore();
}
}
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index ee7b2de8743..8fdf3551b7b 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -227,7 +227,7 @@ Status renameCollection(OperationContext* opCtx,
}
indexesToCopy.push_back(newIndex.obj());
}
- indexer.init(indexesToCopy);
+ indexer.init(indexesToCopy).status_with_transitional_ignore();
}
{
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
index 889d9961607..4f169b2fa1f 100644
--- a/src/mongo/db/clientcursor.cpp
+++ b/src/mongo/db/clientcursor.cpp
@@ -140,7 +140,9 @@ void ClientCursor::updateSlaveLocation(OperationContext* opCtx) {
if (!rid.isSet())
return;
- repl::getGlobalReplicationCoordinator()->setLastOptimeForSlave(rid, _slaveReadTill);
+ repl::getGlobalReplicationCoordinator()
+ ->setLastOptimeForSlave(rid, _slaveReadTill)
+ .transitional_ignore();
}
//
diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp
index d3800a17b1d..dfb5ca174c2 100644
--- a/src/mongo/db/commands/authentication_commands.cpp
+++ b/src/mongo/db/commands/authentication_commands.cpp
@@ -145,7 +145,7 @@ void CmdAuthenticate::redactForLogging(mutablebson::Document* cmdObj) {
for (mmb::Element element = mmb::findFirstChildNamed(cmdObj->root(), redactedFields[i]);
element.ok();
element = mmb::findElementNamed(element.rightSibling(), redactedFields[i])) {
- element.setValueString("xxx");
+ element.setValueString("xxx").transitional_ignore();
}
}
}
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 42e9419622c..2fabe66c892 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -267,7 +267,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
querySettings->removeAllowedIndices(planCache->computeKey(*cq));
// Remove entry from plan cache
- planCache->remove(*cq);
+ planCache->remove(*cq).transitional_ignore();
LOG(0) << "Removed index filter on " << ns << " " << redact(cq->toStringShort());
@@ -316,7 +316,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
// Remove plan cache entry.
- planCache->remove(*cq);
+ planCache->remove(*cq).transitional_ignore();
}
LOG(0) << "Removed all index filters for collection: " << ns;
@@ -394,7 +394,7 @@ Status SetFilter::set(OperationContext* opCtx,
querySettings->setAllowedIndices(*cq, planCache->computeKey(*cq), indexes, indexNames);
// Remove entry from plan cache.
- planCache->remove(*cq);
+ planCache->remove(*cq).transitional_ignore();
LOG(0) << "Index filter set on " << ns << " " << redact(cq->toStringShort()) << " "
<< indexesElt;
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 43637227691..02eb08c1aab 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -376,7 +376,7 @@ void State::dropTempCollections() {
"no longer primary",
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(
_opCtx, _config.tempNamespace));
- db->dropCollection(_opCtx, _config.tempNamespace.ns());
+ db->dropCollection(_opCtx, _config.tempNamespace.ns()).transitional_ignore();
wunit.commit();
}
}
@@ -394,7 +394,7 @@ void State::dropTempCollections() {
Lock::DBLock lk(_opCtx, _config.incLong.db(), MODE_X);
if (Database* db = dbHolder().get(_opCtx, _config.incLong.ns())) {
WriteUnitOfWork wunit(_opCtx);
- db->dropCollection(_opCtx, _config.incLong.ns());
+ db->dropCollection(_opCtx, _config.incLong.ns()).transitional_ignore();
wunit.commit();
}
}
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index ab794c8bf0e..649ad191169 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -335,15 +335,15 @@ private:
// Save LogComponent::kDefault LogSeverity at root
if (component == LogComponent::kDefault) {
- doc.root().appendInt("verbosity", severity);
+ doc.root().appendInt("verbosity", severity).transitional_ignore();
continue;
}
mutablebson::Element element = doc.makeElementObject(component.getShortName());
- element.appendInt("verbosity", severity);
+ element.appendInt("verbosity", severity).transitional_ignore();
mutablebson::Element parentElement = _getParentElement(doc, component);
- parentElement.pushBack(element);
+ parentElement.pushBack(element).transitional_ignore();
}
BSONObj result = doc.getObject();
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 87ce9dd8093..97b3884f8cd 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -153,7 +153,7 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
vector<BSONObj> shapes = getShapes(planCache);
ASSERT_EQUALS(shapes.size(), 1U);
@@ -186,7 +186,7 @@ TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
ASSERT_EQUALS(getShapes(planCache).size(), 1U);
// Clear cache and confirm number of keys afterwards.
@@ -327,8 +327,8 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cqA, solns, createDecision(1U));
- planCache.add(*cqB, solns, createDecision(1U));
+ planCache.add(*cqA, solns, createDecision(1U)).transitional_ignore();
+ planCache.add(*cqB, solns, createDecision(1U)).transitional_ignore();
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
@@ -386,8 +386,8 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
- planCache.add(*cqCollation, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
+ planCache.add(*cqCollation, solns, createDecision(1U)).transitional_ignore();
// Check keys in cache before dropping the query with collation.
vector<BSONObj> shapesBefore = getShapes(planCache);
@@ -530,7 +530,7 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
vector<BSONObj> plans = getPlans(planCache,
cq->getQueryObj(),
@@ -560,7 +560,7 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(2U));
+ planCache.add(*cq, solns, createDecision(2U)).transitional_ignore();
vector<BSONObj> plans = getPlans(planCache,
cq->getQueryObj(),
@@ -599,11 +599,11 @@ TEST(PlanCacheCommandsTest, planCacheListPlansCollation) {
qs.cacheData.reset(createSolutionCacheData());
std::vector<QuerySolution*> solns;
solns.push_back(&qs);
- planCache.add(*cq, solns, createDecision(1U));
+ planCache.add(*cq, solns, createDecision(1U)).transitional_ignore();
std::vector<QuerySolution*> twoSolns;
twoSolns.push_back(&qs);
twoSolns.push_back(&qs);
- planCache.add(*cqCollation, twoSolns, createDecision(2U));
+ planCache.add(*cqCollation, twoSolns, createDecision(2U)).transitional_ignore();
// Normal query should have one solution.
vector<BSONObj> plans = getPlans(planCache,
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 3126a71551d..4c0a7aee499 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -1238,7 +1238,8 @@ public:
AuthorizationManager::usersCollectionNamespace,
queryBuilder.done(),
projection.done(),
- function);
+ function)
+ .transitional_ignore();
}
result.append("users", usersArrayBuilder.arr());
return true;
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 6b3fc2f4b19..608719e0341 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -57,7 +57,7 @@ void redactPasswordData(mutablebson::Element parent) {
const auto pwdFieldName = "pwd"_sd;
for (mmb::Element pwdElement = mmb::findFirstChildNamed(parent, pwdFieldName); pwdElement.ok();
pwdElement = mmb::findElementNamed(pwdElement.rightSibling(), pwdFieldName)) {
- pwdElement.setValueString("xxx");
+ pwdElement.setValueString("xxx").transitional_ignore();
}
}
diff --git a/src/mongo/db/commands/write_commands/write_commands.cpp b/src/mongo/db/commands/write_commands/write_commands.cpp
index a1b43356a8a..9bee4b173f7 100644
--- a/src/mongo/db/commands/write_commands/write_commands.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands.cpp
@@ -71,7 +71,7 @@ void redactTooLongLog(mutablebson::Document* cmdObj, StringData fieldName) {
// Redact the log if there are more than one documents or operations.
if (field.countChildren() > 1) {
- field.setValueInt(field.countChildren());
+ field.setValueInt(field.countChildren()).transitional_ignore();
}
}
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 7d925d6d73f..a5d87bfbf55 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -644,7 +644,7 @@ ExitCode _initAndListen(int listenPort) {
uassertStatusOK(ShardingState::get(startupOpCtx.get())
->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));
if (shardingInitialized) {
- waitForShardRegistryReload(startupOpCtx.get());
+ waitForShardRegistryReload(startupOpCtx.get()).transitional_ignore();
}
if (!storageGlobalParams.readOnly) {
@@ -705,7 +705,7 @@ ExitCode _initAndListen(int listenPort) {
// Set up the periodic runner for background job execution
auto runner = makePeriodicRunner();
- runner->startup();
+ runner->startup().transitional_ignore();
globalServiceContext->setPeriodicRunner(std::move(runner));
// Set up the logical session cache
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 3106994547e..42780395037 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -230,7 +230,7 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
// cache entry if requested by the caller.
if (shouldCache) {
PlanCache* cache = _collection->infoCache()->getPlanCache();
- cache->remove(*_canonicalQuery);
+ cache->remove(*_canonicalQuery).transitional_ignore();
}
PlanStage* newRoot;
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index b7a25313969..9bb94def100 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -506,7 +506,7 @@ class TwoDPtInAnnulusExpression : public LeafMatchExpression {
public:
TwoDPtInAnnulusExpression(const R2Annulus& annulus, StringData twoDPath)
: LeafMatchExpression(INTERNAL_2D_POINT_IN_ANNULUS), _annulus(annulus) {
- setPath(twoDPath);
+ setPath(twoDPath).transitional_ignore();
}
void serialize(BSONObjBuilder* out) const final {
@@ -726,7 +726,7 @@ StatusWith<NearStage::CoveredInterval*> //
// These parameters are stored by the index, and so must be ok
GeoHashConverter::Parameters hashParams;
- GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams);
+ GeoHashConverter::parseParameters(_twoDIndex->infoObj(), &hashParams).transitional_ignore();
// 2D indexes support covered search over additional fields they contain
IndexScan* scan = new IndexScan(opCtx, scanParams, workingSet, _nearParams.filter);
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index a01521610ea..b113a17cc30 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -127,7 +127,7 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) {
// if the best solution fails. Alternatively we could try to
// defer cache insertion to be after the first produced result.
- _collection->infoCache()->getPlanCache()->remove(*_query);
+ _collection->infoCache()->getPlanCache()->remove(*_query).transitional_ignore();
_bestPlanIdx = _backupPlanIdx;
_backupPlanIdx = kNoSuchPlan;
@@ -323,7 +323,10 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) {
}
if (validSolutions) {
- _collection->infoCache()->getPlanCache()->add(*_query, solutions, ranking.release());
+ _collection->infoCache()
+ ->getPlanCache()
+ ->add(*_query, solutions, ranking.release())
+ .transitional_ignore();
}
}
diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp
index edf0386d042..230673dcc7e 100644
--- a/src/mongo/db/exec/projection_exec.cpp
+++ b/src/mongo/db/exec/projection_exec.cpp
@@ -475,7 +475,7 @@ void ProjectionExec::appendArray(BSONObjBuilder* bob, const BSONObj& array, bool
BSONObjBuilder subBob;
BSONObjIterator jt(elt.embeddedObject());
while (jt.more()) {
- append(&subBob, jt.next());
+ append(&subBob, jt.next()).transitional_ignore();
}
bob->append(bob->numStr(index++), subBob.obj());
break;
@@ -518,7 +518,7 @@ Status ProjectionExec::append(BSONObjBuilder* bob,
BSONObjBuilder subBob;
BSONObjIterator it(elt.embeddedObject());
while (it.more()) {
- subfm.append(&subBob, it.next(), details, arrayOpType);
+ subfm.append(&subBob, it.next(), details, arrayOpType).transitional_ignore();
}
bob->append(elt.fieldName(), subBob.obj());
} else {
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index bdd37af66aa..c562be7ed26 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -260,7 +260,7 @@ void SortKeyGenerator::getBoundsForSort(OperationContext* opCtx,
std::vector<QuerySolution*> solns;
LOG(5) << "Sort key generation: Planning to obtain bounds for sort.";
- QueryPlanner::plan(*queryForSort, params, &solns);
+ QueryPlanner::plan(*queryForSort, params, &solns).transitional_ignore();
// TODO: are there ever > 1 solns? If so, do we look for a specific soln?
if (1 == solns.size()) {
diff --git a/src/mongo/db/ftdc/compressor.cpp b/src/mongo/db/ftdc/compressor.cpp
index ecf9c7ece6c..984698f7daf 100644
--- a/src/mongo/db/ftdc/compressor.cpp
+++ b/src/mongo/db/ftdc/compressor.cpp
@@ -45,7 +45,8 @@ using std::swap;
StatusWith<boost::optional<std::tuple<ConstDataRange, FTDCCompressor::CompressorState, Date_t>>>
FTDCCompressor::addSample(const BSONObj& sample, Date_t date) {
if (_referenceDoc.isEmpty()) {
- FTDCBSONUtil::extractMetricsFromDocument(sample, sample, &_metrics);
+ FTDCBSONUtil::extractMetricsFromDocument(sample, sample, &_metrics)
+ .status_with_transitional_ignore();
_reset(sample, date);
return {boost::none};
}
diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp
index a2b1159de4c..8438a820d36 100644
--- a/src/mongo/db/ftdc/file_manager.cpp
+++ b/src/mongo/db/ftdc/file_manager.cpp
@@ -55,7 +55,7 @@ FTDCFileManager::FTDCFileManager(const FTDCConfig* config,
: _config(config), _writer(_config), _path(path), _rotateCollectors(collection) {}
FTDCFileManager::~FTDCFileManager() {
- close();
+ close().transitional_ignore();
}
StatusWith<std::unique_ptr<FTDCFileManager>> FTDCFileManager::create(
diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp
index 6c2e5c220a6..027ae88af62 100644
--- a/src/mongo/db/ftdc/file_manager_test.cpp
+++ b/src/mongo/db/ftdc/file_manager_test.cpp
@@ -111,7 +111,7 @@ TEST(FTDCFileManagerTest, TestFull) {
Date_t()));
}
- mgr->close();
+ mgr->close().transitional_ignore();
auto files = scanDirectory(dir);
@@ -211,7 +211,7 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
Date_t()));
}
- mgr->close();
+ mgr->close().transitional_ignore();
// Validate the interim file does not have data
ValidateInterimFileHasData(dir, false);
@@ -281,7 +281,7 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
Date_t()));
}
- mgr->close();
+ mgr->close().transitional_ignore();
auto swFile = mgr->generateArchiveFileName(dir, "0test-crash");
ASSERT_OK(swFile);
diff --git a/src/mongo/db/ftdc/file_writer.cpp b/src/mongo/db/ftdc/file_writer.cpp
index cd3ffdc45cf..d32736f626c 100644
--- a/src/mongo/db/ftdc/file_writer.cpp
+++ b/src/mongo/db/ftdc/file_writer.cpp
@@ -47,7 +47,7 @@
namespace mongo {
FTDCFileWriter::~FTDCFileWriter() {
- close();
+ close().transitional_ignore();
}
Status FTDCFileWriter::open(const boost::filesystem::path& file) {
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index 138d7c850f6..f7977e2b8b3 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -76,7 +76,7 @@ TEST(FTDCFileTest, TestFileBasicMetadata) {
ASSERT_OK(writer.writeMetadata(doc1, Date_t()));
ASSERT_OK(writer.writeMetadata(doc2, Date_t()));
- writer.close();
+ writer.close().transitional_ignore();
FTDCFileReader reader;
ASSERT_OK(reader.open(p));
@@ -127,7 +127,7 @@ TEST(FTDCFileTest, TestFileBasicCompress) {
ASSERT_OK(writer.writeSample(doc1, Date_t()));
ASSERT_OK(writer.writeSample(doc2, Date_t()));
- writer.close();
+ writer.close().transitional_ignore();
FTDCFileReader reader;
ASSERT_OK(reader.open(p));
@@ -194,7 +194,7 @@ private:
ASSERT_OK(sw);
}
- _writer.close();
+ _writer.close().transitional_ignore();
ValidateDocumentList(_path, _docs);
}
diff --git a/src/mongo/db/geo/r2_region_coverer_test.cpp b/src/mongo/db/geo/r2_region_coverer_test.cpp
index 69b6abba563..6ae997d0ee5 100644
--- a/src/mongo/db/geo/r2_region_coverer_test.cpp
+++ b/src/mongo/db/geo/r2_region_coverer_test.cpp
@@ -275,11 +275,13 @@ GeometryContainer* getRandomCircle(double radius) {
// Format: { $center : [ [-74, 40.74], 10 ] }
GeometryContainer* container = new GeometryContainer();
- container->parseFromQuery(
- BSON("$center" << BSON_ARRAY(BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
- << randDouble(radius, MAXBOUND - radius))
- << radius))
- .firstElement());
+ container
+ ->parseFromQuery(
+ BSON("$center" << BSON_ARRAY(BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
+ << randDouble(radius, MAXBOUND - radius))
+ << radius))
+ .firstElement())
+ .transitional_ignore();
return container;
}
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index 95310de17c3..be9e1a26f26 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -335,7 +335,7 @@ MONGO_INITIALIZER(RegisterShortCircuitExitHandler)(InitializerContext*) {
}
bool initializeServerGlobalState() {
- Listener::globalTicketHolder.resize(serverGlobalParams.maxConns);
+ Listener::globalTicketHolder.resize(serverGlobalParams.maxConns).transitional_ignore();
#ifndef _WIN32
if (!fs::is_directory(serverGlobalParams.socket)) {
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
index 9ef233f11be..f3d6680b242 100644
--- a/src/mongo/db/introspect.cpp
+++ b/src/mongo/db/introspect.cpp
@@ -122,7 +122,7 @@ void profile(OperationContext* opCtx, NetworkOp op) {
if (acquireDbXLock) {
autoGetDb.reset(new AutoGetDb(opCtx, dbName, MODE_X));
if (autoGetDb->getDb()) {
- createProfileCollection(opCtx, autoGetDb->getDb());
+ createProfileCollection(opCtx, autoGetDb->getDb()).transitional_ignore();
}
} else {
autoGetDb.reset(new AutoGetDb(opCtx, dbName, MODE_IX));
@@ -142,7 +142,7 @@ void profile(OperationContext* opCtx, NetworkOp op) {
if (coll) {
WriteUnitOfWork wuow(opCtx);
OpDebug* const nullOpDebug = nullptr;
- coll->insertDocument(opCtx, p, nullOpDebug, false);
+ coll->insertDocument(opCtx, p, nullOpDebug, false).transitional_ignore();
wuow.commit();
break;
diff --git a/src/mongo/db/keys_collection_manager_test.cpp b/src/mongo/db/keys_collection_manager_test.cpp
index dbda76176b8..aae1db1a6a4 100644
--- a/src/mongo/db/keys_collection_manager_test.cpp
+++ b/src/mongo/db/keys_collection_manager_test.cpp
@@ -82,9 +82,10 @@ private:
TEST_F(KeysManagerTest, GetKeyForValidationTimesOutIfRefresherIsNotRunning) {
operationContext()->setDeadlineAfterNowBy(Microseconds(250 * 1000));
- ASSERT_THROWS(
- keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0))),
- DBException);
+ ASSERT_THROWS(keyManager()
+ ->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)))
+ .status_with_transitional_ignore(),
+ DBException);
}
TEST_F(KeysManagerTest, GetKeyForValidationErrorsIfKeyDoesntExist) {
diff --git a/src/mongo/db/logical_clock_test.cpp b/src/mongo/db/logical_clock_test.cpp
index 2130b5e9944..27c1f1a0907 100644
--- a/src/mongo/db/logical_clock_test.cpp
+++ b/src/mongo/db/logical_clock_test.cpp
@@ -326,7 +326,8 @@ TEST_F(LogicalClockTest, RejectsLogicalTimesGreaterThanMaxTime) {
auto almostMaxSecs =
Seconds(maxVal) - LogicalClock::kMaxAcceptableLogicalClockDriftSecs + Seconds(10);
setMockClockSourceTime(Date_t::fromDurationSinceEpoch(almostMaxSecs));
- ASSERT_THROWS(getClock()->advanceClusterTime(beyondMaxTime), std::exception);
+ ASSERT_THROWS(getClock()->advanceClusterTime(beyondMaxTime).transitional_ignore(),
+ std::exception);
ASSERT_TRUE(getClock()->getClusterTime() == LogicalTime());
}
diff --git a/src/mongo/db/logical_session_cache_test.cpp b/src/mongo/db/logical_session_cache_test.cpp
index c0f86306ef4..a9d3c5eb1c1 100644
--- a/src/mongo/db/logical_session_cache_test.cpp
+++ b/src/mongo/db/logical_session_cache_test.cpp
@@ -147,7 +147,7 @@ TEST_F(LogicalSessionCacheTest, TestCacheHitsOnly) {
ASSERT(!res.isOK());
// When the record is present, returns the owner
- cache()->getOwner(lsid);
+ cache()->getOwner(lsid).status_with_transitional_ignore();
res = cache()->getOwnerFromCache(lsid);
ASSERT(res.isOK());
auto fetched = res.getValue();
@@ -234,8 +234,8 @@ TEST_F(LogicalSessionCacheTest, CacheRefreshesOwnRecords) {
// Insert two records into the cache
auto record1 = newRecord();
auto record2 = newRecord();
- cache()->startSession(record1);
- cache()->startSession(record2);
+ cache()->startSession(record1).transitional_ignore();
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<int> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -284,8 +284,8 @@ TEST_F(LogicalSessionCacheTest, CacheDeletesRecordsThatFailToRefresh) {
// Put two sessions into the cache
auto record1 = newRecord();
auto record2 = newRecord();
- cache()->startSession(record1);
- cache()->startSession(record2);
+ cache()->startSession(record1).transitional_ignore();
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<void> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -313,9 +313,9 @@ TEST_F(LogicalSessionCacheTest, KeepActiveSessionAliveEvenIfRefreshFails) {
// Put two sessions into the cache, one into the service
auto record1 = newRecord();
auto record2 = newRecord();
- cache()->startSession(record1);
+ cache()->startSession(record1).transitional_ignore();
service()->add(record1.getLsid());
- cache()->startSession(record2);
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<void> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -342,7 +342,7 @@ TEST_F(LogicalSessionCacheTest, KeepActiveSessionAliveEvenIfRefreshFails) {
TEST_F(LogicalSessionCacheTest, BasicSessionExpiration) {
// Insert a record
auto record = newRecord();
- cache()->startSession(record);
+ cache()->startSession(record).transitional_ignore();
auto res = cache()->getOwnerFromCache(record.getLsid());
ASSERT(res.isOK());
@@ -411,7 +411,7 @@ TEST_F(LogicalSessionCacheTest, RefreshCachedAndServiceRecordsTogether) {
auto record1 = newRecord();
service()->add(record1.getLsid());
auto record2 = newRecord();
- cache()->startSession(record2);
+ cache()->startSession(record2).transitional_ignore();
stdx::promise<void> hitRefresh;
auto refreshFuture = hitRefresh.get_future();
@@ -433,7 +433,7 @@ TEST_F(LogicalSessionCacheTest, ManyRecordsInCacheRefresh) {
int count = LogicalSessionCache::kLogicalSessionCacheDefaultCapacity;
for (int i = 0; i < count; i++) {
auto record = newRecord();
- cache()->startSession(record);
+ cache()->startSession(record).transitional_ignore();
}
stdx::promise<void> hitRefresh;
@@ -482,7 +482,7 @@ TEST_F(LogicalSessionCacheTest, ManySessionsRefreshComboDeluxe) {
service()->add(record.getLsid());
auto record2 = newRecord();
- cache()->startSession(record2);
+ cache()->startSession(record2).transitional_ignore();
}
stdx::mutex mutex;
diff --git a/src/mongo/db/matcher/expression_algo.cpp b/src/mongo/db/matcher/expression_algo.cpp
index 12fe0224258..292bf1f590a 100644
--- a/src/mongo/db/matcher/expression_algo.cpp
+++ b/src/mongo/db/matcher/expression_algo.cpp
@@ -152,7 +152,7 @@ bool _isSubsetOf(const MatchExpression* lhs, const ComparisonMatchExpression* rh
for (BSONElement elem : ime->getEqualities()) {
// Each element in the $in-array represents an equality predicate.
EqualityMatchExpression equality;
- equality.init(lhs->path(), elem);
+ equality.init(lhs->path(), elem).transitional_ignore();
equality.setCollator(ime->getCollator());
if (!_isSubsetOf(&equality, rhs)) {
return false;
@@ -283,7 +283,7 @@ void applyRenamesToExpression(MatchExpression* expr, const StringMap<std::string
auto it = renames.find(expr->path());
if (it != renames.end()) {
LeafMatchExpression* leafExpr = checked_cast<LeafMatchExpression*>(expr);
- leafExpr->setPath(it->second);
+ leafExpr->setPath(it->second).transitional_ignore();
}
}
diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp
index 86fbc384eae..965ab75d119 100644
--- a/src/mongo/db/matcher/expression_array.cpp
+++ b/src/mongo/db/matcher/expression_array.cpp
@@ -144,7 +144,7 @@ ElemMatchValueMatchExpression::~ElemMatchValueMatchExpression() {
}
Status ElemMatchValueMatchExpression::init(StringData path, MatchExpression* sub) {
- init(path);
+ init(path).transitional_ignore();
add(sub);
return Status::OK();
}
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index 0920467c3bf..e121788a091 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -77,7 +77,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ElemMatchObjectMatchExpression> e =
stdx::make_unique<ElemMatchObjectMatchExpression>();
- e->init(path(), _sub->shallowClone().release());
+ e->init(path(), _sub->shallowClone().release()).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -122,7 +122,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ElemMatchValueMatchExpression> e =
stdx::make_unique<ElemMatchValueMatchExpression>();
- e->init(path());
+ e->init(path()).transitional_ignore();
for (size_t i = 0; i < _subs.size(); ++i) {
e->add(_subs[i]->shallowClone().release());
}
@@ -161,7 +161,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<SizeMatchExpression> e = stdx::make_unique<SizeMatchExpression>();
- e->init(path(), _size);
+ e->init(path(), _size).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
diff --git a/src/mongo/db/matcher/expression_array_test.cpp b/src/mongo/db/matcher/expression_array_test.cpp
index 6a7f36cebed..894fbbd9e49 100644
--- a/src/mongo/db/matcher/expression_array_test.cpp
+++ b/src/mongo/db/matcher/expression_array_test.cpp
@@ -305,7 +305,7 @@ TEST(AndOfElemMatch, MatchesElement) {
// and1 = { a : 1, b : 1 }
unique_ptr<ElemMatchObjectMatchExpression> elemMatch1(new ElemMatchObjectMatchExpression());
- elemMatch1->init("x", and1.release());
+ elemMatch1->init("x", and1.release()).transitional_ignore();
// elemMatch1 = { x : { $elemMatch : { a : 1, b : 1 } } }
BSONObj baseOperanda2 = BSON("a" << 2);
@@ -322,7 +322,7 @@ TEST(AndOfElemMatch, MatchesElement) {
// and2 = { a : 2, b : 2 }
unique_ptr<ElemMatchObjectMatchExpression> elemMatch2(new ElemMatchObjectMatchExpression());
- elemMatch2->init("x", and2.release());
+ elemMatch2->init("x", and2.release()).transitional_ignore();
// elemMatch2 = { x : { $elemMatch : { a : 2, b : 2 } } }
unique_ptr<AndMatchExpression> andOfEM(new AndMatchExpression());
@@ -357,7 +357,7 @@ TEST(AndOfElemMatch, Matches) {
ASSERT(lt1->init("", baseOperandlt1["$lt"]).isOK());
unique_ptr<ElemMatchValueMatchExpression> elemMatch1(new ElemMatchValueMatchExpression());
- elemMatch1->init("x");
+ elemMatch1->init("x").transitional_ignore();
elemMatch1->add(gt1.release());
elemMatch1->add(lt1.release());
// elemMatch1 = { x : { $elemMatch : { $gt : 1 , $lt : 10 } } }
@@ -371,7 +371,7 @@ TEST(AndOfElemMatch, Matches) {
ASSERT(lt2->init("", baseOperandlt2["$lt"]).isOK());
unique_ptr<ElemMatchValueMatchExpression> elemMatch2(new ElemMatchValueMatchExpression());
- elemMatch2->init("x");
+ elemMatch2->init("x").transitional_ignore();
elemMatch2->add(gt2.release());
elemMatch2->add(lt2.release());
// elemMatch2 = { x : { $elemMatch : { $gt : 101 , $lt : 110 } } }
@@ -453,9 +453,9 @@ TEST(SizeMatchExpression, Equivalent) {
SizeMatchExpression e2;
SizeMatchExpression e3;
- e1.init("a", 5);
- e2.init("a", 6);
- e3.init("v", 5);
+ e1.init("a", 5).transitional_ignore();
+ e2.init("a", 6).transitional_ignore();
+ e3.init("v", 5).transitional_ignore();
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index f229eec42c2..7129c6413e0 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -391,7 +391,7 @@ bool GeoMatchExpression::equivalent(const MatchExpression* other) const {
std::unique_ptr<MatchExpression> GeoMatchExpression::shallowClone() const {
std::unique_ptr<GeoMatchExpression> next = stdx::make_unique<GeoMatchExpression>();
- next->init(path(), NULL, _rawObj);
+ next->init(path(), NULL, _rawObj).transitional_ignore();
next->_query = _query;
next->_canSkipValidation = _canSkipValidation;
if (getTag()) {
@@ -448,7 +448,7 @@ bool GeoNearMatchExpression::equivalent(const MatchExpression* other) const {
std::unique_ptr<MatchExpression> GeoNearMatchExpression::shallowClone() const {
std::unique_ptr<GeoNearMatchExpression> next = stdx::make_unique<GeoNearMatchExpression>();
- next->init(path(), NULL, _rawObj);
+ next->init(path(), NULL, _rawObj).transitional_ignore();
next->_query = _query;
if (getTag()) {
next->setTag(getTag()->clone());
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index cd987ff9fcb..4980986f9d4 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -513,7 +513,7 @@ Status InMatchExpression::init(StringData path) {
std::unique_ptr<MatchExpression> InMatchExpression::shallowClone() const {
auto next = stdx::make_unique<InMatchExpression>();
- next->init(path());
+ next->init(path()).transitional_ignore();
next->setCollator(_collator);
if (getTag()) {
next->setTag(getTag()->clone());
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index a2c62d3c344..093ebe7edad 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -144,7 +144,7 @@ public:
EqualityMatchExpression() : ComparisonMatchExpression(EQ) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<EqualityMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -158,7 +158,7 @@ public:
LTEMatchExpression() : ComparisonMatchExpression(LTE) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<LTEMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -172,7 +172,7 @@ public:
LTMatchExpression() : ComparisonMatchExpression(LT) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<LTMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -186,7 +186,7 @@ public:
GTMatchExpression() : ComparisonMatchExpression(GT) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<GTMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -200,7 +200,7 @@ public:
GTEMatchExpression() : ComparisonMatchExpression(GTE) {}
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ComparisonMatchExpression> e = stdx::make_unique<GTEMatchExpression>();
- e->init(path(), _rhs);
+ e->init(path(), _rhs).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -230,7 +230,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<RegexMatchExpression> e = stdx::make_unique<RegexMatchExpression>();
- e->init(path(), _regex, _flags);
+ e->init(path(), _regex, _flags).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -270,7 +270,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ModMatchExpression> m = stdx::make_unique<ModMatchExpression>();
- m->init(path(), _divisor, _remainder);
+ m->init(path(), _divisor, _remainder).transitional_ignore();
if (getTag()) {
m->setTag(getTag()->clone());
}
@@ -305,7 +305,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<ExistsMatchExpression> e = stdx::make_unique<ExistsMatchExpression>();
- e->init(path());
+ e->init(path()).transitional_ignore();
if (getTag()) {
e->setTag(getTag()->clone());
}
@@ -498,7 +498,7 @@ protected:
* ownership.
*/
void initClone(BitTestMatchExpression* clone) const {
- clone->init(path(), _bitPositions);
+ clone->init(path(), _bitPositions).transitional_ignore();
if (getTag()) {
clone->setTag(getTag()->clone());
}
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index 0bc43c24465..7ae77f66b2a 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -89,7 +89,7 @@ TEST(EqOp, MatchesElement) {
BSONObj notMatch = BSON("a" << 6);
EqualityMatchExpression eq;
- eq.init("", operand["a"]);
+ eq.init("", operand["a"]).transitional_ignore();
ASSERT(eq.matchesSingleElement(match.firstElement()));
ASSERT(!eq.matchesSingleElement(notMatch.firstElement()));
@@ -105,7 +105,7 @@ TEST(EqOp, InvalidEooOperand) {
TEST(EqOp, MatchesScalar) {
BSONObj operand = BSON("a" << 5);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << 5.0), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
}
@@ -113,7 +113,7 @@ TEST(EqOp, MatchesScalar) {
TEST(EqOp, MatchesArrayValue) {
BSONObj operand = BSON("a" << 5);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
}
@@ -121,7 +121,7 @@ TEST(EqOp, MatchesArrayValue) {
TEST(EqOp, MatchesReferencedObjectValue) {
BSONObj operand = BSON("a.b" << 5);
EqualityMatchExpression eq;
- eq.init("a.b", operand["a.b"]);
+ eq.init("a.b", operand["a.b"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << 5)), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSON("b" << BSON_ARRAY(5))), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON("b" << 5))), NULL));
@@ -130,7 +130,7 @@ TEST(EqOp, MatchesReferencedObjectValue) {
TEST(EqOp, MatchesReferencedArrayValue) {
BSONObj operand = BSON("a.0" << 5);
EqualityMatchExpression eq;
- eq.init("a.0", operand["a.0"]);
+ eq.init("a.0", operand["a.0"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << BSON_ARRAY(5)), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << BSON_ARRAY(BSON_ARRAY(5))), NULL));
}
@@ -138,7 +138,7 @@ TEST(EqOp, MatchesReferencedArrayValue) {
TEST(EqOp, MatchesNull) {
BSONObj operand = BSON("a" << BSONNULL);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSONObj(), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSONNULL), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
@@ -151,7 +151,7 @@ TEST(EqOp, MatchesNull) {
TEST(EqOp, MatchesNestedNull) {
BSONObj operand = BSON("a.b" << BSONNULL);
EqualityMatchExpression eq;
- eq.init("a.b", operand["a.b"]);
+ eq.init("a.b", operand["a.b"]).transitional_ignore();
// null matches any empty object that is on a subpath of a.b
ASSERT(eq.matchesBSON(BSONObj(), NULL));
ASSERT(eq.matchesBSON(BSON("a" << BSONObj()), NULL));
@@ -171,7 +171,7 @@ TEST(EqOp, MatchesNestedNull) {
TEST(EqOp, MatchesMinKey) {
BSONObj operand = BSON("a" << MinKey);
EqualityMatchExpression eq;
- eq.init("a", operand["a"]);
+ eq.init("a", operand["a"]).transitional_ignore();
ASSERT(eq.matchesBSON(BSON("a" << MinKey), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << MaxKey), NULL));
ASSERT(!eq.matchesBSON(BSON("a" << 4), NULL));
@@ -200,7 +200,7 @@ TEST(EqOp, MatchesFullArray) {
TEST(EqOp, MatchesThroughNestedArray) {
BSONObj operand = BSON("a.b.c.d" << 3);
EqualityMatchExpression eq;
- eq.init("a.b.c.d", operand["a.b.c.d"]);
+ eq.init("a.b.c.d", operand["a.b.c.d"]).transitional_ignore();
BSONObj obj = fromjson("{a:{b:[{c:[{d:1},{d:2}]},{c:[{d:3}]}]}}");
ASSERT(eq.matchesBSON(obj, NULL));
}
@@ -243,9 +243,9 @@ TEST(EqOp, Equality1) {
BSONObj operand = BSON("a" << 5 << "b" << 5 << "c" << 4);
- eq1.init("a", operand["a"]);
- eq2.init("a", operand["b"]);
- eq3.init("c", operand["c"]);
+ eq1.init("a", operand["a"]).transitional_ignore();
+ eq2.init("a", operand["b"]).transitional_ignore();
+ eq3.init("c", operand["c"]).transitional_ignore();
ASSERT(eq1.equivalent(&eq1));
ASSERT(eq1.equivalent(&eq2));
@@ -1239,10 +1239,10 @@ TEST(ModMatchExpression, Equality1) {
ModMatchExpression m3;
ModMatchExpression m4;
- m1.init("a", 1, 2);
- m2.init("a", 2, 2);
- m3.init("a", 1, 1);
- m4.init("b", 1, 2);
+ m1.init("a", 1, 2).transitional_ignore();
+ m2.init("a", 2, 2).transitional_ignore();
+ m3.init("a", 1, 1).transitional_ignore();
+ m4.init("b", 1, 2).transitional_ignore();
ASSERT(m1.equivalent(&m1));
ASSERT(!m1.equivalent(&m2));
@@ -1313,8 +1313,8 @@ TEST(ExistsMatchExpression, ElemMatchKey) {
TEST(ExistsMatchExpression, Equivalent) {
ExistsMatchExpression e1;
ExistsMatchExpression e2;
- e1.init("a");
- e2.init("b");
+ e1.init("a").transitional_ignore();
+ e2.init("b").transitional_ignore();
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
@@ -1459,9 +1459,9 @@ TEST(TypeMatchExpression, Equivalent) {
TypeMatchExpression e1;
TypeMatchExpression e2;
TypeMatchExpression e3;
- e1.initWithBSONType("a", String);
- e2.initWithBSONType("a", NumberDouble);
- e3.initWithBSONType("b", String);
+ e1.initWithBSONType("a", String).transitional_ignore();
+ e2.initWithBSONType("a", NumberDouble).transitional_ignore();
+ e3.initWithBSONType("b", String).transitional_ignore();
ASSERT(e1.equivalent(&e1));
ASSERT(!e1.equivalent(&e2));
@@ -1473,14 +1473,14 @@ TEST(InMatchExpression, MatchesElementSingle) {
BSONObj match = BSON("a" << 1);
BSONObj notMatch = BSON("a" << 2);
InMatchExpression in;
- in.addEquality(operand.firstElement());
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesSingleElement(match["a"]));
ASSERT(!in.matchesSingleElement(notMatch["a"]));
}
TEST(InMatchExpression, MatchesEmpty) {
InMatchExpression in;
- in.init("a");
+ in.init("a").transitional_ignore();
BSONObj notMatch = BSON("a" << 2);
ASSERT(!in.matchesSingleElement(notMatch["a"]));
@@ -1491,10 +1491,10 @@ TEST(InMatchExpression, MatchesEmpty) {
TEST(InMatchExpression, MatchesElementMultiple) {
BSONObj operand = BSON_ARRAY(1 << "r" << true << 1);
InMatchExpression in;
- in.addEquality(operand[0]);
- in.addEquality(operand[1]);
- in.addEquality(operand[2]);
- in.addEquality(operand[3]);
+ in.addEquality(operand[0]).transitional_ignore();
+ in.addEquality(operand[1]).transitional_ignore();
+ in.addEquality(operand[2]).transitional_ignore();
+ in.addEquality(operand[3]).transitional_ignore();
BSONObj matchFirst = BSON("a" << 1);
BSONObj matchSecond = BSON("a"
@@ -1511,8 +1511,8 @@ TEST(InMatchExpression, MatchesElementMultiple) {
TEST(InMatchExpression, MatchesScalar) {
BSONObj operand = BSON_ARRAY(5);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << 5.0), NULL));
ASSERT(!in.matchesBSON(BSON("a" << 4), NULL));
@@ -1521,8 +1521,8 @@ TEST(InMatchExpression, MatchesScalar) {
TEST(InMatchExpression, MatchesArrayValue) {
BSONObj operand = BSON_ARRAY(5);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(5.0 << 6)), NULL));
ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(6 << 7)), NULL));
@@ -1533,8 +1533,8 @@ TEST(InMatchExpression, MatchesNull) {
BSONObj operand = BSON_ARRAY(BSONNULL);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSONObj(), NULL));
ASSERT(in.matchesBSON(BSON("a" << BSONNULL), NULL));
@@ -1547,7 +1547,7 @@ TEST(InMatchExpression, MatchesUndefined) {
BSONObj operand = BSON_ARRAY(BSONUndefined);
InMatchExpression in;
- in.init("a");
+ in.init("a").transitional_ignore();
Status s = in.addEquality(operand.firstElement());
ASSERT_NOT_OK(s);
}
@@ -1555,8 +1555,8 @@ TEST(InMatchExpression, MatchesUndefined) {
TEST(InMatchExpression, MatchesMinKey) {
BSONObj operand = BSON_ARRAY(MinKey);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << MinKey), NULL));
ASSERT(!in.matchesBSON(BSON("a" << MaxKey), NULL));
@@ -1566,8 +1566,8 @@ TEST(InMatchExpression, MatchesMinKey) {
TEST(InMatchExpression, MatchesMaxKey) {
BSONObj operand = BSON_ARRAY(MaxKey);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand.firstElement());
+ in.init("a").transitional_ignore();
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << MaxKey), NULL));
ASSERT(!in.matchesBSON(BSON("a" << MinKey), NULL));
@@ -1577,10 +1577,10 @@ TEST(InMatchExpression, MatchesMaxKey) {
TEST(InMatchExpression, MatchesFullArray) {
BSONObj operand = BSON_ARRAY(BSON_ARRAY(1 << 2) << 4 << 5);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand[0]);
- in.addEquality(operand[1]);
- in.addEquality(operand[2]);
+ in.init("a").transitional_ignore();
+ in.addEquality(operand[0]).transitional_ignore();
+ in.addEquality(operand[1]).transitional_ignore();
+ in.addEquality(operand[2]).transitional_ignore();
ASSERT(in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2)), NULL));
ASSERT(!in.matchesBSON(BSON("a" << BSON_ARRAY(1 << 2 << 3)), NULL));
@@ -1591,9 +1591,9 @@ TEST(InMatchExpression, MatchesFullArray) {
TEST(InMatchExpression, ElemMatchKey) {
BSONObj operand = BSON_ARRAY(5 << 2);
InMatchExpression in;
- in.init("a");
- in.addEquality(operand[0]);
- in.addEquality(operand[1]);
+ in.init("a").transitional_ignore();
+ in.addEquality(operand[0]).transitional_ignore();
+ in.addEquality(operand[1]).transitional_ignore();
MatchDetails details;
details.requestElemMatchKey();
@@ -1611,7 +1611,7 @@ TEST(InMatchExpression, InMatchExpressionsWithDifferentNumbersOfElementsAreUnequ
<< "string");
InMatchExpression eq1;
InMatchExpression eq2;
- eq1.addEquality(obj.firstElement());
+ eq1.addEquality(obj.firstElement()).transitional_ignore();
ASSERT(!eq1.equivalent(&eq2));
}
@@ -1647,8 +1647,8 @@ TEST(InMatchExpression, InMatchExpressionsWithCollationEquivalentElementsAreEqua
InMatchExpression eq2;
eq2.setCollator(&collator2);
- eq1.addEquality(obj1.firstElement());
- eq2.addEquality(obj2.firstElement());
+ eq1.addEquality(obj1.firstElement()).transitional_ignore();
+ eq2.addEquality(obj2.firstElement()).transitional_ignore();
ASSERT(eq1.equivalent(&eq2));
}
@@ -1664,8 +1664,8 @@ TEST(InMatchExpression, InMatchExpressionsWithCollationNonEquivalentElementsAreU
InMatchExpression eq2;
eq2.setCollator(&collator2);
- eq1.addEquality(obj1.firstElement());
- eq2.addEquality(obj2.firstElement());
+ eq1.addEquality(obj1.firstElement()).transitional_ignore();
+ eq2.addEquality(obj2.firstElement()).transitional_ignore();
ASSERT(!eq1.equivalent(&eq2));
}
@@ -1674,7 +1674,7 @@ TEST(InMatchExpression, StringMatchingWithNullCollatorUsesBinaryComparison) {
BSONObj notMatch = BSON("a"
<< "string2");
InMatchExpression in;
- in.addEquality(operand.firstElement());
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(!in.matchesSingleElement(notMatch["a"]));
}
@@ -1685,7 +1685,7 @@ TEST(InMatchExpression, StringMatchingRespectsCollation) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kAlwaysEqual);
InMatchExpression in;
in.setCollator(&collator);
- in.addEquality(operand.firstElement());
+ in.addEquality(operand.firstElement()).transitional_ignore();
ASSERT(in.matchesSingleElement(match["a"]));
}
@@ -1698,8 +1698,8 @@ TEST(InMatchExpression, ChangingCollationAfterAddingEqualitiesPreservesEqualitie
CollatorInterfaceMock collatorReverseString(CollatorInterfaceMock::MockType::kReverseString);
InMatchExpression in;
in.setCollator(&collatorAlwaysEqual);
- in.addEquality(obj1.firstElement());
- in.addEquality(obj2.firstElement());
+ in.addEquality(obj1.firstElement()).transitional_ignore();
+ in.addEquality(obj2.firstElement()).transitional_ignore();
ASSERT(in.getEqualities().size() == 1);
in.setCollator(&collatorReverseString);
ASSERT(in.getEqualities().size() == 2);
diff --git a/src/mongo/db/matcher/expression_parser_geo_test.cpp b/src/mongo/db/matcher/expression_parser_geo_test.cpp
index b4450c41785..63008393ad6 100644
--- a/src/mongo/db/matcher/expression_parser_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_geo_test.cpp
@@ -1,5 +1,3 @@
-// expression_parser_geo_test.cpp
-
/**
* Copyright (C) 2013 10gen Inc.
*
@@ -80,7 +78,8 @@ TEST(MatchExpressionParserGeoNear, ParseNearExtraField) {
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
@@ -125,21 +124,24 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidNear) {
BSONObj query = fromjson("{loc: {$near: [0,0], $maxDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$near: [0,0], $minDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$near: [0,0], $eq: 40}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
@@ -154,7 +156,8 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidNear) {
"{loc: {$near: [0,0], $geoWithin: {$geometry: {type: \"Polygon\", coordinates: []}}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
@@ -208,21 +211,24 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidGeoNear) {
BSONObj query = fromjson("{loc: {$geoNear: [0,0], $eq: 1}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$geoNear: [0,0], $maxDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$geoNear: [0,0], $minDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
}
@@ -262,21 +268,24 @@ TEST(MatchExpressionParserGeoNear, ParseInvalidNearSphere) {
BSONObj query = fromjson("{loc: {$nearSphere: [0,0], $maxDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$nearSphere: [0,0], $minDistance: {}}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
{
BSONObj query = fromjson("{loc: {$nearSphere: [0,0], $eq: 1}}");
const CollatorInterface* collator = nullptr;
ASSERT_THROWS(
- MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator),
+ MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator)
+ .status_with_transitional_ignore(),
UserException);
}
}
diff --git a/src/mongo/db/matcher/expression_test.cpp b/src/mongo/db/matcher/expression_test.cpp
index ed135f4d46c..9b65644042d 100644
--- a/src/mongo/db/matcher/expression_test.cpp
+++ b/src/mongo/db/matcher/expression_test.cpp
@@ -43,7 +43,7 @@ namespace mongo {
TEST(LeafMatchExpressionTest, Equal1) {
BSONObj temp = BSON("x" << 5);
EqualityMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : [5] }")));
@@ -62,7 +62,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
LTEMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 6 }")));
@@ -71,7 +71,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
LTMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 6 }")));
@@ -80,7 +80,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
GTEMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 6 }")));
@@ -89,7 +89,7 @@ TEST(LeafMatchExpressionTest, Comp1) {
{
GTMatchExpression e;
- e.init("x", temp["x"]);
+ e.init("x", temp["x"]).transitional_ignore();
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 5 }")));
ASSERT_FALSE(e.matchesBSON(fromjson("{ x : 4 }")));
ASSERT_TRUE(e.matchesBSON(fromjson("{ x : 6 }")));
diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h
index d444f5b3ddc..7432e5a15f8 100644
--- a/src/mongo/db/matcher/expression_tree.h
+++ b/src/mongo/db/matcher/expression_tree.h
@@ -184,7 +184,7 @@ public:
virtual std::unique_ptr<MatchExpression> shallowClone() const {
std::unique_ptr<NotMatchExpression> self = stdx::make_unique<NotMatchExpression>();
- self->init(_exp->shallowClone().release());
+ self->init(_exp->shallowClone().release()).transitional_ignore();
if (getTag()) {
self->setTag(getTag()->clone());
}
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index 1cd441b9b82..d5682a61f5c 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -193,8 +193,10 @@ bool BSONElementIterator::subCursorHasMore() {
}
_subCursorPath.reset(new ElementPath());
- _subCursorPath->init(_arrayIterationState.restOfPath.substr(
- _arrayIterationState.nextPieceOfPath.size() + 1));
+ _subCursorPath
+ ->init(_arrayIterationState.restOfPath.substr(
+ _arrayIterationState.nextPieceOfPath.size() + 1))
+ .transitional_ignore();
_subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
// If we're here, we must be able to traverse nonleaf arrays.
@@ -270,7 +272,7 @@ bool BSONElementIterator::more() {
// The current array element is a subdocument. See if the subdocument generates
// any elements matching the remaining subpath.
_subCursorPath.reset(new ElementPath());
- _subCursorPath->init(_arrayIterationState.restOfPath);
+ _subCursorPath->init(_arrayIterationState.restOfPath).transitional_ignore();
_subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
_subCursor.reset(new BSONElementIterator(_subCursorPath.get(), eltInArray.Obj()));
@@ -295,8 +297,10 @@ bool BSONElementIterator::more() {
// The current array element is itself an array. See if the nested array
// has any elements matching the remainihng.
_subCursorPath.reset(new ElementPath());
- _subCursorPath->init(_arrayIterationState.restOfPath.substr(
- _arrayIterationState.nextPieceOfPath.size() + 1));
+ _subCursorPath
+ ->init(_arrayIterationState.restOfPath.substr(
+ _arrayIterationState.nextPieceOfPath.size() + 1))
+ .transitional_ignore();
_subCursorPath->setTraverseLeafArray(_path->shouldTraverseLeafArray());
BSONElementIterator* real = new BSONElementIterator(
_subCursorPath.get(), _arrayIterationState._current.Obj());
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index f679f71b303..0f00b00d615 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -512,18 +512,18 @@ Status addMongodOptions(moe::OptionSection* options) {
.setSources(moe::SourceYAMLConfig);
- options->addSection(general_options);
+ options->addSection(general_options).transitional_ignore();
#if defined(_WIN32)
- options->addSection(windows_scm_options);
+ options->addSection(windows_scm_options).transitional_ignore();
#endif
- options->addSection(replication_options);
- options->addSection(ms_options);
- options->addSection(rs_options);
- options->addSection(sharding_options);
+ options->addSection(replication_options).transitional_ignore();
+ options->addSection(ms_options).transitional_ignore();
+ options->addSection(rs_options).transitional_ignore();
+ options->addSection(sharding_options).transitional_ignore();
#ifdef MONGO_CONFIG_SSL
- options->addSection(ssl_options);
+ options->addSection(ssl_options).transitional_ignore();
#endif
- options->addSection(storage_options);
+ options->addSection(storage_options).transitional_ignore();
// The following are legacy options that are disallowed in the JSON config file
diff --git a/src/mongo/db/ops/modifier_add_to_set.cpp b/src/mongo/db/ops/modifier_add_to_set.cpp
index 5e1d5be480d..23825863b39 100644
--- a/src/mongo/db/ops/modifier_add_to_set.cpp
+++ b/src/mongo/db/ops/modifier_add_to_set.cpp
@@ -64,7 +64,7 @@ void deduplicate(mb::Element parent, Ordering comp, Equality equal) {
std::vector<mb::Element>::iterator next = where;
++next;
while (next != end && equal(*where, *next)) {
- next->remove();
+ next->remove().transitional_ignore();
++next;
}
where = next;
diff --git a/src/mongo/db/ops/modifier_pull.cpp b/src/mongo/db/ops/modifier_pull.cpp
index 75bfaa4aa30..70d5442a716 100644
--- a/src/mongo/db/ops/modifier_pull.cpp
+++ b/src/mongo/db/ops/modifier_pull.cpp
@@ -214,7 +214,7 @@ Status ModifierPull::apply() const {
std::vector<mb::Element>::const_iterator where = _preparedState->elementsToRemove.begin();
const std::vector<mb::Element>::const_iterator end = _preparedState->elementsToRemove.end();
for (; where != end; ++where)
- const_cast<mb::Element&>(*where).remove();
+ const_cast<mb::Element&>(*where).remove().transitional_ignore();
return Status::OK();
}
diff --git a/src/mongo/db/ops/modifier_pull_all.cpp b/src/mongo/db/ops/modifier_pull_all.cpp
index 26a3d6d24ff..0659e91ef38 100644
--- a/src/mongo/db/ops/modifier_pull_all.cpp
+++ b/src/mongo/db/ops/modifier_pull_all.cpp
@@ -212,7 +212,7 @@ Status ModifierPullAll::apply() const {
vector<mutablebson::Element>::const_iterator curr = _preparedState->elementsToRemove.begin();
const vector<mutablebson::Element>::const_iterator end = _preparedState->elementsToRemove.end();
for (; curr != end; ++curr) {
- const_cast<mutablebson::Element&>(*curr).remove();
+ const_cast<mutablebson::Element&>(*curr).remove().transitional_ignore();
}
return Status::OK();
}
diff --git a/src/mongo/db/ops/modifier_push.cpp b/src/mongo/db/ops/modifier_push.cpp
index 6be330cc98f..2e8acb4de53 100644
--- a/src/mongo/db/ops/modifier_push.cpp
+++ b/src/mongo/db/ops/modifier_push.cpp
@@ -623,7 +623,7 @@ Status ModifierPush::apply() const {
// Slice 0 means to remove all
if (_slice == 0) {
while (_preparedState->elemFound.ok() && _preparedState->elemFound.rightChild().ok()) {
- _preparedState->elemFound.rightChild().remove();
+ _preparedState->elemFound.rightChild().remove().transitional_ignore();
}
}
diff --git a/src/mongo/db/ops/modifier_set_test.cpp b/src/mongo/db/ops/modifier_set_test.cpp
index 354aae01929..45c26e495a2 100644
--- a/src/mongo/db/ops/modifier_set_test.cpp
+++ b/src/mongo/db/ops/modifier_set_test.cpp
@@ -134,7 +134,7 @@ TEST(SimpleMod, PrepareIdentityOpOnDeserializedIsNotANoOp) {
Document doc(fromjson("{a: { b: NumberInt(0)}}"));
// Apply a mutation to the document that will make it non-serialized.
- doc.root()["a"]["b"].setValueInt(2);
+ doc.root()["a"]["b"].setValueInt(2).transitional_ignore();
// Apply an op that would be a no-op.
Mod setMod(fromjson("{$set: {a: {b : NumberInt(2)}}}"));
@@ -786,9 +786,9 @@ TEST(Ephemeral, ApplySetModToEphemeralDocument) {
// $set.
Document doc;
Element x = doc.makeElementObject("x");
- doc.root().pushBack(x);
+ doc.root().pushBack(x).transitional_ignore();
Element a = doc.makeElementInt("a", 100);
- x.pushBack(a);
+ x.pushBack(a).transitional_ignore();
Mod setMod(fromjson("{ $set: { x: { a: 100, b: 2 }}}"), true);
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index f8b20c2d10c..e5154ff279d 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -218,8 +218,9 @@ bool handleError(OperationContext* opCtx,
}
if (!opCtx->getClient()->isInDirectClient()) {
- ShardingState::get(opCtx)->onStaleShardVersion(
- opCtx, wholeOp.ns, staleConfigException->getVersionReceived());
+ ShardingState::get(opCtx)
+ ->onStaleShardVersion(opCtx, wholeOp.ns, staleConfigException->getVersionReceived())
+ .transitional_ignore();
}
out->staleConfigException =
stdx::make_unique<SendStaleConfigException>(*staleConfigException);
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 1b7d19f2d2f..bf37a88955e 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -437,10 +437,10 @@ boost::intrusive_ptr<DocumentSourceMatch> DocumentSourceMatch::descendMatchOnPat
if (node->isLeaf() && node->matchType() != MatchExpression::TYPE_OPERATOR &&
node->matchType() != MatchExpression::WHERE) {
auto leafNode = static_cast<LeafMatchExpression*>(node);
- leafNode->setPath(newPath);
+ leafNode->setPath(newPath).transitional_ignore();
} else if (node->isArray()) {
auto arrayNode = static_cast<ArrayMatchingMatchExpression*>(node);
- arrayNode->setPath(newPath);
+ arrayNode->setPath(newPath).transitional_ignore();
}
});
diff --git a/src/mongo/db/prefetch.cpp b/src/mongo/db/prefetch.cpp
index f9f6beb455f..cc59d01e1d2 100644
--- a/src/mongo/db/prefetch.cpp
+++ b/src/mongo/db/prefetch.cpp
@@ -86,7 +86,7 @@ void prefetchIndexPages(OperationContext* opCtx,
return;
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
invariant(iam);
- iam->touch(opCtx, obj);
+ iam->touch(opCtx, obj).transitional_ignore();
} catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
}
@@ -104,7 +104,7 @@ void prefetchIndexPages(OperationContext* opCtx,
IndexDescriptor* desc = ii.next();
IndexAccessMethod* iam = collection->getIndexCatalog()->getIndex(desc);
verify(iam);
- iam->touch(opCtx, obj);
+ iam->touch(opCtx, obj).transitional_ignore();
} catch (const DBException& e) {
LOG(2) << "ignoring exception in prefetchIndexPages(): " << redact(e);
}
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 55d1b49f5cb..ace19183ff5 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -325,7 +325,7 @@ MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
// Make a NOT to be the new root and transfer ownership of the child to it.
auto newRoot = stdx::make_unique<NotMatchExpression>();
- newRoot->init(child.release());
+ newRoot->init(child.release()).transitional_ignore();
return newRoot.release();
}
@@ -358,7 +358,7 @@ MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
// Create a new RegexMatchExpression, because 'childRe' does not have a path.
auto re = stdx::make_unique<RegexMatchExpression>();
- re->init(in->path(), childRe->getString(), childRe->getFlags());
+ re->init(in->path(), childRe->getString(), childRe->getFlags()).transitional_ignore();
if (in->getTag()) {
re->setTag(in->getTag()->clone());
}
@@ -368,7 +368,7 @@ MatchExpression* CanonicalQuery::normalizeTree(MatchExpression* root) {
// IN of 1 equality is the equality.
if (in->getEqualities().size() == 1 && in->getRegexes().empty()) {
auto eq = stdx::make_unique<EqualityMatchExpression>();
- eq->init(in->path(), *(in->getEqualities().begin()));
+ eq->init(in->path(), *(in->getEqualities().begin())).transitional_ignore();
eq->setCollator(in->getCollator());
if (in->getTag()) {
eq->setTag(in->getTag()->clone());
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index a874f1f89e3..5428386103f 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -550,7 +550,7 @@ TEST(CanonicalQueryTest, NormalizeWithInPreservesCollator) {
BSONObj obj = fromjson("{'': 'string'}");
auto inMatchExpression = stdx::make_unique<InMatchExpression>();
inMatchExpression->setCollator(&collator);
- inMatchExpression->addEquality(obj.firstElement());
+ inMatchExpression->addEquality(obj.firstElement()).transitional_ignore();
unique_ptr<MatchExpression> matchExpression(
CanonicalQuery::normalizeTree(inMatchExpression.release()));
ASSERT(matchExpression->matchType() == MatchExpression::MatchType::EQ);
diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp
index d01f9b72dcb..783438a482c 100644
--- a/src/mongo/db/query/planner_analysis_test.cpp
+++ b/src/mongo/db/query/planner_analysis_test.cpp
@@ -180,7 +180,7 @@ TEST(QueryPlannerAnalysis, GeoSkipValidation) {
std::unique_ptr<GeoMatchExpression> exprPtr = stdx::make_unique<GeoMatchExpression>();
GeoMatchExpression* expr = exprPtr.get();
- expr->init("geometry.field", nullptr, BSONObj());
+ expr->init("geometry.field", nullptr, BSONObj()).transitional_ignore();
FetchNode* fetchNode = fetchNodePtr.get();
// Takes ownership.
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 07ba13c47e4..4e706b966ec 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -832,7 +832,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
enumParams.indices = &relevantIndices;
PlanEnumerator isp(enumParams);
- isp.init();
+ isp.init().transitional_ignore();
MatchExpression* rawTree;
while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index f09dbc6319b..a73815cbb8f 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -329,7 +329,7 @@ void BackgroundSync::_produce(OperationContext* opCtx) {
log() << "Our newest OpTime : " << lastOpTimeFetched;
log() << "Earliest OpTime available is " << syncSourceResp.earliestOpTimeSeen
<< " from " << syncSourceResp.getSyncSource();
- _replCoord->abortCatchupIfNeeded();
+ _replCoord->abortCatchupIfNeeded().transitional_ignore();
return;
}
@@ -586,7 +586,7 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
StorageInterface* storageInterface) {
if (_replCoord->getMemberState().primary()) {
warning() << "Rollback situation detected in catch-up mode. Aborting catch-up mode.";
- _replCoord->abortCatchupIfNeeded();
+ _replCoord->abortCatchupIfNeeded().transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 230a13f43ae..cdf5e7fda84 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -442,7 +442,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
rpc::ReplSetMetadata::kNoPrimary,
-1);
BSONObjBuilder metadataBuilder;
- metadata.writeToMetadata(&metadataBuilder);
+ metadata.writeToMetadata(&metadataBuilder).transitional_ignore();
getNet()->scheduleResponse(
noi,
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 31803378b07..4fb7a7fe208 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -94,7 +94,9 @@ void CollectionClonerTest::setUp() {
const CollectionOptions& options,
const BSONObj idIndexSpec,
const std::vector<BSONObj>& secondaryIndexSpecs) {
- (_loader = new CollectionBulkLoaderMock(&collectionStats))->init(secondaryIndexSpecs);
+ (_loader = new CollectionBulkLoaderMock(&collectionStats))
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(_loader));
@@ -352,7 +354,7 @@ TEST_F(CollectionClonerTest, DoNotCreateIDIndexIfAutoIndexIdUsed) {
collNss = theNss;
collOptions = theOptions;
collIndexSpecs = theIndexSpecs;
- loader->init(theIndexSpecs);
+ loader->init(theIndexSpecs).transitional_ignore();
return std::unique_ptr<CollectionBulkLoader>(loader);
};
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index 364e737fba7..593090f9ae0 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -101,7 +101,8 @@ void DatabaseClonerTest::setUp() {
const std::vector<BSONObj>& secondaryIndexSpecs) {
const auto collInfo = &_collections[nss];
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
diff --git a/src/mongo/db/repl/databases_cloner_test.cpp b/src/mongo/db/repl/databases_cloner_test.cpp
index 36020cea9a8..5e7ceacae2b 100644
--- a/src/mongo/db/repl/databases_cloner_test.cpp
+++ b/src/mongo/db/repl/databases_cloner_test.cpp
@@ -176,7 +176,8 @@ protected:
log() << "reusing collection during test which may cause problems, ns:" << nss;
}
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
index 8bafea199b9..4dcabbcdd09 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp
@@ -132,7 +132,7 @@ TEST_F(DropPendingCollectionReaperTest,
opTime[i] = OpTime({Seconds((i + 1) * 10), 0}, 1LL);
ns[i] = NamespaceString("test", str::stream() << "coll" << i);
dpns[i] = ns[i].makeDropPendingNamespace(opTime[i]);
- _storageInterface->createCollection(opCtx.get(), dpns[i], {});
+ _storageInterface->createCollection(opCtx.get(), dpns[i], {}).transitional_ignore();
}
// Add drop-pending namespaces with drop optimes out of order and check that
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index d9872693f8e..a327208c172 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -236,7 +236,7 @@ public:
int selfConfigIndex = 0;
ReplSetConfig config;
- config.initialize(configObj);
+ config.initialize(configObj).transitional_ignore();
std::vector<HostAndPort> hosts;
for (ReplSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index d9f0b9a8872..f0b86dbd0af 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -833,17 +833,19 @@ public:
Timestamp lastOpTimeApplied(100, 0);
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 1
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1")
- << BSON("_id" << 2 << "host"
- << "host2"))));
+ config
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2"))))
+ .transitional_ignore();
std::vector<HostAndPort> hosts;
for (ReplSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 6ef1ad277d2..54218230581 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -232,7 +232,7 @@ InitialSyncer::InitialSyncer(
InitialSyncer::~InitialSyncer() {
DESTRUCTOR_GUARD({
- shutdown();
+ shutdown().transitional_ignore();
join();
});
}
diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp
index 1a55a5d83dc..8dc0b62f53a 100644
--- a/src/mongo/db/repl/initial_syncer_test.cpp
+++ b/src/mongo/db/repl/initial_syncer_test.cpp
@@ -269,7 +269,8 @@ protected:
log() << "reusing collection during test which may cause problems, ns:" << nss;
}
(collInfo->loader = new CollectionBulkLoaderMock(&collInfo->stats))
- ->init(secondaryIndexSpecs);
+ ->init(secondaryIndexSpecs)
+ .transitional_ignore();
return StatusWith<std::unique_ptr<CollectionBulkLoader>>(
std::unique_ptr<CollectionBulkLoader>(collInfo->loader));
@@ -903,7 +904,7 @@ TEST_F(InitialSyncerTest, InitialSyncerRecreatesOplogAndDropsReplicatedDatabases
auto oldCreateOplogFn = _storageInterface->createOplogFn;
_storageInterface->createOplogFn = [oldCreateOplogFn](OperationContext* opCtx,
const NamespaceString& nss) {
- oldCreateOplogFn(opCtx, nss);
+ oldCreateOplogFn(opCtx, nss).transitional_ignore();
return Status(ErrorCodes::OperationFailed, "oplog creation failed");
};
@@ -1856,7 +1857,7 @@ TEST_F(InitialSyncerTest,
net->blackHole(noi);
}
- initialSyncer->shutdown();
+ initialSyncer->shutdown().transitional_ignore();
executor::NetworkInterfaceMock::InNetworkGuard(net)->runReadyNetworkOperations();
initialSyncer->join();
@@ -2086,7 +2087,7 @@ TEST_F(
OperationContext*, const NamespaceString& nss, const BSONObj& doc) {
insertDocumentNss = nss;
insertDocumentDoc = doc;
- initialSyncer->shutdown();
+ initialSyncer->shutdown().transitional_ignore();
return Status::OK();
};
@@ -3130,7 +3131,7 @@ TEST_F(
const MultiApplier::Operations& ops,
MultiApplier::ApplyOperationFn applyOperation) {
// 'OperationPtr*' is ignored by our overridden _multiInitialSyncApply().
- applyOperation(nullptr);
+ applyOperation(nullptr).transitional_ignore();
return ops.back().getOpTime();
};
bool fetchCountIncremented = false;
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index bf69d4e02bd..79e22f471c3 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1267,7 +1267,7 @@ void SnapshotThread::run() {
name = replCoord->reserveSnapshotName(nullptr);
// This establishes the view that we will name.
- _manager->prepareForCreateSnapshot(opCtx.get());
+ _manager->prepareForCreateSnapshot(opCtx.get()).transitional_ignore();
}
auto opTimeOfSnapshot = OpTime();
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 961926a1f35..c35fa20b046 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -859,7 +859,7 @@ void ReplicationCoordinatorExternalStateImpl::createSnapshot(OperationContext* o
SnapshotName name) {
auto manager = _service->getGlobalStorageEngine()->getSnapshotManager();
invariant(manager); // This should never be called if there is no SnapshotManager.
- manager->createSnapshot(opCtx, name);
+ manager->createSnapshot(opCtx, name).transitional_ignore();
}
void ReplicationCoordinatorExternalStateImpl::forceSnapshotCreation() {
@@ -959,7 +959,7 @@ void ReplicationCoordinatorExternalStateImpl::onDurable(const JournalListener::T
void ReplicationCoordinatorExternalStateImpl::startNoopWriter(OpTime opTime) {
invariant(_noopWriter);
- _noopWriter->startWritingPeriodicNoops(opTime);
+ _noopWriter->startWritingPeriodicNoops(opTime).transitional_ignore();
}
void ReplicationCoordinatorExternalStateImpl::stopNoopWriter() {
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index bb2c3e1f0da..2d3ba978e54 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -611,7 +611,7 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
// Clear maint. mode.
while (getMaintenanceMode()) {
- setMaintenanceMode(false);
+ setMaintenanceMode(false).transitional_ignore();
}
if (startCompleted) {
@@ -2208,14 +2208,16 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(
// Do not conduct an election during a reconfig, as the node may not be electable post-reconfig.
if (auto electionFinishedEvent = _cancelElectionIfNeeded_inlock()) {
// Wait for the election to complete and the node's Role to be set to follower.
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
- this,
- stdx::placeholders::_1,
- newConfig,
- isForceReconfig,
- myIndex,
- finishedEvent));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_finishReplSetReconfig,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ isForceReconfig,
+ myIndex,
+ finishedEvent))
+ .status_with_transitional_ignore();
return;
}
@@ -3028,12 +3030,12 @@ void ReplicationCoordinatorImpl::_prepareReplSetMetadata_inlock(const OpTime& la
OpTime lastVisibleOpTime =
std::max(lastOpTimeFromClient, _getCurrentCommittedSnapshotOpTime_inlock());
auto metadata = _topCoord->prepareReplSetMetadata(lastVisibleOpTime);
- metadata.writeToMetadata(builder);
+ metadata.writeToMetadata(builder).transitional_ignore();
}
void ReplicationCoordinatorImpl::_prepareOplogQueryMetadata_inlock(int rbid,
BSONObjBuilder* builder) const {
- _topCoord->prepareOplogQueryMetadata(rbid).writeToMetadata(builder);
+ _topCoord->prepareOplogQueryMetadata(rbid).writeToMetadata(builder).transitional_ignore();
}
bool ReplicationCoordinatorImpl::isV1ElectionProtocol() const {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index 322c84246e6..91b1c5dfc15 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -142,9 +142,10 @@ void ReplicationCoordinatorImpl::_startElectSelf_inlock() {
return;
}
fassert(18681, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
@@ -217,9 +218,10 @@ void ReplicationCoordinatorImpl::_onFreshnessCheckComplete() {
}
fassert(18685, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onElectCmdRunnerComplete, this))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index 5462732a99e..f09de3c8b80 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -391,15 +391,17 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index d227f78b76b..394878e52ec 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -149,8 +149,10 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock() {
return;
}
fassert(28685, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term))
+ .status_with_transitional_ignore();
lossGuard.dismiss();
}
@@ -244,9 +246,10 @@ void ReplicationCoordinatorImpl::_startVoteRequester_inlock(long long newTerm) {
return;
}
fassert(28643, nextPhaseEvh.getStatus());
- _replExecutor->onEvent(
- nextPhaseEvh.getValue(),
- stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, newTerm));
+ _replExecutor
+ ->onEvent(nextPhaseEvh.getValue(),
+ stdx::bind(&ReplicationCoordinatorImpl::_onVoteRequestComplete, this, newTerm))
+ .status_with_transitional_ignore();
}
void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long originalTerm) {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index a1c34534c3f..6d0ade99358 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -470,17 +470,19 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "protocolVersion"
- << 1));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "protocolVersion"
+ << 1))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
@@ -759,7 +761,7 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
simulateEnoughHeartbeatsForAllNodesUp();
simulateSuccessfulDryRun();
// update to a future term before the election completes
- getReplCoord()->updateTerm(&opCtx, 1000);
+ getReplCoord()->updateTerm(&opCtx, 1000).transitional_ignore();
NetworkInterfaceMock* net = getNet();
net->enterNetwork();
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 04216de9202..379a18fb231 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -337,8 +337,12 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_stepDownStart()
return finishEvent;
}
- _replExecutor->scheduleWork(stdx::bind(
- &ReplicationCoordinatorImpl::_stepDownFinish, this, stdx::placeholders::_1, finishEvent));
+ _replExecutor
+ ->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_stepDownFinish,
+ this,
+ stdx::placeholders::_1,
+ finishEvent))
+ .status_with_transitional_ignore();
return finishEvent;
}
@@ -398,17 +402,21 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
<< newConfig.getConfigVersion()
<< " to be processed after election is cancelled.";
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig))
+ .status_with_transitional_ignore();
return;
}
- _replExecutor->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
- this,
- stdx::placeholders::_1,
- newConfig));
+ _replExecutor
+ ->scheduleWork(stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigStore,
+ this,
+ stdx::placeholders::_1,
+ newConfig))
+ .status_with_transitional_ignore();
}
void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
@@ -490,13 +498,14 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
if (MONGO_FAIL_POINT(blockHeartbeatReconfigFinish)) {
LOG_FOR_HEARTBEATS(0) << "blockHeartbeatReconfigFinish fail point enabled. Rescheduling "
"_heartbeatReconfigFinish until fail point is disabled.";
- _replExecutor->scheduleWorkAt(
- _replExecutor->now() + Milliseconds{10},
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
+ _replExecutor
+ ->scheduleWorkAt(_replExecutor->now() + Milliseconds{10},
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex))
+ .status_with_transitional_ignore();
return;
}
@@ -522,12 +531,14 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
<< "Waiting for election to complete before finishing reconfig to version "
<< newConfig.getConfigVersion();
// Wait for the election to complete and the node's Role to be set to follower.
- _replExecutor->onEvent(electionFinishedEvent,
- stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
- this,
- stdx::placeholders::_1,
- newConfig,
- myIndex));
+ _replExecutor
+ ->onEvent(electionFinishedEvent,
+ stdx::bind(&ReplicationCoordinatorImpl::_heartbeatReconfigFinish,
+ this,
+ stdx::placeholders::_1,
+ newConfig,
+ myIndex))
+ .status_with_transitional_ignore();
return;
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 59eee099c93..9a00ab76f2e 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -379,7 +379,7 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
rpc::ReplSetMetadata metadata(
opTime.getTerm(), opTime, opTime, rsConfig.getConfigVersion(), unexpectedId, 1, -1);
BSONObjBuilder metadataBuilder;
- metadata.writeToMetadata(&metadataBuilder);
+ metadata.writeToMetadata(&metadataBuilder).transitional_ignore();
heartbeatResponse = makeResponseStatus(responseBuilder.obj(), metadataBuilder.obj());
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 761359552ed..e6fecc98450 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -520,15 +520,17 @@ TEST_F(
net->enterNetwork();
ReplSetHeartbeatResponse hbResp2;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
hbResp2.setSetName("mySet");
@@ -591,15 +593,17 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
const NetworkInterfaceMock::NetworkOperationIterator noi = net->getNextReadyRequest();
ReplSetHeartbeatResponse hbResp;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "version"
- << 4
- << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))))
+ .transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(4);
hbResp.setSetName("mySet");
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 2fd396c1218..896cea7bd5c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -979,10 +979,10 @@ TEST_F(
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// Majority satisfied but not either custom mode
- getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 1, time1);
- getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 2, time1);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 1, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 1, time1).transitional_ignore();
+ getReplCoord()->setLastAppliedOptime_forTest(2, 2, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 2, time1).transitional_ignore();
getReplCoord()->createSnapshot(opCtx.get(), time1, SnapshotName(1));
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
@@ -993,8 +993,8 @@ TEST_F(
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
// All modes satisfied
- getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1);
- getReplCoord()->setLastDurableOptime_forTest(2, 3, time1);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 3, time1).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 3, time1).transitional_ignore();
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time1, majorityWriteConcern);
ASSERT_OK(statusAndDur.status);
@@ -1039,8 +1039,8 @@ TEST_F(
// multiDC satisfied but not majority or multiRack
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
- getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2);
- getReplCoord()->setLastDurableOptime_forTest(2, 3, time2);
+ getReplCoord()->setLastAppliedOptime_forTest(2, 3, time2).transitional_ignore();
+ getReplCoord()->setLastDurableOptime_forTest(2, 3, time2).transitional_ignore();
statusAndDur = getReplCoord()->awaitReplication(opCtx.get(), time2, majorityWriteConcern);
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed, statusAndDur.status);
@@ -2333,7 +2333,7 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
// Step down from primary.
- getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1).transitional_ignore();
ASSERT_OK(getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Seconds(1)));
status = getReplCoord()->setMaintenanceMode(false);
@@ -2369,8 +2369,10 @@ TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection)
// Step down from primary.
- getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1);
- getReplCoord()->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000));
+ getReplCoord()->updateTerm(opCtx.get(), getReplCoord()->getTerm() + 1).transitional_ignore();
+ getReplCoord()
+ ->waitForMemberState(MemberState::RS_SECONDARY, Milliseconds(10 * 1000))
+ .transitional_ignore();
// Can't modify maintenance mode when running for election (before and after dry run).
ASSERT_EQUALS(TopologyCoordinator::Role::follower, getTopoCoord().getRole());
@@ -3863,7 +3865,7 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeIsNewer
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
OpTime time(Timestamp(10, 0), 1);
@@ -3905,7 +3907,7 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
// higher term, should change
@@ -3984,7 +3986,7 @@ TEST_F(ReplCoordTest,
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4110,7 +4112,7 @@ TEST_F(ReplCoordTest, TermAndLastCommittedOpTimeUpdatedFromHeartbeatWhenArbiter)
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
auto opCtx = makeOperationContext();
- getReplCoord()->updateTerm(opCtx.get(), 1);
+ getReplCoord()->updateTerm(opCtx.get(), 1).transitional_ignore();
ASSERT_EQUALS(1, getReplCoord()->getTerm());
auto replCoord = getReplCoord();
@@ -4330,17 +4332,19 @@ TEST_F(ReplCoordTest,
// Respond to node1's heartbeat command with a config that excludes node1.
ReplSetHeartbeatResponse hbResp;
ReplSetConfig config;
- config.initialize(BSON("_id"
- << "mySet"
- << "protocolVersion"
- << 1
- << "version"
- << 3
- << "members"
- << BSON_ARRAY(BSON("host"
- << "node2:12345"
- << "_id"
- << 1))));
+ config
+ .initialize(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1))))
+ .transitional_ignore();
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
hbResp.setSetName("mySet");
@@ -4735,9 +4739,11 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
memberIds.insert(memberId);
OpTime appliedOpTime;
OpTime durableOpTime;
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, appliedOpTime);
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, durableOpTime);
}
ASSERT_EQUALS(2U, memberIds.size());
@@ -4751,7 +4757,8 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
long long memberId = entry[OldUpdatePositionArgs::kMemberIdFieldName].Number();
memberIds2.insert(memberId);
OpTime entryOpTime;
- bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime);
+ bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, entryOpTime);
}
ASSERT_EQUALS(2U, memberIds2.size());
@@ -4779,9 +4786,11 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
memberIds3.insert(memberId);
OpTime appliedOpTime;
OpTime durableOpTime;
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kAppliedOpTimeFieldName, &appliedOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, appliedOpTime);
- bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime);
+ bsonExtractOpTimeField(entry, UpdatePositionArgs::kDurableOpTimeFieldName, &durableOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, durableOpTime);
}
ASSERT_EQUALS(1U, memberIds3.size());
@@ -4795,7 +4804,8 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
long long memberId = entry[OldUpdatePositionArgs::kMemberIdFieldName].Number();
memberIds4.insert(memberId);
OpTime entryOpTime;
- bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime);
+ bsonExtractOpTimeField(entry, OldUpdatePositionArgs::kOpTimeFieldName, &entryOpTime)
+ .transitional_ignore();
ASSERT_EQUALS(optime, entryOpTime);
}
ASSERT_EQUALS(1U, memberIds4.size());
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 64b7dd27e12..b22c180bb7c 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -110,7 +110,7 @@ Reporter::Reporter(executor::TaskExecutor* executor,
}
Reporter::~Reporter() {
- DESTRUCTOR_GUARD(shutdown(); join(););
+ DESTRUCTOR_GUARD(shutdown(); join().transitional_ignore(););
}
std::string Reporter::toString() const {
diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp
index cd77a41ba95..d1e794e015d 100644
--- a/src/mongo/db/repl/rollback_test_fixture.cpp
+++ b/src/mongo/db/repl/rollback_test_fixture.cpp
@@ -82,7 +82,7 @@ void RollbackTest::setUp() {
_opCtx = cc().makeOperationContext();
_replicationProcess->getConsistencyMarkers()->setAppliedThrough(_opCtx.get(), OpTime{});
_replicationProcess->getConsistencyMarkers()->setMinValid(_opCtx.get(), OpTime{});
- _replicationProcess->initializeRollbackID(_opCtx.get());
+ _replicationProcess->initializeRollbackID(_opCtx.get()).transitional_ignore();
_threadPoolExecutorTest.launchExecutorThread();
}
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 6e3615ceee0..048aa72ab6d 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -188,7 +188,8 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdThrows) {
RollbackSourceLocal(stdx::make_unique<OplogInterfaceMock>()),
{},
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
UserException,
ErrorCodes::UnknownError);
}
@@ -211,7 +212,8 @@ TEST_F(RSRollbackTest, RemoteGetRollbackIdDiffersFromRequiredRBID) {
RollbackSourceLocal(stdx::make_unique<OplogInterfaceMock>()),
1,
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
UserException,
ErrorCodes::Error(40362));
}
@@ -243,7 +245,7 @@ Collection* _createCollection(OperationContext* opCtx,
mongo::WriteUnitOfWork wuow(opCtx);
auto db = dbHolder().openDb(opCtx, nss.db());
ASSERT_TRUE(db);
- db->dropCollection(opCtx, nss.ns());
+ db->dropCollection(opCtx, nss.ns()).transitional_ignore();
auto coll = db->createCollection(opCtx, nss.ns(), options);
ASSERT_TRUE(coll);
wuow.commit();
@@ -899,7 +901,8 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommandFailsIfRBIDChangesWhileSynci
rollbackSource,
0,
_coordinator,
- _replicationProcess.get()),
+ _replicationProcess.get())
+ .transitional_ignore(),
DBException,
40365);
ASSERT(rollbackSource.copyCollectionCalled);
@@ -1160,7 +1163,8 @@ TEST(RSRollbackTest, LocalEntryWithoutNsIsFatal) {
const auto validOplogEntry = fromjson("{op: 'i', ns: 'test.t', o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("ns")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("ns"))
+ .transitional_ignore(),
RSFatalException);
}
@@ -1168,7 +1172,8 @@ TEST(RSRollbackTest, LocalEntryWithoutOIsFatal) {
const auto validOplogEntry = fromjson("{op: 'i', ns: 'test.t', o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o"))
+ .transitional_ignore(),
RSFatalException);
}
@@ -1177,7 +1182,8 @@ TEST(RSRollbackTest, LocalEntryWithoutO2IsFatal) {
fromjson("{op: 'u', ns: 'test.t', o2: {_id: 1}, o: {_id:1, a: 1}}");
FixUpInfo fui;
ASSERT_OK(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry));
- ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o2")),
+ ASSERT_THROWS(updateFixUpInfoFromLocalOplogEntry(fui, validOplogEntry.removeField("o2"))
+ .transitional_ignore(),
RSFatalException);
}
diff --git a/src/mongo/db/repl/scatter_gather_test.cpp b/src/mongo/db/repl/scatter_gather_test.cpp
index 294b2c84ed7..3f4d8d4d5cd 100644
--- a/src/mongo/db/repl/scatter_gather_test.cpp
+++ b/src/mongo/db/repl/scatter_gather_test.cpp
@@ -157,7 +157,9 @@ TEST_F(ScatterGatherTest, DeleteAlgorithmAfterItHasCompleted) {
ScatterGatherRunner* sgr = new ScatterGatherRunner(sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr->start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
ASSERT_FALSE(ranCompletion);
@@ -245,7 +247,9 @@ TEST_F(ScatterGatherTest, ShutdownExecutorAfterStart) {
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
shutdownExecutorThread();
sga.finish();
ASSERT_FALSE(ranCompletion);
@@ -258,7 +262,9 @@ TEST_F(ScatterGatherTest, DoNotProcessMoreThanSufficientResponses) {
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
ASSERT_FALSE(ranCompletion);
@@ -300,7 +306,9 @@ TEST_F(ScatterGatherTest, DoNotCreateCallbacksIfHasSufficientResponsesReturnsTru
ScatterGatherRunner sgr(&sga, &getExecutor());
bool ranCompletion = false;
StatusWith<executor::TaskExecutor::EventHandle> status = sgr.start();
- getExecutor().onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion));
+ getExecutor()
+ .onEvent(status.getValue(), getOnCompletionTestFunction(&ranCompletion))
+ .status_with_transitional_ignore();
ASSERT_OK(status.getStatus());
// Wait until callback finishes.
NetworkInterfaceMock* net = getNet();
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index c01a1badff2..57eb9d4d262 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -133,7 +133,7 @@ int64_t getIndexKeyCount(OperationContext* opCtx, IndexCatalog* cat, IndexDescri
auto idx = cat->getIndex(desc);
int64_t numKeys;
ValidateResults fullRes;
- idx->validate(opCtx, &numKeys, &fullRes);
+ idx->validate(opCtx, &numKeys, &fullRes).transitional_ignore();
return numKeys;
}
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index 1218ffa2cc2..77579cc24ad 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -256,12 +256,13 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
_finishCallback(Status(ErrorCodes::CallbackCanceled,
str::stream()
<< "sync source resolver shut down while probing candidate: "
- << candidate));
+ << candidate))
+ .transitional_ignore();
return;
}
if (ErrorCodes::CallbackCanceled == queryResult.getStatus()) {
- _finishCallback(queryResult.getStatus());
+ _finishCallback(queryResult.getStatus()).transitional_ignore();
return;
}
@@ -272,14 +273,14 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
<< "' for " << kFetcherErrorBlacklistDuration << " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
const auto& queryResponse = queryResult.getValue();
const auto remoteEarliestOpTime = _parseRemoteEarliestOpTime(candidate, queryResponse);
if (remoteEarliestOpTime.isNull()) {
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -306,7 +307,7 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
earliestOpTimeSeen = remoteEarliestOpTime;
}
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -323,7 +324,7 @@ void SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime earl
stdx::placeholders::_1));
if (!handle.isOK()) {
- _finishCallback(handle.getStatus());
+ _finishCallback(handle.getStatus()).transitional_ignore();
return;
}
@@ -339,7 +340,7 @@ void SyncSourceResolver::_rbidRequestCallback(
OpTime earliestOpTimeSeen,
const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) {
if (rbidReply.response.status == ErrorCodes::CallbackCanceled) {
- _finishCallback(rbidReply.response.status);
+ _finishCallback(rbidReply.response.status).transitional_ignore();
return;
}
@@ -352,7 +353,7 @@ void SyncSourceResolver::_rbidRequestCallback(
log() << "Blacklisting " << candidate << " due to error: '" << ex << "' for "
<< kFetcherErrorBlacklistDuration << " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -361,11 +362,11 @@ void SyncSourceResolver::_rbidRequestCallback(
// Unittest requires that this kind of failure be handled specially.
auto status = _scheduleFetcher(_makeRequiredOpTimeFetcher(candidate, earliestOpTimeSeen));
if (!status.isOK()) {
- _finishCallback(status);
+ _finishCallback(status).transitional_ignore();
}
return;
}
- _finishCallback(candidate);
+ _finishCallback(candidate).transitional_ignore();
}
Status SyncSourceResolver::_compareRequiredOpTimeWithQueryResponse(
@@ -405,12 +406,13 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
"required optime "
<< _requiredOpTime.toString()
<< " in candidate's oplog: "
- << candidate));
+ << candidate))
+ .transitional_ignore();
return;
}
if (ErrorCodes::CallbackCanceled == queryResult.getStatus()) {
- _finishCallback(queryResult.getStatus());
+ _finishCallback(queryResult.getStatus()).transitional_ignore();
return;
}
@@ -422,7 +424,7 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
<< " until: " << until << ". required optime: " << _requiredOpTime;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
@@ -439,11 +441,11 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
<< " until: " << until;
_syncSourceSelector->blacklistSyncSource(candidate, until);
- _chooseAndProbeNextSyncSource(earliestOpTimeSeen);
+ _chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
return;
}
- _finishCallback(candidate);
+ _finishCallback(candidate).transitional_ignore();
}
Status SyncSourceResolver::_chooseAndProbeNextSyncSource(OpTime earliestOpTimeSeen) {
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 7492bb4bb40..9dbed691425 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -1967,7 +1967,9 @@ StatusWith<BSONObj> TopologyCoordinatorImpl::prepareReplSetUpdatePositionCommand
// Add metadata to command. Old style parsing logic will reject the metadata.
if (commandStyle == ReplicationCoordinator::ReplSetUpdatePositionCommandStyle::kNewStyle) {
- prepareReplSetMetadata(currentCommittedSnapshotOpTime).writeToMetadata(&cmdBuilder);
+ prepareReplSetMetadata(currentCommittedSnapshotOpTime)
+ .writeToMetadata(&cmdBuilder)
+ .transitional_ignore();
}
return cmdBuilder.obj();
}
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index b5af086b797..ec66ca1eb5f 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -2585,19 +2585,21 @@ TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataSameConfig) {
// construct a copy of the original config for log message checking later
// see HeartbeatResponseTest for the origin of the original config
ReplSetConfig originalConfig;
- originalConfig.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 5
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)));
+ originalConfig
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)))
+ .transitional_ignore();
ReplSetHeartbeatResponse sameConfigResponse;
sameConfigResponse.noteReplSet();
@@ -4757,7 +4759,8 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4791,7 +4794,8 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -5081,7 +5085,8 @@ TEST_F(HeartbeatResponseTest, ReconfigBetweenHeartbeatRequestAndRepsonse) {
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -5124,7 +5129,8 @@ TEST_F(HeartbeatResponseTest, ReconfigNodeRemovedBetweenHeartbeatRequestAndRepso
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -5561,7 +5567,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5569,17 +5576,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -5617,7 +5625,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5626,19 +5635,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -5674,7 +5684,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5683,19 +5694,20 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -5730,7 +5742,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5764,7 +5777,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5810,7 +5824,8 @@ TEST_F(TopoCoordTest, ArbiterDoesNotGrantVoteWhenItCanSeeAHealthyPrimaryOfEqualO
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5849,7 +5864,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5885,7 +5901,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -5918,17 +5935,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -5949,7 +5967,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -5977,17 +5996,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6008,7 +6028,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6036,17 +6057,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6066,7 +6088,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6094,17 +6117,18 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6125,7 +6149,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -6153,17 +6178,18 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -6184,7 +6210,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 80fc9c3bf52..f4e43bf3e0b 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -2044,7 +2044,8 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2078,7 +2079,8 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
<< "priority"
- << 0))));
+ << 0))))
+ .transitional_ignore();
getTopoCoord().updateConfig(cfg, 0, now()++);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2442,7 +2444,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2450,17 +2453,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 1LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -2498,7 +2502,8 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2507,19 +2512,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << true
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -2528,19 +2534,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// real request fine
ReplSetRequestVotesArgs args3;
- args3.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args3
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response3;
getTopoCoord().processReplSetRequestVotes(args3, &response3);
@@ -2549,19 +2556,20 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// dry post real, fails
ReplSetRequestVotesArgs args4;
- args4.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args4
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response4;
getTopoCoord().processReplSetRequestVotes(args4, &response4);
@@ -2598,7 +2606,8 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2607,19 +2616,20 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "dryRun"
- << false
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2);
@@ -2654,7 +2664,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2688,7 +2699,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2726,7 +2738,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2762,7 +2775,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -2795,17 +2809,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2826,7 +2841,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2854,17 +2870,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2885,7 +2902,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
<< "configVersion"
<< 0LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2913,17 +2931,18 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -2943,7 +2962,8 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -2971,17 +2991,18 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -3002,7 +3023,8 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().processReplSetRequestVotes(args, &response);
@@ -3030,17 +3052,18 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(
- BSON("replSetRequestVotes" << 1 << "setName"
- << "rs0"
- << "term"
- << 1LL
- << "candidateIndex"
- << 0LL
- << "configVersion"
- << 1LL
- << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote
+ .initialize(BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse responseForRealVote;
getTopoCoord().processReplSetRequestVotes(argsForRealVote, &responseForRealVote);
@@ -3061,7 +3084,8 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
<< "configVersion"
<< 1LL
<< "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)))
+ .transitional_ignore();
ReplSetRequestVotesResponse response;
getTopoCoord().getMyMemberData()->setLastAppliedOpTime({Timestamp(20, 0), 0}, Date_t());
@@ -3552,7 +3576,8 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -3598,7 +3623,8 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
0);
ReplSetHeartbeatResponse hb;
- hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0);
+ hb.initialize(BSON("ok" << 1 << "v" << 1 << "state" << MemberState::RS_PRIMARY), 0)
+ .transitional_ignore();
hb.setDurableOpTime(lastOpTimeApplied);
hb.setElectionTime(election.getTimestamp());
StatusWith<ReplSetHeartbeatResponse> hbResponse = StatusWith<ReplSetHeartbeatResponse>(hb);
@@ -3900,7 +3926,7 @@ TEST_F(HeartbeatResponseTestV1,
// Freeze node to set stepdown wait.
BSONObjBuilder response;
- getTopoCoord().prepareFreezeResponse(now()++, 20, &response);
+ getTopoCoord().prepareFreezeResponse(now()++, 20, &response).status_with_transitional_ignore();
nextAction = receiveDownHeartbeat(HostAndPort("host2"), "rs0");
ASSERT_EQUALS(-1, getCurrentPrimaryIndex());
@@ -4659,21 +4685,23 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
// construct a copy of the original config for log message checking later
// see HeartbeatResponseTest for the origin of the original config
ReplSetConfig originalConfig;
- originalConfig.initialize(BSON("_id"
- << "rs0"
- << "version"
- << 5
- << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion"
- << 1
- << "settings"
- << BSON("heartbeatTimeoutSecs" << 5)));
+ originalConfig
+ .initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)))
+ .transitional_ignore();
ReplSetHeartbeatResponse sameConfigResponse;
sameConfigResponse.noteReplSet();
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index d23c1220874..b8ab1945627 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -289,7 +289,7 @@ Status Balancer::moveSingleChunk(OperationContext* opCtx,
void Balancer::report(OperationContext* opCtx, BSONObjBuilder* builder) {
auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
- balancerConfig->refreshAndCheck(opCtx);
+ balancerConfig->refreshAndCheck(opCtx).transitional_ignore();
const auto mode = balancerConfig->getBalancerMode();
@@ -389,7 +389,8 @@ void Balancer::_mainThread() {
_balancedLastTime);
shardingContext->catalogClient(opCtx.get())
- ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON());
+ ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
+ .transitional_ignore();
}
LOG(1) << "*** End of balancing round";
@@ -408,7 +409,8 @@ void Balancer::_mainThread() {
roundDetails.setFailed(e.what());
shardingContext->catalogClient(opCtx.get())
- ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON());
+ ->logAction(opCtx.get(), "balancer.round", "", roundDetails.toBSON())
+ .transitional_ignore();
// Sleep a fair amount before retrying because of the error
_endRound(opCtx.get(), kBalanceRoundDefaultInterval);
diff --git a/src/mongo/db/s/balancer/migration_manager.cpp b/src/mongo/db/s/balancer/migration_manager.cpp
index eb47bb227c8..a8108a1f540 100644
--- a/src/mongo/db/s/balancer/migration_manager.cpp
+++ b/src/mongo/db/s/balancer/migration_manager.cpp
@@ -80,7 +80,8 @@ Status extractMigrationStatusFromCommandResponse(const BSONObj& commandResponse)
if (!commandStatus.isOK()) {
bool chunkTooBig = false;
- bsonExtractBooleanFieldWithDefault(commandResponse, kChunkTooBig, false, &chunkTooBig);
+ bsonExtractBooleanFieldWithDefault(commandResponse, kChunkTooBig, false, &chunkTooBig)
+ .transitional_ignore();
if (chunkTooBig) {
commandStatus = {ErrorCodes::ChunkTooBig, commandStatus.reason()};
}
@@ -594,8 +595,9 @@ void MigrationManager::_abandonActiveMigrationsAndEnableManager(OperationContext
// Clear the config.migrations collection so that those chunks can be scheduled for migration
// again.
- catalogClient->removeConfigDocuments(
- opCtx, MigrationType::ConfigNS, BSONObj(), kMajorityWriteConcern);
+ catalogClient
+ ->removeConfigDocuments(opCtx, MigrationType::ConfigNS, BSONObj(), kMajorityWriteConcern)
+ .transitional_ignore();
_state = State::kEnabled;
_condVar.notify_all();
diff --git a/src/mongo/db/s/collection_range_deleter.cpp b/src/mongo/db/s/collection_range_deleter.cpp
index fb2197b8d0c..a567a8a171a 100644
--- a/src/mongo/db/s/collection_range_deleter.cpp
+++ b/src/mongo/db/s/collection_range_deleter.cpp
@@ -279,7 +279,7 @@ StatusWith<int> CollectionRangeDeleter::_doDeletion(OperationContext* opCtx,
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
WriteUnitOfWork wuow(opCtx);
if (saver) {
- saver->goingToDelete(obj);
+ saver->goingToDelete(obj).transitional_ignore();
}
collection->deleteDocument(opCtx, rloc, nullptr, true);
wuow.commit();
diff --git a/src/mongo/db/s/collection_range_deleter_test.cpp b/src/mongo/db/s/collection_range_deleter_test.cpp
index c30412ceecc..f2e4af1a090 100644
--- a/src/mongo/db/s/collection_range_deleter_test.cpp
+++ b/src/mongo/db/s/collection_range_deleter_test.cpp
@@ -111,7 +111,8 @@ void CollectionRangeDeleterTest::setUp() {
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
ShardingMongodTestFixture::setUp();
replicationCoordinator()->alwaysAllowWrites(true);
- initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost));
+ initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost))
+ .transitional_ignore();
// RemoteCommandTargeterMock::get(shardRegistry()->getConfigShard()->getTargeter())
// ->setConnectionStringReturnValue(kConfigConnStr);
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index de6cfcabd96..68a4797a6e5 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -360,16 +360,18 @@ void MetadataManager::append(BSONObjBuilder* builder) {
void MetadataManager::_scheduleCleanup(executor::TaskExecutor* executor,
NamespaceString nss,
CollectionRangeDeleter::Action action) {
- executor->scheduleWork([executor, nss, action](auto&) {
- const int maxToDelete = std::max(int(internalQueryExecYieldIterations.load()), 1);
- Client::initThreadIfNotAlready("Collection Range Deleter");
- auto UniqueOpCtx = Client::getCurrent()->makeOperationContext();
- auto opCtx = UniqueOpCtx.get();
- auto next = CollectionRangeDeleter::cleanUpNextRange(opCtx, nss, action, maxToDelete);
- if (next != CollectionRangeDeleter::Action::kFinished) {
- _scheduleCleanup(executor, nss, next);
- }
- });
+ executor
+ ->scheduleWork([executor, nss, action](auto&) {
+ const int maxToDelete = std::max(int(internalQueryExecYieldIterations.load()), 1);
+ Client::initThreadIfNotAlready("Collection Range Deleter");
+ auto UniqueOpCtx = Client::getCurrent()->makeOperationContext();
+ auto opCtx = UniqueOpCtx.get();
+ auto next = CollectionRangeDeleter::cleanUpNextRange(opCtx, nss, action, maxToDelete);
+ if (next != CollectionRangeDeleter::Action::kFinished) {
+ _scheduleCleanup(executor, nss, next);
+ }
+ })
+ .status_with_transitional_ignore();
}
auto MetadataManager::_pushRangeToClean(ChunkRange const& range) -> CleanupNotification {
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 5ba653ac731..a17eed1c6b4 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -81,7 +81,8 @@ protected:
void setUp() override {
ShardingMongodTestFixture::setUp();
serverGlobalParams.clusterRole = ClusterRole::ShardServer;
- initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost));
+ initializeGlobalShardingStateForMongodForTest(ConnectionString(dummyHost))
+ .transitional_ignore();
configTargeter()->setFindHostReturnValue(dummyHost);
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 46bfc059de2..50c05a7f763 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -327,7 +327,8 @@ void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* opCtx) {
case kDone:
break;
case kCloning:
- _callRecipient(createRequestWithSessionId(kRecvChunkAbort, _args.getNss(), _sessionId));
+ _callRecipient(createRequestWithSessionId(kRecvChunkAbort, _args.getNss(), _sessionId))
+ .status_with_transitional_ignore();
// Intentional fall through
case kNew:
_cleanup(opCtx);
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 12cf31ef2dd..0ca6d2643e2 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -892,7 +892,7 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx,
}
if (serverGlobalParams.moveParanoia) {
- rs.goingToDelete(fullObj);
+ rs.goingToDelete(fullObj).transitional_ignore();
}
deleteObjects(opCtx,
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 2b6038748e7..da1b18008cf 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -164,15 +164,16 @@ Status MigrationSourceManager::startClone(OperationContext* opCtx) {
invariant(_state == kCreated);
auto scopedGuard = MakeGuard([&] { cleanupOnError(opCtx); });
- grid.catalogClient(opCtx)->logChange(
- opCtx,
- "moveChunk.start",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(opCtx)
+ ->logChange(opCtx,
+ "moveChunk.start",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
_cloneDriver = stdx::make_unique<MigrationChunkClonerSourceLegacy>(
_args, _collectionMetadata->getKeyPattern(), _donorConnStr, _recipientHost);
@@ -434,15 +435,16 @@ Status MigrationSourceManager::commitChunkMetadataOnConfig(OperationContext* opC
scopedGuard.Dismiss();
_cleanup(opCtx);
- grid.catalogClient(opCtx)->logChange(
- opCtx,
- "moveChunk.commit",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(opCtx)
+ ->logChange(opCtx,
+ "moveChunk.commit",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
return Status::OK();
}
@@ -452,15 +454,16 @@ void MigrationSourceManager::cleanupOnError(OperationContext* opCtx) {
return;
}
- grid.catalogClient(opCtx)->logChange(
- opCtx,
- "moveChunk.error",
- getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId()
- << "to"
- << _args.getToShardId()),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(opCtx)
+ ->logChange(opCtx,
+ "moveChunk.error",
+ getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
_cleanup(opCtx);
}
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 67edd9bb5af..b457f6d5df6 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -231,7 +231,8 @@ private:
auto range = ChunkRange(moveChunkRequest.getMinKey(), moveChunkRequest.getMaxKey());
if (moveChunkRequest.getWaitForDelete()) {
CollectionShardingState::waitForClean(
- opCtx, moveChunkRequest.getNss(), moveChunkRequest.getVersionEpoch(), range);
+ opCtx, moveChunkRequest.getNss(), moveChunkRequest.getVersionEpoch(), range)
+ .transitional_ignore();
// Ensure that wait for write concern for the chunk cleanup will include
// the deletes performed by the range deleter thread.
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 89c305cda43..32b1a82b3c3 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -82,11 +82,13 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.append("errmsg", *_cmdErrmsg);
}
- grid.catalogClient(_opCtx)->logChange(_opCtx,
- str::stream() << "moveChunk." << _where,
- _ns,
- _b.obj(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ grid.catalogClient(_opCtx)
+ ->logChange(_opCtx,
+ str::stream() << "moveChunk." << _where,
+ _ns,
+ _b.obj(),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
} catch (const std::exception& e) {
warning() << "couldn't record timing for moveChunk '" << _where
<< "': " << redact(e.what());
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 5f322d945ee..35f9c4e04a6 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -233,7 +233,8 @@ Status ShardingStateRecovery::startMetadataOp(OperationContext* opCtx) {
// Couldn't wait for the replication to complete, but the local write was performed. Clear
// it up fast (without any waiting for journal or replication) and still treat it as
// failure.
- modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions());
+ modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions())
+ .transitional_ignore();
}
return upsertStatus;
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index c66fdf098ab..3d968ebabd1 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -653,7 +653,7 @@ Status canonicalizeServerOptions(moe::Environment* params) {
if (params->count("verbose")) {
std::string verbosity;
- params->get("verbose", &verbosity);
+ params->get("verbose", &verbosity).transitional_ignore();
if (s == verbosity ||
// Treat a verbosity of "true" the same as a single "v". See SERVER-11471.
(s == "v" && verbosity == "true")) {
diff --git a/src/mongo/db/server_parameters_test.cpp b/src/mongo/db/server_parameters_test.cpp
index 8ce7c9fe141..83fdb3e93b5 100644
--- a/src/mongo/db/server_parameters_test.cpp
+++ b/src/mongo/db/server_parameters_test.cpp
@@ -45,13 +45,13 @@ TEST(ServerParameters, Simple1) {
ExportedServerParameter<int, ServerParameterType::kStartupAndRuntime> ff(NULL, "ff", &f);
ASSERT_EQUALS("ff", ff.name());
- ff.set(6);
+ ff.set(6).transitional_ignore();
ASSERT_EQUALS(6, f.load());
- ff.set(BSON("x" << 7).firstElement());
+ ff.set(BSON("x" << 7).firstElement()).transitional_ignore();
ASSERT_EQUALS(7, f.load());
- ff.setFromString("8");
+ ff.setFromString("8").transitional_ignore();
ASSERT_EQUALS(8, f.load());
}
@@ -63,7 +63,7 @@ TEST(ServerParameters, Vector1) {
BSONObj x = BSON("x" << BSON_ARRAY("a"
<< "b"
<< "c"));
- vv.set(x.firstElement());
+ vv.set(x.firstElement()).transitional_ignore();
ASSERT_EQUALS(3U, v.size());
ASSERT_EQUALS("a", v[0]);
@@ -79,7 +79,7 @@ TEST(ServerParameters, Vector1) {
ASSERT(x.firstElement().woCompare(y.firstElement(), false) == 0);
- vv.setFromString("d,e");
+ vv.setFromString("d,e").transitional_ignore();
ASSERT_EQUALS(2U, v.size());
ASSERT_EQUALS("d", v[0]);
ASSERT_EQUALS("e", v[1]);
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index 76234bc3b4b..1ad67a9f275 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -225,7 +225,9 @@ public:
repl::getGlobalReplicationCoordinator()->setMaintenanceMode(true).isOK()) {}
~MaintenanceModeSetter() {
if (maintenanceModeSet)
- repl::getGlobalReplicationCoordinator()->setMaintenanceMode(false);
+ repl::getGlobalReplicationCoordinator()
+ ->setMaintenanceMode(false)
+ .transitional_ignore();
}
private:
@@ -250,7 +252,8 @@ void appendReplyMetadata(OperationContext* opCtx,
// TODO: refactor out of here as part of SERVER-18236
if (isShardingAware || isConfig) {
rpc::ShardingMetadata(lastOpTimeFromClient, replCoord->getElectionId())
- .writeToMetadata(metadataBob);
+ .writeToMetadata(metadataBob)
+ .transitional_ignore();
if (LogicalTimeValidator::isAuthorizedToAdvanceClock(opCtx)) {
// No need to sign logical times for internal clients.
SignedLogicalTime currentTime(
@@ -670,8 +673,10 @@ void execCommandDatabase(OperationContext* opCtx,
invariant(sce); // do not upcasts from DBException created by uassert variants.
if (!opCtx->getClient()->isInDirectClient()) {
- ShardingState::get(opCtx)->onStaleShardVersion(
- opCtx, NamespaceString(sce->getns()), sce->getVersionReceived());
+ ShardingState::get(opCtx)
+ ->onStaleShardVersion(
+ opCtx, NamespaceString(sce->getns()), sce->getVersionReceived())
+ .transitional_ignore();
}
}
@@ -864,8 +869,9 @@ DbResponse receivedQuery(OperationContext* opCtx,
// If we got a stale config, wait in case the operation is stuck in a critical section
if (!opCtx->getClient()->isInDirectClient() && e.getCode() == ErrorCodes::SendStaleConfig) {
auto& sce = static_cast<const StaleConfigException&>(e);
- ShardingState::get(opCtx)->onStaleShardVersion(
- opCtx, NamespaceString(sce.getns()), sce.getVersionReceived());
+ ShardingState::get(opCtx)
+ ->onStaleShardVersion(opCtx, NamespaceString(sce.getns()), sce.getVersionReceived())
+ .transitional_ignore();
}
dbResponse.response.reset();
diff --git a/src/mongo/db/service_liason_mock.cpp b/src/mongo/db/service_liason_mock.cpp
index b7b03ba9ceb..4c2ba699acb 100644
--- a/src/mongo/db/service_liason_mock.cpp
+++ b/src/mongo/db/service_liason_mock.cpp
@@ -39,7 +39,7 @@ MockServiceLiasonImpl::MockServiceLiasonImpl() {
auto timerFactory = stdx::make_unique<executor::AsyncTimerFactoryMock>();
_timerFactory = timerFactory.get();
_runner = stdx::make_unique<PeriodicRunnerASIO>(std::move(timerFactory));
- _runner->startup();
+ _runner->startup().transitional_ignore();
}
LogicalSessionIdSet MockServiceLiasonImpl::getActiveSessions() const {
diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
index 48e8c41bc50..fb47d11a7eb 100644
--- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp
@@ -280,7 +280,8 @@ void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) {
ns = obj["ns"].valuestrsafe();
if (obj["options"].isABSONObj()) {
- options.parse(obj["options"].Obj(), CollectionOptions::parseForStorage);
+ options.parse(obj["options"].Obj(), CollectionOptions::parseForStorage)
+ .transitional_ignore();
}
BSONElement indexList = obj["indexes"];
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
index 70ae05fe666..e96cddbb1a7 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry.cpp
@@ -57,7 +57,7 @@ public:
virtual void commit() {}
virtual void rollback() {
// Intentionally ignoring failure.
- _cce->_engine->dropIdent(_opCtx, _ident);
+ _cce->_engine->dropIdent(_opCtx, _ident).transitional_ignore();
}
OperationContext* const _opCtx;
@@ -74,7 +74,7 @@ public:
virtual void commit() {
// Intentionally ignoring failure here. Since we've removed the metadata pointing to the
// index, we should never see it again anyway.
- _cce->_engine->dropIdent(_opCtx, _ident);
+ _cce->_engine->dropIdent(_opCtx, _ident).transitional_ignore();
}
OperationContext* const _opCtx;
diff --git a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
index cc6b9a7829a..88f1b7b7e55 100644
--- a/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
+++ b/src/mongo/db/storage/kv/kv_database_catalog_entry_base.cpp
@@ -62,7 +62,7 @@ public:
virtual void rollback() {
if (_dropOnRollback) {
// Intentionally ignoring failure
- _dce->_engine->getEngine()->dropIdent(_opCtx, _ident);
+ _dce->_engine->getEngine()->dropIdent(_opCtx, _ident).transitional_ignore();
}
const CollectionMap::iterator it = _dce->_collections.find(_collection);
@@ -100,7 +100,7 @@ public:
// Intentionally ignoring failure here. Since we've removed the metadata pointing to the
// collection, we should never see it again anyway.
if (_dropOnCommit)
- _dce->_engine->getEngine()->dropIdent(_opCtx, _ident);
+ _dce->_engine->getEngine()->dropIdent(_opCtx, _ident).transitional_ignore();
}
virtual void rollback() {
@@ -346,7 +346,7 @@ Status KVDatabaseCatalogEntryBase::dropCollection(OperationContext* opCtx, Strin
std::vector<std::string> indexNames;
entry->getAllIndexes(opCtx, &indexNames);
for (size_t i = 0; i < indexNames.size(); i++) {
- entry->removeIndex(opCtx, indexNames[i]);
+ entry->removeIndex(opCtx, indexNames[i]).transitional_ignore();
}
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 11f3fa50c60..4661b92e7c6 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -205,8 +205,9 @@ TEST(KVCatalogTest, Coll1) {
{
MyOperationContext opCtx(engine);
WriteUnitOfWork uow(&opCtx);
- catalog->dropCollection(&opCtx, "a.b");
- catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed);
+ catalog->dropCollection(&opCtx, "a.b").transitional_ignore();
+ catalog->newCollection(&opCtx, "a.b", CollectionOptions(), KVPrefix::kNotPrefixed)
+ .transitional_ignore();
uow.commit();
}
ASSERT_NOT_EQUALS(ident, catalog->getCollectionIdent("a.b"));
diff --git a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
index 11cad9890d1..0612973f107 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_snapshots.cpp
@@ -91,7 +91,7 @@ public:
void prepareSnapshot() {
snapshotOperation = makeOperation(); // each prepare gets a new operation.
- snapshotManager->prepareForCreateSnapshot(snapshotOperation);
+ snapshotManager->prepareForCreateSnapshot(snapshotOperation).transitional_ignore();
}
SnapshotName createSnapshot() {
diff --git a/src/mongo/db/storage/kv/kv_storage_engine.cpp b/src/mongo/db/storage/kv/kv_storage_engine.cpp
index 462b6214243..f1538063192 100644
--- a/src/mongo/db/storage/kv/kv_storage_engine.cpp
+++ b/src/mongo/db/storage/kv/kv_storage_engine.cpp
@@ -86,7 +86,7 @@ KVStorageEngine::KVStorageEngine(
if (options.forRepair && catalogExists) {
log() << "Repairing catalog metadata";
// TODO should also validate all BSON in the catalog.
- engine->repairIdent(&opCtx, catalogInfo);
+ engine->repairIdent(&opCtx, catalogInfo).transitional_ignore();
}
if (!catalogExists) {
@@ -161,7 +161,7 @@ KVStorageEngine::KVStorageEngine(
continue;
log() << "dropping unused ident: " << toRemove;
WriteUnitOfWork wuow(&opCtx);
- _engine->dropIdent(&opCtx, toRemove);
+ _engine->dropIdent(&opCtx, toRemove).transitional_ignore();
wuow.commit();
}
}
@@ -239,7 +239,7 @@ Status KVStorageEngine::dropDatabase(OperationContext* opCtx, StringData db) {
for (std::list<std::string>::iterator it = toDrop.begin(); it != toDrop.end(); ++it) {
string coll = *it;
- entry->dropCollection(opCtx, coll);
+ entry->dropCollection(opCtx, coll).transitional_ignore();
}
toDrop.clear();
entry->getCollectionNamespaces(&toDrop);
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index 28a822c7814..cb35765e131 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -1811,7 +1811,8 @@ void BtreeLogic<BtreeLayout>::split(OperationContext* opCtx,
splitkey.recordLoc,
true, // dupsallowed
bucketLoc,
- rLoc);
+ rLoc)
+ .transitional_ignore();
}
int newpos = keypos;
@@ -2336,7 +2337,7 @@ DiskLoc BtreeLogic<BtreeLayout>::_locate(OperationContext* opCtx,
int position;
BucketType* bucket = getBucket(opCtx, bucketLoc);
// XXX: owned to not owned conversion(?)
- _find(opCtx, bucket, key, recordLoc, false, &position, foundOut);
+ _find(opCtx, bucket, key, recordLoc, false, &position, foundOut).transitional_ignore();
// Look in our current bucket.
if (*foundOut) {
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index e34a5c5a22e..d4274feaa4b 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -185,7 +185,7 @@ class SimpleCreate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
this->checkValidNumKeys(0);
}
@@ -196,10 +196,10 @@ class SimpleInsertDelete : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key = simpleKey('z');
- this->insert(key, this->_helper.dummyDiskLoc);
+ this->insert(key, this->_helper.dummyDiskLoc).transitional_ignore();
this->checkValidNumKeys(1);
this->locate(key, 0, true, this->_helper.headManager.getHead(&opCtx), 1);
@@ -216,14 +216,14 @@ class SplitUnevenBucketBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 10; ++i) {
BSONObj shortKey = simpleKey(shortToken(i), 1);
- this->insert(shortKey, this->_helper.dummyDiskLoc);
+ this->insert(shortKey, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj longKey = simpleKey(longToken(i), 800);
- this->insert(longKey, this->_helper.dummyDiskLoc);
+ this->insert(longKey, this->_helper.dummyDiskLoc).transitional_ignore();
}
this->checkValidNumKeys(20);
@@ -280,11 +280,11 @@ class MissingLocate : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 3; ++i) {
BSONObj k = simpleKey('b' + 2 * i);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
locateExtended(1, 'a', 'b', this->_helper.headManager.getHead(&opCtx));
@@ -318,20 +318,20 @@ class MissingLocateMultiBucket : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
- this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc).transitional_ignore();
// This causes split
- this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc).transitional_ignore();
int pos;
DiskLoc loc;
@@ -370,20 +370,20 @@ class SERVER983 : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
- this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc);
- this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('A', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('B', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('C', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('D', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('E', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('F', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('G', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('H', 800), this->_helper.dummyDiskLoc).transitional_ignore();
+ this->insert(simpleKey('I', 800), this->_helper.dummyDiskLoc).transitional_ignore();
// This will cause split
- this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc);
+ this->insert(simpleKey('J', 800), this->_helper.dummyDiskLoc).transitional_ignore();
int pos;
DiskLoc loc;
@@ -419,17 +419,17 @@ class DontReuseUnused : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
const BSONObj root = simpleKey('p', 800);
this->unindex(root);
- this->insert(root, this->_helper.dummyDiskLoc);
+ this->insert(root, this->_helper.dummyDiskLoc).transitional_ignore();
this->locate(root, 0, true, this->head()->nextChild, 1);
}
};
@@ -439,11 +439,11 @@ class MergeBucketsTestBase : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 10; ++i) {
const BSONObj k = simpleKey('b' + 2 * i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
// numRecords() - 1, because this->_helper.dummyDiskLoc is actually in the record store too
@@ -495,11 +495,11 @@ class MergeBucketsDontReplaceHead : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
for (int i = 0; i < 18; ++i) {
const BSONObj k = simpleKey('a' + i, 800);
- this->insert(k, this->_helper.dummyDiskLoc);
+ this->insert(k, this->_helper.dummyDiskLoc).transitional_ignore();
}
// numRecords(NULL) - 1, because fixedDiskLoc is actually in the record store too
@@ -884,7 +884,7 @@ public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
ArtificialTreeBuilder<OnDiskFormat> builder(&opCtx, &this->_helper);
@@ -2181,14 +2181,14 @@ class LocateEmptyForward : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key1 = simpleKey('a');
- this->insert(key1, this->_helper.dummyDiskLoc);
+ this->insert(key1, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key2 = simpleKey('b');
- this->insert(key2, this->_helper.dummyDiskLoc);
+ this->insert(key2, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key3 = simpleKey('c');
- this->insert(key3, this->_helper.dummyDiskLoc);
+ this->insert(key3, this->_helper.dummyDiskLoc).transitional_ignore();
this->checkValidNumKeys(3);
this->locate(BSONObj(), 0, false, this->_helper.headManager.getHead(&opCtx), 1);
@@ -2200,14 +2200,14 @@ class LocateEmptyReverse : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key1 = simpleKey('a');
- this->insert(key1, this->_helper.dummyDiskLoc);
+ this->insert(key1, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key2 = simpleKey('b');
- this->insert(key2, this->_helper.dummyDiskLoc);
+ this->insert(key2, this->_helper.dummyDiskLoc).transitional_ignore();
BSONObj key3 = simpleKey('c');
- this->insert(key3, this->_helper.dummyDiskLoc);
+ this->insert(key3, this->_helper.dummyDiskLoc).transitional_ignore();
this->checkValidNumKeys(3);
this->locate(BSONObj(), -1, false, DiskLoc(), -1);
@@ -2219,7 +2219,7 @@ class DuplicateKeys : public BtreeLogicTestBase<OnDiskFormat> {
public:
void run() {
OperationContextNoop opCtx;
- this->_helper.btree.initAsEmpty(&opCtx);
+ this->_helper.btree.initAsEmpty(&opCtx).transitional_ignore();
BSONObj key1 = simpleKey('z');
ASSERT_OK(this->insert(key1, this->_helper.dummyDiskLoc, true));
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
index de02abcf76b..f12f3328e72 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
@@ -74,7 +74,7 @@ void simpleInsertTest(const char* buf, int size) {
ASSERT_NOT_OK(rs.insertRecord(&opCtx, buf, 3, 1000).getStatus());
- rs.insertRecord(&opCtx, buf, size, 10000);
+ rs.insertRecord(&opCtx, buf, size, 10000).status_with_transitional_ignore();
{
BSONObjBuilder b;
@@ -119,7 +119,8 @@ TEST(CappedRecordStoreV1, EmptySingleExtent) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100}, {}};
@@ -150,7 +151,8 @@ TEST(CappedRecordStoreV1, FirstLoopWithSingleExtentExactSize) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -188,7 +190,8 @@ TEST(CappedRecordStoreV1, NonFirstLoopWithSingleExtentExactSize) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -229,7 +232,8 @@ TEST(CappedRecordStoreV1, WillLoopWithout24SpareBytes) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1200), 100}, // first old record
@@ -266,7 +270,8 @@ TEST(CappedRecordStoreV1, WontLoopWith24SpareBytes) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 100},
@@ -301,7 +306,8 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentUnLooped) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -339,7 +345,8 @@ TEST(CappedRecordStoreV1, MoveToSecondExtentLooped) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1000), 500},
@@ -424,13 +431,16 @@ TEST(CappedRecordStoreV1Scrambler, Minimal) {
initializeV1RS(&opCtx, records, drecs, NULL, &em, md);
}
- rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(
- &opCtx, zeros, 400 - MmapV1RecordHeader::HeaderSize, false); // won't fit at end so wraps
- rs.insertRecord(&opCtx, zeros, 120 - MmapV1RecordHeader::HeaderSize, false); // fits at end
- rs.insertRecord(
- &opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false); // fits in earlier hole
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 400 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore(); // won't fit at end so wraps
+ rs.insertRecord(&opCtx, zeros, 120 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore(); // fits at end
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore(); // fits in earlier hole
{
LocAndSize recs[] = {{DiskLoc(0, 1500), 300}, // 2nd insert
@@ -467,34 +477,62 @@ TEST(CappedRecordStoreV1Scrambler, FourDeletedRecordsInSingleExtent) {
// This list of sizes was empirically generated to achieve this outcome. Don't think too
// much about them.
- rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 304 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 56 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 104 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 36 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false);
- rs.insertRecord(&opCtx, zeros, 64 - MmapV1RecordHeader::HeaderSize, false);
+ rs.insertRecord(&opCtx, zeros, 500 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 300 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 304 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 76 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 56 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 104 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 146 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 40 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 36 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 100 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 96 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 200 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 60 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
+ rs.insertRecord(&opCtx, zeros, 64 - MmapV1RecordHeader::HeaderSize, false)
+ .status_with_transitional_ignore();
{
LocAndSize recs[] = {{DiskLoc(0, 1148), 148},
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
index e49ac7c1301..47c85e38974 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple_test.cpp
@@ -444,7 +444,7 @@ TEST(SimpleRecordStoreV1, Truncate) {
ASSERT_EQUALS(em.getExtent(DiskLoc(0, 0))->length, em.minSize());
}
- rs.truncate(&opCtx);
+ rs.truncate(&opCtx).transitional_ignore();
{
LocAndSize recs[] = {{}};
diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp
index 12df12c7765..a257d17037a 100644
--- a/src/mongo/db/storage/record_store_test_harness.cpp
+++ b/src/mongo/db/storage/record_store_test_harness.cpp
@@ -384,7 +384,7 @@ TEST(RecordStoreTestHarness, Truncate1) {
ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- rs->truncate(opCtx.get());
+ rs->truncate(opCtx.get()).transitional_ignore();
uow.commit();
}
}
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 5c034ceedbc..f8014dfabc5 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -78,7 +78,7 @@ TEST(SortedDataInterface, InsertWithDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), true).transitional_ignore();
uow.commit();
}
}
@@ -87,7 +87,7 @@ TEST(SortedDataInterface, InsertWithDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(6, 2), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(6, 2), true).transitional_ignore();
uow.commit();
}
}
@@ -110,7 +110,7 @@ TEST(SortedDataInterface, InsertWithDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -119,7 +119,7 @@ TEST(SortedDataInterface, InsertWithDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true).transitional_ignore();
uow.commit();
}
}
@@ -138,7 +138,7 @@ TEST(SortedDataInterface, InsertWithDups3AndRollback) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -147,7 +147,7 @@ TEST(SortedDataInterface, InsertWithDups3AndRollback) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 20), true).transitional_ignore();
// no commit
}
}
@@ -166,7 +166,8 @@ TEST(SortedDataInterface, InsertNoDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), false);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), false)
+ .transitional_ignore();
uow.commit();
}
}
@@ -175,7 +176,8 @@ TEST(SortedDataInterface, InsertNoDups1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 2), RecordId(5, 20), false);
+ sorted->insert(opCtx.get(), BSON("" << 2), RecordId(5, 20), false)
+ .transitional_ignore();
uow.commit();
}
}
@@ -194,7 +196,7 @@ TEST(SortedDataInterface, InsertNoDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), false);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 2), false).transitional_ignore();
uow.commit();
}
}
@@ -203,7 +205,7 @@ TEST(SortedDataInterface, InsertNoDups2) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 4), false);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 4), false).transitional_ignore();
uow.commit();
}
}
@@ -222,7 +224,7 @@ TEST(SortedDataInterface, Unindex1) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -287,7 +289,7 @@ TEST(SortedDataInterface, Unindex2Rollback) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true);
+ sorted->insert(opCtx.get(), BSON("" << 1), RecordId(5, 18), true).transitional_ignore();
uow.commit();
}
}
@@ -349,7 +351,8 @@ TEST(SortedDataInterface, CursorIterate1WithSaveRestore) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << i), RecordId(5, i * 2), true);
+ sorted->insert(opCtx.get(), BSON("" << i), RecordId(5, i * 2), true)
+ .transitional_ignore();
uow.commit();
}
}
@@ -378,7 +381,8 @@ TEST(SortedDataInterface, CursorIterateAllDupKeysWithSaveRestore) {
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
{
WriteUnitOfWork uow(opCtx.get());
- sorted->insert(opCtx.get(), BSON("" << 5), RecordId(5, i * 2), true);
+ sorted->insert(opCtx.get(), BSON("" << 5), RecordId(5, i * 2), true)
+ .transitional_ignore();
uow.commit();
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 47241f8bbf2..0d15a514950 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -142,7 +142,7 @@ TEST(WiredTigerRecordStoreTest, Isolation1) {
try {
// this should fail
- rs->updateRecord(t2.get(), id1, "c", 2, false, NULL);
+ rs->updateRecord(t2.get(), id1, "c", 2, false, NULL).transitional_ignore();
ASSERT(0);
} catch (WriteConflictException& dle) {
w2.reset(NULL);
@@ -197,7 +197,7 @@ TEST(WiredTigerRecordStoreTest, Isolation2) {
ASSERT_EQUALS(string("a"), rs->dataFor(t2.get(), id1).data());
try {
// this should fail as our version of id1 is too old
- rs->updateRecord(t2.get(), id1, "c", 2, false, NULL);
+ rs->updateRecord(t2.get(), id1, "c", 2, false, NULL).transitional_ignore();
ASSERT(0);
} catch (WriteConflictException& dle) {
}
@@ -358,7 +358,7 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) {
// we make sure we can't find the 2nd until the first is commited
ServiceContext::UniqueOperationContext t1(harnessHelper->newOperationContext());
unique_ptr<WriteUnitOfWork> w1(new WriteUnitOfWork(t1.get()));
- rs->insertRecord(t1.get(), "b", 2, false);
+ rs->insertRecord(t1.get(), "b", 2, false).status_with_transitional_ignore();
// do not commit yet
{ // create 2nd doc
@@ -366,7 +366,7 @@ TEST(WiredTigerRecordStoreTest, CappedOrder) {
auto t2 = harnessHelper->newOperationContext(client2.get());
{
WriteUnitOfWork w2(t2.get());
- rs->insertRecord(t2.get(), "c", 2, false);
+ rs->insertRecord(t2.get(), "c", 2, false).status_with_transitional_ignore();
w2.commit();
}
}
diff --git a/src/mongo/db/update/arithmetic_node_test.cpp b/src/mongo/db/update/arithmetic_node_test.cpp
index 815c45cc70c..924d66a8076 100644
--- a/src/mongo/db/update/arithmetic_node_test.cpp
+++ b/src/mongo/db/update/arithmetic_node_test.cpp
@@ -47,7 +47,7 @@ DEATH_TEST(ArithmeticNodeTest, InitFailsForEmptyElement, "Invariant failure modE
auto update = fromjson("{$inc: {}}");
const CollatorInterface* collator = nullptr;
ArithmeticNode node(ArithmeticNode::ArithmeticOp::kAdd);
- node.init(update["$inc"].embeddedObject().firstElement(), collator);
+ node.init(update["$inc"].embeddedObject().firstElement(), collator).transitional_ignore();
}
TEST(ArithmeticNodeTest, InitSucceedsForNumberIntElement) {
@@ -1742,7 +1742,7 @@ TEST(ArithmeticNodeTest, ApplyDeserializedDocNotNoOp) {
Document doc(fromjson("{a: 1}"));
// De-serialize the int.
- doc.root()["a"].setValueInt(1);
+ doc.root()["a"].setValueInt(1).transitional_ignore();
FieldRef pathToCreate("b");
FieldRef pathTaken("");
@@ -1777,7 +1777,7 @@ TEST(ArithmeticNodeTest, ApplyToDeserializedDocNoOp) {
Document doc(fromjson("{a: 1}"));
// De-serialize the int.
- doc.root()["a"].setValueInt(2);
+ doc.root()["a"].setValueInt(2).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a");
@@ -1812,7 +1812,7 @@ TEST(ArithmeticNodeTest, ApplyToDeserializedDocNestedNoop) {
Document doc{BSONObj()};
// De-serialize the int.
- doc.root().appendObject("a", BSON("b" << static_cast<int>(1)));
+ doc.root().appendObject("a", BSON("b" << static_cast<int>(1))).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a.b");
@@ -1847,7 +1847,7 @@ TEST(ArithmeticNodeTest, ApplyToDeserializedDocNestedNotNoop) {
Document doc{BSONObj()};
// De-serialize the int.
- doc.root().appendObject("a", BSON("b" << static_cast<int>(1)));
+ doc.root().appendObject("a", BSON("b" << static_cast<int>(1))).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a.b");
diff --git a/src/mongo/db/update/path_support_test.cpp b/src/mongo/db/update/path_support_test.cpp
index 5ea820bbf7b..49675d09ac3 100644
--- a/src/mongo/db/update/path_support_test.cpp
+++ b/src/mongo/db/update/path_support_test.cpp
@@ -500,7 +500,7 @@ TEST_F(ArrayDoc, ExcessivePaddingNotRequestedIfArrayAlreadyPadded) {
Element arrayA = doc().root().leftChild();
ASSERT_EQ(arrayA.getFieldName(), "a");
ASSERT_EQ(arrayA.getType(), mongo::Array);
- arrayA.appendInt("", 1);
+ arrayA.appendInt("", 1).transitional_ignore();
}
size_t idxFound;
diff --git a/src/mongo/db/update/set_node_test.cpp b/src/mongo/db/update/set_node_test.cpp
index 8c7850f6000..5b0b8b29398 100644
--- a/src/mongo/db/update/set_node_test.cpp
+++ b/src/mongo/db/update/set_node_test.cpp
@@ -47,7 +47,7 @@ DEATH_TEST(SetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.ok(
auto update = fromjson("{$set: {}}");
const CollatorInterface* collator = nullptr;
SetNode node;
- node.init(update["$set"].embeddedObject().firstElement(), collator);
+ node.init(update["$set"].embeddedObject().firstElement(), collator).transitional_ignore();
}
TEST(SetNodeTest, InitSucceedsForNonemptyElement) {
@@ -387,7 +387,7 @@ TEST(SetNodeTest, IdentityOpOnDeserializedIsNotANoOp) {
Document doc(fromjson("{a: { b: NumberInt(0)}}"));
// Apply a mutation to the document that will make it non-serialized.
- doc.root()["a"]["b"].setValueInt(2);
+ doc.root()["a"]["b"].setValueInt(2).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("a");
@@ -1724,9 +1724,9 @@ TEST(SetNodeTest, ApplySetModToEphemeralDocument) {
Document doc;
Element x = doc.makeElementObject("x");
- doc.root().pushBack(x);
+ doc.root().pushBack(x).transitional_ignore();
Element a = doc.makeElementInt("a", 100);
- x.pushBack(a);
+ x.pushBack(a).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("x");
diff --git a/src/mongo/db/update/unset_node_test.cpp b/src/mongo/db/update/unset_node_test.cpp
index b62a7501e2d..0dcbf3f4776 100644
--- a/src/mongo/db/update/unset_node_test.cpp
+++ b/src/mongo/db/update/unset_node_test.cpp
@@ -47,7 +47,7 @@ DEATH_TEST(UnsetNodeTest, InitFailsForEmptyElement, "Invariant failure modExpr.o
auto update = fromjson("{$unset: {}}");
const CollatorInterface* collator = nullptr;
UnsetNode node;
- node.init(update["$unset"].embeddedObject().firstElement(), collator);
+ node.init(update["$unset"].embeddedObject().firstElement(), collator).transitional_ignore();
}
DEATH_TEST(UnsetNodeTest, ApplyToRootFails, "Invariant failure parent.ok()") {
diff --git a/src/mongo/db/update/update_array_node_test.cpp b/src/mongo/db/update/update_array_node_test.cpp
index 4bb95967113..1eb5d657ae1 100644
--- a/src/mongo/db/update/update_array_node_test.cpp
+++ b/src/mongo/db/update/update_array_node_test.cpp
@@ -189,8 +189,8 @@ DEATH_TEST(UpdateArrayNodeTest,
foundIdentifiers));
Document doc(fromjson("{a: [{c: 0}, {c: 0}, {c: 1}]}"));
- doc.root()["a"]["1"]["c"].setValueInt(1);
- doc.root()["a"]["2"]["c"].setValueInt(0);
+ doc.root()["a"]["1"]["c"].setValueInt(1).transitional_ignore();
+ doc.root()["a"]["2"]["c"].setValueInt(0).transitional_ignore();
FieldRef pathToCreate("");
FieldRef pathTaken("");
StringData matchedField;
diff --git a/src/mongo/db/update/update_driver_test.cpp b/src/mongo/db/update/update_driver_test.cpp
index b2e98556d16..e9b92433142 100644
--- a/src/mongo/db/update/update_driver_test.cpp
+++ b/src/mongo/db/update/update_driver_test.cpp
@@ -147,7 +147,7 @@ TEST(Collator, SetCollationUpdatesModifierInterfaces) {
bool modified = false;
Document doc(fromjson("{a: 'cba'}"));
driver.setCollator(&collator);
- driver.update(StringData(), &doc, nullptr, nullptr, &modified);
+ driver.update(StringData(), &doc, nullptr, nullptr, &modified).transitional_ignore();
ASSERT_TRUE(modified);
}
@@ -164,8 +164,8 @@ public:
CreateFromQueryFixture()
: _driverOps(new UpdateDriver(UpdateDriver::Options())),
_driverRepl(new UpdateDriver(UpdateDriver::Options())) {
- _driverOps->parse(fromjson("{$set:{'_':1}}"));
- _driverRepl->parse(fromjson("{}"));
+ _driverOps->parse(fromjson("{$set:{'_':1}}")).transitional_ignore();
+ _driverRepl->parse(fromjson("{}")).transitional_ignore();
_opCtx = _serviceContext.makeOperationContext();
}
diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp
index 3e284372735..ca25984533a 100644
--- a/src/mongo/db/views/view_catalog_test.cpp
+++ b/src/mongo/db/views/view_catalog_test.cpp
@@ -141,7 +141,8 @@ TEST_F(ViewCatalogFixture, CreateViewWithPipelineFailsOnInvalidStageName) {
auto invalidPipeline = BSON_ARRAY(BSON("INVALID_STAGE_NAME" << 1));
ASSERT_THROWS(
- viewCatalog.createView(opCtx.get(), viewName, viewOn, invalidPipeline, emptyCollation),
+ viewCatalog.createView(opCtx.get(), viewName, viewOn, invalidPipeline, emptyCollation)
+ .transitional_ignore(),
UserException);
}
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
index 7fd12a2ce74..9f3cf21c73b 100644
--- a/src/mongo/dbtests/counttests.cpp
+++ b/src/mongo/dbtests/counttests.cpp
@@ -51,7 +51,7 @@ public:
WriteUnitOfWork wunit(&_opCtx);
_collection = _database->getCollection(&_opCtx, ns());
if (_collection) {
- _database->dropCollection(&_opCtx, ns());
+ _database->dropCollection(&_opCtx, ns()).transitional_ignore();
}
_collection = _database->createCollection(&_opCtx, ns());
wunit.commit();
@@ -87,9 +87,9 @@ protected:
oid.init();
b.appendOID("_id", &oid);
b.appendElements(o);
- _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false).transitional_ignore();
} else {
- _collection->insertDocument(&_opCtx, o, nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, o, nullOpDebug, false).transitional_ignore();
}
wunit.commit();
}
diff --git a/src/mongo/dbtests/cursor_manager_test.cpp b/src/mongo/dbtests/cursor_manager_test.cpp
index 9b0d495e5df..6abd9844de7 100644
--- a/src/mongo/dbtests/cursor_manager_test.cpp
+++ b/src/mongo/dbtests/cursor_manager_test.cpp
@@ -380,7 +380,7 @@ TEST_F(CursorManagerTest, UsingACursorShouldUpdateTimeOfLastUse) {
clock->advance(Milliseconds(1));
// Touch the cursor with id 'usedCursorId' to advance its time of last use.
- cursorManager->pinCursor(_opCtx.get(), usedCursorId);
+ cursorManager->pinCursor(_opCtx.get(), usedCursorId).status_with_transitional_ignore();
// We should be able to time out the unused cursor, but the one we used should stay alive.
ASSERT_EQ(2UL, cursorManager->numCursors());
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 04bce2a1b38..0696e151858 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -56,7 +56,7 @@ public:
OldClientContext ctx(&opCtx, _ns);
WriteUnitOfWork wuow(&opCtx);
- _db->dropCollection(&opCtx, _ns);
+ _db->dropCollection(&opCtx, _ns).transitional_ignore();
wuow.commit();
}
@@ -67,8 +67,8 @@ public:
int numFinishedIndexesStart = _catalog->numIndexesReady(&opCtx);
- dbtests::createIndex(&opCtx, _ns, BSON("x" << 1));
- dbtests::createIndex(&opCtx, _ns, BSON("y" << 1));
+ dbtests::createIndex(&opCtx, _ns, BSON("x" << 1)).transitional_ignore();
+ dbtests::createIndex(&opCtx, _ns, BSON("y" << 1)).transitional_ignore();
ASSERT_TRUE(_catalog->numIndexesReady(&opCtx) == numFinishedIndexesStart + 2);
@@ -123,7 +123,7 @@ public:
OldClientContext ctx(&opCtx, _ns);
WriteUnitOfWork wuow(&opCtx);
- _db->dropCollection(&opCtx, _ns);
+ _db->dropCollection(&opCtx, _ns).transitional_ignore();
wuow.commit();
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 4cc101e0dac..b30677d8d64 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -108,7 +108,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
@@ -116,12 +116,14 @@ public:
BSON("_id" << 1 << "a"
<< "dup"),
nullOpDebug,
- true);
+ true)
+ .transitional_ignore();
coll->insertDocument(&_opCtx,
BSON("_id" << 2 << "a"
<< "dup"),
nullOpDebug,
- true);
+ true)
+ .transitional_ignore();
wunit.commit();
}
@@ -162,7 +164,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
@@ -170,12 +172,14 @@ public:
BSON("_id" << 1 << "a"
<< "dup"),
nullOpDebug,
- true);
+ true)
+ .transitional_ignore();
coll->insertDocument(&_opCtx,
BSON("_id" << 2 << "a"
<< "dup"),
nullOpDebug,
- true);
+ true)
+ .transitional_ignore();
wunit.commit();
}
@@ -215,7 +219,7 @@ public:
RecordId loc2;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
OpDebug* const nullOpDebug = nullptr;
@@ -275,7 +279,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
// Drop all indexes including id index.
coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
@@ -283,7 +287,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true)
+ .transitional_ignore();
}
wunit.commit();
}
@@ -311,14 +316,15 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
coll = db->createCollection(&_opCtx, _ns);
coll->getIndexCatalog()->dropAllIndexes(&_opCtx, true);
// Insert some documents.
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("a" << i), nullOpDebug, true)
+ .transitional_ignore();
}
wunit.commit();
}
@@ -346,7 +352,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
@@ -356,7 +362,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true)
+ .transitional_ignore();
}
wunit.commit();
}
@@ -384,7 +391,7 @@ public:
Collection* coll;
{
WriteUnitOfWork wunit(&_opCtx);
- db->dropCollection(&_opCtx, _ns);
+ db->dropCollection(&_opCtx, _ns).transitional_ignore();
CollectionOptions options;
options.capped = true;
options.cappedSize = 10 * 1024;
@@ -394,7 +401,8 @@ public:
int32_t nDocs = 1000;
OpDebug* const nullOpDebug = nullptr;
for (int32_t i = 0; i < nDocs; ++i) {
- coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, BSON("_id" << i), nullOpDebug, true)
+ .transitional_ignore();
}
wunit.commit();
}
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index b9ad16ce17b..9acec0370e0 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -162,7 +162,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
<< "key"
<< keyPattern
<< "v"
- << static_cast<int>(kIndexVersion)));
+ << static_cast<int>(kIndexVersion)))
+ .transitional_ignore();
assertMultikeyPaths(collection, keyPattern, {std::set<size_t>{}, {0U}});
}
@@ -198,7 +199,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
<< "key"
<< keyPattern
<< "v"
- << static_cast<int>(kIndexVersion)));
+ << static_cast<int>(kIndexVersion)))
+ .transitional_ignore();
assertMultikeyPaths(collection, keyPattern, {{0U}, {0U}});
}
@@ -217,7 +219,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
<< "key"
<< keyPattern
<< "v"
- << static_cast<int>(kIndexVersion)));
+ << static_cast<int>(kIndexVersion)))
+ .transitional_ignore();
{
WriteUnitOfWork wuow(_opCtx.get());
@@ -262,7 +265,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
<< "key"
<< keyPattern
<< "v"
- << static_cast<int>(kIndexVersion)));
+ << static_cast<int>(kIndexVersion)))
+ .transitional_ignore();
{
WriteUnitOfWork wuow(_opCtx.get());
@@ -287,15 +291,16 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
const bool indexesAffected = true;
OpDebug* opDebug = nullptr;
OplogUpdateEntryArgs args;
- collection->updateDocument(
- _opCtx.get(),
- record->id,
- oldDoc,
- BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3)),
- enforceQuota,
- indexesAffected,
- opDebug,
- &args);
+ collection
+ ->updateDocument(_opCtx.get(),
+ record->id,
+ oldDoc,
+ BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3)),
+ enforceQuota,
+ indexesAffected,
+ opDebug,
+ &args)
+ .status_with_transitional_ignore();
wuow.commit();
}
}
@@ -317,7 +322,8 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
<< "key"
<< keyPattern
<< "v"
- << static_cast<int>(kIndexVersion)));
+ << static_cast<int>(kIndexVersion)))
+ .transitional_ignore();
{
WriteUnitOfWork wuow(_opCtx.get());
@@ -363,7 +369,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
<< "key"
<< keyPatternAB
<< "v"
- << static_cast<int>(kIndexVersion)));
+ << static_cast<int>(kIndexVersion)))
+ .transitional_ignore();
BSONObj keyPatternAC = BSON("a" << 1 << "c" << 1);
createIndex(collection,
@@ -374,7 +381,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
<< "key"
<< keyPatternAC
<< "v"
- << static_cast<int>(kIndexVersion)));
+ << static_cast<int>(kIndexVersion)))
+ .transitional_ignore();
{
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index b853873a115..6aa07cb3414 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -603,7 +603,7 @@ public:
Collection* droppedColl;
droppedColl = db->createCollection(&opCtx, droppedName);
ASSERT_EQUALS(db->getCollection(&opCtx, droppedName), droppedColl);
- db->dropCollection(&opCtx, droppedName);
+ db->dropCollection(&opCtx, droppedName).transitional_ignore();
wunit.commit();
}
@@ -616,7 +616,7 @@ public:
Collection* rolledBackColl = db->createCollection(&opCtx, rolledBackName);
wunit.commit();
ASSERT_EQUALS(db->getCollection(&opCtx, rolledBackName), rolledBackColl);
- db->dropCollection(&opCtx, rolledBackName);
+ db->dropCollection(&opCtx, rolledBackName).transitional_ignore();
// not committing so dropping should be rolled back
}
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
index bbf98ca2cc6..98641b42627 100644
--- a/src/mongo/dbtests/pdfiletests.cpp
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -50,7 +50,7 @@ public:
if (!collection())
return;
WriteUnitOfWork wunit(&_opCtx);
- _context.db()->dropCollection(&_opCtx, ns());
+ _context.db()->dropCollection(&_opCtx, ns()).transitional_ignore();
wunit.commit();
}
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index c376bd605c3..a1750c90154 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -135,7 +135,7 @@ public:
// This is what sets a backup plan, should we test for it.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD,
_opCtx.getServiceContext()->getFastClockSource());
- _mps->pickBestPlan(&yieldPolicy);
+ _mps->pickBestPlan(&yieldPolicy).transitional_ignore();
ASSERT(_mps->bestPlanChosen());
size_t bestPlanIdx = _mps->bestPlanIdx();
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index 1f7cc5abcf3..7aedbd9dd00 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -85,7 +85,7 @@ public:
}
WriteUnitOfWork wuow(&_opCtx);
- database->dropCollection(&_opCtx, nss.ns());
+ database->dropCollection(&_opCtx, nss.ns()).transitional_ignore();
wuow.commit();
}
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index c202c8308c6..3ff097f8cc8 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -64,17 +64,18 @@ public:
virtual void setup() {
WriteUnitOfWork wunit(&_opCtx);
- _ctx.db()->dropCollection(&_opCtx, ns());
+ _ctx.db()->dropCollection(&_opCtx, ns()).transitional_ignore();
_coll = _ctx.db()->createCollection(&_opCtx, ns());
- _coll->getIndexCatalog()->createIndexOnEmptyCollection(&_opCtx,
- BSON("key" << BSON("x" << 1)
- << "name"
- << "x_1"
- << "ns"
- << ns()
- << "v"
- << 1));
+ _coll->getIndexCatalog()
+ ->createIndexOnEmptyCollection(&_opCtx,
+ BSON("key" << BSON("x" << 1) << "name"
+ << "x_1"
+ << "ns"
+ << ns()
+ << "v"
+ << 1))
+ .status_with_transitional_ignore();
for (int i = 0; i < kDocuments; i++) {
insert(BSON(GENOID << "x" << i));
@@ -107,7 +108,7 @@ public:
void insert(const BSONObj& doc) {
WriteUnitOfWork wunit(&_opCtx);
OpDebug* const nullOpDebug = nullptr;
- _coll->insertDocument(&_opCtx, doc, nullOpDebug, false);
+ _coll->insertDocument(&_opCtx, doc, nullOpDebug, false).transitional_ignore();
wunit.commit();
}
@@ -123,14 +124,16 @@ public:
BSONObj oldDoc = _coll->getRecordStore()->dataFor(&_opCtx, oldrecordId).releaseToBson();
OplogUpdateEntryArgs args;
args.nss = _coll->ns();
- _coll->updateDocument(&_opCtx,
- oldrecordId,
- Snapshotted<BSONObj>(_opCtx.recoveryUnit()->getSnapshotId(), oldDoc),
- newDoc,
- false,
- true,
- NULL,
- &args);
+ _coll
+ ->updateDocument(&_opCtx,
+ oldrecordId,
+ Snapshotted<BSONObj>(_opCtx.recoveryUnit()->getSnapshotId(), oldDoc),
+ newDoc,
+ false,
+ true,
+ NULL,
+ &args)
+ .status_with_transitional_ignore();
wunit.commit();
}
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index b2727d223d8..79c5b112bf3 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -53,7 +53,7 @@ public:
virtual void setup() {
WriteUnitOfWork wunit(&_opCtx);
- _ctx.db()->dropCollection(&_opCtx, ns());
+ _ctx.db()->dropCollection(&_opCtx, ns()).transitional_ignore();
_coll = _ctx.db()->createCollection(&_opCtx, ns());
ASSERT_OK(_coll->getIndexCatalog()->createIndexOnEmptyCollection(
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index 703eee3c7c9..3bc8dd83bee 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -188,7 +188,7 @@ public:
// Plan 0 aka the first plan aka the index scan should be the best.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
- mps->pickBestPlan(&yieldPolicy);
+ mps->pickBestPlan(&yieldPolicy).transitional_ignore();
ASSERT(mps->bestPlanChosen());
ASSERT_EQUALS(0, mps->bestPlanIdx());
@@ -272,7 +272,7 @@ public:
// This sets a backup plan.
PlanYieldPolicy yieldPolicy(PlanExecutor::NO_YIELD, _clock);
- mps->pickBestPlan(&yieldPolicy);
+ mps->pickBestPlan(&yieldPolicy).transitional_ignore();
ASSERT(mps->bestPlanChosen());
ASSERT(mps->hasBackupPlan());
@@ -423,7 +423,7 @@ public:
ASSERT_EQ(exec->getRootStage()->stageType(), STAGE_MULTI_PLAN);
- exec->executePlan();
+ exec->executePlan().transitional_ignore();
PlanSummaryStats stats;
Explain::getSummaryStats(*exec, &stats);
diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp
index 40f03c7a8c4..efe7c4c5ece 100644
--- a/src/mongo/dbtests/query_stage_sort.cpp
+++ b/src/mongo/dbtests/query_stage_sort.cpp
@@ -360,7 +360,8 @@ public:
args.nss = coll->ns();
{
WriteUnitOfWork wuow(&_opCtx);
- coll->updateDocument(&_opCtx, *it, oldDoc, newDoc, false, false, NULL, &args);
+ coll->updateDocument(&_opCtx, *it, oldDoc, newDoc, false, false, NULL, &args)
+ .status_with_transitional_ignore();
wuow.commit();
}
exec->restoreState();
@@ -378,7 +379,8 @@ public:
oldDoc = coll->docFor(&_opCtx, *it);
{
WriteUnitOfWork wuow(&_opCtx);
- coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc, false, false, NULL, &args);
+ coll->updateDocument(&_opCtx, *it++, oldDoc, newDoc, false, false, NULL, &args)
+ .status_with_transitional_ignore();
wuow.commit();
}
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 101aa23f552..57103f31874 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -66,7 +66,7 @@ public:
_database = _context.db();
_collection = _database->getCollection(&_opCtx, ns());
if (_collection) {
- _database->dropCollection(&_opCtx, ns());
+ _database->dropCollection(&_opCtx, ns()).transitional_ignore();
}
_collection = _database->createCollection(&_opCtx, ns());
wunit.commit();
@@ -109,9 +109,9 @@ protected:
oid.init();
b.appendOID("_id", &oid);
b.appendElements(o);
- _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false).transitional_ignore();
} else {
- _collection->insertDocument(&_opCtx, o, nullOpDebug, false);
+ _collection->insertDocument(&_opCtx, o, nullOpDebug, false).transitional_ignore();
}
wunit.commit();
}
@@ -193,7 +193,7 @@ public:
Database* db = ctx.db();
if (db->getCollection(&_opCtx, ns())) {
_collection = NULL;
- db->dropCollection(&_opCtx, ns());
+ db->dropCollection(&_opCtx, ns()).transitional_ignore();
}
_collection = db->createCollection(&_opCtx, ns(), CollectionOptions(), false);
wunit.commit();
diff --git a/src/mongo/dbtests/replica_set_tests.cpp b/src/mongo/dbtests/replica_set_tests.cpp
index 91fa21f8800..e70a221fa57 100644
--- a/src/mongo/dbtests/replica_set_tests.cpp
+++ b/src/mongo/dbtests/replica_set_tests.cpp
@@ -93,14 +93,16 @@ TEST_F(ReplicaSetTest, ReplCoordExternalStateStoresLastVoteWithNewTerm) {
auto opCtx = makeOpCtx();
auto replCoordExternalState = getReplCoordExternalState();
- replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1})
+ .transitional_ignore();
auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
- replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{3, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{3, 1})
+ .transitional_ignore();
lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
@@ -112,14 +114,16 @@ TEST_F(ReplicaSetTest, ReplCoordExternalStateDoesNotStoreLastVoteWithOldTerm) {
auto opCtx = makeOpCtx();
auto replCoordExternalState = getReplCoordExternalState();
- replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1})
+ .transitional_ignore();
auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
- replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{1, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{1, 1})
+ .transitional_ignore();
lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
@@ -131,14 +135,16 @@ TEST_F(ReplicaSetTest, ReplCoordExternalStateDoesNotStoreLastVoteWithEqualTerm)
auto opCtx = makeOpCtx();
auto replCoordExternalState = getReplCoordExternalState();
- replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 1})
+ .transitional_ignore();
auto lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
ASSERT_EQ(lastVote.getValue().getTerm(), 2);
ASSERT_EQ(lastVote.getValue().getCandidateIndex(), 1);
- replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 2});
+ replCoordExternalState->storeLocalLastVoteDocument(opCtx.get(), repl::LastVote{2, 2})
+ .transitional_ignore();
lastVote = replCoordExternalState->loadLocalLastVoteDocument(opCtx.get());
ASSERT_OK(lastVote.getStatus());
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index d967bfdb9ff..9cb4b18af93 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -235,7 +235,7 @@ protected:
OpDebug* const nullOpDebug = nullptr;
if (o.hasField("_id")) {
repl::UnreplicatedWritesBlock uwb(&_opCtx);
- coll->insertDocument(&_opCtx, o, nullOpDebug, true);
+ coll->insertDocument(&_opCtx, o, nullOpDebug, true).transitional_ignore();
wunit.commit();
return;
}
@@ -246,7 +246,7 @@ protected:
b.appendOID("_id", &id);
b.appendElements(o);
repl::UnreplicatedWritesBlock uwb(&_opCtx);
- coll->insertDocument(&_opCtx, b.obj(), nullOpDebug, true);
+ coll->insertDocument(&_opCtx, b.obj(), nullOpDebug, true).transitional_ignore();
wunit.commit();
}
static BSONObj wid(const char* json) {
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index 3bd85e9925f..546b3d7a007 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -574,7 +574,8 @@ public:
<< "background"
<< false
<< "partialFilterExpression"
- << BSON("a" << BSON("$eq" << 2)))),
+ << BSON("a" << BSON("$eq" << 2))))
+ .transitional_ignore(),
UserException);
// Create a partial geo index that does not index the document.
diff --git a/src/mongo/executor/connection_pool_asio_integration_test.cpp b/src/mongo/executor/connection_pool_asio_integration_test.cpp
index 6c60cd78ecd..da245a03fd4 100644
--- a/src/mongo/executor/connection_pool_asio_integration_test.cpp
+++ b/src/mongo/executor/connection_pool_asio_integration_test.cpp
@@ -108,9 +108,10 @@ TEST(ConnectionPoolASIO, TestPing) {
RemoteCommandRequest request{
fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr};
net.startCommand(
- makeCallbackHandle(), request, [&deferred](RemoteCommandResponse resp) {
- deferred.emplace(std::move(resp));
- });
+ makeCallbackHandle(),
+ request,
+ [&deferred](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); })
+ .transitional_ignore();
ASSERT_OK(deferred.get().status);
});
@@ -143,9 +144,10 @@ TEST(ConnectionPoolASIO, TestHostTimeoutRace) {
Deferred<RemoteCommandResponse> deferred;
RemoteCommandRequest request{
fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr};
- net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) {
- deferred.emplace(std::move(resp));
- });
+ net.startCommand(makeCallbackHandle(),
+ request,
+ [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); })
+ .transitional_ignore();
ASSERT_OK(deferred.get().status);
sleepmillis(1);
@@ -171,9 +173,10 @@ TEST(ConnectionPoolASIO, ConnSetupTimeout) {
Deferred<RemoteCommandResponse> deferred;
RemoteCommandRequest request{
fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr};
- net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) {
- deferred.emplace(std::move(resp));
- });
+ net.startCommand(makeCallbackHandle(),
+ request,
+ [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); })
+ .transitional_ignore();
ASSERT_EQ(deferred.get().status.code(), ErrorCodes::ExceededTimeLimit);
}
@@ -206,9 +209,10 @@ TEST(ConnectionPoolASIO, ConnRefreshHappens) {
nullptr};
for (auto& deferred : deferreds) {
- net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) {
- deferred.emplace(std::move(resp));
- });
+ net.startCommand(makeCallbackHandle(),
+ request,
+ [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); })
+ .transitional_ignore();
}
for (auto& deferred : deferreds) {
@@ -243,9 +247,10 @@ TEST(ConnectionPoolASIO, ConnRefreshSurvivesFailure) {
RemoteCommandRequest request{
fixture.getServers()[0], "admin", BSON("ping" << 1), BSONObj(), nullptr};
- net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) {
- deferred.emplace(std::move(resp));
- });
+ net.startCommand(makeCallbackHandle(),
+ request,
+ [&](RemoteCommandResponse resp) { deferred.emplace(std::move(resp)); })
+ .transitional_ignore();
deferred.get();
@@ -301,11 +306,14 @@ TEST(ConnectionPoolASIO, ConnSetupSurvivesFailure) {
<< 3),
BSONObj(),
nullptr};
- net.startCommand(makeCallbackHandle(), request, [&](RemoteCommandResponse resp) {
- if (!unfinished.subtractAndFetch(1)) {
- condvar.notify_one();
- }
- });
+ net.startCommand(makeCallbackHandle(),
+ request,
+ [&](RemoteCommandResponse resp) {
+ if (!unfinished.subtractAndFetch(1)) {
+ condvar.notify_one();
+ }
+ })
+ .transitional_ignore();
unstarted.subtractAndFetch(1);
}
});
@@ -313,7 +321,7 @@ TEST(ConnectionPoolASIO, ConnSetupSurvivesFailure) {
stdx::thread timerThrasher([&] {
while (unstarted.load()) {
- net.setAlarm(Date_t::now() + Seconds(1), [] {});
+ net.setAlarm(Date_t::now() + Seconds(1), [] {}).transitional_ignore();
}
});
diff --git a/src/mongo/executor/network_interface_asio_integration_fixture.cpp b/src/mongo/executor/network_interface_asio_integration_fixture.cpp
index 76f0e451a71..718904b3acb 100644
--- a/src/mongo/executor/network_interface_asio_integration_fixture.cpp
+++ b/src/mongo/executor/network_interface_asio_integration_fixture.cpp
@@ -87,15 +87,18 @@ void NetworkInterfaceASIOIntegrationFixture::startCommand(
const TaskExecutor::CallbackHandle& cbHandle,
RemoteCommandRequest& request,
StartCommandCB onFinish) {
- net().startCommand(cbHandle, request, onFinish);
+ net().startCommand(cbHandle, request, onFinish).transitional_ignore();
}
Deferred<RemoteCommandResponse> NetworkInterfaceASIOIntegrationFixture::runCommand(
const TaskExecutor::CallbackHandle& cbHandle, RemoteCommandRequest& request) {
Deferred<RemoteCommandResponse> deferred;
- net().startCommand(cbHandle, request, [deferred](RemoteCommandResponse resp) mutable {
- deferred.emplace(std::move(resp));
- });
+ net()
+ .startCommand(
+ cbHandle,
+ request,
+ [deferred](RemoteCommandResponse resp) mutable { deferred.emplace(std::move(resp)); })
+ .transitional_ignore();
return deferred;
}
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index bf45981490b..32dd7f49e3d 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -308,7 +308,8 @@ void NetworkInterfaceMock::scheduleResponse(NetworkOperationIterator noi,
// If no RemoteCommandResponse was returned (for example, on a simulated network error), then
// do not attempt to run the metadata hook, since there is no returned metadata.
if (_metadataHook && response.isOK()) {
- _metadataHook->readReplyMetadata(noi->getRequest().target.toString(), response.metadata);
+ _metadataHook->readReplyMetadata(noi->getRequest().target.toString(), response.metadata)
+ .transitional_ignore();
}
noi->setResponse(when, response);
diff --git a/src/mongo/executor/network_interface_perf_test.cpp b/src/mongo/executor/network_interface_perf_test.cpp
index 0ec9966dd56..1159844a06b 100644
--- a/src/mongo/executor/network_interface_perf_test.cpp
+++ b/src/mongo/executor/network_interface_perf_test.cpp
@@ -87,7 +87,7 @@ int timeNetworkTestMillis(std::size_t operations, NetworkInterface* net) {
func = [&]() {
RemoteCommandRequest request{
server, "admin", bsonObjPing, BSONObj(), nullptr, Milliseconds(-1)};
- net->startCommand(makeCallbackHandle(), request, callback);
+ net->startCommand(makeCallbackHandle(), request, callback).transitional_ignore();
};
func();
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index f556b2aab35..1e26b02f2a9 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -132,11 +132,13 @@ void NetworkInterfaceThreadPool::consumeTasks(stdx::unique_lock<stdx::mutex> lk)
if (!_registeredAlarm) {
_registeredAlarm = true;
lk.unlock();
- _net->setAlarm(_net->now(), [this] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _registeredAlarm = false;
- consumeTasks(std::move(lk));
- });
+ _net->setAlarm(_net->now(),
+ [this] {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _registeredAlarm = false;
+ consumeTasks(std::move(lk));
+ })
+ .transitional_ignore();
}
return;
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index 94984a32381..7532dc09569 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -252,8 +252,10 @@ EventChainAndWaitingTest::~EventChainAndWaitingTest() {
}
void EventChainAndWaitingTest::run() {
- executor->onEvent(goEvent,
- stdx::bind(&EventChainAndWaitingTest::onGo, this, stdx::placeholders::_1));
+ executor
+ ->onEvent(goEvent,
+ stdx::bind(&EventChainAndWaitingTest::onGo, this, stdx::placeholders::_1))
+ .status_with_transitional_ignore();
executor->signalEvent(goEvent);
executor->waitForEvent(goEvent);
executor->waitForEvent(event2);
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index dbcf465f8d8..f5a33b8e451 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -318,17 +318,20 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
return cbHandle;
}
lk.unlock();
- _net->setAlarm(when, [this, when, cbHandle] {
- auto cbState = checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle.getValue()));
- if (cbState->canceled.load()) {
- return;
- }
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (cbState->canceled.load()) {
- return;
- }
- scheduleIntoPool_inlock(&_sleepersQueue, cbState->iter, std::move(lk));
- });
+ _net->setAlarm(when,
+ [this, when, cbHandle] {
+ auto cbState =
+ checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle.getValue()));
+ if (cbState->canceled.load()) {
+ return;
+ }
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (cbState->canceled.load()) {
+ return;
+ }
+ scheduleIntoPool_inlock(&_sleepersQueue, cbState->iter, std::move(lk));
+ })
+ .transitional_ignore();
return cbHandle;
}
@@ -384,22 +387,24 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
LOG(3) << "Scheduling remote command request: " << redact(scheduledRequest.toString());
lk.unlock();
_net->startCommand(
- cbHandle.getValue(),
- scheduledRequest,
- [this, scheduledRequest, cbState, cb](const ResponseStatus& response) {
- using std::swap;
- CallbackFn newCb = [cb, scheduledRequest, response](const CallbackArgs& cbData) {
- remoteCommandFinished(cbData, cb, scheduledRequest, response);
- };
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (_inShutdown_inlock()) {
- return;
- }
- LOG(3) << "Received remote response: "
- << redact(response.isOK() ? response.toString() : response.status.toString());
- swap(cbState->callback, newCb);
- scheduleIntoPool_inlock(&_networkInProgressQueue, cbState->iter, std::move(lk));
- });
+ cbHandle.getValue(),
+ scheduledRequest,
+ [this, scheduledRequest, cbState, cb](const ResponseStatus& response) {
+ using std::swap;
+ CallbackFn newCb = [cb, scheduledRequest, response](const CallbackArgs& cbData) {
+ remoteCommandFinished(cbData, cb, scheduledRequest, response);
+ };
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (_inShutdown_inlock()) {
+ return;
+ }
+ LOG(3) << "Received remote response: "
+ << redact(response.isOK() ? response.toString()
+ : response.status.toString());
+ swap(cbState->callback, newCb);
+ scheduleIntoPool_inlock(&_networkInProgressQueue, cbState->iter, std::move(lk));
+ })
+ .transitional_ignore();
return cbHandle;
}
diff --git a/src/mongo/logger/log_test.cpp b/src/mongo/logger/log_test.cpp
index 5be29f5d74b..122820f0fef 100644
--- a/src/mongo/logger/log_test.cpp
+++ b/src/mongo/logger/log_test.cpp
@@ -91,17 +91,20 @@ TEST_F(LogTestUnadornedEncoder, DetachAppender) {
MessageLogDomain domain;
// Appending to the domain before attaching the appender does not affect the appender.
- domain.append(MessageEventEphemeral(Date_t(), LogSeverity::Log(), "", "1"));
+ domain.append(MessageEventEphemeral(Date_t(), LogSeverity::Log(), "", "1"))
+ .transitional_ignore();
ASSERT_EQUALS(0, dynamic_cast<CountAppender*>(countAppender.get())->getCount());
// Appending to the domain after attaching the appender does affect the appender.
MessageLogDomain::AppenderHandle handle = domain.attachAppender(std::move(countAppender));
- domain.append(MessageEventEphemeral(Date_t(), LogSeverity::Log(), "", "2"));
+ domain.append(MessageEventEphemeral(Date_t(), LogSeverity::Log(), "", "2"))
+ .transitional_ignore();
countAppender = domain.detachAppender(handle);
ASSERT_EQUALS(1, dynamic_cast<CountAppender*>(countAppender.get())->getCount());
// Appending to the domain after detaching the appender does not affect the appender.
- domain.append(MessageEventEphemeral(Date_t(), LogSeverity::Log(), "", "3"));
+ domain.append(MessageEventEphemeral(Date_t(), LogSeverity::Log(), "", "3"))
+ .transitional_ignore();
ASSERT_EQUALS(1, dynamic_cast<CountAppender*>(countAppender.get())->getCount());
}
diff --git a/src/mongo/logger/logstream_builder.cpp b/src/mongo/logger/logstream_builder.cpp
index 2378558bd4a..f0dfbaeebe9 100644
--- a/src/mongo/logger/logstream_builder.cpp
+++ b/src/mongo/logger/logstream_builder.cpp
@@ -104,7 +104,7 @@ LogstreamBuilder::~LogstreamBuilder() {
MessageEventEphemeral message(
Date_t::now(), _severity, _component, _contextName, _baseMessage);
message.setIsTruncatable(_isTruncatable);
- _domain->append(message);
+ _domain->append(message).transitional_ignore();
if (_tee) {
_os->str("");
logger::MessageEventDetailsEncoder teeEncoder;
diff --git a/src/mongo/platform/compiler.h b/src/mongo/platform/compiler.h
index 59d7de5dbfb..af3e0b20dc3 100644
--- a/src/mongo/platform/compiler.h
+++ b/src/mongo/platform/compiler.h
@@ -143,8 +143,16 @@
* adversarial inputs, but for some reason the compiler cannot figure this out on its own, for
* example after a call to a function that never returns but cannot be labeled with
* MONGO_COMPILER_NORETURN. In almost all cases MONGO_UNREACHABLE is preferred.
+ *
+ *
+ * MONGO_WARN_UNUSED_RESULT_CLASS
+ *
+ * Tells the compiler that a class defines a type for which checking results is necessary. Types
+ * thus defined turn functions returning them into "must check results" style functions. Preview
+ * of the `[[nodiscard]]` C++17 attribute.
*/
+
#if defined(_MSC_VER)
#include "mongo/platform/compiler_msvc.h"
#elif defined(__GNUC__)
diff --git a/src/mongo/platform/compiler_gcc.h b/src/mongo/platform/compiler_gcc.h
index cf3b239db35..cf5c6960a86 100644
--- a/src/mongo/platform/compiler_gcc.h
+++ b/src/mongo/platform/compiler_gcc.h
@@ -42,9 +42,12 @@
// worth it.
#define MONGO_COMPILER_COLD_FUNCTION
#define MONGO_COMPILER_NORETURN __attribute__((__noreturn__))
+// MONGO_WARN_UNUSED_RESULT_CLASS is only supported with the semantics we want for classes in Clang, not in GCC.
+#define MONGO_WARN_UNUSED_RESULT_CLASS
#else
#define MONGO_COMPILER_COLD_FUNCTION __attribute__((__cold__))
#define MONGO_COMPILER_NORETURN __attribute__((__noreturn__, __cold__))
+#define MONGO_WARN_UNUSED_RESULT_CLASS
#endif
#define MONGO_COMPILER_VARIABLE_UNUSED __attribute__((__unused__))
diff --git a/src/mongo/platform/compiler_msvc.h b/src/mongo/platform/compiler_msvc.h
index c47372e04bd..e9e2bfa86ef 100644
--- a/src/mongo/platform/compiler_msvc.h
+++ b/src/mongo/platform/compiler_msvc.h
@@ -50,6 +50,8 @@
#define MONGO_COMPILER_API_EXPORT __declspec(dllexport)
#define MONGO_COMPILER_API_IMPORT __declspec(dllimport)
+#define MONGO_WARN_UNUSED_RESULT_CLASS
+
#ifdef _M_IX86
// 32-bit x86 supports multiple of calling conventions. We build supporting the cdecl convention
// (most common). By labeling our exported and imported functions as such, we do a small favor to
diff --git a/src/mongo/rpc/legacy_reply_builder.cpp b/src/mongo/rpc/legacy_reply_builder.cpp
index 5f4d3cfe3d1..5fb373d12c8 100644
--- a/src/mongo/rpc/legacy_reply_builder.cpp
+++ b/src/mongo/rpc/legacy_reply_builder.cpp
@@ -100,7 +100,7 @@ LegacyReplyBuilder& LegacyReplyBuilder::setMetadata(const BSONObj& metadata) {
// because we already have skipped some bytes for the message header.
BSONObjBuilder resumedBuilder(
BSONObjBuilder::ResumeBuildingTag(), _builder, sizeof(QueryResult::Value));
- shardingMetadata.getValue().writeToMetadata(&resumedBuilder);
+ shardingMetadata.getValue().writeToMetadata(&resumedBuilder).transitional_ignore();
}
_state = State::kOutputDocs;
return *this;
diff --git a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
index cc08bf832fb..06784a65ee1 100644
--- a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp
@@ -47,7 +47,7 @@ TEST(ReplResponseMetadataTest, OplogQueryMetadataRoundtrip) {
ASSERT_EQ(opTime2, metadata.getLastOpApplied());
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
BSONObj expectedObj(BSON(kOplogQueryMetadataFieldName << BSON(
"lastOpCommitted"
@@ -75,7 +75,7 @@ TEST(ReplResponseMetadataTest, OplogQueryMetadataRoundtrip) {
ASSERT_EQ(metadata.getSyncSourceIndex(), clonedMetadata.getSyncSourceIndex());
BSONObjBuilder clonedBuilder;
- clonedMetadata.writeToMetadata(&clonedBuilder);
+ clonedMetadata.writeToMetadata(&clonedBuilder).transitional_ignore();
BSONObj clonedSerializedObj = clonedBuilder.obj();
ASSERT_BSONOBJ_EQ(expectedObj, clonedSerializedObj);
diff --git a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
index 6d5b7be1a2f..b6285e63342 100644
--- a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
@@ -52,7 +52,7 @@ TEST(ReplResponseMetadataTest, Roundtrip) {
ASSERT_TRUE(metadata.hasReplicaSetId());
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
BSONObj expectedObj(
BSON(kReplSetMetadataFieldName
@@ -82,7 +82,7 @@ TEST(ReplResponseMetadataTest, Roundtrip) {
ASSERT_EQ(metadata.getReplicaSetId(), clonedMetadata.getReplicaSetId());
BSONObjBuilder clonedBuilder;
- clonedMetadata.writeToMetadata(&clonedBuilder);
+ clonedMetadata.writeToMetadata(&clonedBuilder).transitional_ignore();
BSONObj clonedSerializedObj = clonedBuilder.obj();
ASSERT_BSONOBJ_EQ(expectedObj, clonedSerializedObj);
diff --git a/src/mongo/rpc/metadata/sharding_metadata.cpp b/src/mongo/rpc/metadata/sharding_metadata.cpp
index b1c4b47b8e5..384c75f2679 100644
--- a/src/mongo/rpc/metadata/sharding_metadata.cpp
+++ b/src/mongo/rpc/metadata/sharding_metadata.cpp
@@ -116,7 +116,7 @@ Status ShardingMetadata::downconvert(const BSONObj& commandReply,
// We can reuse the same logic to write the sharding metadata out to the legacy
// command as the element has the same format whether it is there or on the metadata
// object.
- swShardingMetadata.getValue().writeToMetadata(legacyCommandReplyBob);
+ swShardingMetadata.getValue().writeToMetadata(legacyCommandReplyBob).transitional_ignore();
} else if (swShardingMetadata.getStatus() == ErrorCodes::NoSuchKey) {
// It is valid to not have a $gleStats field.
} else {
@@ -132,7 +132,7 @@ Status ShardingMetadata::upconvert(const BSONObj& legacyCommand,
// as it has the same format whether it is there or on the metadata object.
auto swShardingMetadata = readFromMetadata(legacyCommand);
if (swShardingMetadata.isOK()) {
- swShardingMetadata.getValue().writeToMetadata(metadataBob);
+ swShardingMetadata.getValue().writeToMetadata(metadataBob).transitional_ignore();
// Write out the command excluding the $gleStats subobject.
for (const auto& elem : legacyCommand) {
diff --git a/src/mongo/rpc/object_check_test.cpp b/src/mongo/rpc/object_check_test.cpp
index 00092ec009d..e36b396b10c 100644
--- a/src/mongo/rpc/object_check_test.cpp
+++ b/src/mongo/rpc/object_check_test.cpp
@@ -68,7 +68,7 @@ TEST(DataTypeValidated, BSONValidationEnabled) {
// mess up the data
DataRangeCursor drc(begin(buf), end(buf));
// skip past size so we don't trip any sanity checks.
- drc.advance(4); // skip size
+ drc.advance(4).transitional_ignore(); // skip size
while (drc.writeAndAdvance(0xFF).isOK())
;
}
diff --git a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
index 9c2257f85d6..cf7e44ba9c1 100644
--- a/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_add_shard_to_zone_test.cpp
@@ -48,7 +48,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToExistingShardShouldSucceed) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->addShardToZone(operationContext(), shard.getName(), "z"));
auto shardDocStatus = getShardDoc(operationContext(), shard.getName());
@@ -66,7 +66,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithSameTagShouldSucceed) {
shard.setHost("a:1234");
shard.setTags({"x", "y"});
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->addShardToZone(operationContext(), shard.getName(), "x"));
@@ -86,7 +86,7 @@ TEST_F(AddShardToZoneTest, AddZoneToShardWithNewTagShouldAppend) {
shard.setHost("a:1234");
shard.setTags({"x"});
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->addShardToZone(operationContext(), shard.getName(), "y"));
@@ -105,7 +105,7 @@ TEST_F(AddShardToZoneTest, AddSingleZoneToNonExistingShardShouldFail) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
auto status = catalogManager()->addShardToZone(operationContext(), "b", "z");
ASSERT_EQ(ErrorCodes::ShardNotFound, status);
diff --git a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
index 104a97a3f6a..eff3e270fc4 100644
--- a/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -59,7 +59,7 @@ public:
shard.setHost("a:1234");
shard.setTags({zoneName()});
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
CollectionType shardedCollection;
shardedCollection.setNs(shardedNS());
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 27e01623f6b..0c5f16812d5 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -575,7 +575,8 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* opCtx,
"shardCollection.start",
ns,
collectionDetail.obj(),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
}
const NamespaceString nss(ns);
@@ -652,7 +653,8 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* opCtx,
"shardCollection.end",
ns,
BSON("version" << collVersion.toString()),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return Status::OK();
}
@@ -715,7 +717,8 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
"removeShard.start",
"",
BSON("shard" << name),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return ShardDrainingStatus::STARTED;
}
@@ -765,7 +768,8 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
"removeShard",
"",
BSON("shard" << name),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return ShardDrainingStatus::COMPLETED;
}
@@ -921,7 +925,8 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
"dropCollection.start",
ns.ns(),
BSONObj(),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
auto shardsStatus = getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
@@ -1099,7 +1104,8 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
"dropCollection",
ns.ns(),
BSONObj(),
- ShardingCatalogClientImpl::kMajorityWriteConcern);
+ ShardingCatalogClientImpl::kMajorityWriteConcern)
+ .transitional_ignore();
return Status::OK();
}
diff --git a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
index 0413d18d030..81d07a8481c 100644
--- a/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_commit_chunk_migration_test.cpp
@@ -55,7 +55,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -80,7 +80,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandWithCtl) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1});
+ setupChunks({chunk0, chunk1}).transitional_ignore();
// use crefs to verify it will take consts:
ChunkType const& chunk0cref = chunk0;
@@ -129,7 +129,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 15;
auto const origVersion = ChunkVersion(origMajorVersion, 4, OID::gen());
@@ -145,7 +145,7 @@ TEST_F(CommitChunkMigrate, CheckCorrectOpsCommandNoCtl) {
auto chunkMax = BSON("a" << 10);
chunk0.setMax(chunkMax);
- setupChunks({chunk0});
+ setupChunks({chunk0}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -185,7 +185,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -210,7 +210,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0, chunk1});
+ setupChunks({chunk0, chunk1}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -236,7 +236,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -263,7 +263,7 @@ TEST_F(CommitChunkMigrate, RejectWrongCollectionEpoch1) {
chunk1.setMax(chunkMaxax);
// get version from the control chunk this time
- setupChunks({chunk1, chunk0});
+ setupChunks({chunk1, chunk0}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -289,7 +289,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -314,7 +314,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing0) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk1});
+ setupChunks({chunk1}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
@@ -340,7 +340,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
shard1.setName("shard1");
shard1.setHost("shard1:12");
- setupShards({shard0, shard1});
+ setupShards({shard0, shard1}).transitional_ignore();
int origMajorVersion = 12;
auto const origVersion = ChunkVersion(origMajorVersion, 7, OID::gen());
@@ -365,7 +365,7 @@ TEST_F(CommitChunkMigrate, RejectChunkMissing1) {
auto chunkMaxax = BSON("a" << 20);
chunk1.setMax(chunkMaxax);
- setupChunks({chunk0});
+ setupChunks({chunk0}).transitional_ignore();
StatusWith<BSONObj> resultBSON =
catalogManager()->commitChunkMigration(operationContext(),
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index c43cfe3a8aa..5c6d84f5abc 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -346,8 +346,10 @@ TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) {
}
TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
- getConfigShard()->createIndexOnConfig(
- operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), true);
+ getConfigShard()
+ ->createIndexOnConfig(
+ operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), true)
+ .transitional_ignore();
ASSERT_OK(catalogManager()->initializeConfigDatabaseIfNeeded(operationContext()));
@@ -370,8 +372,10 @@ TEST_F(ConfigInitializationTest, CompatibleIndexAlreadyExists) {
TEST_F(ConfigInitializationTest, IncompatibleIndexAlreadyExists) {
// Make the index non-unique even though it's supposed to be unique; make sure initialization
// fails
- getConfigShard()->createIndexOnConfig(
- operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), false);
+ getConfigShard()
+ ->createIndexOnConfig(
+ operationContext(), NamespaceString(ShardType::ConfigNS), BSON("host" << 1), false)
+ .transitional_ignore();
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
catalogManager()->initializeConfigDatabaseIfNeeded(operationContext()));
diff --git a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
index 7ddff536bb4..1523f00825a 100644
--- a/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_log_change_test.cpp
@@ -73,8 +73,9 @@ public:
protected:
void noRetryAfterSuccessfulCreate() {
- auto future = launchAsync(
- [this] { log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)); });
+ auto future = launchAsync([this] {
+ log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)).transitional_ignore();
+ });
expectConfigCollectionCreate(configHost, getConfigCollName(), _cappedSize, BSON("ok" << 1));
expectConfigCollectionInsert(configHost,
@@ -88,8 +89,10 @@ protected:
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we don't re-attempt to create the collection
- future = launchAsync(
- [this] { log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5)); });
+ future = launchAsync([this] {
+ log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5))
+ .transitional_ignore();
+ });
expectConfigCollectionInsert(configHost,
getConfigCollName(),
@@ -103,8 +106,9 @@ protected:
}
void noRetryCreateIfAlreadyExists() {
- auto future = launchAsync(
- [this] { log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)); });
+ auto future = launchAsync([this] {
+ log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)).transitional_ignore();
+ });
BSONObjBuilder createResponseBuilder;
Command::appendCommandStatus(createResponseBuilder,
@@ -122,8 +126,10 @@ protected:
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we don't re-attempt to create the collection
- future = launchAsync(
- [this] { log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5)); });
+ future = launchAsync([this] {
+ log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5))
+ .transitional_ignore();
+ });
expectConfigCollectionInsert(configHost,
getConfigCollName(),
@@ -137,8 +143,9 @@ protected:
}
void createFailure() {
- auto future = launchAsync(
- [this] { log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)); });
+ auto future = launchAsync([this] {
+ log("moved a chunk", "foo.bar", BSON("min" << 3 << "max" << 4)).transitional_ignore();
+ });
BSONObjBuilder createResponseBuilder;
Command::appendCommandStatus(createResponseBuilder,
@@ -150,8 +157,10 @@ protected:
future.timed_get(kFutureTimeout);
// Now log another change and confirm that we *do* attempt to create the collection
- future = launchAsync(
- [this] { log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5)); });
+ future = launchAsync([this] {
+ log("moved a second chunk", "foo.bar", BSON("min" << 4 << "max" << 5))
+ .transitional_ignore();
+ });
expectConfigCollectionCreate(configHost, getConfigCollName(), _cappedSize, BSON("ok" << 1));
expectConfigCollectionInsert(configHost,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
index 76479cbd5c3..58391f3173e 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
@@ -394,8 +394,10 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
appendShortVersion(&logDetail.subobjStart("left"), newChunks[0]);
appendShortVersion(&logDetail.subobjStart("right"), newChunks[1]);
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "split", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(opCtx, "split", ns.ns(), logDetail.obj(), WriteConcernOptions())
+ .transitional_ignore();
} else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
@@ -408,8 +410,10 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
chunkDetail.append("of", newChunksSize);
appendShortVersion(&chunkDetail.subobjStart("chunk"), newChunks[i]);
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(opCtx, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions())
+ .transitional_ignore();
}
}
@@ -517,8 +521,10 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* opCtx,
collVersion.addToBSON(logDetail, "prevShardVersion");
mergeVersion.addToBSON(logDetail, "mergedVersion");
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions())
+ .transitional_ignore();
return applyOpsStatus;
}
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index 9d766d11f46..12ffa5c278d 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -684,8 +684,11 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
shardDetails.append("name", shardType.getName());
shardDetails.append("host", shardConnectionString.toString());
- Grid::get(opCtx)->catalogClient(opCtx)->logChange(
- opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern);
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->logChange(
+ opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
// Ensure the added shard is visible to this process.
auto shardRegistry = Grid::get(opCtx)->shardRegistry();
diff --git a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
index cdb71cfa7cf..d6c2878ecdf 100644
--- a/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_merge_chunks_test.cpp
@@ -63,7 +63,7 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -126,7 +126,7 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkBound2, chunkMax};
- setupChunks({chunk, chunk2, chunk3});
+ setupChunks({chunk, chunk2, chunk3}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -193,7 +193,7 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk});
+ setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -256,7 +256,7 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
otherChunk.setMin(BSON("a" << 10));
otherChunk.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2, otherChunk});
+ setupChunks({chunk, chunk2, otherChunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -318,7 +318,7 @@ TEST_F(MergeChunkTest, NonExistingNamespace) {
// Record chunk boundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
auto mergeStatus = catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.NonExistingColl"),
@@ -351,7 +351,7 @@ TEST_F(MergeChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
// Record chunk baoundaries for passing into commitChunkMerge
std::vector<BSONObj> chunkBoundaries{chunkMin, chunkBound, chunkMax};
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
auto mergeStatus = catalogManager()->commitChunkMerge(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -390,7 +390,7 @@ TEST_F(MergeChunkTest, MergeAlreadyHappenedFailsPrecondition) {
mergedChunk.setVersion(mergedVersion);
mergedChunk.setMax(chunkMax);
- setupChunks({mergedChunk});
+ setupChunks({mergedChunk}).transitional_ignore();
ASSERT_EQ(ErrorCodes::BadValue,
catalogManager()->commitChunkMerge(operationContext(),
@@ -449,7 +449,7 @@ TEST_F(MergeChunkTest, ChunkBoundariesOutOfOrderFails) {
chunk.setVersion(version);
originalChunks.push_back(chunk);
- setupChunks(originalChunks);
+ setupChunks(originalChunks).transitional_ignore();
}
ASSERT_EQ(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
index fa89754211f..8b9b47a284c 100644
--- a/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_remove_shard_from_zone_test.cpp
@@ -49,7 +49,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneThatNoLongerExistsShouldNotError) {
shard.setName("a");
shard.setHost("a:1234");
- setupShards({shard});
+ setupShards({shard}).transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shard.getName(), "z"));
auto shardDocStatus = getShardDoc(operationContext(), shard.getName());
@@ -70,7 +70,7 @@ TEST_F(RemoveShardFromZoneTest, RemovingZoneThatIsOnlyReferencedByAnotherShardSh
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shardB.getName(), "z"));
@@ -102,7 +102,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
// Insert a chunk range document referring to a different zone
TagsType tagDoc;
@@ -111,7 +111,8 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldSucceedWhenNoChunks
tagDoc.setMaxKey(BSON("x" << 10));
tagDoc.setTag("y");
insertToConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON());
+ operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON())
+ .transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shardA.getName(), "z"));
@@ -142,7 +143,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer
shardB.setName("b");
shardB.setHost("b:1234");
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
TagsType tagDoc;
tagDoc.setNS("test.foo");
@@ -150,7 +151,8 @@ TEST_F(RemoveShardFromZoneTest, RemoveLastZoneFromShardShouldFailWhenAChunkRefer
tagDoc.setMaxKey(BSON("x" << 10));
tagDoc.setTag("z");
insertToConfigCollection(
- operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON());
+ operationContext(), NamespaceString(TagsType::ConfigNS), tagDoc.toBSON())
+ .transitional_ignore();
auto status = catalogManager()->removeShardFromZone(operationContext(), shardA.getName(), "z");
ASSERT_EQ(ErrorCodes::ZoneStillInUse, status);
@@ -180,7 +182,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneShouldFailIfShardDoesntExist) {
shardA.setHost("a:1234");
shardA.setTags({"z"});
- setupShards({shardA});
+ setupShards({shardA}).transitional_ignore();
auto status = catalogManager()->removeShardFromZone(operationContext(), "b", "z");
ASSERT_EQ(ErrorCodes::ShardNotFound, status);
@@ -206,7 +208,7 @@ TEST_F(RemoveShardFromZoneTest, RemoveZoneFromShardShouldOnlyRemoveZoneOnSpecifi
shardB.setHost("b:1234");
shardB.setTags({"y", "z"});
- setupShards({shardA, shardB});
+ setupShards({shardA, shardB}).transitional_ignore();
ASSERT_OK(catalogManager()->removeShardFromZone(operationContext(), shardB.getName(), "z"));
diff --git a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
index 8373636edad..b3b8b02780b 100644
--- a/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_split_chunk_test.cpp
@@ -55,7 +55,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
auto chunkSplitPoint = BSON("a" << 5);
std::vector<BSONObj> splitPoints{chunkSplitPoint};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -104,7 +104,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
auto chunkSplitPoint2 = BSON("a" << 7);
std::vector<BSONObj> splitPoints{chunkSplitPoint, chunkSplitPoint2};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -174,7 +174,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
chunk2.setMin(BSON("a" << 10));
chunk2.setMax(BSON("a" << 20));
- setupChunks({chunk, chunk2});
+ setupChunks({chunk, chunk2}).transitional_ignore();
ASSERT_OK(catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -223,7 +223,7 @@ TEST_F(SplitChunkTest, PreConditionFailErrors) {
auto chunkSplitPoint = BSON("a" << 5);
splitPoints.push_back(chunkSplitPoint);
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -249,7 +249,7 @@ TEST_F(SplitChunkTest, NonExisingNamespaceErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.NonExistingColl"),
@@ -275,7 +275,7 @@ TEST_F(SplitChunkTest, NonMatchingEpochsOfChunkAndRequestErrors) {
std::vector<BSONObj> splitPoints{BSON("a" << 5)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -301,7 +301,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfOrderShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 4)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -327,7 +327,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMinShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 0), BSON("a" << 5)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
@@ -353,7 +353,7 @@ TEST_F(SplitChunkTest, SplitPointsOutOfRangeAtMaxShouldFail) {
std::vector<BSONObj> splitPoints{BSON("a" << 5), BSON("a" << 15)};
- setupChunks({chunk});
+ setupChunks({chunk}).transitional_ignore();
auto splitStatus = catalogManager()->commitChunkSplit(operationContext(),
NamespaceString("TestDB.TestColl"),
diff --git a/src/mongo/s/catalog/sharding_catalog_test.cpp b/src/mongo/s/catalog/sharding_catalog_test.cpp
index 34d33d0a4c9..23a790ff052 100644
--- a/src/mongo/s/catalog/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_test.cpp
@@ -121,7 +121,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{expectedColl.toBSON()}, builder.obj());
});
@@ -184,7 +184,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{expectedDb.toBSON()}, builder.obj());
});
@@ -402,7 +402,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{chunkA.toConfigBSON(), chunkB.toConfigBSON()},
builder.obj());
@@ -817,7 +817,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return std::make_tuple(vector<BSONObj>{coll1.toBSON(), coll2.toBSON(), coll3.toBSON()},
builder.obj());
@@ -2037,7 +2037,7 @@ TEST_F(ShardingCatalogClientTest, BasicReadAfterOpTime) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2073,7 +2073,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2102,7 +2102,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
ReplSetMetadata metadata(10, oldOpTime, oldOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2127,7 +2127,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
ReplSetMetadata metadata(10, oldOpTime, oldOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
@@ -2153,7 +2153,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeFindThenCmd) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
DatabaseType dbType;
dbType.setName("TestDB");
@@ -2215,7 +2215,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeCmdThenFind) {
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ metadata.writeToMetadata(&builder).transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), builder.obj(), Milliseconds(1));
});
diff --git a/src/mongo/s/cluster_last_error_info_test.cpp b/src/mongo/s/cluster_last_error_info_test.cpp
index 49913c27b4e..b7a06a58ea1 100644
--- a/src/mongo/s/cluster_last_error_info_test.cpp
+++ b/src/mongo/s/cluster_last_error_info_test.cpp
@@ -75,15 +75,19 @@ TEST_F(ClusterGetLastErrorTest,
BSON("unusued"
<< "obj"),
operationContext());
- executor()->scheduleRemoteCommand(
- request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {});
+ executor()
+ ->scheduleRemoteCommand(
+ request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {})
+ .status_with_transitional_ignore();
});
// Make the reply contain ShardingMetadata.
repl::OpTime opTime{Timestamp{10, 10}, 10};
onCommandWithMetadata([&](const RemoteCommandRequest& request) {
BSONObjBuilder metadataBob;
- rpc::ShardingMetadata(opTime, OID() /* ignored OID field */).writeToMetadata(&metadataBob);
+ rpc::ShardingMetadata(opTime, OID() /* ignored OID field */)
+ .writeToMetadata(&metadataBob)
+ .transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), metadataBob.obj(), Milliseconds(1));
});
@@ -116,15 +120,19 @@ TEST_F(ClusterGetLastErrorTest, ClusterLastErrorInfoNotUpdatedIfNotInitialized)
BSON("unusued"
<< "obj"),
operationContext());
- executor()->scheduleRemoteCommand(
- request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {});
+ executor()
+ ->scheduleRemoteCommand(
+ request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {})
+ .status_with_transitional_ignore();
});
// Make the reply contain ShardingMetadata.
repl::OpTime opTime{Timestamp{10, 10}, 10};
onCommandWithMetadata([&](const RemoteCommandRequest& request) {
BSONObjBuilder metadataBob;
- rpc::ShardingMetadata(opTime, OID() /* ignored OID field */).writeToMetadata(&metadataBob);
+ rpc::ShardingMetadata(opTime, OID() /* ignored OID field */)
+ .writeToMetadata(&metadataBob)
+ .transitional_ignore();
return RemoteCommandResponse(BSON("ok" << 1), metadataBob.obj(), Milliseconds(1));
});
@@ -157,8 +165,10 @@ TEST_F(ClusterGetLastErrorTest, ClusterLastErrorInfoNotUpdatedIfReplyDoesntHaveS
BSON("unusued"
<< "obj"),
operationContext());
- executor()->scheduleRemoteCommand(
- request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {});
+ executor()
+ ->scheduleRemoteCommand(
+ request, [=](const executor::TaskExecutor::RemoteCommandCallbackArgs) -> void {})
+ .status_with_transitional_ignore();
});
// Do not return ShardingMetadata in the reply.
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index 3ac6de0af42..8f7d6bb81c8 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -103,11 +103,13 @@ public:
uassertStatusOK(dbInfoStatus.getStatus());
- catalogClient->logChange(opCtx,
- "dropDatabase.start",
- dbname,
- BSONObj(),
- ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "dropDatabase.start",
+ dbname,
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
auto& dbInfo = dbInfoStatus.getValue();
@@ -145,8 +147,13 @@ public:
// Invalidate the database so the next access will do a full reload
catalogCache->purgeDatabase(dbname);
- catalogClient->logChange(
- opCtx, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "dropDatabase",
+ dbname,
+ BSONObj(),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
result.append("dropped", dbname);
return true;
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 45d619c82df..2e0f977753f 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -167,12 +167,14 @@ public:
const auto shardedColls = getAllShardedCollectionsForDb(opCtx, dbname);
// Record start in changelog
- catalogClient->logChange(
- opCtx,
- "movePrimary.start",
- dbname,
- _buildMoveLogEntry(dbname, fromShard->toString(), toShard->toString(), shardedColls),
- ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "movePrimary.start",
+ dbname,
+ _buildMoveLogEntry(
+ dbname, fromShard->toString(), toShard->toString(), shardedColls),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
ScopedDbConnection toconn(toShard->getConnString());
@@ -292,12 +294,13 @@ public:
result << "primary" << toShard->toString();
// Record finish in changelog
- catalogClient->logChange(
- opCtx,
- "movePrimary",
- dbname,
- _buildMoveLogEntry(dbname, oldPrimary, toShard->toString(), shardedColls),
- ShardingCatalogClient::kMajorityWriteConcern);
+ catalogClient
+ ->logChange(opCtx,
+ "movePrimary",
+ dbname,
+ _buildMoveLogEntry(dbname, oldPrimary, toShard->toString(), shardedColls),
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .transitional_ignore();
return true;
}
diff --git a/src/mongo/s/mongos_options.cpp b/src/mongo/s/mongos_options.cpp
index a98cb444fc3..5163ca6c295 100644
--- a/src/mongo/s/mongos_options.cpp
+++ b/src/mongo/s/mongos_options.cpp
@@ -103,16 +103,16 @@ Status addMongosOptions(moe::OptionSection* options) {
.setSources(moe::SourceAllLegacy);
- options->addSection(general_options);
+ options->addSection(general_options).transitional_ignore();
#if defined(_WIN32)
- options->addSection(windows_scm_options);
+ options->addSection(windows_scm_options).transitional_ignore();
#endif
- options->addSection(sharding_options);
+ options->addSection(sharding_options).transitional_ignore();
#ifdef MONGO_CONFIG_SSL
- options->addSection(ssl_options);
+ options->addSection(ssl_options).transitional_ignore();
#endif
return Status::OK();
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index ced16197e98..037765fd5f9 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -525,9 +525,11 @@ void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* opCtx) {
executor::RemoteCommandRequest request(
remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, opCtx);
- _executor->scheduleRemoteCommand(
- request,
- stdx::bind(&AsyncResultsMerger::handleKillCursorsResponse, stdx::placeholders::_1));
+ _executor
+ ->scheduleRemoteCommand(request,
+ stdx::bind(&AsyncResultsMerger::handleKillCursorsResponse,
+ stdx::placeholders::_1))
+ .status_with_transitional_ignore();
}
}
}
diff --git a/src/mongo/s/query/cluster_cursor_manager_test.cpp b/src/mongo/s/query/cluster_cursor_manager_test.cpp
index dc2c5460c38..6fc0d25da20 100644
--- a/src/mongo/s/query/cluster_cursor_manager_test.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager_test.cpp
@@ -362,11 +362,13 @@ TEST_F(ClusterCursorManagerTest, KillCursorWrongCursorId) {
// Test that killing all mortal expired cursors correctly kills a mortal expired cursor.
TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceBasic) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
@@ -377,11 +379,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceBasic) {
TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipUnexpired) {
Date_t timeBeforeCursorCreation = getClockSource()->now();
getClockSource()->advance(Milliseconds(1));
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
getManager()->killMortalCursorsInactiveSince(timeBeforeCursorCreation);
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
@@ -390,11 +394,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipUnexpired) {
// Test that killing all mortal expired cursors does not kill a cursor that is immortal.
TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceSkipImmortal) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Immortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Immortal)
+ .status_with_transitional_ignore();
getManager()->killMortalCursorsInactiveSince(getClockSource()->now());
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
@@ -432,11 +438,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceMultipleCursors)
if (i < numKilledCursorsExpected) {
cutoff = getClockSource()->now();
}
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
getClockSource()->advance(Milliseconds(1));
}
getManager()->killMortalCursorsInactiveSince(cutoff);
@@ -457,11 +465,13 @@ TEST_F(ClusterCursorManagerTest, KillMortalCursorsInactiveSinceMultipleCursors)
TEST_F(ClusterCursorManagerTest, KillAllCursors) {
const size_t numCursors = 10;
for (size_t i = 0; i < numCursors; ++i) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
}
getManager()->killAllCursors();
for (size_t i = 0; i < numCursors; ++i) {
@@ -506,11 +516,13 @@ TEST_F(ClusterCursorManagerTest, ReapZombieCursorsSkipPinned) {
// Test that reaping does not call kill() on the underlying ClusterClientCursor for cursors that
// haven't been killed.
TEST_F(ClusterCursorManagerTest, ReapZombieCursorsSkipNonZombies) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT(!isMockCursorKilled(0));
getManager()->reapZombieCursors();
ASSERT(!isMockCursorKilled(0));
@@ -525,21 +537,25 @@ TEST_F(ClusterCursorManagerTest, StatsInitAsZero) {
// Test that registering a sharded cursor updates the corresponding counter in stats().
TEST_F(ClusterCursorManagerTest, StatsRegisterShardedCursor) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(1U, getManager()->stats().cursorsSharded);
}
// Test that registering a not-sharded cursor updates the corresponding counter in stats().
TEST_F(ClusterCursorManagerTest, StatsRegisterNotShardedCursor) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(1U, getManager()->stats().cursorsNotSharded);
}
@@ -560,21 +576,25 @@ TEST_F(ClusterCursorManagerTest, StatsPinCursor) {
TEST_F(ClusterCursorManagerTest, StatsRegisterMultipleCursors) {
const size_t numShardedCursors = 10;
for (size_t i = 0; i < numShardedCursors; ++i) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(i + 1, getManager()->stats().cursorsSharded);
ASSERT_EQ(0U, getManager()->stats().cursorsNotSharded);
}
const size_t numNotShardedCursors = 10;
for (size_t i = 0; i < numNotShardedCursors; ++i) {
- getManager()->registerCursor(nullptr,
- allocateMockCursor(),
- nss,
- ClusterCursorManager::CursorType::NamespaceNotSharded,
- ClusterCursorManager::CursorLifetime::Mortal);
+ getManager()
+ ->registerCursor(nullptr,
+ allocateMockCursor(),
+ nss,
+ ClusterCursorManager::CursorType::NamespaceNotSharded,
+ ClusterCursorManager::CursorLifetime::Mortal)
+ .status_with_transitional_ignore();
ASSERT_EQ(numShardedCursors, getManager()->stats().cursorsSharded);
ASSERT_EQ(i + 1, getManager()->stats().cursorsNotSharded);
}
diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp
index 905dec8f3c2..52ce9ed63bb 100644
--- a/src/mongo/s/query/establish_cursors.cpp
+++ b/src/mongo/s/query/establish_cursors.cpp
@@ -151,8 +151,10 @@ StatusWith<std::vector<ClusterClientCursorParams::RemoteCursor>> establishCursor
// We do not process the response to the killCursors request (we make a good-faith
// attempt at cleaning up the cursors, but ignore any returned errors).
- executor->scheduleRemoteCommand(
- request, [](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {});
+ executor
+ ->scheduleRemoteCommand(
+ request, [](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {})
+ .status_with_transitional_ignore();
}
return status;
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index af0147eb685..dfb2eb21aa7 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -318,7 +318,10 @@ static ExitCode runMongosServer() {
return EXIT_SHARDING_ERROR;
}
- Grid::get(opCtx.get())->getBalancerConfiguration()->refreshAndCheck(opCtx.get());
+ Grid::get(opCtx.get())
+ ->getBalancerConfiguration()
+ ->refreshAndCheck(opCtx.get())
+ .transitional_ignore();
}
Status status = getGlobalAuthorizationManager()->initialize(NULL);
@@ -344,7 +347,7 @@ static ExitCode runMongosServer() {
// Set up the periodic runner for background job execution
auto runner = makePeriodicRunner();
- runner->startup();
+ runner->startup().transitional_ignore();
getGlobalServiceContext()->setPeriodicRunner(std::move(runner));
// Set up the logical session cache
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index c71a23f0133..5288efab6a9 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -208,7 +208,7 @@ BSONObj ShardKeyPattern::normalizeShardKey(const BSONObj& shardKey) const {
static BSONElement extractKeyElementFromMatchable(const MatchableDocument& matchable,
StringData pathStr) {
ElementPath path;
- path.init(pathStr);
+ path.init(pathStr).transitional_ignore();
path.setTraverseNonleafArrays(false);
path.setTraverseLeafArray(false);
diff --git a/src/mongo/s/sharding_mongod_test_fixture.cpp b/src/mongo/s/sharding_mongod_test_fixture.cpp
index 4a0dccb1670..6ab6b1cb119 100644
--- a/src/mongo/s/sharding_mongod_test_fixture.cpp
+++ b/src/mongo/s/sharding_mongod_test_fixture.cpp
@@ -120,9 +120,10 @@ void ShardingMongodTestFixture::setUp() {
serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
}
repl::ReplSetConfig replSetConfig;
- replSetConfig.initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3
- << "members"
- << serversBob.arr()));
+ replSetConfig
+ .initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3 << "members"
+ << serversBob.arr()))
+ .transitional_ignore();
replCoordPtr->setGetConfigReturnValue(replSetConfig);
repl::ReplicationCoordinator::set(service, std::move(replCoordPtr));
@@ -136,7 +137,9 @@ void ShardingMongodTestFixture::setUp() {
service,
stdx::make_unique<repl::ReplicationProcess>(
storagePtr.get(), stdx::make_unique<repl::ReplicationConsistencyMarkersMock>()));
- repl::ReplicationProcess::get(_opCtx.get())->initializeRollbackID(_opCtx.get());
+ repl::ReplicationProcess::get(_opCtx.get())
+ ->initializeRollbackID(_opCtx.get())
+ .transitional_ignore();
repl::StorageInterface::set(service, std::move(storagePtr));
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 1029febad24..a203d24d518 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -111,7 +111,7 @@ void ShardingTestFixture::setUp() {
auto tlMock = stdx::make_unique<transport::TransportLayerMock>();
_transportLayer = tlMock.get();
service->setTransportLayer(std::move(tlMock));
- _transportLayer->start();
+ _transportLayer->start().transitional_ignore();
// Set the newly created service context to be the current global context so that tests,
// which invoke code still referencing getGlobalServiceContext will work properly.
@@ -148,7 +148,7 @@ void ShardingTestFixture::setUp() {
std::unique_ptr<ShardingCatalogClientImpl> catalogClient(
stdx::make_unique<ShardingCatalogClientImpl>(std::move(uniqueDistLockManager)));
_catalogClient = catalogClient.get();
- catalogClient->startup();
+ catalogClient->startup().transitional_ignore();
ConnectionString configCS = ConnectionString::forReplicaSet(
"configRS", {HostAndPort{"TestHost1"}, HostAndPort{"TestHost2"}});
diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp
index 8db978218fa..8f668b34f56 100644
--- a/src/mongo/s/sharding_uptime_reporter.cpp
+++ b/src/mongo/s/sharding_uptime_reporter.cpp
@@ -70,13 +70,15 @@ void reportStatus(OperationContext* opCtx,
mType.setMongoVersion(VersionInfoInterface::instance().version().toString());
try {
- Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
- opCtx,
- MongosType::ConfigNS,
- BSON(MongosType::name(instanceId)),
- BSON("$set" << mType.toBSON()),
- true,
- ShardingCatalogClient::kMajorityWriteConcern);
+ Grid::get(opCtx)
+ ->catalogClient(opCtx)
+ ->updateConfigDocument(opCtx,
+ MongosType::ConfigNS,
+ BSON(MongosType::name(instanceId)),
+ BSON("$set" << mType.toBSON()),
+ true,
+ ShardingCatalogClient::kMajorityWriteConcern)
+ .status_with_transitional_ignore();
} catch (const std::exception& e) {
log() << "Caught exception while reporting uptime: " << e.what();
}
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index d8ddaff5b3d..5e2c2236fc9 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -557,7 +557,7 @@ void MongoBase::Functions::copyDatabaseWithSCRAM::call(JSContext* cx, JS::CallAr
session->setParameter(SaslClientSession::parameterMechanism, "SCRAM-SHA-1");
session->setParameter(SaslClientSession::parameterUser, user);
session->setParameter(SaslClientSession::parameterPassword, hashedPwd);
- session->initialize();
+ session->initialize().transitional_ignore();
BSONObj saslFirstCommandPrefix =
BSON("copydbsaslstart" << 1 << "fromhost" << fromHost << "fromdb" << fromDb
diff --git a/src/mongo/shell/shell_options.cpp b/src/mongo/shell/shell_options.cpp
index 8ed52f8f8a4..1edbbf1bba1 100644
--- a/src/mongo/shell/shell_options.cpp
+++ b/src/mongo/shell/shell_options.cpp
@@ -130,7 +130,7 @@ Status addMongoShellOptions(moe::OptionSection* options) {
moe::String,
"Remote host name to use for purpose of GSSAPI/Kerberos authentication");
- options->addSection(authenticationOptions);
+ options->addSection(authenticationOptions).transitional_ignore();
options->addOptionChaining("help", "help,h", moe::Switch, "show this usage information");
diff --git a/src/mongo/transport/message_compressor_manager.cpp b/src/mongo/transport/message_compressor_manager.cpp
index 98448feeae4..ee378f0ee94 100644
--- a/src/mongo/transport/message_compressor_manager.cpp
+++ b/src/mongo/transport/message_compressor_manager.cpp
@@ -52,9 +52,9 @@ struct CompressionHeader {
uint8_t compressorId;
void serialize(DataRangeCursor* cursor) {
- cursor->writeAndAdvance<LittleEndian<int32_t>>(originalOpCode);
- cursor->writeAndAdvance<LittleEndian<int32_t>>(uncompressedSize);
- cursor->writeAndAdvance<LittleEndian<uint8_t>>(compressorId);
+ cursor->writeAndAdvance<LittleEndian<int32_t>>(originalOpCode).transitional_ignore();
+ cursor->writeAndAdvance<LittleEndian<int32_t>>(uncompressedSize).transitional_ignore();
+ cursor->writeAndAdvance<LittleEndian<uint8_t>>(compressorId).transitional_ignore();
}
CompressionHeader(int32_t _opcode, int32_t _size, uint8_t _id)
diff --git a/src/mongo/transport/message_compressor_manager_test.cpp b/src/mongo/transport/message_compressor_manager_test.cpp
index 50da507a2c6..10404e67767 100644
--- a/src/mongo/transport/message_compressor_manager_test.cpp
+++ b/src/mongo/transport/message_compressor_manager_test.cpp
@@ -50,7 +50,7 @@ MessageCompressorRegistry buildRegistry() {
std::vector<std::string> compressorList = {compressor->getName()};
ret.setSupportedCompressors(std::move(compressorList));
ret.registerImplementation(std::move(compressor));
- ret.finalizeSupportedCompressors();
+ ret.finalizeSupportedCompressors().transitional_ignore();
return ret;
}
@@ -92,7 +92,7 @@ void checkFidelity(const Message& msg, std::unique_ptr<MessageCompressorBase> co
std::vector<std::string> compressorList = {compressorName};
registry.setSupportedCompressors(std::move(compressorList));
registry.registerImplementation(std::move(compressor));
- registry.finalizeSupportedCompressors();
+ registry.finalizeSupportedCompressors().transitional_ignore();
MessageCompressorManager mgr(&registry);
auto negotiator = BSON("isMaster" << 1 << "compression" << BSON_ARRAY(compressorName));
diff --git a/src/mongo/transport/message_compressor_noop.h b/src/mongo/transport/message_compressor_noop.h
index b0602482b78..07252801db3 100644
--- a/src/mongo/transport/message_compressor_noop.h
+++ b/src/mongo/transport/message_compressor_noop.h
@@ -39,13 +39,13 @@ public:
}
StatusWith<std::size_t> compressData(ConstDataRange input, DataRange output) override {
- output.write(input);
+ output.write(input).transitional_ignore();
counterHitCompress(input.length(), input.length());
return {input.length()};
}
StatusWith<std::size_t> decompressData(ConstDataRange input, DataRange output) override {
- output.write(input);
+ output.write(input).transitional_ignore();
counterHitDecompress(input.length(), input.length());
return {input.length()};
}
diff --git a/src/mongo/transport/message_compressor_registry_test.cpp b/src/mongo/transport/message_compressor_registry_test.cpp
index a14f067e606..e80da969f0b 100644
--- a/src/mongo/transport/message_compressor_registry_test.cpp
+++ b/src/mongo/transport/message_compressor_registry_test.cpp
@@ -52,7 +52,7 @@ TEST(MessageCompressorRegistry, RegularTest) {
auto compressorListCheck = compressorList;
registry.setSupportedCompressors(std::move(compressorList));
registry.registerImplementation(std::move(compressor));
- registry.finalizeSupportedCompressors();
+ registry.finalizeSupportedCompressors().transitional_ignore();
ASSERT_TRUE(compressorListCheck == registry.getCompressorNames());
diff --git a/src/mongo/transport/service_state_machine_test.cpp b/src/mongo/transport/service_state_machine_test.cpp
index e402072a258..1153b554a27 100644
--- a/src/mongo/transport/service_state_machine_test.cpp
+++ b/src/mongo/transport/service_state_machine_test.cpp
@@ -211,7 +211,7 @@ protected:
auto tl = stdx::make_unique<MockTL>();
_tl = tl.get();
sc->setTransportLayer(std::move(tl));
- _tl->start();
+ _tl->start().transitional_ignore();
_ssm = stdx::make_unique<ServiceStateMachine>(
getGlobalServiceContext(), _tl->createSession(), true);
diff --git a/src/mongo/transport/transport_layer_asio.cpp b/src/mongo/transport/transport_layer_asio.cpp
index 01e8b01b5c2..ebadfd7c56d 100644
--- a/src/mongo/transport/transport_layer_asio.cpp
+++ b/src/mongo/transport/transport_layer_asio.cpp
@@ -258,9 +258,11 @@ Status TransportLayerASIO::setup() {
_sslContext = stdx::make_unique<asio::ssl::context>(asio::ssl::context::sslv23);
const auto sslManager = getSSLManager();
- sslManager->initSSLContext(_sslContext->native_handle(),
- sslParams,
- SSLManagerInterface::ConnectionDirection::kOutgoing);
+ sslManager
+ ->initSSLContext(_sslContext->native_handle(),
+ sslParams,
+ SSLManagerInterface::ConnectionDirection::kOutgoing)
+ .transitional_ignore();
}
#endif
diff --git a/src/mongo/transport/transport_layer_legacy_test.cpp b/src/mongo/transport/transport_layer_legacy_test.cpp
index 90cce0223e3..f442b231e9a 100644
--- a/src/mongo/transport/transport_layer_legacy_test.cpp
+++ b/src/mongo/transport/transport_layer_legacy_test.cpp
@@ -70,8 +70,8 @@ TEST(TransportLayerLegacy, endSessionsDoesntDoubleClose) {
sepu.tll = &tll;
- tll.setup();
- tll.start();
+ tll.setup().transitional_ignore();
+ tll.start().transitional_ignore();
stdx::mutex mutex;
bool end = false;
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
index 47a01afaf86..7c0575daa46 100644
--- a/src/mongo/util/background.cpp
+++ b/src/mongo/util/background.cpp
@@ -262,7 +262,7 @@ Status PeriodicTask::stopRunningPeriodicTasks(int gracePeriodMillis) {
if (runnerDestroyed || !runner)
return status;
- runner->cancel();
+ runner->cancel().transitional_ignore();
status = runner->stop(gracePeriodMillis);
if (status.isOK()) {
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index 9d04059a743..7be61a420ee 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -649,7 +649,7 @@ void Listener::checkTicketNumbers() {
log() << " --maxConns too high, can only handle " << want;
}
}
- globalTicketHolder.resize(want);
+ globalTicketHolder.resize(want).transitional_ignore();
}
void Listener::shutdown() {
diff --git a/src/mongo/util/options_parser/environment.cpp b/src/mongo/util/options_parser/environment.cpp
index 1b54a96ee22..021fedec677 100644
--- a/src/mongo/util/options_parser/environment.cpp
+++ b/src/mongo/util/options_parser/environment.cpp
@@ -310,7 +310,7 @@ Status valueMapToBSON(const std::map<Key, Value>& params,
// Use the section name in our object builder, and recursively call
// valueMapToBSON with our sub map with keys that have the section name removed.
BSONObjBuilder sectionObjBuilder(builder->subobjStart(sectionName));
- valueMapToBSON(sectionMap, &sectionObjBuilder, sectionName);
+ valueMapToBSON(sectionMap, &sectionObjBuilder, sectionName).transitional_ignore();
sectionObjBuilder.done();
// Our iterator is currently on the last field that matched our dot and prefix, so
diff --git a/src/mongo/util/options_parser/environment_test.cpp b/src/mongo/util/options_parser/environment_test.cpp
index 79764b382fa..01a8324ad7c 100644
--- a/src/mongo/util/options_parser/environment_test.cpp
+++ b/src/mongo/util/options_parser/environment_test.cpp
@@ -46,7 +46,7 @@ TEST(Environment, EmptyValue) {
TEST(Environment, Immutable) {
moe::Environment environment;
moe::ImmutableKeyConstraint immutableKeyConstraint(moe::Key("port"));
- environment.addKeyConstraint(&immutableKeyConstraint);
+ environment.addKeyConstraint(&immutableKeyConstraint).transitional_ignore();
ASSERT_OK(environment.set(moe::Key("port"), moe::Value(5)));
ASSERT_OK(environment.validate());
ASSERT_NOT_OK(environment.set(moe::Key("port"), moe::Value(0)));
@@ -55,7 +55,7 @@ TEST(Environment, Immutable) {
TEST(Environment, OutOfRange) {
moe::Environment environment;
moe::NumericKeyConstraint numericKeyConstraint(moe::Key("port"), 1000, 65535);
- environment.addKeyConstraint(&numericKeyConstraint);
+ environment.addKeyConstraint(&numericKeyConstraint).transitional_ignore();
ASSERT_OK(environment.validate());
ASSERT_NOT_OK(environment.set(moe::Key("port"), moe::Value(0)));
}
@@ -63,7 +63,7 @@ TEST(Environment, OutOfRange) {
TEST(Environment, NonNumericRangeConstraint) {
moe::Environment environment;
moe::NumericKeyConstraint numericKeyConstraint(moe::Key("port"), 1000, 65535);
- environment.addKeyConstraint(&numericKeyConstraint);
+ environment.addKeyConstraint(&numericKeyConstraint).transitional_ignore();
ASSERT_OK(environment.validate());
ASSERT_NOT_OK(environment.set(moe::Key("port"), moe::Value("string")));
}
@@ -71,7 +71,7 @@ TEST(Environment, NonNumericRangeConstraint) {
TEST(Environment, BadType) {
moe::Environment environment;
moe::TypeKeyConstraint<int> typeKeyConstraintInt(moe::Key("port"));
- environment.addKeyConstraint(&typeKeyConstraintInt);
+ environment.addKeyConstraint(&typeKeyConstraintInt).transitional_ignore();
ASSERT_OK(environment.set(moe::Key("port"), moe::Value("string")));
ASSERT_NOT_OK(environment.validate());
}
@@ -79,9 +79,9 @@ TEST(Environment, BadType) {
TEST(Environment, AllowNumeric) {
moe::Environment environment;
moe::TypeKeyConstraint<long> typeKeyConstraintLong(moe::Key("port"));
- environment.addKeyConstraint(&typeKeyConstraintLong);
+ environment.addKeyConstraint(&typeKeyConstraintLong).transitional_ignore();
moe::TypeKeyConstraint<int> typeKeyConstraintInt(moe::Key("port"));
- environment.addKeyConstraint(&typeKeyConstraintInt);
+ environment.addKeyConstraint(&typeKeyConstraintInt).transitional_ignore();
ASSERT_OK(environment.set(moe::Key("port"), moe::Value(1)));
ASSERT_OK(environment.validate());
}
@@ -89,7 +89,7 @@ TEST(Environment, AllowNumeric) {
TEST(Environment, MutuallyExclusive) {
moe::Environment environment;
moe::MutuallyExclusiveKeyConstraint constraint(moe::Key("key"), moe::Key("otherKey"));
- environment.addKeyConstraint(&constraint);
+ environment.addKeyConstraint(&constraint).transitional_ignore();
ASSERT_OK(environment.set(moe::Key("key"), moe::Value(1)));
ASSERT_OK(environment.set(moe::Key("otherKey"), moe::Value(1)));
ASSERT_NOT_OK(environment.validate());
@@ -98,7 +98,7 @@ TEST(Environment, MutuallyExclusive) {
TEST(Environment, RequiresOther) {
moe::Environment environment;
moe::RequiresOtherKeyConstraint constraint(moe::Key("key"), moe::Key("otherKey"));
- environment.addKeyConstraint(&constraint);
+ environment.addKeyConstraint(&constraint).transitional_ignore();
ASSERT_OK(environment.set(moe::Key("key"), moe::Value(1)));
ASSERT_NOT_OK(environment.validate());
ASSERT_OK(environment.set(moe::Key("otherKey"), moe::Value(1)));
@@ -108,7 +108,7 @@ TEST(Environment, RequiresOther) {
TEST(Environment, StringFormat) {
moe::Environment environment;
moe::StringFormatKeyConstraint constraint(moe::Key("key"), "[0-9]", "[0-9]");
- environment.addKeyConstraint(&constraint);
+ environment.addKeyConstraint(&constraint).transitional_ignore();
ASSERT_OK(environment.set(moe::Key("key"), moe::Value(1)));
ASSERT_NOT_OK(environment.validate());
ASSERT_OK(environment.set(moe::Key("key"), moe::Value(std::string("a"))));
diff --git a/src/mongo/util/options_parser/option_section.cpp b/src/mongo/util/options_parser/option_section.cpp
index b49902508af..904bf5e284f 100644
--- a/src/mongo/util/options_parser/option_section.cpp
+++ b/src/mongo/util/options_parser/option_section.cpp
@@ -474,7 +474,7 @@ Status OptionSection::getAllOptions(std::vector<OptionDescription>* options) con
std::list<OptionSection>::const_iterator ositerator;
for (ositerator = _subSections.begin(); ositerator != _subSections.end(); ositerator++) {
- ositerator->getAllOptions(options);
+ ositerator->getAllOptions(options).transitional_ignore();
}
return Status::OK();
@@ -490,7 +490,7 @@ Status OptionSection::getDefaults(std::map<Key, Value>* values) const {
std::list<OptionSection>::const_iterator ositerator;
for (ositerator = _subSections.begin(); ositerator != _subSections.end(); ositerator++) {
- ositerator->getDefaults(values);
+ ositerator->getDefaults(values).transitional_ignore();
}
return Status::OK();
@@ -511,7 +511,7 @@ Status OptionSection::countOptions(int* numOptions, bool visibleOnly, OptionSour
std::list<OptionSection>::const_iterator ositerator;
for (ositerator = _subSections.begin(); ositerator != _subSections.end(); ositerator++) {
int numSubOptions = 0;
- ositerator->countOptions(&numSubOptions, visibleOnly, sources);
+ ositerator->countOptions(&numSubOptions, visibleOnly, sources).transitional_ignore();
*numOptions += numSubOptions;
}
@@ -531,7 +531,7 @@ Status OptionSection::getConstraints(std::vector<std::shared_ptr<Constraint>>* c
std::list<OptionSection>::const_iterator ositerator;
for (ositerator = _subSections.begin(); ositerator != _subSections.end(); ositerator++) {
- ositerator->getConstraints(constraints);
+ ositerator->getConstraints(constraints).transitional_ignore();
}
return Status::OK();
diff --git a/src/mongo/util/options_parser/options_parser.cpp b/src/mongo/util/options_parser/options_parser.cpp
index 5d03910a390..11fe6ec1434 100644
--- a/src/mongo/util/options_parser/options_parser.cpp
+++ b/src/mongo/util/options_parser/options_parser.cpp
@@ -416,7 +416,7 @@ Status addBoostVariablesToEnvironment(const po::variables_map& vm,
optionValue = Value(mapValue);
}
- environment->set(iterator->_dottedName, optionValue);
+ environment->set(iterator->_dottedName, optionValue).transitional_ignore();
}
}
return Status::OK();
@@ -605,7 +605,7 @@ Status addConstraints(const OptionSection& options, Environment* dest) {
std::vector<std::shared_ptr<Constraint>>::const_iterator citerator;
for (citerator = constraints_vector.begin(); citerator != constraints_vector.end();
citerator++) {
- dest->addConstraint(citerator->get());
+ dest->addConstraint(citerator->get()).transitional_ignore();
}
return Status::OK();
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index a0d97c512bb..bf8fb873be5 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -302,7 +302,7 @@ TEST(Parsing, SubSection) {
moe::OptionSection subSection("Section Name");
subSection.addOptionChaining("port", "port", moe::Int, "Port");
- testOpts.addSection(subSection);
+ testOpts.addSection(subSection).transitional_ignore();
std::vector<std::string> argv;
argv.push_back("binaryname");
@@ -4020,7 +4020,7 @@ TEST(OptionCount, Basic) {
moe::OptionSection subSection("Section Name");
subSection.addOptionChaining("port", "port", moe::Int, "Port")
.setSources(moe::SourceYAMLConfig);
- testOpts.addSection(subSection);
+ testOpts.addSection(subSection).transitional_ignore();
int numOptions;
ASSERT_OK(testOpts.countOptions(&numOptions, true /*visibleOnly*/, moe::SourceCommandLine));
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index e9422155a4f..e05463743fb 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -165,10 +165,13 @@ int MallocFreeOStreamGuard::terminateDepth = 0;
// must hold MallocFreeOStreamGuard to call
void writeMallocFreeStreamToLog() {
- logger::globalLogDomain()->append(
- logger::MessageEventEphemeral(
- Date_t::now(), logger::LogSeverity::Severe(), getThreadName(), mallocFreeOStream.str())
- .setIsTruncatable(false));
+ logger::globalLogDomain()
+ ->append(logger::MessageEventEphemeral(Date_t::now(),
+ logger::LogSeverity::Severe(),
+ getThreadName(),
+ mallocFreeOStream.str())
+ .setIsTruncatable(false))
+ .transitional_ignore();
mallocFreeOStream.rewind();
}
diff --git a/src/third_party/gperftools-2.5/SConscript b/src/third_party/gperftools-2.5/SConscript
index 1ffe7e200c1..fd35f0c54d1 100644
--- a/src/third_party/gperftools-2.5/SConscript
+++ b/src/third_party/gperftools-2.5/SConscript
@@ -115,7 +115,7 @@ def removeIfPresent(lst, item):
except ValueError:
pass
-for to_remove in ['-Werror', "-Wsign-compare","-Wall"]:
+for to_remove in ['-Werror', "-Wsign-compare","-Wall","-Werror=unused-result"]:
removeIfPresent(env['CCFLAGS'], to_remove)
# GCC on PowerPC under C++11 mode does not define __linux which gperftools depends on