summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Storch <david.storch@10gen.com>2016-08-12 15:58:56 -0400
committerDavid Storch <david.storch@10gen.com>2016-08-18 11:14:17 -0400
commit26543060c852aac22f26143a04bf7789ec8fec53 (patch)
treedf3ae49e5c4745058be29b7ec8a8e4b528b50a9a
parent13fa28982d008568f7620d73ddec0c61fad7cbc8 (diff)
downloadmongo-26543060c852aac22f26143a04bf7789ec8fec53.tar.gz
SERVER-24508 BSONObj::ComparatorInterface
BSONObj instances should now be compared via the comparator interface's evaluate() method. This is preferred over using BSONObj::woCompare() directly. If the comparison doesn't require any database semantics (e.g. there is no collation), there is a global instance of the SimpleBSONObjComparator which should be used for BSONObj comparisons. If the comparison requires special semantics, then callers must instantiate their own comparator object.
-rw-r--r--src/mongo/SConscript1
-rw-r--r--src/mongo/bson/bson_field_test.cpp12
-rw-r--r--src/mongo/bson/bson_obj_test.cpp418
-rw-r--r--src/mongo/bson/bsonobj.h68
-rw-r--r--src/mongo/bson/bsonobj_comparator_interface.h195
-rw-r--r--src/mongo/bson/bsonobjbuilder_test.cpp26
-rw-r--r--src/mongo/bson/mutable/mutable_bson_test.cpp90
-rw-r--r--src/mongo/bson/simple_bsonobj_comparator.cpp38
-rw-r--r--src/mongo/bson/simple_bsonobj_comparator.h49
-rw-r--r--src/mongo/client/authenticate_test.cpp2
-rw-r--r--src/mongo/client/fetcher_test.cpp38
-rw-r--r--src/mongo/client/read_preference.h3
-rw-r--r--src/mongo/client/replica_set_monitor_read_preference_test.cpp2
-rw-r--r--src/mongo/db/catalog/collection_options_test.cpp16
-rw-r--r--src/mongo/db/catalog/index_catalog.cpp15
-rw-r--r--src/mongo/db/collection_index_usage_tracker_test.cpp2
-rw-r--r--src/mongo/db/commands/dbcommands.cpp4
-rw-r--r--src/mongo/db/commands/index_filter_commands_test.cpp25
-rw-r--r--src/mongo/db/commands/mr_test.cpp4
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp34
-rw-r--r--src/mongo/db/exec/and_common-inl.h4
-rw-r--r--src/mongo/db/exec/projection_exec_test.cpp14
-rw-r--r--src/mongo/db/exec/sort_test.cpp2
-rw-r--r--src/mongo/db/field_parser_test.cpp38
-rw-r--r--src/mongo/db/ftdc/file_writer_test.cpp8
-rw-r--r--src/mongo/db/ftdc/ftdc_test.cpp4
-rw-r--r--src/mongo/db/fts/fts_query_impl_test.cpp36
-rw-r--r--src/mongo/db/fts/fts_spec_test.cpp22
-rw-r--r--src/mongo/db/index/2d_key_generator_test.cpp14
-rw-r--r--src/mongo/db/index/btree_key_generator_test.cpp18
-rw-r--r--src/mongo/db/index/hash_key_generator_test.cpp14
-rw-r--r--src/mongo/db/index/s2_key_generator_test.cpp25
-rw-r--r--src/mongo/db/keypattern_test.cpp64
-rw-r--r--src/mongo/db/matcher/expression_algo_test.cpp16
-rw-r--r--src/mongo/db/matcher/expression_geo.cpp5
-rw-r--r--src/mongo/db/matcher/expression_serialization_test.cpp238
-rw-r--r--src/mongo/db/matcher/expression_where_base.cpp5
-rw-r--r--src/mongo/db/matcher/path_test.cpp8
-rw-r--r--src/mongo/db/ops/modifier_current_date_test.cpp20
-rw-r--r--src/mongo/db/ops/modifier_push_sorter_test.cpp56
-rw-r--r--src/mongo/db/ops/write_ops_parsers_test.cpp28
-rw-r--r--src/mongo/db/pipeline/aggregation_request_test.cpp6
-rw-r--r--src/mongo/db/pipeline/document_source_test.cpp104
-rw-r--r--src/mongo/db/pipeline/document_value_test.cpp14
-rw-r--r--src/mongo/db/pipeline/expression_test.cpp72
-rw-r--r--src/mongo/db/pipeline/lookup_set_cache_test.cpp4
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp12
-rw-r--r--src/mongo/db/pipeline/pipeline_test.cpp4
-rw-r--r--src/mongo/db/query/canonical_query_test.cpp7
-rw-r--r--src/mongo/db/query/collation/collation_index_key_test.cpp18
-rw-r--r--src/mongo/db/query/collation/collation_spec_test.cpp18
-rw-r--r--src/mongo/db/query/collation/collator_factory_mock.cpp4
-rw-r--r--src/mongo/db/query/collation/collator_interface.h6
-rw-r--r--src/mongo/db/query/collation/collator_interface_icu.cpp4
-rw-r--r--src/mongo/db/query/collation/collator_interface_icu.h2
-rw-r--r--src/mongo/db/query/collation/collator_interface_mock.cpp4
-rw-r--r--src/mongo/db/query/collation/collator_interface_mock.h2
-rw-r--r--src/mongo/db/query/count_request_test.cpp18
-rw-r--r--src/mongo/db/query/cursor_response_test.cpp20
-rw-r--r--src/mongo/db/query/find_and_modify_request_test.cpp80
-rw-r--r--src/mongo/db/query/getmore_request_test.cpp10
-rw-r--r--src/mongo/db/query/index_bounds.cpp7
-rw-r--r--src/mongo/db/query/killcursors_request_test.cpp2
-rw-r--r--src/mongo/db/query/killcursors_response_test.cpp2
-rw-r--r--src/mongo/db/query/parsed_projection.cpp7
-rw-r--r--src/mongo/db/query/parsed_projection_test.cpp16
-rw-r--r--src/mongo/db/query/planner_analysis_test.cpp111
-rw-r--r--src/mongo/db/query/query_planner_test_lib.cpp18
-rw-r--r--src/mongo/db/query/query_request.cpp4
-rw-r--r--src/mongo/db/query/query_request_test.cpp12
-rw-r--r--src/mongo/db/repl/check_quorum_for_config_change_test.cpp26
-rw-r--r--src/mongo/db/repl/collection_cloner_test.cpp4
-rw-r--r--src/mongo/db/repl/data_replicator.cpp5
-rw-r--r--src/mongo/db/repl/data_replicator_test.cpp21
-rw-r--r--src/mongo/db/repl/database_cloner_test.cpp16
-rw-r--r--src/mongo/db/repl/elect_cmd_runner_test.cpp2
-rw-r--r--src/mongo/db/repl/freshness_checker_test.cpp22
-rw-r--r--src/mongo/db/repl/multiapplier_test.cpp10
-rw-r--r--src/mongo/db/repl/oplog_buffer_collection_test.cpp180
-rw-r--r--src/mongo/db/repl/oplog_entry.h4
-rw-r--r--src/mongo/db/repl/oplog_fetcher_test.cpp28
-rw-r--r--src/mongo/db/repl/read_concern_args_test.cpp8
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl_test.cpp4
-rw-r--r--src/mongo/db/repl/reporter_test.cpp10
-rw-r--r--src/mongo/db/repl/roll_back_local_operations_test.cpp37
-rw-r--r--src/mongo/db/repl/storage_interface_impl_test.cpp90
-rw-r--r--src/mongo/db/repl/sync_tail_test.cpp28
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl.cpp2
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_test.cpp7
-rw-r--r--src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp17
-rw-r--r--src/mongo/db/s/collection_metadata.cpp3
-rw-r--r--src/mongo/db/s/metadata_manager.cpp12
-rw-r--r--src/mongo/db/s/metadata_manager_test.cpp6
-rw-r--r--src/mongo/db/s/split_vector_command.cpp8
-rw-r--r--src/mongo/db/s/start_chunk_clone_request_test.cpp6
-rw-r--r--src/mongo/db/s/type_shard_identity_test.cpp4
-rw-r--r--src/mongo/db/stats/timer_stats_test.cpp4
-rw-r--r--src/mongo/db/storage/index_entry_comparison.h5
-rw-r--r--src/mongo/db/storage/key_string_test.cpp55
-rw-r--r--src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp18
-rw-r--r--src/mongo/db/storage/storage_engine_metadata_test.cpp2
-rw-r--r--src/mongo/dbtests/chunktests.cpp2
-rw-r--r--src/mongo/dbtests/dbhelper_tests.cpp2
-rw-r--r--src/mongo/dbtests/extensions_callback_real_test.cpp4
-rw-r--r--src/mongo/dbtests/index_access_method_test.cpp8
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp6
-rw-r--r--src/mongo/dbtests/jsobjtests.cpp50
-rw-r--r--src/mongo/dbtests/jstests.cpp6
-rw-r--r--src/mongo/dbtests/mock/mock_replica_set.cpp4
-rw-r--r--src/mongo/dbtests/query_stage_delete.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_ixscan.cpp20
-rw-r--r--src/mongo/dbtests/query_stage_merge_sort.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp6
-rw-r--r--src/mongo/dbtests/query_stage_update.cpp10
-rw-r--r--src/mongo/dbtests/querytests.cpp12
-rw-r--r--src/mongo/dbtests/repltests.cpp2
-rw-r--r--src/mongo/dbtests/rollbacktests.cpp2
-rw-r--r--src/mongo/dbtests/sort_key_generator_test.cpp34
-rw-r--r--src/mongo/dbtests/updatetests.cpp165
-rw-r--r--src/mongo/executor/network_interface_asio_test.cpp17
-rw-r--r--src/mongo/executor/network_interface_mock_test.cpp11
-rw-r--r--src/mongo/executor/remote_command_request.cpp7
-rw-r--r--src/mongo/executor/remote_command_response.cpp5
-rw-r--r--src/mongo/rpc/command_reply.cpp7
-rw-r--r--src/mongo/rpc/command_reply_test.cpp8
-rw-r--r--src/mongo/rpc/command_request.cpp8
-rw-r--r--src/mongo/rpc/command_request_builder_test.cpp4
-rw-r--r--src/mongo/rpc/command_request_test.cpp8
-rw-r--r--src/mongo/rpc/metadata/client_metadata_test.cpp4
-rw-r--r--src/mongo/rpc/metadata/config_server_metadata_test.cpp4
-rw-r--r--src/mongo/rpc/metadata/repl_set_metadata_test.cpp4
-rw-r--r--src/mongo/rpc/metadata/server_selection_metadata_test.cpp4
-rw-r--r--src/mongo/rpc/metadata/sharding_metadata_test.cpp6
-rw-r--r--src/mongo/rpc/reply_builder_test.cpp10
-rw-r--r--src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp5
-rw-r--r--src/mongo/s/balancer/balancer_configuration_test.cpp4
-rw-r--r--src/mongo/s/balancer/balancer_policy.cpp15
-rw-r--r--src/mongo/s/balancer/balancer_policy_tests.cpp76
-rw-r--r--src/mongo/s/balancer/migration_manager.cpp4
-rw-r--r--src/mongo/s/balancer/migration_manager_test.cpp4
-rw-r--r--src/mongo/s/balancer/type_migration_test.cpp4
-rw-r--r--src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp82
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp18
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_append_db_stats_test.cpp4
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_assign_key_range_to_zone_test.cpp44
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp24
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_merge_chunk_test.cpp20
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_remove_shard_test.cpp22
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_shard_collection_test.cpp35
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_split_chunk_test.cpp14
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_test.cpp314
-rw-r--r--src/mongo/s/catalog/replset/sharding_catalog_write_retry_test.cpp8
-rw-r--r--src/mongo/s/catalog/type_changelog_test.cpp6
-rw-r--r--src/mongo/s/catalog/type_chunk.cpp5
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp12
-rw-r--r--src/mongo/s/catalog/type_collection_test.cpp18
-rw-r--r--src/mongo/s/catalog/type_config_version_test.cpp2
-rw-r--r--src/mongo/s/catalog/type_tags_test.cpp4
-rw-r--r--src/mongo/s/chunk.cpp5
-rw-r--r--src/mongo/s/chunk_manager.cpp12
-rw-r--r--src/mongo/s/chunk_manager_targeter.h7
-rw-r--r--src/mongo/s/cluster_identity_loader_test.cpp4
-rw-r--r--src/mongo/s/commands/cluster_map_reduce_cmd.cpp5
-rw-r--r--src/mongo/s/commands/cluster_shard_collection_cmd.cpp8
-rw-r--r--src/mongo/s/commands/commands_public.cpp10
-rw-r--r--src/mongo/s/migration_secondary_throttle_options_test.cpp10
-rw-r--r--src/mongo/s/move_chunk_request_test.cpp8
-rw-r--r--src/mongo/s/query/async_results_merger_test.cpp196
-rw-r--r--src/mongo/s/query/cluster_client_cursor_impl_test.cpp16
-rw-r--r--src/mongo/s/query/cluster_cursor_manager_test.cpp6
-rw-r--r--src/mongo/s/query/router_stage_limit_test.cpp18
-rw-r--r--src/mongo/s/query/router_stage_remove_sortkey_test.cpp22
-rw-r--r--src/mongo/s/query/router_stage_skip_test.cpp24
-rw-r--r--src/mongo/s/query/store_possible_cursor_test.cpp6
-rw-r--r--src/mongo/s/request_types/add_shard_to_zone_request_test.cpp20
-rw-r--r--src/mongo/s/request_types/balance_chunk_request_test.cpp8
-rw-r--r--src/mongo/s/request_types/commit_chunk_migration_request_test.cpp12
-rw-r--r--src/mongo/s/request_types/merge_chunk_request_test.cpp8
-rw-r--r--src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp20
-rw-r--r--src/mongo/s/request_types/split_chunk_request_test.cpp8
-rw-r--r--src/mongo/s/request_types/update_zone_key_range_request_test.cpp24
-rw-r--r--src/mongo/s/set_shard_version_request_test.cpp188
-rw-r--r--src/mongo/s/shard_key_pattern_test.cpp320
-rw-r--r--src/mongo/s/sharding_test_fixture.cpp22
-rw-r--r--src/mongo/s/write_ops/batched_command_request_test.cpp12
-rw-r--r--src/mongo/s/write_ops/batched_delete_request_test.cpp26
-rw-r--r--src/mongo/s/write_ops/batched_insert_request_test.cpp2
-rw-r--r--src/mongo/s/write_ops/batched_update_request_test.cpp28
-rw-r--r--src/mongo/unittest/SConscript8
-rw-r--r--src/mongo/unittest/bson_test_util.cpp59
-rw-r--r--src/mongo/unittest/bson_test_util.h70
-rw-r--r--src/mongo/unittest/unittest.h1
-rw-r--r--src/mongo/unittest/unittest_test.cpp51
-rw-r--r--src/mongo/util/cmdline_utils/censor_cmdline_test.cpp6
-rw-r--r--src/mongo/util/options_parser/environment_test.cpp6
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp2
197 files changed, 3059 insertions, 2306 deletions
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index 4dd5a21f7f8..d9f421ece0f 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -77,6 +77,7 @@ baseSource=[
'bson/bsontypes.cpp',
'bson/json.cpp',
'bson/oid.cpp',
+ 'bson/simple_bsonobj_comparator.cpp',
'bson/timestamp.cpp',
'logger/component_message_log_domain.cpp',
'logger/console.cpp',
diff --git a/src/mongo/bson/bson_field_test.cpp b/src/mongo/bson/bson_field_test.cpp
index b6fbb0fd565..0a0b2d9aec2 100644
--- a/src/mongo/bson/bson_field_test.cpp
+++ b/src/mongo/bson/bson_field_test.cpp
@@ -36,31 +36,31 @@ using mongo::BSONObj;
TEST(Assignment, Simple) {
BSONField<int> x("x");
BSONObj o = BSON(x << 5);
- ASSERT_EQUALS(BSON("x" << 5), o);
+ ASSERT_BSONOBJ_EQ(BSON("x" << 5), o);
}
TEST(Make, Simple) {
BSONField<int> x("x");
BSONObj o = BSON(x.make(5));
- ASSERT_EQUALS(BSON("x" << 5), o);
+ ASSERT_BSONOBJ_EQ(BSON("x" << 5), o);
}
TEST(Query, GreaterThan) {
BSONField<int> x("x");
BSONObj o = BSON(x(5));
- ASSERT_EQUALS(BSON("x" << 5), o);
+ ASSERT_BSONOBJ_EQ(BSON("x" << 5), o);
o = BSON(x.gt(5));
- ASSERT_EQUALS(BSON("x" << BSON("$gt" << 5)), o);
+ ASSERT_BSONOBJ_EQ(BSON("x" << BSON("$gt" << 5)), o);
}
TEST(Query, NotEqual) {
BSONField<int> x("x");
BSONObj o = BSON(x(10));
- ASSERT_EQUALS(BSON("x" << 10), o);
+ ASSERT_BSONOBJ_EQ(BSON("x" << 10), o);
o = BSON(x.ne(5));
- ASSERT_EQUALS(BSON("x" << BSON("$ne" << 5)), o);
+ ASSERT_BSONOBJ_EQ(BSON("x" << BSON("$ne" << 5)), o);
}
} // unnamed namespace
diff --git a/src/mongo/bson/bson_obj_test.cpp b/src/mongo/bson/bson_obj_test.cpp
index f59f74ebd82..bf38662a321 100644
--- a/src/mongo/bson/bson_obj_test.cpp
+++ b/src/mongo/bson/bson_obj_test.cpp
@@ -42,92 +42,94 @@ TEST(BSONObjToString, EmptyArray) {
}
TEST(BSONObjCompare, Timestamp) {
- ASSERT_LT(BSON("" << Timestamp(0, 3)), BSON("" << Timestamp(~0U, 2)));
- ASSERT_GT(BSON("" << Timestamp(2, 3)), BSON("" << Timestamp(2, 2)));
- ASSERT_EQ(BSON("" << Timestamp(3ULL)), BSON("" << Timestamp(0, 3)));
+ ASSERT_BSONOBJ_LT(BSON("" << Timestamp(0, 3)), BSON("" << Timestamp(~0U, 2)));
+ ASSERT_BSONOBJ_GT(BSON("" << Timestamp(2, 3)), BSON("" << Timestamp(2, 2)));
+ ASSERT_BSONOBJ_EQ(BSON("" << Timestamp(3ULL)), BSON("" << Timestamp(0, 3)));
}
TEST(BSONObjCompare, NumberDouble) {
- ASSERT_LT(BSON("" << 0.0), BSON("" << 1.0));
- ASSERT_LT(BSON("" << -1.0), BSON("" << 0.0));
- ASSERT_LT(BSON("" << -1.0), BSON("" << 1.0));
-
- ASSERT_LT(BSON("" << 0.0), BSON("" << 0.1));
- ASSERT_LT(BSON("" << 0.1), BSON("" << 1.0));
- ASSERT_LT(BSON("" << -1.0), BSON("" << -0.1));
- ASSERT_LT(BSON("" << -0.1), BSON("" << 0.0));
- ASSERT_LT(BSON("" << -0.1), BSON("" << 0.1));
-
- ASSERT_LT(BSON("" << 0.0), BSON("" << std::numeric_limits<double>::denorm_min()));
- ASSERT_GT(BSON("" << 0.0), BSON("" << -std::numeric_limits<double>::denorm_min()));
-
- ASSERT_LT(BSON("" << 1.0), BSON("" << (1.0 + std::numeric_limits<double>::epsilon())));
- ASSERT_GT(BSON("" << -1.0), BSON("" << (-1.0 - std::numeric_limits<double>::epsilon())));
-
- ASSERT_EQ(BSON("" << 0.0), BSON("" << -0.0));
-
- ASSERT_GT(BSON("" << std::numeric_limits<double>::infinity()), BSON("" << 0.0));
- ASSERT_GT(BSON("" << std::numeric_limits<double>::infinity()),
- BSON("" << std::numeric_limits<double>::max())); // max is finite
- ASSERT_GT(BSON("" << std::numeric_limits<double>::infinity()),
- BSON("" << -std::numeric_limits<double>::infinity()));
-
- ASSERT_LT(BSON("" << -std::numeric_limits<double>::infinity()), BSON("" << 0.0));
- ASSERT_LT(BSON("" << -std::numeric_limits<double>::infinity()),
- BSON("" << -std::numeric_limits<double>::max()));
- ASSERT_LT(BSON("" << -std::numeric_limits<double>::infinity()),
- BSON("" << std::numeric_limits<double>::infinity()));
-
- ASSERT_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()), BSON("" << 0.0));
- ASSERT_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
- BSON("" << -std::numeric_limits<double>::max()));
- ASSERT_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
- BSON("" << std::numeric_limits<double>::infinity()));
- ASSERT_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
- BSON("" << -std::numeric_limits<double>::infinity()));
+ ASSERT_BSONOBJ_LT(BSON("" << 0.0), BSON("" << 1.0));
+ ASSERT_BSONOBJ_LT(BSON("" << -1.0), BSON("" << 0.0));
+ ASSERT_BSONOBJ_LT(BSON("" << -1.0), BSON("" << 1.0));
+
+ ASSERT_BSONOBJ_LT(BSON("" << 0.0), BSON("" << 0.1));
+ ASSERT_BSONOBJ_LT(BSON("" << 0.1), BSON("" << 1.0));
+ ASSERT_BSONOBJ_LT(BSON("" << -1.0), BSON("" << -0.1));
+ ASSERT_BSONOBJ_LT(BSON("" << -0.1), BSON("" << 0.0));
+ ASSERT_BSONOBJ_LT(BSON("" << -0.1), BSON("" << 0.1));
+
+ ASSERT_BSONOBJ_LT(BSON("" << 0.0), BSON("" << std::numeric_limits<double>::denorm_min()));
+ ASSERT_BSONOBJ_GT(BSON("" << 0.0), BSON("" << -std::numeric_limits<double>::denorm_min()));
+
+ ASSERT_BSONOBJ_LT(BSON("" << 1.0), BSON("" << (1.0 + std::numeric_limits<double>::epsilon())));
+ ASSERT_BSONOBJ_GT(BSON("" << -1.0),
+ BSON("" << (-1.0 - std::numeric_limits<double>::epsilon())));
+
+ ASSERT_BSONOBJ_EQ(BSON("" << 0.0), BSON("" << -0.0));
+
+ ASSERT_BSONOBJ_GT(BSON("" << std::numeric_limits<double>::infinity()), BSON("" << 0.0));
+ ASSERT_BSONOBJ_GT(BSON("" << std::numeric_limits<double>::infinity()),
+ BSON("" << std::numeric_limits<double>::max())); // max is finite
+ ASSERT_BSONOBJ_GT(BSON("" << std::numeric_limits<double>::infinity()),
+ BSON("" << -std::numeric_limits<double>::infinity()));
+
+ ASSERT_BSONOBJ_LT(BSON("" << -std::numeric_limits<double>::infinity()), BSON("" << 0.0));
+ ASSERT_BSONOBJ_LT(BSON("" << -std::numeric_limits<double>::infinity()),
+ BSON("" << -std::numeric_limits<double>::max()));
+ ASSERT_BSONOBJ_LT(BSON("" << -std::numeric_limits<double>::infinity()),
+ BSON("" << std::numeric_limits<double>::infinity()));
+
+ ASSERT_BSONOBJ_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()), BSON("" << 0.0));
+ ASSERT_BSONOBJ_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
+ BSON("" << -std::numeric_limits<double>::max()));
+ ASSERT_BSONOBJ_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
+ BSON("" << std::numeric_limits<double>::infinity()));
+ ASSERT_BSONOBJ_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
+ BSON("" << -std::numeric_limits<double>::infinity()));
// TODO in C++11 use hex floating point to test distinct NaN representations
- ASSERT_EQ(BSON("" << std::numeric_limits<double>::quiet_NaN()),
- BSON("" << std::numeric_limits<double>::signaling_NaN()));
+ ASSERT_BSONOBJ_EQ(BSON("" << std::numeric_limits<double>::quiet_NaN()),
+ BSON("" << std::numeric_limits<double>::signaling_NaN()));
}
TEST(BSONObjCompare, NumberLong_Double) {
- ASSERT_EQ(BSON("" << 0ll), BSON("" << 0.0));
- ASSERT_EQ(BSON("" << 0ll), BSON("" << -0.0));
+ ASSERT_BSONOBJ_EQ(BSON("" << 0ll), BSON("" << 0.0));
+ ASSERT_BSONOBJ_EQ(BSON("" << 0ll), BSON("" << -0.0));
- ASSERT_EQ(BSON("" << 1ll), BSON("" << 1.0));
- ASSERT_EQ(BSON("" << -1ll), BSON("" << -1.0));
+ ASSERT_BSONOBJ_EQ(BSON("" << 1ll), BSON("" << 1.0));
+ ASSERT_BSONOBJ_EQ(BSON("" << -1ll), BSON("" << -1.0));
- ASSERT_LT(BSON("" << 0ll), BSON("" << 1.0));
- ASSERT_LT(BSON("" << -1ll), BSON("" << 0.0));
- ASSERT_LT(BSON("" << -1ll), BSON("" << 1.0));
+ ASSERT_BSONOBJ_LT(BSON("" << 0ll), BSON("" << 1.0));
+ ASSERT_BSONOBJ_LT(BSON("" << -1ll), BSON("" << 0.0));
+ ASSERT_BSONOBJ_LT(BSON("" << -1ll), BSON("" << 1.0));
- ASSERT_LT(BSON("" << 0ll), BSON("" << 0.1));
- ASSERT_LT(BSON("" << 0.1), BSON("" << 1ll));
- ASSERT_LT(BSON("" << -1ll), BSON("" << -0.1));
- ASSERT_LT(BSON("" << -0.1), BSON("" << 0ll));
+ ASSERT_BSONOBJ_LT(BSON("" << 0ll), BSON("" << 0.1));
+ ASSERT_BSONOBJ_LT(BSON("" << 0.1), BSON("" << 1ll));
+ ASSERT_BSONOBJ_LT(BSON("" << -1ll), BSON("" << -0.1));
+ ASSERT_BSONOBJ_LT(BSON("" << -0.1), BSON("" << 0ll));
- ASSERT_LT(BSON("" << 0ll), BSON("" << std::numeric_limits<double>::denorm_min()));
- ASSERT_GT(BSON("" << 0ll), BSON("" << -std::numeric_limits<double>::denorm_min()));
+ ASSERT_BSONOBJ_LT(BSON("" << 0ll), BSON("" << std::numeric_limits<double>::denorm_min()));
+ ASSERT_BSONOBJ_GT(BSON("" << 0ll), BSON("" << -std::numeric_limits<double>::denorm_min()));
- ASSERT_LT(BSON("" << 1ll), BSON("" << (1.0 + std::numeric_limits<double>::epsilon())));
- ASSERT_GT(BSON("" << -1ll), BSON("" << (-1.0 - std::numeric_limits<double>::epsilon())));
+ ASSERT_BSONOBJ_LT(BSON("" << 1ll), BSON("" << (1.0 + std::numeric_limits<double>::epsilon())));
+ ASSERT_BSONOBJ_GT(BSON("" << -1ll),
+ BSON("" << (-1.0 - std::numeric_limits<double>::epsilon())));
- ASSERT_GT(BSON("" << std::numeric_limits<double>::infinity()), BSON("" << 0ll));
- ASSERT_GT(BSON("" << std::numeric_limits<double>::infinity()),
- BSON("" << std::numeric_limits<long long>::max()));
- ASSERT_GT(BSON("" << std::numeric_limits<double>::infinity()),
- BSON("" << std::numeric_limits<long long>::min()));
+ ASSERT_BSONOBJ_GT(BSON("" << std::numeric_limits<double>::infinity()), BSON("" << 0ll));
+ ASSERT_BSONOBJ_GT(BSON("" << std::numeric_limits<double>::infinity()),
+ BSON("" << std::numeric_limits<long long>::max()));
+ ASSERT_BSONOBJ_GT(BSON("" << std::numeric_limits<double>::infinity()),
+ BSON("" << std::numeric_limits<long long>::min()));
- ASSERT_LT(BSON("" << -std::numeric_limits<double>::infinity()), BSON("" << 0ll));
- ASSERT_LT(BSON("" << -std::numeric_limits<double>::infinity()),
- BSON("" << std::numeric_limits<long long>::max()));
- ASSERT_LT(BSON("" << -std::numeric_limits<double>::infinity()),
- BSON("" << std::numeric_limits<long long>::min()));
+ ASSERT_BSONOBJ_LT(BSON("" << -std::numeric_limits<double>::infinity()), BSON("" << 0ll));
+ ASSERT_BSONOBJ_LT(BSON("" << -std::numeric_limits<double>::infinity()),
+ BSON("" << std::numeric_limits<long long>::max()));
+ ASSERT_BSONOBJ_LT(BSON("" << -std::numeric_limits<double>::infinity()),
+ BSON("" << std::numeric_limits<long long>::min()));
- ASSERT_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()), BSON("" << 0ll));
- ASSERT_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
- BSON("" << std::numeric_limits<long long>::min()));
+ ASSERT_BSONOBJ_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()), BSON("" << 0ll));
+ ASSERT_BSONOBJ_LT(BSON("" << std::numeric_limits<double>::quiet_NaN()),
+ BSON("" << std::numeric_limits<long long>::min()));
for (int powerOfTwo = 0; powerOfTwo < 63; powerOfTwo++) {
const long long lNum = 1ll << powerOfTwo;
@@ -136,22 +138,22 @@ TEST(BSONObjCompare, NumberLong_Double) {
// All powers of two in this range can be represented exactly as doubles.
invariant(lNum == static_cast<long long>(dNum));
- ASSERT_EQ(BSON("" << lNum), BSON("" << dNum));
- ASSERT_EQ(BSON("" << -lNum), BSON("" << -dNum));
+ ASSERT_BSONOBJ_EQ(BSON("" << lNum), BSON("" << dNum));
+ ASSERT_BSONOBJ_EQ(BSON("" << -lNum), BSON("" << -dNum));
- ASSERT_GT(BSON("" << (lNum + 1)), BSON("" << dNum));
- ASSERT_LT(BSON("" << (lNum - 1)), BSON("" << dNum));
- ASSERT_GT(BSON("" << (-lNum + 1)), BSON("" << -dNum));
- ASSERT_LT(BSON("" << (-lNum - 1)), BSON("" << -dNum));
+ ASSERT_BSONOBJ_GT(BSON("" << (lNum + 1)), BSON("" << dNum));
+ ASSERT_BSONOBJ_LT(BSON("" << (lNum - 1)), BSON("" << dNum));
+ ASSERT_BSONOBJ_GT(BSON("" << (-lNum + 1)), BSON("" << -dNum));
+ ASSERT_BSONOBJ_LT(BSON("" << (-lNum - 1)), BSON("" << -dNum));
if (powerOfTwo <= 52) { // is dNum - 0.5 representable?
- ASSERT_GT(BSON("" << lNum), BSON("" << (dNum - 0.5)));
- ASSERT_LT(BSON("" << -lNum), BSON("" << -(dNum - 0.5)));
+ ASSERT_BSONOBJ_GT(BSON("" << lNum), BSON("" << (dNum - 0.5)));
+ ASSERT_BSONOBJ_LT(BSON("" << -lNum), BSON("" << -(dNum - 0.5)));
}
if (powerOfTwo <= 51) { // is dNum + 0.5 representable?
- ASSERT_LT(BSON("" << lNum), BSON("" << (dNum + 0.5)));
- ASSERT_GT(BSON("" << -lNum), BSON("" << -(dNum + 0.5)));
+ ASSERT_BSONOBJ_LT(BSON("" << lNum), BSON("" << (dNum + 0.5)));
+ ASSERT_BSONOBJ_GT(BSON("" << -lNum), BSON("" << -(dNum + 0.5)));
}
}
@@ -162,13 +164,13 @@ TEST(BSONObjCompare, NumberLong_Double) {
const double closestAbove = 9223372036854775808.0; // 2**63
const double closestBelow = 9223372036854774784.0; // 2**63 - epsilon
- ASSERT_GT(BSON("" << maxLL), BSON("" << (maxLL - 1)));
- ASSERT_LT(BSON("" << maxLL), BSON("" << closestAbove));
- ASSERT_GT(BSON("" << maxLL), BSON("" << closestBelow));
+ ASSERT_BSONOBJ_GT(BSON("" << maxLL), BSON("" << (maxLL - 1)));
+ ASSERT_BSONOBJ_LT(BSON("" << maxLL), BSON("" << closestAbove));
+ ASSERT_BSONOBJ_GT(BSON("" << maxLL), BSON("" << closestBelow));
- ASSERT_LT(BSON("" << -maxLL), BSON("" << -(maxLL - 1)));
- ASSERT_GT(BSON("" << -maxLL), BSON("" << -closestAbove));
- ASSERT_LT(BSON("" << -maxLL), BSON("" << -closestBelow));
+ ASSERT_BSONOBJ_LT(BSON("" << -maxLL), BSON("" << -(maxLL - 1)));
+ ASSERT_BSONOBJ_GT(BSON("" << -maxLL), BSON("" << -closestAbove));
+ ASSERT_BSONOBJ_LT(BSON("" << -maxLL), BSON("" << -closestBelow));
}
{
@@ -182,122 +184,137 @@ TEST(BSONObjCompare, NumberLong_Double) {
invariant(static_cast<double>(minLL) == equal);
invariant(static_cast<long long>(equal) == minLL);
- ASSERT_LT(BSON("" << minLL), BSON("" << (minLL + 1)));
+ ASSERT_BSONOBJ_LT(BSON("" << minLL), BSON("" << (minLL + 1)));
- ASSERT_EQ(BSON("" << minLL), BSON("" << equal));
- ASSERT_LT(BSON("" << minLL), BSON("" << closestAbove));
- ASSERT_GT(BSON("" << minLL), BSON("" << closestBelow));
+ ASSERT_BSONOBJ_EQ(BSON("" << minLL), BSON("" << equal));
+ ASSERT_BSONOBJ_LT(BSON("" << minLL), BSON("" << closestAbove));
+ ASSERT_BSONOBJ_GT(BSON("" << minLL), BSON("" << closestBelow));
}
}
TEST(BSONObjCompare, NumberDecimalScaleAndZero) {
- ASSERT_LT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128(1.0)));
- ASSERT_LT(BSON("" << Decimal128(-1.0)), BSON("" << Decimal128(0.0)));
- ASSERT_LT(BSON("" << Decimal128(-1.0)), BSON("" << Decimal128(1.0)));
-
- ASSERT_LT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128(0.1)));
- ASSERT_LT(BSON("" << Decimal128(0.1)), BSON("" << Decimal128(1.0)));
- ASSERT_LT(BSON("" << Decimal128(-1.0)), BSON("" << Decimal128(-0.1)));
- ASSERT_LT(BSON("" << Decimal128(-0.1)), BSON("" << Decimal128(-0.0)));
- ASSERT_LT(BSON("" << Decimal128(-0.1)), BSON("" << Decimal128(0.1)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128(1.0)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-1.0)), BSON("" << Decimal128(0.0)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-1.0)), BSON("" << Decimal128(1.0)));
+
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128(0.1)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(0.1)), BSON("" << Decimal128(1.0)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-1.0)), BSON("" << Decimal128(-0.1)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-0.1)), BSON("" << Decimal128(-0.0)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-0.1)), BSON("" << Decimal128(0.1)));
}
TEST(BSONObjCompare, NumberDecimalMaxAndMins) {
- ASSERT_LT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128::kSmallestPositive));
- ASSERT_GT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128::kLargestNegative));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128::kSmallestPositive));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128(0.0)), BSON("" << Decimal128::kLargestNegative));
// over 34 digits of precision so it should be equal
- ASSERT_EQ(BSON("" << Decimal128(1.0)),
- BSON("" << Decimal128(1.0).add(Decimal128::kSmallestPositive)));
- ASSERT_EQ(BSON("" << Decimal128(0.0)), BSON("" << Decimal128(-0.0)));
-
- ASSERT_EQ(BSON("" << Decimal128(0)), BSON("" << Decimal128(0)));
- ASSERT_EQ(BSON("" << Decimal128::kSmallestPositive), BSON("" << Decimal128::kSmallestPositive));
- ASSERT_EQ(BSON("" << Decimal128::kLargestNegative), BSON("" << Decimal128::kLargestNegative));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(1.0)),
+ BSON("" << Decimal128(1.0).add(Decimal128::kSmallestPositive)));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(0.0)), BSON("" << Decimal128(-0.0)));
+
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(0)), BSON("" << Decimal128(0)));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kSmallestPositive),
+ BSON("" << Decimal128::kSmallestPositive));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kLargestNegative),
+ BSON("" << Decimal128::kLargestNegative));
}
TEST(BSONObjCompare, NumberDecimalInfinity) {
- ASSERT_GT(BSON("" << Decimal128::kPositiveInfinity), BSON("" << Decimal128(0.0)));
- ASSERT_GT(BSON("" << Decimal128::kPositiveInfinity), BSON("" << Decimal128::kLargestPositive));
- ASSERT_GT(BSON("" << Decimal128::kPositiveInfinity), BSON("" << Decimal128::kNegativeInfinity));
-
- ASSERT_EQ(BSON("" << Decimal128::kPositiveInfinity), BSON("" << Decimal128::kPositiveInfinity));
- ASSERT_EQ(BSON("" << Decimal128::kNegativeInfinity), BSON("" << Decimal128::kNegativeInfinity));
-
- ASSERT_LT(BSON("" << Decimal128::kNegativeInfinity), BSON("" << Decimal128(0.0)));
- ASSERT_LT(BSON("" << Decimal128::kNegativeInfinity), BSON("" << Decimal128::kSmallestNegative));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128::kPositiveInfinity), BSON("" << Decimal128(0.0)));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128::kPositiveInfinity),
+ BSON("" << Decimal128::kLargestPositive));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128::kPositiveInfinity),
+ BSON("" << Decimal128::kNegativeInfinity));
+
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kPositiveInfinity),
+ BSON("" << Decimal128::kPositiveInfinity));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kNegativeInfinity),
+ BSON("" << Decimal128::kNegativeInfinity));
+
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeInfinity), BSON("" << Decimal128(0.0)));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeInfinity),
+ BSON("" << Decimal128::kSmallestNegative));
}
TEST(BSONObjCompare, NumberDecimalPosNaN) {
// +/-NaN is well ordered and compares smallest, so +NaN and -NaN should behave the same
- ASSERT_LT(BSON("" << Decimal128::kPositiveNaN), BSON("" << 0.0));
- ASSERT_LT(BSON("" << Decimal128::kPositiveNaN), BSON("" << Decimal128::kSmallestNegative));
- ASSERT_LT(BSON("" << Decimal128::kPositiveNaN), BSON("" << Decimal128::kPositiveInfinity));
- ASSERT_LT(BSON("" << Decimal128::kPositiveNaN), BSON("" << Decimal128::kNegativeInfinity));
-
- ASSERT_EQ(BSON("" << Decimal128::kPositiveNaN), BSON("" << Decimal128::kNegativeNaN));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kPositiveNaN), BSON("" << 0.0));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kPositiveNaN),
+ BSON("" << Decimal128::kSmallestNegative));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kPositiveNaN),
+ BSON("" << Decimal128::kPositiveInfinity));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kPositiveNaN),
+ BSON("" << Decimal128::kNegativeInfinity));
+
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kPositiveNaN), BSON("" << Decimal128::kNegativeNaN));
}
TEST(BSONObjCompare, NumberDecimalNegNan) {
- ASSERT_LT(BSON("" << Decimal128::kNegativeNaN), BSON("" << 0.0));
- ASSERT_LT(BSON("" << Decimal128::kNegativeNaN), BSON("" << Decimal128::kSmallestNegative));
- ASSERT_LT(BSON("" << Decimal128::kNegativeNaN), BSON("" << Decimal128::kPositiveInfinity));
- ASSERT_LT(BSON("" << Decimal128::kNegativeNaN), BSON("" << Decimal128::kNegativeInfinity));
-
- ASSERT_EQ(BSON("" << Decimal128::kNegativeNaN), BSON("" << Decimal128::kPositiveNaN));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeNaN), BSON("" << 0.0));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeNaN),
+ BSON("" << Decimal128::kSmallestNegative));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeNaN),
+ BSON("" << Decimal128::kPositiveInfinity));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeNaN),
+ BSON("" << Decimal128::kNegativeInfinity));
+
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kNegativeNaN), BSON("" << Decimal128::kPositiveNaN));
}
TEST(BSONObjCompare, NumberDecimalCompareInt) {
- ASSERT_EQ(BSON("" << Decimal128(0.0)), BSON("" << 0));
- ASSERT_EQ(BSON("" << Decimal128(502.0)), BSON("" << 502));
- ASSERT_EQ(BSON("" << Decimal128(std::numeric_limits<int>::max())),
- BSON("" << std::numeric_limits<int>::max()));
- ASSERT_EQ(BSON("" << Decimal128(-std::numeric_limits<int>::max())),
- BSON("" << -std::numeric_limits<int>::max()));
-
- ASSERT_LT(BSON("" << Decimal128::kNegativeNaN), BSON("" << -std::numeric_limits<int>::max()));
- ASSERT_LT(BSON("" << Decimal128::kPositiveNaN), BSON("" << -std::numeric_limits<int>::max()));
- ASSERT_LT(BSON("" << Decimal128::kNegativeInfinity),
- BSON("" << -std::numeric_limits<int>::max()));
- ASSERT_GT(BSON("" << Decimal128::kPositiveInfinity),
- BSON("" << std::numeric_limits<int>::max()));
-
- ASSERT_GT(BSON("" << Decimal128(1.0)), BSON("" << 0));
- ASSERT_LT(BSON("" << Decimal128(-1.0)), BSON("" << 0));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(0.0)), BSON("" << 0));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(502.0)), BSON("" << 502));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(std::numeric_limits<int>::max())),
+ BSON("" << std::numeric_limits<int>::max()));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(-std::numeric_limits<int>::max())),
+ BSON("" << -std::numeric_limits<int>::max()));
+
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeNaN),
+ BSON("" << -std::numeric_limits<int>::max()));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kPositiveNaN),
+ BSON("" << -std::numeric_limits<int>::max()));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeInfinity),
+ BSON("" << -std::numeric_limits<int>::max()));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128::kPositiveInfinity),
+ BSON("" << std::numeric_limits<int>::max()));
+
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128(1.0)), BSON("" << 0));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-1.0)), BSON("" << 0));
}
TEST(BSONObjCompare, NumberDecimalCompareLong) {
- ASSERT_EQ(BSON("" << Decimal128(0.0)), BSON("" << 0ll));
- ASSERT_EQ(BSON("" << Decimal128(502.0)), BSON("" << 502ll));
- ASSERT_EQ(BSON("" << Decimal128(std::numeric_limits<int64_t>::max())),
- BSON("" << std::numeric_limits<long long>::max()));
- ASSERT_EQ(BSON("" << Decimal128(-std::numeric_limits<int64_t>::max())),
- BSON("" << -std::numeric_limits<long long>::max()));
-
- ASSERT_LT(BSON("" << Decimal128::kNegativeNaN),
- BSON("" << -std::numeric_limits<long long>::max()));
- ASSERT_LT(BSON("" << Decimal128::kPositiveNaN),
- BSON("" << -std::numeric_limits<long long>::max()));
- ASSERT_LT(BSON("" << Decimal128::kNegativeInfinity),
- BSON("" << -std::numeric_limits<long long>::max()));
- ASSERT_GT(BSON("" << Decimal128::kPositiveInfinity),
- BSON("" << std::numeric_limits<long long>::max()));
-
- ASSERT_GT(BSON("" << Decimal128(1.0)), BSON("" << 0ll));
- ASSERT_LT(BSON("" << Decimal128(-1.0)), BSON("" << 0ll));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(0.0)), BSON("" << 0ll));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(502.0)), BSON("" << 502ll));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(std::numeric_limits<int64_t>::max())),
+ BSON("" << std::numeric_limits<long long>::max()));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(-std::numeric_limits<int64_t>::max())),
+ BSON("" << -std::numeric_limits<long long>::max()));
+
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeNaN),
+ BSON("" << -std::numeric_limits<long long>::max()));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kPositiveNaN),
+ BSON("" << -std::numeric_limits<long long>::max()));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128::kNegativeInfinity),
+ BSON("" << -std::numeric_limits<long long>::max()));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128::kPositiveInfinity),
+ BSON("" << std::numeric_limits<long long>::max()));
+
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128(1.0)), BSON("" << 0ll));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-1.0)), BSON("" << 0ll));
}
TEST(BSONObjCompare, NumberDecimalCompareDoubleExactRepresentations) {
- ASSERT_EQ(BSON("" << Decimal128(0.0)), BSON("" << 0.0));
- ASSERT_EQ(BSON("" << Decimal128(1.0)), BSON("" << 1.0));
- ASSERT_EQ(BSON("" << Decimal128(-1.0)), BSON("" << -1.0));
- ASSERT_EQ(BSON("" << Decimal128(0.125)), BSON("" << 0.125));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(0.0)), BSON("" << 0.0));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(1.0)), BSON("" << 1.0));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(-1.0)), BSON("" << -1.0));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128(0.125)), BSON("" << 0.125));
- ASSERT_LT(BSON("" << Decimal128(0.0)), BSON("" << 0.125));
- ASSERT_LT(BSON("" << Decimal128(-1.0)), BSON("" << -0.125));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(0.0)), BSON("" << 0.125));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(-1.0)), BSON("" << -0.125));
- ASSERT_GT(BSON("" << Decimal128(1.0)), BSON("" << 0.125));
- ASSERT_GT(BSON("" << Decimal128(0.0)), BSON("" << -0.125));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128(1.0)), BSON("" << 0.125));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128(0.0)), BSON("" << -0.125));
}
TEST(BSONObjCompare, NumberDecimalCompareDoubleNoDoubleRepresentation) {
@@ -307,20 +324,21 @@ TEST(BSONObjCompare, NumberDecimalCompareDoubleNoDoubleRepresentation) {
// then compare equal to both double(0.10000000000000000555) and
// double(0.999999999999999876). The following test cases check that
// proper well ordering is applied to double and decimal comparisons.
- ASSERT_GT(BSON("" << Decimal128("0.3")), BSON("" << 0.1));
- ASSERT_LT(BSON("" << Decimal128("0.1")), BSON("" << 0.3));
- ASSERT_LT(BSON("" << Decimal128("-0.3")), BSON("" << -0.1));
- ASSERT_GT(BSON("" << Decimal128("-0.1")), BSON("" << -0.3));
- ASSERT_LT(BSON("" << Decimal128("0.1")), BSON("" << 0.1));
- ASSERT_GT(BSON("" << Decimal128("0.3")), BSON("" << 0.3));
- ASSERT_GT(BSON("" << Decimal128("-0.1")), BSON("" << -0.1));
- ASSERT_LT(BSON("" << Decimal128("-0.3")), BSON("" << -0.3));
- ASSERT_EQ(BSON("" << Decimal128("0.5")), BSON("" << 0.5));
- ASSERT_GT(BSON("" << Decimal128("0.5000000000000000000000000000000001")), BSON("" << 0.5));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128("0.3")), BSON("" << 0.1));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128("0.1")), BSON("" << 0.3));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128("-0.3")), BSON("" << -0.1));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128("-0.1")), BSON("" << -0.3));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128("0.1")), BSON("" << 0.1));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128("0.3")), BSON("" << 0.3));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128("-0.1")), BSON("" << -0.1));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128("-0.3")), BSON("" << -0.3));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128("0.5")), BSON("" << 0.5));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128("0.5000000000000000000000000000000001")),
+ BSON("" << 0.5));
// Double 0.1 should compare well against significantly different decimals
- ASSERT_LT(BSON("" << Decimal128(0.0)), BSON("" << 0.1));
- ASSERT_GT(BSON("" << Decimal128(1.0)), BSON("" << 0.1));
+ ASSERT_BSONOBJ_LT(BSON("" << Decimal128(0.0)), BSON("" << 0.1));
+ ASSERT_BSONOBJ_GT(BSON("" << Decimal128(1.0)), BSON("" << 0.1));
}
TEST(BSONObjCompare, NumberDecimalCompareDoubleQuantize) {
@@ -332,29 +350,29 @@ TEST(BSONObjCompare, NumberDecimalCompareDoubleQuantize) {
Decimal128 roundedDoubleLargestNegValue("-179769313486232E294");
Decimal128 roundedDoubleOneAboveSmallestNegValue("-179769313486231E294");
- ASSERT_EQ(BSON("" << roundedDoubleLargestPosValue),
- BSON("" << Decimal128(std::numeric_limits<double>::max())));
- ASSERT_EQ(BSON("" << roundedDoubleLargestNegValue),
- BSON("" << Decimal128(-std::numeric_limits<double>::max())));
+ ASSERT_BSONOBJ_EQ(BSON("" << roundedDoubleLargestPosValue),
+ BSON("" << Decimal128(std::numeric_limits<double>::max())));
+ ASSERT_BSONOBJ_EQ(BSON("" << roundedDoubleLargestNegValue),
+ BSON("" << Decimal128(-std::numeric_limits<double>::max())));
- ASSERT_GT(BSON("" << roundedDoubleOneAboveLargestPosValue),
- BSON("" << Decimal128(std::numeric_limits<double>::max())));
- ASSERT_LT(BSON("" << roundedDoubleOneAboveSmallestNegValue),
- BSON("" << Decimal128(-std::numeric_limits<double>::min())));
+ ASSERT_BSONOBJ_GT(BSON("" << roundedDoubleOneAboveLargestPosValue),
+ BSON("" << Decimal128(std::numeric_limits<double>::max())));
+ ASSERT_BSONOBJ_LT(BSON("" << roundedDoubleOneAboveSmallestNegValue),
+ BSON("" << Decimal128(-std::numeric_limits<double>::min())));
}
TEST(BSONObjCompare, NumberDecimalCompareDoubleInfinity) {
- ASSERT_EQ(BSON("" << Decimal128::kPositiveInfinity),
- BSON("" << std::numeric_limits<double>::infinity()));
- ASSERT_EQ(BSON("" << Decimal128::kNegativeInfinity),
- BSON("" << -std::numeric_limits<double>::infinity()));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kPositiveInfinity),
+ BSON("" << std::numeric_limits<double>::infinity()));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kNegativeInfinity),
+ BSON("" << -std::numeric_limits<double>::infinity()));
}
TEST(BSONObjCompare, NumberDecimalCompareDoubleNaN) {
- ASSERT_EQ(BSON("" << Decimal128::kPositiveNaN),
- BSON("" << std::numeric_limits<double>::quiet_NaN()));
- ASSERT_EQ(BSON("" << Decimal128::kNegativeNaN),
- BSON("" << -std::numeric_limits<double>::quiet_NaN()));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kPositiveNaN),
+ BSON("" << std::numeric_limits<double>::quiet_NaN()));
+ ASSERT_BSONOBJ_EQ(BSON("" << Decimal128::kNegativeNaN),
+ BSON("" << -std::numeric_limits<double>::quiet_NaN()));
}
TEST(BSONObjCompare, StringSymbol) {
@@ -552,7 +570,7 @@ TEST(BSONObj, ShareOwnershipWith) {
// Now that tmp is out of scope, if obj didn't retain ownership, it would be accessing free'd
// memory which should error on ASAN and debug builds.
ASSERT(obj.isOwned());
- ASSERT_EQ(obj, BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(obj, BSON("a" << 1));
}
} // unnamed namespace
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
index fa405e9f299..726a5f43be9 100644
--- a/src/mongo/bson/bsonobj.h
+++ b/src/mongo/bson/bsonobj.h
@@ -95,6 +95,31 @@ typedef std::multiset<BSONElement, BSONElementCmpWithoutField> BSONElementMSet;
*/
class BSONObj {
public:
+ // Declared in bsonobj_comparator_interface.h.
+ class ComparatorInterface;
+
+ /**
+ * Operator overloads for relops return a DeferredComparison which can subsequently be evaluated
+ * by a BSONObj::ComparatorInterface.
+ */
+ struct DeferredComparison {
+ enum class Type {
+ kLT,
+ kLTE,
+ kEQ,
+ kGT,
+ kGTE,
+ kNE,
+ };
+
+ DeferredComparison(Type type, const BSONObj& lhs, const BSONObj& rhs)
+ : type(type), lhs(lhs), rhs(rhs) {}
+
+ Type type;
+ const BSONObj& lhs;
+ const BSONObj& rhs;
+ };
+
static const char kMinBSONLength = 5;
/** Construct an empty BSONObj -- that is, {}. */
@@ -394,6 +419,15 @@ public:
/** Alternative output format */
std::string hexDump() const;
+ //
+ // Comparison API.
+ //
+ // BSONObj instances can be compared either using woCompare() or via operator overloads. Most
+ // callers should prefer operator overloads. Note that the operator overloads return a
+ // DeferredComparison, which must be subsequently evaluated by a BSONObj::ComparatorInterface.
+ // See bsonobj_comparator_interface.h for details.
+ //
+
/**wo='well ordered'. fields must be in same order in each object.
Ordering is with respect to the signs of the elements
and allows ascending / descending key mixing.
@@ -416,17 +450,28 @@ public:
bool considerFieldName = true,
const StringData::ComparatorInterface* comparator = nullptr) const;
- bool operator<(const BSONObj& other) const {
- return woCompare(other) < 0;
+ DeferredComparison operator<(const BSONObj& other) const {
+ return DeferredComparison(DeferredComparison::Type::kLT, *this, other);
+ }
+
+ DeferredComparison operator<=(const BSONObj& other) const {
+ return DeferredComparison(DeferredComparison::Type::kLTE, *this, other);
}
- bool operator<=(const BSONObj& other) const {
- return woCompare(other) <= 0;
+
+ DeferredComparison operator>(const BSONObj& other) const {
+ return DeferredComparison(DeferredComparison::Type::kGT, *this, other);
+ }
+
+ DeferredComparison operator>=(const BSONObj& other) const {
+ return DeferredComparison(DeferredComparison::Type::kGTE, *this, other);
}
- bool operator>(const BSONObj& other) const {
- return woCompare(other) > 0;
+
+ DeferredComparison operator==(const BSONObj& other) const {
+ return DeferredComparison(DeferredComparison::Type::kEQ, *this, other);
}
- bool operator>=(const BSONObj& other) const {
- return woCompare(other) >= 0;
+
+ DeferredComparison operator!=(const BSONObj& other) const {
+ return DeferredComparison(DeferredComparison::Type::kNE, *this, other);
}
bool equal(const BSONObj& r) const;
@@ -505,13 +550,6 @@ public:
/** true unless corrupt */
bool valid() const;
- bool operator==(const BSONObj& other) const {
- return equal(other);
- }
- bool operator!=(const BSONObj& other) const {
- return !operator==(other);
- }
-
enum MatchType {
Equality = 0,
LT = 0x1,
diff --git a/src/mongo/bson/bsonobj_comparator_interface.h b/src/mongo/bson/bsonobj_comparator_interface.h
new file mode 100644
index 00000000000..56fa736dab0
--- /dev/null
+++ b/src/mongo/bson/bsonobj_comparator_interface.h
@@ -0,0 +1,195 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include <map>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "mongo/base/disallow_copying.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/util/assert_util.h"
+
+namespace mongo {
+
+/**
+ * A BSONObj::ComparatorInterface is an abstract class for comparing BSONObj objects. Usage for
+ * comparing two BSON objects, 'lhs' and 'rhs', where 'comparator' is an instance of a class
+ * implementing this interface, is as shown below:
+ *
+ * bool lessThan = comparator.evaluate(lhs < rhs);
+ * bool lessThanOrEqual = comparator.evaluate(lhs <= rhs);
+ * bool equal = comparator.evaluate(lhs == rhs);
+ * bool greaterThanOrEqual = comparator.evaluate(lhs >= rhs);
+ * bool greaterThan = comparator.evaluate(lhs > rhs);
+ * bool notEqual = comparator.evaluate(lhs != rhs);
+ *
+ * Can also be used to obtain function objects compatible for use with standard library algorithms
+ * such as std::sort, and to construct STL sets and maps which respect this comparator.
+ *
+ * All methods are thread-safe.
+ */
+class BSONObj::ComparatorInterface {
+ MONGO_DISALLOW_COPYING(ComparatorInterface);
+
+public:
+ /**
+ * Functor compatible for use with ordered STL containers.
+ */
+ class LessThan {
+ public:
+ explicit LessThan(const ComparatorInterface* comparator) : _comparator(comparator) {}
+
+ bool operator()(const BSONObj& lhs, const BSONObj& rhs) const {
+ return _comparator->compare(lhs, rhs) < 0;
+ }
+
+ private:
+ const ComparatorInterface* _comparator;
+ };
+
+ /**
+ * Functor compatible for use with unordered STL containers.
+ */
+ class EqualTo {
+ public:
+ explicit EqualTo(const ComparatorInterface* comparator) : _comparator(comparator) {}
+
+ bool operator()(const BSONObj& lhs, const BSONObj& rhs) const {
+ return _comparator->compare(lhs, rhs) == 0;
+ }
+
+ private:
+ const ComparatorInterface* _comparator;
+ };
+
+ using BSONObjSet = std::set<BSONObj, BSONObj::ComparatorInterface::LessThan>;
+
+ // TODO SERVER-23990: Make the BSONObj hash collation-aware.
+ using BSONObjUnorderedSet =
+ std::unordered_set<BSONObj, BSONObj::Hasher, BSONObj::ComparatorInterface::EqualTo>;
+
+ template <typename T>
+ using BSONObjMap = std::map<BSONObj, T, BSONObj::ComparatorInterface::LessThan>;
+
+ // TODO SERVER-23990: Make the BSONObj hash collation-aware.
+ template <typename T>
+ using BSONObjIndexedMap =
+ std::unordered_map<BSONObj, T, BSONObj::Hasher, BSONObj::ComparatorInterface::EqualTo>;
+
+ virtual ~ComparatorInterface() = default;
+
+ /**
+ * Compares two BSONObj objects. Returns <0, 0, >0 if 'lhs' < 'rhs', 'lhs' == 'rhs', or 'lhs' >
+ * 'rhs' respectively.
+ */
+ virtual int compare(const BSONObj& lhs, const BSONObj& rhs) const = 0;
+
+ /**
+ * Evaluates a deferred comparison object generated by invocation of one of the BSONObj operator
+ * overloads for relops.
+ */
+ bool evaluate(BSONObj::DeferredComparison deferredComparison) const {
+ int cmp = compare(deferredComparison.lhs, deferredComparison.rhs);
+ switch (deferredComparison.type) {
+ case BSONObj::DeferredComparison::Type::kLT:
+ return cmp < 0;
+ case BSONObj::DeferredComparison::Type::kLTE:
+ return cmp <= 0;
+ case BSONObj::DeferredComparison::Type::kEQ:
+ return cmp == 0;
+ case BSONObj::DeferredComparison::Type::kGT:
+ return cmp > 0;
+ case BSONObj::DeferredComparison::Type::kGTE:
+ return cmp >= 0;
+ case BSONObj::DeferredComparison::Type::kNE:
+ return cmp != 0;
+ }
+
+ MONGO_UNREACHABLE;
+ }
+
+ /**
+ * Returns a function object which computes whether one BSONObj is less than another under this
+ * comparator. This comparator must outlive the returned function object.
+ */
+ LessThan makeLessThan() const {
+ return LessThan(this);
+ }
+
+ /**
+ * Returns a function object which computes whether one BSONObj is equal to another under this
+ * comparator. This comparator must outlive the returned function object.
+ */
+ EqualTo makeEqualTo() const {
+ return EqualTo(this);
+ }
+
+ /**
+ * Construct an empty BSONObjSet whose ordering is given by this comparator. This comparator
+ * must outlive the returned set.
+ */
+ BSONObjSet makeOrderedBSONObjSet() const {
+ return BSONObjSet(LessThan(this));
+ }
+
+ /**
+ * Construct an empty BSONObjUnorderedSet whose equivalence classes are given by this
+ * comparator. This comparator must outlive the returned set.
+ */
+ BSONObjUnorderedSet makeUnorderedBSONObjSet() const {
+ // TODO SERVER-23990: Make the BSONObj hash collation-aware.
+ return BSONObjUnorderedSet(0, BSONObj::Hasher(), EqualTo(this));
+ }
+
+ /**
+ * Construct an empty ordered map from BSONObj to type T whose ordering is given by this
+ * comparator. This comparator must outlive the returned map.
+ */
+ template <typename T>
+ BSONObjMap<T> makeOrderedBSONObjMap() const {
+ return BSONObjMap<T>(LessThan(this));
+ }
+
+    /**
+     * Construct an empty unordered map from BSONObj to type T whose equivalence classes are given
+     * by this comparator. This comparator must outlive the returned map.
+     */
+ template <typename T>
+ BSONObjIndexedMap<T> makeBSONObjIndexedMap() const {
+ // TODO SERVER-23990: Make the BSONObj hash collation-aware.
+ return BSONObjIndexedMap<T>(0, BSONObj::Hasher(), EqualTo(this));
+ }
+
+protected:
+ constexpr ComparatorInterface() = default;
+};
+
+} // namespace mongo
diff --git a/src/mongo/bson/bsonobjbuilder_test.cpp b/src/mongo/bson/bsonobjbuilder_test.cpp
index 4f11df260bb..003f40c27dd 100644
--- a/src/mongo/bson/bsonobjbuilder_test.cpp
+++ b/src/mongo/bson/bsonobjbuilder_test.cpp
@@ -259,7 +259,7 @@ TEST(BSONObjBuilderTest, AppendNumberLongLongMinCompareObject) {
BSONObj o2 = BSON("a" << std::numeric_limits<long long>::min());
- ASSERT_EQUALS(o1, o2);
+ ASSERT_BSONOBJ_EQ(o1, o2);
}
TEST(BSONObjBuilderTest, AppendMaxTimestampConversion) {
@@ -285,11 +285,11 @@ TEST(BSONObjBuilderTest, ResumeBuilding) {
secondBuilder.append("c", "d");
}
auto obj = BSONObj(b.buf());
- ASSERT_EQ(obj,
- BSON("a"
- << "b"
- << "c"
- << "d"));
+ ASSERT_BSONOBJ_EQ(obj,
+ BSON("a"
+ << "b"
+ << "c"
+ << "d"));
}
TEST(BSONObjBuilderTest, ResumeBuildingWithNesting) {
@@ -307,18 +307,18 @@ TEST(BSONObjBuilderTest, ResumeBuildingWithNesting) {
secondBuilder.append("a", BSON("c" << 3));
}
auto obj = BSONObj(b.buf());
- ASSERT_EQ(obj,
- BSON("ll" << BSON("f" << BSON("cc"
- << "dd"))
- << "a"
- << BSON("c" << 3)));
+ ASSERT_BSONOBJ_EQ(obj,
+ BSON("ll" << BSON("f" << BSON("cc"
+ << "dd"))
+ << "a"
+ << BSON("c" << 3)));
}
TEST(BSONObjBuilderTest, ResetToEmptyResultsInEmptyObj) {
BSONObjBuilder bob;
bob.append("a", 3);
bob.resetToEmpty();
- ASSERT_EQ(BSONObj(), bob.obj());
+ ASSERT_BSONOBJ_EQ(BSONObj(), bob.obj());
}
TEST(BSONObjBuilderTest, ResetToEmptyForNestedBuilderOnlyResetsInnerObj) {
@@ -328,7 +328,7 @@ TEST(BSONObjBuilderTest, ResetToEmptyForNestedBuilderOnlyResetsInnerObj) {
innerObj.append("b", 4);
innerObj.resetToEmpty();
innerObj.done();
- ASSERT_EQ(BSON("a" << 3 << "nestedObj" << BSONObj()), bob.obj());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 3 << "nestedObj" << BSONObj()), bob.obj());
}
} // unnamed namespace
diff --git a/src/mongo/bson/mutable/mutable_bson_test.cpp b/src/mongo/bson/mutable/mutable_bson_test.cpp
index 24629617926..d1584ff9714 100644
--- a/src/mongo/bson/mutable/mutable_bson_test.cpp
+++ b/src/mongo/bson/mutable/mutable_bson_test.cpp
@@ -807,13 +807,13 @@ TEST(Serialization, RoundTrip) {
obj = mongo::fromjson(jsonSampleWithDecimal);
mmb::Document doc(obj.copy());
mongo::BSONObj built = doc.getObject();
- ASSERT_EQUALS(obj, built);
+ ASSERT_BSONOBJ_EQ(obj, built);
}
TEST(Documentation, Example1) {
// Create a new document
mmb::Document doc;
- ASSERT_EQUALS(mongo::fromjson("{}"), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{}"), doc.getObject());
// Get the root of the document.
mmb::Element root = doc.root();
@@ -822,41 +822,41 @@ TEST(Documentation, Example1) {
// everything, then push that Element into the root object, making it a child of root.
mmb::Element e0 = doc.makeElementInt("ltuae", 42);
ASSERT_OK(root.pushBack(e0));
- ASSERT_EQUALS(mongo::fromjson("{ ltuae : 42 }"), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ ltuae : 42 }"), doc.getObject());
// Create a new empty mongo::Object-typed Element named 'magic', and push it back as a
// child of the root, making it a sibling of e0.
mmb::Element e1 = doc.makeElementObject("magic");
ASSERT_OK(root.pushBack(e1));
- ASSERT_EQUALS(mongo::fromjson("{ ltuae : 42, magic : {} }"), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ ltuae : 42, magic : {} }"), doc.getObject());
// Create a new mongo::NumberDouble typed Element to represent Pi, and insert it as child
// of the new object we just created.
mmb::Element e3 = doc.makeElementDouble("pi", 3.14);
ASSERT_OK(e1.pushBack(e3));
- ASSERT_EQUALS(mongo::fromjson("{ ltuae : 42, magic : { pi : 3.14 } }"), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ ltuae : 42, magic : { pi : 3.14 } }"), doc.getObject());
// Create a new mongo::NumberDouble to represent Plancks constant in electrovolt
// micrometers, and add it as a child of the 'magic' object.
mmb::Element e4 = doc.makeElementDouble("hbar", 1.239);
ASSERT_OK(e1.pushBack(e4));
- ASSERT_EQUALS(mongo::fromjson("{ ltuae : 42, magic : { pi : 3.14, hbar : 1.239 } }"),
- doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ ltuae : 42, magic : { pi : 3.14, hbar : 1.239 } }"),
+ doc.getObject());
// Rename the parent element of 'hbar' to be 'constants'.
ASSERT_OK(e4.parent().rename("constants"));
- ASSERT_EQUALS(mongo::fromjson("{ ltuae : 42, constants : { pi : 3.14, hbar : 1.239 } }"),
- doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ ltuae : 42, constants : { pi : 3.14, hbar : 1.239 } }"),
+ doc.getObject());
// Rename 'ltuae' to 'answer' by accessing it as the root objects left child.
ASSERT_OK(doc.root().leftChild().rename("answer"));
- ASSERT_EQUALS(mongo::fromjson("{ answer : 42, constants : { pi : 3.14, hbar : 1.239 } }"),
- doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ answer : 42, constants : { pi : 3.14, hbar : 1.239 } }"),
+ doc.getObject());
// Sort the constants by name.
mmb::sortChildren(doc.root().rightChild(), mmb::FieldNameLessThan());
- ASSERT_EQUALS(mongo::fromjson("{ answer : 42, constants : { hbar : 1.239, pi : 3.14 } }"),
- doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ answer : 42, constants : { hbar : 1.239, pi : 3.14 } }"),
+ doc.getObject());
}
TEST(Documentation, Example2) {
@@ -919,7 +919,7 @@ TEST(Documentation, Example2) {
mongo::BSONObjBuilder builder;
doc.writeTo(&builder);
- ASSERT_EQUALS(mongo::fromjson(outJson), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), doc.getObject());
}
namespace {
@@ -943,7 +943,7 @@ TEST(Documentation, Example2InPlaceWithDamageVector) {
// Make the object, and make a copy for reference.
mongo::BSONObj obj = mongo::fromjson(inJson);
const mongo::BSONObj copyOfObj = obj.getOwned();
- ASSERT_EQUALS(obj, copyOfObj);
+ ASSERT_BSONOBJ_EQ(obj, copyOfObj);
// Create a new document representing BSONObj with the above contents.
mmb::Document doc(obj, mmb::Document::kInPlaceEnabled);
@@ -984,7 +984,7 @@ TEST(Documentation, Example2InPlaceWithDamageVector) {
// Demonstrate that while the document has changed, the underlying BSONObj has not yet
// changed.
ASSERT_FALSE(obj == doc);
- ASSERT_EQUALS(copyOfObj, obj);
+ ASSERT_BSONOBJ_EQ(copyOfObj, obj);
// Ensure that in-place updates are still enabled.
ASSERT_EQUALS(mmb::Document::kInPlaceEnabled, doc.getCurrentInPlaceMode());
@@ -1011,7 +1011,7 @@ TEST(Documentation, Example2InPlaceWithDamageVector) {
mongo::BSONObjBuilder builder;
doc.writeTo(&builder);
- ASSERT_EQUALS(mongo::fromjson(outJson), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), doc.getObject());
}
TEST(Documentation, Example3) {
@@ -1041,7 +1041,7 @@ TEST(Documentation, Example3) {
" 'xs': { 'x' : 'x', 'X' : 'X' },"
" 'ys': { 'y' : 'y', 'Y' : 'Y', 'why' : ['not'] }"
"}";
- ASSERT_EQUALS(mongo::fromjson(outJson), outObj);
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), outObj);
}
TEST(Document, LifecycleConstructDefault) {
@@ -1159,7 +1159,7 @@ TEST(Document, RenameDeserialization) {
"{"
" 'a' : { 'b' : { 'C' : { 'd' : 4 } } }"
"}";
- ASSERT_EQUALS(mongo::fromjson(outJson), outObj);
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), outObj);
}
TEST(Document, CantRenameRootElement) {
@@ -1189,7 +1189,7 @@ TEST(Document, RemoveElementWithOpaqueRightSibling) {
" 'b' : 2, 'c' : 3"
"}";
mongo::BSONObj outObj = doc.getObject();
- ASSERT_EQUALS(mongo::fromjson(outJson), outObj);
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), outObj);
}
TEST(Document, AddRightSiblingToElementWithOpaqueRightSibling) {
@@ -1214,7 +1214,7 @@ TEST(Document, AddRightSiblingToElementWithOpaqueRightSibling) {
" 'a' : 1, 'X' : 'X', 'b' : 2, 'c' : 3"
"}";
mongo::BSONObj outObj = doc.getObject();
- ASSERT_EQUALS(mongo::fromjson(outJson), outObj);
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), outObj);
}
TEST(Document, ArrayIndexedAccessFromJson) {
@@ -1422,7 +1422,7 @@ TEST(Document, ArraySerialization) {
"}";
const mongo::BSONObj outObj = doc.getObject();
- ASSERT_EQUALS(mongo::fromjson(outJson), outObj);
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), outObj);
}
TEST(Document, SetValueBSONElementFieldNameHandling) {
@@ -1441,7 +1441,7 @@ TEST(Document, SetValueBSONElementFieldNameHandling) {
a.setValueBSONElement(b);
static const char outJson[] = "{ a : 5 }";
- ASSERT_EQUALS(mongo::fromjson(outJson), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), doc.getObject());
}
TEST(Document, SetValueElementFromSeparateDocument) {
@@ -1455,10 +1455,10 @@ TEST(Document, SetValueElementFromSeparateDocument) {
auto setFrom = doc2.root().leftChild();
ASSERT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(mongo::fromjson("{ a : 5 }"), doc1.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ a : 5 }"), doc1.getObject());
// Doc containing the 'setFrom' element should be unchanged.
- ASSERT_EQ(inObj2, doc2.getObject());
+ ASSERT_BSONOBJ_EQ(inObj2, doc2.getObject());
}
TEST(Document, SetValueElementIsNoopWhenSetToSelf) {
@@ -1468,7 +1468,7 @@ TEST(Document, SetValueElementIsNoopWhenSetToSelf) {
auto element = doc.root().leftChild();
ASSERT_OK(element.setValueElement(element));
- ASSERT_EQ(inObj, doc.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc.getObject());
}
TEST(Document, SetValueElementIsNoopWhenSetToSelfFromCopy) {
@@ -1479,7 +1479,7 @@ TEST(Document, SetValueElementIsNoopWhenSetToSelfFromCopy) {
auto elementCopy = element;
ASSERT_OK(element.setValueElement(elementCopy));
- ASSERT_EQ(inObj, doc.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc.getObject());
}
TEST(Document, SetValueElementIsNoopWhenSetToSelfNonRootElement) {
@@ -1490,7 +1490,7 @@ TEST(Document, SetValueElementIsNoopWhenSetToSelfNonRootElement) {
ASSERT_EQ("c", element.getFieldName());
ASSERT_OK(element.setValueElement(element));
- ASSERT_EQ(inObj, doc.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc.getObject());
}
TEST(Document, SetValueElementSetToNestedObject) {
@@ -1504,10 +1504,10 @@ TEST(Document, SetValueElementSetToNestedObject) {
auto setFrom = doc2.root().leftChild();
ASSERT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(mongo::fromjson("{ a : { c : 5, d : 6 } }"), doc1.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ a : { c : 5, d : 6 } }"), doc1.getObject());
// Doc containing the 'setFrom' element should be unchanged.
- ASSERT_EQ(inObj2, doc2.getObject());
+ ASSERT_BSONOBJ_EQ(inObj2, doc2.getObject());
}
TEST(Document, SetValueElementNonRootElements) {
@@ -1523,10 +1523,10 @@ TEST(Document, SetValueElementNonRootElements) {
ASSERT_EQ("e", setFrom.getFieldName());
ASSERT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(mongo::fromjson("{ a : { b : 5, c : 8 } }"), doc1.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ a : { b : 5, c : 8 } }"), doc1.getObject());
// Doc containing the 'setFrom' element should be unchanged.
- ASSERT_EQ(inObj2, doc2.getObject());
+ ASSERT_BSONOBJ_EQ(inObj2, doc2.getObject());
}
TEST(Document, SetValueElementSetRootToSelfErrors) {
@@ -1535,7 +1535,7 @@ TEST(Document, SetValueElementSetRootToSelfErrors) {
auto element = doc.root();
ASSERT_NOT_OK(element.setValueElement(element));
- ASSERT_EQ(inObj, doc.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc.getObject());
}
TEST(Document, SetValueElementSetRootToAnotherDocRootErrors) {
@@ -1549,8 +1549,8 @@ TEST(Document, SetValueElementSetRootToAnotherDocRootErrors) {
auto setFrom = doc2.root();
ASSERT_NOT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(inObj, doc1.getObject());
- ASSERT_EQ(inObj2, doc2.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc1.getObject());
+ ASSERT_BSONOBJ_EQ(inObj2, doc2.getObject());
}
TEST(Document, SetValueElementSetRootToNotRootInSelfErrors) {
@@ -1560,7 +1560,7 @@ TEST(Document, SetValueElementSetRootToNotRootInSelfErrors) {
auto setTo = doc.root();
auto setFrom = doc.root().leftChild();
ASSERT_NOT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(inObj, doc.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc.getObject());
}
TEST(Document, SetValueElementSetRootToNotRootInAnotherDocErrors) {
@@ -1574,8 +1574,8 @@ TEST(Document, SetValueElementSetRootToNotRootInAnotherDocErrors) {
auto setFrom = doc2.root().leftChild();
ASSERT_NOT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(inObj, doc1.getObject());
- ASSERT_EQ(inObj2, doc2.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc1.getObject());
+ ASSERT_BSONOBJ_EQ(inObj2, doc2.getObject());
}
TEST(Document, SetValueElementSetToOwnRootErrors) {
@@ -1587,7 +1587,7 @@ TEST(Document, SetValueElementSetToOwnRootErrors) {
auto setFrom = doc.root();
ASSERT_NOT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(inObj, doc.getObject());
+ ASSERT_BSONOBJ_EQ(inObj, doc.getObject());
}
TEST(Document, SetValueElementSetToOtherDocRoot) {
@@ -1602,8 +1602,8 @@ TEST(Document, SetValueElementSetToOtherDocRoot) {
auto setFrom = doc2.root();
ASSERT_OK(setTo.setValueElement(setFrom));
- ASSERT_EQ(mongo::fromjson("{ a : { b : { c : 5 } } }"), doc1.getObject());
- ASSERT_EQ(inObj2, doc2.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson("{ a : { b : { c : 5 } } }"), doc1.getObject());
+ ASSERT_BSONOBJ_EQ(inObj2, doc2.getObject());
}
TEST(Document, CreateElementWithEmptyFieldName) {
@@ -1921,7 +1921,7 @@ TEST(TypeSupport, EncodingEquivalenceObject) {
ASSERT_TRUE(a.ok());
ASSERT_EQUALS(a.getType(), mongo::Object);
ASSERT_TRUE(a.hasValue());
- ASSERT_EQUALS(value1, mmb::ConstElement(a).getValueObject());
+ ASSERT_BSONOBJ_EQ(value1, mmb::ConstElement(a).getValueObject());
// Construct via call passing BSON element
ASSERT_OK(doc.root().appendElement(thing));
@@ -1962,7 +1962,7 @@ TEST(TypeSupport, EncodingEquivalenceArray) {
ASSERT_TRUE(a.ok());
ASSERT_EQUALS(a.getType(), mongo::Array);
ASSERT_TRUE(a.hasValue());
- ASSERT_EQUALS(value1, mmb::ConstElement(a).getValueArray());
+ ASSERT_BSONOBJ_EQ(value1, mmb::ConstElement(a).getValueArray());
// Construct via call passing BSON element
ASSERT_OK(doc.root().appendElement(thing));
@@ -2714,7 +2714,7 @@ TEST(Document, ManipulateComplexObjInLeafHeap) {
static const char outJson[] =
"{ embedded: { a: 1, b: 2, c: 2.0, d : ['w', 'y', 'z'] }, free: {} }";
- ASSERT_EQUALS(mongo::fromjson(outJson), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), doc.getObject());
}
TEST(DocumentInPlace, EphemeralDocumentsDoNotUseInPlaceMode) {
@@ -2889,7 +2889,7 @@ TEST(DocumentInPlace, DisablingInPlaceDoesNotDiscardUpdates) {
ASSERT_FALSE(doc.isInPlaceModeEnabled());
static const char outJson[] = "{ foo : true, bar : false, baz : 'baz' }";
- ASSERT_EQUALS(mongo::fromjson(outJson), doc.getObject());
+ ASSERT_BSONOBJ_EQ(mongo::fromjson(outJson), doc.getObject());
}
TEST(DocumentInPlace, StringLifecycle) {
diff --git a/src/mongo/bson/simple_bsonobj_comparator.cpp b/src/mongo/bson/simple_bsonobj_comparator.cpp
new file mode 100644
index 00000000000..38b04e70034
--- /dev/null
+++ b/src/mongo/bson/simple_bsonobj_comparator.cpp
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/bson/simple_bsonobj_comparator.h"
+
+namespace mongo {
+
+const SimpleBSONObjComparator SimpleBSONObjComparator::kInstance{};
+
+} // namespace mongo
diff --git a/src/mongo/bson/simple_bsonobj_comparator.h b/src/mongo/bson/simple_bsonobj_comparator.h
new file mode 100644
index 00000000000..76d4070f059
--- /dev/null
+++ b/src/mongo/bson/simple_bsonobj_comparator.h
@@ -0,0 +1,49 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/bson/bsonobj_comparator_interface.h"
+
+namespace mongo {
+
+/**
+ * A BSONObj comparator that has simple binary compare semantics.
+ */
+class SimpleBSONObjComparator final : public BSONObj::ComparatorInterface {
+public:
+ // Global simple comparator for stateless BSONObj comparisons. BSONObj comparisons that require
+ // database logic, such as collations, must instantiate their own comparator.
+ static const SimpleBSONObjComparator kInstance;
+
+ int compare(const BSONObj& lhs, const BSONObj& rhs) const final {
+ return lhs.woCompare(rhs, BSONObj(), true, nullptr);
+ }
+};
+
+} // namespace mongo
diff --git a/src/mongo/client/authenticate_test.cpp b/src/mongo/client/authenticate_test.cpp
index 97b876cf814..fd7e62206e5 100644
--- a/src/mongo/client/authenticate_test.cpp
+++ b/src/mongo/client/authenticate_test.cpp
@@ -87,7 +87,7 @@ public:
ASSERT(!_requests.empty());
RemoteCommandRequest expected = _requests.front();
ASSERT(expected.dbname == request.dbname);
- ASSERT_EQ(expected.cmdObj, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expected.cmdObj, request.cmdObj);
_requests.pop();
// Then pop a response and call the handler
diff --git a/src/mongo/client/fetcher_test.cpp b/src/mongo/client/fetcher_test.cpp
index 26778806b6c..4bd52cb7e09 100644
--- a/src/mongo/client/fetcher_test.cpp
+++ b/src/mongo/client/fetcher_test.cpp
@@ -271,8 +271,8 @@ TEST_F(FetcherTest, RemoteCommandRequestShouldContainCommandParametersPassedToCo
&getExecutor(), source, "db", findCmdObj, doNothingCallback, metadataObj, timeout);
ASSERT_EQUALS(source, fetcher->getSource());
- ASSERT_EQUALS(findCmdObj, fetcher->getCommandObject());
- ASSERT_EQUALS(metadataObj, fetcher->getMetadataObject());
+ ASSERT_BSONOBJ_EQ(findCmdObj, fetcher->getCommandObject());
+ ASSERT_BSONOBJ_EQ(metadataObj, fetcher->getMetadataObject());
ASSERT_EQUALS(timeout, fetcher->getTimeout());
ASSERT_OK(fetcher->schedule());
@@ -287,8 +287,8 @@ TEST_F(FetcherTest, RemoteCommandRequestShouldContainCommandParametersPassedToCo
}
ASSERT_EQUALS(source, request.target);
- ASSERT_EQUALS(findCmdObj, request.cmdObj);
- ASSERT_EQUALS(metadataObj, request.metadata);
+ ASSERT_BSONOBJ_EQ(findCmdObj, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(metadataObj, request.metadata);
ASSERT_EQUALS(timeout, request.timeout);
}
@@ -561,7 +561,7 @@ TEST_F(FetcherTest, FetchOneDocument) {
ASSERT_EQUALS(0, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_FALSE(fetcher->inShutdown_forTest());
}
@@ -590,7 +590,7 @@ TEST_F(FetcherTest, SetNextActionToContinueWhenNextBatchIsNotAvailable) {
ASSERT_EQUALS(0, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_FALSE(fetcher->inShutdown_forTest());
}
@@ -626,7 +626,7 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_EQUALS(elapsedMillis, Milliseconds(100));
ASSERT_TRUE(first);
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
@@ -648,7 +648,7 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc2, documents.front());
+ ASSERT_BSONOBJ_EQ(doc2, documents.front());
ASSERT_EQUALS(elapsedMillis, Milliseconds(200));
ASSERT_FALSE(first);
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
@@ -669,7 +669,7 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
ASSERT_EQUALS(0, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc3, documents.front());
+ ASSERT_BSONOBJ_EQ(doc3, documents.front());
ASSERT_EQUALS(elapsedMillis, Milliseconds(300));
ASSERT_FALSE(first);
ASSERT_TRUE(Fetcher::NextAction::kNoAction == nextAction);
@@ -696,7 +696,7 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
const BSONObj doc2 = BSON("_id" << 2);
@@ -713,7 +713,7 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc2, documents.front());
+ ASSERT_BSONOBJ_EQ(doc2, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
fetcher->shutdown();
@@ -782,7 +782,7 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
const BSONObj doc2 = BSON("_id" << 2);
@@ -800,7 +800,7 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc2, documents.front());
+ ASSERT_BSONOBJ_EQ(doc2, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
getExecutor().shutdown();
@@ -840,7 +840,7 @@ TEST_F(FetcherTest, EmptyGetMoreRequestAfterFirstBatchMakesFetcherInactiveAndKil
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
executor::RemoteCommandRequest request;
@@ -898,7 +898,7 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
const BSONObj doc2 = BSON("_id" << 2);
@@ -918,7 +918,7 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc2, documents.front());
+ ASSERT_BSONOBJ_EQ(doc2, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kNoAction == nextAction);
{
@@ -964,7 +964,7 @@ void shutdownDuringSecondBatch(const StatusWith<Fetcher::QueryResponse>& fetchRe
ASSERT_OK(fetchResult.getStatus());
Fetcher::QueryResponse batchData = fetchResult.getValue();
ASSERT_EQUALS(1U, batchData.documents.size());
- ASSERT_EQUALS(doc2, batchData.documents.front());
+ ASSERT_BSONOBJ_EQ(doc2, batchData.documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == *nextAction);
ASSERT(getMoreBob);
getMoreBob->append("getMore", batchData.cursorId);
@@ -996,7 +996,7 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) {
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
const BSONObj doc2 = BSON("_id" << 2);
@@ -1065,7 +1065,7 @@ TEST_F(FetcherTest, FetcherAppliesRetryPolicyToFirstCommandButNotToGetMoreReques
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
ASSERT_EQUALS(1U, documents.size());
- ASSERT_EQUALS(doc, documents.front());
+ ASSERT_BSONOBJ_EQ(doc, documents.front());
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
rs = ResponseStatus(ErrorCodes::OperationFailed, "getMore failed", Milliseconds(0));
diff --git a/src/mongo/client/read_preference.h b/src/mongo/client/read_preference.h
index c521913d94d..037a895555e 100644
--- a/src/mongo/client/read_preference.h
+++ b/src/mongo/client/read_preference.h
@@ -28,6 +28,7 @@
#pragma once
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/optime.h"
#include "mongo/util/duration.h"
@@ -103,7 +104,7 @@ public:
}
bool operator==(const TagSet& other) const {
- return _tags == other._tags;
+ return SimpleBSONObjComparator::kInstance.evaluate(_tags == other._tags);
}
bool operator!=(const TagSet& other) const {
return !(*this == other);
diff --git a/src/mongo/client/replica_set_monitor_read_preference_test.cpp b/src/mongo/client/replica_set_monitor_read_preference_test.cpp
index 99346574b04..956f40003bc 100644
--- a/src/mongo/client/replica_set_monitor_read_preference_test.cpp
+++ b/src/mongo/client/replica_set_monitor_read_preference_test.cpp
@@ -1063,6 +1063,6 @@ TEST(MultiTags, NearestMultiTagsNoMatch) {
TEST(TagSet, DefaultConstructorMatchesAll) {
TagSet tags;
- ASSERT_EQUALS(tags.getTagBSON(), BSON_ARRAY(BSONObj()));
+ ASSERT_BSONOBJ_EQ(tags.getTagBSON(), BSON_ARRAY(BSONObj()));
}
}
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 60f94abf1bd..2b247916898 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -38,7 +38,7 @@ namespace mongo {
void checkRoundTrip(const CollectionOptions& options1) {
CollectionOptions options2;
options2.parse(options1.toBSON());
- ASSERT_EQUALS(options1.toBSON(), options2.toBSON());
+ ASSERT_BSONOBJ_EQ(options1.toBSON(), options2.toBSON());
}
TEST(CollectionOptions, SimpleRoundTrip) {
@@ -77,13 +77,13 @@ TEST(CollectionOptions, Validator) {
ASSERT_NOT_OK(options.parse(fromjson("{validator: 'notAnObject'}")));
ASSERT_OK(options.parse(fromjson("{validator: {a: 1}}")));
- ASSERT_EQ(options.validator, fromjson("{a: 1}"));
+ ASSERT_BSONOBJ_EQ(options.validator, fromjson("{a: 1}"));
options.validator = fromjson("{b: 1}");
- ASSERT_EQ(options.toBSON()["validator"].Obj(), fromjson("{b: 1}"));
+ ASSERT_BSONOBJ_EQ(options.toBSON()["validator"].Obj(), fromjson("{b: 1}"));
options.reset();
- ASSERT_EQ(options.validator, BSONObj());
+ ASSERT_BSONOBJ_EQ(options.validator, BSONObj());
ASSERT(!options.toBSON()["validator"]);
}
@@ -201,7 +201,7 @@ TEST(CollectionOptions, FailToParseCollationThatIsAnEmptyObject) {
TEST(CollectionOptions, CollationFieldParsesCorrectly) {
CollectionOptions options;
ASSERT_OK(options.parse(fromjson("{collation: {locale: 'en'}}")));
- ASSERT_EQ(options.collation, fromjson("{locale: 'en'}"));
+ ASSERT_BSONOBJ_EQ(options.collation, fromjson("{locale: 'en'}"));
ASSERT_TRUE(options.isValid());
ASSERT_OK(options.validate());
}
@@ -209,7 +209,7 @@ TEST(CollectionOptions, CollationFieldParsesCorrectly) {
TEST(CollectionOptions, ParsedCollationObjShouldBeOwned) {
CollectionOptions options;
ASSERT_OK(options.parse(fromjson("{collation: {locale: 'en'}}")));
- ASSERT_EQ(options.collation, fromjson("{locale: 'en'}"));
+ ASSERT_BSONOBJ_EQ(options.collation, fromjson("{locale: 'en'}"));
ASSERT_TRUE(options.collation.isOwned());
}
@@ -239,14 +239,14 @@ TEST(CollectionOptions, ViewParsesCorrectly) {
CollectionOptions options;
ASSERT_OK(options.parse(fromjson("{viewOn: 'c', pipeline: [{$match: {}}]}")));
ASSERT_EQ(options.viewOn, "c");
- ASSERT_EQ(options.pipeline, fromjson("[{$match: {}}]"));
+ ASSERT_BSONOBJ_EQ(options.pipeline, fromjson("[{$match: {}}]"));
}
TEST(CollectionOptions, ViewParsesCorrectlyWithoutPipeline) {
CollectionOptions options;
ASSERT_OK(options.parse(fromjson("{viewOn: 'c'}")));
ASSERT_EQ(options.viewOn, "c");
- ASSERT_EQ(options.pipeline, BSONObj());
+ ASSERT_BSONOBJ_EQ(options.pipeline, BSONObj());
}
TEST(CollectionOptions, PipelineFieldRequiresViewOn) {
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index c6c6a9fa034..4690b5daa46 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -36,6 +36,7 @@
#include <vector>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/audit.h"
#include "mongo/db/background.h"
#include "mongo/db/catalog/collection.h"
@@ -665,7 +666,8 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
// index already exists with same name
if (desc->keyPattern().equal(key) &&
- desc->infoObj().getObjectField("collation") != collation) {
+ SimpleBSONObjComparator::kInstance.evaluate(
+ desc->infoObj().getObjectField("collation") != collation)) {
// key patterns are equal but collations differ.
return Status(ErrorCodes::IndexOptionsConflict,
str::stream()
@@ -679,7 +681,8 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
}
if (!desc->keyPattern().equal(key) ||
- desc->infoObj().getObjectField("collation") != collation) {
+ SimpleBSONObjComparator::kInstance.evaluate(
+ desc->infoObj().getObjectField("collation") != collation)) {
return Status(ErrorCodes::IndexKeySpecsConflict,
str::stream() << "Index must have unique name."
<< "The existing index: "
@@ -1078,9 +1081,11 @@ IndexDescriptor* IndexCatalog::findIndexByKeyPatternAndCollationSpec(
IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
- if (desc->keyPattern() == key &&
- desc->infoObj().getObjectField("collation") == collationSpec)
+ if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() == key) &&
+ SimpleBSONObjComparator::kInstance.evaluate(
+ desc->infoObj().getObjectField("collation") == collationSpec)) {
return desc;
+ }
}
return NULL;
}
@@ -1093,7 +1098,7 @@ void IndexCatalog::findIndexesByKeyPattern(OperationContext* txn,
IndexIterator ii = getIndexIterator(txn, includeUnfinishedIndexes);
while (ii.more()) {
IndexDescriptor* desc = ii.next();
- if (desc->keyPattern() == key) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(desc->keyPattern() == key)) {
matches->push_back(desc);
}
}
diff --git a/src/mongo/db/collection_index_usage_tracker_test.cpp b/src/mongo/db/collection_index_usage_tracker_test.cpp
index b82583d5daf..173ebdf47b8 100644
--- a/src/mongo/db/collection_index_usage_tracker_test.cpp
+++ b/src/mongo/db/collection_index_usage_tracker_test.cpp
@@ -87,7 +87,7 @@ TEST_F(CollectionIndexUsageTrackerTest, IndexKey) {
getTracker()->registerIndex("foo", BSON("foo" << 1));
CollectionIndexUsageMap statsMap = getTracker()->getUsageStats();
ASSERT(statsMap.find("foo") != statsMap.end());
- ASSERT_EQUALS(BSON("foo" << 1), statsMap["foo"].indexKey);
+ ASSERT_BSONOBJ_EQ(BSON("foo" << 1), statsMap["foo"].indexKey);
}
// Test that index registration generates an entry in the stats map.
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index a65467d068e..2e8c49c4bf9 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -37,6 +37,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/builder.h"
#include "mongo/db/audit.h"
#include "mongo/db/auth/action_set.h"
@@ -1500,7 +1501,8 @@ bool Command::run(OperationContext* txn,
result = run(txn, db, cmd, 0, errmsg, inPlaceReplyBob);
// Nothing in run() should change the writeConcern.
- dassert(txn->getWriteConcern().toBSON() == wcResult.getValue().toBSON());
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(txn->getWriteConcern().toBSON() ==
+ wcResult.getValue().toBSON()));
WriteConcernResult res;
auto waitForWCStatus =
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 391b84ba62c..0da61155048 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/commands/index_filter_commands.h"
-
#include "mongo/db/json.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/operation_context_noop.h"
@@ -348,9 +347,9 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}", "{locale: 'mock_reverse_string'}"));
// Fields in filter should match criteria in most recent query settings update.
- ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 1, b: 1}"));
- ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{a: -1}"));
- ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{_id: 0, a: 1}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("query"), fromjson("{a: 1, b: 1}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("sort"), fromjson("{a: -1}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("projection"), fromjson("{_id: 0, a: 1}"));
ASSERT_EQUALS(StringData(filters[0].getObjectField("collation").getStringField("locale")),
"mock_reverse_string");
@@ -369,7 +368,7 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
ASSERT(filterIndexes.type() == BSONType::Array);
auto filterArray = filterIndexes.Array();
ASSERT_EQ(filterArray.size(), 1U);
- ASSERT_EQUALS(filterArray[0].Obj(), fromjson("{a: 1, b: 1}"));
+ ASSERT_BSONOBJ_EQ(filterArray[0].Obj(), fromjson("{a: 1, b: 1}"));
// Add hint for different query shape.
ASSERT_OK(SetFilter::set(txn.get(),
@@ -439,9 +438,9 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
"indexes: [{a: 1}]}")));
vector<BSONObj> filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
- ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 'foo'}"));
- ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{}"));
- ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("query"), fromjson("{a: 'foo'}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("sort"), fromjson("{}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("projection"), fromjson("{}"));
ASSERT_EQUALS(StringData(filters[0].getObjectField("collation").getStringField("locale")),
"mock_reverse_string");
@@ -473,10 +472,10 @@ TEST(IndexFilterCommandsTest, SetAndClearFiltersCollation) {
fromjson("{query: {a: 'foo'}, collation: {locale: 'mock_reverse_string'}}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
- ASSERT_EQUALS(filters[0].getObjectField("query"), fromjson("{a: 'foo'}"));
- ASSERT_EQUALS(filters[0].getObjectField("sort"), fromjson("{}"));
- ASSERT_EQUALS(filters[0].getObjectField("projection"), fromjson("{}"));
- ASSERT_EQUALS(filters[0].getObjectField("collation"), fromjson("{}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("query"), fromjson("{a: 'foo'}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("sort"), fromjson("{}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("projection"), fromjson("{}"));
+ ASSERT_BSONOBJ_EQ(filters[0].getObjectField("collation"), fromjson("{}"));
// Plan cache should only contain entry for query without collation.
ASSERT_FALSE(
@@ -512,7 +511,7 @@ TEST(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) {
ASSERT_EQUALS(filters.size(), 1U);
auto indexes = filters[0]["indexes"].Array();
- ASSERT_EQUALS(indexes[0].embeddedObject(), fromjson("{a: 1}"));
+ ASSERT_BSONOBJ_EQ(indexes[0].embeddedObject(), fromjson("{a: 1}"));
ASSERT_EQUALS(indexes[1].valueStringData(), "a_1:rev");
}
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index 8987af00474..0d9f5f7edca 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -224,7 +224,7 @@ TEST(ConfigTest, ParseCollation) {
bob.append("collation", collation);
BSONObj cmdObj = bob.obj();
mr::Config config(dbname, cmdObj);
- ASSERT_EQUALS(config.collation, collation);
+ ASSERT_BSONOBJ_EQ(config.collation, collation);
}
TEST(ConfigTest, ParseNoCollation) {
@@ -236,7 +236,7 @@ TEST(ConfigTest, ParseNoCollation) {
bob.append("out", "outCollection");
BSONObj cmdObj = bob.obj();
mr::Config config(dbname, cmdObj);
- ASSERT_EQUALS(config.collation, BSONObj());
+ ASSERT_BSONOBJ_EQ(config.collation, BSONObj());
}
TEST(ConfigTest, CollationNotAnObjectFailsToParse) {
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 195ad13b250..4975a557443 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -156,10 +156,10 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
vector<BSONObj> shapes = getShapes(planCache);
ASSERT_EQUALS(shapes.size(), 1U);
- ASSERT_EQUALS(shapes[0].getObjectField("query"), cq->getQueryObj());
- ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getQueryRequest().getSort());
- ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getQueryRequest().getProj());
- ASSERT_EQUALS(shapes[0].getObjectField("collation"), cq->getCollator()->getSpec().toBSON());
+ ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("query"), cq->getQueryObj());
+ ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("sort"), cq->getQueryRequest().getSort());
+ ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("projection"), cq->getQueryRequest().getProj());
+ ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("collation"), cq->getCollator()->getSpec().toBSON());
}
/**
@@ -337,8 +337,14 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
BSONObj shapeB = BSON(
"query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort() << "projection"
<< cqB->getQueryRequest().getProj());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
+ ASSERT_TRUE(
+ std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shapeA](const BSONObj& obj) {
+ return SimpleBSONObjComparator::kInstance.evaluate(shapeA == obj);
+ }) != shapesBefore.end());
+ ASSERT_TRUE(
+ std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shapeB](const BSONObj& obj) {
+ return SimpleBSONObjComparator::kInstance.evaluate(shapeB == obj);
+ }) != shapesBefore.end());
// Drop {b: 1} from cache. Make sure {a: 1} is still in cache afterwards.
BSONObjBuilder bob;
@@ -347,7 +353,7 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
txn.get(), &planCache, nss.ns(), BSON("query" << cqB->getQueryObj())));
vector<BSONObj> shapesAfter = getShapes(planCache);
ASSERT_EQUALS(shapesAfter.size(), 1U);
- ASSERT_EQUALS(shapesAfter[0], shapeA);
+ ASSERT_BSONOBJ_EQ(shapesAfter[0], shapeA);
}
TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
@@ -393,9 +399,15 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
<< cqCollation->getQueryRequest().getProj()
<< "collation"
<< cqCollation->getCollator()->getSpec().toBSON());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shape) != shapesBefore.end());
- ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeWithCollation) !=
- shapesBefore.end());
+ ASSERT_TRUE(
+ std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shape](const BSONObj& obj) {
+ return SimpleBSONObjComparator::kInstance.evaluate(shape == obj);
+ }) != shapesBefore.end());
+ ASSERT_TRUE(
+ std::find_if(
+ shapesBefore.begin(), shapesBefore.end(), [&shapeWithCollation](const BSONObj& obj) {
+ return SimpleBSONObjComparator::kInstance.evaluate(shapeWithCollation == obj);
+ }) != shapesBefore.end());
// Drop query with collation from cache. Make other query is still in cache afterwards.
BSONObjBuilder bob;
@@ -403,7 +415,7 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
ASSERT_OK(PlanCacheClear::clear(txn.get(), &planCache, nss.ns(), shapeWithCollation));
vector<BSONObj> shapesAfter = getShapes(planCache);
ASSERT_EQUALS(shapesAfter.size(), 1U);
- ASSERT_EQUALS(shapesAfter[0], shape);
+ ASSERT_BSONOBJ_EQ(shapesAfter[0], shape);
}
/**
diff --git a/src/mongo/db/exec/and_common-inl.h b/src/mongo/db/exec/and_common-inl.h
index 3a6f7cb0e69..958c57fe75e 100644
--- a/src/mongo/db/exec/and_common-inl.h
+++ b/src/mongo/db/exec/and_common-inl.h
@@ -26,6 +26,7 @@
* it in the license file.
*/
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/exec/working_set.h"
namespace mongo {
@@ -85,7 +86,8 @@ public:
for (size_t i = 0; i < src.keyData.size(); ++i) {
bool found = false;
for (size_t j = 0; j < dest->keyData.size(); ++j) {
- if (dest->keyData[j].indexKeyPattern == src.keyData[i].indexKeyPattern) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(dest->keyData[j].indexKeyPattern ==
+ src.keyData[i].indexKeyPattern)) {
found = true;
break;
}
diff --git a/src/mongo/db/exec/projection_exec_test.cpp b/src/mongo/db/exec/projection_exec_test.cpp
index 2399e35fd73..b9e240a5a24 100644
--- a/src/mongo/db/exec/projection_exec_test.cpp
+++ b/src/mongo/db/exec/projection_exec_test.cpp
@@ -125,7 +125,7 @@ void testTransform(const char* specStr,
// Finally, we compare the projected object.
const BSONObj& obj = wsm.obj.value();
BSONObj expectedObj = fromjson(expectedObjStr);
- if (obj != expectedObj) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(obj != expectedObj)) {
mongoutils::str::stream ss;
ss << "transform() test failed: unexpected projected object."
<< "\nprojection spec: " << specStr << "\nquery: " << queryStr
@@ -303,7 +303,7 @@ TEST(ProjectionExecTest, TransformMetaSortKeyCoveredNormal) {
"{_id: 0, a: 1, b: {$meta: 'sortKey'}}",
IndexKeyDatum(BSON("a" << 1), BSON("" << 5), nullptr));
BSONObj expectedOut = BSON("a" << 5 << "b" << BSON("" << 5));
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(ProjectionExecTest, TransformMetaSortKeyCoveredOverwrite) {
@@ -312,7 +312,7 @@ TEST(ProjectionExecTest, TransformMetaSortKeyCoveredOverwrite) {
"{_id: 0, a: 1, a: {$meta: 'sortKey'}}",
IndexKeyDatum(BSON("a" << 1), BSON("" << 5), nullptr));
BSONObj expectedOut = BSON("a" << BSON("" << 5));
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(ProjectionExecTest, TransformMetaSortKeyCoveredAdditionalData) {
@@ -321,7 +321,7 @@ TEST(ProjectionExecTest, TransformMetaSortKeyCoveredAdditionalData) {
"{_id: 0, a: 1, b: {$meta: 'sortKey'}, c: 1}",
IndexKeyDatum(BSON("a" << 1 << "c" << 1), BSON("" << 5 << "" << 6), nullptr));
BSONObj expectedOut = BSON("a" << 5 << "c" << 6 << "b" << BSON("" << 5));
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(ProjectionExecTest, TransformMetaSortKeyCoveredCompound) {
@@ -330,7 +330,7 @@ TEST(ProjectionExecTest, TransformMetaSortKeyCoveredCompound) {
"{_id: 0, a: 1, b: {$meta: 'sortKey'}}",
IndexKeyDatum(BSON("a" << 1 << "c" << 1), BSON("" << 5 << "" << 6), nullptr));
BSONObj expectedOut = BSON("a" << 5 << "b" << BSON("" << 5 << "" << 6));
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(ProjectionExecTest, TransformMetaSortKeyCoveredCompound2) {
@@ -340,7 +340,7 @@ TEST(ProjectionExecTest, TransformMetaSortKeyCoveredCompound2) {
IndexKeyDatum(
BSON("a" << 1 << "b" << 1 << "c" << 1), BSON("" << 5 << "" << 6 << "" << 4), nullptr));
BSONObj expectedOut = BSON("a" << 5 << "c" << 4 << "b" << BSON("" << 5 << "" << 6));
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(ProjectionExecTest, TransformMetaSortKeyCoveredCompound3) {
@@ -351,7 +351,7 @@ TEST(ProjectionExecTest, TransformMetaSortKeyCoveredCompound3) {
BSON("" << 5 << "" << 6 << "" << 4 << "" << 9000),
nullptr));
BSONObj expectedOut = BSON("c" << 4 << "d" << 9000 << "b" << BSON("" << 6 << "" << 4));
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
} // namespace
diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp
index 6232df7eec7..5dfd2db70ab 100644
--- a/src/mongo/db/exec/sort_test.cpp
+++ b/src/mongo/db/exec/sort_test.cpp
@@ -145,7 +145,7 @@ public:
// Finally, we get to compare the sorted results against what we expect.
BSONObj expectedObj = fromjson(expectedStr);
- if (outputObj != expectedObj) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(outputObj != expectedObj)) {
mongoutils::str::stream ss;
// Even though we have the original string representation of the expected output,
// we invoke BSONObj::toString() to get a format consistent with outputObj.
diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp
index 428699484b9..7ceb01d66a6 100644
--- a/src/mongo/db/field_parser_test.cpp
+++ b/src/mongo/db/field_parser_test.cpp
@@ -44,9 +44,9 @@ using mongo::BSONObjBuilder;
using mongo::Date_t;
using mongo::FieldParser;
using mongo::OID;
+using std::map;
using std::string;
using std::vector;
-using std::map;
class ExtractionFixture : public mongo::unittest::Test {
protected:
@@ -112,11 +112,11 @@ TEST_F(ExtractionFixture, GetBSONArray) {
BSONField<BSONArray> wrongType(aString.name());
BSONArray val;
ASSERT_TRUE(FieldParser::extract(doc, anArray, &val));
- ASSERT_EQUALS(val, valArray);
+ ASSERT_BSONOBJ_EQ(val, valArray);
ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val,
- BSON_ARRAY("a"
- << "b"));
+ ASSERT_BSONOBJ_EQ(val,
+ BSON_ARRAY("a"
+ << "b"));
ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
}
@@ -125,9 +125,9 @@ TEST_F(ExtractionFixture, GetBSONObj) {
BSONField<BSONObj> wrongType(aString.name());
BSONObj val;
ASSERT_TRUE(FieldParser::extract(doc, anObj, &val));
- ASSERT_EQUALS(val, valObj);
+ ASSERT_BSONOBJ_EQ(val, valObj);
ASSERT_TRUE(FieldParser::extract(doc, notThere, &val));
- ASSERT_EQUALS(val, BSON("b" << 1));
+ ASSERT_BSONOBJ_EQ(val, BSON("b" << 1));
ASSERT_FALSE(FieldParser::extract(doc, wrongType, &val));
}
@@ -237,9 +237,9 @@ TEST(ComplexExtraction, GetObjectVector) {
vector<BSONObj> parsedVector;
ASSERT(FieldParser::extract(obj, vectorField, &parsedVector));
- ASSERT_EQUALS(BSON("a" << 1), parsedVector[0]);
- ASSERT_EQUALS(BSON("b" << 1), parsedVector[1]);
- ASSERT_EQUALS(BSON("c" << 1), parsedVector[2]);
+ ASSERT_BSONOBJ_EQ(BSON("a" << 1), parsedVector[0]);
+ ASSERT_BSONOBJ_EQ(BSON("b" << 1), parsedVector[1]);
+ ASSERT_BSONOBJ_EQ(BSON("c" << 1), parsedVector[2]);
ASSERT_EQUALS(parsedVector.size(), static_cast<size_t>(3));
}
@@ -329,15 +329,15 @@ TEST(ComplexExtraction, GetObjectMap) {
map<string, BSONObj> parsedMap;
ASSERT(FieldParser::extract(obj, mapField, &parsedMap));
- ASSERT_EQUALS(BSON("a"
- << "a"),
- parsedMap["a"]);
- ASSERT_EQUALS(BSON("b"
- << "b"),
- parsedMap["b"]);
- ASSERT_EQUALS(BSON("c"
- << "c"),
- parsedMap["c"]);
+ ASSERT_BSONOBJ_EQ(BSON("a"
+ << "a"),
+ parsedMap["a"]);
+ ASSERT_BSONOBJ_EQ(BSON("b"
+ << "b"),
+ parsedMap["b"]);
+ ASSERT_BSONOBJ_EQ(BSON("c"
+ << "c"),
+ parsedMap["c"]);
ASSERT_EQUALS(parsedMap.size(), static_cast<size_t>(3));
}
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index 00124eb2c36..86841fd475a 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -84,13 +84,13 @@ TEST(FTDCFileTest, TestFileBasicMetadata) {
BSONObj doc1a = std::get<1>(reader.next());
- ASSERT_TRUE(doc1 == doc1a);
+ ASSERT_BSONOBJ_EQ(doc1, doc1a);
ASSERT_OK(reader.hasNext());
BSONObj doc2a = std::get<1>(reader.next());
- ASSERT_TRUE(doc2 == doc2a);
+ ASSERT_BSONOBJ_EQ(doc2, doc2a);
auto sw = reader.hasNext();
ASSERT_OK(sw);
@@ -135,13 +135,13 @@ TEST(FTDCFileTest, TestFileBasicCompress) {
BSONObj doc1a = std::get<1>(reader.next());
- ASSERT_TRUE(doc1 == doc1a);
+ ASSERT_BSONOBJ_EQ(doc1, doc1a);
ASSERT_OK(reader.hasNext());
BSONObj doc2a = std::get<1>(reader.next());
- ASSERT_TRUE(doc2 == doc2a);
+ ASSERT_BSONOBJ_EQ(doc2, doc2a);
auto sw = reader.hasNext();
ASSERT_OK(sw);
diff --git a/src/mongo/db/ftdc/ftdc_test.cpp b/src/mongo/db/ftdc/ftdc_test.cpp
index 3a010ab32c5..9f4f616db3b 100644
--- a/src/mongo/db/ftdc/ftdc_test.cpp
+++ b/src/mongo/db/ftdc/ftdc_test.cpp
@@ -70,9 +70,9 @@ void ValidateDocumentList(const std::vector<BSONObj>& docs1, const std::vector<B
auto bi = docs2.begin();
while (ai != docs1.end() && bi != docs2.end()) {
- if (!(*ai == *bi)) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(*ai != *bi)) {
std::cout << *ai << " vs " << *bi << std::endl;
- ASSERT_TRUE(*ai == *bi);
+ ASSERT_BSONOBJ_EQ(*ai, *bi);
}
++ai;
++bi;
diff --git a/src/mongo/db/fts/fts_query_impl_test.cpp b/src/mongo/db/fts/fts_query_impl_test.cpp
index 43585e8a982..6cb8ca8f9de 100644
--- a/src/mongo/db/fts/fts_query_impl_test.cpp
+++ b/src/mongo/db/fts/fts_query_impl_test.cpp
@@ -158,9 +158,10 @@ TEST(FTSQueryImpl, Phrase1) {
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
- ASSERT_EQUALS(q.toBSON(),
- fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ ASSERT_BSONOBJ_EQ(
+ q.toBSON(),
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
ASSERT_TRUE(q.getTermsForBounds() == q.getPositiveTerms());
}
@@ -182,7 +183,7 @@ TEST(FTSQueryImpl, HyphenDirectlyBeforePhraseShouldNegateEntirePhrase) {
q.setCaseSensitive(false);
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(
q.toBSON(),
fromjson(
"{terms: ['fun'], negatedTerms: [], phrases: [], negatedPhrases: ['phrase test']}"));
@@ -195,9 +196,10 @@ TEST(FTSQueryImpl, HyphenSurroundedByWhitespaceBeforePhraseShouldNotNegateEntire
q.setCaseSensitive(false);
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
- ASSERT_EQUALS(q.toBSON(),
- fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ ASSERT_BSONOBJ_EQ(
+ q.toBSON(),
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, HyphenBetweenTermAndPhraseShouldBeTreatedAsDelimiter) {
@@ -207,9 +209,10 @@ TEST(FTSQueryImpl, HyphenBetweenTermAndPhraseShouldBeTreatedAsDelimiter) {
q.setCaseSensitive(false);
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
- ASSERT_EQUALS(q.toBSON(),
- fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ ASSERT_BSONOBJ_EQ(
+ q.toBSON(),
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, HyphenShouldNegateAllSucceedingPhrasesSeparatedByHyphens) {
@@ -219,9 +222,9 @@ TEST(FTSQueryImpl, HyphenShouldNegateAllSucceedingPhrasesSeparatedByHyphens) {
q.setCaseSensitive(false);
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
- ASSERT_EQUALS(q.toBSON(),
- fromjson("{terms: ['anoth', 'phrase'], negatedTerms: [], phrases: ['another "
- "phrase'], negatedPhrases: ['really fun', 'stuff here']}"));
+ ASSERT_BSONOBJ_EQ(q.toBSON(),
+ fromjson("{terms: ['anoth', 'phrase'], negatedTerms: [], phrases: ['another "
+ "phrase'], negatedPhrases: ['really fun', 'stuff here']}"));
}
TEST(FTSQueryImpl, CaseSensitiveOption) {
@@ -304,9 +307,10 @@ TEST(FTSQueryImpl, Mix1) {
q.setCaseSensitive(false);
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
- ASSERT_EQUALS(q.toBSON(),
- fromjson("{terms: ['industri'], negatedTerms: ['melbourn', 'physic'], phrases: "
- "['industry'], negatedPhrases: []}"));
+ ASSERT_BSONOBJ_EQ(
+ q.toBSON(),
+ fromjson("{terms: ['industri'], negatedTerms: ['melbourn', 'physic'], phrases: "
+ "['industry'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, NegPhrase2) {
diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp
index 3c041cbd363..ca50a0d7511 100644
--- a/src/mongo/db/fts/fts_spec_test.cpp
+++ b/src/mongo/db/fts/fts_spec_test.cpp
@@ -54,7 +54,7 @@ void assertFixSuccess(const std::string& s) {
// fixSpec() on an already-fixed spec shouldn't change it.
BSONObj fixed2 = assertGet(FTSSpec::fixSpec(fixed));
- ASSERT_EQUALS(fixed, fixed2);
+ ASSERT_BSONOBJ_EQ(fixed, fixed2);
} catch (UserException&) {
ASSERT(false);
}
@@ -283,7 +283,7 @@ TEST(FTSSpec, Extra2) {
ASSERT_EQUALS(StringData("x"), spec.extraAfter(0));
BSONObj fixed2 = assertGet(FTSSpec::fixSpec(fixed));
- ASSERT_EQUALS(fixed, fixed2);
+ ASSERT_BSONOBJ_EQ(fixed, fixed2);
}
TEST(FTSSpec, Extra3) {
@@ -291,15 +291,15 @@ TEST(FTSSpec, Extra3) {
<< "text"));
BSONObj fixed = assertGet(FTSSpec::fixSpec(user));
- ASSERT_EQUALS(BSON("x" << 1 << "_fts"
- << "text"
- << "_ftsx"
- << 1),
- fixed["key"].Obj());
- ASSERT_EQUALS(BSON("data" << 1), fixed["weights"].Obj());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1 << "_fts"
+ << "text"
+ << "_ftsx"
+ << 1),
+ fixed["key"].Obj());
+ ASSERT_BSONOBJ_EQ(BSON("data" << 1), fixed["weights"].Obj());
BSONObj fixed2 = assertGet(FTSSpec::fixSpec(fixed));
- ASSERT_EQUALS(fixed, fixed2);
+ ASSERT_BSONOBJ_EQ(fixed, fixed2);
FTSSpec spec(fixed);
ASSERT_EQUALS(1U, spec.numExtraBefore());
@@ -309,10 +309,10 @@ TEST(FTSSpec, Extra3) {
BSONObj prefix;
ASSERT(spec.getIndexPrefix(BSON("x" << 2), &prefix).isOK());
- ASSERT_EQUALS(BSON("x" << 2), prefix);
+ ASSERT_BSONOBJ_EQ(BSON("x" << 2), prefix);
ASSERT(spec.getIndexPrefix(BSON("x" << 3 << "y" << 4), &prefix).isOK());
- ASSERT_EQUALS(BSON("x" << 3), prefix);
+ ASSERT_BSONOBJ_EQ(BSON("x" << 3), prefix);
ASSERT(!spec.getIndexPrefix(BSON("x" << BSON("$gt" << 5)), &prefix).isOK());
ASSERT(!spec.getIndexPrefix(BSON("y" << 4), &prefix).isOK());
diff --git a/src/mongo/db/index/2d_key_generator_test.cpp b/src/mongo/db/index/2d_key_generator_test.cpp
index 256dc7ecc2f..9f6e3405fd1 100644
--- a/src/mongo/db/index/2d_key_generator_test.cpp
+++ b/src/mongo/db/index/2d_key_generator_test.cpp
@@ -32,6 +32,8 @@
#include "mongo/db/index/expression_keys_private.h"
+#include <algorithm>
+
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/expression_params.h"
@@ -55,11 +57,21 @@ std::string dumpKeyset(const BSONObjSet& objs) {
}
bool assertKeysetsEqual(const BSONObjSet& expectedKeys, const BSONObjSet& actualKeys) {
- if (expectedKeys != actualKeys) {
+ if (expectedKeys.size() != actualKeys.size()) {
log() << "Expected: " << dumpKeyset(expectedKeys) << ", "
<< "Actual: " << dumpKeyset(actualKeys);
return false;
}
+
+ if (!std::equal(expectedKeys.begin(),
+ expectedKeys.end(),
+ actualKeys.begin(),
+ SimpleBSONObjComparator::kInstance.makeEqualTo())) {
+ log() << "Expected: " << dumpKeyset(expectedKeys) << ", "
+ << "Actual: " << dumpKeyset(actualKeys);
+ return false;
+ }
+
return true;
}
diff --git a/src/mongo/db/index/btree_key_generator_test.cpp b/src/mongo/db/index/btree_key_generator_test.cpp
index 7840056b447..effeb192e9a 100644
--- a/src/mongo/db/index/btree_key_generator_test.cpp
+++ b/src/mongo/db/index/btree_key_generator_test.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/index/btree_key_generator.h"
+#include <algorithm>
#include <iostream>
#include "mongo/db/json.h"
@@ -78,6 +79,21 @@ std::string dumpMultikeyPaths(const MultikeyPaths& multikeyPaths) {
return ss.str();
}
+bool keysetsEqual(const BSONObjSet& expectedKeys, const BSONObjSet& actualKeys) {
+ if (expectedKeys.size() != actualKeys.size()) {
+ return false;
+ }
+
+ if (!std::equal(expectedKeys.begin(),
+ expectedKeys.end(),
+ actualKeys.begin(),
+ SimpleBSONObjComparator::kInstance.makeEqualTo())) {
+ return false;
+ }
+
+ return true;
+}
+
bool testKeygen(const BSONObj& kp,
const BSONObj& obj,
const BSONObjSet& expectedKeys,
@@ -115,7 +131,7 @@ bool testKeygen(const BSONObj& kp,
//
// Step 3: check that the results match the expected result.
//
- bool match = (expectedKeys == actualKeys);
+ bool match = keysetsEqual(expectedKeys, actualKeys);
if (!match) {
log() << "Expected: " << dumpKeyset(expectedKeys) << ", "
<< "Actual: " << dumpKeyset(actualKeys);
diff --git a/src/mongo/db/index/hash_key_generator_test.cpp b/src/mongo/db/index/hash_key_generator_test.cpp
index b2bf9a2755a..a6f6600449a 100644
--- a/src/mongo/db/index/hash_key_generator_test.cpp
+++ b/src/mongo/db/index/hash_key_generator_test.cpp
@@ -32,6 +32,8 @@
#include "mongo/db/index/expression_keys_private.h"
+#include <algorithm>
+
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/hasher.h"
#include "mongo/db/json.h"
@@ -58,11 +60,21 @@ std::string dumpKeyset(const BSONObjSet& objs) {
}
bool assertKeysetsEqual(const BSONObjSet& expectedKeys, const BSONObjSet& actualKeys) {
- if (expectedKeys != actualKeys) {
+ if (expectedKeys.size() != actualKeys.size()) {
log() << "Expected: " << dumpKeyset(expectedKeys) << ", "
<< "Actual: " << dumpKeyset(actualKeys);
return false;
}
+
+ if (!std::equal(expectedKeys.begin(),
+ expectedKeys.end(),
+ actualKeys.begin(),
+ SimpleBSONObjComparator::kInstance.makeEqualTo())) {
+ log() << "Expected: " << dumpKeyset(expectedKeys) << ", "
+ << "Actual: " << dumpKeyset(actualKeys);
+ return false;
+ }
+
return true;
}
diff --git a/src/mongo/db/index/s2_key_generator_test.cpp b/src/mongo/db/index/s2_key_generator_test.cpp
index 894e7b8036e..fc126c0272c 100644
--- a/src/mongo/db/index/s2_key_generator_test.cpp
+++ b/src/mongo/db/index/s2_key_generator_test.cpp
@@ -26,16 +26,21 @@
* it in the license file.
*/
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kIndex
+
#include "mongo/platform/basic.h"
#include "mongo/db/index/expression_keys_private.h"
+#include <algorithm>
+
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/index/expression_params.h"
#include "mongo/db/index/s2_common.h"
#include "mongo/db/json.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
using namespace mongo;
@@ -69,11 +74,23 @@ std::string dumpMultikeyPaths(const MultikeyPaths& multikeyPaths) {
return ss.str();
}
-void assertKeysetsEqual(const BSONObjSet& expectedKeys, const BSONObjSet& actualKeys) {
- if (expectedKeys != actualKeys) {
- FAIL(str::stream() << "Expected: " << dumpKeyset(expectedKeys) << ", Actual: "
- << dumpKeyset(actualKeys));
+bool assertKeysetsEqual(const BSONObjSet& expectedKeys, const BSONObjSet& actualKeys) {
+ if (expectedKeys.size() != actualKeys.size()) {
+ log() << "Expected: " << dumpKeyset(expectedKeys) << ", "
+ << "Actual: " << dumpKeyset(actualKeys);
+ return false;
}
+
+ if (!std::equal(expectedKeys.begin(),
+ expectedKeys.end(),
+ actualKeys.begin(),
+ SimpleBSONObjComparator::kInstance.makeEqualTo())) {
+ log() << "Expected: " << dumpKeyset(expectedKeys) << ", "
+ << "Actual: " << dumpKeyset(actualKeys);
+ return false;
+ }
+
+ return true;
}
void assertMultikeyPathsEqual(const MultikeyPaths& expectedMultikeyPaths,
diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp
index 4b83648fa7d..b2d35b5aac4 100644
--- a/src/mongo/db/keypattern_test.cpp
+++ b/src/mongo/db/keypattern_test.cpp
@@ -57,46 +57,46 @@ TEST(KeyPattern, ExtendRangeBound) {
{
KeyPattern keyPat(BSON("a" << 1));
BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55));
}
{
KeyPattern keyPat(BSON("a" << 1));
BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55));
}
// test keyPattern longer than bound, simple
{
KeyPattern keyPat(BSON("a" << 1 << "b" << 1));
BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55 << "b" << MINKEY));
}
{
KeyPattern keyPat(BSON("a" << 1 << "b" << 1));
BSONObj newB = keyPat.extendRangeBound(bound, true);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55 << "b" << MAXKEY));
}
// test keyPattern longer than bound, more complex pattern directions
{
KeyPattern keyPat(BSON("a" << 1 << "b" << -1));
BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55 << "b" << MAXKEY));
}
{
KeyPattern keyPat(BSON("a" << 1 << "b" << -1));
BSONObj newB = keyPat.extendRangeBound(bound, true);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55 << "b" << MINKEY));
}
{
KeyPattern keyPat(BSON("a" << 1 << "b" << -1 << "c" << 1));
BSONObj newB = keyPat.extendRangeBound(bound, false);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MAXKEY << "c" << MINKEY));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55 << "b" << MAXKEY << "c" << MINKEY));
}
{
KeyPattern keyPat(BSON("a" << 1 << "b" << -1 << "c" << 1));
BSONObj newB = keyPat.extendRangeBound(bound, true);
- ASSERT_EQUALS(newB, BSON("a" << 55 << "b" << MINKEY << "c" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(newB, BSON("a" << 55 << "b" << MINKEY << "c" << MAXKEY));
}
}
@@ -105,39 +105,39 @@ TEST(KeyPattern, GlobalMinMax) {
// Simple KeyPatterns
//
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1)).globalMin(), BSON("a" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1)).globalMax(), BSON("a" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << 1)).globalMin(), BSON("a" << MINKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << 1)).globalMax(), BSON("a" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << -1)).globalMin(), BSON("a" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << -1)).globalMax(), BSON("a" << MINKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << -1)).globalMin(), BSON("a" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << -1)).globalMax(), BSON("a" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMin(),
- BSON("a" << MINKEY << "b" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMax(),
- BSON("a" << MAXKEY << "b" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMin(),
+ BSON("a" << MINKEY << "b" << MINKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << 1 << "b" << 1.0)).globalMax(),
+ BSON("a" << MAXKEY << "b" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMin(),
- BSON("a" << MINKEY << "b" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMax(),
- BSON("a" << MAXKEY << "b" << MINKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMin(),
+ BSON("a" << MINKEY << "b" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a" << 1 << "b" << -1.0f)).globalMax(),
+ BSON("a" << MAXKEY << "b" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a"
- << "hashed"))
- .globalMin(),
- BSON("a" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a"
- << "hashed"))
- .globalMax(),
- BSON("a" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a"
+ << "hashed"))
+ .globalMin(),
+ BSON("a" << MINKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a"
+ << "hashed"))
+ .globalMax(),
+ BSON("a" << MAXKEY));
//
// Nested KeyPatterns
//
- ASSERT_EQUALS(KeyPattern(BSON("a.b" << 1)).globalMin(), BSON("a.b" << MINKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a.b" << 1)).globalMax(), BSON("a.b" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b" << 1)).globalMin(), BSON("a.b" << MINKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b" << 1)).globalMax(), BSON("a.b" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a.b.c" << -1)).globalMin(), BSON("a.b.c" << MAXKEY));
- ASSERT_EQUALS(KeyPattern(BSON("a.b.c" << -1)).globalMax(), BSON("a.b.c" << MINKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMin(), BSON("a.b.c" << MAXKEY));
+ ASSERT_BSONOBJ_EQ(KeyPattern(BSON("a.b.c" << -1)).globalMax(), BSON("a.b.c" << MINKEY));
}
}
diff --git a/src/mongo/db/matcher/expression_algo_test.cpp b/src/mongo/db/matcher/expression_algo_test.cpp
index b9e6cafe134..e8fe53ddc93 100644
--- a/src/mongo/db/matcher/expression_algo_test.cpp
+++ b/src/mongo/db/matcher/expression_algo_test.cpp
@@ -815,8 +815,8 @@ TEST(SplitMatchExpression, AndWithSplittableChildrenIsSplittable) {
BSONObjBuilder secondBob;
splitExpr.second->serialize(&secondBob);
- ASSERT_EQUALS(firstBob.obj(), fromjson("{a: {$eq: 1}}"));
- ASSERT_EQUALS(secondBob.obj(), fromjson("{b: {$eq: 1}}"));
+ ASSERT_BSONOBJ_EQ(firstBob.obj(), fromjson("{a: {$eq: 1}}"));
+ ASSERT_BSONOBJ_EQ(secondBob.obj(), fromjson("{b: {$eq: 1}}"));
}
TEST(SplitMatchExpression, NorWithIndependentChildrenIsSplittable) {
@@ -837,8 +837,8 @@ TEST(SplitMatchExpression, NorWithIndependentChildrenIsSplittable) {
BSONObjBuilder secondBob;
splitExpr.second->serialize(&secondBob);
- ASSERT_EQUALS(firstBob.obj(), fromjson("{$nor: [{a: {$eq: 1}}]}"));
- ASSERT_EQUALS(secondBob.obj(), fromjson("{$nor: [{b: {$eq: 1}}]}"));
+ ASSERT_BSONOBJ_EQ(firstBob.obj(), fromjson("{$nor: [{a: {$eq: 1}}]}"));
+ ASSERT_BSONOBJ_EQ(secondBob.obj(), fromjson("{$nor: [{b: {$eq: 1}}]}"));
}
TEST(SplitMatchExpression, NotWithIndependentChildIsSplittable) {
@@ -855,7 +855,7 @@ TEST(SplitMatchExpression, NotWithIndependentChildIsSplittable) {
BSONObjBuilder firstBob;
splitExpr.first->serialize(&firstBob);
- ASSERT_EQUALS(firstBob.obj(), fromjson("{$nor: [{$and: [{x: {$gt: 4}}]}]}"));
+ ASSERT_BSONOBJ_EQ(firstBob.obj(), fromjson("{$nor: [{$and: [{x: {$gt: 4}}]}]}"));
ASSERT_FALSE(splitExpr.second);
}
@@ -874,7 +874,7 @@ TEST(SplitMatchExpression, OrWithOnlyIndependentChildrenIsNotSplittable) {
splitExpr.second->serialize(&bob);
ASSERT_FALSE(splitExpr.first);
- ASSERT_EQUALS(bob.obj(), fromjson("{$or: [{a: {$eq: 1}}, {b: {$eq: 1}}]}"));
+ ASSERT_BSONOBJ_EQ(bob.obj(), fromjson("{$or: [{a: {$eq: 1}}, {b: {$eq: 1}}]}"));
}
TEST(SplitMatchExpression, ComplexMatchExpressionSplitsCorrectly) {
@@ -898,8 +898,8 @@ TEST(SplitMatchExpression, ComplexMatchExpressionSplitsCorrectly) {
BSONObjBuilder secondBob;
splitExpr.second->serialize(&secondBob);
- ASSERT_EQUALS(firstBob.obj(), fromjson("{$or: [{'a.b': {$eq: 3}}, {'a.b.c': {$eq: 4}}]}"));
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(firstBob.obj(), fromjson("{$or: [{'a.b': {$eq: 3}}, {'a.b.c': {$eq: 4}}]}"));
+ ASSERT_BSONOBJ_EQ(
secondBob.obj(),
fromjson("{$and: [{$nor: [{$and: [{x: {$size: 2}}]}]}, {$nor: [{x: {$gt: 4}}, {$and: "
"[{$nor: [{$and: [{x: "
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index 9970384577b..d3b2ef0c716 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -31,6 +31,7 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
#include "mongo/db/matcher/expression_geo.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/geo/geoparser.h"
#include "mongo/platform/basic.h"
#include "mongo/util/log.h"
@@ -385,7 +386,7 @@ bool GeoMatchExpression::equivalent(const MatchExpression* other) const {
if (path() != realOther->path())
return false;
- return _rawObj == realOther->_rawObj;
+ return SimpleBSONObjComparator::kInstance.evaluate(_rawObj == realOther->_rawObj);
}
std::unique_ptr<MatchExpression> GeoMatchExpression::shallowClone() const {
@@ -442,7 +443,7 @@ bool GeoNearMatchExpression::equivalent(const MatchExpression* other) const {
if (path() != realOther->path())
return false;
- return _rawObj == realOther->_rawObj;
+ return SimpleBSONObjComparator::kInstance.evaluate(_rawObj == realOther->_rawObj);
}
std::unique_ptr<MatchExpression> GeoNearMatchExpression::shallowClone() const {
diff --git a/src/mongo/db/matcher/expression_serialization_test.cpp b/src/mongo/db/matcher/expression_serialization_test.cpp
index ce321f326d7..572ae25d585 100644
--- a/src/mongo/db/matcher/expression_serialization_test.cpp
+++ b/src/mongo/db/matcher/expression_serialization_test.cpp
@@ -55,8 +55,8 @@ TEST(SerializeBasic, AndExpressionWithOneChildSerializesCorrectly) {
Matcher original(fromjson("{$and: [{x: 0}]}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 0}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 0}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -70,8 +70,8 @@ TEST(SerializeBasic, AndExpressionWithTwoChildrenSerializesCorrectly) {
Matcher original(fromjson("{$and: [{x: 1}, {x: 2}]}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 1}}, {x: {$eq: 2}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 1}}, {x: {$eq: 2}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -85,8 +85,8 @@ TEST(SerializeBasic, AndExpressionWithTwoIdenticalChildrenSerializesCorrectly) {
Matcher original(fromjson("{$and: [{x: 1}, {x: 1}]}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 1}}, {x: {$eq: 1}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 1}}, {x: {$eq: 1}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -100,8 +100,9 @@ TEST(SerializeBasic, ExpressionOr) {
Matcher original(fromjson("{$or: [{x: 'A'}, {x: 'B'}]}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$or: [{x: {$eq: 'A'}}, {x: {$eq: 'B'}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ fromjson("{$or: [{x: {$eq: 'A'}}, {x: {$eq: 'B'}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'A'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -117,9 +118,9 @@ TEST(SerializeBasic, ExpressionElemMatchObjectSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson("{x: {$elemMatch: {$and: [{a: {$gt: 0}}, {b: {$gt: 0}}]}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ fromjson("{x: {$elemMatch: {$and: [{a: {$gt: 0}}, {b: {$gt: 0}}]}}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: [{a: 1, b: -1}, {a: -1, b: 1}]}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -135,9 +136,9 @@ TEST(SerializeBasic, ExpressionElemMatchObjectWithEmptyStringSerializesCorrectly
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson("{'': {$elemMatch: {$and: [{a: {$gt: 0}}, {b: {$gt: 0}}]}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ fromjson("{'': {$elemMatch: {$and: [{a: {$gt: 0}}, {b: {$gt: 0}}]}}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{'': [{a: 1, b: -1}, {a: -1, b: 1}]}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -152,8 +153,8 @@ TEST(SerializeBasic, ExpressionElemMatchValueSerializesCorrectly) {
fromjson("{x: {$elemMatch: {$lt: 1, $gt: -1}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$elemMatch: {$lt: 1, $gt: -1}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$elemMatch: {$lt: 1, $gt: -1}}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: [{a: 1, b: -1}, {a: -1, b: 1}]}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -171,8 +172,8 @@ TEST(SerializeBasic, ExpressionElemMatchValueWithEmptyStringSerializesCorrectly)
fromjson("{x: {$elemMatch: {$lt: 1, $gt: -1}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$elemMatch: {$lt: 1, $gt: -1}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$elemMatch: {$lt: 1, $gt: -1}}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: [{a: 1, b: -1}, {a: -1, b: 1}]}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -189,8 +190,8 @@ TEST(SerializeBasic, ExpressionSizeSerializesCorrectly) {
Matcher original(fromjson("{x: {$size: 2}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$size: 2}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$size: 2}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: [1, 2, 3]}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -204,8 +205,8 @@ TEST(SerializeBasic, ExpressionAllSerializesCorrectly) {
Matcher original(fromjson("{x: {$all: [1, 2]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 1}}, {x: {$eq: 2}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: {$eq: 1}}, {x: {$eq: 2}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: [1, 2, 3]}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -219,8 +220,8 @@ TEST(SerializeBasic, ExpressionAllWithEmptyArraySerializesCorrectly) {
Matcher original(fromjson("{x: {$all: []}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$all: []}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$all: []}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: [1, 2, 3]}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -232,8 +233,8 @@ TEST(SerializeBasic, ExpressionAllWithRegex) {
fromjson("{x: {$all: [/a.b.c/, /.d.e./]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: /a.b.c/}, {x: /.d.e./}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$and: [{x: /a.b.c/}, {x: /.d.e./}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abcde'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -247,8 +248,8 @@ TEST(SerializeBasic, ExpressionEqSerializesCorrectly) {
Matcher original(fromjson("{x: {$eq: {a: 1}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$eq: {a: 1}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$eq: {a: 1}}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: {a: 1}}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -265,8 +266,8 @@ TEST(SerializeBasic, ExpressionNeSerializesCorrectly) {
Matcher original(fromjson("{x: {$ne: {a: 1}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$eq: {a: 1}}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$eq: {a: 1}}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: {a: 1}}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -280,8 +281,8 @@ TEST(SerializeBasic, ExpressionLtSerializesCorrectly) {
Matcher original(fromjson("{x: {$lt: 3}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$lt: 3}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$lt: 3}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -295,8 +296,8 @@ TEST(SerializeBasic, ExpressionGtSerializesCorrectly) {
Matcher original(fromjson("{x: {$gt: 3}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$gt: 3}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$gt: 3}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -310,8 +311,8 @@ TEST(SerializeBasic, ExpressionGteSerializesCorrectly) {
Matcher original(fromjson("{x: {$gte: 3}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$gte: 3}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$gte: 3}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -325,8 +326,8 @@ TEST(SerializeBasic, ExpressionLteSerializesCorrectly) {
Matcher original(fromjson("{x: {$lte: 3}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$lte: 3}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$lte: 3}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -340,8 +341,8 @@ TEST(SerializeBasic, ExpressionRegexWithObjSerializesCorrectly) {
Matcher original(fromjson("{x: {$regex: 'a.b'}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$regex: 'a.b'}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$regex: 'a.b'}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abc'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -355,8 +356,8 @@ TEST(SerializeBasic, ExpressionRegexWithValueSerializesCorrectly) {
Matcher original(fromjson("{x: /a.b/i}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$regex: 'a.b', $options: 'i'}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$regex: 'a.b', $options: 'i'}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abc'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -370,8 +371,8 @@ TEST(SerializeBasic, ExpressionRegexWithValueAndOptionsSerializesCorrectly) {
Matcher original(fromjson("{x: /a.b/}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$regex: 'a.b'}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$regex: 'a.b'}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abc'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -385,8 +386,8 @@ TEST(SerializeBasic, ExpressionRegexWithEqObjSerializesCorrectly) {
Matcher original(fromjson("{x: {$eq: {$regex: 'a.b'}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$eq: {$regex: 'a.b'}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$eq: {$regex: 'a.b'}}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abc'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -403,8 +404,8 @@ TEST(SerializeBasic, ExpressionModSerializesCorrectly) {
Matcher original(fromjson("{x: {$mod: [2, 1]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$mod: [2, 1]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$mod: [2, 1]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -418,8 +419,8 @@ TEST(SerializeBasic, ExpressionExistsTrueSerializesCorrectly) {
Matcher original(fromjson("{x: {$exists: true}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$exists: true}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$exists: true}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -433,8 +434,8 @@ TEST(SerializeBasic, ExpressionExistsFalseSerializesCorrectly) {
Matcher original(fromjson("{x: {$exists: false}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$exists: true}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$exists: true}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -448,8 +449,8 @@ TEST(SerializeBasic, ExpressionInSerializesCorrectly) {
Matcher original(fromjson("{x: {$in: [1, 2, 3]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$in: [1, 2, 3]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$in: [1, 2, 3]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -466,8 +467,8 @@ TEST(SerializeBasic, ExpressionInWithEmptyArraySerializesCorrectly) {
Matcher original(fromjson("{x: {$in: []}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$in: []}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$in: []}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -478,8 +479,8 @@ TEST(SerializeBasic, ExpressionInWithRegexSerializesCorrectly) {
Matcher original(fromjson("{x: {$in: [/\\d+/, /\\w+/]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$in: [/\\d+/, /\\w+/]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$in: [/\\d+/, /\\w+/]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: '1234'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -496,8 +497,8 @@ TEST(SerializeBasic, ExpressionNinSerializesCorrectly) {
Matcher original(fromjson("{x: {$nin: [1, 2, 3]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$in: [1, 2, 3]}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$in: [1, 2, 3]}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 1}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -514,8 +515,8 @@ TEST(SerializeBasic, ExpressionBitsAllSetSerializesCorrectly) {
Matcher original(fromjson("{x: {$bitsAllSet: [1, 3]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAllSet: [1, 3]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAllSet: [1, 3]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 2}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -529,8 +530,8 @@ TEST(SerializeBasic, ExpressionBitsAllClearSerializesCorrectly) {
Matcher original(fromjson("{x: {$bitsAllClear: [1, 3]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAllClear: [1, 3]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAllClear: [1, 3]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 2}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -544,8 +545,8 @@ TEST(SerializeBasic, ExpressionBitsAnySetSerializesCorrectly) {
Matcher original(fromjson("{x: {$bitsAnySet: [1, 3]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAnySet: [1, 3]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAnySet: [1, 3]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 2}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -559,8 +560,8 @@ TEST(SerializeBasic, ExpressionBitsAnyClearSerializesCorrectly) {
Matcher original(fromjson("{x: {$bitsAnyClear: [1, 3]}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAnyClear: [1, 3]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$bitsAnyClear: [1, 3]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 2}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -577,8 +578,8 @@ TEST(SerializeBasic, ExpressionNotSerializesCorrectly) {
Matcher original(fromjson("{x: {$not: {$eq: 3}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{$and: [{x: {$eq: 3}}]}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{$and: [{x: {$eq: 3}}]}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -592,9 +593,9 @@ TEST(SerializeBasic, ExpressionNotWithMultipleChildrenSerializesCorrectly) {
Matcher original(fromjson("{x: {$not: {$lt: 1, $gt: 3}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson("{$nor: [{$and: [{x: {$lt: 1}}, {x: {$gt: 3}}]}]}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ fromjson("{$nor: [{$and: [{x: {$lt: 1}}, {x: {$gt: 3}}]}]}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 2}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -609,8 +610,9 @@ TEST(SerializeBasic, ExpressionNotWithBitTestSerializesCorrectly) {
fromjson("{x: {$not: {$bitsAnySet: [1, 3]}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{$and: [{x: {$bitsAnySet: [1, 3]}}]}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ fromjson("{$nor: [{$and: [{x: {$bitsAnySet: [1, 3]}}]}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 2}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -624,8 +626,8 @@ TEST(SerializeBasic, ExpressionNotWithRegexObjSerializesCorrectly) {
Matcher original(fromjson("{x: {$not: {$regex: 'a.b'}}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: /a.b/}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: /a.b/}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abc'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -639,8 +641,8 @@ TEST(SerializeBasic, ExpressionNotWithRegexValueSerializesCorrectly) {
Matcher original(fromjson("{x: {$not: /a.b/}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: /a.b/}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: /a.b/}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abc'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -654,8 +656,8 @@ TEST(SerializeBasic, ExpressionNotWithRegexValueAndOptionsSerializesCorrectly) {
Matcher original(fromjson("{x: {$not: /a.b/i}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: /a.b/i}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: /a.b/i}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 'abc'}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -673,13 +675,13 @@ TEST(SerializeBasic, ExpressionNotWithGeoSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(
+ ASSERT_BSONOBJ_EQ(
*reserialized.getQuery(),
fromjson("{$nor: [{$and: [{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: "
"[[[0,0], "
"[5,0], [5, 5], [0, 5], [0, 0]]]}}}}]}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj =
fromjson("{x: {type: 'Polygon', coordinates: [[4, 4], [4, 6], [6, 6], [6, 4], [4, 4]]}}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -700,8 +702,8 @@ TEST(SerializeBasic, ExpressionNorSerializesCorrectly) {
fromjson("{$nor: [{x: 3}, {x: {$lt: 1}}]}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$eq: 3}}, {x: {$lt: 1}}]}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{$nor: [{x: {$eq: 3}}, {x: {$lt: 1}}]}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -718,8 +720,8 @@ TEST(SerializeBasic, ExpressionTypeSerializesCorrectly) {
Matcher original(fromjson("{x: {$type: 2}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$type: 2}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$type: 2}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -733,8 +735,8 @@ TEST(SerializeBasic, ExpressionTypeWithNumberSerializesCorrectly) {
Matcher original(fromjson("{x: {$type: 'number'}}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{x: {$type: 'number'}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{x: {$type: 'number'}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -748,8 +750,8 @@ TEST(SerializeBasic, ExpressionEmptySerializesCorrectly) {
Matcher original(fromjson("{}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: 3}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -760,9 +762,10 @@ TEST(SerializeBasic, ExpressionWhereSerializesCorrectly) {
Matcher original(fromjson("{$where: 'this.a == this.b'}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- BSONObjBuilder().appendCodeWScope("$where", "this.a == this.b", BSONObj()).obj());
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(
+ *reserialized.getQuery(),
+ BSONObjBuilder().appendCodeWScope("$where", "this.a == this.b", BSONObj()).obj());
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
TEST(SerializeBasic, ExpressionWhereWithScopeSerializesCorrectly) {
@@ -772,9 +775,9 @@ TEST(SerializeBasic, ExpressionWhereWithScopeSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- BSON("$where" << BSONCodeWScope("this.a == this.b", BSON("x" << 3))));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ BSON("$where" << BSONCodeWScope("this.a == this.b", BSON("x" << 3))));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
TEST(SerializeBasic, ExpressionCommentSerializesCorrectly) {
@@ -782,8 +785,8 @@ TEST(SerializeBasic, ExpressionCommentSerializesCorrectly) {
Matcher original(fromjson("{$comment: 'Hello'}"), ExtensionsCallbackNoop(), collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(), fromjson("{}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), fromjson("{}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{a: 1, b: 2}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -802,11 +805,11 @@ TEST(SerializeBasic, ExpressionGeoWithinSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(
+ ASSERT_BSONOBJ_EQ(
*reserialized.getQuery(),
fromjson("{x: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [10,0], "
"[10, 10], [0, 10], [0, 0]]]}}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: {type: 'Point', coordinates: [5, 5]}}");
ASSERT_EQ(original.matches(obj), reserialized.matches(obj));
@@ -825,11 +828,11 @@ TEST(SerializeBasic, ExpressionGeoIntersectsSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(
+ ASSERT_BSONOBJ_EQ(
*reserialized.getQuery(),
fromjson("{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [5,0], "
"[5, 5], [0, 5], [0, 0]]]}}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj =
fromjson("{x: {type: 'Polygon', coordinates: [[4, 4], [4, 6], [6, 6], [6, 4], [4, 4]]}}");
@@ -854,11 +857,11 @@ TEST(SerializeBasic, ExpressionNearSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(
+ ASSERT_BSONOBJ_EQ(
*reserialized.getQuery(),
fromjson("{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
"$minDistance: 1}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
TEST(SerializeBasic, ExpressionNearSphereSerializesCorrectly) {
@@ -871,10 +874,11 @@ TEST(SerializeBasic, ExpressionNearSphereSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson("{x: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0, 0]}, "
- "$maxDistance: 10, $minDistance: 1}}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(
+ *reserialized.getQuery(),
+ fromjson("{x: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0, 0]}, "
+ "$maxDistance: 10, $minDistance: 1}}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
TEST(SerializeBasic, ExpressionTextSerializesCorrectly) {
@@ -884,10 +888,10 @@ TEST(SerializeBasic, ExpressionTextSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson("{$text: {$search: 'a', $language: 'en', $caseSensitive: true, "
- "$diacriticSensitive: false}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ fromjson("{$text: {$search: 'a', $language: 'en', $caseSensitive: true, "
+ "$diacriticSensitive: false}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
TEST(SerializeBasic, ExpressionTextWithDefaultLanguageSerializesCorrectly) {
@@ -897,10 +901,10 @@ TEST(SerializeBasic, ExpressionTextWithDefaultLanguageSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson("{$text: {$search: 'a', $language: '', $caseSensitive: false, "
- "$diacriticSensitive: false}}"));
- ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(),
+ fromjson("{$text: {$search: 'a', $language: '', $caseSensitive: false, "
+ "$diacriticSensitive: false}}"));
+ ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
} // namespace
diff --git a/src/mongo/db/matcher/expression_where_base.cpp b/src/mongo/db/matcher/expression_where_base.cpp
index 21b746e13ab..6b0022a7e94 100644
--- a/src/mongo/db/matcher/expression_where_base.cpp
+++ b/src/mongo/db/matcher/expression_where_base.cpp
@@ -30,6 +30,8 @@
#include "mongo/db/matcher/expression_where_base.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
+
namespace mongo {
WhereMatchExpressionBase::WhereMatchExpressionBase(WhereParams params)
@@ -55,7 +57,8 @@ bool WhereMatchExpressionBase::equivalent(const MatchExpression* other) const {
return false;
}
const WhereMatchExpressionBase* realOther = static_cast<const WhereMatchExpressionBase*>(other);
- return getCode() == realOther->getCode() && getScope() == realOther->getScope();
+ return getCode() == realOther->getCode() &&
+ SimpleBSONObjComparator::kInstance.evaluate(getScope() == realOther->getScope());
}
} // namespace mongo
diff --git a/src/mongo/db/matcher/path_test.cpp b/src/mongo/db/matcher/path_test.cpp
index 73bbba4961f..734718ba332 100644
--- a/src/mongo/db/matcher/path_test.cpp
+++ b/src/mongo/db/matcher/path_test.cpp
@@ -268,7 +268,7 @@ TEST(Path, ArrayIndex3) {
ASSERT(cursor.more());
e = cursor.next();
- ASSERT_EQUALS(BSON("1" << 4), e.element().Obj());
+ ASSERT_BSONOBJ_EQ(BSON("1" << 4), e.element().Obj());
ASSERT(e.outerArray());
ASSERT(!cursor.more());
@@ -354,7 +354,7 @@ TEST(Path, NestedArrayImplicitTraversal) {
ASSERT(cursor.more());
e = cursor.next();
ASSERT_EQUALS(Array, e.element().type());
- ASSERT_EQUALS(BSON("0" << 2 << "1" << 3), e.element().Obj());
+ ASSERT_BSONOBJ_EQ(BSON("0" << 2 << "1" << 3), e.element().Obj());
ASSERT_EQUALS("0", e.arrayOffset().fieldNameStringData());
ASSERT(cursor.more());
@@ -372,7 +372,7 @@ TEST(Path, NestedArrayImplicitTraversal) {
ASSERT(cursor.more());
e = cursor.next();
ASSERT_EQUALS(Array, e.element().type());
- ASSERT_EQUALS(BSON("0" << 4 << "1" << 5), e.element().Obj());
+ ASSERT_BSONOBJ_EQ(BSON("0" << 4 << "1" << 5), e.element().Obj());
ASSERT_EQUALS("1", e.arrayOffset().fieldNameStringData());
ASSERT(!cursor.more());
@@ -407,7 +407,7 @@ TEST(Path, ArrayOffsetWithImplicitAndExplicitTraversal) {
ASSERT(cursor.more());
e = cursor.next();
ASSERT_EQUALS(Array, e.element().type());
- ASSERT_EQUALS(BSON("0" << 2 << "1" << 3), e.element().Obj());
+ ASSERT_BSONOBJ_EQ(BSON("0" << 2 << "1" << 3), e.element().Obj());
ASSERT(e.arrayOffset().eoo());
ASSERT(cursor.more());
diff --git a/src/mongo/db/ops/modifier_current_date_test.cpp b/src/mongo/db/ops/modifier_current_date_test.cpp
index 126b983adbc..81bcd4d04e0 100644
--- a/src/mongo/db/ops/modifier_current_date_test.cpp
+++ b/src/mongo/db/ops/modifier_current_date_test.cpp
@@ -44,9 +44,9 @@ using mongo::BSONObj;
using mongo::LogBuilder;
using mongo::ModifierCurrentDate;
using mongo::ModifierInterface;
-using mongo::Timestamp;
using mongo::Status;
using mongo::StringData;
+using mongo::Timestamp;
using mongo::fromjson;
using mongo::mutablebson::ConstElement;
using mongo::mutablebson::Document;
@@ -177,7 +177,7 @@ TEST(BoolInput, EmptyStartDoc) {
BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
Document logDoc;
@@ -198,7 +198,7 @@ TEST(DateInput, EmptyStartDoc) {
BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
Document logDoc;
@@ -221,7 +221,7 @@ TEST(TimestampInput, EmptyStartDoc) {
BSONObj olderDateObj = BSON("a" << ts);
ASSERT_OK(mod.apply());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
Document logDoc;
LogBuilder logBuilder(logDoc.root());
@@ -241,7 +241,7 @@ TEST(BoolInput, ExistingStringDoc) {
BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
Document logDoc;
@@ -262,7 +262,7 @@ TEST(BoolInput, ExistingDateDoc) {
BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
ASSERT_TRUE(doc.isInPlaceModeEnabled());
Document logDoc;
@@ -283,7 +283,7 @@ TEST(DateInput, ExistingDateDoc) {
BSONObj olderDateObj = fromjson("{ a : { $date : 0 } }");
ASSERT_OK(mod.apply());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
ASSERT_TRUE(doc.isInPlaceModeEnabled());
Document logDoc;
@@ -306,7 +306,7 @@ TEST(TimestampInput, ExistingDateDoc) {
BSONObj olderDateObj = BSON("a" << ts);
ASSERT_OK(mod.apply());
ASSERT_TRUE(doc.isInPlaceModeEnabled()); // Same Size as Date
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
Document logDoc;
LogBuilder logBuilder(logDoc.root());
@@ -328,7 +328,7 @@ TEST(TimestampInput, ExistingEmbeddedDateDoc) {
BSONObj olderDateObj = BSON("a" << BSON("b" << ts));
ASSERT_OK(mod.apply());
ASSERT_TRUE(doc.isInPlaceModeEnabled()); // Same Size as Date
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
Document logDoc;
LogBuilder logBuilder(logDoc.root());
@@ -350,7 +350,7 @@ TEST(DottedTimestampInput, EmptyStartDoc) {
BSONObj olderDateObj = BSON("a" << BSON("b" << ts));
ASSERT_OK(mod.apply());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_LESS_THAN(olderDateObj, doc.getObject());
+ ASSERT_BSONOBJ_LT(olderDateObj, doc.getObject());
Document logDoc;
LogBuilder logBuilder(logDoc.root());
diff --git a/src/mongo/db/ops/modifier_push_sorter_test.cpp b/src/mongo/db/ops/modifier_push_sorter_test.cpp
index 267636b6e70..c0c5f4dd46f 100644
--- a/src/mongo/db/ops/modifier_push_sorter_test.cpp
+++ b/src/mongo/db/ops/modifier_push_sorter_test.cpp
@@ -42,8 +42,8 @@ namespace {
using mongo::BSONObj;
using mongo::CollatorInterface;
using mongo::CollatorInterfaceMock;
-using mongo::fromjson;
using mongo::PatternElementCmp;
+using mongo::fromjson;
using mongo::mutablebson::Document;
using mongo::mutablebson::Element;
using mongo::mutablebson::sortChildren;
@@ -92,9 +92,9 @@ TEST_F(ObjectArray, NormalOrder) {
sortChildren(getArray(), PatternElementCmp(fromjson("{'a':1,'b':1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(1));
}
TEST_F(ObjectArray, MixedOrder) {
@@ -105,9 +105,9 @@ TEST_F(ObjectArray, MixedOrder) {
sortChildren(getArray(), PatternElementCmp(fromjson("{b:1,a:-1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(2));
}
TEST_F(ObjectArray, ExtraFields) {
@@ -118,9 +118,9 @@ TEST_F(ObjectArray, ExtraFields) {
sortChildren(getArray(), PatternElementCmp(fromjson("{a:1,b:1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(1));
}
TEST_F(ObjectArray, MissingFields) {
@@ -131,9 +131,9 @@ TEST_F(ObjectArray, MissingFields) {
sortChildren(getArray(), PatternElementCmp(fromjson("{b:1,c:1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(2));
}
TEST_F(ObjectArray, NestedFields) {
@@ -144,9 +144,9 @@ TEST_F(ObjectArray, NestedFields) {
sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b':1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(2));
}
TEST_F(ObjectArray, SimpleNestedFields) {
@@ -157,9 +157,9 @@ TEST_F(ObjectArray, SimpleNestedFields) {
sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b':1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(2));
}
TEST_F(ObjectArray, NestedInnerObjectDescending) {
@@ -170,9 +170,9 @@ TEST_F(ObjectArray, NestedInnerObjectDescending) {
sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b.d':-1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(2));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(1));
}
TEST_F(ObjectArray, NestedInnerObjectAscending) {
@@ -183,9 +183,9 @@ TEST_F(ObjectArray, NestedInnerObjectAscending) {
sortChildren(getArray(), PatternElementCmp(fromjson("{'a.b.d':1}"), collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(2));
}
TEST_F(ObjectArray, SortRespectsCollation) {
@@ -196,9 +196,9 @@ TEST_F(ObjectArray, SortRespectsCollation) {
sortChildren(getArray(), PatternElementCmp(fromjson("{a: 1}"), &collator));
- ASSERT_EQUALS(getOrigObj(0), getSortedObj(2));
- ASSERT_EQUALS(getOrigObj(1), getSortedObj(0));
- ASSERT_EQUALS(getOrigObj(2), getSortedObj(1));
+ ASSERT_BSONOBJ_EQ(getOrigObj(0), getSortedObj(2));
+ ASSERT_BSONOBJ_EQ(getOrigObj(1), getSortedObj(0));
+ ASSERT_BSONOBJ_EQ(getOrigObj(2), getSortedObj(1));
}
} // unnamed namespace
diff --git a/src/mongo/db/ops/write_ops_parsers_test.cpp b/src/mongo/db/ops/write_ops_parsers_test.cpp
index 78eb0212609..488a67ce509 100644
--- a/src/mongo/db/ops/write_ops_parsers_test.cpp
+++ b/src/mongo/db/ops/write_ops_parsers_test.cpp
@@ -126,7 +126,7 @@ TEST(CommandWriteOpsParsers, SingleInsert) {
ASSERT(!op.bypassDocumentValidation);
ASSERT(!op.continueOnError);
ASSERT_EQ(op.documents.size(), 1u);
- ASSERT_EQ(op.documents[0], obj);
+ ASSERT_BSONOBJ_EQ(op.documents[0], obj);
}
TEST(CommandWriteOpsParsers, EmptyMultiInsertFails) {
@@ -145,8 +145,8 @@ TEST(CommandWriteOpsParsers, RealMultiInsert) {
ASSERT(!op.bypassDocumentValidation);
ASSERT(!op.continueOnError);
ASSERT_EQ(op.documents.size(), 2u);
- ASSERT_EQ(op.documents[0], obj0);
- ASSERT_EQ(op.documents[1], obj1);
+ ASSERT_BSONOBJ_EQ(op.documents[0], obj0);
+ ASSERT_BSONOBJ_EQ(op.documents[1], obj1);
}
TEST(CommandWriteOpsParsers, Update) {
@@ -169,9 +169,9 @@ TEST(CommandWriteOpsParsers, Update) {
ASSERT(!op.bypassDocumentValidation);
ASSERT_EQ(op.continueOnError, false);
ASSERT_EQ(op.updates.size(), 1u);
- ASSERT_EQ(op.updates[0].query, query);
- ASSERT_EQ(op.updates[0].update, update);
- ASSERT_EQ(op.updates[0].collation, collation);
+ ASSERT_BSONOBJ_EQ(op.updates[0].query, query);
+ ASSERT_BSONOBJ_EQ(op.updates[0].update, update);
+ ASSERT_BSONOBJ_EQ(op.updates[0].collation, collation);
ASSERT_EQ(op.updates[0].upsert, upsert);
ASSERT_EQ(op.updates[0].multi, multi);
}
@@ -193,8 +193,8 @@ TEST(CommandWriteOpsParsers, Remove) {
ASSERT(!op.bypassDocumentValidation);
ASSERT_EQ(op.continueOnError, false);
ASSERT_EQ(op.deletes.size(), 1u);
- ASSERT_EQ(op.deletes[0].query, query);
- ASSERT_EQ(op.deletes[0].collation, collation);
+ ASSERT_BSONOBJ_EQ(op.deletes[0].query, query);
+ ASSERT_BSONOBJ_EQ(op.deletes[0].collation, collation);
ASSERT_EQ(op.deletes[0].multi, multi);
}
}
@@ -268,7 +268,7 @@ TEST(LegacyWriteOpsParsers, SingleInsert) {
ASSERT(!op.bypassDocumentValidation);
ASSERT_EQ(op.continueOnError, continueOnError);
ASSERT_EQ(op.documents.size(), 1u);
- ASSERT_EQ(op.documents[0], obj);
+ ASSERT_BSONOBJ_EQ(op.documents[0], obj);
}
}
@@ -295,8 +295,8 @@ TEST(LegacyWriteOpsParsers, RealMultiInsert) {
ASSERT(!op.bypassDocumentValidation);
ASSERT_EQ(op.continueOnError, continueOnError);
ASSERT_EQ(op.documents.size(), 2u);
- ASSERT_EQ(op.documents[0], obj0);
- ASSERT_EQ(op.documents[1], obj1);
+ ASSERT_BSONOBJ_EQ(op.documents[0], obj0);
+ ASSERT_BSONOBJ_EQ(op.documents[1], obj1);
}
}
@@ -313,8 +313,8 @@ TEST(LegacyWriteOpsParsers, Update) {
ASSERT(!op.bypassDocumentValidation);
ASSERT_EQ(op.continueOnError, false);
ASSERT_EQ(op.updates.size(), 1u);
- ASSERT_EQ(op.updates[0].query, query);
- ASSERT_EQ(op.updates[0].update, update);
+ ASSERT_BSONOBJ_EQ(op.updates[0].query, query);
+ ASSERT_BSONOBJ_EQ(op.updates[0].update, update);
ASSERT_EQ(op.updates[0].upsert, upsert);
ASSERT_EQ(op.updates[0].multi, multi);
}
@@ -332,7 +332,7 @@ TEST(LegacyWriteOpsParsers, Remove) {
ASSERT(!op.bypassDocumentValidation);
ASSERT_EQ(op.continueOnError, false);
ASSERT_EQ(op.deletes.size(), 1u);
- ASSERT_EQ(op.deletes[0].query, query);
+ ASSERT_BSONOBJ_EQ(op.deletes[0].query, query);
ASSERT_EQ(op.deletes[0].multi, multi);
}
}
diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp
index b9fe685961f..acd38732522 100644
--- a/src/mongo/db/pipeline/aggregation_request_test.cpp
+++ b/src/mongo/db/pipeline/aggregation_request_test.cpp
@@ -60,9 +60,9 @@ TEST(AggregationRequestTest, ShouldParseAllKnownOptions) {
ASSERT_TRUE(request.shouldBypassDocumentValidation());
ASSERT_TRUE(request.isCursorCommand());
ASSERT_EQ(request.getBatchSize().get(), 10);
- ASSERT_EQ(request.getCollation(),
- BSON("locale"
- << "en_US"));
+ ASSERT_BSONOBJ_EQ(request.getCollation(),
+ BSON("locale"
+ << "en_US"));
}
//
diff --git a/src/mongo/db/pipeline/document_source_test.cpp b/src/mongo/db/pipeline/document_source_test.cpp
index 7b2a0e72c12..ba326b9c82d 100644
--- a/src/mongo/db/pipeline/document_source_test.cpp
+++ b/src/mongo/db/pipeline/document_source_test.cpp
@@ -134,50 +134,50 @@ public:
const char* array[] = {"a", "b"}; // basic
DepsTracker deps;
deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
}
{
const char* array[] = {"a", "ab"}; // prefixed but not subfield
DepsTracker deps;
deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "ab" << 1 << "_id" << 0));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSON("a" << 1 << "ab" << 1 << "_id" << 0));
}
{
const char* array[] = {"a", "b", "a.b"}; // a.b included by a
DepsTracker deps;
deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSON("a" << 1 << "b" << 1 << "_id" << 0));
}
{
const char* array[] = {"a", "_id"}; // _id now included
DepsTracker deps;
deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
}
{
const char* array[] = {"a", "_id.a"}; // still include whole _id (SERVER-7502)
DepsTracker deps;
deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
}
{
const char* array[] = {"a", "_id", "_id.a"}; // handle both _id and subfield
DepsTracker deps;
deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSON("a" << 1 << "_id" << 1));
}
{
const char* array[] = {"a", "_id", "_id_a"}; // _id prefixed but non-subfield
DepsTracker deps;
deps.fields = arrayToSet(array);
- ASSERT_EQUALS(deps.toProjection(), BSON("_id_a" << 1 << "a" << 1 << "_id" << 1));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSON("_id_a" << 1 << "a" << 1 << "_id" << 1));
}
{
const char* array[] = {"a"}; // fields ignored with needWholeDocument
DepsTracker deps;
deps.fields = arrayToSet(array);
deps.needWholeDocument = true;
- ASSERT_EQUALS(deps.toProjection(), BSONObj());
+ ASSERT_BSONOBJ_EQ(deps.toProjection(), BSONObj());
}
{
const char* array[] = {"a"}; // needTextScore with needWholeDocument
@@ -185,14 +185,15 @@ public:
deps.fields = arrayToSet(array);
deps.needWholeDocument = true;
deps.setNeedTextScore(true);
- ASSERT_EQUALS(deps.toProjection(), BSON(Document::metaFieldTextScore << metaTextScore));
+ ASSERT_BSONOBJ_EQ(deps.toProjection(),
+ BSON(Document::metaFieldTextScore << metaTextScore));
}
{
const char* array[] = {"a"}; // needTextScore without needWholeDocument
DepsTracker deps(DepsTracker::MetadataAvailable::kTextScore);
deps.fields = arrayToSet(array);
deps.setNeedTextScore(true);
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(
deps.toProjection(),
BSON(Document::metaFieldTextScore << metaTextScore << "a" << 1 << "_id" << 0));
}
@@ -402,7 +403,7 @@ TEST(MakeMatchStageFromInput, NonArrayValueUsesEqQuery) {
Document input = DOC("local" << 1);
BSONObj matchStage = DocumentSourceLookUp::makeMatchStageFromInput(
input, FieldPath("local"), "foreign", BSONObj());
- ASSERT_EQ(matchStage, fromjson("{$match: {$and: [{foreign: {$eq: 1}}, {}]}}"));
+ ASSERT_BSONOBJ_EQ(matchStage, fromjson("{$match: {$and: [{foreign: {$eq: 1}}, {}]}}"));
}
TEST(MakeMatchStageFromInput, RegexValueUsesEqQuery) {
@@ -410,9 +411,10 @@ TEST(MakeMatchStageFromInput, RegexValueUsesEqQuery) {
Document input = DOC("local" << Value(regex));
BSONObj matchStage = DocumentSourceLookUp::makeMatchStageFromInput(
input, FieldPath("local"), "foreign", BSONObj());
- ASSERT_EQ(matchStage,
- BSON("$match" << BSON("$and" << BSON_ARRAY(BSON("foreign" << BSON("$eq" << regex))
- << BSONObj()))));
+ ASSERT_BSONOBJ_EQ(
+ matchStage,
+ BSON("$match" << BSON(
+ "$and" << BSON_ARRAY(BSON("foreign" << BSON("$eq" << regex)) << BSONObj()))));
}
TEST(MakeMatchStageFromInput, ArrayValueUsesInQuery) {
@@ -420,7 +422,7 @@ TEST(MakeMatchStageFromInput, ArrayValueUsesInQuery) {
Document input = DOC("local" << Value(inputArray));
BSONObj matchStage = DocumentSourceLookUp::makeMatchStageFromInput(
input, FieldPath("local"), "foreign", BSONObj());
- ASSERT_EQ(matchStage, fromjson("{$match: {$and: [{foreign: {$in: [1, 2]}}, {}]}}"));
+ ASSERT_BSONOBJ_EQ(matchStage, fromjson("{$match: {$and: [{foreign: {$in: [1, 2]}}, {}]}}"));
}
TEST(MakeMatchStageFromInput, ArrayValueWithRegexUsesOrQuery) {
@@ -429,13 +431,14 @@ TEST(MakeMatchStageFromInput, ArrayValueWithRegexUsesOrQuery) {
Document input = DOC("local" << Value(inputArray));
BSONObj matchStage = DocumentSourceLookUp::makeMatchStageFromInput(
input, FieldPath("local"), "foreign", BSONObj());
- ASSERT_EQ(matchStage,
- BSON("$match" << BSON(
- "$and" << BSON_ARRAY(
- BSON("$or" << BSON_ARRAY(BSON("foreign" << BSON("$eq" << Value(1)))
- << BSON("foreign" << BSON("$eq" << regex))
- << BSON("foreign" << BSON("$eq" << Value(2)))))
- << BSONObj()))));
+ ASSERT_BSONOBJ_EQ(
+ matchStage,
+ BSON("$match" << BSON(
+ "$and" << BSON_ARRAY(
+ BSON("$or" << BSON_ARRAY(BSON("foreign" << BSON("$eq" << Value(1)))
+ << BSON("foreign" << BSON("$eq" << regex))
+ << BSON("foreign" << BSON("$eq" << Value(2)))))
+ << BSONObj()))));
}
} // namespace DocumentSourceLookUp
@@ -485,7 +488,7 @@ private:
BSONElement specElement = spec.firstElement();
intrusive_ptr<DocumentSource> generated =
DocumentSourceGroup::createFromBson(specElement, ctx());
- ASSERT_EQUALS(spec, toBson(generated));
+ ASSERT_BSONOBJ_EQ(spec, toBson(generated));
}
intrusive_ptr<DocumentSource> _group;
TempDir _tempDir;
@@ -513,7 +516,7 @@ public:
boost::optional<Document> next = group()->getNext();
ASSERT(bool(next));
// The constant _id value from the $group spec is passed through.
- ASSERT_EQUALS(expected(), next->toBson());
+ ASSERT_BSONOBJ_EQ(expected(), next->toBson());
}
protected:
@@ -803,7 +806,7 @@ protected:
bsonResultSet << i->second;
}
// Check the result set.
- ASSERT_EQUALS(expectedResultSet(), bsonResultSet.arr());
+ ASSERT_BSONOBJ_EQ(expectedResultSet(), bsonResultSet.arr());
}
};
@@ -1419,8 +1422,8 @@ TEST_F(ProjectStageTest, ShouldOptimizeInnerExpressions) {
// The $and should have been replaced with its only argument.
vector<Value> serializedArray;
project()->serializeToArray(serializedArray);
- ASSERT_EQUALS(serializedArray[0].getDocument().toBson(),
- fromjson("{$project: {_id: true, a: {$const: true}}}"));
+ ASSERT_BSONOBJ_EQ(serializedArray[0].getDocument().toBson(),
+ fromjson("{$project: {_id: true, a: {$const: true}}}"));
};
TEST_F(ProjectStageTest, ShouldErrorOnNonObjectSpec) {
@@ -1882,7 +1885,7 @@ private:
void checkBsonRepresentation(const BSONObj& spec) {
Value serialized = static_cast<DocumentSourceSample*>(sample())->serialize(false);
auto generatedSpec = serialized.getDocument().toBson();
- ASSERT_EQUALS(spec, generatedSpec);
+ ASSERT_BSONOBJ_EQ(spec, generatedSpec);
}
};
@@ -2161,7 +2164,7 @@ private:
vector<Value> arr;
_sort->serializeToArray(arr);
BSONObj generatedSpec = arr[0].getDocument().toBson();
- ASSERT_EQUALS(spec, generatedSpec);
+ ASSERT_BSONOBJ_EQ(spec, generatedSpec);
}
intrusive_ptr<DocumentSource> _sort;
};
@@ -2178,7 +2181,7 @@ public:
{ // pre-limit checks
vector<Value> arr;
sort()->serializeToArray(arr);
- ASSERT_EQUALS(arr[0].getDocument().toBson(), BSON("$sort" << BSON("a" << 1)));
+ ASSERT_BSONOBJ_EQ(arr[0].getDocument().toBson(), BSON("$sort" << BSON("a" << 1)));
ASSERT(sort()->getShardSource() == NULL);
ASSERT(sort()->getMergeSource() != NULL);
@@ -2242,7 +2245,7 @@ public:
bsonResultSet << *i;
}
// Check the result set.
- ASSERT_EQUALS(expectedResultSet(), bsonResultSet.arr());
+ ASSERT_BSONOBJ_EQ(expectedResultSet(), bsonResultSet.arr());
}
protected:
@@ -2695,7 +2698,7 @@ private:
bsonResultSet << *i;
}
// Check the result set.
- ASSERT_EQUALS(expectedResults, bsonResultSet.arr());
+ ASSERT_BSONOBJ_EQ(expectedResults, bsonResultSet.arr());
}
/**
@@ -2706,8 +2709,8 @@ private:
vector<Value> arr;
_unwind->serializeToArray(arr);
BSONObj generatedSpec = Value(arr[0]).getDocument().toBson();
- ASSERT_EQUALS(expectedSerialization(preserveNullAndEmptyArrays, includeArrayIndex),
- generatedSpec);
+ ASSERT_BSONOBJ_EQ(expectedSerialization(preserveNullAndEmptyArrays, includeArrayIndex),
+ generatedSpec);
}
BSONObj expectedSerialization(bool preserveNullAndEmptyArrays, bool includeArrayIndex) const {
@@ -3356,7 +3359,7 @@ public:
void test(string input, string safePortion) {
try {
intrusive_ptr<DocumentSourceMatch> match = makeMatch(input);
- ASSERT_EQUALS(match->redactSafePortion(), fromjson(safePortion));
+ ASSERT_BSONOBJ_EQ(match->redactSafePortion(), fromjson(safePortion));
} catch (...) {
unittest::log() << "Problem with redactSafePortion() of: " << input;
throw;
@@ -3612,60 +3615,61 @@ public:
Pipeline::SourceContainer container;
// Check initial state
- ASSERT_EQUALS(match1->getQuery(), BSON("a" << 1));
- ASSERT_EQUALS(match2->getQuery(), BSON("b" << 1));
- ASSERT_EQUALS(match3->getQuery(), BSON("c" << 1));
+ ASSERT_BSONOBJ_EQ(match1->getQuery(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(match2->getQuery(), BSON("b" << 1));
+ ASSERT_BSONOBJ_EQ(match3->getQuery(), BSON("c" << 1));
container.push_back(match1);
container.push_back(match2);
match1->optimizeAt(container.begin(), &container);
ASSERT_EQUALS(container.size(), 1U);
- ASSERT_EQUALS(match1->getQuery(), fromjson("{'$and': [{a:1}, {b:1}]}"));
+ ASSERT_BSONOBJ_EQ(match1->getQuery(), fromjson("{'$and': [{a:1}, {b:1}]}"));
container.push_back(match3);
match1->optimizeAt(container.begin(), &container);
ASSERT_EQUALS(container.size(), 1U);
- ASSERT_EQUALS(match1->getQuery(),
- fromjson("{'$and': [{'$and': [{a:1}, {b:1}]},"
- "{c:1}]}"));
+ ASSERT_BSONOBJ_EQ(match1->getQuery(),
+ fromjson("{'$and': [{'$and': [{a:1}, {b:1}]},"
+ "{c:1}]}"));
}
};
TEST(ObjectForMatch, ShouldExtractTopLevelFieldIfDottedFieldNeeded) {
Document input(fromjson("{a: 1, b: {c: 1, d: 1}}"));
BSONObj expected = fromjson("{b: {c: 1, d: 1}}");
- ASSERT_EQUALS(expected, DocumentSourceMatch::getObjectForMatch(input, {"b.c"}));
+ ASSERT_BSONOBJ_EQ(expected, DocumentSourceMatch::getObjectForMatch(input, {"b.c"}));
}
TEST(ObjectForMatch, ShouldExtractEntireArray) {
Document input(fromjson("{a: [1, 2, 3], b: 1}"));
BSONObj expected = fromjson("{a: [1, 2, 3]}");
- ASSERT_EQUALS(expected, DocumentSourceMatch::getObjectForMatch(input, {"a"}));
+ ASSERT_BSONOBJ_EQ(expected, DocumentSourceMatch::getObjectForMatch(input, {"a"}));
}
TEST(ObjectForMatch, ShouldOnlyAddPrefixedFieldOnceIfTwoDottedSubfields) {
Document input(fromjson("{a: 1, b: {c: 1, f: {d: {e: 1}}}}"));
BSONObj expected = fromjson("{b: {c: 1, f: {d: {e: 1}}}}");
- ASSERT_EQUALS(expected, DocumentSourceMatch::getObjectForMatch(input, {"b.f", "b.f.d.e"}));
+ ASSERT_BSONOBJ_EQ(expected, DocumentSourceMatch::getObjectForMatch(input, {"b.f", "b.f.d.e"}));
}
TEST(ObjectForMatch, MissingFieldShouldNotAppearInResult) {
Document input(fromjson("{a: 1}"));
BSONObj expected;
- ASSERT_EQUALS(expected, DocumentSourceMatch::getObjectForMatch(input, {"b", "c"}));
+ ASSERT_BSONOBJ_EQ(expected, DocumentSourceMatch::getObjectForMatch(input, {"b", "c"}));
}
TEST(ObjectForMatch, ShouldSerializeNothingIfNothingIsNeeded) {
Document input(fromjson("{a: 1, b: {c: 1}}"));
BSONObj expected;
- ASSERT_EQUALS(expected, DocumentSourceMatch::getObjectForMatch(input, std::set<std::string>{}));
+ ASSERT_BSONOBJ_EQ(expected,
+ DocumentSourceMatch::getObjectForMatch(input, std::set<std::string>{}));
}
TEST(ObjectForMatch, ShouldExtractEntireArrayFromPrefixOfDottedField) {
Document input(fromjson("{a: [{b: 1}, {b: 2}], c: 1}"));
BSONObj expected = fromjson("{a: [{b: 1}, {b: 2}]}");
- ASSERT_EQUALS(expected, DocumentSourceMatch::getObjectForMatch(input, {"a.b"}));
+ ASSERT_BSONOBJ_EQ(expected, DocumentSourceMatch::getObjectForMatch(input, {"a.b"}));
}
@@ -4812,8 +4816,8 @@ TEST_F(AddFieldsTest, OptimizesInnerExpressions) {
// The $and should have been replaced with its only argument.
vector<Value> serializedArray;
addFields()->serializeToArray(serializedArray);
- ASSERT_EQUALS(serializedArray[0].getDocument().toBson(),
- fromjson("{$addFields: {a: {$const: true}}}"));
+ ASSERT_BSONOBJ_EQ(serializedArray[0].getDocument().toBson(),
+ fromjson("{$addFields: {a: {$const: true}}}"));
}
// Verify that the addFields stage requires a valid object specification.
diff --git a/src/mongo/db/pipeline/document_value_test.cpp b/src/mongo/db/pipeline/document_value_test.cpp
index 8c5f9a755c0..adec442f7b8 100644
--- a/src/mongo/db/pipeline/document_value_test.cpp
+++ b/src/mongo/db/pipeline/document_value_test.cpp
@@ -69,7 +69,7 @@ void assertRoundTrips(const Document& document1) {
BSONObj obj1 = toBson(document1);
Document document2 = fromBson(obj1);
BSONObj obj2 = toBson(document2);
- ASSERT_EQUALS(obj1, obj2);
+ ASSERT_BSONOBJ_EQ(obj1, obj2);
ASSERT_DOCUMENT_EQ(document1, document2);
}
@@ -407,7 +407,7 @@ public:
const Document doc2 = fromBson(obj);
// logical equality
- ASSERT_EQUALS(obj, obj2);
+ ASSERT_BSONOBJ_EQ(obj, obj2);
ASSERT_DOCUMENT_EQ(doc, doc2);
// binary equality
@@ -567,7 +567,7 @@ void assertRoundTrips(const Value& value1) {
BSONObj obj1 = toBson(value1);
Value value2 = fromBson(obj1);
BSONObj obj2 = toBson(value2);
- ASSERT_EQUALS(obj1, obj2);
+ ASSERT_BSONOBJ_EQ(obj1, obj2);
ASSERT_VALUE_EQ(value1, value2);
ASSERT_EQUALS(value1.getType(), value2.getType());
}
@@ -1428,9 +1428,9 @@ public:
Value(4.4).addToBsonObj(&bob, "a");
Value(22).addToBsonObj(&bob, "b");
Value("astring").addToBsonObj(&bob, "c");
- ASSERT_EQUALS(BSON("a" << 4.4 << "b" << 22 << "c"
- << "astring"),
- bob.obj());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 4.4 << "b" << 22 << "c"
+ << "astring"),
+ bob.obj());
}
};
@@ -1442,7 +1442,7 @@ public:
Value(4.4).addToBsonArray(&bab);
Value(22).addToBsonArray(&bab);
Value("astring").addToBsonArray(&bab);
- ASSERT_EQUALS(BSON_ARRAY(4.4 << 22 << "astring"), bab.arr());
+ ASSERT_BSONOBJ_EQ(BSON_ARRAY(4.4 << 22 << "astring"), bab.arr());
}
};
diff --git a/src/mongo/db/pipeline/expression_test.cpp b/src/mongo/db/pipeline/expression_test.cpp
index 144f2538723..ed579bd3bc3 100644
--- a/src/mongo/db/pipeline/expression_test.cpp
+++ b/src/mongo/db/pipeline/expression_test.cpp
@@ -99,7 +99,7 @@ static BSONObj constify(const BSONObj& obj, bool parentIsArray = false) {
/** Check binary equality, ensuring use of the same numeric types. */
static void assertBinaryEqual(const BSONObj& expected, const BSONObj& actual) {
- ASSERT_EQUALS(expected, actual);
+ ASSERT_BSONOBJ_EQ(expected, actual);
ASSERT(expected.binaryEqual(actual));
}
@@ -212,13 +212,13 @@ protected:
++i) {
dependenciesBson << *i;
}
- ASSERT_EQUALS(expectedDependencies, dependenciesBson.arr());
+ ASSERT_BSONOBJ_EQ(expectedDependencies, dependenciesBson.arr());
ASSERT_EQUALS(false, dependencies.needWholeDocument);
ASSERT_EQUALS(false, dependencies.getNeedTextScore());
}
void assertContents(const intrusive_ptr<Testable>& expr, const BSONArray& expectedContents) {
- ASSERT_EQUALS(constify(BSON("$testable" << expectedContents)), expressionToBson(expr));
+ ASSERT_BSONOBJ_EQ(constify(BSON("$testable" << expectedContents)), expressionToBson(expr));
}
void addOperandArrayToExpr(const intrusive_ptr<Testable>& expr, const BSONArray& operands) {
@@ -276,14 +276,14 @@ TEST_F(ExpressionNaryTest, ValidateObjectExpressionDependency) {
TEST_F(ExpressionNaryTest, SerializationToBsonObj) {
_notAssociativeNorCommutative->addOperand(ExpressionConstant::create(nullptr, Value(5)));
- ASSERT_EQUALS(BSON("foo" << BSON("$testable" << BSON_ARRAY(BSON("$const" << 5)))),
- BSON("foo" << _notAssociativeNorCommutative->serialize(false)));
+ ASSERT_BSONOBJ_EQ(BSON("foo" << BSON("$testable" << BSON_ARRAY(BSON("$const" << 5)))),
+ BSON("foo" << _notAssociativeNorCommutative->serialize(false)));
}
TEST_F(ExpressionNaryTest, SerializationToBsonArr) {
_notAssociativeNorCommutative->addOperand(ExpressionConstant::create(nullptr, Value(5)));
- ASSERT_EQUALS(constify(BSON_ARRAY(BSON("$testable" << BSON_ARRAY(5)))),
- BSON_ARRAY(_notAssociativeNorCommutative->serialize(false)));
+ ASSERT_BSONOBJ_EQ(constify(BSON_ARRAY(BSON("$testable" << BSON_ARRAY(5)))),
+ BSON_ARRAY(_notAssociativeNorCommutative->serialize(false)));
}
// Verify that the internal operands are optimized
@@ -303,7 +303,7 @@ TEST_F(ExpressionNaryTest, AllConstantOperandOptimization) {
assertContents(_notAssociativeNorCommutative, spec);
intrusive_ptr<Expression> optimized = _notAssociativeNorCommutative->optimize();
ASSERT(_notAssociativeNorCommutative != optimized);
- ASSERT_EQUALS(BSON("$const" << BSON_ARRAY(1 << 2)), expressionToBson(optimized));
+ ASSERT_BSONOBJ_EQ(BSON("$const" << BSON_ARRAY(1 << 2)), expressionToBson(optimized));
}
// Verify that the optimization of grouping constant and non-constant operands
@@ -917,7 +917,7 @@ public:
void run() {
intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
populateOperands(expression);
- ASSERT_EQUALS(expectedResult(), toBson(expression->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(expectedResult(), toBson(expression->evaluate(Document())));
}
protected:
@@ -932,7 +932,7 @@ public:
void run() {
intrusive_ptr<ExpressionNary> expression = new ExpressionAdd();
expression->addOperand(ExpressionConstant::create(nullptr, Value(2)));
- ASSERT_EQUALS(BSON("" << 2), toBson(expression->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(BSON("" << 2), toBson(expression->evaluate(Document())));
}
};
@@ -1198,12 +1198,12 @@ public:
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
- ASSERT_EQUALS(BSON("" << expectedResult()),
- toBson(expression->evaluate(fromBson(BSON("a" << 1)))));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()),
+ toBson(expression->evaluate(fromBson(BSON("a" << 1)))));
intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS(BSON("" << expectedResult()),
- toBson(optimized->evaluate(fromBson(BSON("a" << 1)))));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()),
+ toBson(optimized->evaluate(fromBson(BSON("a" << 1)))));
}
protected:
@@ -1222,9 +1222,9 @@ public:
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS(expectedOptimized(), expressionToBson(optimized));
+ ASSERT_BSONOBJ_EQ(expectedOptimized(), expressionToBson(optimized));
}
protected:
@@ -1579,7 +1579,7 @@ public:
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS(constify(expectedOptimized()), expressionToBson(optimized));
+ ASSERT_BSONOBJ_EQ(constify(expectedOptimized()), expressionToBson(optimized));
}
protected:
@@ -1612,12 +1612,12 @@ public:
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
// Check expression spec round trip.
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
// Check evaluation result.
- ASSERT_EQUALS(expectedResult(), toBson(expression->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(expectedResult(), toBson(expression->evaluate(Document())));
// Check that the result is the same after optimizing.
intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS(expectedResult(), toBson(optimized->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(expectedResult(), toBson(optimized->evaluate(Document())));
}
protected:
@@ -2575,12 +2575,12 @@ public:
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
- ASSERT_EQUALS(BSON("" << expectedResult()),
- toBson(expression->evaluate(fromBson(BSON("a" << 1)))));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()),
+ toBson(expression->evaluate(fromBson(BSON("a" << 1)))));
intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS(BSON("" << expectedResult()),
- toBson(optimized->evaluate(fromBson(BSON("a" << 1)))));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()),
+ toBson(optimized->evaluate(fromBson(BSON("a" << 1)))));
}
protected:
@@ -2597,9 +2597,9 @@ public:
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
intrusive_ptr<Expression> optimized = expression->optimize();
- ASSERT_EQUALS(expectedOptimized(), expressionToBson(optimized));
+ ASSERT_BSONOBJ_EQ(expectedOptimized(), expressionToBson(optimized));
}
protected:
@@ -3400,8 +3400,8 @@ private:
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
- ASSERT_EQUALS(constify(spec), expressionToBson(expression));
- ASSERT_EQUALS(BSON("" << expectedResult), toBson(expression->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(constify(spec), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult), toBson(expression->evaluate(Document())));
}
};
@@ -3528,8 +3528,8 @@ public:
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
- ASSERT_EQUALS(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
}
protected:
@@ -3785,8 +3785,8 @@ public:
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
- ASSERT_EQUALS(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
}
protected:
@@ -3844,8 +3844,8 @@ public:
VariablesParseState vps(&idGenerator);
intrusive_ptr<Expression> expression = Expression::parseOperand(specElement, vps);
expression->injectExpressionContext(expCtx);
- ASSERT_EQUALS(constify(spec()), expressionToBson(expression));
- ASSERT_EQUALS(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
+ ASSERT_BSONOBJ_EQ(constify(spec()), expressionToBson(expression));
+ ASSERT_BSONOBJ_EQ(BSON("" << expectedResult()), toBson(expression->evaluate(Document())));
}
protected:
diff --git a/src/mongo/db/pipeline/lookup_set_cache_test.cpp b/src/mongo/db/pipeline/lookup_set_cache_test.cpp
index 13f445e67c3..cd27aab99a9 100644
--- a/src/mongo/db/pipeline/lookup_set_cache_test.cpp
+++ b/src/mongo/db/pipeline/lookup_set_cache_test.cpp
@@ -43,7 +43,9 @@ namespace mongo {
bool vectorContains(const boost::optional<std::vector<BSONObj>>& vector,
const BSONObj& expectedObj) {
ASSERT_TRUE(vector);
- return std::find(vector->begin(), vector->end(), expectedObj) != vector->end();
+ return std::find_if(vector->begin(), vector->end(), [&expectedObj](const BSONObj& obj) {
+ return SimpleBSONObjComparator::kInstance.evaluate(expectedObj == obj);
+ }) != vector->end();
}
BSONObj intToObj(int value) {
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index e4e2e6bb67c..5dd74393a92 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/pipeline/pipeline_d.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
@@ -138,7 +139,8 @@ public:
const std::list<BSONObj>& originalIndexes) final {
Lock::GlobalWrite globalLock(_ctx->opCtx->lockState());
- if (originalCollectionOptions != getCollectionOptions(targetNs)) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(originalCollectionOptions !=
+ getCollectionOptions(targetNs))) {
return {ErrorCodes::CommandFailed,
str::stream() << "collection options of target collection " << targetNs.ns()
<< " changed during processing. Original options: "
@@ -146,7 +148,13 @@ public:
<< ", new options: "
<< getCollectionOptions(targetNs)};
}
- if (originalIndexes != _client.getIndexSpecs(targetNs.ns())) {
+
+ auto currentIndexes = _client.getIndexSpecs(targetNs.ns());
+ if (originalIndexes.size() != currentIndexes.size() ||
+ !std::equal(originalIndexes.begin(),
+ originalIndexes.end(),
+ currentIndexes.begin(),
+ SimpleBSONObjComparator::kInstance.makeEqualTo())) {
return {ErrorCodes::CommandFailed,
str::stream() << "indexes of target collection " << targetNs.ns()
<< " changed during processing."};
diff --git a/src/mongo/db/pipeline/pipeline_test.cpp b/src/mongo/db/pipeline/pipeline_test.cpp
index 864680bcbb9..16625527383 100644
--- a/src/mongo/db/pipeline/pipeline_test.cpp
+++ b/src/mongo/db/pipeline/pipeline_test.cpp
@@ -1071,7 +1071,7 @@ TEST(PipelineInitialSource, GeoNearInitialQuery) {
intrusive_ptr<ExpressionContext> ctx = new ExpressionContext(
&_opCtx, AggregationRequest(NamespaceString("a.collection"), rawPipeline));
auto pipe = uassertStatusOK(Pipeline::parse(rawPipeline, ctx));
- ASSERT_EQ(pipe->getInitialQuery(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(pipe->getInitialQuery(), BSON("a" << 1));
}
TEST(PipelineInitialSource, MatchInitialQuery) {
@@ -1081,7 +1081,7 @@ TEST(PipelineInitialSource, MatchInitialQuery) {
&_opCtx, AggregationRequest(NamespaceString("a.collection"), rawPipeline));
auto pipe = uassertStatusOK(Pipeline::parse(rawPipeline, ctx));
- ASSERT_EQ(pipe->getInitialQuery(), BSON("a" << 4));
+ ASSERT_BSONOBJ_EQ(pipe->getInitialQuery(), BSON("a" << 4));
}
TEST(PipelineInitialSource, ParseCollation) {
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index 9c95d347a08..c7a1f2a05a2 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -574,10 +574,11 @@ TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
// Descriptive test. The childCq's filter should be the relevant $or clause, rather than the
// entire query predicate.
- ASSERT_EQ(childCq->getQueryRequest().getFilter(), baseCq->getQueryRequest().getFilter());
+ ASSERT_BSONOBJ_EQ(childCq->getQueryRequest().getFilter(),
+ baseCq->getQueryRequest().getFilter());
- ASSERT_EQ(childCq->getQueryRequest().getProj(), baseCq->getQueryRequest().getProj());
- ASSERT_EQ(childCq->getQueryRequest().getSort(), baseCq->getQueryRequest().getSort());
+ ASSERT_BSONOBJ_EQ(childCq->getQueryRequest().getProj(), baseCq->getQueryRequest().getProj());
+ ASSERT_BSONOBJ_EQ(childCq->getQueryRequest().getSort(), baseCq->getQueryRequest().getSort());
ASSERT_TRUE(childCq->getQueryRequest().isExplain());
}
diff --git a/src/mongo/db/query/collation/collation_index_key_test.cpp b/src/mongo/db/query/collation/collation_index_key_test.cpp
index bcd341c9488..0b92c5d7cae 100644
--- a/src/mongo/db/query/collation/collation_index_key_test.cpp
+++ b/src/mongo/db/query/collation/collation_index_key_test.cpp
@@ -70,7 +70,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlyAppendsElementWithNullC
BSONObj dataObj = BSON("test" << 1);
BSONObjBuilder out;
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), nullptr, &out);
- ASSERT_EQ(out.obj(), BSON("" << 1));
+ ASSERT_BSONOBJ_EQ(out.obj(), BSON("" << 1));
}
TEST(CollationIndexKeyTest, CollationAwareAppendReversesStringWithReverseMockCollator) {
@@ -79,9 +79,9 @@ TEST(CollationIndexKeyTest, CollationAwareAppendReversesStringWithReverseMockCol
<< "string");
BSONObjBuilder out;
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out);
- ASSERT_EQ(out.obj(),
- BSON(""
- << "gnirts"));
+ ASSERT_BSONOBJ_EQ(out.obj(),
+ BSON(""
+ << "gnirts"));
}
TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlySerializesEmptyComparisonKey) {
@@ -96,7 +96,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlySerializesEmptyComparis
BSONObjBuilder out;
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out);
- ASSERT_EQ(out.obj(), expectedObj);
+ ASSERT_BSONOBJ_EQ(out.obj(), expectedObj);
}
TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlySerializesWithEmbeddedNullByte) {
@@ -111,7 +111,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlySerializesWithEmbeddedN
BSONObjBuilder out;
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out);
- ASSERT_EQ(out.obj(), expectedObj);
+ ASSERT_BSONOBJ_EQ(out.obj(), expectedObj);
}
TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlyReversesSimpleEmbeddedObject) {
@@ -123,7 +123,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlyReversesSimpleEmbeddedO
BSONObjBuilder out;
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out);
- ASSERT_EQ(out.obj(), expected);
+ ASSERT_BSONOBJ_EQ(out.obj(), expected);
}
TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlyReversesSimpleEmbeddedArray) {
@@ -135,7 +135,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlyReversesSimpleEmbeddedA
BSONObjBuilder out;
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out);
- ASSERT_EQ(out.obj(), expected);
+ ASSERT_BSONOBJ_EQ(out.obj(), expected);
}
TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlyReversesComplexNesting) {
@@ -151,7 +151,7 @@ TEST(CollationIndexKeyTest, CollationAwareAppendCorrectlyReversesComplexNesting)
BSONObjBuilder out;
CollationIndexKey::collationAwareIndexKeyAppend(dataObj.firstElement(), &collator, &out);
- ASSERT_EQ(out.obj(), expected);
+ ASSERT_BSONOBJ_EQ(out.obj(), expected);
}
} // namespace
diff --git a/src/mongo/db/query/collation/collation_spec_test.cpp b/src/mongo/db/query/collation/collation_spec_test.cpp
index cd8553c7305..29ab7f99473 100644
--- a/src/mongo/db/query/collation/collation_spec_test.cpp
+++ b/src/mongo/db/query/collation/collation_spec_test.cpp
@@ -203,7 +203,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesDefaults) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstUpper) {
@@ -233,7 +233,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstUpper) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstLower) {
@@ -263,7 +263,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesCaseFirstLower) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesPrimaryStrength) {
@@ -293,7 +293,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesPrimaryStrength) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesSecondaryStrength) {
@@ -323,7 +323,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesSecondaryStrength) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesQuaternaryStrength) {
@@ -353,7 +353,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesQuaternaryStrength) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesIdenticalStrength) {
@@ -383,7 +383,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesIdenticalStrength) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesAlternateShifted) {
@@ -413,7 +413,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesAlternateShifted) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
TEST(CollationSpecTest, ToBSONCorrectlySerializesMaxVariableSpace) {
@@ -443,7 +443,7 @@ TEST(CollationSpecTest, ToBSONCorrectlySerializesMaxVariableSpace) {
<< "version"
<< "myVersion");
- ASSERT_EQ(expectedObj, collationSpec.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, collationSpec.toBSON());
}
} // namespace
diff --git a/src/mongo/db/query/collation/collator_factory_mock.cpp b/src/mongo/db/query/collation/collator_factory_mock.cpp
index ac606847131..13784517d1d 100644
--- a/src/mongo/db/query/collation/collator_factory_mock.cpp
+++ b/src/mongo/db/query/collation/collator_factory_mock.cpp
@@ -33,6 +33,7 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/stdx/memory.h"
@@ -40,7 +41,8 @@ namespace mongo {
StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryMock::makeFromBSON(
const BSONObj& spec) {
- if (spec == BSON(CollationSpec::kLocaleField << CollationSpec::kSimpleBinaryComparison)) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(
+ spec == BSON(CollationSpec::kLocaleField << CollationSpec::kSimpleBinaryComparison))) {
return {nullptr};
}
auto collator =
diff --git a/src/mongo/db/query/collation/collator_interface.h b/src/mongo/db/query/collation/collator_interface.h
index 76f236c7413..7d44a4c63ed 100644
--- a/src/mongo/db/query/collation/collator_interface.h
+++ b/src/mongo/db/query/collation/collator_interface.h
@@ -34,6 +34,7 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h"
#include "mongo/base/string_data_comparator_interface.h"
+#include "mongo/bson/bsonobj_comparator_interface.h"
#include "mongo/db/query/collation/collation_spec.h"
namespace mongo {
@@ -46,7 +47,8 @@ namespace mongo {
*
* Does not throw exceptions.
*/
-class CollatorInterface : public StringData::ComparatorInterface {
+class CollatorInterface : public StringData::ComparatorInterface,
+ public BSONObj::ComparatorInterface {
MONGO_DISALLOW_COPYING(CollatorInterface);
public:
@@ -100,6 +102,8 @@ public:
*/
virtual int compare(StringData left, StringData right) const = 0;
+ virtual int compare(const BSONObj& left, const BSONObj& right) const = 0;
+
/**
* Hashes the string such that strings which are equal under this collation also have equal
* hashes.
diff --git a/src/mongo/db/query/collation/collator_interface_icu.cpp b/src/mongo/db/query/collation/collator_interface_icu.cpp
index 12498d12712..1040591fef5 100644
--- a/src/mongo/db/query/collation/collator_interface_icu.cpp
+++ b/src/mongo/db/query/collation/collator_interface_icu.cpp
@@ -71,6 +71,10 @@ int CollatorInterfaceICU::compare(StringData left, StringData right) const {
MONGO_UNREACHABLE;
}
+int CollatorInterfaceICU::compare(const BSONObj& left, const BSONObj& right) const {
+ return left.woCompare(right, BSONObj(), true, this);
+}
+
CollatorInterface::ComparisonKey CollatorInterfaceICU::getComparisonKey(
StringData stringData) const {
// A StringPiece is ICU's StringData. They are logically the same abstraction.
diff --git a/src/mongo/db/query/collation/collator_interface_icu.h b/src/mongo/db/query/collation/collator_interface_icu.h
index 9bb83824891..c35cc898c37 100644
--- a/src/mongo/db/query/collation/collator_interface_icu.h
+++ b/src/mongo/db/query/collation/collator_interface_icu.h
@@ -50,6 +50,8 @@ public:
int compare(StringData left, StringData right) const final;
+ int compare(const BSONObj& left, const BSONObj& right) const final;
+
ComparisonKey getComparisonKey(StringData stringData) const final;
private:
diff --git a/src/mongo/db/query/collation/collator_interface_mock.cpp b/src/mongo/db/query/collation/collator_interface_mock.cpp
index 638427bcd99..74fc7a26f4f 100644
--- a/src/mongo/db/query/collation/collator_interface_mock.cpp
+++ b/src/mongo/db/query/collation/collator_interface_mock.cpp
@@ -92,6 +92,10 @@ int CollatorInterfaceMock::compare(StringData left, StringData right) const {
MONGO_UNREACHABLE;
}
+int CollatorInterfaceMock::compare(const BSONObj& left, const BSONObj& right) const {
+ return left.woCompare(right, BSONObj(), true, this);
+}
+
CollatorInterface::ComparisonKey CollatorInterfaceMock::getComparisonKey(
StringData stringData) const {
switch (_mockType) {
diff --git a/src/mongo/db/query/collation/collator_interface_mock.h b/src/mongo/db/query/collation/collator_interface_mock.h
index 8ef8d63fb83..6ab8a3d072b 100644
--- a/src/mongo/db/query/collation/collator_interface_mock.h
+++ b/src/mongo/db/query/collation/collator_interface_mock.h
@@ -65,6 +65,8 @@ public:
int compare(StringData left, StringData right) const final;
+ int compare(const BSONObj& left, const BSONObj& right) const final;
+
ComparisonKey getComparisonKey(StringData stringData) const final;
private:
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index b3f5508422c..f534f755cf4 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -51,7 +51,7 @@ TEST(CountRequest, ParseDefaults) {
const CountRequest& countRequest = countRequestStatus.getValue();
ASSERT_EQ(countRequest.getNs().ns(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
// Defaults
ASSERT_EQUALS(countRequest.getLimit(), 0);
@@ -84,11 +84,11 @@ TEST(CountRequest, ParseComplete) {
const CountRequest& countRequest = countRequestStatus.getValue();
ASSERT_EQ(countRequest.getNs().ns(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
ASSERT_EQUALS(countRequest.getLimit(), 100);
ASSERT_EQUALS(countRequest.getSkip(), 1000);
- ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
- ASSERT_EQUALS(countRequest.getCollation(), fromjson("{ locale : 'en_US' }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getHint(), fromjson("{ b : 5 }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getCollation(), fromjson("{ locale : 'en_US' }"));
}
TEST(CountRequest, ParseWithExplain) {
@@ -106,7 +106,7 @@ TEST(CountRequest, ParseWithExplain) {
const CountRequest& countRequest = countRequestStatus.getValue();
ASSERT_EQ(countRequest.getNs().ns(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getQuery(), fromjson("{ a : { '$lte' : 10 } }"));
// Defaults
ASSERT_EQUALS(countRequest.getLimit(), 0);
@@ -139,11 +139,11 @@ TEST(CountRequest, ParseNegativeLimit) {
const CountRequest& countRequest = countRequestStatus.getValue();
ASSERT_EQ(countRequest.getNs().ns(), "TestDB.TestColl");
- ASSERT_EQUALS(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getQuery(), fromjson("{ a : { '$gte' : 11 } }"));
ASSERT_EQUALS(countRequest.getLimit(), 100);
ASSERT_EQUALS(countRequest.getSkip(), 1000);
- ASSERT_EQUALS(countRequest.getHint(), fromjson("{ b : 5 }"));
- ASSERT_EQUALS(countRequest.getCollation(), fromjson("{ locale : 'en_US' }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getHint(), fromjson("{ b : 5 }"));
+ ASSERT_BSONOBJ_EQ(countRequest.getCollation(), fromjson("{ locale : 'en_US' }"));
}
TEST(CountRequest, FailParseMissingNS) {
@@ -201,7 +201,7 @@ TEST(CountRequest, ToBSON) {
" hint : { b : 5 },"
" collation : { locale : 'en_US' } },"));
- ASSERT_EQUALS(actualObj, expectedObj);
+ ASSERT_BSONOBJ_EQ(actualObj, expectedObj);
}
} // namespace
diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp
index 711d83d4213..c837cf81aed 100644
--- a/src/mongo/db/query/cursor_response_test.cpp
+++ b/src/mongo/db/query/cursor_response_test.cpp
@@ -50,8 +50,8 @@ TEST(CursorResponseTest, parseFromBSONFirstBatch) {
ASSERT_EQ(response.getCursorId(), CursorId(123));
ASSERT_EQ(response.getNSS().ns(), "db.coll");
ASSERT_EQ(response.getBatch().size(), 2U);
- ASSERT_EQ(response.getBatch()[0], BSON("_id" << 1));
- ASSERT_EQ(response.getBatch()[1], BSON("_id" << 2));
+ ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1));
+ ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2));
}
TEST(CursorResponseTest, parseFromBSONNextBatch) {
@@ -68,8 +68,8 @@ TEST(CursorResponseTest, parseFromBSONNextBatch) {
ASSERT_EQ(response.getCursorId(), CursorId(123));
ASSERT_EQ(response.getNSS().ns(), "db.coll");
ASSERT_EQ(response.getBatch().size(), 2U);
- ASSERT_EQ(response.getBatch()[0], BSON("_id" << 1));
- ASSERT_EQ(response.getBatch()[1], BSON("_id" << 2));
+ ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1));
+ ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2));
}
TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
@@ -86,8 +86,8 @@ TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
ASSERT_EQ(response.getCursorId(), CursorId(0));
ASSERT_EQ(response.getNSS().ns(), "db.coll");
ASSERT_EQ(response.getBatch().size(), 2U);
- ASSERT_EQ(response.getBatch()[0], BSON("_id" << 1));
- ASSERT_EQ(response.getBatch()[1], BSON("_id" << 2));
+ ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1));
+ ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2));
}
TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
@@ -219,7 +219,7 @@ TEST(CursorResponseTest, toBSONInitialResponse) {
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
<< "ok"
<< 1.0);
- ASSERT_EQ(responseObj, expectedResponse);
+ ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
TEST(CursorResponseTest, toBSONSubsequentResponse) {
@@ -233,7 +233,7 @@ TEST(CursorResponseTest, toBSONSubsequentResponse) {
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
<< "ok"
<< 1.0);
- ASSERT_EQ(responseObj, expectedResponse);
+ ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
TEST(CursorResponseTest, addToBSONInitialResponse) {
@@ -251,7 +251,7 @@ TEST(CursorResponseTest, addToBSONInitialResponse) {
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
<< "ok"
<< 1.0);
- ASSERT_EQ(responseObj, expectedResponse);
+ ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
TEST(CursorResponseTest, addToBSONSubsequentResponse) {
@@ -269,7 +269,7 @@ TEST(CursorResponseTest, addToBSONSubsequentResponse) {
<< BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
<< "ok"
<< 1.0);
- ASSERT_EQ(responseObj, expectedResponse);
+ ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
} // namespace
diff --git a/src/mongo/db/query/find_and_modify_request_test.cpp b/src/mongo/db/query/find_and_modify_request_test.cpp
index 5d1ff3b358a..57b3f3bacc0 100644
--- a/src/mongo/db/query/find_and_modify_request_test.cpp
+++ b/src/mongo/db/query/find_and_modify_request_test.cpp
@@ -46,7 +46,7 @@ TEST(FindAndModifyRequest, BasicUpdate) {
update: { y: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithUpsert) {
@@ -62,7 +62,7 @@ TEST(FindAndModifyRequest, UpdateWithUpsert) {
upsert: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithUpsertFalse) {
@@ -78,7 +78,7 @@ TEST(FindAndModifyRequest, UpdateWithUpsertFalse) {
upsert: false
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithProjection) {
@@ -96,7 +96,7 @@ TEST(FindAndModifyRequest, UpdateWithProjection) {
fields: { z: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithNewTrue) {
@@ -113,7 +113,7 @@ TEST(FindAndModifyRequest, UpdateWithNewTrue) {
new: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithNewFalse) {
@@ -130,7 +130,7 @@ TEST(FindAndModifyRequest, UpdateWithNewFalse) {
new: false
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithSort) {
@@ -148,7 +148,7 @@ TEST(FindAndModifyRequest, UpdateWithSort) {
sort: { z: -1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithCollation) {
@@ -167,7 +167,7 @@ TEST(FindAndModifyRequest, UpdateWithCollation) {
collation: { locale: 'en_US' }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithWriteConcern) {
@@ -185,7 +185,7 @@ TEST(FindAndModifyRequest, UpdateWithWriteConcern) {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, UpdateWithFullSpec) {
@@ -217,7 +217,7 @@ TEST(FindAndModifyRequest, UpdateWithFullSpec) {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, BasicRemove) {
@@ -230,7 +230,7 @@ TEST(FindAndModifyRequest, BasicRemove) {
remove: true
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, RemoveWithProjection) {
@@ -247,7 +247,7 @@ TEST(FindAndModifyRequest, RemoveWithProjection) {
fields: { z: 1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, RemoveWithSort) {
@@ -264,7 +264,7 @@ TEST(FindAndModifyRequest, RemoveWithSort) {
sort: { z: -1 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, RemoveWithCollation) {
@@ -282,7 +282,7 @@ TEST(FindAndModifyRequest, RemoveWithCollation) {
collation: { locale: 'en_US' }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, RemoveWithWriteConcern) {
@@ -299,7 +299,7 @@ TEST(FindAndModifyRequest, RemoveWithWriteConcern) {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, RemoveWithFullSpec) {
@@ -326,7 +326,7 @@ TEST(FindAndModifyRequest, RemoveWithFullSpec) {
writeConcern: { w: 2, fsync: true, wtimeout: 150 }
})json"));
- ASSERT_EQUALS(expectedObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedObj, request.toBSON());
}
TEST(FindAndModifyRequest, ParseWithUpdateOnlyRequiredFields) {
@@ -340,13 +340,13 @@ TEST(FindAndModifyRequest, ParseWithUpdateOnlyRequiredFields) {
auto request = parseStatus.getValue();
ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getQuery());
+ ASSERT_BSONOBJ_EQ(BSON("y" << 1), request.getUpdateObj());
ASSERT_EQUALS(false, request.isUpsert());
ASSERT_EQUALS(false, request.isRemove());
- ASSERT_EQUALS(BSONObj(), request.getFields());
- ASSERT_EQUALS(BSONObj(), request.getSort());
- ASSERT_EQUALS(BSONObj(), request.getCollation());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getFields());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getSort());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getCollation());
ASSERT_EQUALS(false, request.shouldReturnNew());
}
@@ -366,15 +366,15 @@ TEST(FindAndModifyRequest, ParseWithUpdateFullSpec) {
auto request = parseStatus.getValue();
ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSON("y" << 1), request.getUpdateObj());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getQuery());
+ ASSERT_BSONOBJ_EQ(BSON("y" << 1), request.getUpdateObj());
ASSERT_EQUALS(true, request.isUpsert());
ASSERT_EQUALS(false, request.isRemove());
- ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
- ASSERT_EQUALS(BSON("z" << -1), request.getSort());
- ASSERT_EQUALS(BSON("locale"
- << "en_US"),
- request.getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1 << "y" << 1), request.getFields());
+ ASSERT_BSONOBJ_EQ(BSON("z" << -1), request.getSort());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ request.getCollation());
ASSERT_EQUALS(true, request.shouldReturnNew());
}
@@ -389,13 +389,13 @@ TEST(FindAndModifyRequest, ParseWithRemoveOnlyRequiredFields) {
auto request = parseStatus.getValue();
ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getQuery());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getUpdateObj());
ASSERT_EQUALS(false, request.isUpsert());
ASSERT_EQUALS(true, request.isRemove());
- ASSERT_EQUALS(BSONObj(), request.getFields());
- ASSERT_EQUALS(BSONObj(), request.getSort());
- ASSERT_EQUALS(BSONObj(), request.getCollation());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getFields());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getSort());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getCollation());
ASSERT_EQUALS(false, request.shouldReturnNew());
}
@@ -414,15 +414,15 @@ TEST(FindAndModifyRequest, ParseWithRemoveFullSpec) {
auto request = parseStatus.getValue();
ASSERT_EQUALS(NamespaceString("a.b").toString(), request.getNamespaceString().toString());
- ASSERT_EQUALS(BSON("x" << 1), request.getQuery());
- ASSERT_EQUALS(BSONObj(), request.getUpdateObj());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getQuery());
+ ASSERT_BSONOBJ_EQ(BSONObj(), request.getUpdateObj());
ASSERT_EQUALS(false, request.isUpsert());
ASSERT_EQUALS(true, request.isRemove());
- ASSERT_EQUALS(BSON("x" << 1 << "y" << 1), request.getFields());
- ASSERT_EQUALS(BSON("z" << -1), request.getSort());
- ASSERT_EQUALS(BSON("locale"
- << "en_US"),
- request.getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1 << "y" << 1), request.getFields());
+ ASSERT_BSONOBJ_EQ(BSON("z" << -1), request.getSort());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ request.getCollation());
ASSERT_EQUALS(false, request.shouldReturnNew());
}
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index 94dfb9ba366..828a434658d 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -217,7 +217,7 @@ TEST(GetMoreRequestTest, toBSONHasBatchSize) {
<< "testcoll"
<< "batchSize"
<< 99);
- ASSERT_EQ(requestObj, expectedRequest);
+ ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
TEST(GetMoreRequestTest, toBSONMissingMatchSize) {
@@ -230,7 +230,7 @@ TEST(GetMoreRequestTest, toBSONMissingMatchSize) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll");
- ASSERT_EQ(requestObj, expectedRequest);
+ ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
TEST(GetMoreRequestTest, toBSONHasTerm) {
@@ -243,7 +243,7 @@ TEST(GetMoreRequestTest, toBSONHasTerm) {
<< 99
<< "term"
<< 1);
- ASSERT_EQ(requestObj, expectedRequest);
+ ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
TEST(GetMoreRequestTest, toBSONHasCommitLevel) {
@@ -262,7 +262,7 @@ TEST(GetMoreRequestTest, toBSONHasCommitLevel) {
<< 1
<< "lastKnownCommittedOpTime"
<< BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
- ASSERT_EQ(requestObj, expectedRequest);
+ ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
TEST(GetMoreRequestTest, toBSONHasMaxTimeMS) {
@@ -277,7 +277,7 @@ TEST(GetMoreRequestTest, toBSONHasMaxTimeMS) {
<< "testcoll"
<< "maxTimeMS"
<< 789);
- ASSERT_EQ(requestObj, expectedRequest);
+ ASSERT_BSONOBJ_EQ(requestObj, expectedRequest);
}
} // namespace
diff --git a/src/mongo/db/query/index_bounds.cpp b/src/mongo/db/query/index_bounds.cpp
index 85708c6c09c..065d53ba510 100644
--- a/src/mongo/db/query/index_bounds.cpp
+++ b/src/mongo/db/query/index_bounds.cpp
@@ -32,6 +32,8 @@
#include <tuple>
#include <utility>
+#include "mongo/bson/simple_bsonobj_comparator.h"
+
namespace mongo {
using std::string;
@@ -100,8 +102,9 @@ bool IndexBounds::operator==(const IndexBounds& other) const {
}
if (this->isSimpleRange) {
- return std::tie(this->startKey, this->endKey, this->endKeyInclusive) ==
- std::tie(other.startKey, other.endKey, other.endKeyInclusive);
+ return SimpleBSONObjComparator::kInstance.evaluate(this->startKey == other.startKey) &&
+ SimpleBSONObjComparator::kInstance.evaluate(this->endKey == other.endKey) &&
+ (this->endKeyInclusive == other.endKeyInclusive);
}
if (this->fields.size() != other.fields.size()) {
diff --git a/src/mongo/db/query/killcursors_request_test.cpp b/src/mongo/db/query/killcursors_request_test.cpp
index 19c220a7408..8a2fda62b3c 100644
--- a/src/mongo/db/query/killcursors_request_test.cpp
+++ b/src/mongo/db/query/killcursors_request_test.cpp
@@ -133,7 +133,7 @@ TEST(KillCursorsRequestTest, toBSON) {
<< "coll"
<< "cursors"
<< BSON_ARRAY(CursorId(123) << CursorId(456)));
- ASSERT_EQ(requestObj, expectedObj);
+ ASSERT_BSONOBJ_EQ(requestObj, expectedObj);
}
} // namespace
diff --git a/src/mongo/db/query/killcursors_response_test.cpp b/src/mongo/db/query/killcursors_response_test.cpp
index 0fe3d996edf..afd9997ea39 100644
--- a/src/mongo/db/query/killcursors_response_test.cpp
+++ b/src/mongo/db/query/killcursors_response_test.cpp
@@ -122,7 +122,7 @@ TEST(KillCursorsResponseTest, toBSON) {
<< BSONArray()
<< "ok"
<< 1.0);
- ASSERT_EQ(responseObj, expectedResponse);
+ ASSERT_BSONOBJ_EQ(responseObj, expectedResponse);
}
} // namespace
diff --git a/src/mongo/db/query/parsed_projection.cpp b/src/mongo/db/query/parsed_projection.cpp
index 4baffb815d4..572b1cf57db 100644
--- a/src/mongo/db/query/parsed_projection.cpp
+++ b/src/mongo/db/query/parsed_projection.cpp
@@ -30,6 +30,8 @@
#include "mongo/db/query/query_request.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
+
namespace mongo {
using std::unique_ptr;
@@ -286,8 +288,9 @@ Status ParsedProjection::make(const BSONObj& spec,
// $meta sortKey should not be checked as a part of _requiredFields, since it can
// potentially produce a covered projection as long as the sort key is covered.
if (BSONType::Object == elt.type()) {
- dassert(elt.Obj() == BSON("$meta"
- << "sortKey"));
+ dassert(
+ SimpleBSONObjComparator::kInstance.evaluate(elt.Obj() == BSON("$meta"
+ << "sortKey")));
continue;
}
if (elt.trueValue()) {
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index b1b1b59324f..0aa4b4ffc74 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -216,7 +216,7 @@ TEST(ParsedProjectionTest, ParsedProjectionDefaults) {
TEST(ParsedProjectionTest, SortKeyMetaProjection) {
auto parsedProjection = createParsedProjection("{}", "{foo: {$meta: 'sortKey'}}");
- ASSERT_EQ(parsedProjection->getProjObj(), fromjson("{foo: {$meta: 'sortKey'}}"));
+ ASSERT_BSONOBJ_EQ(parsedProjection->getProjObj(), fromjson("{foo: {$meta: 'sortKey'}}"));
ASSERT_TRUE(parsedProjection->wantSortKey());
ASSERT_TRUE(parsedProjection->requiresDocument());
@@ -229,7 +229,8 @@ TEST(ParsedProjectionTest, SortKeyMetaProjection) {
TEST(ParsedProjectionTest, SortKeyMetaProjectionCovered) {
auto parsedProjection = createParsedProjection("{}", "{a: 1, foo: {$meta: 'sortKey'}, _id: 0}");
- ASSERT_EQ(parsedProjection->getProjObj(), fromjson("{a: 1, foo: {$meta: 'sortKey'}, _id: 0}"));
+ ASSERT_BSONOBJ_EQ(parsedProjection->getProjObj(),
+ fromjson("{a: 1, foo: {$meta: 'sortKey'}, _id: 0}"));
ASSERT_TRUE(parsedProjection->wantSortKey());
ASSERT_FALSE(parsedProjection->requiresDocument());
@@ -243,8 +244,8 @@ TEST(ParsedProjectionTest, SortKeyMetaAndSlice) {
auto parsedProjection =
createParsedProjection("{}", "{a: 1, foo: {$meta: 'sortKey'}, _id: 0, b: {$slice: 1}}");
- ASSERT_EQ(parsedProjection->getProjObj(),
- fromjson("{a: 1, foo: {$meta: 'sortKey'}, _id: 0, b: {$slice: 1}}"));
+ ASSERT_BSONOBJ_EQ(parsedProjection->getProjObj(),
+ fromjson("{a: 1, foo: {$meta: 'sortKey'}, _id: 0, b: {$slice: 1}}"));
ASSERT_TRUE(parsedProjection->wantSortKey());
ASSERT_TRUE(parsedProjection->requiresDocument());
@@ -258,8 +259,8 @@ TEST(ParsedProjectionTest, SortKeyMetaAndElemMatch) {
auto parsedProjection = createParsedProjection(
"{}", "{a: 1, foo: {$meta: 'sortKey'}, _id: 0, b: {$elemMatch: {a: 1}}}");
- ASSERT_EQ(parsedProjection->getProjObj(),
- fromjson("{a: 1, foo: {$meta: 'sortKey'}, _id: 0, b: {$elemMatch: {a: 1}}}"));
+ ASSERT_BSONOBJ_EQ(parsedProjection->getProjObj(),
+ fromjson("{a: 1, foo: {$meta: 'sortKey'}, _id: 0, b: {$elemMatch: {a: 1}}}"));
ASSERT_TRUE(parsedProjection->wantSortKey());
ASSERT_TRUE(parsedProjection->requiresDocument());
@@ -272,7 +273,8 @@ TEST(ParsedProjectionTest, SortKeyMetaAndElemMatch) {
TEST(ParsedProjectionTest, SortKeyMetaAndExclusion) {
auto parsedProjection = createParsedProjection("{}", "{a: 0, foo: {$meta: 'sortKey'}, _id: 0}");
- ASSERT_EQ(parsedProjection->getProjObj(), fromjson("{a: 0, foo: {$meta: 'sortKey'}, _id: 0}"));
+ ASSERT_BSONOBJ_EQ(parsedProjection->getProjObj(),
+ fromjson("{a: 0, foo: {$meta: 'sortKey'}, _id: 0}"));
ASSERT_TRUE(parsedProjection->wantSortKey());
ASSERT_TRUE(parsedProjection->requiresDocument());
diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp
index a47e6780787..d01f9b72dcb 100644
--- a/src/mongo/db/query/planner_analysis_test.cpp
+++ b/src/mongo/db/query/planner_analysis_test.cpp
@@ -41,63 +41,72 @@ using namespace mongo;
namespace {
TEST(QueryPlannerAnalysis, GetSortPatternBasic) {
- ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1}")));
- ASSERT_EQUALS(fromjson("{a: -1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: -1}")));
- ASSERT_EQUALS(fromjson("{a: -1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 1}")));
- ASSERT_EQUALS(fromjson("{a: -1, b: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: -1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1, b: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: -1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: -1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: -1, b: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: -1}")));
}
TEST(QueryPlannerAnalysis, GetSortPatternOtherElements) {
- ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 0}")));
- ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 100}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: Infinity}")));
- ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: true}")));
- ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: false}")));
- ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: []}")));
- ASSERT_EQUALS(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: {}}")));
-
- ASSERT_EQUALS(fromjson("{a: -1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: -100}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -Infinity}")));
-
- ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 0}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 100}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: Infinity}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: true}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: false}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: []}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: {}}")));
+
+ ASSERT_BSONOBJ_EQ(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -100}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -Infinity}")));
+
+ ASSERT_BSONOBJ_EQ(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{}")));
}
TEST(QueryPlannerAnalysis, GetSortPatternSpecialIndexTypes) {
- ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'hashed'}")));
- ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'text'}")));
- ASSERT_EQUALS(fromjson("{}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: '2dsphere'}")));
- ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: ''}")));
- ASSERT_EQUALS(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'foo'}")));
-
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: -1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: '2dsphere'}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere'}")));
-
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
- ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere',"
- " c: 1}")));
-
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
- ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text',"
- " d: 1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'hashed'}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'text'}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: '2dsphere'}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: ''}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{}"), QueryPlannerAnalysis::getSortPattern(fromjson("{a: 'foo'}")));
+
+ ASSERT_BSONOBJ_EQ(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: 'text'}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: -1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: -1, b: '2dsphere'}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text'}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere'}")));
+
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere',"
+ " c: 1}")));
+
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
+ ASSERT_BSONOBJ_EQ(fromjson("{a: 1, b: 1}"),
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text',"
+ " d: 1}")));
}
// Test the generation of sort orders provided by an index scan done by
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index 9e71f1d0f8c..34b4226ee6e 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/query/query_planner_test_lib.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
@@ -270,7 +271,8 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
if (!pattern.isABSONObj()) {
return false;
}
- if (pattern.Obj() != ixn->index.keyPattern) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(pattern.Obj() !=
+ ixn->index.keyPattern)) {
return false;
}
}
@@ -330,7 +332,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
return false;
}
BSONObj geoObj = el.Obj();
- return geoObj == node->index.keyPattern;
+ return SimpleBSONObjComparator::kInstance.evaluate(geoObj == node->index.keyPattern);
} else if (STAGE_GEO_NEAR_2DSPHERE == trueSoln->getType()) {
const GeoNear2DSphereNode* node = static_cast<const GeoNear2DSphereNode*>(trueSoln);
BSONElement el = testSoln["geoNear2dsphere"];
@@ -343,7 +345,7 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
if (pattern.eoo() || !pattern.isABSONObj()) {
return false;
}
- if (pattern.Obj() != node->index.keyPattern) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(pattern.Obj() != node->index.keyPattern)) {
return false;
}
@@ -564,7 +566,8 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
return false;
}
- return (spec.Obj() == pn->projection) && solutionMatches(child.Obj(), pn->children[0]);
+ return SimpleBSONObjComparator::kInstance.evaluate(spec.Obj() == pn->projection) &&
+ solutionMatches(child.Obj(), pn->children[0]);
} else if (STAGE_SORT == trueSoln->getType()) {
const SortNode* sn = static_cast<const SortNode*>(trueSoln);
BSONElement el = testSoln["sort"];
@@ -587,8 +590,8 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
}
size_t expectedLimit = limitEl.numberInt();
- return (patternEl.Obj() == sn->pattern) && (expectedLimit == sn->limit) &&
- solutionMatches(child.Obj(), sn->children[0]);
+ return SimpleBSONObjComparator::kInstance.evaluate(patternEl.Obj() == sn->pattern) &&
+ (expectedLimit == sn->limit) && solutionMatches(child.Obj(), sn->children[0]);
} else if (STAGE_SORT_KEY_GENERATOR == trueSoln->getType()) {
const SortKeyGeneratorNode* keyGenNode = static_cast<const SortKeyGeneratorNode*>(trueSoln);
BSONElement el = testSoln["sortKeyGen"];
@@ -696,7 +699,8 @@ bool QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln,
return false;
}
- return (patternEl.Obj() == esn->pattern) && solutionMatches(child.Obj(), esn->children[0]);
+ return SimpleBSONObjComparator::kInstance.evaluate(patternEl.Obj() == esn->pattern) &&
+ solutionMatches(child.Obj(), esn->children[0]);
}
return false;
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index 96129eb0d52..b0aa759e853 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -32,6 +32,7 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/dbmessage.h"
#include "mongo/db/namespace_string.h"
@@ -586,7 +587,8 @@ Status QueryRequest::validate() const {
if (_tailable) {
// Tailable cursors cannot have any sort other than {$natural: 1}.
const BSONObj expectedSort = BSON("$natural" << 1);
- if (!_sort.isEmpty() && _sort != expectedSort) {
+ if (!_sort.isEmpty() &&
+ SimpleBSONObjComparator::kInstance.evaluate(_sort != expectedSort)) {
return Status(ErrorCodes::BadValue,
"cannot use tailable option with a sort other than {$natural: 1}");
}
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 6fa047efdca..f5fcc6b5245 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -231,7 +231,7 @@ TEST(QueryRequestTest, AllowTailableWithNaturalSort) {
auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
ASSERT_OK(result.getStatus());
ASSERT_TRUE(result.getValue()->isTailable());
- ASSERT_EQ(result.getValue()->getSort(), BSON("$natural" << 1));
+ ASSERT_BSONOBJ_EQ(result.getValue()->getSort(), BSON("$natural" << 1));
}
TEST(QueryRequestTest, IsIsolatedReturnsTrueWithIsolated) {
@@ -398,9 +398,9 @@ TEST(QueryRequestTest, ParseFromCommandHintAsString) {
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
BSONObj hintObj = qr->getHint();
- ASSERT_EQUALS(BSON("$hint"
- << "foo_1"),
- hintObj);
+ ASSERT_BSONOBJ_EQ(BSON("$hint"
+ << "foo_1"),
+ hintObj);
}
TEST(QueryRequestTest, ParseFromCommandValidSortProj) {
@@ -824,7 +824,7 @@ TEST(QueryRequestTest, ParseFromCommandSkipIsZero) {
bool isExplain = false;
unique_ptr<QueryRequest> qr(
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_EQ(BSON("a" << 3), qr->getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 3), qr->getFilter());
ASSERT_FALSE(qr->getSkip());
}
@@ -848,7 +848,7 @@ TEST(QueryRequestTest, ParseFromCommandLimitIsZero) {
bool isExplain = false;
unique_ptr<QueryRequest> qr(
assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_EQ(BSON("a" << 3), qr->getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 3), qr->getFilter());
ASSERT_FALSE(qr->getLimit());
}
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index 610dbad4cf7..e6480598f80 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -263,7 +263,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
_net->scheduleResponse(
@@ -316,7 +316,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
@@ -376,7 +376,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h4", 1)) {
@@ -442,8 +442,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == incompatibleHost) {
@@ -519,7 +519,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
@@ -585,7 +585,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
@@ -646,7 +646,7 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToNodeWithData) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
ReplSetHeartbeatResponse hbResp;
@@ -700,7 +700,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h1", 1)) {
@@ -755,7 +755,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h2", 1)) {
@@ -821,7 +821,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h5", 1)) {
@@ -882,7 +882,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h5", 1)) {
@@ -943,7 +943,7 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(hbRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(hbRequest, request.cmdObj);
ASSERT(seenHosts.insert(request.target).second) << "Already saw "
<< request.target.toString();
if (request.target == HostAndPort("h1", 1) || request.target == HostAndPort("h2", 1)) {
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index acab069829d..a92ebf3e674 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -444,10 +444,10 @@ TEST_F(CollectionClonerTest, BeginCollection) {
ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
ASSERT_EQUALS(nss.ns(), collNss.ns());
- ASSERT_EQUALS(options.toBSON(), collOptions.toBSON());
+ ASSERT_BSONOBJ_EQ(options.toBSON(), collOptions.toBSON());
ASSERT_EQUALS(specs.size(), collIndexSpecs.size());
for (std::vector<BSONObj>::size_type i = 0; i < specs.size(); ++i) {
- ASSERT_EQUALS(specs[i], collIndexSpecs[i]);
+ ASSERT_BSONOBJ_EQ(specs[i], collIndexSpecs[i]);
}
// Cloner is still active because it has to read the documents from the source collection.
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index 844f2ab8c82..3a101edf49c 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -36,6 +36,7 @@
#include "mongo/base/counter.h"
#include "mongo/base/status.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/fetcher.h"
#include "mongo/client/remote_command_retry_scheduler.h"
#include "mongo/db/commands/server_status_metric.h"
@@ -1070,7 +1071,7 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() {
// Apply commands one-at-a-time.
ops.push_back(std::move(entry));
invariant(_oplogBuffer->tryPop(txn.get(), &op));
- dassert(ops.back().raw == op);
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(ops.back().raw == op));
}
// Otherwise, apply what we have so far and come back for the command.
@@ -1113,7 +1114,7 @@ StatusWith<Operations> DataReplicator::_getNextApplierBatch_inlock() {
ops.push_back(std::move(entry));
totalBytes += ops.back().raw.objsize();
invariant(_oplogBuffer->tryPop(txn.get(), &op));
- dassert(ops.back().raw == op);
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(ops.back().raw == op));
}
return std::move(ops);
}
diff --git a/src/mongo/db/repl/data_replicator_test.cpp b/src/mongo/db/repl/data_replicator_test.cpp
index 38cadfa4620..7d4145efe05 100644
--- a/src/mongo/db/repl/data_replicator_test.cpp
+++ b/src/mongo/db/repl/data_replicator_test.cpp
@@ -1309,10 +1309,10 @@ TEST_F(InitialSyncTest, InitialSyncStateIsResetAfterFailure) {
ASSERT_EQUALS(progress.getIntField("failedInitialSyncAttempts"), 0);
ASSERT_EQUALS(progress.getIntField("maxFailedInitialSyncAttempts"), 2);
ASSERT_EQUALS(progress["initialSyncStart"].type(), Date);
- ASSERT_EQUALS(progress.getObjectField("initialSyncAttempts"), BSONObj());
+ ASSERT_BSONOBJ_EQ(progress.getObjectField("initialSyncAttempts"), BSONObj());
ASSERT_EQUALS(progress.getIntField("fetchedMissingDocs"), 0);
ASSERT_EQUALS(progress.getIntField("appliedOps"), 0);
- ASSERT_EQUALS(progress.getObjectField("databases"), BSON("databasesCloned" << 0));
+ ASSERT_BSONOBJ_EQ(progress.getObjectField("databases"), BSON("databasesCloned" << 0));
// Play rest of the failed round of responses.
setResponses({failedResponses.begin() + 1, failedResponses.end()});
@@ -1331,7 +1331,7 @@ TEST_F(InitialSyncTest, InitialSyncStateIsResetAfterFailure) {
ASSERT_EQUALS(progress["initialSyncStart"].type(), Date);
ASSERT_EQUALS(progress.getIntField("fetchedMissingDocs"), 0);
ASSERT_EQUALS(progress.getIntField("appliedOps"), 0);
- ASSERT_EQUALS(progress.getObjectField("databases"), BSON("databasesCloned" << 0));
+ ASSERT_BSONOBJ_EQ(progress.getObjectField("databases"), BSON("databasesCloned" << 0));
BSONObj attempts = progress["initialSyncAttempts"].Obj();
ASSERT_EQUALS(attempts.nFields(), 1);
@@ -1354,13 +1354,14 @@ TEST_F(InitialSyncTest, InitialSyncStateIsResetAfterFailure) {
ASSERT_EQUALS(progress["initialSyncStart"].type(), Date);
ASSERT_EQUALS(progress.getIntField("fetchedMissingDocs"), 0);
ASSERT_EQUALS(progress.getIntField("appliedOps"), 4);
- ASSERT_EQUALS(progress.getObjectField("databases"),
- fromjson(str::stream() << "{databasesCloned: 1, a: {collections: 1, "
- "clonedCollections: 1, start: new Date(1406851200000), "
- "end: new Date(1406851200000), elapsedMillis: 0, "
- "'a.a': {documents: 1, indexes: 1, fetchedBatches: 1, "
- "start: new Date(1406851200000), end: new "
- "Date(1406851200000), elapsedMillis: 0}}}"));
+ ASSERT_BSONOBJ_EQ(progress.getObjectField("databases"),
+ fromjson(str::stream()
+ << "{databasesCloned: 1, a: {collections: 1, "
+ "clonedCollections: 1, start: new Date(1406851200000), "
+ "end: new Date(1406851200000), elapsedMillis: 0, "
+ "'a.a': {documents: 1, indexes: 1, fetchedBatches: 1, "
+ "start: new Date(1406851200000), end: new "
+ "Date(1406851200000), elapsedMillis: 0}}}"));
attempts = progress["initialSyncAttempts"].Obj();
ASSERT_EQUALS(attempts.nFields(), 1);
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index 27f345e5443..73cda0fc674 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -201,7 +201,7 @@ TEST_F(DatabaseClonerTest, FirstRemoteCommandWithoutFilter) {
ASSERT_TRUE(noiRequest.cmdObj.hasField("filter"));
BSONElement filterElement = noiRequest.cmdObj.getField("filter");
ASSERT_TRUE(filterElement.isABSONObj());
- ASSERT_EQUALS(ListCollectionsFilter::makeTypeCollectionFilter(), filterElement.Obj());
+ ASSERT_BSONOBJ_EQ(ListCollectionsFilter::makeTypeCollectionFilter(), filterElement.Obj());
ASSERT_FALSE(net->hasReadyRequests());
ASSERT_TRUE(_databaseCloner->isActive());
}
@@ -234,8 +234,8 @@ TEST_F(DatabaseClonerTest, FirstRemoteCommandWithFilter) {
ASSERT_EQUALS(1, noiRequest.cmdObj.firstElement().numberInt());
BSONElement filterElement = noiRequest.cmdObj.getField("filter");
ASSERT_TRUE(filterElement.isABSONObj());
- ASSERT_EQUALS(ListCollectionsFilter::addTypeCollectionFilter(listCollectionsFilter),
- filterElement.Obj());
+ ASSERT_BSONOBJ_EQ(ListCollectionsFilter::addTypeCollectionFilter(listCollectionsFilter),
+ filterElement.Obj());
ASSERT_FALSE(net->hasReadyRequests());
ASSERT_TRUE(_databaseCloner->isActive());
}
@@ -320,8 +320,8 @@ TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
const std::vector<BSONObj>& collectionInfos = _databaseCloner->getCollectionInfos_forTest();
ASSERT_EQUALS(2U, collectionInfos.size());
- ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
- ASSERT_EQUALS(sourceInfos[2], collectionInfos[1]);
+ ASSERT_BSONOBJ_EQ(sourceInfos[0], collectionInfos[0]);
+ ASSERT_BSONOBJ_EQ(sourceInfos[2], collectionInfos[1]);
}
TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
@@ -346,7 +346,7 @@ TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
{
const std::vector<BSONObj>& collectionInfos = _databaseCloner->getCollectionInfos_forTest();
ASSERT_EQUALS(1U, collectionInfos.size());
- ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
+ ASSERT_BSONOBJ_EQ(sourceInfos[0], collectionInfos[0]);
}
{
@@ -361,8 +361,8 @@ TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
{
const std::vector<BSONObj>& collectionInfos = _databaseCloner->getCollectionInfos_forTest();
ASSERT_EQUALS(2U, collectionInfos.size());
- ASSERT_EQUALS(sourceInfos[0], collectionInfos[0]);
- ASSERT_EQUALS(sourceInfos[1], collectionInfos[1]);
+ ASSERT_BSONOBJ_EQ(sourceInfos[0], collectionInfos[0]);
+ ASSERT_BSONOBJ_EQ(sourceInfos[1], collectionInfos[1]);
}
}
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index 593b1f10fc7..680689e713d 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -199,7 +199,7 @@ TEST_F(ElectCmdRunnerTest, TwoNodes) {
_net->enterNetwork();
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(stripRound(electRequest), stripRound(noi->getRequest().cmdObj));
+ ASSERT_BSONOBJ_EQ(stripRound(electRequest), stripRound(noi->getRequest().cmdObj));
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(noi,
startDate + Milliseconds(10),
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index 2aef6f626df..c9e695f04dd 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -181,7 +181,7 @@ TEST_F(FreshnessCheckerTest, TwoNodes) {
for (size_t i = 0; i < hosts.size(); ++i) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
@@ -252,7 +252,7 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshest) {
for (size_t i = 0; i < hosts.size(); ++i) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
@@ -304,7 +304,7 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTime) {
for (size_t i = 0; i < hosts.size(); ++i) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
@@ -354,7 +354,7 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
for (size_t i = 0; i < hosts.size(); ++i) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
@@ -407,7 +407,7 @@ TEST_F(FreshnessCheckerTest, ElectVetoed) {
for (size_t i = 0; i < hosts.size(); ++i) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
_net->scheduleResponse(
noi,
@@ -482,7 +482,7 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestManyNodes) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const HostAndPort target = noi->getRequest().target;
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT(seen.insert(target).second) << "Already saw " << target;
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
@@ -545,7 +545,7 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTimeManyNodes
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const HostAndPort target = noi->getRequest().target;
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT(seen.insert(target).second) << "Already saw " << target;
BSONObjBuilder responseBuilder;
if (target.host() == "h4") {
@@ -614,7 +614,7 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponseManyNodes) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const HostAndPort target = noi->getRequest().target;
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT(seen.insert(target).second) << "Already saw " << target;
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
@@ -676,7 +676,7 @@ TEST_F(FreshnessCheckerTest, ElectVetoedManyNodes) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const HostAndPort target = noi->getRequest().target;
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT(seen.insert(target).second) << "Already saw " << target;
BSONObjBuilder responseBuilder;
responseBuilder << "ok" << 1 << "id" << findIdForMember(config, target) << "set"
@@ -742,7 +742,7 @@ TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const HostAndPort target = noi->getRequest().target;
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT(seen.insert(target).second) << "Already saw " << target;
BSONObjBuilder responseBuilder;
if (target.host() == "h4") {
@@ -817,7 +817,7 @@ TEST_F(FreshnessCheckerTest, ElectManyNodesNotAllRespond) {
const NetworkInterfaceMock::NetworkOperationIterator noi = _net->getNextReadyRequest();
const HostAndPort target = noi->getRequest().target;
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(freshRequest, noi->getRequest().cmdObj);
ASSERT(seen.insert(target).second) << "Already saw " << target;
if (target.host() == "h2" || target.host() == "h3") {
_net->scheduleResponse(noi,
diff --git a/src/mongo/db/repl/multiapplier_test.cpp b/src/mongo/db/repl/multiapplier_test.cpp
index e598da3d71e..4ff21c99ae9 100644
--- a/src/mongo/db/repl/multiapplier_test.cpp
+++ b/src/mongo/db/repl/multiapplier_test.cpp
@@ -171,7 +171,7 @@ TEST_F(MultiApplierTest, MultiApplierInvokesCallbackWithCallbackCanceledStatusUp
ASSERT_EQUALS(ErrorCodes::CallbackCanceled, callbackResult);
ASSERT_EQUALS(1U, operationsApplied.size());
- ASSERT_EQUALS(operations[0].raw, operationsApplied[0].raw);
+ ASSERT_BSONOBJ_EQ(operations[0].raw, operationsApplied[0].raw);
}
TEST_F(MultiApplierTest, MultiApplierPassesMultiApplyErrorToCallback) {
@@ -208,7 +208,7 @@ TEST_F(MultiApplierTest, MultiApplierPassesMultiApplyErrorToCallback) {
ASSERT_EQUALS(multiApplyError, callbackResult);
ASSERT_EQUALS(1U, operationsApplied.size());
- ASSERT_EQUALS(operations[0].raw, operationsApplied[0].raw);
+ ASSERT_BSONOBJ_EQ(operations[0].raw, operationsApplied[0].raw);
}
TEST_F(MultiApplierTest, MultiApplierCatchesMultiApplyExceptionAndConvertsToCallbackStatus) {
@@ -246,7 +246,7 @@ TEST_F(MultiApplierTest, MultiApplierCatchesMultiApplyExceptionAndConvertsToCall
ASSERT_EQUALS(multiApplyError, callbackResult);
ASSERT_EQUALS(1U, operationsApplied.size());
- ASSERT_EQUALS(operations[0].raw, operationsApplied[0].raw);
+ ASSERT_BSONOBJ_EQ(operations[0].raw, operationsApplied[0].raw);
}
TEST_F(
@@ -286,12 +286,12 @@ TEST_F(
ASSERT_TRUE(multiApplyTxn);
ASSERT_EQUALS(1U, operationsToApply.size());
- ASSERT_EQUALS(operations[0].raw, operationsToApply[0].raw);
+ ASSERT_BSONOBJ_EQ(operations[0].raw, operationsToApply[0].raw);
ASSERT_OK(callbackResult);
ASSERT_EQUALS(operations.back().getOpTime().getTimestamp(), callbackResult.getValue());
ASSERT_EQUALS(1U, operationsApplied.size());
- ASSERT_EQUALS(operations[0].raw, operationsApplied[0].raw);
+ ASSERT_BSONOBJ_EQ(operations[0].raw, operationsApplied[0].raw);
ASSERT_FALSE(callbackTxn);
}
diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
index 053f1084e7f..edd1a2fbcea 100644
--- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp
+++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp
@@ -175,7 +175,7 @@ TEST_F(OplogBufferCollectionTest, extractEmbeddedOplogDocumentChangesIdToTimesta
const BSONObj expectedOp = makeOplogEntry(1);
BSONObj originalOp = BSON("_id" << Timestamp(1, 1) << "entry" << expectedOp);
- ASSERT_EQUALS(expectedOp, OplogBufferCollection::extractEmbeddedOplogDocument(originalOp));
+ ASSERT_BSONOBJ_EQ(expectedOp, OplogBufferCollection::extractEmbeddedOplogDocument(originalOp));
}
TEST_F(OplogBufferCollectionTest, addIdToDocumentChangesTimestampToId) {
@@ -185,7 +185,7 @@ TEST_F(OplogBufferCollectionTest, addIdToDocumentChangesTimestampToId) {
const BSONObj originalOp = makeOplogEntry(1);
BSONObj expectedOp = BSON("_id" << Timestamp(1, 1) << "entry" << originalOp);
auto testOpPair = OplogBufferCollection::addIdToDocument(originalOp);
- ASSERT_EQUALS(expectedOp, testOpPair.first);
+ ASSERT_BSONOBJ_EQ(expectedOp, testOpPair.first);
ASSERT_EQUALS(Timestamp(1, 1), testOpPair.second);
}
@@ -202,9 +202,9 @@ TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushAllNonBlockingAddsDocum
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[0],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[0],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
}
@@ -222,9 +222,9 @@ TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushAddsDocument) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog,
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog,
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
}
@@ -245,9 +245,9 @@ TEST_F(OplogBufferCollectionTest, PushOneDocumentWithPushEvenIfFullAddsDocument)
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog,
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog,
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
}
@@ -264,15 +264,15 @@ TEST_F(OplogBufferCollectionTest, PeekDoesNotRemoveDocument) {
BSONObj doc;
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog);
+ ASSERT_BSONOBJ_EQ(doc, oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog,
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog,
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
}
@@ -308,7 +308,7 @@ TEST_F(OplogBufferCollectionTest, PopRemovesDocument) {
BSONObj doc;
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog);
+ ASSERT_BSONOBJ_EQ(doc, oplog);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
{
@@ -352,41 +352,41 @@ TEST_F(OplogBufferCollectionTest, PopAndPeekReturnDocumentsInOrder) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[2],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
- ASSERT_EQUALS(oplog[1],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
- ASSERT_EQUALS(oplog[0],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[2],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[1],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[0],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
BSONObj doc;
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[0]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[0]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[2]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[2]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[2]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[2]);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
}
@@ -403,7 +403,7 @@ TEST_F(OplogBufferCollectionTest, LastObjectPushedReturnsNewestOplogEntry) {
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
auto doc = oplogBuffer.lastObjectPushed(_txn.get());
- ASSERT_EQUALS(*doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(*doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
}
@@ -477,7 +477,7 @@ TEST_F(OplogBufferCollectionTest, BlockingPopBlocksAndRemovesDocument) {
oplogBuffer.push(_txn.get(), oplog);
poppingThread.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
- ASSERT_EQUALS(doc, oplog);
+ ASSERT_BSONOBJ_EQ(doc, oplog);
ASSERT_EQUALS(count, 0UL);
}
@@ -511,9 +511,11 @@ TEST_F(OplogBufferCollectionTest, TwoBlockingPopsBlockAndRemoveDocuments) {
poppingThread1.join();
poppingThread2.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_NOT_EQUALS(doc1, doc2);
- ASSERT_TRUE(doc1 == oplog[0] || doc1 == oplog[1]);
- ASSERT_TRUE(doc2 == oplog[0] || doc2 == oplog[1]);
+ ASSERT_BSONOBJ_NE(doc1, doc2);
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(doc1 == oplog[0]) ||
+ SimpleBSONObjComparator::kInstance.evaluate(doc1 == oplog[1]));
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(doc2 == oplog[0]) ||
+ SimpleBSONObjComparator::kInstance.evaluate(doc2 == oplog[1]));
}
TEST_F(OplogBufferCollectionTest, BlockingPeekBlocksAndFindsDocument) {
@@ -540,7 +542,7 @@ TEST_F(OplogBufferCollectionTest, BlockingPeekBlocksAndFindsDocument) {
peekingThread.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(success);
- ASSERT_EQUALS(doc, oplog);
+ ASSERT_BSONOBJ_EQ(doc, oplog);
ASSERT_EQUALS(count, 1UL);
}
@@ -580,10 +582,10 @@ TEST_F(OplogBufferCollectionTest, TwoBlockingPeeksBlockAndFindSameDocument) {
peekingThread2.join();
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(success1);
- ASSERT_EQUALS(doc1, oplog);
+ ASSERT_BSONOBJ_EQ(doc1, oplog);
ASSERT_EQUALS(count1, 1UL);
ASSERT_TRUE(success2);
- ASSERT_EQUALS(doc2, oplog);
+ ASSERT_BSONOBJ_EQ(doc2, oplog);
ASSERT_EQUALS(count2, 1UL);
}
@@ -640,12 +642,12 @@ TEST_F(OplogBufferCollectionTest, PushPushesonSentinelsProperly) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[4],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
- ASSERT_EQUALS(oplog[1],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[4],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[1],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
}
@@ -680,12 +682,12 @@ TEST_F(OplogBufferCollectionTest, PushEvenIfFullPushesOnSentinelsProperly) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[4],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
- ASSERT_EQUALS(oplog[1],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[4],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[1],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
}
@@ -722,15 +724,15 @@ TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[3],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
- ASSERT_EQUALS(oplog[1],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
- ASSERT_EQUALS(oplog[0],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[3],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[1],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[0],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -740,19 +742,19 @@ TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) {
BSONObj doc;
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[0]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 4UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[0]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 3UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
@@ -767,11 +769,11 @@ TEST_F(OplogBufferCollectionTest, SentinelInMiddleIsReturnedInOrder) {
ASSERT_EQUALS(sentinels.size(), 0UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[3]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[3]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[3]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[3]);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
}
@@ -789,9 +791,9 @@ TEST_F(OplogBufferCollectionTest, SentinelAtBeginningIsReturnedAtBeginning) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[1],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[1],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -809,11 +811,11 @@ TEST_F(OplogBufferCollectionTest, SentinelAtBeginningIsReturnedAtBeginning) {
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 0UL);
}
@@ -831,9 +833,9 @@ TEST_F(OplogBufferCollectionTest, SentinelAtEndIsReturnedAtEnd) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[0],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[0],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -843,11 +845,11 @@ TEST_F(OplogBufferCollectionTest, SentinelAtEndIsReturnedAtEnd) {
BSONObj doc;
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[0]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[0]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[0]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
@@ -879,12 +881,12 @@ TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) {
{
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(oplog[4],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
- ASSERT_EQUALS(oplog[1],
- OplogBufferCollection::extractEmbeddedOplogDocument(
- unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[4],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
+ ASSERT_BSONOBJ_EQ(oplog[1],
+ OplogBufferCollection::extractEmbeddedOplogDocument(
+ unittest::assertGet(iter->next()).first));
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -908,11 +910,11 @@ TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) {
ASSERT_EQUALS(oplogBuffer.getCount(), 5UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 5UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[1]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[1]);
ASSERT_EQUALS(oplogBuffer.getCount(), 4UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
@@ -932,11 +934,11 @@ TEST_F(OplogBufferCollectionTest, MultipleSentinelsAreReturnedInOrder) {
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[4]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[4]);
ASSERT_EQUALS(oplogBuffer.getCount(), 2UL);
ASSERT_TRUE(oplogBuffer.tryPop(_txn.get(), &doc));
- ASSERT_EQUALS(doc, oplog[4]);
+ ASSERT_BSONOBJ_EQ(doc, oplog[4]);
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
ASSERT_TRUE(oplogBuffer.peek(_txn.get(), &doc));
@@ -1011,9 +1013,9 @@ TEST_F(OplogBufferCollectionTest, TwoBlockingPopsBlockAndRemoveDocumentsWithSent
ASSERT_TRUE(sentinels.empty());
ASSERT_EQUALS(oplogBuffer.getCount(), 1UL);
- ASSERT_NOT_EQUALS(doc1, doc2);
- ASSERT_TRUE(doc1 == oplog[0] || doc1.isEmpty());
- ASSERT_TRUE(doc2 == oplog[0] || doc2.isEmpty());
+ ASSERT_BSONOBJ_NE(doc1, doc2);
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(doc1 == oplog[0]) || doc1.isEmpty());
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(doc2 == oplog[0]) || doc2.isEmpty());
}
TEST_F(OplogBufferCollectionTest, BlockingPeekBlocksAndFindsSentinel) {
diff --git a/src/mongo/db/repl/oplog_entry.h b/src/mongo/db/repl/oplog_entry.h
index 52e1dd15e06..f7c59d32c69 100644
--- a/src/mongo/db/repl/oplog_entry.h
+++ b/src/mongo/db/repl/oplog_entry.h
@@ -29,7 +29,7 @@
#pragma once
#include "mongo/bson/bsonobj.h"
-
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/repl/optime.h"
namespace mongo {
@@ -77,7 +77,7 @@ struct OplogEntry {
std::ostream& operator<<(std::ostream& s, const OplogEntry& o);
inline bool operator==(const OplogEntry& lhs, const OplogEntry& rhs) {
- return lhs.raw == rhs.raw;
+ return SimpleBSONObjComparator::kInstance.evaluate(lhs.raw == rhs.raw);
}
} // namespace repl
diff --git a/src/mongo/db/repl/oplog_fetcher_test.cpp b/src/mongo/db/repl/oplog_fetcher_test.cpp
index f24aa66a58d..7c554005fa8 100644
--- a/src/mongo/db/repl/oplog_fetcher_test.cpp
+++ b/src/mongo/db/repl/oplog_fetcher_test.cpp
@@ -208,8 +208,8 @@ std::unique_ptr<ShutdownState> OplogFetcherTest::processSingleBatch(
auto request = processNetworkResponse(response);
- ASSERT_EQUALS(oplogFetcher.getCommandObject_forTest(), request.cmdObj);
- ASSERT_EQUALS(oplogFetcher.getMetadataObject_forTest(), request.metadata);
+ ASSERT_BSONOBJ_EQ(oplogFetcher.getCommandObject_forTest(), request.cmdObj);
+ ASSERT_BSONOBJ_EQ(oplogFetcher.getMetadataObject_forTest(), request.metadata);
oplogFetcher.shutdown();
oplogFetcher.join();
@@ -296,8 +296,8 @@ TEST_F(
[](Status, OpTimeWithHash) {})
.getCommandObject_forTest();
ASSERT_EQUALS(mongo::BSONType::Object, cmdObj["filter"].type());
- ASSERT_EQUALS(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
- cmdObj["filter"].Obj());
+ ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
+ cmdObj["filter"].Obj());
ASSERT_EQUALS(dataReplicatorExternalState->currentTerm, cmdObj["term"].numberLong());
_checkDefaultCommandObjectFields(cmdObj);
}
@@ -316,8 +316,8 @@ TEST_F(
[](Status, OpTimeWithHash) {})
.getCommandObject_forTest();
ASSERT_EQUALS(mongo::BSONType::Object, cmdObj["filter"].type());
- ASSERT_EQUALS(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
- cmdObj["filter"].Obj());
+ ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
+ cmdObj["filter"].Obj());
ASSERT_FALSE(cmdObj.hasField("term"));
_checkDefaultCommandObjectFields(cmdObj);
}
@@ -346,9 +346,9 @@ TEST_F(OplogFetcherTest, MetadataObjectIsEmptyUnderProtocolVersion0) {
enqueueDocumentsFn,
[](Status, OpTimeWithHash) {})
.getMetadataObject_forTest();
- ASSERT_EQUALS(BSON(rpc::ServerSelectionMetadata::fieldName()
- << BSON(rpc::ServerSelectionMetadata::kSecondaryOkFieldName << 1)),
- metadataObj);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::ServerSelectionMetadata::fieldName()
+ << BSON(rpc::ServerSelectionMetadata::kSecondaryOkFieldName << 1)),
+ metadataObj);
}
TEST_F(OplogFetcherTest, RemoteCommandTimeoutShouldEqualElectionTimeout) {
@@ -556,8 +556,8 @@ TEST_F(OplogFetcherTest, OplogFetcherShouldExcludeFirstDocumentInFirstBatchWhenE
{makeCursorResponse(0, documents), rpc::makeEmptyMetadata(), Milliseconds(0)});
ASSERT_EQUALS(2U, lastEnqueuedDocuments.size());
- ASSERT_EQUALS(secondEntry, lastEnqueuedDocuments[0]);
- ASSERT_EQUALS(thirdEntry, lastEnqueuedDocuments[1]);
+ ASSERT_BSONOBJ_EQ(secondEntry, lastEnqueuedDocuments[0]);
+ ASSERT_BSONOBJ_EQ(thirdEntry, lastEnqueuedDocuments[1]);
ASSERT_EQUALS(3U, lastEnqueuedDocumentsInfo.networkDocumentCount);
ASSERT_EQUALS(size_t(firstEntry.objsize() + secondEntry.objsize() + thirdEntry.objsize()),
@@ -677,7 +677,7 @@ RemoteCommandRequest OplogFetcherTest::testTwoBatchHandling(bool isV1ElectionPro
processNetworkResponse(makeCursorResponse(cursorId, {firstEntry, secondEntry}), true);
ASSERT_EQUALS(1U, lastEnqueuedDocuments.size());
- ASSERT_EQUALS(secondEntry, lastEnqueuedDocuments[0]);
+ ASSERT_BSONOBJ_EQ(secondEntry, lastEnqueuedDocuments[0]);
// Set cursor ID to 0 in getMore response to indicate no more data available.
auto thirdEntry = makeNoopOplogEntry({{Seconds(789), 0}, lastFetched.opTime.getTerm()}, 300);
@@ -690,8 +690,8 @@ RemoteCommandRequest OplogFetcherTest::testTwoBatchHandling(bool isV1ElectionPro
request.cmdObj.getIntField("maxTimeMS"));
ASSERT_EQUALS(2U, lastEnqueuedDocuments.size());
- ASSERT_EQUALS(thirdEntry, lastEnqueuedDocuments[0]);
- ASSERT_EQUALS(fourthEntry, lastEnqueuedDocuments[1]);
+ ASSERT_BSONOBJ_EQ(thirdEntry, lastEnqueuedDocuments[0]);
+ ASSERT_BSONOBJ_EQ(fourthEntry, lastEnqueuedDocuments[1]);
oplogFetcher.shutdown();
oplogFetcher.join();
diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp
index 75843f5a945..f915209e585 100644
--- a/src/mongo/db/repl/read_concern_args_test.cpp
+++ b/src/mongo/db/repl/read_concern_args_test.cpp
@@ -190,7 +190,7 @@ TEST(ReadAfterSerialize, Empty) {
BSONObj obj(builder.done());
- ASSERT_EQ(BSON(ReadConcernArgs::kReadConcernFieldName << BSONObj()), obj);
+ ASSERT_BSONOBJ_EQ(BSON(ReadConcernArgs::kReadConcernFieldName << BSONObj()), obj);
}
TEST(ReadAfterSerialize, ReadAfterOnly) {
@@ -203,7 +203,7 @@ TEST(ReadAfterSerialize, ReadAfterOnly) {
ReadConcernArgs::kAfterOpTimeFieldName << BSON(
OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
- ASSERT_EQ(expectedObj, builder.done());
+ ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
TEST(ReadAfterSerialize, CommitLevelOnly) {
@@ -214,7 +214,7 @@ TEST(ReadAfterSerialize, CommitLevelOnly) {
BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "local")));
- ASSERT_EQ(expectedObj, builder.done());
+ ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
TEST(ReadAfterSerialize, FullSpecification) {
@@ -231,7 +231,7 @@ TEST(ReadAfterSerialize, FullSpecification) {
<< BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
<< 2))));
- ASSERT_EQ(expectedObj, builder.done());
+ ASSERT_BSONOBJ_EQ(expectedObj, builder.done());
}
} // unnamed namespace
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index aa61d91d10a..bd3267cb172 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -349,7 +349,7 @@ TEST_F(ReplCoordTest, NodeReturnsNodeNotFoundWhenQuorumCheckFailsWhileInitiating
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
ASSERT_EQUALS(HostAndPort("node2", 54321), noi->getRequest().target);
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(hbArgs.toBSON(), noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(hbArgs.toBSON(), noi->getRequest().cmdObj);
getNet()->scheduleResponse(
noi, startDate + Milliseconds(10), ResponseStatus(ErrorCodes::NoSuchKey, "No response"));
getNet()->runUntil(startDate + Milliseconds(10));
@@ -380,7 +380,7 @@ TEST_F(ReplCoordTest, InitiateSucceedsWhenQuorumCheckPasses) {
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
ASSERT_EQUALS(HostAndPort("node2", 54321), noi->getRequest().target);
ASSERT_EQUALS("admin", noi->getRequest().dbname);
- ASSERT_EQUALS(hbArgs.toBSON(), noi->getRequest().cmdObj);
+ ASSERT_BSONOBJ_EQ(hbArgs.toBSON(), noi->getRequest().cmdObj);
ReplSetHeartbeatResponse hbResp;
hbResp.setConfigVersion(0);
getNet()->scheduleResponse(
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 42ee28c6aa5..483901d0fb5 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -397,7 +397,7 @@ TEST_F(
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::BadValue) << "errmsg"
<< "Unexpected field durableOpTime in UpdateInfoArgs"),
true);
- ASSERT_EQUALS(expectedNewStyleCommandRequest, commandRequest);
+ ASSERT_BSONOBJ_EQ(expectedNewStyleCommandRequest, commandRequest);
// Update command object should match old style (pre-3.2.4).
auto expectedOldStyleCommandRequest = unittest::assertGet(prepareReplSetUpdatePositionCommandFn(
@@ -408,7 +408,7 @@ TEST_F(
<< "newer config"
<< "configVersion"
<< posUpdater->getConfigVersion() + 1));
- ASSERT_EQUALS(expectedOldStyleCommandRequest, commandRequest);
+ ASSERT_BSONOBJ_EQ(expectedOldStyleCommandRequest, commandRequest);
ASSERT_TRUE(reporter->isActive());
}
@@ -543,7 +543,7 @@ TEST_F(ReporterTest,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::BadValue) << "errmsg"
<< "Unexpected field durableOpTime in UpdateInfoArgs"),
true);
- ASSERT_EQUALS(expectedNewStyleCommandRequest, commandRequest);
+ ASSERT_BSONOBJ_EQ(expectedNewStyleCommandRequest, commandRequest);
auto expectedOldStyleCommandRequest = unittest::assertGet(prepareReplSetUpdatePositionCommandFn(
ReplicationCoordinator::ReplSetUpdatePositionCommandStyle::kOldStyle));
@@ -551,8 +551,8 @@ TEST_F(ReporterTest,
commandRequest = processNetworkResponse(BSON("ok" << 1));
// Update command object should match old style (pre-3.2.2).
- ASSERT_NOT_EQUALS(expectedNewStyleCommandRequest, expectedOldStyleCommandRequest);
- ASSERT_EQUALS(expectedOldStyleCommandRequest, commandRequest);
+ ASSERT_BSONOBJ_NE(expectedNewStyleCommandRequest, expectedOldStyleCommandRequest);
+ ASSERT_BSONOBJ_EQ(expectedOldStyleCommandRequest, commandRequest);
reporter->shutdown();
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 87e888a62d0..03abb00dfb2 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -100,7 +100,7 @@ TEST(RollBackLocalOperationsTest, RollbackMultipleLocalOperations) {
OplogInterfaceMock localOplog(localOperations);
auto i = localOperations.cbegin();
auto rollbackOperation = [&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
+ ASSERT_BSONOBJ_EQ(i->first, operation);
i++;
return Status::OK();
};
@@ -110,7 +110,7 @@ TEST(RollBackLocalOperationsTest, RollbackMultipleLocalOperations) {
ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
ASSERT_EQUALS(commonOperation.second, result.getValue().second);
ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
+ ASSERT_BSONOBJ_EQ(commonOperation.first, i->first);
i++;
ASSERT_TRUE(i == localOperations.cend());
}
@@ -148,7 +148,7 @@ TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
OplogInterfaceMock localOplog(localOperations);
auto i = localOperations.cbegin();
auto rollbackOperation = [&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
+ ASSERT_BSONOBJ_EQ(i->first, operation);
i++;
return Status::OK();
};
@@ -168,7 +168,7 @@ TEST(RollBackLocalOperationsTest, SkipRemoteOperations) {
ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
ASSERT_EQUALS(commonOperation.second, result.getValue().second);
ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
+ ASSERT_BSONOBJ_EQ(commonOperation.first, i->first);
i++;
ASSERT_TRUE(i == localOperations.cend());
}
@@ -181,7 +181,7 @@ TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashess) {
OplogInterfaceMock localOplog(localOperations);
auto i = localOperations.cbegin();
auto rollbackOperation = [&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
+ ASSERT_BSONOBJ_EQ(i->first, operation);
i++;
return Status::OK();
};
@@ -201,7 +201,7 @@ TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashess) {
ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
ASSERT_EQUALS(commonOperation.second, result.getValue().second);
ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
+ ASSERT_BSONOBJ_EQ(commonOperation.first, i->first);
i++;
ASSERT_TRUE(i == localOperations.cend());
}
@@ -266,7 +266,7 @@ TEST(SyncRollBackLocalOperationsTest, RollbackTwoOperations) {
auto result = syncRollBackLocalOperations(OplogInterfaceMock(localOperations),
OplogInterfaceMock({commonOperation}),
[&](const BSONObj& operation) {
- ASSERT_EQUALS(i->first, operation);
+ ASSERT_BSONOBJ_EQ(i->first, operation);
i++;
return Status::OK();
});
@@ -274,7 +274,7 @@ TEST(SyncRollBackLocalOperationsTest, RollbackTwoOperations) {
ASSERT_EQUALS(commonOperation.first["ts"].timestamp(), result.getValue().first);
ASSERT_EQUALS(commonOperation.second, result.getValue().second);
ASSERT_FALSE(i == localOperations.cend());
- ASSERT_EQUALS(commonOperation.first, i->first);
+ ASSERT_BSONOBJ_EQ(commonOperation.first, i->first);
i++;
ASSERT_TRUE(i == localOperations.cend());
}
@@ -303,7 +303,7 @@ TEST(SyncRollBackLocalOperationsTest, SameTimestampDifferentHashes) {
syncRollBackLocalOperations(OplogInterfaceMock({localOperation, commonOperation}),
OplogInterfaceMock({remoteOperation, commonOperation}),
[&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
+ ASSERT_BSONOBJ_EQ(localOperation.first, operation);
called = true;
return Status::OK();
});
@@ -322,7 +322,7 @@ TEST(SyncRollBackLocalOperationsTest, SameTimestampEndOfLocalOplog) {
syncRollBackLocalOperations(OplogInterfaceMock({localOperation}),
OplogInterfaceMock({remoteOperation, commonOperation}),
[&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
+ ASSERT_BSONOBJ_EQ(localOperation.first, operation);
called = true;
return Status::OK();
});
@@ -348,13 +348,14 @@ TEST(SyncRollBackLocalOperationsTest, SameTimestampEndOfRemoteOplog) {
auto localOperation = makeOpAndRecordId(1, 2);
auto remoteOperation = makeOpAndRecordId(1, 3);
bool called = false;
- auto result = syncRollBackLocalOperations(OplogInterfaceMock({localOperation, commonOperation}),
- OplogInterfaceMock({remoteOperation}),
- [&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
- called = true;
- return Status::OK();
- });
+ auto result =
+ syncRollBackLocalOperations(OplogInterfaceMock({localOperation, commonOperation}),
+ OplogInterfaceMock({remoteOperation}),
+ [&](const BSONObj& operation) {
+ ASSERT_BSONOBJ_EQ(localOperation.first, operation);
+ called = true;
+ return Status::OK();
+ });
ASSERT_EQUALS(ErrorCodes::NoMatchingDocument, result.getStatus().code());
ASSERT_STRING_CONTAINS(result.getStatus().reason(), "RS100 reached beginning of remote oplog");
ASSERT_TRUE(called);
@@ -369,7 +370,7 @@ TEST(SyncRollBackLocalOperationsTest, DifferentTimestampEndOfLocalOplog) {
syncRollBackLocalOperations(OplogInterfaceMock({localOperation}),
OplogInterfaceMock({remoteOperation, commonOperation}),
[&](const BSONObj& operation) {
- ASSERT_EQUALS(localOperation.first, operation);
+ ASSERT_BSONOBJ_EQ(localOperation.first, operation);
called = true;
return Status::OK();
});
diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp
index 9a65b0b9457..9a1f35682fa 100644
--- a/src/mongo/db/repl/storage_interface_impl_test.cpp
+++ b/src/mongo/db/repl/storage_interface_impl_test.cpp
@@ -375,8 +375,8 @@ TEST_F(StorageInterfaceImplTest, InsertDocumentsSavesOperationsReturnsOpTimeOfLa
// Check contents of oplog. OplogInterface iterates over oplog collection in reverse.
repl::OplogInterfaceLocal oplog(txn.get(), nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(op2, unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(op1, unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(op2, unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(op1, unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -602,15 +602,16 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2)}));
- ASSERT_EQUALS(BSON("_id" << 0),
- storage.findOne(txn, nss, indexName, StorageInterface::ScanDirection::kForward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0),
+ unittest::assertGet(storage.findOne(
+ txn, nss, indexName, StorageInterface::ScanDirection::kForward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -623,15 +624,16 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2)}));
- ASSERT_EQUALS(BSON("_id" << 2),
- storage.findOne(txn, nss, indexName, StorageInterface::ScanDirection::kBackward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2),
+ unittest::assertGet(storage.findOne(
+ txn, nss, indexName, StorageInterface::ScanDirection::kBackward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -643,16 +645,16 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
- ASSERT_EQUALS(
- BSON("_id" << 1),
- storage.findOne(txn, nss, boost::none, StorageInterface::ScanDirection::kForward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1),
+ unittest::assertGet(storage.findOne(
+ txn, nss, boost::none, StorageInterface::ScanDirection::kForward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -664,16 +666,16 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
- ASSERT_EQUALS(
- BSON("_id" << 0),
- storage.findOne(txn, nss, boost::none, StorageInterface::ScanDirection::kBackward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0),
+ unittest::assertGet(storage.findOne(
+ txn, nss, boost::none, StorageInterface::ScanDirection::kBackward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -720,15 +722,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2)}));
- ASSERT_EQUALS(
- BSON("_id" << 0),
- storage.deleteOne(txn, nss, indexName, StorageInterface::ScanDirection::kForward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0),
+ unittest::assertGet(storage.deleteOne(
+ txn, nss, indexName, StorageInterface::ScanDirection::kForward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -741,15 +743,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 0), BSON("_id" << 1), BSON("_id" << 2)}));
- ASSERT_EQUALS(
- BSON("_id" << 2),
- storage.deleteOne(txn, nss, indexName, StorageInterface::ScanDirection::kBackward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2),
+ unittest::assertGet(storage.deleteOne(
+ txn, nss, indexName, StorageInterface::ScanDirection::kBackward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -761,15 +763,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
- ASSERT_EQUALS(
- BSON("_id" << 1),
- storage.deleteOne(txn, nss, boost::none, StorageInterface::ScanDirection::kForward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1),
+ unittest::assertGet(storage.deleteOne(
+ txn, nss, boost::none, StorageInterface::ScanDirection::kForward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -781,15 +783,15 @@ TEST_F(StorageInterfaceImplWithReplCoordTest,
ASSERT_OK(storage.createCollection(txn, nss, CollectionOptions()));
ASSERT_OK(
storage.insertDocuments(txn, nss, {BSON("_id" << 1), BSON("_id" << 2), BSON("_id" << 0)}));
- ASSERT_EQUALS(
- BSON("_id" << 0),
- storage.deleteOne(txn, nss, boost::none, StorageInterface::ScanDirection::kBackward));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0),
+ unittest::assertGet(storage.deleteOne(
+ txn, nss, boost::none, StorageInterface::ScanDirection::kBackward)));
// Check collection contents. OplogInterface returns documents in reverse natural order.
OplogInterfaceLocal oplog(txn, nss.ns());
auto iter = oplog.makeIterator();
- ASSERT_EQUALS(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 2), unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index 4caa36f26c7..897e5c3dd9b 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -274,7 +274,7 @@ TEST_F(SyncTailTest, SyncApplyNoOp) {
ASSERT_FALSE(txn->writesAreReplicated());
ASSERT_TRUE(documentValidationDisabled(txn));
ASSERT_TRUE(db);
- ASSERT_EQUALS(op, theOperation);
+ ASSERT_BSONOBJ_EQ(op, theOperation);
ASSERT_FALSE(convertUpdateToUpsert);
return Status::OK();
};
@@ -333,7 +333,7 @@ void SyncTailTest::_testSyncApplyInsertDocument(LockMode expectedMode) {
ASSERT_FALSE(txn->writesAreReplicated());
ASSERT_TRUE(documentValidationDisabled(txn));
ASSERT_TRUE(db);
- ASSERT_EQUALS(op, theOperation);
+ ASSERT_BSONOBJ_EQ(op, theOperation);
ASSERT_TRUE(convertUpdateToUpsert);
return Status::OK();
};
@@ -393,7 +393,7 @@ TEST_F(SyncTailTest, SyncApplyIndexBuild) {
ASSERT_FALSE(txn->writesAreReplicated());
ASSERT_TRUE(documentValidationDisabled(txn));
ASSERT_TRUE(db);
- ASSERT_EQUALS(op, theOperation);
+ ASSERT_BSONOBJ_EQ(op, theOperation);
ASSERT_FALSE(convertUpdateToUpsert);
return Status::OK();
};
@@ -429,7 +429,7 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
ASSERT_TRUE(txn->lockState()->isW());
ASSERT_TRUE(txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(txn));
- ASSERT_EQUALS(op, theOperation);
+ ASSERT_BSONOBJ_EQ(op, theOperation);
return Status::OK();
};
ASSERT_TRUE(_txn->writesAreReplicated());
@@ -593,8 +593,8 @@ TEST_F(SyncTailTest, MultiApplyAssignsOperationsToWriterThreadsBasedOnNamespaceH
stdx::lock_guard<stdx::mutex> lock(mutex);
ASSERT_EQUALS(2U, operationsWrittenToOplog.size());
ASSERT_EQUALS(NamespaceString(rsOplogName), nssForInsert);
- ASSERT_EQUALS(op1.raw, operationsWrittenToOplog[0]);
- ASSERT_EQUALS(op2.raw, operationsWrittenToOplog[1]);
+ ASSERT_BSONOBJ_EQ(op1.raw, operationsWrittenToOplog[0]);
+ ASSERT_BSONOBJ_EQ(op2.raw, operationsWrittenToOplog[1]);
}
TEST_F(SyncTailTest, MultiSyncApplyUsesSyncApplyToApplyOperation) {
@@ -719,8 +719,8 @@ TEST_F(SyncTailTest, MultiSyncApplyGroupsInsertOperationByNamespaceBeforeApplyin
ASSERT_EQUALS(BSONType::Array, operationsApplied[2].o.type());
auto group1 = operationsApplied[2].o.Array();
ASSERT_EQUALS(2U, group1.size());
- ASSERT_EQUALS(insertOp1a.o.Obj(), group1[0].Obj());
- ASSERT_EQUALS(insertOp1b.o.Obj(), group1[1].Obj());
+ ASSERT_BSONOBJ_EQ(insertOp1a.o.Obj(), group1[0].Obj());
+ ASSERT_BSONOBJ_EQ(insertOp1b.o.Obj(), group1[1].Obj());
// Check grouped insert operations in namespace "nss2".
ASSERT_EQUALS(insertOp2a.getOpTime(), operationsApplied[3].getOpTime());
@@ -728,8 +728,8 @@ TEST_F(SyncTailTest, MultiSyncApplyGroupsInsertOperationByNamespaceBeforeApplyin
ASSERT_EQUALS(BSONType::Array, operationsApplied[3].o.type());
auto group2 = operationsApplied[3].o.Array();
ASSERT_EQUALS(2U, group2.size());
- ASSERT_EQUALS(insertOp2a.o.Obj(), group2[0].Obj());
- ASSERT_EQUALS(insertOp2b.o.Obj(), group2[1].Obj());
+ ASSERT_BSONOBJ_EQ(insertOp2a.o.Obj(), group2[0].Obj());
+ ASSERT_BSONOBJ_EQ(insertOp2b.o.Obj(), group2[1].Obj());
}
TEST_F(SyncTailTest, MultiSyncApplyUsesLimitWhenGroupingInsertOperation) {
@@ -776,7 +776,7 @@ TEST_F(SyncTailTest, MultiSyncApplyUsesLimitWhenGroupingInsertOperation) {
ASSERT_EQUALS(limit, groupedInsertDocuments.size());
for (std::size_t i = 0; i < limit; ++i) {
const auto& insertOp = insertOps[i];
- ASSERT_EQUALS(insertOp.o.Obj(), groupedInsertDocuments[i].Obj());
+ ASSERT_BSONOBJ_EQ(insertOp.o.Obj(), groupedInsertDocuments[i].Obj());
}
// (limit + 1)-th insert operations should not be included in group of first (limit) inserts.
@@ -882,8 +882,8 @@ TEST_F(SyncTailTest, MultiInitialSyncApplySkipsDocumentOnNamespaceNotFound) {
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(doc3, unittest::assertGet(iter->next()).first);
- ASSERT_EQUALS(doc1, unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(doc3, unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(doc1, unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
@@ -901,7 +901,7 @@ TEST_F(SyncTailTest, MultiInitialSyncApplyRetriesFailedUpdateIfDocumentIsAvailab
// with the OplogInterfaceLocal class.
OplogInterfaceLocal collectionReader(_txn.get(), nss.ns());
auto iter = collectionReader.makeIterator();
- ASSERT_EQUALS(updatedDocument, unittest::assertGet(iter->next()).first);
+ ASSERT_BSONOBJ_EQ(updatedDocument, unittest::assertGet(iter->next()).first);
ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, iter->next().getStatus());
}
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 593d3a9d873..77d988b2954 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -1660,7 +1660,7 @@ void TopologyCoordinatorImpl::prepareStatusResponse(const ReplSetStatusArgs& rsS
}
// sort members bson
- sort(membersOut.begin(), membersOut.end());
+ sort(membersOut.begin(), membersOut.end(), SimpleBSONObjComparator::kInstance.makeLessThan());
response->append("set", _rsConfig.isInitialized() ? _rsConfig.getReplSetName() : "");
response->append("date", now);
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index ef019e242ea..011e9d780b2 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -1512,11 +1512,12 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
// Test results for all non-self members
ASSERT_EQUALS(setName, rsStatus["set"].String());
ASSERT_EQUALS(curTime.asInt64(), rsStatus["date"].Date().asInt64());
- ASSERT_EQUALS(lastCommittedOpTime.toBSON(), rsStatus["optimes"]["lastCommittedOpTime"].Obj());
+ ASSERT_BSONOBJ_EQ(lastCommittedOpTime.toBSON(),
+ rsStatus["optimes"]["lastCommittedOpTime"].Obj());
{
const auto optimes = rsStatus["optimes"].Obj();
- ASSERT_EQUALS(readConcernMajorityOpTime.toBSON(),
- optimes["readConcernMajorityOpTime"].Obj());
+ ASSERT_BSONOBJ_EQ(readConcernMajorityOpTime.toBSON(),
+ optimes["readConcernMajorityOpTime"].Obj());
ASSERT_EQUALS(oplogProgress.getTimestamp(), optimes["appliedOpTime"].timestamp());
ASSERT_EQUALS((oplogDurable).getTimestamp(), optimes["durableOpTime"].timestamp());
}
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index c8479a8735a..ef840dc006a 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -1514,13 +1514,14 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
// Test results for all non-self members
ASSERT_EQUALS(setName, rsStatus["set"].String());
ASSERT_EQUALS(curTime.asInt64(), rsStatus["date"].Date().asInt64());
- ASSERT_EQUALS(lastCommittedOpTime.toBSON(), rsStatus["optimes"]["lastCommittedOpTime"].Obj());
+ ASSERT_BSONOBJ_EQ(lastCommittedOpTime.toBSON(),
+ rsStatus["optimes"]["lastCommittedOpTime"].Obj());
{
const auto optimes = rsStatus["optimes"].Obj();
- ASSERT_EQUALS(readConcernMajorityOpTime.toBSON(),
- optimes["readConcernMajorityOpTime"].Obj());
- ASSERT_EQUALS(oplogProgress.toBSON(), optimes["appliedOpTime"].Obj());
- ASSERT_EQUALS((oplogDurable).toBSON(), optimes["durableOpTime"].Obj());
+ ASSERT_BSONOBJ_EQ(readConcernMajorityOpTime.toBSON(),
+ optimes["readConcernMajorityOpTime"].Obj());
+ ASSERT_BSONOBJ_EQ(oplogProgress.toBSON(), optimes["appliedOpTime"].Obj());
+ ASSERT_BSONOBJ_EQ((oplogDurable).toBSON(), optimes["durableOpTime"].Obj());
}
std::vector<BSONElement> memberArray = rsStatus["members"].Array();
ASSERT_EQUALS(4U, memberArray.size());
@@ -1551,7 +1552,7 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
ASSERT_EQUALS(MemberState(MemberState::RS_SECONDARY).toString(),
member1Status["stateStr"].String());
ASSERT_EQUALS(durationCount<Seconds>(uptimeSecs), member1Status["uptime"].numberInt());
- ASSERT_EQUALS(oplogProgress.toBSON(), member1Status["optime"].Obj());
+ ASSERT_BSONOBJ_EQ(oplogProgress.toBSON(), member1Status["optime"].Obj());
ASSERT_TRUE(member1Status.hasField("optimeDate"));
ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
member1Status["optimeDate"].Date());
@@ -1581,13 +1582,13 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
ASSERT_EQUALS(MemberState::RS_PRIMARY, selfStatus["state"].numberInt());
ASSERT_EQUALS(MemberState(MemberState::RS_PRIMARY).toString(), selfStatus["stateStr"].str());
ASSERT_EQUALS(durationCount<Seconds>(uptimeSecs), selfStatus["uptime"].numberInt());
- ASSERT_EQUALS(oplogProgress.toBSON(), selfStatus["optime"].Obj());
+ ASSERT_BSONOBJ_EQ(oplogProgress.toBSON(), selfStatus["optime"].Obj());
ASSERT_TRUE(selfStatus.hasField("optimeDate"));
ASSERT_EQUALS(Date_t::fromMillisSinceEpoch(oplogProgress.getSecs() * 1000ULL),
selfStatus["optimeDate"].Date());
ASSERT_EQUALS(2000, rsStatus["heartbeatIntervalMillis"].numberInt());
- ASSERT_EQUALS(initialSyncStatus, rsStatus["initialSyncStatus"].Obj());
+ ASSERT_BSONOBJ_EQ(initialSyncStatus, rsStatus["initialSyncStatus"].Obj());
// TODO(spencer): Test electionTime and pingMs are set properly
}
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index e3a576d88d3..78815711446 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/s/collection_metadata.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/builder.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/stdx/memory.h"
@@ -576,7 +577,7 @@ void CollectionMetadata::fillRanges() {
max = currMax;
continue;
}
- if (max == currMin) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(max == currMin)) {
max = currMax;
continue;
}
diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp
index ffae537754b..c51b4a4ba7a 100644
--- a/src/mongo/db/s/metadata_manager.cpp
+++ b/src/mongo/db/s/metadata_manager.cpp
@@ -32,6 +32,7 @@
#include "mongo/db/s/metadata_manager.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/s/collection_range_deleter.h"
#include "mongo/db/s/sharding_state.h"
@@ -216,7 +217,7 @@ void MetadataManager::forgetReceive(const ChunkRange& range) {
invariant(it != _receivingChunks.end());
// Verify entire ChunkRange is identical, not just the min key.
- invariant(it->second == range.getMax());
+ invariant(SimpleBSONObjComparator::kInstance.evaluate(it->second == range.getMax()));
_receivingChunks.erase(it);
}
@@ -362,8 +363,9 @@ void MetadataManager::_removeRangeToClean_inlock(const ChunkRange& range, Status
--it;
}
- for (; it != _rangesToClean.end() && it->first < range.getMax();) {
- if (it->second.getMax() <= range.getMin()) {
+ for (; it != _rangesToClean.end() &&
+ SimpleBSONObjComparator::kInstance.evaluate(it->first < range.getMax());) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(it->second.getMax() <= range.getMin())) {
++it;
continue;
}
@@ -374,11 +376,11 @@ void MetadataManager::_removeRangeToClean_inlock(const ChunkRange& range, Status
BSONObj oldMax = it->second.getMax();
it->second.complete(deletionStatus);
_rangesToClean.erase(it++);
- if (oldMin < range.getMin()) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(oldMin < range.getMin())) {
_addRangeToClean_inlock(ChunkRange(oldMin, range.getMin()));
}
- if (oldMax > range.getMax()) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(oldMax > range.getMax())) {
_addRangeToClean_inlock(ChunkRange(range.getMax(), oldMax));
}
}
diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp
index 7e7ff13b9e2..977b35a5f70 100644
--- a/src/mongo/db/s/metadata_manager_test.cpp
+++ b/src/mongo/db/s/metadata_manager_test.cpp
@@ -356,7 +356,7 @@ TEST_F(MetadataManagerTest, BeginReceiveWithOverlappingRange) {
const auto it = copyOfPending.find(BSON("key" << 5));
ASSERT(it != copyOfPending.end());
- ASSERT_EQ(it->second, BSON("key" << 35));
+ ASSERT_BSONOBJ_EQ(it->second, BSON("key" << 35));
}
TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
@@ -382,8 +382,8 @@ TEST_F(MetadataManagerTest, RefreshMetadataAfterDropAndRecreate) {
ASSERT_EQ(manager.getActiveMetadata()->getChunks().size(), 1UL);
const auto chunkEntry = manager.getActiveMetadata()->getChunks().begin();
- ASSERT_EQ(BSON("key" << 20), chunkEntry->first);
- ASSERT_EQ(BSON("key" << 30), chunkEntry->second);
+ ASSERT_BSONOBJ_EQ(BSON("key" << 20), chunkEntry->first);
+ ASSERT_BSONOBJ_EQ(BSON("key" << 30), chunkEntry->second);
}
// Tests membership functions for _rangesToClean
diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp
index 5ed698daad1..e00377e5a13 100644
--- a/src/mongo/db/s/split_vector_command.cpp
+++ b/src/mongo/db/s/split_vector_command.cpp
@@ -34,6 +34,7 @@
#include <string>
#include <vector>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
@@ -271,7 +272,7 @@ public:
// Use every 'keyCount'-th key as a split point. We add the initial key as a sentinel,
// to be removed at the end. If a key appears more times than entries allowed on a
// chunk, we issue a warning and split on the following key.
- set<BSONObj> tooFrequentKeys;
+ auto tooFrequentKeys = SimpleBSONObjComparator::kInstance.makeOrderedBSONObjSet();
splitKeys.push_back(dps::extractElementsBasedOnTemplate(
prettyKey(idx->keyPattern(), currKey.getOwned()), keyPattern));
@@ -371,9 +372,8 @@ public:
}
// Make sure splitKeys is in ascending order
- std::sort(splitKeys.begin(),
- splitKeys.end(),
- [](const BSONObj& lhs, const BSONObj& rhs) -> bool { return lhs < rhs; });
+ std::sort(
+ splitKeys.begin(), splitKeys.end(), SimpleBSONObjComparator::kInstance.makeLessThan());
result.append("splitKeys", splitKeys);
return true;
}
diff --git a/src/mongo/db/s/start_chunk_clone_request_test.cpp b/src/mongo/db/s/start_chunk_clone_request_test.cpp
index 107c5628580..342d49d9130 100644
--- a/src/mongo/db/s/start_chunk_clone_request_test.cpp
+++ b/src/mongo/db/s/start_chunk_clone_request_test.cpp
@@ -71,9 +71,9 @@ TEST(StartChunkCloneRequest, CreateAsCommandComplete) {
.toString(),
request.getFromShardConnectionString().toString());
ASSERT_EQ("shard0002", request.getToShardId());
- ASSERT_EQ(BSON("Key" << -100), request.getMinKey());
- ASSERT_EQ(BSON("Key" << 100), request.getMaxKey());
- ASSERT_EQ(BSON("Key" << 1), request.getShardKeyPattern());
+ ASSERT_BSONOBJ_EQ(BSON("Key" << -100), request.getMinKey());
+ ASSERT_BSONOBJ_EQ(BSON("Key" << 100), request.getMaxKey());
+ ASSERT_BSONOBJ_EQ(BSON("Key" << 1), request.getShardKeyPattern());
ASSERT_EQ(MigrationSecondaryThrottleOptions::kOff,
request.getSecondaryThrottle().getSecondaryThrottle());
}
diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp
index 960faff68ba..cb9af748106 100644
--- a/src/mongo/db/s/type_shard_identity_test.cpp
+++ b/src/mongo/db/s/type_shard_identity_test.cpp
@@ -61,7 +61,7 @@ TEST(ShardIdentityType, RoundTrip) {
ASSERT_TRUE(shardIdentity.isClusterIdSet());
ASSERT_EQ(clusterId, shardIdentity.getClusterId());
- ASSERT_EQ(doc, shardIdentity.toBSON());
+ ASSERT_BSONOBJ_EQ(doc, shardIdentity.toBSON());
}
TEST(ShardIdentityType, ParseMissingId) {
@@ -144,7 +144,7 @@ TEST(ShardIdentityType, CreateUpdateObject) {
auto updateObj = ShardIdentityType::createConfigServerUpdateObject("test/a:1,b:2");
auto expectedObj = BSON("$set" << BSON("configsvrConnectionString"
<< "test/a:1,b:2"));
- ASSERT_EQ(expectedObj, updateObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, updateObj);
}
} // namespace mongo
diff --git a/src/mongo/db/stats/timer_stats_test.cpp b/src/mongo/db/stats/timer_stats_test.cpp
index c1284c55357..80cef94aae0 100644
--- a/src/mongo/db/stats/timer_stats_test.cpp
+++ b/src/mongo/db/stats/timer_stats_test.cpp
@@ -38,14 +38,14 @@ namespace {
using namespace mongo;
TEST(TimerStatsTest, GetReportNoRecording) {
- ASSERT_EQUALS(BSON("num" << 0 << "totalMillis" << 0), TimerStats().getReport());
+ ASSERT_BSONOBJ_EQ(BSON("num" << 0 << "totalMillis" << 0), TimerStats().getReport());
}
TEST(TimerStatsTest, GetReportOneRecording) {
TimerStats timerStats;
Timer timer;
int millis = timerStats.record(timer);
- ASSERT_EQUALS(BSON("num" << 1 << "totalMillis" << millis), timerStats.getReport());
+ ASSERT_BSONOBJ_EQ(BSON("num" << 1 << "totalMillis" << millis), timerStats.getReport());
}
} // namespace
diff --git a/src/mongo/db/storage/index_entry_comparison.h b/src/mongo/db/storage/index_entry_comparison.h
index 906192bb954..692731b00ec 100644
--- a/src/mongo/db/storage/index_entry_comparison.h
+++ b/src/mongo/db/storage/index_entry_comparison.h
@@ -32,6 +32,7 @@
#include <tuple>
#include <vector>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/record_id.h"
@@ -51,11 +52,11 @@ struct IndexKeyEntry {
std::ostream& operator<<(std::ostream& stream, const IndexKeyEntry& entry);
inline bool operator==(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) {
- return std::tie(lhs.key, lhs.loc) == std::tie(rhs.key, rhs.loc);
+ return SimpleBSONObjComparator::kInstance.evaluate(lhs.key == rhs.key) && (lhs.loc == rhs.loc);
}
inline bool operator!=(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) {
- return std::tie(lhs.key, lhs.loc) != std::tie(rhs.key, rhs.loc);
+ return !(lhs == rhs);
}
/**
diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp
index 751fa78733b..662503223e9 100644
--- a/src/mongo/db/storage/key_string_test.cpp
+++ b/src/mongo/db/storage/key_string_test.cpp
@@ -85,7 +85,7 @@ TEST_F(KeyStringTest, Simple1) {
BSONObj a = BSON("" << 5);
BSONObj b = BSON("" << 6);
- ASSERT_LESS_THAN(a, b);
+ ASSERT_BSONOBJ_LT(a, b);
ASSERT_LESS_THAN(KeyString(version, a, ALL_ASCENDING, RecordId()),
KeyString(version, b, ALL_ASCENDING, RecordId()));
@@ -96,7 +96,7 @@ TEST_F(KeyStringTest, Simple1) {
const BSONObj _orig = x; \
const KeyString _ks(version, _orig, order); \
const BSONObj _converted = toBson(_ks, order); \
- ASSERT_EQ(_converted, _orig); \
+ ASSERT_BSONOBJ_EQ(_converted, _orig); \
ASSERT(_converted.binaryEqual(_orig)); \
} while (0)
@@ -106,27 +106,27 @@ TEST_F(KeyStringTest, Simple1) {
ROUNDTRIP_ORDER(version, x, ONE_DESCENDING); \
} while (0)
-#define COMPARES_SAME(_v, _x, _y) \
- do { \
- KeyString _xKS(_v, _x, ONE_ASCENDING); \
- KeyString _yKS(_v, _y, ONE_ASCENDING); \
- if (_x == _y) { \
- ASSERT_EQUALS(_xKS, _yKS); \
- } else if (_x < _y) { \
- ASSERT_LESS_THAN(_xKS, _yKS); \
- } else { \
- ASSERT_LESS_THAN(_yKS, _xKS); \
- } \
- \
- _xKS.resetToKey(_x, ONE_DESCENDING); \
- _yKS.resetToKey(_y, ONE_DESCENDING); \
- if (_x == _y) { \
- ASSERT_EQUALS(_xKS, _yKS); \
- } else if (_x < _y) { \
- ASSERT_GREATER_THAN(_xKS, _yKS); \
- } else { \
- ASSERT_GREATER_THAN(_yKS, _xKS); \
- } \
+#define COMPARES_SAME(_v, _x, _y) \
+ do { \
+ KeyString _xKS(_v, _x, ONE_ASCENDING); \
+ KeyString _yKS(_v, _y, ONE_ASCENDING); \
+ if (SimpleBSONObjComparator::kInstance.evaluate(_x == _y)) { \
+ ASSERT_EQUALS(_xKS, _yKS); \
+ } else if (SimpleBSONObjComparator::kInstance.evaluate(_x < _y)) { \
+ ASSERT_LESS_THAN(_xKS, _yKS); \
+ } else { \
+ ASSERT_LESS_THAN(_yKS, _xKS); \
+ } \
+ \
+ _xKS.resetToKey(_x, ONE_DESCENDING); \
+ _yKS.resetToKey(_y, ONE_DESCENDING); \
+ if (SimpleBSONObjComparator::kInstance.evaluate(_x == _y)) { \
+ ASSERT_EQUALS(_xKS, _yKS); \
+ } else if (SimpleBSONObjComparator::kInstance.evaluate(_x < _y)) { \
+ ASSERT_GREATER_THAN(_xKS, _yKS); \
+ } else { \
+ ASSERT_GREATER_THAN(_yKS, _xKS); \
+ } \
} while (0)
TEST_F(KeyStringTest, ActualBytesDouble) {
@@ -433,9 +433,9 @@ TEST_F(KeyStringTest, Timestamp) {
ROUNDTRIP(version, b);
ROUNDTRIP(version, c);
- ASSERT_LESS_THAN(a, b);
- ASSERT_LESS_THAN(b, c);
- ASSERT_LESS_THAN(c, d);
+ ASSERT_BSONOBJ_LT(a, b);
+ ASSERT_BSONOBJ_LT(b, c);
+ ASSERT_BSONOBJ_LT(c, d);
KeyString ka(version, a, ALL_ASCENDING);
KeyString kb(version, b, ALL_ASCENDING);
@@ -1047,7 +1047,8 @@ void perfTest(KeyString::Version version, const Numbers& numbers) {
micros = t.micros();
}
- auto minmax = std::minmax_element(numbers.begin(), numbers.end());
+ auto minmax = std::minmax_element(
+ numbers.begin(), numbers.end(), SimpleBSONObjComparator::kInstance.makeLessThan());
log() << 1E3 * micros / static_cast<double>(iters * numbers.size()) << " ns per "
<< mongo::KeyString::versionToString(version) << " roundtrip"
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 07a7eb8db2e..df9d85e9b84 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -1192,7 +1192,7 @@ protected:
virtual void validate() {
OperationContextNoop txn;
- ASSERT_NOT_EQUALS(_oldTop,
+ ASSERT_BSONOBJ_NE(_oldTop,
this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
@@ -1219,8 +1219,8 @@ protected:
virtual void validate() {
OperationContextNoop txn;
- ASSERT_TRUE(_oldTop !=
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ ASSERT_BSONOBJ_NE(_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
private:
@@ -1665,8 +1665,8 @@ class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight<OnDiskFormat> {
virtual void validate() {
OperationContextNoop txn;
- ASSERT_EQUALS(_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ ASSERT_BSONOBJ_EQ(_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
virtual bool merge() const {
@@ -1689,7 +1689,7 @@ class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight<OnDiskFormat
virtual void validate() {
OperationContextNoop txn;
// Different top means we rebalanced
- ASSERT_NOT_EQUALS(this->_oldTop,
+ ASSERT_BSONOBJ_NE(this->_oldTop,
this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
};
@@ -1706,8 +1706,8 @@ class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft<OnDiskFormat> {
virtual void validate() {
OperationContextNoop txn;
- ASSERT_EQUALS(this->_oldTop,
- this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
+ ASSERT_BSONOBJ_EQ(this->_oldTop,
+ this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
virtual bool merge() const {
return false;
@@ -1729,7 +1729,7 @@ class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft<OnDiskFormat>
virtual void validate() {
OperationContextNoop txn;
// Different top means we rebalanced
- ASSERT_NOT_EQUALS(this->_oldTop,
+ ASSERT_BSONOBJ_NE(this->_oldTop,
this->getKey(this->_helper.headManager.getHead(&txn), 0).data.toBson());
}
};
diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp
index 466c0016037..61379a2e894 100644
--- a/src/mongo/db/storage/storage_engine_metadata_test.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp
@@ -191,7 +191,7 @@ TEST(StorageEngineMetadataTest, Roundtrip) {
StorageEngineMetadata metadata(tempDir.path());
ASSERT_OK(metadata.read());
ASSERT_EQUALS("storageEngine1", metadata.getStorageEngine());
- ASSERT_EQUALS(options, metadata.getStorageEngineOptions());
+ ASSERT_BSONOBJ_EQ(options, metadata.getStorageEngineOptions());
metadata.reset();
ASSERT_TRUE(metadata.getStorageEngine().empty());
diff --git a/src/mongo/dbtests/chunktests.cpp b/src/mongo/dbtests/chunktests.cpp
index cadf44ae4f0..86a95684387 100644
--- a/src/mongo/dbtests/chunktests.cpp
+++ b/src/mongo/dbtests/chunktests.cpp
@@ -94,7 +94,7 @@ public:
for (const ShardId& shardId : shardIds) {
b << shardId;
}
- ASSERT_EQUALS(expectedShardNames(), b.arr());
+ ASSERT_BSONOBJ_EQ(expectedShardNames(), b.arr());
}
protected:
diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp
index aee6a0f406a..f7027bb8f91 100644
--- a/src/mongo/dbtests/dbhelper_tests.cpp
+++ b/src/mongo/dbtests/dbhelper_tests.cpp
@@ -78,7 +78,7 @@ public:
}
// Check that the expected documents remain.
- ASSERT_EQUALS(expected(), docs(&txn));
+ ASSERT_BSONOBJ_EQ(expected(), docs(&txn));
}
private:
diff --git a/src/mongo/dbtests/extensions_callback_real_test.cpp b/src/mongo/dbtests/extensions_callback_real_test.cpp
index abe62080fbe..60b1c662f79 100644
--- a/src/mongo/dbtests/extensions_callback_real_test.cpp
+++ b/src/mongo/dbtests/extensions_callback_real_test.cpp
@@ -256,7 +256,7 @@ TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithSameScopeHaveSameBSONRepr
BSONObjBuilder builder2;
expr2->serialize(&builder2);
- ASSERT_EQ(builder1.obj(), builder2.obj());
+ ASSERT_BSONOBJ_EQ(builder1.obj(), builder2.obj());
}
TEST_F(ExtensionsCallbackRealTest,
@@ -275,7 +275,7 @@ TEST_F(ExtensionsCallbackRealTest,
BSONObjBuilder builder2;
expr2->serialize(&builder2);
- ASSERT_NE(builder1.obj(), builder2.obj());
+ ASSERT_BSONOBJ_NE(builder1.obj(), builder2.obj());
}
TEST_F(ExtensionsCallbackRealTest, WhereExpressionsWithSameScopeAreEquivalent) {
diff --git a/src/mongo/dbtests/index_access_method_test.cpp b/src/mongo/dbtests/index_access_method_test.cpp
index 5460ce410f0..59c769d6e87 100644
--- a/src/mongo/dbtests/index_access_method_test.cpp
+++ b/src/mongo/dbtests/index_access_method_test.cpp
@@ -194,14 +194,14 @@ TEST(IndexAccessMethodSetDifference, ShouldNotReportOverlapsFromNonDisjointSets)
for (auto&& obj : diff.first) {
ASSERT(left.find(obj) != left.end());
// Make sure it's not in the intersection.
- ASSERT(obj != BSON("" << 1));
- ASSERT(obj != BSON("" << 4));
+ ASSERT_BSONOBJ_NE(obj, BSON("" << 1));
+ ASSERT_BSONOBJ_NE(obj, BSON("" << 4));
}
for (auto&& obj : diff.second) {
ASSERT(right.find(obj) != right.end());
// Make sure it's not in the intersection.
- ASSERT(obj != BSON("" << 1));
- ASSERT(obj != BSON("" << 4));
+ ASSERT_BSONOBJ_NE(obj, BSON("" << 1));
+ ASSERT_BSONOBJ_NE(obj, BSON("" << 4));
}
}
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 99f254cb60d..6b9ee3e23ae 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -967,11 +967,11 @@ protected:
class IndexCatatalogFixIndexKey {
public:
void run() {
- ASSERT_EQUALS(BSON("x" << 1), IndexCatalog::fixIndexKey(BSON("x" << 1)));
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), IndexCatalog::fixIndexKey(BSON("x" << 1)));
- ASSERT_EQUALS(BSON("_id" << 1), IndexCatalog::fixIndexKey(BSON("_id" << 1)));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), IndexCatalog::fixIndexKey(BSON("_id" << 1)));
- ASSERT_EQUALS(BSON("_id" << 1), IndexCatalog::fixIndexKey(BSON("_id" << true)));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), IndexCatalog::fixIndexKey(BSON("_id" << true)));
}
};
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index f9583ea7b36..d2182b557fd 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -569,7 +569,7 @@ public:
ASSERT(tmp.valid());
ASSERT(tmp.hasField("a"));
ASSERT(!tmp.hasField("b"));
- ASSERT(tmp == BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(tmp, BSON("a" << 1));
bb << "b" << 2;
BSONObj obj = bb.obj();
@@ -577,7 +577,7 @@ public:
ASSERT(obj.valid());
ASSERT(obj.hasField("a"));
ASSERT(obj.hasField("b"));
- ASSERT(obj == BSON("a" << 1 << "b" << 2));
+ ASSERT_BSONOBJ_EQ(obj, BSON("a" << 1 << "b" << 2));
}
{
BSONObjBuilder bb;
@@ -587,7 +587,7 @@ public:
ASSERT(tmp.valid());
ASSERT(tmp.hasField("a"));
ASSERT(!tmp.hasField("b"));
- ASSERT(tmp == BSON("a" << BSON("$gt" << 1)));
+ ASSERT_BSONOBJ_EQ(tmp, BSON("a" << BSON("$gt" << 1)));
bb << "b" << LT << 2;
BSONObj obj = bb.obj();
@@ -596,7 +596,7 @@ public:
ASSERT(obj.valid());
ASSERT(obj.hasField("a"));
ASSERT(obj.hasField("b"));
- ASSERT(obj == BSON("a" << BSON("$gt" << 1) << "b" << BSON("$lt" << 2)));
+ ASSERT_BSONOBJ_EQ(obj, BSON("a" << BSON("$gt" << 1) << "b" << BSON("$lt" << 2)));
}
{
BSONObjBuilder bb(32);
@@ -606,7 +606,7 @@ public:
ASSERT(tmp.valid());
ASSERT(tmp.hasField("a"));
ASSERT(!tmp.hasField("b"));
- ASSERT(tmp == BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(tmp, BSON("a" << 1));
// force a realloc
BSONArrayBuilder arr;
@@ -939,11 +939,11 @@ public:
BSONObj C = c.obj();
// test that nulls are ok within bson strings
- ASSERT(!(A == B));
- ASSERT(A > B);
+ ASSERT_BSONOBJ_NE(A, B);
+ ASSERT_BSONOBJ_GT(A, B);
- ASSERT(!(B == C));
- ASSERT(C > B);
+ ASSERT_BSONOBJ_NE(B, C);
+ ASSERT_BSONOBJ_GT(C, B);
// check iteration is ok
ASSERT(B["z"].Bool() && A["z"].Bool() && C["z"].Bool());
@@ -979,7 +979,7 @@ public:
BSONObj foo = BSON("foo" << 1);
b.appendAs(foo.firstElement(), "bar");
}
- ASSERT_EQUALS(BSON("bar" << 1), b.done());
+ ASSERT_BSONOBJ_EQ(BSON("bar" << 1), b.done());
}
};
@@ -1388,8 +1388,8 @@ public:
ASSERT_EQUALS(oid.asDateT(), now);
ASSERT_EQUALS(min.asDateT(), now);
ASSERT_EQUALS(max.asDateT(), now);
- ASSERT_LT(BSON("" << min), BSON("" << oid));
- ASSERT_GT(BSON("" << max), BSON("" << oid));
+ ASSERT_BSONOBJ_LT(BSON("" << min), BSON("" << oid));
+ ASSERT_BSONOBJ_GT(BSON("" << max), BSON("" << oid));
}
};
@@ -1853,8 +1853,8 @@ public:
struct NestedDottedConversions {
void t(const BSONObj& nest, const BSONObj& dot) {
- ASSERT_EQUALS(nested2dotted(nest), dot);
- ASSERT_EQUALS(nest, dotted2nested(dot));
+ ASSERT_BSONOBJ_EQ(nested2dotted(nest), dot);
+ ASSERT_BSONOBJ_EQ(nest, dotted2nested(dot));
}
void run() {
@@ -1927,7 +1927,7 @@ struct BSONArrayBuilderTest {
BSONObj obj = objb.obj();
BSONArray arr = arrb.arr();
- ASSERT_EQUALS(obj, arr);
+ ASSERT_BSONOBJ_EQ(obj, arr);
BSONObj o = BSON("obj" << obj << "arr" << arr << "arr2" << BSONArray(obj) << "regex"
<< BSONRegEx("reg", "x"));
@@ -1954,7 +1954,7 @@ struct ArrayMacroTest {
<< "baz"
<< "qux")));
- ASSERT_EQUALS(arr, obj);
+ ASSERT_BSONOBJ_EQ(arr, obj);
ASSERT_EQUALS(arr["2"].type(), Object);
ASSERT_EQUALS(arr["2"].embeddedObject()["foo"].type(), Array);
}
@@ -2141,7 +2141,7 @@ public:
char* crap = (char*)mongoMalloc(x.objsize());
memcpy(crap, x.objdata(), x.objsize());
BSONObj y(crap);
- ASSERT_EQUALS(x, y);
+ ASSERT_BSONOBJ_EQ(x, y);
free(crap);
}
@@ -2218,7 +2218,7 @@ public:
BSONObj y = BSON("a" << BSON("b" << 1.0));
keyTest(x);
keyTest(y);
- ASSERT_EQUALS(x, y);
+ ASSERT_BSONOBJ_EQ(x, y);
ASSERT_EQUALS(0, x.woCompare(y));
}
};
@@ -2249,7 +2249,7 @@ public:
ASSERT_EQUALS(3, i.next().numberInt());
ASSERT(!i.more());
- ASSERT_EQUALS(BSON("x" << 1 << "y" << 2 << "z" << 3), b.obj());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1 << "y" << 2 << "z" << 3), b.obj());
}
}
};
@@ -2279,13 +2279,13 @@ public:
BSONObj e = BSON("a" << 4);
BSONObj f = BSON("a" << 4);
- ASSERT(!(a < b));
- ASSERT(a <= b);
- ASSERT(a < c);
+ ASSERT(!SimpleBSONObjComparator::kInstance.evaluate((a < b)));
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(a <= b));
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(a < c));
- ASSERT(f > d);
- ASSERT(f >= e);
- ASSERT(!(f > e));
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(f > d));
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(f >= e));
+ ASSERT(!(SimpleBSONObjComparator::kInstance.evaluate(f > e)));
}
};
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 0e25183116b..5bd90289d79 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -736,7 +736,7 @@ public:
ASSERT(s->exec((string) "y = " + outString, "foo2", false, true, false));
BSONObj out = s->getObject("y");
- ASSERT_EQUALS(in, out);
+ ASSERT_BSONOBJ_EQ(in, out);
}
};
@@ -847,7 +847,7 @@ public:
ASSERT(s->exec((string) "y = " + outString, "foo2", false, true, false));
BSONObj out = s->getObject("y");
- ASSERT_EQUALS(in, out);
+ ASSERT_BSONOBJ_EQ(in, out);
}
};
@@ -2183,7 +2183,7 @@ public:
{
BSONObjBuilder b;
s->append(b, "z", "x");
- ASSERT_EQUALS(BSON("z" << 5), b.obj());
+ ASSERT_BSONOBJ_EQ(BSON("z" << 5), b.obj());
}
s->invokeSafe("x = function(){ return 17; }", 0, 0);
diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp
index 1df5d5b7dee..5005aec6f6c 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.cpp
+++ b/src/mongo/dbtests/mock/mock_replica_set.cpp
@@ -304,7 +304,9 @@ void MockReplicaSet::mockReplSetGetStatusCmd() {
hostsField.push_back(hostMemberBuilder.obj());
}
- sort(hostsField.begin(), hostsField.end());
+ std::sort(hostsField.begin(),
+ hostsField.end(),
+ SimpleBSONObjComparator::kInstance.makeLessThan());
// TODO: syncingTo
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index 65f47ba7429..b487bc2c655 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -247,7 +247,7 @@ public:
ASSERT_TRUE(resultMember->obj.value().isOwned());
// Should be the old value.
- ASSERT_EQUALS(resultMember->obj.value(), oldDoc);
+ ASSERT_BSONOBJ_EQ(resultMember->obj.value(), oldDoc);
// Should have done the delete.
ASSERT_EQUALS(stats->docsDeleted, 1U);
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index 3053a0d7edc..9e930375317 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -104,7 +104,7 @@ public:
// Compare the results against what we expect.
BSONObj expectedObj = fromjson(expectedStr);
- ASSERT_EQ(outputObj, expectedObj);
+ ASSERT_BSONOBJ_EQ(outputObj, expectedObj);
}
protected:
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index 9167ba0023b..3d22cf19fd9 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -168,7 +168,7 @@ public:
static_cast<const IndexScanStats*>(ixscan->getSpecificStats());
ASSERT(stats);
ASSERT_TRUE(stats->isMultiKey);
- ASSERT_EQUALS(stats->keyPattern, BSON("x" << 1));
+ ASSERT_BSONOBJ_EQ(stats->keyPattern, BSON("x" << 1));
}
};
@@ -188,10 +188,10 @@ public:
// Expect to get key {'': 5} and then key {'': 6}.
WorkingSetMember* member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 5));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 5));
member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 6));
// Save state and insert a few indexed docs.
ixscan->saveState();
@@ -201,7 +201,7 @@ public:
member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 10));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 10));
WorkingSetID id;
ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
@@ -225,7 +225,7 @@ public:
// Expect to get key {'': 6}.
WorkingSetMember* member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 6));
// Save state and insert an indexed doc.
ixscan->saveState();
@@ -234,7 +234,7 @@ public:
member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 7));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 7));
WorkingSetID id;
ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
@@ -258,7 +258,7 @@ public:
// Expect to get key {'': 6}.
WorkingSetMember* member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 6));
// Save state and insert an indexed doc.
ixscan->saveState();
@@ -288,10 +288,10 @@ public:
// Expect to get key {'': 10} and then {'': 8}.
WorkingSetMember* member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 10));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 10));
member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 8));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 8));
// Save state and insert an indexed doc.
ixscan->saveState();
@@ -302,7 +302,7 @@ public:
// Ensure that we don't erroneously return {'': 9} or {'':3}.
member = getNext(ixscan.get());
ASSERT_EQ(WorkingSetMember::RID_AND_IDX, member->getState());
- ASSERT_EQ(member->keyData[0].keyData, BSON("" << 6));
+ ASSERT_BSONOBJ_EQ(member->keyData[0].keyData, BSON("" << 6));
WorkingSetID id;
ASSERT_EQ(PlanStage::IS_EOF, ixscan->work(&id));
diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp
index 3e95a7da805..d2a9139b2c6 100644
--- a/src/mongo/dbtests/query_stage_merge_sort.cpp
+++ b/src/mongo/dbtests/query_stage_merge_sort.cpp
@@ -706,7 +706,7 @@ public:
member = getNextResult(&ws, ms.get());
ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
ASSERT_EQ(member->recordId, *it);
- ASSERT_EQ(member->obj.value(), BSON("_id" << 4 << "a" << 4));
+ ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 4 << "a" << 4));
++it;
// Doc {a: 5} gets invalidated by an update.
@@ -715,14 +715,14 @@ public:
// Invalidated doc {a: 5} should still get returned.
member = getNextResult(&ws, ms.get());
ASSERT_EQ(member->getState(), WorkingSetMember::OWNED_OBJ);
- ASSERT_EQ(member->obj.value(), BSON("_id" << 5 << "a" << 5));
+ ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 5 << "a" << 5));
++it;
// We correctly dedup the invalidated doc and return {a: 6} next.
member = getNextResult(&ws, ms.get());
ASSERT_EQ(member->getState(), WorkingSetMember::RID_AND_OBJ);
ASSERT_EQ(member->recordId, *it);
- ASSERT_EQ(member->obj.value(), BSON("_id" << 6 << "a" << 6));
+ ASSERT_BSONOBJ_EQ(member->obj.value(), BSON("_id" << 6 << "a" << 6));
}
private:
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 04fdbb1c6b2..53c839d0fd4 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -564,8 +564,10 @@ public:
++numResults;
WorkingSetMember* member = ws.get(id);
ASSERT(member->hasObj());
- ASSERT(member->obj.value() == BSON("_id" << 1 << "a" << 1 << "b" << 2) ||
- member->obj.value() == BSON("_id" << 3 << "a" << 1 << "c" << 3));
+ ASSERT(SimpleBSONObjComparator::kInstance.evaluate(
+ member->obj.value() == BSON("_id" << 1 << "a" << 1 << "b" << 2)) ||
+ SimpleBSONObjComparator::kInstance.evaluate(
+ member->obj.value() == BSON("_id" << 3 << "a" << 1 << "c" << 3)));
}
}
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index 8c2897d7ac1..6d2b2842465 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -234,7 +234,7 @@ public:
// Expect a single document, {_id: 0, x: 1, y: 2}.
ASSERT_EQUALS(1U, objs.size());
- ASSERT_EQUALS(objs[0], fromjson("{_id: 0, x: 1, y: 2}"));
+ ASSERT_BSONOBJ_EQ(objs[0], fromjson("{_id: 0, x: 1, y: 2}"));
}
}
};
@@ -429,13 +429,13 @@ public:
ASSERT_TRUE(resultMember->obj.value().isOwned());
// Should be the old value.
- ASSERT_EQUALS(resultMember->obj.value(), oldDoc);
+ ASSERT_BSONOBJ_EQ(resultMember->obj.value(), oldDoc);
// Should have done the update.
BSONObj newDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex << "x" << 0);
vector<BSONObj> objs;
getCollContents(coll, &objs);
- ASSERT_EQUALS(objs[targetDocIndex], newDoc);
+ ASSERT_BSONOBJ_EQ(objs[targetDocIndex], newDoc);
// That should be it.
id = WorkingSet::INVALID_ID;
@@ -518,12 +518,12 @@ public:
// Should be the new value.
BSONObj newDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex << "x" << 0);
- ASSERT_EQUALS(resultMember->obj.value(), newDoc);
+ ASSERT_BSONOBJ_EQ(resultMember->obj.value(), newDoc);
// Should have done the update.
vector<BSONObj> objs;
getCollContents(coll, &objs);
- ASSERT_EQUALS(objs[targetDocIndex], newDoc);
+ ASSERT_BSONOBJ_EQ(objs[targetDocIndex], newDoc);
// That should be it.
id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 9523e4e5820..cdcca08b505 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -136,7 +136,7 @@ public:
ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true));
ASSERT_EQUALS(string("b"), ret.firstElement().fieldName());
// Cross check with findOne() returning location.
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(
ret,
_collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
}
@@ -152,7 +152,7 @@ public:
// Check findOne() returning object, allowing unindexed scan.
ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false));
// Check findOne() returning location, allowing unindexed scan.
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(
ret,
_collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
@@ -166,7 +166,7 @@ public:
// Check findOne() returning object, requiring indexed scan with index.
ASSERT(Helpers::findOne(&_txn, _collection, query, ret, true));
// Check findOne() returning location, requiring indexed scan with index.
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(
ret,
_collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, true)).value());
}
@@ -208,7 +208,7 @@ public:
BSONObj ret;
ASSERT(Helpers::findOne(&_txn, _collection, query, ret, false));
ASSERT(ret.isEmpty());
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(
ret,
_collection->docFor(&_txn, Helpers::findOne(&_txn, _collection, query, false)).value());
}
@@ -1730,8 +1730,8 @@ namespace queryobjecttests {
class names1 {
public:
void run() {
- ASSERT_EQUALS(BSON("x" << 1), QUERY("query" << BSON("x" << 1)).getFilter());
- ASSERT_EQUALS(BSON("x" << 1), QUERY("$query" << BSON("x" << 1)).getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), QUERY("query" << BSON("x" << 1)).getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), QUERY("$query" << BSON("x" << 1)).getFilter());
}
};
}
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 10996568f12..326dffb364b 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -130,7 +130,7 @@ protected:
::mongo::log() << "expected: " << expected.toString() << ", got: " << got.toString()
<< endl;
}
- ASSERT_EQUALS(expected, got);
+ ASSERT_BSONOBJ_EQ(expected, got);
}
BSONObj oneOp() const {
return _client.findOne(cllNS(), BSONObj());
diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp
index f437cb68c64..99d90fec617 100644
--- a/src/mongo/dbtests/rollbacktests.cpp
+++ b/src/mongo/dbtests/rollbacktests.cpp
@@ -97,7 +97,7 @@ void assertOnlyRecord(OperationContext* txn, const NamespaceString& nss, const B
auto record = cursor->next();
ASSERT(record);
- ASSERT_EQ(data, record->data.releaseToBson());
+ ASSERT_BSONOBJ_EQ(data, record->data.releaseToBson());
ASSERT(!cursor->next());
}
diff --git a/src/mongo/dbtests/sort_key_generator_test.cpp b/src/mongo/dbtests/sort_key_generator_test.cpp
index 84a02e9525d..c808a52623a 100644
--- a/src/mongo/dbtests/sort_key_generator_test.cpp
+++ b/src/mongo/dbtests/sort_key_generator_test.cpp
@@ -102,13 +102,13 @@ BSONObj extractSortKeyCovered(const char* sortSpec,
TEST(SortKeyGeneratorTest, SortKeyNormal) {
BSONObj actualOut = extractSortKey("{a: 1}", "{_id: 0, a: 5}", "", nullptr);
BSONObj expectedOut = BSON("" << 5);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyNormal2) {
BSONObj actualOut = extractSortKey("{a: 1}", "{_id: 0, z: 10, a: 6, b: 16}", "", nullptr);
BSONObj expectedOut = BSON("" << 6);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyString) {
@@ -116,28 +116,28 @@ TEST(SortKeyGeneratorTest, SortKeyString) {
extractSortKey("{a: 1}", "{_id: 0, z: 'thing1', a: 'thing2', b: 16}", "", nullptr);
BSONObj expectedOut = BSON(""
<< "thing2");
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyCompound) {
BSONObj actualOut = extractSortKey(
"{a: 1, b: 1}", "{_id: 0, z: 'thing1', a: 99, c: {a: 4}, b: 16}", "", nullptr);
BSONObj expectedOut = BSON("" << 99 << "" << 16);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyEmbedded) {
BSONObj actualOut = extractSortKey(
"{'c.a': 1, b: 1}", "{_id: 0, z: 'thing1', a: 99, c: {a: 4}, b: 16}", "", nullptr);
BSONObj expectedOut = BSON("" << 4 << "" << 16);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyArray) {
BSONObj actualOut = extractSortKey(
"{'c': 1, b: 1}", "{_id: 0, z: 'thing1', a: 99, c: [2, 4, 1], b: 16}", "", nullptr);
BSONObj expectedOut = BSON("" << 1 << "" << 16);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyCoveredNormal) {
@@ -145,7 +145,7 @@ TEST(SortKeyGeneratorTest, SortKeyCoveredNormal) {
BSONObj actualOut = extractSortKeyCovered(
"{a: 1}", IndexKeyDatum(BSON("a" << 1), BSON("" << 5), nullptr), collator);
BSONObj expectedOut = BSON("" << 5);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyCoveredEmbedded) {
@@ -155,7 +155,7 @@ TEST(SortKeyGeneratorTest, SortKeyCoveredEmbedded) {
IndexKeyDatum(BSON("a.c" << 1 << "c" << 1), BSON("" << 5 << "" << 6), nullptr),
collator);
BSONObj expectedOut = BSON("" << 5);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyCoveredCompound) {
@@ -165,7 +165,7 @@ TEST(SortKeyGeneratorTest, SortKeyCoveredCompound) {
IndexKeyDatum(BSON("a" << 1 << "c" << 1), BSON("" << 5 << "" << 6), nullptr),
collator);
BSONObj expectedOut = BSON("" << 5 << "" << 6);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyCoveredCompound2) {
@@ -176,7 +176,7 @@ TEST(SortKeyGeneratorTest, SortKeyCoveredCompound2) {
nullptr),
collator);
BSONObj expectedOut = BSON("" << 5 << "" << 6);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyCoveredCompound3) {
@@ -188,7 +188,7 @@ TEST(SortKeyGeneratorTest, SortKeyCoveredCompound3) {
nullptr),
collator);
BSONObj expectedOut = BSON("" << 6 << "" << 4);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, ExtractStringSortKeyWithCollatorUsesComparisonKey) {
@@ -197,14 +197,14 @@ TEST(SortKeyGeneratorTest, ExtractStringSortKeyWithCollatorUsesComparisonKey) {
extractSortKey("{a: 1}", "{_id: 0, z: 'thing1', a: 'thing2', b: 16}", "", &collator);
BSONObj expectedOut = BSON(""
<< "2gniht");
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, CollatorHasNoEffectWhenExtractingNonStringSortKey) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
BSONObj actualOut = extractSortKey("{a: 1}", "{_id: 0, z: 10, a: 6, b: 16}", "", &collator);
BSONObj expectedOut = BSON("" << 6);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, CollatorHasNoAffectWhenExtractingCoveredSortKey) {
@@ -217,14 +217,14 @@ TEST(SortKeyGeneratorTest, CollatorHasNoAffectWhenExtractingCoveredSortKey) {
&collator);
BSONObj expectedOut = BSON(""
<< "foo");
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, SortKeyGenerationForArraysUsesTheQueryPredicate) {
BSONObj actualOut =
extractSortKey("{a: -1}", "{_id: 0, a: [1, 2, 3, 4]}", "{a: {$lt: 3}}", nullptr);
BSONObj expectedOut = BSON("" << 2);
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, EnsureSortKeyGenerationForArraysRespectsCollation) {
@@ -233,7 +233,7 @@ TEST(SortKeyGeneratorTest, EnsureSortKeyGenerationForArraysRespectsCollation) {
extractSortKey("{a: 1}", "{_id: 0, a: ['aaz', 'zza', 'yya', 'zzb']}", "", &collator);
BSONObj expectedOut = BSON(""
<< "ayy");
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
TEST(SortKeyGeneratorTest, EnsureSortKeyGenerationForArraysWithPredicateRespectsCollation) {
@@ -242,7 +242,7 @@ TEST(SortKeyGeneratorTest, EnsureSortKeyGenerationForArraysWithPredicateRespects
"{a: 1}", "{_id: 0, a: ['aaz', 'zza', 'yya', 'zzb']}", "{a: {$gt: 'yya'}}", &collator);
BSONObj expectedOut = BSON(""
<< "azz");
- ASSERT_EQ(actualOut, expectedOut);
+ ASSERT_BSONOBJ_EQ(actualOut, expectedOut);
}
} // namespace
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index a8075f42cd9..494c590f1d5 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -347,7 +347,7 @@ public:
_client.insert(ns(), initial);
_client.update(
ns(), initial, BSON("$setOnInsert" << BSON("a.b" << 1) << "$set" << BSON("d" << 1)));
- ASSERT_EQUALS(_client.findOne(ns(), initial), final);
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), initial), final);
}
};
@@ -497,7 +497,7 @@ public:
Query(),
BSON("$set" << BSON("z.0"
<< "a")));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,z:['a','b']}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,z:['a','b']}"));
}
};
@@ -515,7 +515,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), Query(), BSON("$set" << BSON("a" << 2 << "a.b" << 1)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0}"));
}
};
@@ -557,7 +557,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
_client.update(ns(), Query(), BSON("$push" << BSON("a" << 5)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,5]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,5]}"));
}
};
@@ -584,7 +584,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), Query(), BSON("$push" << BSON("a" << 5)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[5]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[5]}"));
}
};
@@ -650,7 +650,7 @@ public:
// { $push : { a : { $each : [ 2, 3 ] } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2 << 3));
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2,3]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2,3]}"));
}
};
@@ -661,7 +661,7 @@ public:
// { $push : { a : { $each : [ 1, 2, 3 ] } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3));
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2,3]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2,3]}"));
}
};
@@ -672,7 +672,7 @@ public:
// { $push : { a : { $each : [ 2 ] , $slice : -3 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -3);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
}
};
@@ -683,7 +683,7 @@ public:
// { $push : { a : { $each : [ 2 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -2);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
}
};
@@ -694,7 +694,7 @@ public:
// { $push : { a : { $each : [ 2 , 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2 << 3) << "$slice" << -2);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -705,7 +705,7 @@ public:
// { $push : { a : { $each : [ 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -716,7 +716,7 @@ public:
// { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}"));
}
};
@@ -727,7 +727,7 @@ public:
// { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}"));
}
};
@@ -738,7 +738,7 @@ public:
// { $push : { a : { $each : [ 1 , 2 ] , $slice : -3 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2) << "$slice" << -3);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}"));
}
};
@@ -749,7 +749,7 @@ public:
// { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -760,7 +760,7 @@ public:
// { $push : { a : { $each : [ 1 ] , $slice : -3 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1) << "$slice" << -3);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1]}"));
}
};
@@ -771,7 +771,7 @@ public:
// { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -783,7 +783,7 @@ public:
BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
BSONObj objB = BSON("$each" << BSON_ARRAY(6) << "$slice" << -1);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "b" << objB)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[6]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[6]}"));
}
};
@@ -794,7 +794,7 @@ public:
// { $push : { a : { $each : [ 5 ] , $slice : -2 } , { b : 4 } }
BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "b" << 4)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[3,4]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[3,4]}"));
}
};
@@ -873,7 +873,7 @@ public:
// { $push : { a : { $each : [ 3 ], $slice : -2.0 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2.0);
_client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj)));
- ASSERT_EQUALS(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -1014,7 +1014,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
@@ -1022,7 +1022,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
}
}
@@ -1059,7 +1059,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
@@ -1067,7 +1067,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
}
}
@@ -1103,14 +1103,14 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case BOTTOMK_ASC:
@@ -1150,7 +1150,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
}
};
@@ -1183,7 +1183,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
}
};
@@ -1220,7 +1220,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
@@ -1228,7 +1228,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
}
}
@@ -1265,14 +1265,14 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case BOTTOMK_ASC:
@@ -1315,7 +1315,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
@@ -1323,7 +1323,7 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
}
}
@@ -1360,14 +1360,14 @@ public:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
_client.update(ns(), Query(), getUpdate(i));
result = _client.findOne(ns(), Query());
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
break;
case BOTTOMK_ASC:
@@ -1417,9 +1417,9 @@ public:
sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("b" << 1 << "a" << -1)));
- ASSERT_EQUALS(workArea[0], objs[1]);
- ASSERT_EQUALS(workArea[1], objs[0]);
- ASSERT_EQUALS(workArea[2], objs[2]);
+ ASSERT_BSONOBJ_EQ(workArea[0], objs[1]);
+ ASSERT_BSONOBJ_EQ(workArea[1], objs[0]);
+ ASSERT_BSONOBJ_EQ(workArea[2], objs[2]);
}
};
@@ -1438,9 +1438,9 @@ public:
sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("a" << 1 << "b" << 1)));
- ASSERT_EQUALS(workArea[0], objs[0]);
- ASSERT_EQUALS(workArea[1], objs[2]);
- ASSERT_EQUALS(workArea[2], objs[1]);
+ ASSERT_BSONOBJ_EQ(workArea[0], objs[0]);
+ ASSERT_BSONOBJ_EQ(workArea[1], objs[2]);
+ ASSERT_BSONOBJ_EQ(workArea[2], objs[1]);
}
};
@@ -1459,9 +1459,9 @@ public:
sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("a" << 1 << "b" << 1)));
- ASSERT_EQUALS(workArea[0], objs[0]);
- ASSERT_EQUALS(workArea[1], objs[2]);
- ASSERT_EQUALS(workArea[2], objs[1]);
+ ASSERT_BSONOBJ_EQ(workArea[0], objs[0]);
+ ASSERT_BSONOBJ_EQ(workArea[1], objs[2]);
+ ASSERT_BSONOBJ_EQ(workArea[2], objs[1]);
}
};
@@ -1480,9 +1480,9 @@ public:
sort(workArea.begin(), workArea.end(), ProjectKeyCmp(BSON("b" << 1 << "c" << 1)));
- ASSERT_EQUALS(workArea[0], objs[1]);
- ASSERT_EQUALS(workArea[1], objs[0]);
- ASSERT_EQUALS(workArea[2], objs[2]);
+ ASSERT_BSONOBJ_EQ(workArea[0], objs[1]);
+ ASSERT_BSONOBJ_EQ(workArea[1], objs[0]);
+ ASSERT_BSONOBJ_EQ(workArea[2], objs[2]);
}
};
@@ -1501,15 +1501,15 @@ public:
sort(workArea.begin(), workArea.end(), ProjectKeyCmp(fromjson("{'a.b.d':-1}")));
- ASSERT_EQUALS(workArea[0], objs[1]);
- ASSERT_EQUALS(workArea[1], objs[2]);
- ASSERT_EQUALS(workArea[2], objs[0]);
+ ASSERT_BSONOBJ_EQ(workArea[0], objs[1]);
+ ASSERT_BSONOBJ_EQ(workArea[1], objs[2]);
+ ASSERT_BSONOBJ_EQ(workArea[2], objs[0]);
sort(workArea.begin(), workArea.end(), ProjectKeyCmp(fromjson("{'a.b':1}")));
- ASSERT_EQUALS(workArea[0], objs[1]);
- ASSERT_EQUALS(workArea[1], objs[0]);
- ASSERT_EQUALS(workArea[2], objs[2]);
+ ASSERT_BSONOBJ_EQ(workArea[0], objs[1]);
+ ASSERT_BSONOBJ_EQ(workArea[1], objs[0]);
+ ASSERT_BSONOBJ_EQ(workArea[2], objs[2]);
}
};
@@ -1527,7 +1527,7 @@ public:
<< BSON("a..d" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
// { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a.:1} } } }
@@ -1535,28 +1535,28 @@ public:
<< BSON("a." << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
// { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.b:1} } } }
pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON(".b" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
// { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.:1} } } }
pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON("." << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
// { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {'':1} } } }
pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON("" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1570,7 +1570,7 @@ public:
BSON("$each" << BSON_ARRAY(3) << "$slice" << -2 << "$sort" << BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1584,7 +1584,7 @@ public:
BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << 2);
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1598,7 +1598,7 @@ public:
<< BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1612,7 +1612,7 @@ public:
<< BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1626,7 +1626,7 @@ public:
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1640,7 +1640,7 @@ public:
<< BSON_ARRAY(2 << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1654,7 +1654,7 @@ public:
<< BSON("a" << 10));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1668,7 +1668,7 @@ public:
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1683,7 +1683,7 @@ public:
<< BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
- ASSERT_EQUALS(result, expected);
+ ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1720,9 +1720,10 @@ public:
ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("a" << 1)));
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), Query(), fromjson("{$set:{'a.b':4}}"));
- ASSERT_EQUALS(fromjson("{'_id':0,a:{b:4}}"), _client.findOne(ns(), Query()));
- ASSERT_EQUALS(fromjson("{'_id':0,a:{b:4}}"),
- _client.findOne(ns(), fromjson("{'a.b':4}"))); // make sure the index works
+ ASSERT_BSONOBJ_EQ(fromjson("{'_id':0,a:{b:4}}"), _client.findOne(ns(), Query()));
+ ASSERT_BSONOBJ_EQ(
+ fromjson("{'_id':0,a:{b:4}}"),
+ _client.findOne(ns(), fromjson("{'a.b':4}"))); // make sure the index works
}
};
@@ -1759,11 +1760,11 @@ public:
_client.insert(
ns(), BSON("_id" << 0 << "a" << 1 << "x" << BSONObj() << "x" << BSONObj() << "z" << 5));
_client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1)));
- ASSERT_EQUALS(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
- << BSONObj()
- << "z"
- << 5),
- _client.findOne(ns(), BSONObj()));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
+ << BSONObj()
+ << "z"
+ << 5),
+ _client.findOne(ns(), BSONObj()));
}
};
@@ -1775,11 +1776,11 @@ public:
ns(), BSON("_id" << 0 << "x" << BSONObj() << "x" << BSONObj() << "x" << BSONObj()));
_client.update(
ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1)));
- ASSERT_EQUALS(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
- << BSONObj()
- << "x"
- << BSONObj()),
- _client.findOne(ns(), BSONObj()));
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
+ << BSONObj()
+ << "x"
+ << BSONObj()),
+ _client.findOne(ns(), BSONObj()));
}
};
@@ -1820,7 +1821,7 @@ protected:
_client.dropCollection(ns());
insert(initial);
update(mod);
- ASSERT_EQUALS(after, findOne());
+ ASSERT_BSONOBJ_EQ(after, findOne());
_client.dropCollection(ns());
}
diff --git a/src/mongo/executor/network_interface_asio_test.cpp b/src/mongo/executor/network_interface_asio_test.cpp
index 29d24e2ed40..0c1117a1693 100644
--- a/src/mongo/executor/network_interface_asio_test.cpp
+++ b/src/mongo/executor/network_interface_asio_test.cpp
@@ -436,8 +436,8 @@ TEST_F(NetworkInterfaceASIOTest, StartCommand) {
auto& res = deferred.get();
ASSERT(res.elapsedMillis);
uassertStatusOK(res.status);
- ASSERT_EQ(res.data, expectedCommandReply);
- ASSERT_EQ(res.metadata, expectedMetadata);
+ ASSERT_BSONOBJ_EQ(res.data, expectedCommandReply);
+ ASSERT_BSONOBJ_EQ(res.metadata, expectedMetadata);
assertNumOps(0u, 0u, 0u, 1u);
}
@@ -780,7 +780,7 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, MakeRequestReturnsNone) {
// Simulate user command.
stream->simulateServer(rpc::Protocol::kOpCommandV1,
[&](RemoteCommandRequest request) -> RemoteCommandResponse {
- ASSERT_EQ(commandRequest, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(commandRequest, request.cmdObj);
RemoteCommandResponse response;
response.data = commandReply;
@@ -792,9 +792,9 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, MakeRequestReturnsNone) {
auto& result = deferred.get();
ASSERT(result.isOK());
- ASSERT(result.data == commandReply);
+ ASSERT_BSONOBJ_EQ(result.data, commandReply);
ASSERT(result.elapsedMillis);
- ASSERT(result.metadata == metadata);
+ ASSERT_BSONOBJ_EQ(result.metadata, metadata);
assertNumOps(0u, 0u, 0u, 1u);
}
@@ -829,7 +829,8 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, HandleReplyReturnsError) {
[&](const HostAndPort& remoteHost, RemoteCommandResponse&& response) {
handleReplyCalled = true;
handleReplyArgumentCorrect =
- (response.data == hookCommandReply) && (response.metadata == hookReplyMetadata);
+ SimpleBSONObjComparator::kInstance.evaluate(response.data == hookCommandReply) &&
+ SimpleBSONObjComparator::kInstance.evaluate(response.metadata == hookReplyMetadata);
return handleReplyError;
}));
@@ -850,8 +851,8 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, HandleReplyReturnsError) {
// Simulate hook reply
stream->simulateServer(rpc::Protocol::kOpCommandV1,
[&](RemoteCommandRequest request) -> RemoteCommandResponse {
- ASSERT_EQ(request.cmdObj, hookCommandRequest);
- ASSERT_EQ(request.metadata, hookRequestMetadata);
+ ASSERT_BSONOBJ_EQ(request.cmdObj, hookCommandRequest);
+ ASSERT_BSONOBJ_EQ(request.metadata, hookRequestMetadata);
RemoteCommandResponse response;
response.data = hookCommandReply;
diff --git a/src/mongo/executor/network_interface_mock_test.cpp b/src/mongo/executor/network_interface_mock_test.cpp
index e6907dc6769..b1a2e4edff2 100644
--- a/src/mongo/executor/network_interface_mock_test.cpp
+++ b/src/mongo/executor/network_interface_mock_test.cpp
@@ -136,7 +136,8 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHook) {
[&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply) {
validateCalled = true;
hostCorrectForValidate = (remoteHost == testHost());
- replyCorrectForValidate = (isMasterReply.data == isMasterReplyData);
+ replyCorrectForValidate = SimpleBSONObjComparator::kInstance.evaluate(
+ isMasterReply.data == isMasterReplyData);
return Status::OK();
},
[&](const HostAndPort& remoteHost) {
@@ -147,8 +148,8 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHook) {
[&](const HostAndPort& remoteHost, RemoteCommandResponse&& response) {
handleReplyCalled = true;
hostCorrectForRequest = (remoteHost == testHost());
- gotExpectedReply =
- (expectedResponse.data == response.data); // Don't bother checking all fields.
+ gotExpectedReply = SimpleBSONObjComparator::kInstance.evaluate(
+ expectedResponse.data == response.data); // Don't bother checking all fields.
return Status::OK();
}));
@@ -189,7 +190,7 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHook) {
net().enterNetwork();
ASSERT(net().hasReadyRequests());
auto req = net().getNextReadyRequest();
- ASSERT(req->getRequest().cmdObj == expectedRequest.cmdObj);
+ ASSERT_BSONOBJ_EQ(req->getRequest().cmdObj, expectedRequest.cmdObj);
net().scheduleResponse(req, net().now(), expectedResponse);
net().runReadyNetworkOperations();
net().exitNetwork();
@@ -206,7 +207,7 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHook) {
net().enterNetwork();
ASSERT(net().hasReadyRequests());
auto actualCommand = net().getNextReadyRequest();
- ASSERT(actualCommand->getRequest().cmdObj == actualCommandExpected.cmdObj);
+ ASSERT_BSONOBJ_EQ(actualCommand->getRequest().cmdObj, actualCommandExpected.cmdObj);
net().scheduleResponse(actualCommand, net().now(), actualResponseExpected);
net().runReadyNetworkOperations();
net().exitNetwork();
diff --git a/src/mongo/executor/remote_command_request.cpp b/src/mongo/executor/remote_command_request.cpp
index 613b5ac5824..e1028ae320a 100644
--- a/src/mongo/executor/remote_command_request.cpp
+++ b/src/mongo/executor/remote_command_request.cpp
@@ -32,6 +32,7 @@
#include <ostream>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/platform/atomic_word.h"
#include "mongo/util/mongoutils/str.h"
@@ -95,8 +96,10 @@ bool RemoteCommandRequest::operator==(const RemoteCommandRequest& rhs) const {
if (this == &rhs) {
return true;
}
- return target == rhs.target && dbname == rhs.dbname && cmdObj == rhs.cmdObj &&
- metadata == rhs.metadata && timeout == rhs.timeout;
+ return target == rhs.target && dbname == rhs.dbname &&
+ SimpleBSONObjComparator::kInstance.evaluate(cmdObj == rhs.cmdObj) &&
+ SimpleBSONObjComparator::kInstance.evaluate(metadata == rhs.metadata) &&
+ timeout == rhs.timeout;
}
bool RemoteCommandRequest::operator!=(const RemoteCommandRequest& rhs) const {
diff --git a/src/mongo/executor/remote_command_response.cpp b/src/mongo/executor/remote_command_response.cpp
index e611ea0bc2b..b3b6832e973 100644
--- a/src/mongo/executor/remote_command_response.cpp
+++ b/src/mongo/executor/remote_command_response.cpp
@@ -30,6 +30,7 @@
#include "mongo/executor/remote_command_response.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/rpc/reply_interface.h"
#include "mongo/util/mongoutils/str.h"
@@ -99,7 +100,9 @@ bool RemoteCommandResponse::operator==(const RemoteCommandResponse& rhs) const {
if (this == &rhs) {
return true;
}
- return data == rhs.data && metadata == rhs.metadata && elapsedMillis == rhs.elapsedMillis;
+ SimpleBSONObjComparator bsonComparator;
+ return bsonComparator.evaluate(data == rhs.data) &&
+ bsonComparator.evaluate(metadata == rhs.metadata) && elapsedMillis == rhs.elapsedMillis;
}
bool RemoteCommandResponse::operator!=(const RemoteCommandResponse& rhs) const {
diff --git a/src/mongo/rpc/command_reply.cpp b/src/mongo/rpc/command_reply.cpp
index 172e7ac1ebe..25a0ce58195 100644
--- a/src/mongo/rpc/command_reply.cpp
+++ b/src/mongo/rpc/command_reply.cpp
@@ -35,6 +35,7 @@
#include "mongo/base/data_range_cursor.h"
#include "mongo/base/data_type_validated.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/rpc/object_check.h"
#include "mongo/util/net/message.h"
@@ -76,8 +77,10 @@ Protocol CommandReply::getProtocol() const {
}
bool operator==(const CommandReply& lhs, const CommandReply& rhs) {
- return std::tie(lhs._metadata, lhs._commandReply, lhs._outputDocs) ==
- std::tie(rhs._metadata, rhs._commandReply, rhs._outputDocs);
+ SimpleBSONObjComparator bsonComparator;
+ return bsonComparator.evaluate(lhs._metadata == rhs._metadata) &&
+ bsonComparator.evaluate(lhs._commandReply == rhs._commandReply) &&
+ (lhs._outputDocs == rhs._outputDocs);
}
bool operator!=(const CommandReply& lhs, const CommandReply& rhs) {
diff --git a/src/mongo/rpc/command_reply_test.cpp b/src/mongo/rpc/command_reply_test.cpp
index ba12fbe9748..26d6fc51c6b 100644
--- a/src/mongo/rpc/command_reply_test.cpp
+++ b/src/mongo/rpc/command_reply_test.cpp
@@ -99,17 +99,17 @@ TEST_F(ReplyTest, ParseAllFields) {
rpc::CommandReply opCmdReply{buildMessage()};
- ASSERT_EQUALS(opCmdReply.getMetadata(), metadata);
- ASSERT_EQUALS(opCmdReply.getCommandReply(), commandReply);
+ ASSERT_BSONOBJ_EQ(opCmdReply.getMetadata(), metadata);
+ ASSERT_BSONOBJ_EQ(opCmdReply.getCommandReply(), commandReply);
auto outputDocRange = opCmdReply.getOutputDocs();
auto outputDocRangeIter = outputDocRange.begin();
- ASSERT_EQUALS(*outputDocRangeIter, outputDoc1);
+ ASSERT_BSONOBJ_EQ(*outputDocRangeIter, outputDoc1);
// can't use assert equals since we don't have an op to print the iter.
ASSERT_FALSE(outputDocRangeIter == outputDocRange.end());
++outputDocRangeIter;
- ASSERT_EQUALS(*outputDocRangeIter, outputDoc2);
+ ASSERT_BSONOBJ_EQ(*outputDocRangeIter, outputDoc2);
ASSERT_FALSE(outputDocRangeIter == outputDocRange.end());
++outputDocRangeIter;
diff --git a/src/mongo/rpc/command_request.cpp b/src/mongo/rpc/command_request.cpp
index aa16d34f8bf..32a82ee388f 100644
--- a/src/mongo/rpc/command_request.cpp
+++ b/src/mongo/rpc/command_request.cpp
@@ -37,6 +37,7 @@
#include "mongo/base/data_type_string_data.h"
#include "mongo/base/data_type_terminated.h"
#include "mongo/base/data_type_validated.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/rpc/object_check.h"
@@ -136,9 +137,10 @@ DocumentRange CommandRequest::getInputDocs() const {
}
bool operator==(const CommandRequest& lhs, const CommandRequest& rhs) {
- return std::tie(
- lhs._database, lhs._commandName, lhs._metadata, lhs._commandArgs, lhs._inputDocs) ==
- std::tie(rhs._database, rhs._commandName, rhs._metadata, rhs._commandArgs, rhs._inputDocs);
+ return (lhs._database == rhs._database) && (lhs._commandName == rhs._commandName) &&
+ SimpleBSONObjComparator::kInstance.evaluate(lhs._metadata == rhs._metadata) &&
+ SimpleBSONObjComparator::kInstance.evaluate(lhs._commandArgs == rhs._commandArgs) &&
+ (lhs._inputDocs == rhs._inputDocs);
}
bool operator!=(const CommandRequest& lhs, const CommandRequest& rhs) {
diff --git a/src/mongo/rpc/command_request_builder_test.cpp b/src/mongo/rpc/command_request_builder_test.cpp
index c0f3e2c0ed8..6a8b7211ab9 100644
--- a/src/mongo/rpc/command_request_builder_test.cpp
+++ b/src/mongo/rpc/command_request_builder_test.cpp
@@ -82,8 +82,8 @@ TEST(RequestBuilder, RoundTrip) {
ASSERT_EQUALS(parsed.getDatabase(), databaseName);
ASSERT_EQUALS(parsed.getCommandName(), commandName);
- ASSERT_EQUALS(parsed.getMetadata(), metadata);
- ASSERT_EQUALS(parsed.getCommandArgs(), commandArgs);
+ ASSERT_BSONOBJ_EQ(parsed.getMetadata(), metadata);
+ ASSERT_BSONOBJ_EQ(parsed.getCommandArgs(), commandArgs);
// need ostream overloads for ASSERT_EQUALS
ASSERT_TRUE(parsed.getInputDocs() == inputDocRange);
}
diff --git a/src/mongo/rpc/command_request_test.cpp b/src/mongo/rpc/command_request_test.cpp
index fece0b22352..d0f4e61fddb 100644
--- a/src/mongo/rpc/command_request_test.cpp
+++ b/src/mongo/rpc/command_request_test.cpp
@@ -91,17 +91,17 @@ TEST(CommandRequest, ParseAllFields) {
ASSERT_EQUALS(opCmd.getCommandName(), commandName);
ASSERT_EQUALS(opCmd.getDatabase(), database);
- ASSERT_EQUALS(opCmd.getMetadata(), metadata);
- ASSERT_EQUALS(opCmd.getCommandArgs(), commandArgs);
+ ASSERT_BSONOBJ_EQ(opCmd.getMetadata(), metadata);
+ ASSERT_BSONOBJ_EQ(opCmd.getCommandArgs(), commandArgs);
auto inputDocRange = opCmd.getInputDocs();
auto inputDocRangeIter = inputDocRange.begin();
- ASSERT_EQUALS(*inputDocRangeIter, inputDoc1);
+ ASSERT_BSONOBJ_EQ(*inputDocRangeIter, inputDoc1);
// can't use assert equals since we don't have an op to print the iter.
ASSERT_FALSE(inputDocRangeIter == inputDocRange.end());
++inputDocRangeIter;
- ASSERT_EQUALS(*inputDocRangeIter, inputDoc2);
+ ASSERT_BSONOBJ_EQ(*inputDocRangeIter, inputDoc2);
ASSERT_FALSE(inputDocRangeIter == inputDocRange.end());
++inputDocRangeIter;
diff --git a/src/mongo/rpc/metadata/client_metadata_test.cpp b/src/mongo/rpc/metadata/client_metadata_test.cpp
index 8d64e5e7697..d3d46999e34 100644
--- a/src/mongo/rpc/metadata/client_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/client_metadata_test.cpp
@@ -87,7 +87,7 @@ TEST(ClientMetadatTest, TestLoopbackTest) {
<< BSON(kType << "c" << kName << "d" << kArchitecture << "e"
<< kVersion
<< "f")));
- ASSERT_EQUALS(obj, outDoc);
+ ASSERT_BSONOBJ_EQ(obj, outDoc);
}
// Serialize without application name
@@ -104,7 +104,7 @@ TEST(ClientMetadatTest, TestLoopbackTest) {
kDriver << BSON(kName << "a" << kVersion << "b") << kOperatingSystem
<< BSON(kType << "c" << kName << "d" << kArchitecture << "e" << kVersion
<< "f")));
- ASSERT_EQUALS(obj, outDoc);
+ ASSERT_BSONOBJ_EQ(obj, outDoc);
}
// Serialize with the os information automatically computed
diff --git a/src/mongo/rpc/metadata/config_server_metadata_test.cpp b/src/mongo/rpc/metadata/config_server_metadata_test.cpp
index 50c2679b8cf..6d8749ac495 100644
--- a/src/mongo/rpc/metadata/config_server_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/config_server_metadata_test.cpp
@@ -52,7 +52,7 @@ TEST(ConfigSvrMetadataTest, Roundtrip) {
"opTime" << BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm()))));
BSONObj serializedObj = builder.obj();
- ASSERT_EQ(expectedObj, serializedObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, serializedObj);
auto cloneStatus = ConfigServerMetadata::readFromMetadata(serializedObj);
ASSERT_OK(cloneStatus.getStatus());
@@ -64,7 +64,7 @@ TEST(ConfigSvrMetadataTest, Roundtrip) {
clonedMetadata.writeToMetadata(&clonedBuilder);
BSONObj clonedSerializedObj = clonedBuilder.obj();
- ASSERT_EQ(expectedObj, clonedSerializedObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, clonedSerializedObj);
}
} // unnamed namespace
diff --git a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
index bd2109fe413..4f8d316e8fb 100644
--- a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
@@ -70,7 +70,7 @@ TEST(ReplResponseMetadataTest, Roundtrip) {
<< -1)));
BSONObj serializedObj = builder.obj();
- ASSERT_EQ(expectedObj, serializedObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, serializedObj);
auto cloneStatus = ReplSetMetadata::readFromMetadata(serializedObj);
ASSERT_OK(cloneStatus.getStatus());
@@ -85,7 +85,7 @@ TEST(ReplResponseMetadataTest, Roundtrip) {
clonedMetadata.writeToMetadata(&clonedBuilder);
BSONObj clonedSerializedObj = clonedBuilder.obj();
- ASSERT_EQ(expectedObj, clonedSerializedObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, clonedSerializedObj);
}
} // unnamed namespace
diff --git a/src/mongo/rpc/metadata/server_selection_metadata_test.cpp b/src/mongo/rpc/metadata/server_selection_metadata_test.cpp
index 90b66f86185..3775f489a06 100644
--- a/src/mongo/rpc/metadata/server_selection_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/server_selection_metadata_test.cpp
@@ -96,8 +96,8 @@ void checkUpconvert(const BSONObj& legacyCommand,
return bob.obj();
};
- ASSERT_EQ(upconvertedCommand, upconvertedCommandBob.done());
- ASSERT_EQ(sorted(upconvertedMetadata), sorted(upconvertedMetadataBob.done()));
+ ASSERT_BSONOBJ_EQ(upconvertedCommand, upconvertedCommandBob.done());
+ ASSERT_BSONOBJ_EQ(sorted(upconvertedMetadata), sorted(upconvertedMetadataBob.done()));
}
TEST(ServerSelectionMetadata, UpconvertValidMetadata) {
diff --git a/src/mongo/rpc/metadata/sharding_metadata_test.cpp b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
index 974c7ebe0a8..cd199bd16f4 100644
--- a/src/mongo/rpc/metadata/sharding_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
@@ -118,8 +118,8 @@ void checkUpconvert(const BSONObj& legacyCommandReply,
BSONObjBuilder commandReplyBob;
BSONObjBuilder metadataBob;
ASSERT_OK(ShardingMetadata::upconvert(legacyCommandReply, &commandReplyBob, &metadataBob));
- ASSERT_EQ(commandReplyBob.done(), upconvertedCommandReply);
- ASSERT_EQ(metadataBob.done(), upconvertedReplyMetadata);
+ ASSERT_BSONOBJ_EQ(commandReplyBob.done(), upconvertedCommandReply);
+ ASSERT_BSONOBJ_EQ(metadataBob.done(), upconvertedReplyMetadata);
}
}
@@ -207,7 +207,7 @@ void checkDownconvert(const BSONObj& commandReply,
const BSONObj& downconvertedCommand) {
BSONObjBuilder downconvertedCommandBob;
ASSERT_OK(ShardingMetadata::downconvert(commandReply, metadata, &downconvertedCommandBob));
- ASSERT_EQ(downconvertedCommandBob.done(), downconvertedCommand);
+ ASSERT_BSONOBJ_EQ(downconvertedCommandBob.done(), downconvertedCommand);
}
TEST(ShardingMetadata, Downconvert) {
diff --git a/src/mongo/rpc/reply_builder_test.cpp b/src/mongo/rpc/reply_builder_test.cpp
index dddfbd535a1..c494c080ec9 100644
--- a/src/mongo/rpc/reply_builder_test.cpp
+++ b/src/mongo/rpc/reply_builder_test.cpp
@@ -96,8 +96,8 @@ TEST(CommandReplyBuilder, MemAccess) {
rpc::CommandReply parsed(&msg);
- ASSERT_EQUALS(parsed.getMetadata(), metadata);
- ASSERT_EQUALS(parsed.getCommandReply(), commandReply);
+ ASSERT_BSONOBJ_EQ(parsed.getMetadata(), metadata);
+ ASSERT_BSONOBJ_EQ(parsed.getCommandReply(), commandReply);
}
TEST(LegacyReplyBuilder, MemAccess) {
@@ -110,8 +110,8 @@ TEST(LegacyReplyBuilder, MemAccess) {
rpc::LegacyReply parsed(&msg);
- ASSERT_EQUALS(parsed.getMetadata(), metadata);
- ASSERT_EQUALS(parsed.getCommandReply(), commandReply);
+ ASSERT_BSONOBJ_EQ(parsed.getMetadata(), metadata);
+ ASSERT_BSONOBJ_EQ(parsed.getCommandReply(), commandReply);
}
template <typename T>
@@ -147,7 +147,7 @@ void testRoundTrip(rpc::ReplyBuilderInterface& replyBuilder) {
T parsed(&msg);
- ASSERT_EQUALS(parsed.getMetadata(), metadata);
+ ASSERT_BSONOBJ_EQ(parsed.getMetadata(), metadata);
if (replyBuilder.getProtocol() != rpc::Protocol::kOpQuery) {
ASSERT_TRUE(parsed.getOutputDocs() == outputDocRange);
}
diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
index d27edfb81ef..e2de583f254 100644
--- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -36,6 +36,7 @@
#include <vector>
#include "mongo/base/status_with.h"
+#include "mongo/bson/bsonobj_comparator_interface.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/sharding_catalog_client.h"
#include "mongo/s/catalog/type_chunk.h"
@@ -49,7 +50,7 @@
namespace mongo {
-using ChunkMinimumsSet = std::set<BSONObj>;
+using ChunkMinimumsSet = BSONObj::ComparatorInterface::BSONObjSet;
using MigrateInfoVector = BalancerChunkSelectionPolicy::MigrateInfoVector;
using SplitInfoVector = BalancerChunkSelectionPolicy::SplitInfoVector;
using std::shared_ptr;
@@ -65,7 +66,7 @@ namespace {
StatusWith<std::pair<DistributionStatus, ChunkMinimumsSet>> createCollectionDistributionInfo(
OperationContext* txn, const ShardStatisticsVector& allShards, ChunkManager* chunkMgr) {
ShardToChunksMap shardToChunksMap;
- ChunkMinimumsSet chunkMinimums;
+ ChunkMinimumsSet chunkMinimums = SimpleBSONObjComparator::kInstance.makeOrderedBSONObjSet();
// Makes sure there is an entry in shardToChunksMap for every shard, so empty shards will also
// be accounted for
diff --git a/src/mongo/s/balancer/balancer_configuration_test.cpp b/src/mongo/s/balancer/balancer_configuration_test.cpp
index d7e03847a32..7e81885b662 100644
--- a/src/mongo/s/balancer/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer/balancer_configuration_test.cpp
@@ -66,7 +66,7 @@ protected:
*/
void expectSettingsQuery(StringData key, StatusWith<boost::optional<BSONObj>> result) {
onFindCommand([&](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), "config.settings");
@@ -74,7 +74,7 @@ protected:
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), "config.settings");
- ASSERT_EQ(query->getFilter(), BSON("_id" << key));
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSON("_id" << key));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
diff --git a/src/mongo/s/balancer/balancer_policy.cpp b/src/mongo/s/balancer/balancer_policy.cpp
index 49df42c7e21..d50751c6974 100644
--- a/src/mongo/s/balancer/balancer_policy.cpp
+++ b/src/mongo/s/balancer/balancer_policy.cpp
@@ -32,6 +32,7 @@
#include "mongo/s/balancer/balancer_policy.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/util/log.h"
@@ -112,9 +113,12 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
if (minIntersect != maxIntersect) {
invariant(minIntersect != _zoneRanges.end());
const auto& intersectingRange =
- (minIntersect->second.min < range.max) ? minIntersect->second : maxIntersect->second;
+ (SimpleBSONObjComparator::kInstance.evaluate(minIntersect->second.min < range.max))
+ ? minIntersect->second
+ : maxIntersect->second;
- if (intersectingRange.min == range.min && intersectingRange.max == range.max &&
+ if (SimpleBSONObjComparator::kInstance.evaluate(intersectingRange.min == range.min) &&
+ SimpleBSONObjComparator::kInstance.evaluate(intersectingRange.max == range.max) &&
intersectingRange.zone == range.zone) {
return Status::OK();
}
@@ -128,8 +132,8 @@ Status DistributionStatus::addRangeToZone(const ZoneRange& range) {
// Check for containment
if (minIntersect != _zoneRanges.end()) {
const ZoneRange& nextRange = minIntersect->second;
- if (range.max > nextRange.min) {
- invariant(range.max < nextRange.max);
+ if (SimpleBSONObjComparator::kInstance.evaluate(range.max > nextRange.min)) {
+ invariant(SimpleBSONObjComparator::kInstance.evaluate(range.max < nextRange.max));
return {ErrorCodes::RangeOverlapConflict,
str::stream() << "Zone range: " << range.toString()
<< " is overlapping with existing: "
@@ -159,7 +163,8 @@ string DistributionStatus::getTagForChunk(const ChunkType& chunk) const {
const ZoneRange& intersectRange = minIntersect->second;
// Check for containment
- if (intersectRange.min <= chunk.getMin() && chunk.getMax() <= intersectRange.max) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(intersectRange.min <= chunk.getMin()) &&
+ SimpleBSONObjComparator::kInstance.evaluate(chunk.getMax() <= intersectRange.max)) {
return intersectRange.zone;
}
diff --git a/src/mongo/s/balancer/balancer_policy_tests.cpp b/src/mongo/s/balancer/balancer_policy_tests.cpp
index 90ebce9d7d9..744279e71af 100644
--- a/src/mongo/s/balancer/balancer_policy_tests.cpp
+++ b/src/mongo/s/balancer/balancer_policy_tests.cpp
@@ -108,8 +108,8 @@ TEST(BalancerPolicy, Basic) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
@@ -123,8 +123,8 @@ TEST(BalancerPolicy, SmallClusterShouldBePerfectlyBalanced) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId1][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, SingleChunkShouldNotMove) {
@@ -150,13 +150,13 @@ TEST(BalancerPolicy, ParallelBalancing) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
ASSERT_EQ(kShardId1, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
- ASSERT_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
- ASSERT_EQ(cluster.second[kShardId1][0].getMax(), migrations[1].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[1].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[1].maxKey);
}
TEST(BalancerPolicy, JumboChunksNotMoved) {
@@ -174,8 +174,8 @@ TEST(BalancerPolicy, JumboChunksNotMoved) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][1].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
@@ -201,13 +201,13 @@ TEST(BalancerPolicy, JumboChunksNotMovedParallel) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][1].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][1].getMax(), migrations[0].maxKey);
ASSERT_EQ(kShardId2, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
- ASSERT_EQ(cluster.second[kShardId2][2].getMin(), migrations[1].minKey);
- ASSERT_EQ(cluster.second[kShardId2][2].getMax(), migrations[1].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMin(), migrations[1].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][2].getMax(), migrations[1].maxKey);
}
TEST(BalancerPolicy, DrainingSingleChunk) {
@@ -221,8 +221,8 @@ TEST(BalancerPolicy, DrainingSingleChunk) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
@@ -239,13 +239,13 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) {
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
ASSERT_EQ(kShardId2, migrations[1].from);
ASSERT_EQ(kShardId3, migrations[1].to);
- ASSERT_EQ(cluster.second[kShardId2][0].getMin(), migrations[1].minKey);
- ASSERT_EQ(cluster.second[kShardId2][0].getMax(), migrations[1].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[1].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[1].maxKey);
}
TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
@@ -259,8 +259,8 @@ TEST(BalancerPolicy, DrainingWithTwoChunksFirstOneSelected) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
@@ -276,8 +276,8 @@ TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId0, migrations[0].from);
ASSERT_EQ(kShardId2, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId0][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptChunks) {
@@ -306,8 +306,8 @@ TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToTag) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToTag) {
@@ -350,8 +350,8 @@ TEST(BalancerPolicy, BalancerRespectsMaxShardSizeOnlyBalanceToNonMaxed) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId1, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, BalancerRespectsMaxShardSizeWhenAllBalanced) {
@@ -383,8 +383,8 @@ TEST(BalancerPolicy, BalancerRespectsTagsWhenDraining) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId1, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId1][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, BalancerRespectsTagPolicyBeforeImbalance) {
@@ -402,8 +402,8 @@ TEST(BalancerPolicy, BalancerRespectsTagPolicyBeforeImbalance) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedCluster) {
@@ -420,8 +420,8 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedCluster) {
ASSERT_EQ(1U, migrations.size());
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
}
TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedClusterParallel) {
@@ -440,13 +440,13 @@ TEST(BalancerPolicy, BalancerFixesIncorrectTagsInOtherwiseBalancedClusterParalle
ASSERT_EQ(kShardId2, migrations[0].from);
ASSERT_EQ(kShardId0, migrations[0].to);
- ASSERT_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
- ASSERT_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[0].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMax(), migrations[0].maxKey);
ASSERT_EQ(kShardId3, migrations[1].from);
ASSERT_EQ(kShardId1, migrations[1].to);
- ASSERT_EQ(cluster.second[kShardId3][0].getMin(), migrations[1].minKey);
- ASSERT_EQ(cluster.second[kShardId3][0].getMax(), migrations[1].maxKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMin(), migrations[1].minKey);
+ ASSERT_BSONOBJ_EQ(cluster.second[kShardId3][0].getMax(), migrations[1].maxKey);
}
TEST(DistributionStatus, AddTagRangeOverlap) {
diff --git a/src/mongo/s/balancer/migration_manager.cpp b/src/mongo/s/balancer/migration_manager.cpp
index b7670e9a252..9a457ae7b38 100644
--- a/src/mongo/s/balancer/migration_manager.cpp
+++ b/src/mongo/s/balancer/migration_manager.cpp
@@ -34,6 +34,7 @@
#include <memory>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/remote_command_targeter.h"
#include "mongo/db/client.h"
@@ -282,7 +283,8 @@ shared_ptr<Notification<Status>> MigrationManager::_schedule(
invariant(chunk);
// If the chunk is not found exactly as requested, the caller must have stale data
- if (chunk->getMin() != migrateInfo.minKey || chunk->getMax() != migrateInfo.maxKey) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(chunk->getMin() != migrateInfo.minKey) ||
+ SimpleBSONObjComparator::kInstance.evaluate(chunk->getMax() != migrateInfo.maxKey)) {
return std::make_shared<Notification<Status>>(Status(
ErrorCodes::IncompatibleShardingMetadata,
stream() << "Chunk " << ChunkRange(migrateInfo.minKey, migrateInfo.maxKey).toString()
diff --git a/src/mongo/s/balancer/migration_manager_test.cpp b/src/mongo/s/balancer/migration_manager_test.cpp
index 4db8191b404..fd06fdbc352 100644
--- a/src/mongo/s/balancer/migration_manager_test.cpp
+++ b/src/mongo/s/balancer/migration_manager_test.cpp
@@ -211,8 +211,8 @@ void MigrationManagerTest::expectMoveChunkCommand(const ChunkType& chunk,
ASSERT_OK(moveChunkRequestWithStatus.getStatus());
ASSERT_EQ(chunk.getNS(), moveChunkRequestWithStatus.getValue().getNss().ns());
- ASSERT_EQ(chunk.getMin(), moveChunkRequestWithStatus.getValue().getMinKey());
- ASSERT_EQ(chunk.getMax(), moveChunkRequestWithStatus.getValue().getMaxKey());
+ ASSERT_BSONOBJ_EQ(chunk.getMin(), moveChunkRequestWithStatus.getValue().getMinKey());
+ ASSERT_BSONOBJ_EQ(chunk.getMax(), moveChunkRequestWithStatus.getValue().getMaxKey());
ASSERT_EQ(chunk.getShard(), moveChunkRequestWithStatus.getValue().getFromShardId());
ASSERT_EQ(toShardId, moveChunkRequestWithStatus.getValue().getToShardId());
diff --git a/src/mongo/s/balancer/type_migration_test.cpp b/src/mongo/s/balancer/type_migration_test.cpp
index 8b020c57312..ff6c31ca9ae 100644
--- a/src/mongo/s/balancer/type_migration_test.cpp
+++ b/src/mongo/s/balancer/type_migration_test.cpp
@@ -75,7 +75,7 @@ TEST(MigrationTypeTest, ConvertFromMigrationInfo) {
BSONObj obj = builder.obj();
- ASSERT_EQUALS(obj, migrationType.toBSON());
+ ASSERT_BSONOBJ_EQ(obj, migrationType.toBSON());
}
TEST(MigrationTypeTest, FromAndToBSON) {
@@ -94,7 +94,7 @@ TEST(MigrationTypeTest, FromAndToBSON) {
BSONObj obj = builder.obj();
MigrationType migrationType = assertGet(MigrationType::fromBSON(obj));
- ASSERT_EQUALS(obj, migrationType.toBSON());
+ ASSERT_BSONOBJ_EQ(obj, migrationType.toBSON());
}
TEST(MigrationTypeTest, MissingRequiredNamespaceField) {
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
index 1f6d3c7847e..73d42752764 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
@@ -216,7 +216,7 @@ TEST_F(DistLockCatalogFixture, BasicPing) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
ok: 1,
@@ -370,7 +370,7 @@ TEST_F(DistLockCatalogFixture, GrabLockNoOp) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson("{ ok: 1, value: null }");
});
@@ -418,7 +418,7 @@ TEST_F(DistLockCatalogFixture, GrabLockWithNewDoc) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
lastErrorObject: {
@@ -682,7 +682,7 @@ TEST_F(DistLockCatalogFixture, OvertakeLockNoOp) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson("{ ok: 1, value: null }");
});
@@ -735,7 +735,7 @@ TEST_F(DistLockCatalogFixture, OvertakeLockWithNewDoc) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
lastErrorObject: {
@@ -924,7 +924,7 @@ TEST_F(DistLockCatalogFixture, BasicUnlock) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
ok: 1,
@@ -957,7 +957,7 @@ TEST_F(DistLockCatalogFixture, UnlockWithNoNewDoc) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
ok: 1,
@@ -1101,15 +1101,15 @@ TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
BatchedUpdateRequest batchRequest;
ASSERT(batchRequest.parseBSON("config", request.cmdObj, &errmsg));
ASSERT_EQUALS(LocksType::ConfigNS, batchRequest.getNS().toString());
- ASSERT_EQUALS(BSON("w" << 1 << "wtimeout" << 0), batchRequest.getWriteConcern());
+ ASSERT_BSONOBJ_EQ(BSON("w" << 1 << "wtimeout" << 0), batchRequest.getWriteConcern());
auto updates = batchRequest.getUpdates();
ASSERT_EQUALS(1U, updates.size());
auto update = updates.front();
ASSERT_FALSE(update->getUpsert());
ASSERT_TRUE(update->getMulti());
- ASSERT_EQUALS(BSON(LocksType::process("processID")), update->getQuery());
- ASSERT_EQUALS(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))),
- update->getUpdateExpr());
+ ASSERT_BSONOBJ_EQ(BSON(LocksType::process("processID")), update->getQuery());
+ ASSERT_BSONOBJ_EQ(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))),
+ update->getUpdateExpr());
return BSON("ok" << 1);
});
@@ -1161,7 +1161,7 @@ TEST_F(DistLockCatalogFixture, BasicGetServerInfo) {
onCommand([](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
ASSERT_EQUALS(dummyHost, request.target);
ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS(BSON("serverStatus" << 1 << "maxTimeMS" << 30000), request.cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("serverStatus" << 1 << "maxTimeMS" << 30000), request.cmdObj);
return fromjson(R"({
localTime: { $date: "2015-05-26T13:06:27.293Z" },
@@ -1307,7 +1307,7 @@ TEST_F(DistLockCatalogFixture, BasicStopPing) {
maxTimeMS: 30000
})"));
- ASSERT_EQUALS(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
ok: 1,
@@ -1439,29 +1439,28 @@ TEST_F(DistLockCatalogFixture, BasicGetPing) {
ASSERT_EQUALS(ping, pingDoc.getPing());
});
- onFindCommand(
- [](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(dummyHost, request.target);
- ASSERT_EQUALS("config", request.dbname);
+ onFindCommand([](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS(dummyHost, request.target);
+ ASSERT_EQUALS("config", request.dbname);
- const auto& findCmd = request.cmdObj;
- ASSERT_EQUALS("lockpings", findCmd["find"].str());
- ASSERT_EQUALS(BSON("_id"
+ const auto& findCmd = request.cmdObj;
+ ASSERT_EQUALS("lockpings", findCmd["find"].str());
+ ASSERT_BSONOBJ_EQ(BSON("_id"
<< "test"),
findCmd["filter"].Obj());
- ASSERT_EQUALS(1, findCmd["limit"].numberLong());
- checkReadConcern(findCmd);
+ ASSERT_EQUALS(1, findCmd["limit"].numberLong());
+ checkReadConcern(findCmd);
- BSONObj pingDoc(fromjson(R"({
+ BSONObj pingDoc(fromjson(R"({
_id: "test",
ping: { $date: "2015-05-26T13:06:27.293Z" }
})"));
- std::vector<BSONObj> result;
- result.push_back(pingDoc);
+ std::vector<BSONObj> result;
+ result.push_back(pingDoc);
- return result;
- });
+ return result;
+ });
future.timed_get(kFutureTimeout);
}
@@ -1534,7 +1533,7 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByTS) {
const auto& findCmd = request.cmdObj;
ASSERT_EQUALS("locks", findCmd["find"].str());
- ASSERT_EQUALS(BSON("ts" << OID("555f99712c99a78c5b083358")), findCmd["filter"].Obj());
+ ASSERT_BSONOBJ_EQ(BSON("ts" << OID("555f99712c99a78c5b083358")), findCmd["filter"].Obj());
ASSERT_EQUALS(1, findCmd["limit"].numberLong());
checkReadConcern(findCmd);
@@ -1613,29 +1612,28 @@ TEST_F(DistLockCatalogFixture, BasicGetLockByName) {
ASSERT_EQUALS(ts, lockDoc.getLockID());
});
- onFindCommand(
- [](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(dummyHost, request.target);
- ASSERT_EQUALS("config", request.dbname);
+ onFindCommand([](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS(dummyHost, request.target);
+ ASSERT_EQUALS("config", request.dbname);
- const auto& findCmd = request.cmdObj;
- ASSERT_EQUALS("locks", findCmd["find"].str());
- ASSERT_EQUALS(BSON("_id"
+ const auto& findCmd = request.cmdObj;
+ ASSERT_EQUALS("locks", findCmd["find"].str());
+ ASSERT_BSONOBJ_EQ(BSON("_id"
<< "abc"),
findCmd["filter"].Obj());
- ASSERT_EQUALS(1, findCmd["limit"].numberLong());
- checkReadConcern(findCmd);
+ ASSERT_EQUALS(1, findCmd["limit"].numberLong());
+ checkReadConcern(findCmd);
- BSONObj lockDoc(fromjson(R"({
+ BSONObj lockDoc(fromjson(R"({
_id: "abc",
state: 2,
ts: ObjectId("555f99712c99a78c5b083358")
})"));
- std::vector<BSONObj> result;
- result.push_back(lockDoc);
- return result;
- });
+ std::vector<BSONObj> result;
+ result.push_back(lockDoc);
+ return result;
+ });
future.timed_get(kFutureTimeout);
}
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp
index 691d8dc76b0..c79ea362957 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_add_shard_test.cpp
@@ -106,8 +106,8 @@ protected:
onCommandForAddShard([&, target, isMasterResponse](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, target);
ASSERT_EQ(request.dbname, "admin");
- ASSERT_EQ(request.cmdObj, BSON("isMaster" << 1));
- ASSERT_EQUALS(rpc::makeEmptyMetadata(), request.metadata);
+ ASSERT_BSONOBJ_EQ(request.cmdObj, BSON("isMaster" << 1));
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return isMasterResponse;
});
@@ -117,8 +117,8 @@ protected:
onCommandForAddShard([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, target);
ASSERT_EQ(request.dbname, "admin");
- ASSERT_EQ(request.cmdObj, BSON("listDatabases" << 1));
- ASSERT_EQUALS(rpc::makeEmptyMetadata(), request.metadata);
+ ASSERT_BSONOBJ_EQ(request.cmdObj, BSON("listDatabases" << 1));
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
BSONArrayBuilder arr;
for (const auto& db : dbs) {
@@ -200,8 +200,8 @@ protected:
for (; itActual != actualUpdates.end(); itActual++, itExpected++) {
ASSERT_EQ((*itExpected)->getUpsert(), (*itActual)->getUpsert());
ASSERT_EQ((*itExpected)->getMulti(), (*itActual)->getMulti());
- ASSERT_EQ((*itExpected)->getQuery(), (*itActual)->getQuery());
- ASSERT_EQ((*itExpected)->getUpdateExpr(), (*itActual)->getUpdateExpr());
+ ASSERT_BSONOBJ_EQ((*itExpected)->getQuery(), (*itActual)->getQuery());
+ ASSERT_BSONOBJ_EQ((*itExpected)->getUpdateExpr(), (*itActual)->getUpdateExpr());
}
BatchedCommandResponse response;
@@ -246,8 +246,8 @@ protected:
for (; itActual != actualUpdates.end(); itActual++, itExpected++) {
ASSERT_EQ((*itExpected)->getUpsert(), (*itActual)->getUpsert());
ASSERT_EQ((*itExpected)->getMulti(), (*itActual)->getMulti());
- ASSERT_EQ((*itExpected)->getQuery(), (*itActual)->getQuery());
- ASSERT_EQ((*itExpected)->getUpdateExpr(), (*itActual)->getUpdateExpr());
+ ASSERT_BSONOBJ_EQ((*itExpected)->getQuery(), (*itActual)->getQuery());
+ ASSERT_BSONOBJ_EQ((*itExpected)->getUpdateExpr(), (*itActual)->getUpdateExpr());
}
return statusToReturn;
@@ -342,7 +342,7 @@ TEST_F(AddShardTest, CreateShardIdentityUpsertForAddShard) {
<< "majority"
<< "wtimeout"
<< 15000));
- ASSERT_EQUALS(
+ ASSERT_BSONOBJ_EQ(
expectedBSON,
catalogManager()->createShardIdentityUpsertForAddShard(operationContext(), shardName));
}
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_append_db_stats_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_append_db_stats_test.cpp
index ac50ab925cc..dfcfc324af4 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_append_db_stats_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_append_db_stats_test.cpp
@@ -67,10 +67,10 @@ TEST_F(ShardingCatalogClientAppendDbStatsTest, BasicAppendDBStats) {
});
onCommand([](const RemoteCommandRequest& request) {
- ASSERT_EQ(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ("admin", request.dbname);
- ASSERT_EQ(BSON("listDatabases" << 1), request.cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("listDatabases" << 1), request.cmdObj);
return fromjson(R"({
databases: [
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_assign_key_range_to_zone_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_assign_key_range_to_zone_test.cpp
index e49ef0a3588..104a97a3f6a 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_assign_key_range_to_zone_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_assign_key_range_to_zone_test.cpp
@@ -104,8 +104,8 @@ public:
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(ns, tagDoc.getNS());
- ASSERT_EQ(range.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(range.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(range.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(range.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName, tagDoc.getTag());
}
@@ -304,8 +304,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMaxAlignsWithExistingMinShouldSucce
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
- ASSERT_EQ(BSON("x" << 2), tagDoc.getMinKey());
- ASSERT_EQ(BSON("x" << 4), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 2), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 4), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
@@ -321,8 +321,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMaxAlignsWithExistingMinShouldSucce
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
- ASSERT_EQ(existingRange.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(existingRange.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
}
@@ -384,8 +384,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeOverlappingWithDifferentNSShou
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
- ASSERT_EQ(existingRange.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(existingRange.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
{
@@ -398,8 +398,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeOverlappingWithDifferentNSShou
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedCollection.getNs().ns(), tagDoc.getNS());
- ASSERT_EQ(BSON("x" << 5), tagDoc.getMinKey());
- ASSERT_EQ(BSON("x" << 7), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 5), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 7), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
}
@@ -472,8 +472,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMinAlignsWithExistingMaxShouldSucce
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
- ASSERT_EQ(existingRange.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(existingRange.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
@@ -487,8 +487,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewMinAlignsWithExistingMaxShouldSucce
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
- ASSERT_EQ(BSON("x" << 8), tagDoc.getMinKey());
- ASSERT_EQ(BSON("x" << 10), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 8), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 10), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
}
@@ -610,8 +610,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMinPrefixOfExistingSho
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(ns, tagDoc.getNS());
- ASSERT_EQ(existingRange.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(existingRange.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
@@ -627,8 +627,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMinPrefixOfExistingSho
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
- ASSERT_EQ(existingRange.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(existingRange.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
}
@@ -662,8 +662,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMaxPrefixOfExistingSho
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(ns, tagDoc.getNS());
- ASSERT_EQ(existingRange.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(existingRange.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
@@ -679,8 +679,8 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveThatIsOnlyMaxPrefixOfExistingSho
auto tagDoc = tagDocStatus.getValue();
ASSERT_EQ(shardedNS().ns(), tagDoc.getNS());
- ASSERT_EQ(existingRange.getMin(), tagDoc.getMinKey());
- ASSERT_EQ(existingRange.getMax(), tagDoc.getMaxKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMin(), tagDoc.getMinKey());
+ ASSERT_BSONOBJ_EQ(existingRange.getMax(), tagDoc.getMaxKey());
ASSERT_EQ(zoneName(), tagDoc.getTag());
}
}
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp
index f5609adef83..b62499cc355 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_drop_coll_test.cpp
@@ -91,11 +91,11 @@ public:
onCommand([this, shard](const RemoteCommandRequest& request) {
ASSERT_EQ(HostAndPort(shard.getHost()), request.target);
ASSERT_EQ(_dropNS.db(), request.dbname);
- ASSERT_EQ(BSON("drop" << _dropNS.coll() << "writeConcern"
- << BSON("w" << 0 << "wtimeout" << 0)),
- request.cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("drop" << _dropNS.coll() << "writeConcern"
+ << BSON("w" << 0 << "wtimeout" << 0)),
+ request.cmdObj);
- ASSERT_EQUALS(rpc::makeEmptyMetadata(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ns" << _dropNS.ns() << "ok" << 1);
});
@@ -103,7 +103,7 @@ public:
void expectRemoveChunksAndMarkCollectionDropped() {
onCommand([this](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
ASSERT_EQ(_configHost, request.target);
ASSERT_EQ("config", request.dbname);
@@ -114,7 +114,7 @@ public:
maxTimeMS: 30000
})"));
- ASSERT_EQ(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return BSON("n" << 1 << "ok" << 1);
});
@@ -137,9 +137,9 @@ public:
onCommand([shard](const RemoteCommandRequest& request) {
ASSERT_EQ(HostAndPort(shard.getHost()), request.target);
ASSERT_EQ("admin", request.dbname);
- ASSERT_EQ(BSON("unsetSharding" << 1), request.cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("unsetSharding" << 1), request.cmdObj);
- ASSERT_EQUALS(rpc::makeEmptyMetadata(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("n" << 1 << "ok" << 1);
});
@@ -216,11 +216,11 @@ TEST_F(DropColl2ShardTest, NSNotFound) {
onCommand([this](const RemoteCommandRequest& request) {
ASSERT_EQ(HostAndPort(shard1().getHost()), request.target);
ASSERT_EQ(dropNS().db(), request.dbname);
- ASSERT_EQ(
+ ASSERT_BSONOBJ_EQ(
BSON("drop" << dropNS().coll() << "writeConcern" << BSON("w" << 0 << "wtimeout" << 0)),
request.cmdObj);
- ASSERT_EQUALS(rpc::makeEmptyMetadata(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ok" << 0 << "code" << ErrorCodes::NamespaceNotFound);
});
@@ -228,11 +228,11 @@ TEST_F(DropColl2ShardTest, NSNotFound) {
onCommand([this](const RemoteCommandRequest& request) {
ASSERT_EQ(HostAndPort(shard2().getHost()), request.target);
ASSERT_EQ(dropNS().db(), request.dbname);
- ASSERT_EQ(
+ ASSERT_BSONOBJ_EQ(
BSON("drop" << dropNS().coll() << "writeConcern" << BSON("w" << 0 << "wtimeout" << 0)),
request.cmdObj);
- ASSERT_EQUALS(rpc::makeEmptyMetadata(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
return BSON("ok" << 0 << "code" << ErrorCodes::NamespaceNotFound);
});
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_merge_chunk_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_merge_chunk_test.cpp
index c2107be7390..23e3803379d 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_merge_chunk_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_merge_chunk_test.cpp
@@ -87,8 +87,8 @@ TEST_F(MergeChunkTest, MergeExistingChunksCorrectlyShouldSucceed) {
// MergedChunk should have range [chunkMin, chunkMax]
auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
- ASSERT_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_EQ(chunkMax, mergedChunk.getMax());
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
{
// Check for increment on mergedChunk's minor version
@@ -150,8 +150,8 @@ TEST_F(MergeChunkTest, MergeSeveralChunksCorrectlyShouldSucceed) {
// MergedChunk should have range [chunkMin, chunkMax]
auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
- ASSERT_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_EQ(chunkMax, mergedChunk.getMax());
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
{
// Check for increment on mergedChunk's minor version
@@ -217,8 +217,8 @@ TEST_F(MergeChunkTest, NewMergeShouldClaimHighestVersion) {
// MergedChunk should have range [chunkMin, chunkMax]
auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
- ASSERT_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_EQ(chunkMax, mergedChunk.getMax());
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
{
// Check for minor increment on collection version
@@ -280,8 +280,8 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
// MergedChunk should have range [chunkMin, chunkMax]
auto mergedChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.front()));
- ASSERT_EQ(chunkMin, mergedChunk.getMin());
- ASSERT_EQ(chunkMax, mergedChunk.getMax());
+ ASSERT_BSONOBJ_EQ(chunkMin, mergedChunk.getMin());
+ ASSERT_BSONOBJ_EQ(chunkMax, mergedChunk.getMax());
{
// Check for increment on mergedChunk's minor version
@@ -291,8 +291,8 @@ TEST_F(MergeChunkTest, MergeLeavesOtherChunksAlone) {
// OtherChunk should have been left alone
auto foundOtherChunk = uassertStatusOK(ChunkType::fromBSON(chunksVector.back()));
- ASSERT_EQ(otherChunk.getMin(), foundOtherChunk.getMin());
- ASSERT_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
+ ASSERT_BSONOBJ_EQ(otherChunk.getMin(), foundOtherChunk.getMin());
+ ASSERT_BSONOBJ_EQ(otherChunk.getMax(), foundOtherChunk.getMax());
}
TEST_F(MergeChunkTest, NonExistingNamespace) {
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_remove_shard_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_remove_shard_test.cpp
index 36fcb6eba63..9c03758675a 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_remove_shard_test.cpp
@@ -155,7 +155,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
ASSERT_EQUALS(configHost, request.target);
ASSERT_EQUALS("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BatchedUpdateRequest actualBatchedUpdate;
std::string errmsg;
@@ -167,8 +167,8 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
ASSERT_FALSE(update->getUpsert());
ASSERT_FALSE(update->getMulti());
- ASSERT_EQUALS(BSON(ShardType::name() << shardName), update->getQuery());
- ASSERT_EQUALS(BSON("$set" << BSON(ShardType::draining(true))), update->getUpdateExpr());
+ ASSERT_BSONOBJ_EQ(BSON(ShardType::name() << shardName), update->getQuery());
+ ASSERT_BSONOBJ_EQ(BSON("$set" << BSON(ShardType::draining(true))), update->getUpdateExpr());
BatchedCommandResponse response;
response.setOk(true);
@@ -180,14 +180,14 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
// Respond to request to reload information about existing shards
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
- ASSERT_EQ(BSONObj(), query->getFilter());
- ASSERT_EQ(BSONObj(), query->getSort());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -329,7 +329,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
ASSERT_EQUALS(configHost, request.target);
ASSERT_EQUALS("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BatchedDeleteRequest actualBatchedDelete;
std::string errmsg;
@@ -340,7 +340,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
auto deleteOp = deletes.front();
ASSERT_EQUALS(0, deleteOp->getLimit());
- ASSERT_EQUALS(BSON(ShardType::name() << shardName), deleteOp->getQuery());
+ ASSERT_BSONOBJ_EQ(BSON(ShardType::name() << shardName), deleteOp->getQuery());
BatchedCommandResponse response;
response.setOk(true);
@@ -352,14 +352,14 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
// Respond to request to reload information about existing shards
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
- ASSERT_EQ(BSONObj(), query->getFilter());
- ASSERT_EQ(BSONObj(), query->getSort());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_shard_collection_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_shard_collection_test.cpp
index 9045e7804a9..3f96485eb57 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_shard_collection_test.cpp
@@ -93,7 +93,7 @@ public:
void expectGetDatabase(const DatabaseType& expectedDb) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(DatabaseType::ConfigNS, nss.ns());
@@ -101,8 +101,8 @@ public:
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(DatabaseType::ConfigNS, query->ns());
- ASSERT_EQ(BSON(DatabaseType::name(expectedDb.getName())), query->getFilter());
- ASSERT_EQ(BSONObj(), query->getSort());
+ ASSERT_BSONOBJ_EQ(BSON(DatabaseType::name(expectedDb.getName())), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getSort());
ASSERT_EQ(1, query->getLimit().get());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -121,7 +121,7 @@ public:
ASSERT_EQUALS(configHost, request.target);
ASSERT_EQUALS("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BatchedInsertRequest actualBatchedInsert;
std::string errmsg;
@@ -138,8 +138,8 @@ public:
// generated OID so just check that the field exists and is *a* OID.
ASSERT_EQUALS(jstOID, chunkObj[ChunkType::DEPRECATED_epoch()].type());
ASSERT_EQUALS(expectedChunk.getNS(), chunkObj[ChunkType::ns()].String());
- ASSERT_EQUALS(expectedChunk.getMin(), chunkObj[ChunkType::min()].Obj());
- ASSERT_EQUALS(expectedChunk.getMax(), chunkObj[ChunkType::max()].Obj());
+ ASSERT_BSONOBJ_EQ(expectedChunk.getMin(), chunkObj[ChunkType::min()].Obj());
+ ASSERT_BSONOBJ_EQ(expectedChunk.getMax(), chunkObj[ChunkType::max()].Obj());
ASSERT_EQUALS(expectedChunk.getShard(), chunkObj[ChunkType::shard()].String());
actualVersion = ChunkVersion::fromBSON(chunkObj);
@@ -157,7 +157,7 @@ public:
void expectReloadChunks(const std::string& ns, const vector<ChunkType>& chunks) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
@@ -168,8 +168,8 @@ public:
BSONObj expectedSort = BSON(ChunkType::DEPRECATED_lastmod() << 1);
ASSERT_EQ(ChunkType::ConfigNS, query->ns());
- ASSERT_EQ(expectedQuery, query->getFilter());
- ASSERT_EQ(expectedSort, query->getSort());
+ ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter());
+ ASSERT_BSONOBJ_EQ(expectedSort, query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -189,7 +189,7 @@ public:
ASSERT_EQUALS(configHost, request.target);
ASSERT_EQUALS("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BatchedUpdateRequest actualBatchedUpdate;
std::string errmsg;
@@ -201,9 +201,9 @@ public:
ASSERT_TRUE(update->getUpsert());
ASSERT_FALSE(update->getMulti());
- ASSERT_EQUALS(BSON(CollectionType::fullNs(expectedCollection.getNs().toString())),
- update->getQuery());
- ASSERT_EQUALS(expectedCollection.toBSON(), update->getUpdateExpr());
+ ASSERT_BSONOBJ_EQ(BSON(CollectionType::fullNs(expectedCollection.getNs().toString())),
+ update->getQuery());
+ ASSERT_BSONOBJ_EQ(expectedCollection.toBSON(), update->getUpdateExpr());
BatchedCommandResponse response;
response.setOk(true);
@@ -764,15 +764,16 @@ TEST_F(ShardCollectionTest, withInitialData) {
ASSERT_EQUALS("splitVector", cmdName);
ASSERT_EQUALS(ns, request.cmdObj["splitVector"].String()); // splitVector uses full ns
- ASSERT_EQUALS(keyPattern.toBSON(), request.cmdObj["keyPattern"].Obj());
- ASSERT_EQUALS(keyPattern.getKeyPattern().globalMin(), request.cmdObj["min"].Obj());
- ASSERT_EQUALS(keyPattern.getKeyPattern().globalMax(), request.cmdObj["max"].Obj());
+ ASSERT_BSONOBJ_EQ(keyPattern.toBSON(), request.cmdObj["keyPattern"].Obj());
+ ASSERT_BSONOBJ_EQ(keyPattern.getKeyPattern().globalMin(), request.cmdObj["min"].Obj());
+ ASSERT_BSONOBJ_EQ(keyPattern.getKeyPattern().globalMax(), request.cmdObj["max"].Obj());
ASSERT_EQUALS(64 * 1024 * 1024ULL,
static_cast<uint64_t>(request.cmdObj["maxChunkSizeBytes"].numberLong()));
ASSERT_EQUALS(0, request.cmdObj["maxSplitPoints"].numberLong());
ASSERT_EQUALS(0, request.cmdObj["maxChunkObjects"].numberLong());
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return BSON("ok" << 1 << "splitKeys"
<< BSON_ARRAY(splitPoint0 << splitPoint1 << splitPoint2 << splitPoint3));
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_split_chunk_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_split_chunk_test.cpp
index 3d78a9bfd16..0b741d6f255 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_split_chunk_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_split_chunk_test.cpp
@@ -69,7 +69,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT_OK(chunkDocStatus.getStatus());
auto chunkDoc = chunkDocStatus.getValue();
- ASSERT_EQ(chunkSplitPoint, chunkDoc.getMax());
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
// Check for increment on first chunkDoc's minor version
ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
@@ -80,7 +80,7 @@ TEST_F(SplitChunkTest, SplitExistingChunkCorrectlyShouldSucceed) {
ASSERT_OK(otherChunkDocStatus.getStatus());
auto otherChunkDoc = otherChunkDocStatus.getValue();
- ASSERT_EQ(chunkMax, otherChunkDoc.getMax());
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
// Check for increment on second chunkDoc's minor version
ASSERT_EQ(origVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
@@ -118,7 +118,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ASSERT_OK(chunkDocStatus.getStatus());
auto chunkDoc = chunkDocStatus.getValue();
- ASSERT_EQ(chunkSplitPoint, chunkDoc.getMax());
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
// Check for increment on first chunkDoc's minor version
ASSERT_EQ(origVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
@@ -129,7 +129,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ASSERT_OK(midChunkDocStatus.getStatus());
auto midChunkDoc = midChunkDocStatus.getValue();
- ASSERT_EQ(chunkSplitPoint2, midChunkDoc.getMax());
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint2, midChunkDoc.getMax());
// Check for increment on second chunkDoc's minor version
ASSERT_EQ(origVersion.majorVersion(), midChunkDoc.getVersion().majorVersion());
@@ -140,7 +140,7 @@ TEST_F(SplitChunkTest, MultipleSplitsOnExistingChunkShouldSucceed) {
ASSERT_OK(lastChunkDocStatus.getStatus());
auto lastChunkDoc = lastChunkDocStatus.getValue();
- ASSERT_EQ(chunkMax, lastChunkDoc.getMax());
+ ASSERT_BSONOBJ_EQ(chunkMax, lastChunkDoc.getMax());
{
// Check for increment on third chunkDoc's minor version
@@ -189,7 +189,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ASSERT_OK(chunkDocStatus.getStatus());
auto chunkDoc = chunkDocStatus.getValue();
- ASSERT_EQ(chunkSplitPoint, chunkDoc.getMax());
+ ASSERT_BSONOBJ_EQ(chunkSplitPoint, chunkDoc.getMax());
// Check for increment based on the competing chunk version
ASSERT_EQ(competingVersion.majorVersion(), chunkDoc.getVersion().majorVersion());
@@ -200,7 +200,7 @@ TEST_F(SplitChunkTest, NewSplitShouldClaimHighestVersion) {
ASSERT_OK(otherChunkDocStatus.getStatus());
auto otherChunkDoc = otherChunkDocStatus.getValue();
- ASSERT_EQ(chunkMax, otherChunkDoc.getMax());
+ ASSERT_BSONOBJ_EQ(chunkMax, otherChunkDoc.getMax());
// Check for increment based on the competing chunk version
ASSERT_EQ(competingVersion.majorVersion(), otherChunkDoc.getVersion().majorVersion());
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
index bff745ffb78..9d0e2e13deb 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_test.cpp
@@ -102,7 +102,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
onFindWithMetadataCommand(
[this, &expectedColl, newOpTime](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
@@ -111,8 +111,9 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
// Ensure the query is correct
ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSON(CollectionType::fullNs(expectedColl.getNs().ns())));
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(),
+ BSON(CollectionType::fullNs(expectedColl.getNs().ns())));
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_EQ(query->getLimit().get(), 1);
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -127,7 +128,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
// Now wait for the getCollection call to return
const auto collOpTimePair = future.timed_get(kFutureTimeout);
ASSERT_EQ(newOpTime, collOpTimePair.opTime);
- ASSERT_EQ(expectedColl.toBSON(), collOpTimePair.value.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedColl.toBSON(), collOpTimePair.value.toBSON());
}
TEST_F(ShardingCatalogClientTest, GetCollectionNotExisting) {
@@ -168,13 +169,13 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), DatabaseType::ConfigNS);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), DatabaseType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSON(DatabaseType::name(expectedDb.getName())));
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(DatabaseType::name(expectedDb.getName())));
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_EQ(query->getLimit().get(), 1);
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -188,7 +189,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) {
const auto dbOpTimePair = future.timed_get(kFutureTimeout);
ASSERT_EQ(newOpTime, dbOpTimePair.opTime);
- ASSERT_EQ(expectedDb.toBSON(), dbOpTimePair.value.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedDb.toBSON(), dbOpTimePair.value.toBSON());
}
TEST_F(ShardingCatalogClientTest, GetDatabaseStaleSecondaryRetrySuccess) {
@@ -219,7 +220,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseStaleSecondaryRetrySuccess) {
});
const auto dbOpTimePair = future.timed_get(kFutureTimeout);
- ASSERT_EQ(expectedDb.toBSON(), dbOpTimePair.value.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedDb.toBSON(), dbOpTimePair.value.toBSON());
}
TEST_F(ShardingCatalogClientTest, GetDatabaseStaleSecondaryRetryNoPrimary) {
@@ -395,7 +396,7 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
});
onFindCommand([this, &s1, &s2, &s3](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ShardType::ConfigNS);
@@ -403,8 +404,8 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), ShardType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSONObj());
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -416,7 +417,7 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
ASSERT_EQ(actualShardsList.size(), expectedShardsList.size());
for (size_t i = 0; i < actualShardsList.size(); ++i) {
- ASSERT_EQ(actualShardsList[i].toBSON(), expectedShardsList[i].toBSON());
+ ASSERT_BSONOBJ_EQ(actualShardsList[i].toBSON(), expectedShardsList[i].toBSON());
}
}
@@ -491,7 +492,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
onFindWithMetadataCommand([this, &chunksQuery, chunkA, chunkB, newOpTime](
const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
@@ -499,8 +500,8 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), ChunkType::ConfigNS);
- ASSERT_EQ(query->getFilter(), chunksQuery);
- ASSERT_EQ(query->getSort(), BSON(ChunkType::DEPRECATED_lastmod() << -1));
+ ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSON(ChunkType::DEPRECATED_lastmod() << -1));
ASSERT_EQ(query->getLimit().get(), 1);
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -513,8 +514,8 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
});
const auto& chunks = future.timed_get(kFutureTimeout);
- ASSERT_EQ(chunkA.toBSON(), chunks[0].toBSON());
- ASSERT_EQ(chunkB.toBSON(), chunks[1].toBSON());
+ ASSERT_BSONOBJ_EQ(chunkA.toBSON(), chunks[0].toBSON());
+ ASSERT_BSONOBJ_EQ(chunkB.toBSON(), chunks[1].toBSON());
}
TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
@@ -538,7 +539,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
});
onFindCommand([this, &chunksQuery](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
@@ -546,8 +547,8 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), ChunkType::ConfigNS);
- ASSERT_EQ(query->getFilter(), chunksQuery);
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -614,10 +615,10 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementReadCommand) {
});
onCommand([](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(BSON("usersInfo" << 1 << "maxTimeMS" << 30000), request.cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("usersInfo" << 1 << "maxTimeMS" << 30000), request.cmdObj);
return BSON("ok" << 1 << "users" << BSONArrayBuilder().arr());
});
@@ -658,18 +659,18 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandSuccess) {
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
// Since no write concern was sent we will add w:majority
- ASSERT_EQUALS(BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
- request.cmdObj);
-
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 0)
+ << "maxTimeMS"
+ << 30000),
+ request.cmdObj);
+
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BSONObjBuilder responseBuilder;
Command::appendCommandStatus(responseBuilder,
@@ -733,18 +734,18 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandRewriteWriteConce
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 30)
- << "maxTimeMS"
- << 30000),
- request.cmdObj);
-
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 30)
+ << "maxTimeMS"
+ << 30000),
+ request.cmdObj);
+
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BSONObjBuilder responseBuilder;
Command::appendCommandStatus(responseBuilder,
@@ -822,18 +823,18 @@ TEST_F(ShardingCatalogClientTest, RunUserManagementWriteCommandNotMasterRetrySuc
ASSERT_EQUALS(host2, request.target);
ASSERT_EQUALS("test", request.dbname);
// Since no write concern was sent we will add w:majority
- ASSERT_EQUALS(BSON("dropUser"
- << "test"
- << "writeConcern"
- << BSON("w"
- << "majority"
- << "wtimeout"
- << 0)
- << "maxTimeMS"
- << 30000),
- request.cmdObj);
-
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 0)
+ << "maxTimeMS"
+ << 30000),
+ request.cmdObj);
+
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
return BSON("ok" << 1);
});
@@ -886,7 +887,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
onFindWithMetadataCommand(
[this, coll1, coll2, coll3, newOpTime](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
@@ -894,8 +895,8 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSONObj());
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -909,9 +910,9 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
const auto& actualColls = future.timed_get(kFutureTimeout);
ASSERT_EQ(3U, actualColls.size());
- ASSERT_EQ(coll1.toBSON(), actualColls[0].toBSON());
- ASSERT_EQ(coll2.toBSON(), actualColls[1].toBSON());
- ASSERT_EQ(coll3.toBSON(), actualColls[2].toBSON());
+ ASSERT_BSONOBJ_EQ(coll1.toBSON(), actualColls[0].toBSON());
+ ASSERT_BSONOBJ_EQ(coll2.toBSON(), actualColls[1].toBSON());
+ ASSERT_BSONOBJ_EQ(coll3.toBSON(), actualColls[2].toBSON());
}
TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
@@ -943,7 +944,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
});
onFindCommand([this, coll1, coll2](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
@@ -954,7 +955,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
{
BSONObjBuilder b;
b.appendRegex(CollectionType::fullNs(), "^test\\.");
- ASSERT_EQ(query->getFilter(), b.obj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), b.obj());
}
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -964,8 +965,8 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
const auto& actualColls = future.timed_get(kFutureTimeout);
ASSERT_EQ(2U, actualColls.size());
- ASSERT_EQ(coll1.toBSON(), actualColls[0].toBSON());
- ASSERT_EQ(coll2.toBSON(), actualColls[1].toBSON());
+ ASSERT_BSONOBJ_EQ(coll1.toBSON(), actualColls[0].toBSON());
+ ASSERT_BSONOBJ_EQ(coll2.toBSON(), actualColls[1].toBSON());
}
TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
@@ -994,7 +995,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
@@ -1002,7 +1003,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
{
BSONObjBuilder b;
b.appendRegex(CollectionType::fullNs(), "^test\\.");
- ASSERT_EQ(query->getFilter(), b.obj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), b.obj());
}
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1037,7 +1038,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) {
});
onFindCommand([this, dbt1, dbt2](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), DatabaseType::ConfigNS);
@@ -1045,8 +1046,9 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), DatabaseType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSON(DatabaseType::primary(dbt1.getPrimary().toString())));
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(),
+ BSON(DatabaseType::primary(dbt1.getPrimary().toString())));
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1111,7 +1113,7 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) {
});
onFindCommand([this, tagA, tagB](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
@@ -1119,8 +1121,8 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSON(TagsType::ns("TestDB.TestColl")));
- ASSERT_EQ(query->getSort(), BSON(TagsType::min() << 1));
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(TagsType::ns("TestDB.TestColl")));
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSON(TagsType::min() << 1));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1128,8 +1130,8 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) {
});
const auto& tags = future.timed_get(kFutureTimeout);
- ASSERT_EQ(tagA.toBSON(), tags[0].toBSON());
- ASSERT_EQ(tagB.toBSON(), tags[1].toBSON());
+ ASSERT_BSONOBJ_EQ(tagA.toBSON(), tags[0].toBSON());
+ ASSERT_BSONOBJ_EQ(tagB.toBSON(), tags[1].toBSON());
}
TEST_F(ShardingCatalogClientTest, GetTagsForCollectionNoTags) {
@@ -1197,7 +1199,7 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkOneTagFound) {
});
onFindCommand([this, chunk](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
@@ -1205,11 +1207,11 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkOneTagFound) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
- ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
- << BSON("$lte" << chunk.getMin())
- << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ ASSERT_BSONOBJ_EQ(query->getFilter(),
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1242,7 +1244,7 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkNoTagFound) {
});
onFindCommand([this, chunk](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
@@ -1250,11 +1252,11 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkNoTagFound) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
- ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
- << BSON("$lte" << chunk.getMin())
- << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ ASSERT_BSONOBJ_EQ(query->getFilter(),
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1284,7 +1286,7 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkInvalidTagDoc) {
});
onFindCommand([this, chunk](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
@@ -1292,11 +1294,11 @@ TEST_F(ShardingCatalogClientTest, GetTagForChunkInvalidTagDoc) {
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
- ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
- << BSON("$lte" << chunk.getMin())
- << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ ASSERT_BSONOBJ_EQ(query->getFilter(),
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1324,7 +1326,7 @@ TEST_F(ShardingCatalogClientTest, UpdateDatabase) {
onCommand([dbt](const RemoteCommandRequest& request) {
ASSERT_EQUALS("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BatchedUpdateRequest actualBatchedUpdate;
std::string errmsg;
@@ -1336,8 +1338,8 @@ TEST_F(ShardingCatalogClientTest, UpdateDatabase) {
ASSERT_TRUE(update->getUpsert());
ASSERT_FALSE(update->getMulti());
- ASSERT_EQUALS(update->getQuery(), BSON(DatabaseType::name(dbt.getName())));
- ASSERT_EQUALS(update->getUpdateExpr(), dbt.toBSON());
+ ASSERT_BSONOBJ_EQ(update->getQuery(), BSON(DatabaseType::name(dbt.getName())));
+ ASSERT_BSONOBJ_EQ(update->getUpdateExpr(), dbt.toBSON());
BatchedCommandResponse response;
response.setOk(true);
@@ -1399,20 +1401,19 @@ TEST_F(ShardingCatalogClientTest, ApplyChunkOpsDeprecatedSuccessful) {
ASSERT_OK(status);
});
- onCommand(
- [updateOps, preCondition, nss](const RemoteCommandRequest& request) {
- ASSERT_EQUALS("config", request.dbname);
- ASSERT_EQUALS(BSON("w"
+ onCommand([updateOps, preCondition, nss](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS("config", request.dbname);
+ ASSERT_BSONOBJ_EQ(BSON("w"
<< "majority"
<< "wtimeout"
<< 15000),
request.cmdObj["writeConcern"].Obj());
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
- ASSERT_EQUALS(updateOps, request.cmdObj["applyOps"].Obj());
- ASSERT_EQUALS(preCondition, request.cmdObj["preCondition"].Obj());
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(updateOps, request.cmdObj["applyOps"].Obj());
+ ASSERT_BSONOBJ_EQ(preCondition, request.cmdObj["preCondition"].Obj());
- return BSON("ok" << 1);
- });
+ return BSON("ok" << 1);
+ });
// Now wait for the applyChunkOpsDeprecated call to return
future.timed_get(kFutureTimeout);
@@ -1515,14 +1516,14 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
- ASSERT_EQ(BSONObj(), query->getFilter());
- ASSERT_EQ(BSONObj(), query->getSort());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1562,7 +1563,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(DatabaseType::ConfigNS, nss.ns());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
return vector<BSONObj>{};
@@ -1576,7 +1577,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
ASSERT_EQUALS("listDatabases", cmdName);
ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return BSON("ok" << 1 << "totalSize" << 10);
});
@@ -1589,7 +1591,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
ASSERT_EQUALS("listDatabases", cmdName);
ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return BSON("ok" << 1 << "totalSize" << 1);
});
@@ -1601,7 +1604,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
string cmdName = request.cmdObj.firstElement().fieldName();
ASSERT_EQUALS("listDatabases", cmdName);
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return BSON("ok" << 1 << "totalSize" << 100);
});
@@ -1611,7 +1615,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
ASSERT_EQUALS(configHost, request.target);
ASSERT_EQUALS("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BatchedInsertRequest actualBatchedInsert;
std::string errmsg;
@@ -1627,7 +1631,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseSuccess) {
ShardId(s1.getName())); // This is the one we reported with the smallest size
expectedDb.setSharded(false);
- ASSERT_EQUALS(expectedDb.toBSON(), insert);
+ ASSERT_BSONOBJ_EQ(expectedDb.toBSON(), insert);
BatchedCommandResponse response;
response.setOk(true);
@@ -1678,7 +1682,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDBExists) {
});
onFindCommand([this, dbname](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
@@ -1688,7 +1692,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDBExists) {
DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbname) + "$", "i");
ASSERT_EQ(DatabaseType::ConfigNS, query->ns());
- ASSERT_EQ(queryBuilder.obj(), query->getFilter());
+ ASSERT_BSONOBJ_EQ(queryBuilder.obj(), query->getFilter());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
return vector<BSONObj>{BSON("_id" << dbname)};
@@ -1717,7 +1721,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDBExistsDifferentCase) {
});
onFindCommand([this, dbname, dbnameDiffCase](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
@@ -1727,7 +1731,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDBExistsDifferentCase) {
DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbname) + "$", "i");
ASSERT_EQ(DatabaseType::ConfigNS, query->ns());
- ASSERT_EQ(queryBuilder.obj(), query->getFilter());
+ ASSERT_BSONOBJ_EQ(queryBuilder.obj(), query->getFilter());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
return vector<BSONObj>{BSON("_id" << dbnameDiffCase)};
@@ -1756,7 +1760,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseNoShards) {
// Report no databases with the same name already exist
onFindCommand([this, dbname](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(DatabaseType::ConfigNS, nss.ns());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1765,13 +1769,13 @@ TEST_F(ShardingCatalogClientTest, createDatabaseNoShards) {
// Report no shards exist
onFindCommand([this](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
- ASSERT_EQ(BSONObj(), query->getFilter());
- ASSERT_EQ(BSONObj(), query->getSort());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1804,13 +1808,13 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
- ASSERT_EQ(BSONObj(), query->getFilter());
- ASSERT_EQ(BSONObj(), query->getSort());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getSort());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1849,7 +1853,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
// Report no databases with the same name already exist
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(DatabaseType::ConfigNS, nss.ns());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1864,7 +1868,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
ASSERT_EQUALS("listDatabases", cmdName);
ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return BSON("ok" << 1 << "totalSize" << 10);
});
@@ -1877,7 +1882,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
ASSERT_EQUALS("listDatabases", cmdName);
ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return BSON("ok" << 1 << "totalSize" << 1);
});
@@ -1890,7 +1896,8 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
ASSERT_EQUALS("listDatabases", cmdName);
ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return BSON("ok" << 1 << "totalSize" << 100);
});
@@ -1901,7 +1908,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
ASSERT_EQUALS("config", request.dbname);
ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BatchedInsertRequest actualBatchedInsert;
std::string errmsg;
@@ -1917,7 +1924,7 @@ TEST_F(ShardingCatalogClientTest, createDatabaseDuplicateKeyOnInsert) {
ShardId(s1.getName())); // This is the one we reported with the smallest size
expectedDb.setSharded(false);
- ASSERT_EQUALS(expectedDb.toBSON(), insert);
+ ASSERT_BSONOBJ_EQ(expectedDb.toBSON(), insert);
BatchedCommandResponse response;
response.setOk(false);
@@ -1958,7 +1965,7 @@ TEST_F(ShardingCatalogClientTest, EnableShardingNoDBExists) {
// Query to find if db already exists in config.
onFindCommand([this](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(DatabaseType::ConfigNS, nss.toString());
@@ -1969,8 +1976,8 @@ TEST_F(ShardingCatalogClientTest, EnableShardingNoDBExists) {
BSONObj expectedQuery(fromjson(R"({ _id: { $regex: "^test$", $options: "i" }})"));
ASSERT_EQ(DatabaseType::ConfigNS, query->ns());
- ASSERT_EQ(expectedQuery, query->getFilter());
- ASSERT_EQ(BSONObj(), query->getSort());
+ ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSONObj(), query->getSort());
ASSERT_EQ(1, query->getLimit().get());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1982,9 +1989,10 @@ TEST_F(ShardingCatalogClientTest, EnableShardingNoDBExists) {
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQ(HostAndPort("shard0:12"), request.target);
ASSERT_EQ("admin", request.dbname);
- ASSERT_EQ(BSON("listDatabases" << 1), request.cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("listDatabases" << 1), request.cmdObj);
- ASSERT_EQUALS(rpc::ServerSelectionMetadata(true, boost::none).toBSON(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
+ request.metadata);
return fromjson(R"({
databases: [],
@@ -1997,7 +2005,7 @@ TEST_F(ShardingCatalogClientTest, EnableShardingNoDBExists) {
ASSERT_EQ(HostAndPort("config:123"), request.target);
ASSERT_EQ("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BSONObj expectedCmd(fromjson(R"({
update: "databases",
@@ -2011,7 +2019,7 @@ TEST_F(ShardingCatalogClientTest, EnableShardingNoDBExists) {
maxTimeMS: 30000
})"));
- ASSERT_EQ(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
nModified: 0,
@@ -2090,7 +2098,7 @@ TEST_F(ShardingCatalogClientTest, EnableShardingDBExists) {
ASSERT_EQ(HostAndPort("config:123"), request.target);
ASSERT_EQ("config", request.dbname);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
BSONObj expectedCmd(fromjson(R"({
update: "databases",
@@ -2104,7 +2112,7 @@ TEST_F(ShardingCatalogClientTest, EnableShardingDBExists) {
maxTimeMS: 30000
})"));
- ASSERT_EQ(expectedCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCmd, request.cmdObj);
return fromjson(R"({
nModified: 0,
@@ -2208,7 +2216,7 @@ TEST_F(ShardingCatalogClientTest, BasicReadAfterOpTime) {
onCommandWithMetadata([this, &newOpTime, &lastOpTime](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(string("dummy"), request.cmdObj.firstElementFieldName());
checkReadConcern(request.cmdObj, lastOpTime.getTimestamp(), lastOpTime.getTerm());
@@ -2243,7 +2251,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
onCommandWithMetadata([this, &newOpTime, &highestOpTime](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(string("dummy"), request.cmdObj.firstElementFieldName());
checkReadConcern(request.cmdObj, highestOpTime.getTimestamp(), highestOpTime.getTerm());
@@ -2271,7 +2279,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
onCommandWithMetadata([this, &oldOpTime, &highestOpTime](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(string("dummy"), request.cmdObj.firstElementFieldName());
checkReadConcern(request.cmdObj, highestOpTime.getTimestamp(), highestOpTime.getTerm());
@@ -2295,7 +2303,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeShouldNotGoBack) {
onCommandWithMetadata([this, &oldOpTime, &highestOpTime](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(string("dummy"), request.cmdObj.firstElementFieldName());
checkReadConcern(request.cmdObj, highestOpTime.getTimestamp(), highestOpTime.getTerm());
@@ -2322,7 +2330,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeFindThenCmd) {
onFindWithMetadataCommand(
[this, &newOpTime, &highestOpTime](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
checkReadConcern(request.cmdObj, highestOpTime.getTimestamp(), highestOpTime.getTerm());
ReplSetMetadata metadata(10, newOpTime, newOpTime, 100, OID(), 30, -1);
@@ -2353,7 +2361,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeFindThenCmd) {
onCommand([this, &oldOpTime, &highestOpTime](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(string("dummy"), request.cmdObj.firstElementFieldName());
checkReadConcern(request.cmdObj, highestOpTime.getTimestamp(), highestOpTime.getTerm());
@@ -2380,7 +2388,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeCmdThenFind) {
onCommandWithMetadata([this, &newOpTime, &highestOpTime](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(string("dummy"), request.cmdObj.firstElementFieldName());
checkReadConcern(request.cmdObj, highestOpTime.getTimestamp(), highestOpTime.getTerm());
@@ -2404,7 +2412,7 @@ TEST_F(ShardingCatalogClientTest, ReadAfterOpTimeCmdThenFind) {
const OpTime oldOpTime(Timestamp(3, 10), 5);
onFindCommand([this, &oldOpTime, &highestOpTime](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
ASSERT_EQ(string("find"), request.cmdObj.firstElementFieldName());
checkReadConcern(request.cmdObj, highestOpTime.getTimestamp(), highestOpTime.getTerm());
@@ -2453,7 +2461,7 @@ TEST_F(ShardingCatalogClientTest, RetryOnReadCommandNetworkErrorSucceedsAtMaxRet
operationContext(), "test", BSON("dummy" << 1), &responseBuilder);
ASSERT_TRUE(ok);
auto response = responseBuilder.obj();
- ASSERT_EQ(expectedResult, response);
+ ASSERT_BSONOBJ_EQ(expectedResult, response);
});
for (int i = 0; i < kMaxCommandRetry - 1; ++i) {
diff --git a/src/mongo/s/catalog/replset/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/replset/sharding_catalog_write_retry_test.cpp
index e520857395f..45a9e867f8f 100644
--- a/src/mongo/s/catalog/replset/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/replset/sharding_catalog_write_retry_test.cpp
@@ -174,7 +174,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMatch) {
ASSERT_EQ(request.target, kTestHosts[1]);
auto query =
assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_EQ(BSON("_id" << 1), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>{objToInsert};
});
@@ -212,7 +212,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorNotFound) {
ASSERT_EQ(request.target, kTestHosts[1]);
auto query =
assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_EQ(BSON("_id" << 1), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>();
});
@@ -250,7 +250,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMismatch) {
ASSERT_EQ(request.target, kTestHosts[1]);
auto query =
assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_EQ(BSON("_id" << 1), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>{BSON("_id" << 1 << "Value"
<< "TestValue has changed")};
@@ -309,7 +309,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterWriteConcernFailureMatch) {
ASSERT_EQ(request.target, kTestHosts[0]);
auto query =
assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_EQ(BSON("_id" << 1), query->getFilter());
+ ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>{objToInsert};
});
diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp
index 904c9fdfa46..0c3eadb9bdf 100644
--- a/src/mongo/s/catalog/type_changelog_test.cpp
+++ b/src/mongo/s/catalog/type_changelog_test.cpp
@@ -64,9 +64,9 @@ TEST(ChangeLogType, Valid) {
ASSERT_EQUALS(logEntry.getTime(), Date_t::fromMillisSinceEpoch(1));
ASSERT_EQUALS(logEntry.getWhat(), "split");
ASSERT_EQUALS(logEntry.getNS(), "test.test");
- ASSERT_EQUALS(logEntry.getDetails(),
- BSON("dummy"
- << "info"));
+ ASSERT_BSONOBJ_EQ(logEntry.getDetails(),
+ BSON("dummy"
+ << "info"));
}
TEST(ChangeLogType, MissingChangeId) {
diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp
index 8b3ddd356f0..0b6bbfaf727 100644
--- a/src/mongo/s/catalog/type_chunk.cpp
+++ b/src/mongo/s/catalog/type_chunk.cpp
@@ -35,6 +35,7 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
@@ -61,7 +62,7 @@ const char kMaxKey[] = "max";
ChunkRange::ChunkRange(BSONObj minKey, BSONObj maxKey)
: _minKey(std::move(minKey)), _maxKey(std::move(maxKey)) {
- dassert(_minKey < _maxKey);
+ dassert(SimpleBSONObjComparator::kInstance.evaluate(_minKey < _maxKey));
}
StatusWith<ChunkRange> ChunkRange::fromBSON(const BSONObj& obj) {
@@ -91,7 +92,7 @@ StatusWith<ChunkRange> ChunkRange::fromBSON(const BSONObj& obj) {
}
}
- if (minKey.Obj() >= maxKey.Obj()) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(minKey.Obj() >= maxKey.Obj())) {
return {ErrorCodes::FailedToParse,
str::stream() << "min: " << minKey.Obj() << " should be less than max: "
<< maxKey.Obj()};
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index aba5ff49e1a..bcf1470240c 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -137,8 +137,8 @@ TEST(ChunkType, CorrectContents) {
ChunkType chunk = chunkRes.getValue();
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
- ASSERT_EQUALS(chunk.getMin(), BSON("a" << 10));
- ASSERT_EQUALS(chunk.getMax(), BSON("a" << 20));
+ ASSERT_BSONOBJ_EQ(chunk.getMin(), BSON("a" << 10));
+ ASSERT_BSONOBJ_EQ(chunk.getMax(), BSON("a" << 20));
ASSERT_EQUALS(chunk.getVersion().toLong(), chunkVersion.toLong());
ASSERT_EQUALS(chunk.getVersion().epoch(), chunkVersion.epoch());
ASSERT_EQUALS(chunk.getShard(), "shard0001");
@@ -161,8 +161,8 @@ TEST(ChunkType, Pre22Format) {
ASSERT_OK(chunk.validate());
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
- ASSERT_EQUALS(chunk.getMin(), BSON("a" << 10));
- ASSERT_EQUALS(chunk.getMax(), BSON("a" << 20));
+ ASSERT_BSONOBJ_EQ(chunk.getMin(), BSON("a" << 10));
+ ASSERT_BSONOBJ_EQ(chunk.getMax(), BSON("a" << 20));
ASSERT_EQUALS(chunk.getVersion().toLong(), 1ULL);
ASSERT(!chunk.getVersion().epoch().isSet());
ASSERT_EQUALS(chunk.getShard(), "shard0001");
@@ -180,8 +180,8 @@ TEST(ChunkRange, BasicBSONParsing) {
ASSERT_OK(parseStatus.getStatus());
auto chunkRange = parseStatus.getValue();
- ASSERT_EQ(BSON("x" << 0), chunkRange.getMin());
- ASSERT_EQ(BSON("x" << 10), chunkRange.getMax());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 0), chunkRange.getMin());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 10), chunkRange.getMax());
}
TEST(ChunkRange, MinGreaterThanMaxShouldError) {
diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp
index eb88bb6030d..89ad96733b9 100644
--- a/src/mongo/s/catalog/type_collection_test.cpp
+++ b/src/mongo/s/catalog/type_collection_test.cpp
@@ -62,10 +62,10 @@ TEST(CollectionType, Basic) {
ASSERT(coll.getNs() == NamespaceString{"db.coll"});
ASSERT_EQUALS(coll.getEpoch(), oid);
ASSERT_EQUALS(coll.getUpdatedAt(), Date_t::fromMillisSinceEpoch(1));
- ASSERT_EQUALS(coll.getKeyPattern().toBSON(), BSON("a" << 1));
- ASSERT_EQUALS(coll.getDefaultCollation(),
- BSON("locale"
- << "fr_CA"));
+ ASSERT_BSONOBJ_EQ(coll.getKeyPattern().toBSON(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(coll.getDefaultCollation(),
+ BSON("locale"
+ << "fr_CA"));
ASSERT_EQUALS(coll.getUnique(), true);
ASSERT_EQUALS(coll.getAllowBalance(), true);
ASSERT_EQUALS(coll.getDropped(), false);
@@ -95,7 +95,7 @@ TEST(CollectionType, MissingDefaultCollationParses) {
CollectionType coll = status.getValue();
ASSERT_TRUE(coll.validate().isOK());
- ASSERT_EQUALS(coll.getDefaultCollation(), BSONObj());
+ ASSERT_BSONOBJ_EQ(coll.getDefaultCollation(), BSONObj());
}
TEST(CollectionType, DefaultCollationSerializesCorrectly) {
@@ -113,9 +113,9 @@ TEST(CollectionType, DefaultCollationSerializesCorrectly) {
CollectionType coll = status.getValue();
ASSERT_TRUE(coll.validate().isOK());
BSONObj serialized = coll.toBSON();
- ASSERT_EQUALS(serialized["defaultCollation"].Obj(),
- BSON("locale"
- << "fr_CA"));
+ ASSERT_BSONOBJ_EQ(serialized["defaultCollation"].Obj(),
+ BSON("locale"
+ << "fr_CA"));
}
TEST(CollectionType, MissingDefaultCollationIsNotSerialized) {
@@ -172,7 +172,7 @@ TEST(CollectionType, Pre22Format) {
ASSERT(coll.getNs() == NamespaceString{"db.coll"});
ASSERT(!coll.getEpoch().isSet());
ASSERT_EQUALS(coll.getUpdatedAt(), Date_t::fromMillisSinceEpoch(1));
- ASSERT_EQUALS(coll.getKeyPattern().toBSON(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(coll.getKeyPattern().toBSON(), BSON("a" << 1));
ASSERT_EQUALS(coll.getUnique(), false);
ASSERT_EQUALS(coll.getAllowBalance(), true);
ASSERT_EQUALS(coll.getDropped(), false);
diff --git a/src/mongo/s/catalog/type_config_version_test.cpp b/src/mongo/s/catalog/type_config_version_test.cpp
index db3ff5373f0..7028fa7e440 100644
--- a/src/mongo/s/catalog/type_config_version_test.cpp
+++ b/src/mongo/s/catalog/type_config_version_test.cpp
@@ -104,7 +104,7 @@ TEST(Validity, NewVersionRoundTrip) {
ASSERT_EQUALS(versionInfo.getCurrentVersion(), 4);
ASSERT_EQUALS(versionInfo.getClusterId(), clusterId);
ASSERT_EQUALS(versionInfo.getUpgradeId(), upgradeId);
- ASSERT_EQUALS(versionInfo.getUpgradeState(), upgradeState);
+ ASSERT_BSONOBJ_EQ(versionInfo.getUpgradeState(), upgradeState);
ASSERT_OK(versionInfo.validate());
}
diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp
index 3f1a7a1175d..78af4d5fe03 100644
--- a/src/mongo/s/catalog/type_tags_test.cpp
+++ b/src/mongo/s/catalog/type_tags_test.cpp
@@ -52,8 +52,8 @@ TEST(TagsType, Valid) {
ASSERT_EQUALS(tag.getNS(), "test.mycol");
ASSERT_EQUALS(tag.getTag(), "tag");
- ASSERT_EQUALS(tag.getMinKey(), BSON("a" << 10));
- ASSERT_EQUALS(tag.getMaxKey(), BSON("a" << 20));
+ ASSERT_BSONOBJ_EQ(tag.getMinKey(), BSON("a" << 10));
+ ASSERT_BSONOBJ_EQ(tag.getMaxKey(), BSON("a" << 20));
}
TEST(TagsType, MissingNsField) {
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index d4566cd424b..c38dee72dd5 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -32,6 +32,7 @@
#include "mongo/s/chunk.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/connpool.h"
#include "mongo/db/commands.h"
#include "mongo/db/lasterror.h"
@@ -264,7 +265,7 @@ StatusWith<boost::optional<ChunkRange>> Chunk::split(OperationContext* txn,
// Normally, we'd have a sound split point here if the chunk is not empty.
// It's also a good place to sanity check.
- if (_min == splitPoints.front()) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(_min == splitPoints.front())) {
string msg(str::stream() << "not splitting chunk " << toString() << ", split point "
<< splitPoints.front()
<< " is exactly on chunk bounds");
@@ -272,7 +273,7 @@ StatusWith<boost::optional<ChunkRange>> Chunk::split(OperationContext* txn,
return Status(ErrorCodes::CannotSplit, msg);
}
- if (_max == splitPoints.back()) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(_max == splitPoints.back())) {
string msg(str::stream() << "not splitting chunk " << toString() << ", split point "
<< splitPoints.back()
<< " is exactly on chunk bounds");
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 3af2bc8fa43..8cb5b7fcb71 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -36,6 +36,7 @@
#include <map>
#include <set>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/read_preference.h"
#include "mongo/client/remote_command_targeter.h"
@@ -143,14 +144,16 @@ bool isChunkMapValid(const ChunkMap& chunkMap) {
++it) {
ChunkMap::const_iterator last = boost::prior(it);
- if (!(it->second->getMin() == last->second->getMax())) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(it->second->getMin() !=
+ last->second->getMax())) {
log() << last->second->toString();
log() << it->second->toString();
log() << it->second->getMin();
log() << last->second->getMax();
}
- ENSURE(it->second->getMin() == last->second->getMax());
+ ENSURE(SimpleBSONObjComparator::kInstance.evaluate(it->second->getMin() ==
+ last->second->getMax()));
}
return true;
@@ -395,7 +398,7 @@ void ChunkManager::calcInitSplitsAndShards(OperationContext* txn,
shardIds->push_back(primaryShardId);
} else {
// make sure points are unique and ordered
- set<BSONObj> orderedPts;
+ auto orderedPts = SimpleBSONObjComparator::kInstance.makeOrderedBSONObjSet();
for (unsigned i = 0; i < initPoints->size(); ++i) {
BSONObj pt = (*initPoints)[i];
orderedPts.insert(pt);
@@ -783,7 +786,8 @@ ChunkManager::ChunkRangeMap ChunkManager::_constructRanges(const ChunkMap& chunk
if (insertResult.first != chunkRangeMap.begin()) {
// Make sure there are no gaps in the ranges
insertResult.first--;
- invariant(insertResult.first->first == rangeMin);
+ invariant(
+ SimpleBSONObjComparator::kInstance.evaluate(insertResult.first->first == rangeMin));
}
}
diff --git a/src/mongo/s/chunk_manager_targeter.h b/src/mongo/s/chunk_manager_targeter.h
index 9de403ff2fd..b1f5e05defc 100644
--- a/src/mongo/s/chunk_manager_targeter.h
+++ b/src/mongo/s/chunk_manager_targeter.h
@@ -31,6 +31,8 @@
#include <map>
#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsonobj_comparator_interface.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/ns_targeter.h"
@@ -43,9 +45,12 @@ class Shard;
struct ChunkVersion;
struct TargeterStats {
+ TargeterStats()
+ : chunkSizeDelta(SimpleBSONObjComparator::kInstance.makeOrderedBSONObjMap<int>()) {}
+
// Map of chunk shard minKey -> approximate delta. This is used for deciding
// whether a chunk might need splitting or not.
- std::map<BSONObj, int> chunkSizeDelta;
+ BSONObj::ComparatorInterface::BSONObjMap<int> chunkSizeDelta;
};
/**
diff --git a/src/mongo/s/cluster_identity_loader_test.cpp b/src/mongo/s/cluster_identity_loader_test.cpp
index d4c6ed89cb2..7eaca44c8a4 100644
--- a/src/mongo/s/cluster_identity_loader_test.cpp
+++ b/src/mongo/s/cluster_identity_loader_test.cpp
@@ -76,7 +76,7 @@ public:
void expectConfigVersionLoad(StatusWith<OID> result) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(configHost, request.target);
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_BSONOBJ_EQ(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), "config.version");
@@ -84,7 +84,7 @@ public:
auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), "config.version");
- ASSERT_EQ(query->getFilter(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
ASSERT_FALSE(query->getLimit().is_initialized());
if (result.isOK()) {
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index efd4ea445d1..55dacd87207 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -35,6 +35,7 @@
#include <string>
#include <vector>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/connpool.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/commands.h"
@@ -325,7 +326,7 @@ public:
BSONObjBuilder shardResultsB;
BSONObjBuilder shardCountsB;
map<string, int64_t> countsMap;
- set<BSONObj> splitPts;
+ auto splitPts = SimpleBSONObjComparator::kInstance.makeOrderedBSONObjSet();
{
bool ok = true;
@@ -509,7 +510,7 @@ public:
confOut->getChunkManager(txn, outputCollNss.ns(), true /* force */);
}
- map<BSONObj, int> chunkSizes;
+ auto chunkSizes = SimpleBSONObjComparator::kInstance.makeOrderedBSONObjMap<int>();
{
// Take distributed lock to prevent split / migration.
auto scopedDistLock = grid.catalogClient(txn)->distLock(
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 661d58bcaea..8bc1b75825d 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -34,6 +34,7 @@
#include <set>
#include <vector>
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/connpool.h"
#include "mongo/db/audit.h"
@@ -352,7 +353,8 @@ public:
for (list<BSONObj>::iterator it = indexes.begin(); it != indexes.end(); ++it) {
BSONObj idx = *it;
- if (idx["key"].embeddedObject() == proposedKey) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(idx["key"].embeddedObject() ==
+ proposedKey)) {
eqQueryResult = idx;
break;
}
@@ -457,7 +459,9 @@ public:
current += intervalSize;
}
- sort(allSplits.begin(), allSplits.end());
+ sort(allSplits.begin(),
+ allSplits.end(),
+ SimpleBSONObjComparator::kInstance.makeLessThan());
// 1. the initial splits define the "big chunks" that we will subdivide later
int lastIndex = -1;
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index bc340a92a95..d2e9f5c9a61 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -30,6 +30,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/connpool.h"
#include "mongo/db/auth/action_set.h"
@@ -912,7 +913,8 @@ public:
uassert(13408,
"keyPattern must equal shard key",
- cm->getShardKeyPattern().toBSON() == keyPattern);
+ SimpleBSONObjComparator::kInstance.evaluate(cm->getShardKeyPattern().toBSON() ==
+ keyPattern));
uassert(13405,
str::stream() << "min value " << min << " does not have shard key",
cm->getShardKeyPattern().isShardKey(min));
@@ -1376,7 +1378,8 @@ public:
shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, fullns);
massert(13091, "how could chunk manager be null!", cm);
- if (cm->getShardKeyPattern().toBSON() == BSON("files_id" << 1)) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(cm->getShardKeyPattern().toBSON() ==
+ BSON("files_id" << 1))) {
BSONObj finder = BSON("files_id" << cmdObj.firstElement());
vector<Strategy::CommandResult> results;
@@ -1388,7 +1391,8 @@ public:
result.appendElements(res);
return res["ok"].trueValue();
- } else if (cm->getShardKeyPattern().toBSON() == BSON("files_id" << 1 << "n" << 1)) {
+ } else if (SimpleBSONObjComparator::kInstance.evaluate(cm->getShardKeyPattern().toBSON() ==
+ BSON("files_id" << 1 << "n" << 1))) {
int n = 0;
BSONObj lastResult;
diff --git a/src/mongo/s/migration_secondary_throttle_options_test.cpp b/src/mongo/s/migration_secondary_throttle_options_test.cpp
index 9bde6a2700b..fb9300707bd 100644
--- a/src/mongo/s/migration_secondary_throttle_options_test.cpp
+++ b/src/mongo/s/migration_secondary_throttle_options_test.cpp
@@ -44,7 +44,7 @@ TEST(MigrationSecondaryThrottleOptions, CreateDefault) {
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kDefault);
ASSERT_EQ(MigrationSecondaryThrottleOptions::kDefault, options.getSecondaryThrottle());
ASSERT(!options.isWriteConcernSpecified());
- ASSERT_EQUALS(BSONObj(), options.toBSON());
+ ASSERT_BSONOBJ_EQ(BSONObj(), options.toBSON());
}
TEST(MigrationSecondaryThrottleOptions, CreateOn) {
@@ -52,7 +52,7 @@ TEST(MigrationSecondaryThrottleOptions, CreateOn) {
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOn);
ASSERT_EQ(MigrationSecondaryThrottleOptions::kOn, options.getSecondaryThrottle());
ASSERT(!options.isWriteConcernSpecified());
- ASSERT_EQUALS(BSON("secondaryThrottle" << true), options.toBSON());
+ ASSERT_BSONOBJ_EQ(BSON("secondaryThrottle" << true), options.toBSON());
}
TEST(MigrationSecondaryThrottleOptions, CreateOff) {
@@ -60,7 +60,7 @@ TEST(MigrationSecondaryThrottleOptions, CreateOff) {
MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff);
ASSERT_EQ(MigrationSecondaryThrottleOptions::kOff, options.getSecondaryThrottle());
ASSERT(!options.isWriteConcernSpecified());
- ASSERT_EQUALS(BSON("secondaryThrottle" << false), options.toBSON());
+ ASSERT_BSONOBJ_EQ(BSON("secondaryThrottle" << false), options.toBSON());
}
TEST(MigrationSecondaryThrottleOptions, NotSpecifiedInCommandBSON) {
@@ -68,7 +68,7 @@ TEST(MigrationSecondaryThrottleOptions, NotSpecifiedInCommandBSON) {
MigrationSecondaryThrottleOptions::createFromCommand(BSON("someOtherField" << 1)));
ASSERT_EQ(MigrationSecondaryThrottleOptions::kDefault, options.getSecondaryThrottle());
ASSERT(!options.isWriteConcernSpecified());
- ASSERT_EQUALS(BSONObj(), options.toBSON());
+ ASSERT_BSONOBJ_EQ(BSONObj(), options.toBSON());
}
TEST(MigrationSecondaryThrottleOptions, EnabledInCommandBSONWithoutWriteConcern) {
@@ -121,7 +121,7 @@ TEST(MigrationSecondaryThrottleOptions, NotSpecifiedInBalancerConfig) {
MigrationSecondaryThrottleOptions::createFromBalancerConfig(BSON("someOtherField" << 1)));
ASSERT_EQ(MigrationSecondaryThrottleOptions::kDefault, options.getSecondaryThrottle());
ASSERT(!options.isWriteConcernSpecified());
- ASSERT_EQUALS(BSONObj(), options.toBSON());
+ ASSERT_BSONOBJ_EQ(BSONObj(), options.toBSON());
}
TEST(MigrationSecondaryThrottleOptions, EnabledInBalancerConfigLegacyStyle) {
diff --git a/src/mongo/s/move_chunk_request_test.cpp b/src/mongo/s/move_chunk_request_test.cpp
index fd6b69f401d..e4e60a46dfc 100644
--- a/src/mongo/s/move_chunk_request_test.cpp
+++ b/src/mongo/s/move_chunk_request_test.cpp
@@ -69,8 +69,8 @@ TEST(MoveChunkRequest, Roundtrip) {
ASSERT_EQ(kTestConnectionString.toString(), request.getConfigServerCS().toString());
ASSERT_EQ(ShardId("shard0001"), request.getFromShardId());
ASSERT_EQ(ShardId("shard0002"), request.getToShardId());
- ASSERT_EQ(BSON("Key" << -100), request.getMinKey());
- ASSERT_EQ(BSON("Key" << 100), request.getMaxKey());
+ ASSERT_BSONOBJ_EQ(BSON("Key" << -100), request.getMinKey());
+ ASSERT_BSONOBJ_EQ(BSON("Key" << 100), request.getMaxKey());
ASSERT(request.hasChunkVersion());
ASSERT_EQ(chunkVersion, request.getChunkVersion());
ASSERT_EQ(1024, request.getMaxChunkSizeBytes());
@@ -107,8 +107,8 @@ TEST(MoveChunkRequest, BackwardsCompatibilityNoChunkVersionAndDefaults) {
ASSERT_EQ(kTestConnectionString.toString(), request.getConfigServerCS().toString());
ASSERT_EQ(ShardId("shard0001"), request.getFromShardId());
ASSERT_EQ(ShardId("shard0002"), request.getToShardId());
- ASSERT_EQ(BSON("Key" << -1), request.getMinKey());
- ASSERT_EQ(BSON("Key" << 1), request.getMaxKey());
+ ASSERT_BSONOBJ_EQ(BSON("Key" << -1), request.getMinKey());
+ ASSERT_BSONOBJ_EQ(BSON("Key" << 1), request.getMaxKey());
ASSERT(!request.hasChunkVersion());
ASSERT_EQ(MigrationSecondaryThrottleOptions::kDefault,
request.getSecondaryThrottle().getSecondaryThrottle());
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index e4d609cc88d..a0a8af6c1de 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -256,17 +256,17 @@ TEST_F(AsyncResultsMergerTest, ClusterFind) {
ASSERT_TRUE(arm->remotesExhausted());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 5}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 5}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 6}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 6}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -291,17 +291,17 @@ TEST_F(AsyncResultsMergerTest, ClusterFindAndGetMore) {
ASSERT_FALSE(arm->remotesExhausted());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 5}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 5}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 6}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 6}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -321,13 +321,13 @@ TEST_F(AsyncResultsMergerTest, ClusterFindAndGetMore) {
ASSERT_FALSE(arm->remotesExhausted());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 10}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 10}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 7}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 7}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 8}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 8}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 9}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 9}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -343,7 +343,7 @@ TEST_F(AsyncResultsMergerTest, ClusterFindAndGetMore) {
ASSERT_TRUE(arm->remotesExhausted());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 11}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 11}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -370,23 +370,23 @@ TEST_F(AsyncResultsMergerTest, ClusterFindSorted) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3, $sortKey: {'': 3}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3, $sortKey: {'': 3}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 4, $sortKey: {'': 4}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 4, $sortKey: {'': 4}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 5, $sortKey: {'': 5}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 5, $sortKey: {'': 5}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 6, $sortKey: {'': 6}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 6, $sortKey: {'': 6}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 8, $sortKey: {'': 8}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 8, $sortKey: {'': 8}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 9, $sortKey: {'': 9}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 9, $sortKey: {'': 9}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -413,13 +413,17 @@ TEST_F(AsyncResultsMergerTest, ClusterFindAndGetMoreSorted) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 3}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 3}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 4}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 4}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 5}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 5}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 6}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 6}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -434,11 +438,14 @@ TEST_F(AsyncResultsMergerTest, ClusterFindAndGetMoreSorted) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 7}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 7}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 7}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 7}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 8}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 8}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -453,11 +460,14 @@ TEST_F(AsyncResultsMergerTest, ClusterFindAndGetMoreSorted) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 9}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 9}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 10}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 10}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 10}}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 10}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -484,23 +494,23 @@ TEST_F(AsyncResultsMergerTest, ClusterFindCompoundSortKey) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 10, '': 11}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 10, '': 11}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 10, '': 12}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 10, '': 12}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 5, '': 9}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 5, '': 9}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 5, '': 9}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 5, '': 9}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 4, '': 4}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 4, '': 4}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{$sortKey: {'': 4, '': 20}}"),
- *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{$sortKey: {'': 4, '': 20}}"),
+ *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -561,7 +571,7 @@ TEST_F(AsyncResultsMergerTest, ClusterFindInitialBatchSizeIsZero) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -584,7 +594,7 @@ TEST_F(AsyncResultsMergerTest, ClusterFindInitialBatchSizeIsZero) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -613,7 +623,7 @@ TEST_F(AsyncResultsMergerTest, ReceivedViewDefinitionFromShard) {
auto outputPipeline = (*viewDef)["pipeline"];
ASSERT(!outputPipeline.eoo());
- ASSERT_EQ(fromjson(inputPipeline), outputPipeline.Obj());
+ ASSERT_BSONOBJ_EQ(fromjson(inputPipeline), outputPipeline.Obj());
auto outputNs = (*viewDef)["ns"];
ASSERT(!outputNs.eoo());
@@ -637,13 +647,13 @@ TEST_F(AsyncResultsMergerTest, ExistingCursors) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -667,13 +677,13 @@ TEST_F(AsyncResultsMergerTest, StreamResultsFromOneShardIfOtherDoesntRespond) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 4}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -690,9 +700,9 @@ TEST_F(AsyncResultsMergerTest, StreamResultsFromOneShardIfOtherDoesntRespond) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 5}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 5}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 6}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 6}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -707,9 +717,9 @@ TEST_F(AsyncResultsMergerTest, StreamResultsFromOneShardIfOtherDoesntRespond) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 7}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 7}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 8}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 8}"), *unittest::assertGet(arm->nextReady()).getResult());
// Kill cursor before deleting it, as the second remote cursor has not been exhausted. We don't
// wait on 'killEvent' here, as the blackholed request's callback will only run on shutdown of
@@ -734,11 +744,11 @@ TEST_F(AsyncResultsMergerTest, ErrorOnMismatchedCursorIds) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -833,9 +843,9 @@ TEST_F(AsyncResultsMergerTest, ErrorCantScheduleEventBeforeLastSignaled) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
@@ -940,7 +950,7 @@ TEST_F(AsyncResultsMergerTest, KillTwoOutstandingBatches) {
<< "testcoll"
<< "cursors"
<< BSON_ARRAY(CursorId(123)));
- ASSERT_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
+ ASSERT_BSONOBJ_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
// Ensure that we properly signal both those waiting for the kill, and those waiting for more
// results to be ready.
@@ -963,9 +973,9 @@ TEST_F(AsyncResultsMergerTest, KillOutstandingGetMore) {
// First batch received.
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
// This will schedule a getMore on cursor id 123.
ASSERT_FALSE(arm->ready());
@@ -987,7 +997,7 @@ TEST_F(AsyncResultsMergerTest, KillOutstandingGetMore) {
<< "testcoll"
<< "cursors"
<< BSON_ARRAY(CursorId(123)));
- ASSERT_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
+ ASSERT_BSONOBJ_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
// Ensure that we properly signal both those waiting for the kill, and those waiting for more
// results to be ready.
@@ -1042,9 +1052,9 @@ TEST_F(AsyncResultsMergerTest, TailableBasic) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
// In the tailable case, we expect EOF after every batch.
ASSERT_TRUE(arm->ready());
@@ -1064,7 +1074,7 @@ TEST_F(AsyncResultsMergerTest, TailableBasic) {
ASSERT_TRUE(arm->ready());
ASSERT_FALSE(arm->remotesExhausted());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
ASSERT_FALSE(arm->remotesExhausted());
@@ -1136,9 +1146,9 @@ TEST_F(AsyncResultsMergerTest, GetMoreBatchSizes) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
responses.clear();
@@ -1157,7 +1167,7 @@ TEST_F(AsyncResultsMergerTest, GetMoreBatchSizes) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -1172,7 +1182,7 @@ TEST_F(AsyncResultsMergerTest, SendsSecondaryOkAsMetadata) {
ASSERT_FALSE(arm->ready());
BSONObj cmdRequestMetadata = getFirstPendingRequest().metadata;
- ASSERT_EQ(cmdRequestMetadata, rpc::ServerSelectionMetadata(true, boost::none).toBSON());
+ ASSERT_BSONOBJ_EQ(cmdRequestMetadata, rpc::ServerSelectionMetadata(true, boost::none).toBSON());
std::vector<CursorResponse> responses;
std::vector<BSONObj> batch1 = {fromjson("{_id: 1}")};
@@ -1181,7 +1191,7 @@ TEST_F(AsyncResultsMergerTest, SendsSecondaryOkAsMetadata) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
@@ -1209,9 +1219,9 @@ TEST_F(AsyncResultsMergerTest, AllowPartialResults) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -1230,7 +1240,7 @@ TEST_F(AsyncResultsMergerTest, AllowPartialResults) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 3}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -1263,9 +1273,9 @@ TEST_F(AsyncResultsMergerTest, AllowPartialResultsSingleNode) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_FALSE(arm->ready());
readyEvent = unittest::assertGet(arm->nextEvent());
@@ -1301,7 +1311,7 @@ TEST_F(AsyncResultsMergerTest, RetryOnNotMasterNoSlaveOkSingleNode) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->remotesExhausted());
ASSERT_TRUE(arm->ready());
@@ -1364,7 +1374,7 @@ TEST_F(AsyncResultsMergerTest, RetryOnHostUnreachableAllowPartialResults) {
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->remotesExhausted());
ASSERT_TRUE(arm->ready());
@@ -1385,7 +1395,7 @@ TEST_F(AsyncResultsMergerTest, GetMoreRequestIncludesMaxTimeMS) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
@@ -1400,7 +1410,7 @@ TEST_F(AsyncResultsMergerTest, GetMoreRequestIncludesMaxTimeMS) {
<< "testcoll"
<< "maxTimeMS"
<< 789);
- ASSERT_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
+ ASSERT_BSONOBJ_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
responses.clear();
std::vector<BSONObj> batch2 = {fromjson("{_id: 2}")};
@@ -1410,7 +1420,7 @@ TEST_F(AsyncResultsMergerTest, GetMoreRequestIncludesMaxTimeMS) {
executor()->waitForEvent(readyEvent);
ASSERT_TRUE(arm->ready());
- ASSERT_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), *unittest::assertGet(arm->nextReady()).getResult());
ASSERT_TRUE(arm->ready());
ASSERT_TRUE(unittest::assertGet(arm->nextReady()).isEOF());
}
diff --git a/src/mongo/s/query/cluster_client_cursor_impl_test.cpp b/src/mongo/s/query/cluster_client_cursor_impl_test.cpp
index 06a193f3298..0b87488e651 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl_test.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_impl_test.cpp
@@ -53,7 +53,7 @@ TEST(ClusterClientCursorImpl, NumReturnedSoFar) {
for (int i = 1; i < 10; ++i) {
auto result = cursor.next();
ASSERT(result.isOK());
- ASSERT_EQ(*result.getValue().getResult(), BSON("a" << i));
+ ASSERT_BSONOBJ_EQ(*result.getValue().getResult(), BSON("a" << i));
ASSERT_EQ(cursor.getNumReturnedSoFar(), i);
}
// Now check that if nothing is fetched the getNumReturnedSoFar stays the same.
@@ -73,7 +73,7 @@ TEST(ClusterClientCursorImpl, QueueResult) {
auto firstResult = cursor.next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
cursor.queueResult(BSON("a" << 2));
cursor.queueResult(BSON("a" << 3));
@@ -81,17 +81,17 @@ TEST(ClusterClientCursorImpl, QueueResult) {
auto secondResult = cursor.next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
auto thirdResult = cursor.next();
ASSERT_OK(thirdResult.getStatus());
ASSERT(thirdResult.getValue().getResult());
- ASSERT_EQ(*thirdResult.getValue().getResult(), BSON("a" << 3));
+ ASSERT_BSONOBJ_EQ(*thirdResult.getValue().getResult(), BSON("a" << 3));
auto fourthResult = cursor.next();
ASSERT_OK(fourthResult.getStatus());
ASSERT(fourthResult.getValue().getResult());
- ASSERT_EQ(*fourthResult.getValue().getResult(), BSON("a" << 4));
+ ASSERT_BSONOBJ_EQ(*fourthResult.getValue().getResult(), BSON("a" << 4));
auto fifthResult = cursor.next();
ASSERT_OK(fifthResult.getStatus());
@@ -118,7 +118,7 @@ TEST(ClusterClientCursorImpl, CursorPropagatesViewDefinition) {
ASSERT_OK(result.getStatus());
ASSERT(!result.getValue().getResult());
ASSERT(result.getValue().getViewDefinition());
- ASSERT_EQ(*result.getValue().getViewDefinition(), viewDef);
+ ASSERT_BSONOBJ_EQ(*result.getValue().getViewDefinition(), viewDef);
}
TEST(ClusterClientCursorImpl, RemotesExhausted) {
@@ -133,13 +133,13 @@ TEST(ClusterClientCursorImpl, RemotesExhausted) {
auto firstResult = cursor.next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
ASSERT_TRUE(cursor.remotesExhausted());
auto secondResult = cursor.next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
ASSERT_TRUE(cursor.remotesExhausted());
auto thirdResult = cursor.next();
diff --git a/src/mongo/s/query/cluster_cursor_manager_test.cpp b/src/mongo/s/query/cluster_cursor_manager_test.cpp
index 3cb88e9a813..b12b332375b 100644
--- a/src/mongo/s/query/cluster_cursor_manager_test.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager_test.cpp
@@ -118,7 +118,7 @@ TEST_F(ClusterCursorManagerTest, RegisterCursor) {
auto nextResult = pinnedCursor.getValue().next();
ASSERT_OK(nextResult.getStatus());
ASSERT(nextResult.getValue().getResult());
- ASSERT_EQ(BSON("a" << 1), *nextResult.getValue().getResult());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 1), *nextResult.getValue().getResult());
nextResult = pinnedCursor.getValue().next();
ASSERT_OK(nextResult.getStatus());
ASSERT_TRUE(nextResult.getValue().isEOF());
@@ -149,7 +149,7 @@ TEST_F(ClusterCursorManagerTest, CheckOutCursorBasic) {
auto nextResult = checkedOutCursor.getValue().next();
ASSERT_OK(nextResult.getStatus());
ASSERT(nextResult.getValue().getResult());
- ASSERT_EQ(BSON("a" << 1), *nextResult.getValue().getResult());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 1), *nextResult.getValue().getResult());
nextResult = checkedOutCursor.getValue().next();
ASSERT_OK(nextResult.getStatus());
ASSERT_TRUE(nextResult.getValue().isEOF());
@@ -175,7 +175,7 @@ TEST_F(ClusterCursorManagerTest, CheckOutCursorMultipleCursors) {
auto nextResult = pinnedCursor.getValue().next();
ASSERT_OK(nextResult.getStatus());
ASSERT(nextResult.getValue().getResult());
- ASSERT_EQ(BSON("a" << i), *nextResult.getValue().getResult());
+ ASSERT_BSONOBJ_EQ(BSON("a" << i), *nextResult.getValue().getResult());
nextResult = pinnedCursor.getValue().next();
ASSERT_OK(nextResult.getStatus());
ASSERT_TRUE(nextResult.getValue().isEOF());
diff --git a/src/mongo/s/query/router_stage_limit_test.cpp b/src/mongo/s/query/router_stage_limit_test.cpp
index 658b6b4d48d..11c245f67ec 100644
--- a/src/mongo/s/query/router_stage_limit_test.cpp
+++ b/src/mongo/s/query/router_stage_limit_test.cpp
@@ -51,7 +51,7 @@ TEST(RouterStageLimitTest, LimitIsOne) {
auto firstResult = limitStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
auto secondResult = limitStage->next();
ASSERT_OK(secondResult.getStatus());
@@ -74,12 +74,12 @@ TEST(RouterStageLimitTest, LimitIsTwo) {
auto firstResult = limitStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
auto secondResult = limitStage->next();
ASSERT_OK(secondResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
auto thirdResult = limitStage->next();
ASSERT_OK(thirdResult.getStatus());
@@ -98,7 +98,7 @@ TEST(RouterStageLimitTest, LimitStagePropagatesError) {
auto firstResult = limitStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
auto secondResult = limitStage->next();
ASSERT_NOT_OK(secondResult.getStatus());
@@ -124,7 +124,7 @@ TEST(RouterStageLimitTest, LimitStagePropagatesViewDefinition) {
ASSERT_OK(result.getStatus());
ASSERT(!result.getValue().getResult());
ASSERT(result.getValue().getViewDefinition());
- ASSERT_EQ(*result.getValue().getViewDefinition(), viewDef);
+ ASSERT_BSONOBJ_EQ(*result.getValue().getViewDefinition(), viewDef);
}
TEST(RouterStageLimitTest, LimitStageToleratesMidStreamEOF) {
@@ -142,7 +142,7 @@ TEST(RouterStageLimitTest, LimitStageToleratesMidStreamEOF) {
auto firstResult = limitStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
auto secondResult = limitStage->next();
ASSERT_OK(secondResult.getStatus());
@@ -151,7 +151,7 @@ TEST(RouterStageLimitTest, LimitStageToleratesMidStreamEOF) {
auto thirdResult = limitStage->next();
ASSERT_OK(thirdResult.getStatus());
ASSERT(thirdResult.getValue().getResult());
- ASSERT_EQ(*thirdResult.getValue().getResult(), BSON("a" << 2));
+ ASSERT_BSONOBJ_EQ(*thirdResult.getValue().getResult(), BSON("a" << 2));
auto fourthResult = limitStage->next();
ASSERT_OK(fourthResult.getStatus());
@@ -170,13 +170,13 @@ TEST(RouterStageLimitTest, LimitStageRemotesExhausted) {
auto firstResult = limitStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1));
ASSERT_TRUE(limitStage->remotesExhausted());
auto secondResult = limitStage->next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 2));
ASSERT_TRUE(limitStage->remotesExhausted());
auto thirdResult = limitStage->next();
diff --git a/src/mongo/s/query/router_stage_remove_sortkey_test.cpp b/src/mongo/s/query/router_stage_remove_sortkey_test.cpp
index ad6ee3f55a2..d8e1f51605d 100644
--- a/src/mongo/s/query/router_stage_remove_sortkey_test.cpp
+++ b/src/mongo/s/query/router_stage_remove_sortkey_test.cpp
@@ -53,24 +53,24 @@ TEST(RouterStageRemoveSortKeyTest, RemovesSortKey) {
auto firstResult = sortKeyStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 4 << "b" << 3));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 4 << "b" << 3));
auto secondResult = sortKeyStage->next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(),
- BSON("c" << BSON("d"
- << "foo")));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(),
+ BSON("c" << BSON("d"
+ << "foo")));
auto thirdResult = sortKeyStage->next();
ASSERT_OK(thirdResult.getStatus());
ASSERT(thirdResult.getValue().getResult());
- ASSERT_EQ(*thirdResult.getValue().getResult(), BSON("a" << 3));
+ ASSERT_BSONOBJ_EQ(*thirdResult.getValue().getResult(), BSON("a" << 3));
auto fourthResult = sortKeyStage->next();
ASSERT_OK(fourthResult.getStatus());
ASSERT(fourthResult.getValue().getResult());
- ASSERT_EQ(*fourthResult.getValue().getResult(), BSONObj());
+ ASSERT_BSONOBJ_EQ(*fourthResult.getValue().getResult(), BSONObj());
auto fifthResult = sortKeyStage->next();
ASSERT_OK(fifthResult.getStatus());
@@ -87,7 +87,7 @@ TEST(RouterStageRemoveSortKeyTest, PropagatesError) {
auto firstResult = sortKeyStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSONObj());
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSONObj());
auto secondResult = sortKeyStage->next();
ASSERT_NOT_OK(secondResult.getStatus());
@@ -106,7 +106,7 @@ TEST(RouterStageRemoveSortKeyTest, ToleratesMidStreamEOF) {
auto firstResult = sortKeyStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1 << "b" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1 << "b" << 1));
auto secondResult = sortKeyStage->next();
ASSERT_OK(secondResult.getStatus());
@@ -115,7 +115,7 @@ TEST(RouterStageRemoveSortKeyTest, ToleratesMidStreamEOF) {
auto thirdResult = sortKeyStage->next();
ASSERT_OK(thirdResult.getStatus());
ASSERT(thirdResult.getValue().getResult());
- ASSERT_EQ(*thirdResult.getValue().getResult(), BSON("a" << 2 << "b" << 2));
+ ASSERT_BSONOBJ_EQ(*thirdResult.getValue().getResult(), BSON("a" << 2 << "b" << 2));
auto fourthResult = sortKeyStage->next();
ASSERT_OK(fourthResult.getStatus());
@@ -134,13 +134,13 @@ TEST(RouterStageRemoveSortKeyTest, RemotesExhausted) {
auto firstResult = sortKeyStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 1 << "b" << 1));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 1 << "b" << 1));
ASSERT_TRUE(sortKeyStage->remotesExhausted());
auto secondResult = sortKeyStage->next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 2 << "b" << 2));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 2 << "b" << 2));
ASSERT_TRUE(sortKeyStage->remotesExhausted());
auto thirdResult = sortKeyStage->next();
diff --git a/src/mongo/s/query/router_stage_skip_test.cpp b/src/mongo/s/query/router_stage_skip_test.cpp
index efd57eaf111..242a032375a 100644
--- a/src/mongo/s/query/router_stage_skip_test.cpp
+++ b/src/mongo/s/query/router_stage_skip_test.cpp
@@ -51,12 +51,12 @@ TEST(RouterStageSkipTest, SkipIsOne) {
auto firstResult = skipStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 2));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 2));
auto secondResult = skipStage->next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 3));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 3));
// Once end-of-stream is reached, the skip stage should keep returning boost::none.
auto thirdResult = skipStage->next();
@@ -80,7 +80,7 @@ TEST(RouterStageSkipTest, SkipIsThree) {
auto firstResult = skipStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 4));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 4));
auto secondResult = skipStage->next();
ASSERT_OK(secondResult.getStatus());
@@ -140,7 +140,7 @@ TEST(RouterStageSkipTest, ErrorAfterSkippingResults) {
auto firstResult = skipStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 3));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 3));
auto secondResult = skipStage->next();
ASSERT_NOT_OK(secondResult.getStatus());
@@ -164,11 +164,11 @@ TEST(RouterStageSkipTest, SkipStagePropagatesViewDefinition) {
ASSERT_OK(result.getStatus());
ASSERT(!result.getValue().getResult());
ASSERT(result.getValue().getViewDefinition());
- ASSERT_EQ(*result.getValue().getViewDefinition(),
- BSON("ns"
- << "view_ns"
- << "pipeline"
- << BSON_ARRAY(BSON("$match" << BSONNULL))));
+ ASSERT_BSONOBJ_EQ(*result.getValue().getViewDefinition(),
+ BSON("ns"
+ << "view_ns"
+ << "pipeline"
+ << BSON_ARRAY(BSON("$match" << BSONNULL))));
}
TEST(RouterStageSkipTest, SkipStageToleratesMidStreamEOF) {
@@ -188,7 +188,7 @@ TEST(RouterStageSkipTest, SkipStageToleratesMidStreamEOF) {
auto secondResult = skipStage->next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 3));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 3));
auto thirdResult = skipStage->next();
ASSERT_OK(thirdResult.getStatus());
@@ -208,13 +208,13 @@ TEST(RouterStageSkipTest, SkipStageRemotesExhausted) {
auto firstResult = skipStage->next();
ASSERT_OK(firstResult.getStatus());
ASSERT(firstResult.getValue().getResult());
- ASSERT_EQ(*firstResult.getValue().getResult(), BSON("a" << 2));
+ ASSERT_BSONOBJ_EQ(*firstResult.getValue().getResult(), BSON("a" << 2));
ASSERT_TRUE(skipStage->remotesExhausted());
auto secondResult = skipStage->next();
ASSERT_OK(secondResult.getStatus());
ASSERT(secondResult.getValue().getResult());
- ASSERT_EQ(*secondResult.getValue().getResult(), BSON("a" << 3));
+ ASSERT_BSONOBJ_EQ(*secondResult.getValue().getResult(), BSON("a" << 3));
ASSERT_TRUE(skipStage->remotesExhausted());
auto thirdResult = skipStage->next();
diff --git a/src/mongo/s/query/store_possible_cursor_test.cpp b/src/mongo/s/query/store_possible_cursor_test.cpp
index 23a56a3e5f8..8591c500097 100644
--- a/src/mongo/s/query/store_possible_cursor_test.cpp
+++ b/src/mongo/s/query/store_possible_cursor_test.cpp
@@ -73,8 +73,8 @@ TEST_F(StorePossibleCursorTest, ReturnsValidCursorResponse) {
ASSERT_EQ(nss.toString(), parsedOutgoingResponse.getValue().getNSS().toString());
ASSERT_EQ(0U, parsedOutgoingResponse.getValue().getCursorId());
ASSERT_EQ(2U, parsedOutgoingResponse.getValue().getBatch().size());
- ASSERT_EQ(fromjson("{_id: 1}"), parsedOutgoingResponse.getValue().getBatch()[0]);
- ASSERT_EQ(fromjson("{_id: 2}"), parsedOutgoingResponse.getValue().getBatch()[1]);
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), parsedOutgoingResponse.getValue().getBatch()[0]);
+ ASSERT_BSONOBJ_EQ(fromjson("{_id: 2}"), parsedOutgoingResponse.getValue().getBatch()[1]);
}
// Test that storePossibleCursor() propagates an error if it cannot parse the cursor response.
@@ -97,7 +97,7 @@ TEST_F(StorePossibleCursorTest, PassesUpCommandResultIfItDoesNotDescribeACursor)
nullptr, // TaskExecutor
getManager());
ASSERT_OK(outgoingCursorResponse.getStatus());
- ASSERT_EQ(notACursorObj, outgoingCursorResponse.getValue());
+ ASSERT_BSONOBJ_EQ(notACursorObj, outgoingCursorResponse.getValue());
}
} // namespace
diff --git a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
index 35e7b255f55..b147cf90d1e 100644
--- a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp
@@ -62,11 +62,11 @@ TEST(AddShardToZoneRequest, CommandBuilderShouldAlwaysCreateConfigCommand) {
request.appendAsConfigCommand(&builder);
auto cmdObj = builder.obj();
- ASSERT_EQ(BSON("_configsvrAddShardToZone"
- << "a"
- << "zone"
- << "z"),
- cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("_configsvrAddShardToZone"
+ << "a"
+ << "zone"
+ << "z"),
+ cmdObj);
}
TEST(AddShardToZoneRequest, MissingZoneErrors) {
@@ -120,11 +120,11 @@ TEST(CfgAddShardToZoneRequest, BasicValidConfigCommand) {
request.appendAsConfigCommand(&builder);
auto cmdObj = builder.obj();
- ASSERT_EQ(BSON("_configsvrAddShardToZone"
- << "a"
- << "zone"
- << "z"),
- cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("_configsvrAddShardToZone"
+ << "a"
+ << "zone"
+ << "z"),
+ cmdObj);
}
TEST(CfgAddShardToZoneRequest, MissingZoneErrors) {
diff --git a/src/mongo/s/request_types/balance_chunk_request_test.cpp b/src/mongo/s/request_types/balance_chunk_request_test.cpp
index e39fdf90674..97af5a292bf 100644
--- a/src/mongo/s/request_types/balance_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/balance_chunk_request_test.cpp
@@ -58,8 +58,8 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandNoSecondaryThrottle) {
<< version.epoch())));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS());
- ASSERT_EQ(BSON("a" << -100LL), chunk.getMin());
- ASSERT_EQ(BSON("a" << 100LL), chunk.getMax());
+ ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 100LL), chunk.getMax());
ASSERT_EQ(ShardId("TestShard0000"), chunk.getShard());
ASSERT_EQ(version, chunk.getVersion());
@@ -88,8 +88,8 @@ TEST(BalanceChunkRequest, ParseFromConfigCommandWithSecondaryThrottle) {
<< BSON("w" << 2)))));
const auto& chunk = request.getChunk();
ASSERT_EQ("TestDB.TestColl", chunk.getNS());
- ASSERT_EQ(BSON("a" << -100LL), chunk.getMin());
- ASSERT_EQ(BSON("a" << 100LL), chunk.getMax());
+ ASSERT_BSONOBJ_EQ(BSON("a" << -100LL), chunk.getMin());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 100LL), chunk.getMax());
ASSERT_EQ(ShardId("TestShard0000"), chunk.getShard());
ASSERT_EQ(version, chunk.getVersion());
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp b/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
index 911dc0e7360..87ee5d8245a 100644
--- a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
+++ b/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
@@ -74,11 +74,11 @@ TEST(CommitChunkMigrationRequest, WithControlChunk) {
ASSERT_EQ(kNamespaceString, request.getNss());
ASSERT_EQ(kShardId0, request.getFromShard());
ASSERT_EQ(kShardId1, request.getToShard());
- ASSERT_EQ(kKey0, request.getMigratedChunkRange().getMin());
- ASSERT_EQ(kKey1, request.getMigratedChunkRange().getMax());
+ ASSERT_BSONOBJ_EQ(kKey0, request.getMigratedChunkRange().getMin());
+ ASSERT_BSONOBJ_EQ(kKey1, request.getMigratedChunkRange().getMax());
ASSERT(request.hasControlChunkRange());
- ASSERT_EQ(kKey2, request.getControlChunkRange().getMin());
- ASSERT_EQ(kKey3, request.getControlChunkRange().getMax());
+ ASSERT_BSONOBJ_EQ(kKey2, request.getControlChunkRange().getMin());
+ ASSERT_BSONOBJ_EQ(kKey3, request.getControlChunkRange().getMax());
}
TEST(CommitChunkMigrationRequest, WithoutControlChunk) {
@@ -99,8 +99,8 @@ TEST(CommitChunkMigrationRequest, WithoutControlChunk) {
ASSERT_EQ(kNamespaceString, request.getNss());
ASSERT_EQ(kShardId0, request.getFromShard());
ASSERT_EQ(kShardId1, request.getToShard());
- ASSERT_EQ(kKey0, request.getMigratedChunkRange().getMin());
- ASSERT_EQ(kKey1, request.getMigratedChunkRange().getMax());
+ ASSERT_BSONOBJ_EQ(kKey0, request.getMigratedChunkRange().getMin());
+ ASSERT_BSONOBJ_EQ(kKey1, request.getMigratedChunkRange().getMax());
ASSERT(!request.hasControlChunkRange());
}
diff --git a/src/mongo/s/request_types/merge_chunk_request_test.cpp b/src/mongo/s/request_types/merge_chunk_request_test.cpp
index 2bca08c5fba..094b7725707 100644
--- a/src/mongo/s/request_types/merge_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/merge_chunk_request_test.cpp
@@ -51,9 +51,9 @@ TEST(MergeChunkRequest, BasicValidConfigCommand) {
<< "shard0000")));
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
- ASSERT_EQ(BSON("a" << 1), request.getChunkBoundaries().at(0));
- ASSERT_EQ(BSON("a" << 5), request.getChunkBoundaries().at(1));
- ASSERT_EQ(BSON("a" << 10), request.getChunkBoundaries().at(2));
+ ASSERT_BSONOBJ_EQ(BSON("a" << 1), request.getChunkBoundaries().at(0));
+ ASSERT_BSONOBJ_EQ(BSON("a" << 5), request.getChunkBoundaries().at(1));
+ ASSERT_BSONOBJ_EQ(BSON("a" << 10), request.getChunkBoundaries().at(2));
ASSERT_EQ("shard0000", request.getShardName());
}
@@ -79,7 +79,7 @@ TEST(MergeChunkRequest, ConfigCommandtoBSON) {
auto request = assertGet(MergeChunkRequest::parseFromConfigCommand(serializedRequest));
auto requestToBSON = request.toConfigCommandBSON(writeConcernObj);
- ASSERT_EQ(cmdBuilder.obj(), requestToBSON);
+ ASSERT_BSONOBJ_EQ(cmdBuilder.obj(), requestToBSON);
}
TEST(MergeChunkRequest, MissingNameSpaceErrors) {
diff --git a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
index 0115affed0c..f687d98f46b 100644
--- a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
+++ b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp
@@ -64,11 +64,11 @@ TEST(RemoveShardFromZoneRequest, CommandBuilderShouldAlwaysCreateConfigCommand)
request.appendAsConfigCommand(&builder);
auto cmdObj = builder.obj();
- ASSERT_EQ(BSON("_configsvrRemoveShardFromZone"
- << "a"
- << "zone"
- << "z"),
- cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("_configsvrRemoveShardFromZone"
+ << "a"
+ << "zone"
+ << "z"),
+ cmdObj);
}
TEST(RemoveShardFromZoneRequest, MissingZoneErrors) {
@@ -123,11 +123,11 @@ TEST(CfgRemoveShardFromZoneRequest, BasicValidConfigCommand) {
request.appendAsConfigCommand(&builder);
auto cmdObj = builder.obj();
- ASSERT_EQ(BSON("_configsvrRemoveShardFromZone"
- << "a"
- << "zone"
- << "z"),
- cmdObj);
+ ASSERT_BSONOBJ_EQ(BSON("_configsvrRemoveShardFromZone"
+ << "a"
+ << "zone"
+ << "z"),
+ cmdObj);
}
TEST(CfgRemoveShardFromZoneRequest, MissingZoneErrors) {
diff --git a/src/mongo/s/request_types/split_chunk_request_test.cpp b/src/mongo/s/request_types/split_chunk_request_test.cpp
index 57ca18f6350..deb5718c2bf 100644
--- a/src/mongo/s/request_types/split_chunk_request_test.cpp
+++ b/src/mongo/s/request_types/split_chunk_request_test.cpp
@@ -56,7 +56,7 @@ TEST(SplitChunkRequest, BasicValidConfigCommand) {
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
ASSERT(ChunkRange(BSON("a" << 1), BSON("a" << 10)) == request.getChunkRange());
- ASSERT_EQ(BSON("a" << 5), request.getSplitPoints().at(0));
+ ASSERT_BSONOBJ_EQ(BSON("a" << 5), request.getSplitPoints().at(0));
ASSERT_EQ("shard0000", request.getShardName());
}
@@ -77,8 +77,8 @@ TEST(SplitChunkRequest, ValidWithMultipleSplits) {
ASSERT_EQ(NamespaceString("TestDB", "TestColl"), request.getNamespace());
ASSERT_EQ(OID("7fffffff0000000000000001"), request.getEpoch());
ASSERT(ChunkRange(BSON("a" << 1), BSON("a" << 10)) == request.getChunkRange());
- ASSERT_EQ(BSON("a" << 5), request.getSplitPoints().at(0));
- ASSERT_EQ(BSON("a" << 7), request.getSplitPoints().at(1));
+ ASSERT_BSONOBJ_EQ(BSON("a" << 5), request.getSplitPoints().at(0));
+ ASSERT_BSONOBJ_EQ(BSON("a" << 7), request.getSplitPoints().at(1));
ASSERT_EQ("shard0000", request.getShardName());
}
@@ -107,7 +107,7 @@ TEST(SplitChunkRequest, ConfigCommandtoBSON) {
auto request = assertGet(SplitChunkRequest::parseFromConfigCommand(serializedRequest));
auto requestToBSON = request.toConfigCommandBSON(writeConcernObj);
- ASSERT_EQ(cmdBuilder.obj(), requestToBSON);
+ ASSERT_BSONOBJ_EQ(cmdBuilder.obj(), requestToBSON);
}
TEST(SplitChunkRequest, MissingNamespaceErrors) {
diff --git a/src/mongo/s/request_types/update_zone_key_range_request_test.cpp b/src/mongo/s/request_types/update_zone_key_range_request_test.cpp
index d8df76a991f..0a4fa4452fb 100644
--- a/src/mongo/s/request_types/update_zone_key_range_request_test.cpp
+++ b/src/mongo/s/request_types/update_zone_key_range_request_test.cpp
@@ -49,8 +49,8 @@ TEST(UpdateZoneKeyRangeRequest, BasicValidMongosAssignCommand) {
auto request = requestStatus.getValue();
ASSERT_EQ("foo.bar", request.getNS().ns());
- ASSERT_EQ(BSON("x" << 1), request.getRange().getMin());
- ASSERT_EQ(BSON("x" << 100), request.getRange().getMax());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax());
ASSERT_FALSE(request.isRemove());
ASSERT_EQ("z", request.getZoneName());
}
@@ -66,8 +66,8 @@ TEST(UpdateZoneKeyRangeRequest, BasicValidMongosRemoveCommand) {
auto request = requestStatus.getValue();
ASSERT_EQ("foo.bar", request.getNS().ns());
- ASSERT_EQ(BSON("x" << 1), request.getRange().getMin());
- ASSERT_EQ(BSON("x" << 100), request.getRange().getMax());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax());
ASSERT_TRUE(request.isRemove());
}
@@ -102,7 +102,7 @@ TEST(UpdateZoneKeyRangeRequest, CommandBuilderShouldAlwaysCreateConfigCommandFor
max: { x: 100 },
zone: "z"
})BSON");
- ASSERT_EQ(expectedObj, configCmdObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, configCmdObj);
}
TEST(UpdateZoneKeyRangeRequest, CommandBuilderShouldAlwaysCreateConfigCommandForRemoveType) {
@@ -126,7 +126,7 @@ TEST(UpdateZoneKeyRangeRequest, CommandBuilderShouldAlwaysCreateConfigCommandFor
max: { x: 100 },
zone: null
})BSON");
- ASSERT_EQ(expectedObj, configCmdObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, configCmdObj);
}
@@ -227,8 +227,8 @@ TEST(CfgAssignKeyRangeToZoneRequest, BasicValidMongosAssignCommand) {
auto request = requestStatus.getValue();
ASSERT_EQ("foo.bar", request.getNS().ns());
- ASSERT_EQ(BSON("x" << 1), request.getRange().getMin());
- ASSERT_EQ(BSON("x" << 100), request.getRange().getMax());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax());
ASSERT_FALSE(request.isRemove());
ASSERT_EQ("z", request.getZoneName());
}
@@ -244,8 +244,8 @@ TEST(CfgAssignKeyRangeToZoneRequest, BasicValidMongosRemoveCommand) {
auto request = requestStatus.getValue();
ASSERT_EQ("foo.bar", request.getNS().ns());
- ASSERT_EQ(BSON("x" << 1), request.getRange().getMin());
- ASSERT_EQ(BSON("x" << 100), request.getRange().getMax());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin());
+ ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax());
ASSERT_TRUE(request.isRemove());
}
@@ -280,7 +280,7 @@ TEST(CfgAssignKeyRangeToZoneRequest, CommandBuilderShouldAlwaysCreateConfigComma
max: { x: 100 },
zone: "z"
})BSON");
- ASSERT_EQ(expectedObj, configCmdObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, configCmdObj);
}
TEST(CfgAssignKeyRangeToZoneRequest, CommandBuilderShouldAlwaysCreateConfigCommandForRemoveType) {
@@ -304,7 +304,7 @@ TEST(CfgAssignKeyRangeToZoneRequest, CommandBuilderShouldAlwaysCreateConfigComma
max: { x: 100 },
zone: null
})BSON");
- ASSERT_EQ(expectedObj, configCmdObj);
+ ASSERT_BSONOBJ_EQ(expectedObj, configCmdObj);
}
diff --git a/src/mongo/s/set_shard_version_request_test.cpp b/src/mongo/s/set_shard_version_request_test.cpp
index 776a8e00c8f..baae7c77cb7 100644
--- a/src/mongo/s/set_shard_version_request_test.cpp
+++ b/src/mongo/s/set_shard_version_request_test.cpp
@@ -267,21 +267,21 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
ASSERT_EQ(ssv.getShardName(), "TestShard");
ASSERT_EQ(ssv.getShardConnectionString().toString(), shardCS.toString());
- ASSERT_EQ(ssv.toBSON(),
- BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "maxTimeMS"
- << 30000));
+ ASSERT_BSONOBJ_EQ(ssv.toBSON(),
+ BSON("setShardVersion"
+ << ""
+ << "init"
+ << true
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "maxTimeMS"
+ << 30000));
}
TEST(SetShardVersionRequest, ToSSVCommandInitNoConnectionVersioning) {
@@ -295,23 +295,23 @@ TEST(SetShardVersionRequest, ToSSVCommandInitNoConnectionVersioning) {
ASSERT_EQ(ssv.getShardName(), "TestShard");
ASSERT_EQ(ssv.getShardConnectionString().toString(), shardCS.toString());
- ASSERT_EQ(ssv.toBSON(),
- BSON("setShardVersion"
- << ""
- << "init"
- << true
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "maxTimeMS"
- << 30000
- << "noConnectionVersioning"
- << true));
+ ASSERT_BSONOBJ_EQ(ssv.toBSON(),
+ BSON("setShardVersion"
+ << ""
+ << "init"
+ << true
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "maxTimeMS"
+ << 30000
+ << "noConnectionVersioning"
+ << true));
}
TEST(SetShardVersionRequest, ToSSVCommandFull) {
@@ -327,26 +327,26 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
ASSERT_EQ(ssv.getShardName(), "TestShard");
ASSERT_EQ(ssv.getShardConnectionString().toString(), shardCS.toString());
ASSERT_EQ(ssv.getNS().ns(), "db.coll");
- ASSERT_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
- chunkVersion.toBSONWithPrefix("version"));
-
- ASSERT_EQ(ssv.toBSON(),
- BSON("setShardVersion"
- << "db.coll"
- << "init"
- << false
- << "authoritative"
- << false
- << "configdb"
- << configCS.toString()
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ ASSERT_BSONOBJ_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
+ chunkVersion.toBSONWithPrefix("version"));
+
+ ASSERT_BSONOBJ_EQ(ssv.toBSON(),
+ BSON("setShardVersion"
+ << "db.coll"
+ << "init"
+ << false
+ << "authoritative"
+ << false
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()));
}
TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
@@ -362,26 +362,26 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
ASSERT_EQ(ssv.getShardName(), "TestShard");
ASSERT_EQ(ssv.getShardConnectionString().toString(), shardCS.toString());
ASSERT_EQ(ssv.getNS().ns(), "db.coll");
- ASSERT_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
- chunkVersion.toBSONWithPrefix("version"));
-
- ASSERT_EQ(ssv.toBSON(),
- BSON("setShardVersion"
- << "db.coll"
- << "init"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()));
+ ASSERT_BSONOBJ_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
+ chunkVersion.toBSONWithPrefix("version"));
+
+ ASSERT_BSONOBJ_EQ(ssv.toBSON(),
+ BSON("setShardVersion"
+ << "db.coll"
+ << "init"
+ << false
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()));
}
TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
@@ -397,28 +397,28 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
ASSERT_EQ(ssv.getShardName(), "TestShard");
ASSERT_EQ(ssv.getShardConnectionString().toString(), shardCS.toString());
ASSERT_EQ(ssv.getNS().ns(), "db.coll");
- ASSERT_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
- chunkVersion.toBSONWithPrefix("version"));
-
- ASSERT_EQ(ssv.toBSON(),
- BSON("setShardVersion"
- << "db.coll"
- << "init"
- << false
- << "authoritative"
- << true
- << "configdb"
- << configCS.toString()
- << "shard"
- << "TestShard"
- << "shardHost"
- << shardCS.toString()
- << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch"
- << chunkVersion.epoch()
- << "noConnectionVersioning"
- << true));
+ ASSERT_BSONOBJ_EQ(ssv.getNSVersion().toBSONWithPrefix("version"),
+ chunkVersion.toBSONWithPrefix("version"));
+
+ ASSERT_BSONOBJ_EQ(ssv.toBSON(),
+ BSON("setShardVersion"
+ << "db.coll"
+ << "init"
+ << false
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()
+ << "noConnectionVersioning"
+ << true));
}
} // namespace
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index a00157179b9..e3b31a7a5d2 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -124,12 +124,14 @@ static BSONObj normKey(const ShardKeyPattern& pattern, const BSONObj& doc) {
TEST(ShardKeyPattern, NormalizeShardKey) {
ShardKeyPattern pattern(BSON("a.b" << 1 << "c" << 1.0f));
- ASSERT_EQUALS(normKey(pattern, BSON("a.b" << 10 << "c" << 30)), BSON("a.b" << 10 << "c" << 30));
- ASSERT_EQUALS(normKey(pattern, BSON("c" << 30 << "a.b" << 10)), BSON("a.b" << 10 << "c" << 30));
-
- ASSERT_EQUALS(normKey(pattern, BSON("b" << 10)), BSONObj());
- ASSERT_EQUALS(normKey(pattern, BSON("a" << 10 << "c" << 30)), BSONObj());
- ASSERT_EQUALS(normKey(pattern, BSON("a.b" << BSON("$gt" << 10) << "c" << 30)), BSONObj());
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << 10 << "c" << 30)),
+ BSON("a.b" << 10 << "c" << 30));
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("c" << 30 << "a.b" << 10)),
+ BSON("a.b" << 10 << "c" << 30));
+
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("b" << 10)), BSONObj());
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a" << 10 << "c" << 30)), BSONObj());
+ ASSERT_BSONOBJ_EQ(normKey(pattern, BSON("a.b" << BSON("$gt" << 10) << "c" << 30)), BSONObj());
}
static BSONObj docKey(const ShardKeyPattern& pattern, const BSONObj& doc) {
@@ -142,26 +144,26 @@ TEST(ShardKeyPattern, ExtractDocShardKeySingle) {
//
ShardKeyPattern pattern(BSON("a" << 1));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10}")), fromjson("{a:10}"));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10}"));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{b:10}, c:30}")), fromjson("{a:{b:10}}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:10}, c:30}")), fromjson("{a:{b:10}}"));
const BSONRegEx regex("abc");
- ASSERT_EQUALS(docKey(pattern,
- BSON("a" << regex << "b"
- << "20")),
- BSON("a" << regex));
+ ASSERT_BSONOBJ_EQ(docKey(pattern,
+ BSON("a" << regex << "b"
+ << "20")),
+ BSON("a" << regex));
const BSONObj ref = BSON("$ref"
<< "coll"
<< "$id"
<< 1);
- ASSERT_EQUALS(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
-
- ASSERT_EQUALS(docKey(pattern, BSONObj()), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{b:10}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, BSON("" << 10)), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:[1, 2]}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{$invalid:true}}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{$gt:10}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
+
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSONObj()), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{b:10}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("" << 10)), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:[1,2]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$invalid:true}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$gt:10}}")), BSONObj());
// BSONObjIterator breaks this for now
// ASSERT_EQUALS(docKey(pattern, BSON("a" << 10 << "a" << 20)), BSONObj());
}
@@ -172,23 +174,24 @@ TEST(ShardKeyPattern, ExtractDocShardKeyCompound) {
//
ShardKeyPattern pattern(BSON("a" << 1 << "b" << 1.0));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10, b:'20'}"));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, b:'20', c:30}")), fromjson("{a:10, b:'20'}"));
- ASSERT_EQUALS(docKey(pattern,
- BSON("c" << 30 << "b"
- << "20"
- << "a"
- << 10)),
- fromjson("{a:10, b:'20'}"));
-
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, b:{$invalid:true}}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{b:20}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern,
- BSON("" << 10 << "b"
- << "20")),
- BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, b:{$gt:20}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:'20', c:30}")),
+ fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern,
+ BSON("c" << 30 << "b"
+ << "20"
+ << "a"
+ << 10)),
+ fromjson("{a:10, b:'20'}"));
+
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$invalid:true}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{b:20}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern,
+ BSON("" << 10 << "b"
+ << "20")),
+ BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, b:{$gt:20}}")), BSONObj());
// Ordering
ASSERT_EQUALS(docKey(pattern, BSON("b" << 20 << "a" << 10)).firstElement().numberInt(), 10);
@@ -200,20 +203,20 @@ TEST(ShardKeyPattern, ExtractDocShardKeyNested) {
//
ShardKeyPattern pattern(BSON("a.b" << 1 << "c" << 1.0f));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{b:10}, c:30}")), fromjson("{'a.b':10, c:30}"));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{d:[1,2],b:10},c:30,d:40}")),
- fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:10}, c:30}")), fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{d:[1,2],b:10},c:30,d:40}")),
+ fromjson("{'a.b':10, c:30}"));
const BSONObj ref = BSON("$ref"
<< "coll"
<< "$id"
<< 1);
- ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON("b" << ref) << "c" << 30)),
- BSON("a.b" << ref << "c" << 30));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("b" << ref) << "c" << 30)),
+ BSON("a.b" << ref << "c" << 30));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, c:30}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{d:40}, c:30}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:[{b:10}, {b:20}], c:30}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{b:[10, 20]}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:10, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{d:40}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:[{b:10}, {b:20}], c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:[10, 20]}, c:30}")), BSONObj());
}
TEST(ShardKeyPattern, ExtractDocShardKeyDeepNested) {
@@ -222,15 +225,15 @@ TEST(ShardKeyPattern, ExtractDocShardKeyDeepNested) {
//
ShardKeyPattern pattern(BSON("a.b.c" << 1));
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{b:{c:10}}}")), fromjson("{'a.b.c':10}"));
-
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:[{b:{c:10}}]}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{b:[{c:10}]}}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{b:{c:[10, 20]}}}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{b:[{c:10}, {c:20}]}}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:[{b:{c:10}},{b:{c:20}}]}")), BSONObj());
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:[{b:[{c:10},{c:20}]},{b:[{c:30},{c:40}]}]}}")),
- BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:{c:10}}}")), fromjson("{'a.b.c':10}"));
+
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:[{b:{c:10}}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:[{c:10}]}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:{c:[10, 20]}}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{b:[{c:10}, {c:20}]}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:[{b:{c:10}},{b:{c:20}}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:[{b:[{c:10},{c:20}]},{b:[{c:30},{c:40}]}]}}")),
+ BSONObj());
}
TEST(ShardKeyPattern, ExtractDocShardKeyHashed) {
@@ -245,15 +248,15 @@ TEST(ShardKeyPattern, ExtractDocShardKeyHashed) {
ShardKeyPattern pattern(BSON("a.b"
<< "hashed"));
- ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON("b" << value))), BSON("a.b" << hashValue));
- ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON("b" << value) << "c" << 30)),
- BSON("a.b" << hashValue));
- ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON("c" << 30 << "b" << value))),
- BSON("a.b" << hashValue));
-
- ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON("c" << value))), BSONObj());
- ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON("b" << BSON_ARRAY(value)))), BSONObj());
- ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON_ARRAY(BSON("b" << value)))), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("b" << value))), BSON("a.b" << hashValue));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("b" << value) << "c" << 30)),
+ BSON("a.b" << hashValue));
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("c" << 30 << "b" << value))),
+ BSON("a.b" << hashValue));
+
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("c" << value))), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON("b" << BSON_ARRAY(value)))), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, BSON("a" << BSON_ARRAY(BSON("b" << value)))), BSONObj());
}
static BSONObj queryKey(const ShardKeyPattern& pattern, const BSONObj& query) {
@@ -272,34 +275,34 @@ TEST(ShardKeyPattern, ExtractQueryShardKeySingle) {
//
ShardKeyPattern pattern(BSON("a" << 1));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10}")), fromjson("{a:10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:10}, c:30}")), fromjson("{a:{b:10}}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:{$gt:20}}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:10}, c:30}")), fromjson("{a:{b:10}}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:{$gt:20}}")), fromjson("{a:10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{$gt:10}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:{$invalid:'20'}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$gt:10}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10,b:{$invalid:'20'}}")), BSONObj());
// Doc key extraction shouldn't work with query
- ASSERT_EQUALS(docKey(pattern, fromjson("{a:{$eq:[10, 20]}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(docKey(pattern, fromjson("{a:{$eq:[10, 20]}, c:30}")), BSONObj());
// $eq/$or/$and/$all
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{$eq:10}}")), fromjson("{a:10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}}]}")), fromjson("{a:10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$and:[{a:{$eq:10}},{b:'20'}]}")),
- fromjson("{a:10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{$all:[10]}}")), fromjson("{a:10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}},{a:10}]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$and:[{a:10},{a:10}]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{$all:[10,10]}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}},{b:'20'}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$eq:10}}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}}]}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$and:[{a:{$eq:10}},{b:'20'}]}")),
+ fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$all:[10]}}")), fromjson("{a:10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}},{a:10}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$and:[{a:10},{a:10}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$all:[10,10]}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}},{b:'20'}]}")), BSONObj());
// Regex can't be extracted from query
const BSONRegEx regex("abc");
- ASSERT_EQUALS(queryKey(pattern,
- BSON("a" << regex << "b"
- << "20")),
- BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern,
+ BSON("a" << regex << "b"
+ << "20")),
+ BSONObj());
}
TEST(ShardKeyPattern, ExtractQueryShardKeyCompound) {
@@ -308,32 +311,33 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyCompound) {
//
ShardKeyPattern pattern(BSON("a" << 1 << "b" << 1.0));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10, b:'20'}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:'20', c:30}")), fromjson("{a:10, b:'20'}"));
- ASSERT_EQUALS(queryKey(pattern,
- BSON("c" << 30 << "b"
- << "20"
- << "a"
- << 10)),
- fromjson("{a:10, b:'20'}"));
-
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:{$invalid:true}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{b:20}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern,
- BSON("" << 10 << "b"
- << "20")),
- BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:'20'}")), fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:'20', c:30}")),
+ fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern,
+ BSON("c" << 30 << "b"
+ << "20"
+ << "a"
+ << 10)),
+ fromjson("{a:10, b:'20'}"));
+
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10,b:{$invalid:true}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{b:20}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern,
+ BSON("" << 10 << "b"
+ << "20")),
+ BSONObj());
// $eq/$or/$and/$all
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{$eq:10}, b:{$all:['20']}}")),
- fromjson("{a:10, b:'20'}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$and:[{a:{$eq:10},b:{$eq:'20'}}]}")),
- fromjson("{a:10, b:'20'}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$and:[{a:{$eq:10}},{b:{$eq:'20'}}]}")),
- fromjson("{a:10, b:'20'}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:{$gt:20}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}},{b:'20'}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{$eq:10}, b:{$all:['20']}}")),
+ fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$and:[{a:{$eq:10},b:{$eq:'20'}}]}")),
+ fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$and:[{a:{$eq:10}},{b:{$eq:'20'}}]}")),
+ fromjson("{a:10, b:'20'}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, b:{$gt:20}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$or:[{a:{$eq:10}},{b:'20'}]}")), BSONObj());
// Ordering
ASSERT_EQUALS(queryKey(pattern, BSON("b" << 20 << "a" << 10)).firstElement().numberInt(), 10);
@@ -345,28 +349,29 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyNested) {
//
ShardKeyPattern pattern(BSON("a.b" << 1 << "c" << 1.0f));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:10}, c:30}")), fromjson("{'a.b':10, c:30}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{'a.b':{$eq:10}, c:30, d:40}")),
- fromjson("{'a.b':10, c:30}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$or:[{'a.b':10, c:30, d:40}]}")),
- fromjson("{'a.b':10, c:30}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{'a.b':{$all:[10]}, c:30, d:40}")),
- fromjson("{'a.b':10, c:30}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:10,d:40}, c:30}")),
- fromjson("{'a.b':10, c:30}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$and:[{'a.b':{$eq:10}}, {c:30}]}")),
- fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:10}, c:30}")),
+ fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{'a.b':{$eq:10}, c:30, d:40}")),
+ fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$or:[{'a.b':10, c:30, d:40}]}")),
+ fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{'a.b':{$all:[10]}, c:30, d:40}")),
+ fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:10,d:40}, c:30}")),
+ fromjson("{'a.b':10, c:30}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$and:[{'a.b':{$eq:10}}, {c:30}]}")),
+ fromjson("{'a.b':10, c:30}"));
// Nested $eq is actually a document element
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:{$eq:10}}, c:30}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$and:[{a:{b:{$eq:10}}}, {c:30}]}")), BSONObj());
-
- ASSERT_EQUALS(queryKey(pattern, fromjson("{$or:[{a:{b:{$eq:10}}}, {c:30}]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, c:30}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:10}, c:{$gt:30}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{d:40}, c:30}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:[{b:10}, {b:20}], c:30}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:{$eq:[10, 20]}}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:{$eq:10}}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$and:[{a:{b:{$eq:10}}},{c:30}]}")), BSONObj());
+
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{$or:[{a:{b:{$eq:10}}},{c:30}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:10, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:10}, c:{$gt:30}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{d:40}, c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:[{b:10}, {b:20}],c:30}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:{$eq:[10, 20]}},c:30}")), BSONObj());
}
TEST(ShardKeyPattern, ExtractQueryShardKeyDeepNested) {
@@ -375,20 +380,20 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyDeepNested) {
//
ShardKeyPattern pattern(BSON("a.b.c" << 1));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:{c:10}}}")), fromjson("{'a.b.c':10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{'a.b.c':10}")), fromjson("{'a.b.c':10}"));
- ASSERT_EQUALS(queryKey(pattern, fromjson("{'a.b.c':{$eq:10}}")), fromjson("{'a.b.c':10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:{c:10}}}")), fromjson("{'a.b.c':10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{'a.b.c':10}")), fromjson("{'a.b.c':10}"));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{'a.b.c':{$eq:10}}")), fromjson("{'a.b.c':10}"));
// Arrays at any nesting level means we can't extract a shard key
- ASSERT_EQUALS(queryKey(pattern, fromjson("{'a.b.c':[10]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{'a.b':[{c:10}]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:[{b:{c:10}}]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:[{c:10}]}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:{c:[10, 20]}}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:{b:[{c:10}, {c:20}]}}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:[{b:{c:10}},{b:{c:20}}]}")), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, fromjson("{a:[{b:[{c:10},{c:20}]},{b:[{c:30},{c:40}]}]}}")),
- BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{'a.b.c':[10]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{'a.b':[{c:10}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:[{b:{c:10}}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:[{c:10}]}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:{c:[10, 20]}}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:{b:[{c:10}, {c:20}]}}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:[{b:{c:10}},{b:{c:20}}]}")), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, fromjson("{a:[{b:[{c:10},{c:20}]},{b:[{c:30},{c:40}]}]}}")),
+ BSONObj());
}
TEST(ShardKeyPattern, ExtractQueryShardKeyHashed) {
@@ -404,22 +409,23 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyHashed) {
// Hashed works basically the same as non-hashed, but applies the hash function at the end
ShardKeyPattern pattern(BSON("a.b"
<< "hashed"));
- ASSERT_EQUALS(queryKey(pattern, BSON("a.b" << value)), BSON("a.b" << hashValue));
- ASSERT_EQUALS(queryKey(pattern, BSON("a" << BSON("b" << value))), BSON("a.b" << hashValue));
- ASSERT_EQUALS(queryKey(pattern, BSON("a.b" << BSON("$eq" << value))), BSON("a.b" << hashValue));
- ASSERT_EQUALS(queryKey(pattern, BSON("a" << BSON("b" << value) << "c" << 30)),
- BSON("a.b" << hashValue));
- ASSERT_EQUALS(queryKey(pattern, BSON("a" << BSON("c" << 30 << "b" << value))),
- BSON("a.b" << hashValue));
- ASSERT_EQUALS(queryKey(pattern, //
- BSON("$and" << BSON_ARRAY(BSON("a.b" << BSON("$eq" << value))))),
- BSON("a.b" << hashValue));
-
- ASSERT_EQUALS(queryKey(pattern, BSON("a" << BSON("b" << BSON("$eq" << value)))), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, BSON("a.b" << BSON("$gt" << value))), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, BSON("a" << BSON("c" << value))), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, BSON("a" << BSON("b" << BSON_ARRAY(value)))), BSONObj());
- ASSERT_EQUALS(queryKey(pattern, BSON("a" << BSON_ARRAY(BSON("b" << value)))), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a.b" << value)), BSON("a.b" << hashValue));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a" << BSON("b" << value))), BSON("a.b" << hashValue));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a.b" << BSON("$eq" << value))),
+ BSON("a.b" << hashValue));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a" << BSON("b" << value) << "c" << 30)),
+ BSON("a.b" << hashValue));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a" << BSON("c" << 30 << "b" << value))),
+ BSON("a.b" << hashValue));
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, //
+ BSON("$and" << BSON_ARRAY(BSON("a.b" << BSON("$eq" << value))))),
+ BSON("a.b" << hashValue));
+
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a" << BSON("b" << BSON("$eq" << value)))), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a.b" << BSON("$gt" << value))), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a" << BSON("c" << value))), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a" << BSON("b" << BSON_ARRAY(value)))), BSONObj());
+ ASSERT_BSONOBJ_EQ(queryKey(pattern, BSON("a" << BSON_ARRAY(BSON("b" << value)))), BSONObj());
}
static bool indexComp(const ShardKeyPattern& pattern, const BSONObj& indexPattern) {
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 59daaeec608..138d86321f8 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -34,6 +34,7 @@
#include <vector>
#include "mongo/base/status_with.h"
+#include "mongo/bson/simple_bsonobj_comparator.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/client.h"
@@ -275,8 +276,8 @@ void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards)
const auto& query = queryResult.getValue();
ASSERT_EQ(query->ns(), ShardType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSONObj());
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
+ ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj());
ASSERT_FALSE(query->getLimit().is_initialized());
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -310,7 +311,7 @@ void ShardingTestFixture::expectInserts(const NamespaceString& nss,
auto itExpected = expected.begin();
for (; itInserted != inserted.end(); itInserted++, itExpected++) {
- ASSERT_EQ(*itExpected, *itInserted);
+ ASSERT_BSONOBJ_EQ(*itExpected, *itInserted);
}
BatchedCommandResponse response;
@@ -336,7 +337,7 @@ void ShardingTestFixture::expectConfigCollectionCreate(const HostAndPort& config
<< 15000)
<< "maxTimeMS"
<< 30000);
- ASSERT_EQUALS(expectedCreateCmd, request.cmdObj);
+ ASSERT_BSONOBJ_EQ(expectedCreateCmd, request.cmdObj);
return response;
});
@@ -366,7 +367,7 @@ void ShardingTestFixture::expectConfigCollectionInsert(const HostAndPort& config
ASSERT_EQUALS(operationContext()->getClient()->clientAddress(true),
actualChangeLog.getClientAddr());
- ASSERT_EQUALS(detail, actualChangeLog.getDetails());
+ ASSERT_BSONOBJ_EQ(detail, actualChangeLog.getDetails());
ASSERT_EQUALS(ns, actualChangeLog.getNS());
ASSERT_EQUALS(network()->getHostName(), actualChangeLog.getServer());
ASSERT_EQUALS(timestamp, actualChangeLog.getTime());
@@ -412,7 +413,7 @@ void ShardingTestFixture::expectUpdateCollection(const HostAndPort& expectedHost
const CollectionType& coll) {
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQUALS(expectedHost, request.target);
- ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
+ ASSERT_BSONOBJ_EQ(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
ASSERT_EQUALS("config", request.dbname);
BatchedUpdateRequest actualBatchedUpdate;
@@ -425,8 +426,9 @@ void ShardingTestFixture::expectUpdateCollection(const HostAndPort& expectedHost
ASSERT_TRUE(update->getUpsert());
ASSERT_FALSE(update->getMulti());
- ASSERT_EQUALS(update->getQuery(), BSON(CollectionType::fullNs(coll.getNs().toString())));
- ASSERT_EQUALS(update->getUpdateExpr(), coll.toBSON());
+ ASSERT_BSONOBJ_EQ(update->getQuery(),
+ BSON(CollectionType::fullNs(coll.getNs().toString())));
+ ASSERT_BSONOBJ_EQ(update->getUpdateExpr(), coll.toBSON());
BatchedCommandResponse response;
response.setOk(true);
@@ -442,7 +444,7 @@ void ShardingTestFixture::expectSetShardVersion(const HostAndPort& expectedHost,
const ChunkVersion& expectedChunkVersion) {
onCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(expectedHost, request.target);
- ASSERT_EQUALS(rpc::makeEmptyMetadata(), request.metadata);
+ ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata);
SetShardVersionRequest ssv =
assertGet(SetShardVersionRequest::parseFromBSON(request.cmdObj));
@@ -474,7 +476,7 @@ void ShardingTestFixture::expectCount(const HostAndPort& configHost,
auto queryElem = request.cmdObj["query"];
ASSERT_TRUE(queryElem.eoo() || queryElem.Obj().isEmpty());
} else {
- ASSERT_EQUALS(expectedQuery, request.cmdObj["query"].Obj());
+ ASSERT_BSONOBJ_EQ(expectedQuery, request.cmdObj["query"].Obj());
}
if (response.isOK()) {
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 7a09e8ec3b6..469de14205e 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -96,7 +96,7 @@ TEST(BatchedCommandRequest, InsertClone) {
ASSERT_EQ("xyz.abc", clonedRequest.getNS().toString());
ASSERT_EQ("xyz.abc", clonedRequest.getTargetingNSS().toString());
ASSERT_TRUE(clonedRequest.getOrdered());
- ASSERT_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
+ ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
ASSERT_TRUE(clonedRequest.shouldBypassValidation());
batchedRequest.setShouldBypassValidation(false);
@@ -126,14 +126,14 @@ TEST(BatchedCommandRequest, InsertIndexClone) {
ASSERT_EQ("xyz.system.indexes", clonedRequest.getNS().toString());
ASSERT_EQ("xyz.user", clonedRequest.getTargetingNSS().toString());
ASSERT_TRUE(clonedRequest.getOrdered());
- ASSERT_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
+ ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
auto* clonedInsert = clonedRequest.getInsertRequest();
ASSERT_TRUE(clonedInsert != nullptr);
auto insertDocs = clonedInsert->getDocuments();
ASSERT_EQ(1u, insertDocs.size());
- ASSERT_EQ(indexSpec, insertDocs.front());
+ ASSERT_BSONOBJ_EQ(indexSpec, insertDocs.front());
}
TEST(BatchedCommandRequest, InsertCloneWithId) {
@@ -152,7 +152,7 @@ TEST(BatchedCommandRequest, InsertCloneWithId) {
ASSERT_EQ("xyz.abc", clonedRequest->getNS().toString());
ASSERT_EQ("xyz.abc", clonedRequest->getTargetingNSS().toString());
ASSERT_TRUE(clonedRequest->getOrdered());
- ASSERT_EQ(BSON("w" << 2), clonedRequest->getWriteConcern());
+ ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest->getWriteConcern());
ASSERT_TRUE(clonedRequest->shouldBypassValidation());
auto* clonedInsert = clonedRequest->getInsertRequest();
@@ -181,7 +181,7 @@ TEST(BatchedCommandRequest, UpdateClone) {
ASSERT_EQ("xyz.abc", clonedRequest.getNS().toString());
ASSERT_EQ("xyz.abc", clonedRequest.getTargetingNSS().toString());
ASSERT_TRUE(clonedRequest.getOrdered());
- ASSERT_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
+ ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
ASSERT_TRUE(clonedRequest.shouldBypassValidation());
}
@@ -199,7 +199,7 @@ TEST(BatchedCommandRequest, DeleteClone) {
ASSERT_EQ("xyz.abc", clonedRequest.getNS().toString());
ASSERT_EQ("xyz.abc", clonedRequest.getTargetingNSS().toString());
ASSERT_TRUE(clonedRequest.getOrdered());
- ASSERT_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
+ ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest.getWriteConcern());
}
} // namespace
diff --git a/src/mongo/s/write_ops/batched_delete_request_test.cpp b/src/mongo/s/write_ops/batched_delete_request_test.cpp
index 1951c617186..24b6edbdb9a 100644
--- a/src/mongo/s/write_ops/batched_delete_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_delete_request_test.cpp
@@ -57,7 +57,7 @@ TEST(BatchedDeleteRequest, Basic) {
ASSERT_EQ("foo.test", request.getNS().ns());
- ASSERT_EQUALS(origDeleteRequestObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(origDeleteRequestObj, request.toBSON());
}
TEST(BatchedDeleteRequest, CloneBatchedDeleteDocCopiesAllFields) {
@@ -70,13 +70,13 @@ TEST(BatchedDeleteRequest, CloneBatchedDeleteDocCopiesAllFields) {
BatchedDeleteDocument cloneToDoc;
deleteDoc.cloneTo(&cloneToDoc);
ASSERT_TRUE(cloneToDoc.isQuerySet());
- ASSERT_EQ(BSON("a" << 1), cloneToDoc.getQuery());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 1), cloneToDoc.getQuery());
ASSERT_TRUE(cloneToDoc.isLimitSet());
ASSERT_EQ(1, cloneToDoc.getLimit());
ASSERT_TRUE(cloneToDoc.isCollationSet());
- ASSERT_EQ(BSON("locale"
- << "en_US"),
- cloneToDoc.getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ cloneToDoc.getCollation());
}
TEST(BatchedDeleteRequest, CanSetAndRetrieveCollationField) {
@@ -88,9 +88,9 @@ TEST(BatchedDeleteRequest, CanSetAndRetrieveCollationField) {
deleteDoc.setCollation(BSON("locale"
<< "en_US"));
ASSERT_TRUE(deleteDoc.isCollationSet());
- ASSERT_EQ(BSON("locale"
- << "en_US"),
- deleteDoc.getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ deleteDoc.getCollation());
deleteDoc.unsetCollation();
ASSERT_FALSE(deleteDoc.isCollationSet());
}
@@ -118,7 +118,7 @@ TEST(BatchedDeleteRequest, CollationFieldSerializesToBSONCorrectly) {
<< BatchedDeleteDocument::limit(1)
<< BatchedDeleteDocument::collation(BSON("locale"
<< "en_US")));
- ASSERT_EQUALS(expectedDeleteObj, deleteDoc.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedDeleteObj, deleteDoc.toBSON());
}
TEST(BatchedDeleteRequest, CollationFieldParsesFromBSONCorrectly) {
@@ -136,12 +136,12 @@ TEST(BatchedDeleteRequest, CollationFieldParsesFromBSONCorrectly) {
ASSERT_EQ(1U, request.sizeDeletes());
ASSERT_TRUE(request.getDeletesAt(0)->isCollationSet());
- ASSERT_EQ(BSON("locale"
- << "en_US"),
- request.getDeletesAt(0)->getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ request.getDeletesAt(0)->getCollation());
// Ensure we re-serialize to the original BSON request.
- ASSERT_EQUALS(origDeleteRequestObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(origDeleteRequestObj, request.toBSON());
}
} // namespace
diff --git a/src/mongo/s/write_ops/batched_insert_request_test.cpp b/src/mongo/s/write_ops/batched_insert_request_test.cpp
index ec9efb48aa5..94976195c59 100644
--- a/src/mongo/s/write_ops/batched_insert_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_insert_request_test.cpp
@@ -56,7 +56,7 @@ TEST(BatchedInsertRequest, Basic) {
ASSERT_EQ("foo.test", request.getNS().ns());
- ASSERT_EQUALS(origInsertRequestObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(origInsertRequestObj, request.toBSON());
}
TEST(BatchedInsertRequest, GenIDAll) {
diff --git a/src/mongo/s/write_ops/batched_update_request_test.cpp b/src/mongo/s/write_ops/batched_update_request_test.cpp
index 93fbfedeb16..0d496236ddf 100644
--- a/src/mongo/s/write_ops/batched_update_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_update_request_test.cpp
@@ -63,7 +63,7 @@ TEST(BatchedUpdateRequest, Basic) {
ASSERT_EQ("foo.test", request.getNS().ns());
- ASSERT_EQUALS(origUpdateRequestObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(origUpdateRequestObj, request.toBSON());
}
TEST(BatchedUpdateRequest, CloneBatchedUpdateDocCopiesAllFields) {
@@ -78,17 +78,17 @@ TEST(BatchedUpdateRequest, CloneBatchedUpdateDocCopiesAllFields) {
BatchedUpdateDocument cloneToDoc;
updateDoc.cloneTo(&cloneToDoc);
ASSERT_TRUE(cloneToDoc.isQuerySet());
- ASSERT_EQ(BSON("a" << 1), cloneToDoc.getQuery());
+ ASSERT_BSONOBJ_EQ(BSON("a" << 1), cloneToDoc.getQuery());
ASSERT_TRUE(cloneToDoc.isUpdateExprSet());
- ASSERT_EQ(BSON("$set" << BSON("a" << 2)), cloneToDoc.getUpdateExpr());
+ ASSERT_BSONOBJ_EQ(BSON("$set" << BSON("a" << 2)), cloneToDoc.getUpdateExpr());
ASSERT_TRUE(cloneToDoc.isMultiSet());
ASSERT_TRUE(cloneToDoc.getMulti());
ASSERT_TRUE(cloneToDoc.isUpsertSet());
ASSERT_TRUE(cloneToDoc.getUpsert());
ASSERT_TRUE(cloneToDoc.isCollationSet());
- ASSERT_EQ(BSON("locale"
- << "en_US"),
- cloneToDoc.getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ cloneToDoc.getCollation());
}
TEST(BatchedUpdateRequest, CanSetAndRetrieveCollationField) {
@@ -100,9 +100,9 @@ TEST(BatchedUpdateRequest, CanSetAndRetrieveCollationField) {
updateDoc.setCollation(BSON("locale"
<< "en_US"));
ASSERT_TRUE(updateDoc.isCollationSet());
- ASSERT_EQ(BSON("locale"
- << "en_US"),
- updateDoc.getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ updateDoc.getCollation());
updateDoc.unsetCollation();
ASSERT_FALSE(updateDoc.isCollationSet());
}
@@ -132,7 +132,7 @@ TEST(BatchedUpdateRequest, CollationFieldSerializesToBSONCorrectly) {
<< BatchedUpdateDocument::collation(BSON("locale"
<< "en_US")));
- ASSERT_EQUALS(expectedUpdateObj, updateDoc.toBSON());
+ ASSERT_BSONOBJ_EQ(expectedUpdateObj, updateDoc.toBSON());
}
TEST(BatchedUpdateRequest, CollationFieldParsesFromBSONCorrectly) {
@@ -151,12 +151,12 @@ TEST(BatchedUpdateRequest, CollationFieldParsesFromBSONCorrectly) {
ASSERT_EQ(1U, request.sizeUpdates());
ASSERT_TRUE(request.getUpdatesAt(0)->isCollationSet());
- ASSERT_EQ(BSON("locale"
- << "en_US"),
- request.getUpdatesAt(0)->getCollation());
+ ASSERT_BSONOBJ_EQ(BSON("locale"
+ << "en_US"),
+ request.getUpdatesAt(0)->getCollation());
// Ensure we re-serialize to the original BSON request.
- ASSERT_EQUALS(origUpdateRequestObj, request.toBSON());
+ ASSERT_BSONOBJ_EQ(origUpdateRequestObj, request.toBSON());
}
} // namespace
diff --git a/src/mongo/unittest/SConscript b/src/mongo/unittest/SConscript
index 6cc281da64d..e9274575c9e 100644
--- a/src/mongo/unittest/SConscript
+++ b/src/mongo/unittest/SConscript
@@ -4,19 +4,21 @@ Import("env")
env.Library(target="unittest",
source=[
+ 'bson_test_util.cpp',
'death_test.cpp',
'temp_dir.cpp',
'unittest.cpp',
'unittest_helpers.cpp',
],
- LIBDEPS=['$BUILD_DIR/mongo/util/foundation',
- '$BUILD_DIR/mongo/util/options_parser/options_parser',
+ LIBDEPS=[
+ '$BUILD_DIR/mongo/base',
+ '$BUILD_DIR/mongo/util/foundation',
+ '$BUILD_DIR/mongo/util/options_parser/options_parser',
])
env.Library("unittest_main", ['unittest_main.cpp'],
LIBDEPS=[
'unittest',
- '$BUILD_DIR/mongo/base',
])
env.Library(target="integration_test_main",
diff --git a/src/mongo/unittest/bson_test_util.cpp b/src/mongo/unittest/bson_test_util.cpp
new file mode 100644
index 00000000000..57d93d36026
--- /dev/null
+++ b/src/mongo/unittest/bson_test_util.cpp
@@ -0,0 +1,59 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/unittest/bson_test_util.h"
+
+namespace mongo {
+namespace unittest {
+
+#define GENERATE_BSON_CMP_FUNC(BSONTYPE, NAME, COMPARATOR, OPERATOR) \
+ void assertComparison_##BSONTYPE##NAME(const std::string& theFile, \
+ unsigned theLine, \
+ StringData aExpression, \
+ StringData bExpression, \
+ const BSONTYPE& aValue, \
+ const BSONTYPE& bValue) { \
+ if (!COMPARATOR.evaluate(aValue OPERATOR bValue)) { \
+ std::ostringstream os; \
+ os << "Expected [ " << aExpression << " " #OPERATOR " " << bExpression \
+ << " ] but found [ " << aValue << " " #OPERATOR " " << bValue << " ]"; \
+ TestAssertionFailure(theFile, theLine, os.str()).stream(); \
+ } \
+ }
+
+GENERATE_BSON_CMP_FUNC(BSONObj, EQ, SimpleBSONObjComparator::kInstance, ==);
+GENERATE_BSON_CMP_FUNC(BSONObj, LT, SimpleBSONObjComparator::kInstance, <);
+GENERATE_BSON_CMP_FUNC(BSONObj, LTE, SimpleBSONObjComparator::kInstance, <=);
+GENERATE_BSON_CMP_FUNC(BSONObj, GT, SimpleBSONObjComparator::kInstance, >);
+GENERATE_BSON_CMP_FUNC(BSONObj, GTE, SimpleBSONObjComparator::kInstance, >=);
+GENERATE_BSON_CMP_FUNC(BSONObj, NE, SimpleBSONObjComparator::kInstance, !=);
+
+} // namespace unittest
+} // namespace mongo
diff --git a/src/mongo/unittest/bson_test_util.h b/src/mongo/unittest/bson_test_util.h
new file mode 100644
index 00000000000..9a83a3fd482
--- /dev/null
+++ b/src/mongo/unittest/bson_test_util.h
@@ -0,0 +1,70 @@
+/**
+ * Copyright (C) 2016 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/bson/simple_bsonobj_comparator.h"
+#include "mongo/unittest/unittest.h"
+
+/**
+ * BSON comparison utility macro. Do not use directly.
+ */
+#define ASSERT_BSON_COMPARISON(NAME, a, b) \
+ ::mongo::unittest::assertComparison_##NAME(__FILE__, __LINE__, #a, #b, a, b)
+
+/**
+ * Use to compare two instances of type BSONObj under the default comparator in unit tests.
+ */
+#define ASSERT_BSONOBJ_EQ(a, b) ASSERT_BSON_COMPARISON(BSONObjEQ, a, b)
+#define ASSERT_BSONOBJ_LT(a, b) ASSERT_BSON_COMPARISON(BSONObjLT, a, b)
+#define ASSERT_BSONOBJ_LTE(a, b) ASSERT_BSON_COMPARISON(BSONObjLTE, a, b)
+#define ASSERT_BSONOBJ_GT(a, b) ASSERT_BSON_COMPARISON(BSONObjGT, a, b)
+#define ASSERT_BSONOBJ_GTE(a, b) ASSERT_BSON_COMPARISON(BSONObjGTE, a, b)
+#define ASSERT_BSONOBJ_NE(a, b) ASSERT_BSON_COMPARISON(BSONObjNE, a, b)
+
+namespace mongo {
+namespace unittest {
+
+#define DECLARE_BSON_CMP_FUNC(BSONTYPE, NAME) \
+ void assertComparison_##BSONTYPE##NAME(const std::string& theFile, \
+ unsigned theLine, \
+ StringData aExpression, \
+ StringData bExpression, \
+ const BSONTYPE& aValue, \
+ const BSONTYPE& bValue);
+
+DECLARE_BSON_CMP_FUNC(BSONObj, EQ);
+DECLARE_BSON_CMP_FUNC(BSONObj, LT);
+DECLARE_BSON_CMP_FUNC(BSONObj, LTE);
+DECLARE_BSON_CMP_FUNC(BSONObj, GT);
+DECLARE_BSON_CMP_FUNC(BSONObj, GTE);
+DECLARE_BSON_CMP_FUNC(BSONObj, NE);
+#undef DECLARE_BSON_CMP_FUNC
+
+} // namespace unittest
+} // namespace mongo
diff --git a/src/mongo/unittest/unittest.h b/src/mongo/unittest/unittest.h
index 4b38e0d1cc2..427b8551fcd 100644
--- a/src/mongo/unittest/unittest.h
+++ b/src/mongo/unittest/unittest.h
@@ -46,6 +46,7 @@
#include "mongo/logger/logstream_builder.h"
#include "mongo/logger/message_log_domain.h"
#include "mongo/stdx/functional.h"
+#include "mongo/unittest/bson_test_util.h"
#include "mongo/unittest/unittest_helpers.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/unittest/unittest_test.cpp b/src/mongo/unittest/unittest_test.cpp
index 37d1805a951..c9d8fa7a9a8 100644
--- a/src/mongo/unittest/unittest_test.cpp
+++ b/src/mongo/unittest/unittest_test.cpp
@@ -35,6 +35,7 @@
#include <limits>
#include <string>
+#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/stdx/functional.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
@@ -138,6 +139,56 @@ TEST(UnitTestSelfTest, TestNoDoubleEvaluation) {
ASSERT_TEST_FAILS_MATCH(ASSERT_EQ(0, ++i), "(0 == 1)");
}
+TEST(UnitTestSelfTest, BSONObjEQ) {
+ ASSERT_BSONOBJ_EQ(BSON("foo"
+ << "bar"),
+ BSON("foo"
+ << "bar"));
+}
+
+TEST(UnitTestSelfTest, BSONObjNE) {
+ ASSERT_BSONOBJ_NE(BSON("foo"
+ << "bar"),
+ BSON("foo"
+ << "baz"));
+}
+
+TEST(UnitTestSelfTest, BSONObjLT) {
+ ASSERT_BSONOBJ_LT(BSON("foo"
+ << "bar"),
+ BSON("foo"
+ << "baz"));
+}
+
+TEST(UnitTestSelfTest, BSONObjLTE) {
+ ASSERT_BSONOBJ_LTE(BSON("foo"
+ << "bar"),
+ BSON("foo"
+ << "baz"));
+ ASSERT_BSONOBJ_LTE(BSON("foo"
+ << "bar"),
+ BSON("foo"
+ << "bar"));
+}
+
+TEST(UnitTestSelfTest, BSONObjGT) {
+ ASSERT_BSONOBJ_GT(BSON("foo"
+ << "baz"),
+ BSON("foo"
+ << "bar"));
+}
+
+TEST(UnitTestSelfTest, BSONObjGTE) {
+ ASSERT_BSONOBJ_GTE(BSON("foo"
+ << "baz"),
+ BSON("foo"
+ << "bar"));
+ ASSERT_BSONOBJ_GTE(BSON("foo"
+ << "bar"),
+ BSON("foo"
+ << "bar"));
+}
+
DEATH_TEST(DeathTestSelfTest, TestDeath, "Invariant failure false") {
invariant(false);
}
diff --git a/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp b/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp
index 70e37aacef5..19d7789623b 100644
--- a/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp
+++ b/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp
@@ -238,7 +238,7 @@ TEST(BSONObjCensorTests, Strings) {
<< false);
cmdline_utils::censorBSONObj(&obj);
- ASSERT_EQUALS(res, obj);
+ ASSERT_BSONOBJ_EQ(res, obj);
}
TEST(BSONObjCensorTests, Arrays) {
@@ -275,7 +275,7 @@ TEST(BSONObjCensorTests, Arrays) {
<< false);
cmdline_utils::censorBSONObj(&obj);
- ASSERT_EQUALS(res, obj);
+ ASSERT_BSONOBJ_EQ(res, obj);
}
TEST(BSONObjCensorTests, SubObjects) {
@@ -311,7 +311,7 @@ TEST(BSONObjCensorTests, SubObjects) {
<< false);
cmdline_utils::censorBSONObj(&obj);
- ASSERT_EQUALS(res, obj);
+ ASSERT_BSONOBJ_EQ(res, obj);
}
} // namespace
diff --git a/src/mongo/util/options_parser/environment_test.cpp b/src/mongo/util/options_parser/environment_test.cpp
index b5b7b774b1c..79764b382fa 100644
--- a/src/mongo/util/options_parser/environment_test.cpp
+++ b/src/mongo/util/options_parser/environment_test.cpp
@@ -137,7 +137,7 @@ TEST(ToBSONTests, NormalValues) {
<< "string");
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
- ASSERT_EQUALS(obj, environment.toBSON());
+ ASSERT_BSONOBJ_EQ(obj, environment.toBSON());
}
TEST(ToBSONTests, DottedValues) {
@@ -151,7 +151,7 @@ TEST(ToBSONTests, DottedValues) {
<< true);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
- ASSERT_EQUALS(obj, environment.toBSON());
+ ASSERT_BSONOBJ_EQ(obj, environment.toBSON());
}
TEST(ToBSONTests, DeepDottedValues) {
@@ -171,6 +171,6 @@ TEST(ToBSONTests, DeepDottedValues) {
<< 6.0);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
- ASSERT_EQUALS(obj, environment.toBSON());
+ ASSERT_BSONOBJ_EQ(obj, environment.toBSON());
}
} // unnamed namespace
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index dba5910d395..a0d97c512bb 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -684,7 +684,7 @@ TEST(Parsing, DefaultValuesNotInBSON) {
ASSERT_OK(parser.run(testOpts, argv, env_map, &environment));
mongo::BSONObj expected = BSON("val1" << 6);
- ASSERT_EQUALS(expected, environment.toBSON());
+ ASSERT_BSONOBJ_EQ(expected, environment.toBSON());
}
TEST(Parsing, ImplicitValue) {