author    Romans Kasperovics <romans.kasperovics@mongodb.com>  2022-12-20 11:26:52 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-12-20 11:59:17 +0000
commit    5324ef1fab3f06ad008050ac12fadb12b8de877c (patch)
tree      8d275c491bf7f5f378947269dca3d3c53ad0abf8
parent    f84b645556a27b078b61260d55c53b664aff8b7f (diff)
download  mongo-5324ef1fab3f06ad008050ac12fadb12b8de877c.tar.gz
SERVER-70042 Assign expected / actual semantics to assert.docEq() arguments
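
Every hunk below applies the same mechanical change: the expected document moves into the first argument of assert.docEq() and the actual (observed) value into the second, giving the arguments the usual (expected, actual) assertion semantics. A minimal sketch of the convention, using illustrative names taken from the first test below:

    // Before: observed value first, expected literal second.
    //     assert.docEq(aggResult, [{_id: 99, b: 123}]);
    // After: expected literal first, observed value second.
    const aggResult = coll.aggregate(pipeline1).toArray();
    assert.docEq([{_id: 99, b: 123}], aggResult, "optional message reported on mismatch");
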
-rw-r--r--  jstests/aggregation/bugs/expression_swap_limit.js | 8
-rw-r--r--  jstests/aggregation/bugs/server6192_server6193.js | 2
-rw-r--r--  jstests/aggregation/match_no_swap_rand.js | 2
-rw-r--r--  jstests/aggregation/optimize_away_pipeline.js | 8
-rw-r--r--  jstests/aggregation/sources/merge/requires_unique_index.js | 4
-rw-r--r--  jstests/aggregation/sources/project/project_with_expressions.js | 2
-rw-r--r--  jstests/aggregation/sources/setWindowFields/collation.js | 2
-rw-r--r--  jstests/aggregation/sources/setWindowFields/derivative.js | 76
-rw-r--r--  jstests/aggregation/sources/setWindowFields/optimize.js | 8
-rw-r--r--  jstests/aggregation/sources/unionWith/unionWith_collation.js | 48
-rw-r--r--  jstests/aggregation/testall.js | 50
-rw-r--r--  jstests/auth/curop_auth_info.js | 6
-rw-r--r--  jstests/change_streams/change_stream.js | 6
-rw-r--r--  jstests/change_streams/collation.js | 14
-rw-r--r--  jstests/change_streams/delete_in_txn_produces_correct_doc_key.js | 2
-rw-r--r--  jstests/change_streams/does_not_implicitly_create_database.js | 8
-rw-r--r--  jstests/change_streams/lookup_post_image.js | 2
-rw-r--r--  jstests/change_streams/lookup_pre_image.js | 8
-rw-r--r--  jstests/change_streams/resume_from_high_water_mark_token.js | 10
-rw-r--r--  jstests/change_streams/shell_helper.js | 2
-rw-r--r--  jstests/change_streams/start_after_invalidation_exception.js | 4
-rw-r--r--  jstests/change_streams/whole_cluster_resumability.js | 12
-rw-r--r--  jstests/change_streams/whole_db_resumability.js | 12
-rw-r--r--  jstests/concurrency/fsm_workloads/drop_index_during_lookup.js | 2
-rw-r--r--  jstests/core/and_or_index_sort.js | 53
-rw-r--r--  jstests/core/awaitdata_getmore_cmd.js | 2
-rw-r--r--  jstests/core/crud_api.js | 6
-rw-r--r--  jstests/core/doc_validation.js | 4
-rw-r--r--  jstests/core/doc_validation_error.js | 4
-rw-r--r--  jstests/core/expr_index_use.js | 2
-rw-r--r--  jstests/core/find_and_modify_hint.js | 14
-rw-r--r--  jstests/core/find_and_modify_server6865.js | 4
-rw-r--r--  jstests/core/insert_one.js | 8
-rw-r--r--  jstests/core/mod_overflow.js | 10
-rw-r--r--  jstests/core/mod_special_values.js | 2
-rw-r--r--  jstests/core/projection_dotted_paths.js | 8
-rw-r--r--  jstests/core/projection_semantics.js | 8
-rw-r--r--  jstests/core/push_sort.js | 2
-rw-r--r--  jstests/core/text_covered_matching.js | 6
-rw-r--r--  jstests/core/timeseries/bucket_unpacking_with_sort.js | 4
-rw-r--r--  jstests/core/timeseries/timeseries_bucket_manual_removal.js | 8
-rw-r--r--  jstests/core/timeseries/timeseries_collation.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_delete_hint.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_filter_extended_range.js | 4
-rw-r--r--  jstests/core/timeseries/timeseries_find.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_geonear_measurements.js | 232
-rw-r--r--  jstests/core/timeseries/timeseries_groupby_reorder.js | 7
-rw-r--r--  jstests/core/timeseries/timeseries_hint.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_index_partial.js | 25
-rw-r--r--  jstests/core/timeseries/timeseries_index_stats.js | 8
-rw-r--r--  jstests/core/timeseries/timeseries_insert_after_delete.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_insert_after_update.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_internal_bucket_geo_within.js | 14
-rw-r--r--  jstests/core/timeseries/timeseries_list_collections.js | 4
-rw-r--r--  jstests/core/timeseries/timeseries_match_pushdown.js | 8
-rw-r--r--  jstests/core/timeseries/timeseries_match_pushdown_with_project.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_out_of_order.js | 2
-rw-r--r--  jstests/core/timeseries/timeseries_project.js | 38
-rw-r--r--  jstests/core/txns/aggregation_in_transaction.js | 4
-rw-r--r--  jstests/core/txns/timeseries/timeseries_reads_in_txns.js | 4
-rw-r--r--  jstests/core/update3.js | 2
-rw-r--r--  jstests/core/update_min_max_examples.js | 4
-rw-r--r--  jstests/core/update_pipeline_shell_helpers.js | 4
-rw-r--r--  jstests/core/upsert_and.js | 12
-rw-r--r--  jstests/core/upsert_fields.js | 2
-rw-r--r--  jstests/core/verify_update_mods.js | 2
-rw-r--r--  jstests/core/wildcard_and_text_indexes.js | 2
-rw-r--r--  jstests/core/wildcard_index_basic_index_bounds.js | 4
-rw-r--r--  jstests/core/wildcard_index_multikey.js | 2
-rw-r--r--  jstests/core/wildcard_index_nonblocking_sort.js | 2
-rw-r--r--  jstests/core/wildcard_index_type.js | 2
-rw-r--r--  jstests/fle2/libs/encrypted_client_util.js | 2
-rw-r--r--  jstests/libs/aggregation_pipeline_utils.js | 2
-rw-r--r--  jstests/libs/change_stream_util.js | 8
-rw-r--r--  jstests/libs/clustered_collections/clustered_collection_util.js | 4
-rw-r--r--  jstests/libs/command_line/test_parsed_options.js | 4
-rw-r--r--  jstests/libs/database_versioning.js | 4
-rw-r--r--  jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js | 4
-rw-r--r--  jstests/noPassthrough/change_stream_failover.js | 2
-rw-r--r--  jstests/noPassthrough/change_stream_resume_before_add_shard.js | 4
-rw-r--r--  jstests/noPassthrough/change_streams_require_majority_read_concern.js | 4
-rw-r--r--  jstests/noPassthrough/change_streams_update_lookup_collation.js | 14
-rw-r--r--  jstests/noPassthrough/count_helper_read_preference.js | 4
-rw-r--r--  jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js | 4
-rw-r--r--  jstests/noPassthrough/getParameterWithDetails.js | 2
-rw-r--r--  jstests/noPassthrough/lookup_metrics.js | 2
-rw-r--r--  jstests/noPassthrough/lookup_pushdown.js | 7
-rw-r--r--  jstests/noPassthrough/oplog_document_key.js | 12
-rw-r--r--  jstests/noPassthrough/out_merge_majority_read.js | 8
-rw-r--r--  jstests/noPassthrough/query_engine_stats.js | 2
-rw-r--r--  jstests/noPassthrough/shardsvr_global_index_crud_bulk.js | 12
-rw-r--r--  jstests/noPassthrough/shell_uses_correct_read_concern.js | 2
-rw-r--r--  jstests/noPassthrough/timeseries_direct_remove.js | 6
-rw-r--r--  jstests/noPassthrough/timeseries_direct_remove_conflict.js | 6
-rw-r--r--  jstests/noPassthrough/timeseries_direct_remove_reopen.js | 4
-rw-r--r--  jstests/noPassthrough/timeseries_direct_update.js | 6
-rw-r--r--  jstests/noPassthrough/timeseries_direct_update_conflict.js | 6
-rw-r--r--  jstests/noPassthrough/timeseries_insert_after_cycle_primary.js | 2
-rw-r--r--  jstests/noPassthrough/timeseries_insert_after_failed_insert.js | 2
-rw-r--r--  jstests/noPassthrough/timeseries_insert_invalid_timefield.js | 4
-rw-r--r--  jstests/noPassthrough/timeseries_insert_ordered_false.js | 10
-rw-r--r--  jstests/noPassthrough/timeseries_insert_ordered_true.js | 6
-rw-r--r--  jstests/noPassthrough/timeseries_insert_rollback.js | 2
-rw-r--r--  jstests/noPassthrough/timeseries_write_concern.js | 2
-rw-r--r--  jstests/noPassthrough/update_post_image_validation.js | 4
-rw-r--r--  jstests/noPassthrough/write_change_stream_pit_preimage_in_transaction.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/default_read_pref.js | 2
-rw-r--r--  jstests/noPassthroughWithMongod/log_component_helpers.js | 4
-rw-r--r--  jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js | 6
-rw-r--r--  jstests/readonly/aggregate.js | 4
-rw-r--r--  jstests/readonly/catalog_ops.js | 2
-rw-r--r--  jstests/replsets/abort_in_progress_transactions_on_step_up.js | 4
-rw-r--r--  jstests/replsets/change_stream_pit_pre_images.js | 26
-rw-r--r--  jstests/replsets/dbhash_system_collections.js | 4
-rw-r--r--  jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js | 2
-rw-r--r--  jstests/replsets/initial_sync_commit_prepared_transaction.js | 4
-rw-r--r--  jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js | 2
-rw-r--r--  jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js | 2
-rw-r--r--  jstests/replsets/initial_sync_oplog_hole.js | 4
-rw-r--r--  jstests/replsets/no_progress_updates_during_initial_sync.js | 8
-rw-r--r--  jstests/replsets/reconstruct_prepared_transactions_initial_sync.js | 10
-rw-r--r--  jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js | 4
-rw-r--r--  jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js | 2
-rw-r--r--  jstests/replsets/rollback_files_no_prepare_conflict.js | 2
-rw-r--r--  jstests/replsets/step_down_on_secondary.js | 4
-rw-r--r--  jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js | 2
-rw-r--r--  jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js | 2
-rw-r--r--  jstests/replsets/txn_override_unittests.js | 10
-rw-r--r--  jstests/sharding/array_shard_key.js | 2
-rw-r--r--  jstests/sharding/change_stream_against_shard_mongod.js | 2
-rw-r--r--  jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js | 2
-rw-r--r--  jstests/sharding/change_stream_error_label.js | 4
-rw-r--r--  jstests/sharding/change_stream_lookup_single_shard_cluster.js | 4
-rw-r--r--  jstests/sharding/change_stream_no_shards.js | 8
-rw-r--r--  jstests/sharding/change_stream_resume_from_different_mongos.js | 2
-rw-r--r--  jstests/sharding/change_stream_resume_shard_key_change.js | 4
-rw-r--r--  jstests/sharding/change_stream_shard_failover.js | 2
-rw-r--r--  jstests/sharding/change_stream_update_lookup_collation.js | 4
-rw-r--r--  jstests/sharding/change_stream_update_lookup_read_concern.js | 4
-rw-r--r--  jstests/sharding/change_streams/lookup_change_stream_post_image_compound_shard_key.js | 8
-rw-r--r--  jstests/sharding/change_streams/lookup_change_stream_post_image_hashed_shard_key.js | 2
-rw-r--r--  jstests/sharding/change_streams/lookup_change_stream_post_image_id_shard_key.js | 4
-rw-r--r--  jstests/sharding/change_streams_delete_in_txn_produces_correct_doc_key.js | 2
-rw-r--r--  jstests/sharding/change_streams_new_shard_new_database.js | 4
-rw-r--r--  jstests/sharding/change_streams_unsharded_update_resume.js | 4
-rw-r--r--  jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js | 4
-rw-r--r--  jstests/sharding/merge_requires_unique_index.js | 12
-rw-r--r--  jstests/sharding/mongos_local_explain.js | 2
-rw-r--r--  jstests/sharding/oplog_document_key.js | 4
-rw-r--r--  jstests/sharding/query/agg_mongos_merge.js | 2
-rw-r--r--  jstests/sharding/query/aggregation_currentop.js | 8
-rw-r--r--  jstests/sharding/timeseries_cluster_indexstats.js | 2
-rw-r--r--  jstests/sharding/update_replace_id.js | 34
-rw-r--r--  jstests/sharding/update_sharded.js | 26
-rw-r--r--  jstests/sharding/upsert_sharded.js | 36
-rw-r--r--  src/mongo/shell/assert.js | 34
156 files changed, 700 insertions, 650 deletions
diff --git a/jstests/aggregation/bugs/expression_swap_limit.js b/jstests/aggregation/bugs/expression_swap_limit.js
index eef11dea51d..c393dc74590 100644
--- a/jstests/aggregation/bugs/expression_swap_limit.js
+++ b/jstests/aggregation/bugs/expression_swap_limit.js
@@ -53,12 +53,12 @@ const pipeline1 = [
{
// The pipeline should succeed.
const aggResult = coll.aggregate(pipeline1).toArray();
- assert.docEq(aggResult, [{_id: 99, b: 123}]);
+ assert.docEq([{_id: 99, b: 123}], aggResult);
// The pipeline should succeed without pushing down to find.
const noOptResult =
coll.aggregate([{$_internalInhibitOptimization: {}}].concat(pipeline1)).toArray();
- assert.docEq(noOptResult, [{_id: 99, b: 123}]);
+ assert.docEq([{_id: 99, b: 123}], noOptResult);
}
// Similarly, we can select the 1 valid document by flipping the sort and skipping
@@ -72,11 +72,11 @@ const pipeline2 = [
{
// The pipeline should succeed.
const aggResult = coll.aggregate(pipeline2).toArray();
- assert.docEq(aggResult, [{_id: 99, b: 123}]);
+ assert.docEq([{_id: 99, b: 123}], aggResult);
// The pipeline should succeed without pushing down to find.
const noOptResult =
coll.aggregate([{$_internalInhibitOptimization: {}}].concat(pipeline2)).toArray();
- assert.docEq(noOptResult, [{_id: 99, b: 123}]);
+ assert.docEq([{_id: 99, b: 123}], noOptResult);
}
})();
diff --git a/jstests/aggregation/bugs/server6192_server6193.js b/jstests/aggregation/bugs/server6192_server6193.js
index 30103dfd0a8..114a9675987 100644
--- a/jstests/aggregation/bugs/server6192_server6193.js
+++ b/jstests/aggregation/bugs/server6192_server6193.js
@@ -37,7 +37,7 @@ function optimize(expression) {
function assertOptimized(expression, value) {
const optimized = optimize(expression);
- assert.docEq(optimized, {$const: value}, "ensure short-circuiting worked", optimized);
+ assert.docEq({$const: value}, optimized, "ensure short-circuiting worked");
}
function assertNotOptimized(expression) {
diff --git a/jstests/aggregation/match_no_swap_rand.js b/jstests/aggregation/match_no_swap_rand.js
index 508056e97f4..3206f6fa792 100644
--- a/jstests/aggregation/match_no_swap_rand.js
+++ b/jstests/aggregation/match_no_swap_rand.js
@@ -31,7 +31,7 @@ function assertScanFilterEq({coll, pipeline, filter}) {
// Sometimes explain will have 'filter' set to an empty object, other times there will be no
// 'filter'. If we are expecting there to be no filter on the COLLSCAN, either is acceptable.
if (filter) {
- assert.docEq(collScan.filter, filter);
+ assert.docEq(filter, collScan.filter);
} else {
assert(!collScan.filter || Object.keys(collScan.filter).length == 0);
}
diff --git a/jstests/aggregation/optimize_away_pipeline.js b/jstests/aggregation/optimize_away_pipeline.js
index d08bde70f5d..065691a0663 100644
--- a/jstests/aggregation/optimize_away_pipeline.js
+++ b/jstests/aggregation/optimize_away_pipeline.js
@@ -76,9 +76,9 @@ function assertPipelineUsesAggregation({
if (expectedResult) {
const actualResult = coll.aggregate(pipeline, pipelineOptions).toArray();
if (preserveResultOrder) {
- assert.docEq(actualResult, expectedResult);
+ assert.docEq(expectedResult, actualResult);
} else {
- assert.sameMembers(actualResult, expectedResult);
+ assert.sameMembers(expectedResult, actualResult);
}
}
@@ -120,9 +120,9 @@ function assertPipelineDoesNotUseAggregation({
if (expectedResult) {
const actualResult = coll.aggregate(pipeline, pipelineOptions).toArray();
if (preserveResultOrder) {
- assert.docEq(actualResult, expectedResult);
+ assert.docEq(expectedResult, actualResult);
} else {
- assert.sameMembers(actualResult, expectedResult);
+ assert.sameMembers(expectedResult, actualResult);
}
}
diff --git a/jstests/aggregation/sources/merge/requires_unique_index.js b/jstests/aggregation/sources/merge/requires_unique_index.js
index 1565381c6c3..2e94b8bddab 100644
--- a/jstests/aggregation/sources/merge/requires_unique_index.js
+++ b/jstests/aggregation/sources/merge/requires_unique_index.js
@@ -343,8 +343,8 @@ function dropWithoutImplicitRecreate(coll) {
}
}
]));
- assert.docEq(target.findOne({"a.b": "hi", proofOfUpdate: "PROOF"}),
- {_id: "TARGET", a: {b: "hi"}, proofOfUpdate: "PROOF"});
+ assert.docEq({_id: "TARGET", a: {b: "hi"}, proofOfUpdate: "PROOF"},
+ target.findOne({"a.b": "hi", proofOfUpdate: "PROOF"}));
}());
// Test that a unique index that is sparse can still be used.
diff --git a/jstests/aggregation/sources/project/project_with_expressions.js b/jstests/aggregation/sources/project/project_with_expressions.js
index 2b7a69c701c..bcfe921f1db 100644
--- a/jstests/aggregation/sources/project/project_with_expressions.js
+++ b/jstests/aggregation/sources/project/project_with_expressions.js
@@ -21,7 +21,7 @@ function assertProjectionResultForFindAndAgg(projection, expectedResults) {
function assertResultsEqual(results, expected) {
assert.eq(results.length, expected.length);
for (let i = 0; i < results.length; ++i) {
- assert.docEq(results[i], expected[i]);
+ assert.docEq(expected[i], results[i]);
}
}
assertResultsEqual(aggResults, expectedResults);
diff --git a/jstests/aggregation/sources/setWindowFields/collation.js b/jstests/aggregation/sources/setWindowFields/collation.js
index 4568e36f908..235ccd7aaf8 100644
--- a/jstests/aggregation/sources/setWindowFields/collation.js
+++ b/jstests/aggregation/sources/setWindowFields/collation.js
@@ -30,7 +30,7 @@ let results =
collation)
.toArray();
// Test document order before $_internalSetWindowFields rather than $setWindowFields output order.
-assert.docEq(results[0].arr, ["10", "3", "2"]);
+assert.docEq(["10", "3", "2"], results[0].arr);
// Test that partitionBy and window function respect collation.
results =
diff --git a/jstests/aggregation/sources/setWindowFields/derivative.js b/jstests/aggregation/sources/setWindowFields/derivative.js
index a57af814b30..aeb2339d15b 100644
--- a/jstests/aggregation/sources/setWindowFields/derivative.js
+++ b/jstests/aggregation/sources/setWindowFields/derivative.js
@@ -99,24 +99,26 @@ result = coll.aggregate([
{$sort: {time: 1}},
])
.toArray();
-assert.docEq(result, [
- // The first document looks behind 3, but can't go any further back than time: 0.
- // It also looks ahead 1. So the points it compares are time: 0 and time: 1.
- {time: 0, y: 100, dy: +5},
- // The second document gets time: 0 and time: 2.
- {time: 1, y: 105, dy: +8 / 2},
- // The third gets time: 0 and time: 3.
- {time: 2, y: 108, dy: +8 / 3},
- // This is the first document whose left endpoint lies within the partition.
- // So this one, and the next few, all have fully-populated windows.
- {time: 3, y: 108, dy: +15 / 4},
- {time: 4, y: 115, dy: +10 / 4},
- {time: 5, y: 115, dy: +10 / 4},
- {time: 6, y: 118, dy: +10 / 4},
- // For the last document, there is no document at offset +1, so it sees
- // time: 4 and time: 7.
- {time: 7, y: 118, dy: +3 / 3},
-]);
+assert.docEq(
+ [
+ // The first document looks behind 3, but can't go any further back than time: 0.
+ // It also looks ahead 1. So the points it compares are time: 0 and time: 1.
+ {time: 0, y: 100, dy: +5},
+ // The second document gets time: 0 and time: 2.
+ {time: 1, y: 105, dy: +8 / 2},
+ // The third gets time: 0 and time: 3.
+ {time: 2, y: 108, dy: +8 / 3},
+ // This is the first document whose left endpoint lies within the partition.
+ // So this one, and the next few, all have fully-populated windows.
+ {time: 3, y: 108, dy: +15 / 4},
+ {time: 4, y: 115, dy: +10 / 4},
+ {time: 5, y: 115, dy: +10 / 4},
+ {time: 6, y: 118, dy: +10 / 4},
+ // For the last document, there is no document at offset +1, so it sees
+ // time: 4 and time: 7.
+ {time: 7, y: 118, dy: +3 / 3},
+ ],
+ result);
// Because the derivative is the same irrespective of sort order (as long as we reexpress the
// bounds) we can compare this result with the result of the previous aggregation.
const resultDesc =
@@ -133,7 +135,7 @@ const resultDesc =
{$sort: {time: 1}},
])
.toArray();
-assert.docEq(result, resultDesc);
+assert.docEq(resultDesc, result);
// Example with range-based bounds.
coll.drop();
@@ -158,14 +160,16 @@ result = coll.aggregate([
{$sort: {time: 1}},
])
.toArray();
-assert.docEq(result, [
- {time: 0, y: 10, dy: null},
- {time: 10, y: 12, dy: (12 - 10) / (10 - 0)},
- {time: 11, y: 15, dy: (15 - 12) / (11 - 10)},
- {time: 12, y: 19, dy: (19 - 12) / (12 - 10)},
- {time: 13, y: 24, dy: (24 - 12) / (13 - 10)},
- {time: 20, y: 30, dy: (30 - 12) / (20 - 10)},
-]);
+assert.docEq(
+ [
+ {time: 0, y: 10, dy: null},
+ {time: 10, y: 12, dy: (12 - 10) / (10 - 0)},
+ {time: 11, y: 15, dy: (15 - 12) / (11 - 10)},
+ {time: 12, y: 19, dy: (19 - 12) / (12 - 10)},
+ {time: 13, y: 24, dy: (24 - 12) / (13 - 10)},
+ {time: 20, y: 30, dy: (30 - 12) / (20 - 10)},
+ ],
+ result);
// 'unit' only supports 'week' and smaller.
coll.drop();
@@ -377,12 +381,14 @@ result = coll.aggregate([
{$sort: {time: 1}},
])
.toArray();
-assert.docEq(result, [
- {time: ISODate("2020-01-01T00:00:00.00Z"), y: 10, dy: null},
- {time: ISODate("2020-01-01T00:00:10.00Z"), y: 12, dy: (12 - 10) / (10 - 0)},
- {time: ISODate("2020-01-01T00:00:11.00Z"), y: 15, dy: (15 - 12) / (11 - 10)},
- {time: ISODate("2020-01-01T00:00:12.00Z"), y: 19, dy: (19 - 12) / (12 - 10)},
- {time: ISODate("2020-01-01T00:00:13.00Z"), y: 24, dy: (24 - 12) / (13 - 10)},
- {time: ISODate("2020-01-01T00:00:20.00Z"), y: 30, dy: (30 - 12) / (20 - 10)},
-]);
+assert.docEq(
+ [
+ {time: ISODate("2020-01-01T00:00:00.00Z"), y: 10, dy: null},
+ {time: ISODate("2020-01-01T00:00:10.00Z"), y: 12, dy: (12 - 10) / (10 - 0)},
+ {time: ISODate("2020-01-01T00:00:11.00Z"), y: 15, dy: (15 - 12) / (11 - 10)},
+ {time: ISODate("2020-01-01T00:00:12.00Z"), y: 19, dy: (19 - 12) / (12 - 10)},
+ {time: ISODate("2020-01-01T00:00:13.00Z"), y: 24, dy: (24 - 12) / (13 - 10)},
+ {time: ISODate("2020-01-01T00:00:20.00Z"), y: 30, dy: (30 - 12) / (20 - 10)},
+ ],
+ result);
})();
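For context on the derivative hunks above: $derivative inside $setWindowFields computes rise over run between the first and last documents of each window. A sketch of the stage these assertions exercise (a simplified reconstruction; the file's full pipeline is elided from the hunk headers):

    coll.aggregate([
        {$setWindowFields: {
            sortBy: {time: 1},
            output: {dy: {$derivative: {input: "$y"}, window: {documents: [-3, 1]}}}
        }},
        {$sort: {time: 1}},
    ]);
    // For the document at time: 0 the window clamps to [time: 0, time: 1],
    // so dy = (105 - 100) / (1 - 0) = +5, matching the first expected document.
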
diff --git a/jstests/aggregation/sources/setWindowFields/optimize.js b/jstests/aggregation/sources/setWindowFields/optimize.js
index 10afd74d9cc..0b250c2980f 100644
--- a/jstests/aggregation/sources/setWindowFields/optimize.js
+++ b/jstests/aggregation/sources/setWindowFields/optimize.js
@@ -47,7 +47,7 @@ const explain1 = coll.explain().aggregate([
// Redundant $sort should be removed.
assert.eq(1, numberOfStages(explain1, '$sort'), explain1);
// We keep the more specific sort.
-assert.docEq(getAggPlanStages(explain1, '$sort'), [{$sort: {sortKey: {a: 1, b: 1}}}], explain1);
+assert.docEq([{$sort: {sortKey: {a: 1, b: 1}}}], getAggPlanStages(explain1, '$sort'), explain1);
const explain2 = coll.explain().aggregate([
{$_internalInhibitOptimization: {}},
@@ -77,7 +77,7 @@ const explain3 = coll.explain().aggregate([
// $sort should be swapped with $_internalSetWindowFields, and the extra one removed.
assert.eq(1, numberOfStages(explain3, '$sort'), explain3);
// The sort we keep should be the more specific one.
-assert.docEq(getAggPlanStages(explain3, '$sort'), [{$sort: {sortKey: {a: 1, b: -1}}}], explain3);
+assert.docEq([{$sort: {sortKey: {a: 1, b: -1}}}], getAggPlanStages(explain3, '$sort'), explain3);
const explain4 = coll.explain().aggregate([
{$_internalInhibitOptimization: {}},
@@ -178,7 +178,7 @@ const explain10 = coll.explain().aggregate([
]);
assert.eq(1, numberOfStages(explain10, '$sort'), explain10);
assert.docEq(
- getAggPlanStages(explain10, '$sort'), [{$sort: {sortKey: {a: 1, b: 1, c: 1}}}], explain10);
+ [{$sort: {sortKey: {a: 1, b: 1, c: 1}}}], getAggPlanStages(explain10, '$sort'), explain10);
// Multiple compatible sorts are pushed down.
const explain11 = coll.explain().aggregate([
@@ -195,7 +195,7 @@ const explain11 = coll.explain().aggregate([
]);
assert.eq(1, numberOfStages(explain11, '$sort'), explain11);
assert.docEq(
- getAggPlanStages(explain11, '$sort'), [{$sort: {sortKey: {a: 1, b: 1, c: 1}}}], explain11);
+ [{$sort: {sortKey: {a: 1, b: 1, c: 1}}}], getAggPlanStages(explain11, '$sort'), explain11);
// An incompatible $meta sort should not be dropped or pushed down.
coll.createIndex({'$**': 'text'});
diff --git a/jstests/aggregation/sources/unionWith/unionWith_collation.js b/jstests/aggregation/sources/unionWith/unionWith_collation.js
index 1e0ee6c771f..178867c7ddf 100644
--- a/jstests/aggregation/sources/unionWith/unionWith_collation.js
+++ b/jstests/aggregation/sources/unionWith/unionWith_collation.js
@@ -59,7 +59,7 @@ const unionWith = (foreignCollName, values) => {
// Verify that a $unionWith whose local collection has no default collation uses the simple
// collation for comparisons on a foreign collection with a non-simple default collation.
let results = noCollationColl.aggregate(unionWith(caseInsensitiveColl.getName(), ["B"])).toArray();
-assert.docEq(results, [{val: "B", caseSensitiveColl: false}, {val: "B", caseSensitiveColl: true}]);
+assert.docEq([{val: "B", caseSensitiveColl: false}, {val: "B", caseSensitiveColl: true}], results);
// Verify that a $unionWith whose local collection has no default collation but which is running in
// a pipeline with a non-simple user-specified collation uses the latter for comparisons on the
// foreign collection.
@@ -67,22 +67,26 @@ results = noCollationColl
.aggregate(unionWith(caseInsensitiveColl.getName(), ["B"]),
{collation: caseInsensitiveCollation})
.toArray();
-assert.docEq(results, [
- {val: "B", caseSensitiveColl: false}, // Case insensitive match on local collection.
- {val: "b", caseSensitiveColl: false},
- {val: "b", caseSensitiveColl: true},
- {val: "B", caseSensitiveColl: true} // Case insensitive match on foreign collection.
-]);
+assert.docEq(
+ [
+ {val: "B", caseSensitiveColl: false}, // Case insensitive match on local collection.
+ {val: "b", caseSensitiveColl: false},
+ {val: "b", caseSensitiveColl: true},
+ {val: "B", caseSensitiveColl: true} // Case insensitive match on foreign collection.
+ ],
+ results);
// Verify that a $unionWith whose local collection has a non-simple collation uses the latter for
// comparisons on a foreign collection with no default collation.
results = caseInsensitiveColl.aggregate(unionWith(noCollationColl.getName(), ["B"])).toArray();
-assert.docEq(results, [
- {val: "B", caseSensitiveColl: false}, // Case insensitive match on local collection.
- {val: "b", caseSensitiveColl: false},
- {val: "b", caseSensitiveColl: true},
- {val: "B", caseSensitiveColl: true} // Case insensitive match on foreign collection.
-]);
+assert.docEq(
+ [
+ {val: "B", caseSensitiveColl: false}, // Case insensitive match on local collection.
+ {val: "b", caseSensitiveColl: false},
+ {val: "b", caseSensitiveColl: true},
+ {val: "B", caseSensitiveColl: true} // Case insensitive match on foreign collection.
+ ],
+ results);
// Verify that a $unionWith whose local collection has a non-simple collation but which is running
// in a pipeline with a user-specified simple collation uses the latter for comparisons on the
@@ -90,7 +94,7 @@ assert.docEq(results, [
results = caseInsensitiveColl
.aggregate(unionWith(noCollationColl.getName(), ["B"]), {collation: simpleCollation})
.toArray();
-assert.docEq(results, [{val: "B", caseSensitiveColl: false}, {val: "B", caseSensitiveColl: true}]);
+assert.docEq([{val: "B", caseSensitiveColl: false}, {val: "B", caseSensitiveColl: true}], results);
// Create a case-sensitive/simple view and a case-insensitive view.
testDB.noCollationView.drop();
@@ -112,7 +116,7 @@ assert.commandWorked(testDB.runCommand({
results =
caseInsensitiveColl.aggregate(unionWith("noCollationView", ["B"]), {collation: simpleCollation})
.toArray();
-assert.docEq(results, [{val: "B", caseSensitiveView: true}, {val: "B", caseSensitiveColl: false}]);
+assert.docEq([{val: "B", caseSensitiveView: true}, {val: "B", caseSensitiveColl: false}], results);
// Verify that the command fails if the collation of the pipeline doesn't match the collation of the
// view.
@@ -133,10 +137,12 @@ assert.commandFailedWithCode(noCollationColl.runCommand({
// Verify that the command succeeds if both the pipeline and the $unionWith'd view uses a
// case-insensitive collation.
results = caseInsensitiveColl.aggregate(unionWith("caseInsensitiveView", ["B"])).toArray();
-assert.docEq(results, [
- {val: "B", caseSensitiveView: false},
- {val: "b", caseSensitiveView: false},
- {val: "B", caseSensitiveColl: false},
- {val: "b", caseSensitiveColl: false}
-]);
+assert.docEq(
+ [
+ {val: "B", caseSensitiveView: false},
+ {val: "b", caseSensitiveView: false},
+ {val: "B", caseSensitiveColl: false},
+ {val: "b", caseSensitiveColl: false}
+ ],
+ results);
})();
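The collation objects referenced by name in the hunks above are defined earlier in the file and not shown in this excerpt. For reference, hypothetical definitions consistent with how the tests behave (the file's literal definitions may differ):

    const caseInsensitiveCollation = {locale: "en_US", strength: 2};  // strength 2 ignores case
    const simpleCollation = {locale: "simple"};                       // binary codepoint comparison
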
diff --git a/jstests/aggregation/testall.js b/jstests/aggregation/testall.js
index 1dd0ec11813..f752b06b1fb 100644
--- a/jstests/aggregation/testall.js
+++ b/jstests/aggregation/testall.js
@@ -20,7 +20,7 @@ let p1result = [
{"_id": 3, "pageViews": 6, "tags": ["nasty", "filthy"]}
];
-assert.docEq(p1.cursor.firstBatch, p1result, 'p1 failed');
+assert.docEq(p1result, p1.cursor.firstBatch, 'p1 failed');
// a simple array unwinding
let u1 = testDB.runCommand({aggregate: "article", pipeline: [{$unwind: "$tags"}], cursor: {}});
@@ -129,7 +129,7 @@ let u2result = [
{"_id": 4, "a": 1, "b": {"e": 7, "f": 1}, "c": 12, "d": 17}
];
-assert.docEq(u2.cursor.firstBatch, u2result, 'u2 failed');
+assert.docEq(u2result, u2.cursor.firstBatch, 'u2 failed');
// combining a projection with unwinding an array
let p2 = testDB.runCommand({
@@ -160,7 +160,7 @@ let p3 = testDB.runCommand({
let p3result = [{"_id": 1, "otherfoo": 5}, {"_id": 2, "otherbar": 14}, {"_id": 3, "otherbar": 14}];
-assert.docEq(p3.cursor.firstBatch, p3result, 'p3 failed');
+assert.docEq(p3result, p3.cursor.firstBatch, 'p3 failed');
// projection includes a computed value
let p4 = testDB.runCommand({
@@ -175,7 +175,7 @@ let p4result = [
{"_id": 3, "author": "jane", "daveWroteIt": false}
];
-assert.docEq(p4.cursor.firstBatch, p4result, 'p4 failed');
+assert.docEq(p4result, p4.cursor.firstBatch, 'p4 failed');
// projection includes a virtual (fabricated) document
let p5 = testDB.runCommand({
@@ -295,7 +295,7 @@ let p7 = testDB.runCommand({
let p7result = [{"_id": 1, "theSum": 10}, {"_id": 2, "theSum": 21}, {"_id": 3, "theSum": 20}];
-assert.docEq(p7.cursor.firstBatch, p7result, 'p7 failed');
+assert.docEq(p7result, p7.cursor.firstBatch, 'p7 failed');
// dotted path inclusion; _id exclusion
let p8 = testDB.runCommand({
@@ -331,7 +331,7 @@ let p9result = [
{"author": "jane", "commentsAuthor": ["will", "jenny"]}
];
-assert.docEq(p9.cursor.firstBatch, p9result, 'p9 failed');
+assert.docEq(p9result, p9.cursor.firstBatch, 'p9 failed');
// simple sort
let p10 = testDB.runCommand({aggregate: "article", pipeline: [{$sort: {title: 1}}], cursor: {}});
@@ -376,7 +376,7 @@ let p10result = [
}
];
-assert.docEq(p10.cursor.firstBatch, p10result, 'p10 failed');
+assert.docEq(p10result, p10.cursor.firstBatch, 'p10 failed');
// unwind on nested array
testDB.p11.drop();
@@ -404,7 +404,7 @@ let p11result = [
{"_id": 5, "name": "MongoDB", "author": "vivek"},
];
-assert.docEq(p11.cursor.firstBatch, p11result, 'p11 failed');
+assert.docEq(p11result, p11.cursor.firstBatch, 'p11 failed');
// multiply test
let p12 = testDB.runCommand({
@@ -422,7 +422,7 @@ let p12 = testDB.runCommand({
let p12result =
[{"_id": 1, "theProduct": 25}, {"_id": 2, "theProduct": 98}, {"_id": 3, "theProduct": 84}];
-assert.docEq(p12.cursor.firstBatch, p12result, 'p12 failed');
+assert.docEq(p12result, p12.cursor.firstBatch, 'p12 failed');
// subtraction test
let p13 = testDB.runCommand({
@@ -445,7 +445,7 @@ let p13result = [
{"_id": 3, "theDifference": -8}
];
-assert.docEq(p13.cursor.firstBatch, p13result, 'p13 failed');
+assert.docEq(p13result, p13.cursor.firstBatch, 'p13 failed');
// mod test
let p14 = testDB.runCommand({
@@ -469,7 +469,7 @@ let p14 = testDB.runCommand({
let p14result =
[{"_id": 1, "theRemainder": 0}, {"_id": 2, "theRemainder": 0}, {"_id": 3, "theRemainder": 2}];
-assert.docEq(p14.cursor.firstBatch, p14result, 'p14 failed');
+assert.docEq(p14result, p14.cursor.firstBatch, 'p14 failed');
// toUpper test
let p15 = testDB.runCommand({
@@ -484,7 +484,7 @@ let p15result = [
{"_id": 3, "author": "JANE", "pageViews": 6}
];
-assert.docEq(p15.cursor.firstBatch, p15result, 'p15 failed');
+assert.docEq(p15result, p15.cursor.firstBatch, 'p15 failed');
// toLower test
let p16 = testDB.runCommand({
@@ -515,7 +515,7 @@ let p16result = [
}
];
-assert.docEq(p16.cursor.firstBatch, p16result, 'p16 failed');
+assert.docEq(p16result, p16.cursor.firstBatch, 'p16 failed');
// substr test
let p17 = testDB.runCommand({
@@ -534,7 +534,7 @@ let p17 = testDB.runCommand({
let p17result =
[{"_id": 1, "author": "ob"}, {"_id": 2, "author": "av"}, {"_id": 3, "author": "an"}];
-assert.docEq(p17.cursor.firstBatch, p17result, 'p17 failed');
+assert.docEq(p17result, p17.cursor.firstBatch, 'p17 failed');
// strcasecmp test
let p18 = testDB.runCommand({
@@ -558,7 +558,7 @@ let p18result = [
{"_id": 3, "tags": ["nasty", "filthy"], "thisisalametest": 1, "thisisalamepass": 0}
];
-assert.docEq(p18.cursor.firstBatch, p18result, 'p18 failed');
+assert.docEq(p18result, p18.cursor.firstBatch, 'p18 failed');
// date tests
let p19 = testDB.runCommand({
@@ -626,7 +626,7 @@ let p19result = [
}
];
-assert.docEq(p19.cursor.firstBatch, p19result, 'p19 failed');
+assert.docEq(p19result, p19.cursor.firstBatch, 'p19 failed');
testDB.lettype.drop();
testDB.lettype.save({x: 17, y: "foo"});
@@ -656,7 +656,7 @@ let p21result = [
{"author": "jane", "pageViews": 6}
];
-assert.docEq(p21.cursor.firstBatch, p21result, 'p21 failed');
+assert.docEq(p21result, p21.cursor.firstBatch, 'p21 failed');
// simple matching
let m1 =
@@ -676,7 +676,7 @@ let m1result = [{
"other": {"bar": 14}
}];
-assert.docEq(m1.cursor.firstBatch, m1result, 'm1 failed');
+assert.docEq(m1result, m1.cursor.firstBatch, 'm1 failed');
// combining matching with a projection
let m2 = testDB.runCommand({
@@ -715,7 +715,7 @@ let m2result = [
}
];
-assert.docEq(m2.cursor.firstBatch, m2result, 'm2 failed');
+assert.docEq(m2result, m2.cursor.firstBatch, 'm2 failed');
// group by tag, _id is a field reference
let g1 = testDB.runCommand({
@@ -736,7 +736,7 @@ let g1result = [
{"_id": "nasty", "docsByTag": 2, "viewsByTag": 13},
];
-assert.docEq(g1.cursor.firstBatch, g1result, 'g1 failed');
+assert.docEq(g1result, g1.cursor.firstBatch, 'g1 failed');
// $max, and averaging in a final projection; _id is structured
let g2 = testDB.runCommand({
@@ -780,7 +780,7 @@ let g2result = [
}
];
-assert.docEq(g2.cursor.firstBatch, g2result, 'g2 failed');
+assert.docEq(g2result, g2.cursor.firstBatch, 'g2 failed');
// $push as an accumulator; can pivot data
let g3 = testDB.runCommand({
@@ -807,7 +807,7 @@ let g3result = [
{"_id": {"tags": "nasty"}, "authors": ["dave", "jane"]}
];
-assert.docEq(g3.cursor.firstBatch, g3result, 'g3 failed');
+assert.docEq(g3result, g3.cursor.firstBatch, 'g3 failed');
// $avg, and averaging in a final projection
let g4 = testDB.runCommand({
@@ -835,7 +835,7 @@ let g4result = [
{"_id": {"tags": "nasty"}, "docsByTag": 2, "viewsByTag": 13, "avgByTag": 6.5}
];
-assert.docEq(g4.cursor.firstBatch, g4result, 'g4 failed');
+assert.docEq(g4result, g4.cursor.firstBatch, 'g4 failed');
// $addToSet as an accumulator; can pivot data
let g5 = testDB.runCommand({
@@ -878,7 +878,7 @@ let g5result = [
}
];
-assert.docEq(g5.cursor.firstBatch, g5result, 'g5 failed');
+assert.docEq(g5result, g5.cursor.firstBatch, 'g5 failed');
// $first and $last accumulators, constant _id
let g6 = testDB.runCommand({
@@ -933,5 +933,5 @@ let g8result = [
{"_id": "nasty", "docCount1": 2, "docCount2": 2},
];
-assert.docEq(g8.cursor.firstBatch, g8result, 'g8 failed');
+assert.docEq(g8result, g8.cursor.firstBatch, 'g8 failed');
}());
diff --git a/jstests/auth/curop_auth_info.js b/jstests/auth/curop_auth_info.js
index 30928933941..1e69c3d0f57 100644
--- a/jstests/auth/curop_auth_info.js
+++ b/jstests/auth/curop_auth_info.js
@@ -46,14 +46,14 @@ const runTest = function(conn, failPointConn) {
print(tojson(authedUsers), tojson(impersonators));
if (impersonators) {
assert.eq(authedUsers.length, 1);
- assert.docEq(authedUsers[0], {user: "testuser", db: "admin"});
+ assert.docEq({user: "testuser", db: "admin"}, authedUsers[0]);
assert(impersonators);
assert.eq(impersonators.length, 1);
- assert.docEq(impersonators[0], {user: "__system", db: "local"});
+ assert.docEq({user: "__system", db: "local"}, impersonators[0]);
} else {
assert(authedUsers);
assert.eq(authedUsers.length, 1);
- assert.docEq(authedUsers[0], {user: "testuser", db: "admin"});
+ assert.docEq({user: "testuser", db: "admin"}, authedUsers[0]);
}
};
diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js
index ca89b28292e..a7a100ea124 100644
--- a/jstests/change_streams/change_stream.js
+++ b/jstests/change_streams/change_stream.js
@@ -220,7 +220,7 @@ let resumeCursor =
// Insert a document and save the resulting change stream.
assert.commandWorked(db.resume1.insert({_id: 1}));
const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+assert.docEq({_id: 1}, firstInsertChangeDoc.fullDocument);
jsTestLog("Testing resume after one document.");
resumeCursor = cst.startWatchingChanges({
@@ -232,10 +232,10 @@ resumeCursor = cst.startWatchingChanges({
jsTestLog("Inserting additional documents.");
assert.commandWorked(db.resume1.insert({_id: 2}));
const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+assert.docEq({_id: 2}, secondInsertChangeDoc.fullDocument);
assert.commandWorked(db.resume1.insert({_id: 3}));
const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+assert.docEq({_id: 3}, thirdInsertChangeDoc.fullDocument);
jsTestLog("Testing resume after first document of three.");
resumeCursor = cst.startWatchingChanges({
diff --git a/jstests/change_streams/collation.js b/jstests/change_streams/collation.js
index 531690afd19..f6fa31a22ed 100644
--- a/jstests/change_streams/collation.js
+++ b/jstests/change_streams/collation.js
@@ -229,7 +229,7 @@ assert.commandWorked(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
assert.soon(() => changeStream.hasNext());
const next = changeStream.next();
-assert.docEq(next.documentKey, {_id: 0});
+assert.docEq({_id: 0}, next.documentKey);
const resumeToken = next._id;
// Insert a second document to see after resuming.
@@ -245,7 +245,7 @@ changeStream = caseInsensitiveCollection.watch(
{resumeAfter: resumeToken, collation: {locale: "simple"}});
assert.soon(() => changeStream.hasNext());
-assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+assert.docEq({_id: "dropped_coll"}, changeStream.next().documentKey);
// Test that a pipeline without an explicit collation is allowed to resume the change stream
// after the collection has been dropped, and it will use the simple collation. Do not
@@ -263,7 +263,7 @@ const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
changeStream = new DBCommandCursor(db, cmdRes);
assert.soon(() => changeStream.hasNext());
-assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+assert.docEq({_id: "dropped_coll"}, changeStream.next().documentKey);
}());
// Test that the default collation of a new version of the collection is not applied when
@@ -280,7 +280,7 @@ assert.commandWorked(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
assert.soon(() => changeStream.hasNext());
const next = changeStream.next();
-assert.docEq(next.documentKey, {_id: 0});
+assert.docEq({_id: 0}, next.documentKey);
const resumeToken = next._id;
// Insert a second document to see after resuming.
@@ -294,7 +294,7 @@ assert.commandWorked(caseInsensitiveCollection.insert({_id: "new collection", te
// Verify that the stream sees the insert before the drop and then is exhausted. We won't
// see the invalidate because the pipeline has a $match stage after the $changeStream.
assert.soon(() => changeStream.hasNext());
-assert.docEq(changeStream.next().fullDocument, {_id: "dropped_coll", text: "ABC"});
+assert.docEq({_id: "dropped_coll", text: "ABC"}, changeStream.next().fullDocument);
// Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
// 'isExhausted()' to force a getMore since the previous getMore may not include the
// collection drop, which is more likely with sharded collections on slow machines.
@@ -309,7 +309,7 @@ changeStream = caseInsensitiveCollection.watch(
{resumeAfter: resumeToken, collation: {locale: "fr"}});
assert.soon(() => changeStream.hasNext());
-assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+assert.docEq({_id: "dropped_coll"}, changeStream.next().documentKey);
// Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
// 'isExhausted()' to force a getMore since the previous getMore may not include the
// collection drop, which is more likely with sharded collections on slow machines.
@@ -329,6 +329,6 @@ const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
changeStream = new DBCommandCursor(db, cmdRes);
assert.soon(() => changeStream.hasNext());
-assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+assert.docEq({_id: "dropped_coll"}, changeStream.next().documentKey);
}());
})();
diff --git a/jstests/change_streams/delete_in_txn_produces_correct_doc_key.js b/jstests/change_streams/delete_in_txn_produces_correct_doc_key.js
index 0225bf4d0f5..4349c1540b3 100644
--- a/jstests/change_streams/delete_in_txn_produces_correct_doc_key.js
+++ b/jstests/change_streams/delete_in_txn_produces_correct_doc_key.js
@@ -56,7 +56,7 @@ function testDeleteInMultiDocTxn({collName, deleteCommand, expectedChanges}) {
collection: coll
});
assert.commandWorked(coll.insert({_id: 5}));
- assert.docEq(cst.getOneChange(cursor).documentKey, {_id: 5});
+ assert.docEq({_id: 5}, cst.getOneChange(cursor).documentKey);
cst.cleanUp();
}
diff --git a/jstests/change_streams/does_not_implicitly_create_database.js b/jstests/change_streams/does_not_implicitly_create_database.js
index a64cf812f99..59ced002e0c 100644
--- a/jstests/change_streams/does_not_implicitly_create_database.js
+++ b/jstests/change_streams/does_not_implicitly_create_database.js
@@ -14,7 +14,7 @@ assert.commandWorked(testDB.dropDatabase());
let dbList = assert.commandWorked(
db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
-assert.docEq(dbList.databases, []);
+assert.docEq([], dbList.databases);
const collName = "test";
@@ -29,13 +29,13 @@ assert.gt(changeStreamCursor.id, 0);
// Confirm that the database has not been implicitly created.
dbList = assert.commandWorked(
db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
-assert.docEq(dbList.databases, []);
+assert.docEq([], dbList.databases);
// Confirm that a non-$changeStream aggregation on the non-existent database returns an empty
// cursor.
const nonCsCmdRes = assert.commandWorked(
testDB.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}}));
-assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
+assert.docEq([], nonCsCmdRes.cursor.firstBatch);
assert.eq(nonCsCmdRes.cursor.id, 0);
// Now perform some writes into the collection...
@@ -47,7 +47,7 @@ assert.commandWorked(testDB[collName].remove({_id: 2}));
// ... confirm that the database has been created...
dbList = assert.commandWorked(
db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
-assert.docEq(dbList.databases, [{name: testDB.getName()}]);
+assert.docEq([{name: testDB.getName()}], dbList.databases);
// ... and verify that the changes are observed by the stream.
const expectedChanges = [
diff --git a/jstests/change_streams/lookup_post_image.js b/jstests/change_streams/lookup_post_image.js
index 0dc2ddeee3f..dc472b3272c 100644
--- a/jstests/change_streams/lookup_post_image.js
+++ b/jstests/change_streams/lookup_post_image.js
@@ -223,7 +223,7 @@ cursor = cst.startWatchingChanges({
assert.commandWorked(coll.update({_id: "getMoreEnabled"}, {$set: {updated: true}}));
const doc = cst.getOneChange(cursor);
-assert.docEq(doc["fullDocument"], {_id: "getMoreEnabled", updated: true});
+assert.docEq({_id: "getMoreEnabled", updated: true}, doc["fullDocument"]);
// Test that invalidate entries don't have 'fullDocument' even if 'updateLookup' is
// specified.
diff --git a/jstests/change_streams/lookup_pre_image.js b/jstests/change_streams/lookup_pre_image.js
index 8337052d9a9..9b030755032 100644
--- a/jstests/change_streams/lookup_pre_image.js
+++ b/jstests/change_streams/lookup_pre_image.js
@@ -43,7 +43,7 @@ assert.commandWorked(coll.insert({_id: "x"}));
let latestChange = cst.getOneChange(csNoPreImages);
assert.eq(latestChange.operationType, "insert");
assert(!latestChange.hasOwnProperty("fullDocumentBeforeChange"));
-assert.docEq(latestChange.fullDocument, {_id: "x"});
+assert.docEq({_id: "x"}, latestChange.fullDocument);
assert.docEq(latestChange, cst.getOneChange(csPreImageWhenAvailableCursor));
assert.docEq(latestChange, cst.getOneChange(csPreImageRequiredCursor));
@@ -52,7 +52,7 @@ assert.commandWorked(coll.update({_id: "x"}, {foo: "bar"}));
latestChange = cst.getOneChange(csNoPreImages);
assert.eq(latestChange.operationType, "replace");
assert(!latestChange.hasOwnProperty("fullDocumentBeforeChange"));
-assert.docEq(latestChange.fullDocument, {_id: "x", foo: "bar"});
+assert.docEq({_id: "x", foo: "bar"}, latestChange.fullDocument);
// Add the expected "fullDocumentBeforeChange" and confirm that both pre-image cursors see it.
latestChange.fullDocumentBeforeChange = {
_id: "x"
@@ -104,7 +104,7 @@ assert.commandWorked(coll.insert({_id: "y"}));
latestChange = cst.getOneChange(csNoPreImages);
assert.eq(latestChange.operationType, "insert");
assert(!latestChange.hasOwnProperty("fullDocumentBeforeChange"));
-assert.docEq(latestChange.fullDocument, {_id: "y"});
+assert.docEq({_id: "y"}, latestChange.fullDocument);
assert.docEq(latestChange, cst.getOneChange(csPreImageWhenAvailableCursor));
assert.docEq(latestChange, cst.getOneChange(csPreImageRequiredCursor));
@@ -113,7 +113,7 @@ assert.commandWorked(coll.update({_id: "y"}, {foo: "bar"}));
latestChange = cst.getOneChange(csNoPreImages);
assert.eq(latestChange.operationType, "replace");
assert(!latestChange.hasOwnProperty("fullDocumentBeforeChange"));
-assert.docEq(latestChange.fullDocument, {_id: "y", foo: "bar"});
+assert.docEq({_id: "y", foo: "bar"}, latestChange.fullDocument);
// Add the expected "fullDocumentBeforeChange" and confirm that pre-image is not present.
latestChange.fullDocumentBeforeChange = null;
diff --git a/jstests/change_streams/resume_from_high_water_mark_token.js b/jstests/change_streams/resume_from_high_water_mark_token.js
index edd34db162b..e0004c2dd25 100644
--- a/jstests/change_streams/resume_from_high_water_mark_token.js
+++ b/jstests/change_streams/resume_from_high_water_mark_token.js
@@ -56,7 +56,7 @@ const testCollationCollection =
assert.commandWorked(testCollationCollection.insert({_id: "insert_one"}));
assert.commandWorked(testCollationCollection.insert({_id: "INSERT_TWO"}));
assert.soon(() => csCursor.hasNext());
-assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+assert.docEq({_id: "INSERT_TWO"}, csCursor.next().fullDocument);
csCursor.close();
// We can resume from the pre-creation high water mark if we do not specify a collation...
@@ -73,7 +73,7 @@ let cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db,
// to the simple collation. We will therefore match 'INSERT_TWO' but not 'insert_one'.
csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
assert.soon(() => csCursor.hasNext());
-assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+assert.docEq({_id: "INSERT_TWO"}, csCursor.next().fullDocument);
csCursor.close();
// If we do specify a non-simple collation, it will be adopted by the pipeline.
@@ -90,9 +90,9 @@ cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
// Now we match both 'insert_one' and 'INSERT_TWO'.
csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
assert.soon(() => csCursor.hasNext());
-assert.docEq(csCursor.next().fullDocument, {_id: "insert_one"});
+assert.docEq({_id: "insert_one"}, csCursor.next().fullDocument);
assert.soon(() => csCursor.hasNext());
-assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+assert.docEq({_id: "INSERT_TWO"}, csCursor.next().fullDocument);
csCursor.close();
// Now open a change stream with batchSize:0 in order to produce a new high water mark.
@@ -126,7 +126,7 @@ csCursor = new DBCommandCursor(db, cmdResResumeWithCollation);
// ... but we do not inherit the collection's case-insensitive collation, matching 'INSERT_FOUR'
// but not the preceding 'insert_three'.
assert.soon(() => csCursor.hasNext());
-assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_FOUR"});
+assert.docEq({_id: "INSERT_FOUR"}, csCursor.next().fullDocument);
csCursor.close();
// Drop the collection and obtain a new pre-creation high water mark. We will use this later.
diff --git a/jstests/change_streams/shell_helper.js b/jstests/change_streams/shell_helper.js
index 65f40508694..bb136097467 100644
--- a/jstests/change_streams/shell_helper.js
+++ b/jstests/change_streams/shell_helper.js
@@ -74,7 +74,7 @@ resumeToken = change._id;
delete change._id;
delete change.clusterTime;
delete change.wallTime;
-assert.docEq(change, expected);
+assert.docEq(expected, change);
jsTestLog("Testing watch() with pipeline");
changeStreamCursor = coll.watch([{$project: {clusterTime: 1, docId: "$documentKey._id"}}]);
diff --git a/jstests/change_streams/start_after_invalidation_exception.js b/jstests/change_streams/start_after_invalidation_exception.js
index 55d51648b94..f9986efe27f 100644
--- a/jstests/change_streams/start_after_invalidation_exception.js
+++ b/jstests/change_streams/start_after_invalidation_exception.js
@@ -19,7 +19,7 @@ assert.commandWorked(coll.insert({_id: 0, a: 1}));
assert.soon(() => cursor.hasNext());
let next = cursor.next();
assert.eq(next.operationType, "insert");
-assert.docEq(next.fullDocument, {_id: 0, a: 1});
+assert.docEq({_id: 0, a: 1}, next.fullDocument);
// Drop the database, this will cause invalidation of the change streams.
assert.commandWorked(testDB.dropDatabase());
@@ -46,5 +46,5 @@ cursor = coll.watch([{$match: {operationType: 'insert'}}], {startAfter: invalida
assert.soon(() => cursor.hasNext());
next = cursor.next();
assert.eq(next.operationType, "insert");
-assert.docEq(next.fullDocument, {_id: 1, a: 101});
+assert.docEq({_id: 1, a: 101}, next.fullDocument);
}());
diff --git a/jstests/change_streams/whole_cluster_resumability.js b/jstests/change_streams/whole_cluster_resumability.js
index 8564d01e770..5f8adae4202 100644
--- a/jstests/change_streams/whole_cluster_resumability.js
+++ b/jstests/change_streams/whole_cluster_resumability.js
@@ -17,7 +17,7 @@ let resumeCursor = cst.startWatchingAllChangesForCluster();
// Insert a document in the first database and save the resulting change stream.
assert.commandWorked(db1Coll.insert({_id: 1}));
const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+assert.docEq({_id: 1}, firstInsertChangeDoc.fullDocument);
// Test resume after the first insert.
resumeCursor = cst.startWatchingChanges({
@@ -30,12 +30,12 @@ resumeCursor = cst.startWatchingChanges({
// Write the next document into the second database.
assert.commandWorked(db2Coll.insert({_id: 2}));
const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+assert.docEq({_id: 2}, secondInsertChangeDoc.fullDocument);
// Write the third document into the first database again.
assert.commandWorked(db1Coll.insert({_id: 3}));
const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+assert.docEq({_id: 3}, thirdInsertChangeDoc.fullDocument);
// Test resuming after the first insert again.
resumeCursor = cst.startWatchingChanges({
@@ -44,8 +44,8 @@ resumeCursor = cst.startWatchingChanges({
collection: 1,
aggregateOptions: {cursor: {batchSize: 0}},
});
-assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
-assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+assert.docEq(secondInsertChangeDoc, cst.getOneChange(resumeCursor));
+assert.docEq(thirdInsertChangeDoc, cst.getOneChange(resumeCursor));
// Test resume after second insert.
resumeCursor = cst.startWatchingChanges({
@@ -54,7 +54,7 @@ resumeCursor = cst.startWatchingChanges({
collection: 1,
aggregateOptions: {cursor: {batchSize: 0}},
});
-assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+assert.docEq(thirdInsertChangeDoc, cst.getOneChange(resumeCursor));
// Rename the collection and obtain a resume token from the 'rename' notification. Skip this
// test when running on a sharded collection, since these cannot be renamed.
diff --git a/jstests/change_streams/whole_db_resumability.js b/jstests/change_streams/whole_db_resumability.js
index 2e2c0e183ec..ab049e1deeb 100644
--- a/jstests/change_streams/whole_db_resumability.js
+++ b/jstests/change_streams/whole_db_resumability.js
@@ -21,7 +21,7 @@ let resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], co
assert.commandWorked(coll.insert({_id: 1}));
assert.commandWorked(otherColl.insert({_id: 2}));
const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+assert.docEq({_id: 1}, firstInsertChangeDoc.fullDocument);
assert.eq(firstInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
// Test resuming the change stream after the first insert should pick up the insert on the
@@ -33,13 +33,13 @@ resumeCursor = cst.startWatchingChanges({
});
const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+assert.docEq({_id: 2}, secondInsertChangeDoc.fullDocument);
assert.eq(secondInsertChangeDoc.ns, {db: testDB.getName(), coll: otherColl.getName()});
// Insert a third document to the first collection and test that the change stream picks it up.
assert.commandWorked(coll.insert({_id: 3}));
const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
-assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+assert.docEq({_id: 3}, thirdInsertChangeDoc.fullDocument);
assert.eq(thirdInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
// Test resuming after the first insert again.
@@ -48,8 +48,8 @@ resumeCursor = cst.startWatchingChanges({
collection: 1,
aggregateOptions: {cursor: {batchSize: 0}},
});
-assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
-assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+assert.docEq(secondInsertChangeDoc, cst.getOneChange(resumeCursor));
+assert.docEq(thirdInsertChangeDoc, cst.getOneChange(resumeCursor));
// Test resume after second insert.
resumeCursor = cst.startWatchingChanges({
@@ -57,7 +57,7 @@ resumeCursor = cst.startWatchingChanges({
collection: 1,
aggregateOptions: {cursor: {batchSize: 0}},
});
-assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+assert.docEq(thirdInsertChangeDoc, cst.getOneChange(resumeCursor));
// Rename the collection and attempt to resume from the 'rename' notification. Skip this
// test when running on a sharded collection, since these cannot be renamed.
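The resumability tests above all follow the same shape: capture a change document, save its _id as a resume token, then restart the stream from that token. A minimal shell sketch of the pattern (names illustrative, not the test's helpers):

    const change = csCursor.next();                            // e.g. firstInsertChangeDoc above
    const resumed = db.watch([], {resumeAfter: change._id});   // resumes just after 'change'
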
diff --git a/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js b/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js
index 4802b42ce41..05b15c755ff 100644
--- a/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js
+++ b/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js
@@ -18,7 +18,7 @@ var $config = (function() {
const coll = db[this.collName];
const result = coll.aggregate([{$lookup: { from: this.foreignCollName, localField: 'a', foreignField: 'b', as: 'out'}}]).toArray();
assert.eq(result.length, 1);
- assert.docEq(result[0], {_id: 0, a: 0, out: [{_id: 0, b: 0}]});
+ assert.docEq({_id: 0, a: 0, out: [{_id: 0, b: 0}]}, result[0]);
} catch (e) {
// We expect any errors of query getting killed due to selected index for join is
// dropped.
diff --git a/jstests/core/and_or_index_sort.js b/jstests/core/and_or_index_sort.js
index ae91b8931bf..4503ddb5741 100644
--- a/jstests/core/and_or_index_sort.js
+++ b/jstests/core/and_or_index_sort.js
@@ -35,20 +35,18 @@ assert.commandWorked(coll.insert([
runWithDifferentIndexes(
[[], [{a: 1}, {b: 1, c: 1}], [{a: 1, c: 1}, {b: 1}], [{a: 1, c: 1}, {b: 1, c: 1}]], () => {
- assert.docEq(coll.find({a: {$lt: 3}}).sort({c: 1, a: 1}).toArray(),
- [{_id: 6, a: 2, b: 6, c: 7, d: 0}, {_id: 2, a: 1, b: 5, c: 9, d: 1}]);
+ assert.docEq([{_id: 6, a: 2, b: 6, c: 7, d: 0}, {_id: 2, a: 1, b: 5, c: 9, d: 1}],
+ coll.find({a: {$lt: 3}}).sort({c: 1, a: 1}).toArray());
- assert.docEq(coll.find({$or: [{a: {$gt: 8}}, {b: {$lt: 2}}]}).sort({c: 1}).toArray(), [
- {_id: 13, a: 9, b: 1.5, d: 1},
- {_id: 5, a: 9, b: 1, c: 5, d: 1},
- {_id: 12, a: 9, c: 5.5, d: 1}
- ]);
+ assert.docEq(
+ [
+ {_id: 13, a: 9, b: 1.5, d: 1},
+ {_id: 5, a: 9, b: 1, c: 5, d: 1},
+ {_id: 12, a: 9, c: 5.5, d: 1}
+ ],
+ coll.find({$or: [{a: {$gt: 8}}, {b: {$lt: 2}}]}).sort({c: 1}).toArray());
assert.docEq(
- coll.find(
- {$or: [{a: {$gt: 8}}, {$and: [{b: {$lt: 5}}, {$or: [{c: {$lt: 5}}, {d: 1}]}]}]})
- .sort({c: 1})
- .toArray(),
[
{_id: 13, a: 9, b: 1.5, d: 1},
{_id: 1, a: 8, b: 3, c: 4, d: 0},
@@ -56,18 +54,23 @@ runWithDifferentIndexes(
{_id: 5, a: 9, b: 1, c: 5, d: 1},
{_id: 12, a: 9, c: 5.5, d: 1},
{_id: 9, a: 7, b: 2, c: 6, d: 1}
- ]);
+ ],
+ coll.find(
+ {$or: [{a: {$gt: 8}}, {$and: [{b: {$lt: 5}}, {$or: [{c: {$lt: 5}}, {d: 1}]}]}]})
+ .sort({c: 1})
+ .toArray());
- assert.docEq(coll.find({$or: [{a: {$gt: 6}}, {b: {$lt: 4}}]}).sort({a: 1, b: 1}).toArray(),
- [
- {_id: 10, b: 3, c: 4.5, d: 0},
- {_id: 9, a: 7, b: 2, c: 6, d: 1},
- {_id: 1, a: 8, b: 3, c: 4, d: 0},
- {_id: 11, a: 8, b: 3.5, d: 0},
- {_id: 12, a: 9, c: 5.5, d: 1},
- {_id: 5, a: 9, b: 1, c: 5, d: 1},
- {_id: 13, a: 9, b: 1.5, d: 1}
- ]);
+ assert.docEq(
+ [
+ {_id: 10, b: 3, c: 4.5, d: 0},
+ {_id: 9, a: 7, b: 2, c: 6, d: 1},
+ {_id: 1, a: 8, b: 3, c: 4, d: 0},
+ {_id: 11, a: 8, b: 3.5, d: 0},
+ {_id: 12, a: 9, c: 5.5, d: 1},
+ {_id: 5, a: 9, b: 1, c: 5, d: 1},
+ {_id: 13, a: 9, b: 1.5, d: 1}
+ ],
+ coll.find({$or: [{a: {$gt: 6}}, {b: {$lt: 4}}]}).sort({a: 1, b: 1}).toArray());
assert.sameMembers(coll.find({$or: [{a: {$gt: 6}}, {b: {$lt: 4}}]}).toArray(), [
{_id: 9, a: 7, b: 2, c: 6, d: 1},
@@ -122,7 +125,8 @@ runWithDifferentIndexes(
}),
[{c: null}, {c: null}, {c: 4}, {c: 5}, {c: 5.5}, {c: 6}]);
- assert.docEq(coll.find({$or: [{a: {$gt: 6}}, {b: {$lt: 4}}]}, {c: 1, _id: 0})
+ assert.docEq([{c: null}, {c: null}, {c: 4}, {c: 4.5}, {c: 5}, {c: 5.5}, {c: 6}],
+ coll.find({$or: [{a: {$gt: 6}}, {b: {$lt: 4}}]}, {c: 1, _id: 0})
.sort({c: 1})
.toArray()
.map(obj => {
@@ -130,7 +134,6 @@ runWithDifferentIndexes(
obj.c = null;
}
return obj;
- }),
- [{c: null}, {c: null}, {c: 4}, {c: 4.5}, {c: 5}, {c: 5.5}, {c: 6}]);
+ }));
});
})();
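and_or_index_sort.js mixes two helpers with different strictness. A sketch of the distinction, using a hypothetical collection: assert.docEq() compares result arrays element by element (it only normalizes field order inside each document), while assert.sameMembers() ignores array order.

const orderColl = db.doceq_order_example; // hypothetical name
orderColl.drop();
assert.commandWorked(orderColl.insert([{_id: 1, a: 2}, {_id: 2, a: 1}]));
// docEq is order-sensitive across the array, so the sort matters here...
assert.docEq([{_id: 2, a: 1}, {_id: 1, a: 2}], orderColl.find().sort({a: 1}).toArray());
// ...while sameMembers accepts the documents in any order.
assert.sameMembers(orderColl.find().toArray(), [{_id: 1, a: 2}, {_id: 2, a: 1}]);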
diff --git a/jstests/core/awaitdata_getmore_cmd.js b/jstests/core/awaitdata_getmore_cmd.js
index e9d5454f45b..5efcf6590ef 100644
--- a/jstests/core/awaitdata_getmore_cmd.js
+++ b/jstests/core/awaitdata_getmore_cmd.js
@@ -235,7 +235,7 @@ try {
assert.eq(cmdRes.cursor.nextBatch.length,
1,
'Collection documents: ' + tojson(db.await_data.find({}).toArray()));
- assert.docEq(cmdRes.cursor.nextBatch[0], {_id: "match", x: 1});
+ assert.docEq({_id: "match", x: 1}, cmdRes.cursor.nextBatch[0]);
} finally {
db.setLogLevel(originalCmdLogLevel, 'command');
db.setLogLevel(originalQueryLogLevel, 'query');
diff --git a/jstests/core/crud_api.js b/jstests/core/crud_api.js
index a6679cde794..de8d72e50af 100644
--- a/jstests/core/crud_api.js
+++ b/jstests/core/crud_api.js
@@ -48,9 +48,9 @@ var crudAPISpecTests = function crudAPISpecTests() {
};
}
- function checkResultObject(first, second) {
+ function checkResultObject(expected, actual) {
// Only assert on the "modifiedCount" property
- assert.docEq(first, second);
+ assert.docEq(expected, actual);
}
// Setup executors
@@ -659,7 +659,7 @@ var crudAPISpecTests = function crudAPISpecTests() {
// Simple projection
var result = coll.find({}).sort({a: 1}).limit(1).skip(1).projection({_id: 0, a: 1}).toArray();
- assert.docEq(result, [{a: 1}]);
+ assert.docEq([{a: 1}], result);
// Simple tailable cursor
var cursor = coll.find({}).sort({a: 1}).tailable();
diff --git a/jstests/core/doc_validation.js b/jstests/core/doc_validation.js
index bff7ade34c8..f3ebbd03a5f 100644
--- a/jstests/core/doc_validation.js
+++ b/jstests/core/doc_validation.js
@@ -321,7 +321,7 @@ const expectedError = {
details:
{operatorName: "$eq", specifiedAs: {a: 1}, reason: "comparison failed", consideredValue: 2}
};
-assert.docEq(errorInfo, expectedError, tojson(res));
+assert.docEq(expectedError, errorInfo, tojson(res));
// Insert a valid document.
assert.commandWorked(coll.insert({_id: 1, a: 1}));
@@ -353,6 +353,6 @@ for (const command of [updateCommand, findAndModifyCommand]) {
consideredValue: 2
}
};
- assert.docEq(errorInfo, expectedError, tojson(res));
+ assert.docEq(expectedError, errorInfo, tojson(res));
}
})();
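The expected/actual documents compared above come from the server's detailed document-validation errors. A sketch of how that error detail is produced, assuming a server that emits errInfo details and using a hypothetical collection:

const vColl = db.doc_validation_example; // hypothetical name
vColl.drop();
assert.commandWorked(db.createCollection(vColl.getName(), {validator: {a: 1}}));
const writeRes = vColl.insert({_id: 0, a: 2}); // fails validation
// The generated detail document is what the tests compare as 'actual'.
const details = writeRes.getWriteError().errInfo.details;
assert.eq("$eq", details.operatorName);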
diff --git a/jstests/core/doc_validation_error.js b/jstests/core/doc_validation_error.js
index 22815a5d787..3c2f9b1d918 100644
--- a/jstests/core/doc_validation_error.js
+++ b/jstests/core/doc_validation_error.js
@@ -42,8 +42,8 @@ function executeDocumentValidationTestCase(testCase) {
        // Verify that document validation failed and that the document validation error
        // matches the expected error.
assertDocumentValidationFailure(result, coll);
- assert.docEq(result.getWriteError().errInfo.details,
- testCase.expectedError,
+ assert.docEq(testCase.expectedError,
+ result.getWriteError().errInfo.details,
`Test case ${testCase.name}`);
}
diff --git a/jstests/core/expr_index_use.js b/jstests/core/expr_index_use.js
index 95a5699eb48..a0f85bc69c9 100644
--- a/jstests/core/expr_index_use.js
+++ b/jstests/core/expr_index_use.js
@@ -111,7 +111,7 @@ function confirmExpectedExprExecution(expr, metricsToCheck, collation) {
const stage = getPlanStageFunc(explain, "IXSCAN");
assert.neq(null, stage, tojson(explain));
assert(stage.hasOwnProperty("keyPattern"), tojson(explain));
- assert.docEq(stage.keyPattern, metricsToCheck.expectedIndex, tojson(explain));
+ assert.docEq(metricsToCheck.expectedIndex, stage.keyPattern, tojson(explain));
} else {
assert(getPlanStageFunc(explain, "COLLSCAN"), tojson(explain));
}
diff --git a/jstests/core/find_and_modify_hint.js b/jstests/core/find_and_modify_hint.js
index 3636c4f0cdc..9298e92dd74 100644
--- a/jstests/core/find_and_modify_hint.js
+++ b/jstests/core/find_and_modify_hint.js
@@ -76,7 +76,7 @@ const coll = db.jstests_find_and_modify_hint;
famUpdateCmd =
{findAndModify: coll.getName(), query: {}, update: {$set: {y: 1}}, hint: {s: 1}, new: true};
let res = assert.commandWorked(coll.runCommand(famUpdateCmd));
- assert.docEq(res.value, {_id: 2, x: 1, s: 0, y: 1});
+ assert.docEq({_id: 2, x: 1, s: 0, y: 1}, res.value);
    // An update hinting a sparse index with the upsert option can result in an insert even
    // if the correct behaviour would be to update an existing document.
@@ -91,13 +91,13 @@ const coll = db.jstests_find_and_modify_hint;
};
res = assert.commandWorked(coll.runCommand(famUpdateCmd));
assert.eq(res.lastErrorObject.upserted, 3); // value of _id
- assert.docEq(res.value, {_id: 3, x: 2, y: 1});
+ assert.docEq({_id: 3, x: 2, y: 1}, res.value);
// Make sure an indexed document gets deleted when index hint is provided.
assert.commandWorked(coll.insert({x: 1}));
const famRemoveCmd = {findAndModify: coll.getName(), query: {x: 1}, remove: true, hint: {s: 1}};
res = assert.commandWorked(coll.runCommand(famRemoveCmd));
- assert.docEq(res.value, {_id: 2, x: 1, s: 0, y: 1});
+ assert.docEq({_id: 2, x: 1, s: 0, y: 1}, res.value);
})();
(function shellHelpersTest() {
@@ -110,24 +110,24 @@ const coll = db.jstests_find_and_modify_hint;
// the sparse index.
let newDoc =
coll.findAndModify({query: {x: 1}, update: {$set: {y: 2}}, hint: {s: 1}, new: true});
- assert.docEq(newDoc, {_id: 2, x: 1, s: 0, y: 2});
+ assert.docEq({_id: 2, x: 1, s: 0, y: 2}, newDoc);
    // Insert a document that will not be in the sparse index. An update hinting the sparse
    // index should result in an upsert.
assert.commandWorked(coll.insert({_id: 3, x: 2}));
newDoc = coll.findOneAndUpdate(
{x: 2}, {$set: {_id: 4, y: 2}}, {hint: {s: 1}, upsert: true, returnNewDocument: true});
- assert.docEq(newDoc, {_id: 4, x: 2, y: 2});
+ assert.docEq({_id: 4, x: 2, y: 2}, newDoc);
// Similarly, hinting the sparse index for a replacement should result in an upsert.
assert.commandWorked(coll.insert({_id: 5, x: 3}));
newDoc = coll.findOneAndReplace(
{x: 3}, {_id: 6, y: 2}, {hint: {s: 1}, upsert: true, returnNewDocument: true});
- assert.docEq(newDoc, {_id: 6, y: 2});
+ assert.docEq({_id: 6, y: 2}, newDoc);
// Make sure an indexed document gets deleted when index hint is provided.
newDoc = coll.findOneAndDelete({x: 2}, {hint: {s: 1}});
- assert.docEq(newDoc, {_id: 3, x: 2});
+ assert.docEq({_id: 3, x: 2}, newDoc);
})();
(function failedHintTest() {
diff --git a/jstests/core/find_and_modify_server6865.js b/jstests/core/find_and_modify_server6865.js
index 66d1453e6fa..7aba57e0030 100644
--- a/jstests/core/find_and_modify_server6865.js
+++ b/jstests/core/find_and_modify_server6865.js
@@ -35,7 +35,7 @@ function testFAMWorked(insert, cmdObj, expected) {
if (!cmdObj['new']) {
// Test that the find operation returns the expected result.
res = t.findOne(cmdObj['query'], cmdObj['fields']);
- assert.docEq(res, expected, 'positional projection failed for find');
+ assert.docEq(expected, res, 'positional projection failed for find');
}
// Test that the findAndModify command returns the expected result.
@@ -46,7 +46,7 @@ function testFAMWorked(insert, cmdObj, expected) {
if (cmdObj['new']) {
// Test that the find operation returns the expected result.
res = t.findOne(cmdObj['query'], cmdObj['fields']);
- assert.docEq(res, expected, 'positional projection failed for find');
+ assert.docEq(expected, res, 'positional projection failed for find');
}
}
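The branches above hinge on findAndModify's 'new' option. A minimal sketch with a hypothetical collection: with new:true the command returns the post-update document in 'value'.

const famColl = db.fam_new_example; // hypothetical name
famColl.drop();
assert.commandWorked(famColl.insert({_id: 1, x: 1}));
const famRes = assert.commandWorked(famColl.runCommand(
    {findAndModify: famColl.getName(), query: {_id: 1}, update: {$set: {y: 2}}, new: true}));
assert.docEq({_id: 1, x: 1, y: 2}, famRes.value);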
diff --git a/jstests/core/insert_one.js b/jstests/core/insert_one.js
index 9a1a6d393f8..4c0a5d2af6f 100644
--- a/jstests/core/insert_one.js
+++ b/jstests/core/insert_one.js
@@ -18,8 +18,8 @@ assert.eq(col.find().itcount(), 0, "collection should still be empty");
var result = col.insertOne({abc: 'def'});
assert(result.acknowledged, "insertOne should succeed on documents");
-assert.docEq(col.findOne({_id: result.insertedId}),
- {_id: result.insertedId, abc: 'def'},
+assert.docEq({_id: result.insertedId, abc: 'def'},
+ col.findOne({_id: result.insertedId}),
"simple document not equal to collection find result");
var doc = new Number();
@@ -32,7 +32,7 @@ assert(result.acknowledged, "insertOne should succeed on documents");
assert(!('zeroPad' in col.findOne({_id: result.insertedId})),
"inserted result should not have functions from the number object's prototype");
-assert.docEq(col.findOne({_id: result.insertedId}),
- {_id: result.insertedId, x: doc.x},
+assert.docEq({_id: result.insertedId, x: doc.x},
+ col.findOne({_id: result.insertedId}),
"document with prototype not equal to collection find result");
})();
diff --git a/jstests/core/mod_overflow.js b/jstests/core/mod_overflow.js
index 2ec33300ce7..81a36bfa24a 100644
--- a/jstests/core/mod_overflow.js
+++ b/jstests/core/mod_overflow.js
@@ -16,13 +16,13 @@ assert.commandWorked(testColl.insert(insertedDocs));
// For each possible integral representation of -1, confirm that overflow does not occur.
for (let divisor of [-1.0, NumberInt("-1"), NumberLong("-1"), NumberDecimal("-1")]) {
- assert.docEq(testColl.find({val: {$mod: [divisor, 0]}}).sort({_id: 1}).toArray(), insertedDocs);
+ assert.docEq(insertedDocs, testColl.find({val: {$mod: [divisor, 0]}}).sort({_id: 1}).toArray());
assert.docEq(
+ insertedDocs,
testColl
.aggregate(
[{$match: {$expr: {$eq: [0, {$mod: ["$val", divisor]}]}}}, {$sort: {_id: 1}}])
- .toArray(),
- insertedDocs);
+ .toArray());
// Confirm that overflow does not occur during agg expression evaluation. Also confirm that the
// correct type is returned for each combination of input types.
@@ -37,9 +37,9 @@ for (let divisor of [-1.0, NumberInt("-1"), NumberLong("-1"), NumberDecimal("-1"
})
];
assert.docEq(
+ expectedResults,
testColl
.aggregate([{$project: {val: 1, modVal: {$mod: ["$val", divisor]}}}, {$sort: {_id: 1}}])
- .toArray(),
- expectedResults);
+ .toArray());
}
})();
\ No newline at end of file
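The test above guards a classic signed-overflow edge case. A sketch with a hypothetical collection: INT64_MIN divided by -1 must yield remainder 0 rather than overflow.

const modColl = db.mod_overflow_example; // hypothetical name
modColl.drop();
assert.commandWorked(modColl.insert({_id: 0, val: NumberLong("-9223372036854775808")}));
// -2^63 % -1 evaluates to 0; the query must not trip signed overflow.
assert.docEq([{_id: 0, val: NumberLong("-9223372036854775808")}],
             modColl.find({val: {$mod: [-1, 0]}}).toArray());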
diff --git a/jstests/core/mod_special_values.js b/jstests/core/mod_special_values.js
index 992fe9bb324..7e8d3e8d716 100644
--- a/jstests/core/mod_special_values.js
+++ b/jstests/core/mod_special_values.js
@@ -24,7 +24,7 @@ function executeTestCase(collection, testCase) {
if (testCase.hasOwnProperty("expectedError")) {
assert.throwsWithCode(findCommand, testCase.expectedError, [], testCase);
} else {
- assert.docEq(findCommand(), testCase.expectedResults, testCase);
+ assert.docEq(testCase.expectedResults, findCommand(), testCase);
}
}
diff --git a/jstests/core/projection_dotted_paths.js b/jstests/core/projection_dotted_paths.js
index 1f097b287f1..7f6eb1bfef2 100644
--- a/jstests/core/projection_dotted_paths.js
+++ b/jstests/core/projection_dotted_paths.js
@@ -38,7 +38,7 @@ assert(isIndexOnly(db, getWinningPlan(explain.queryPlanner)));
// Project exactly the set of fields in the index but also include _id. Verify that the
// projection is computed correctly and that the plan cannot be covered.
resultDoc = coll.findOne({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1});
-assert.docEq(resultDoc, {_id: 1, a: 1, b: {c: 1, d: 1}, c: 1});
+assert.docEq({_id: 1, a: 1, b: {c: 1, d: 1}, c: 1}, resultDoc);
explain = coll.find({a: 1}, {_id: 0, "b.c": 1, c: 1}).explain("queryPlanner");
explain = coll.find({a: 1}, {_id: 1, a: 1, "b.c": 1, "b.d": 1, c: 1}).explain("queryPlanner");
assert(isIxscan(db, getWinningPlan(explain.queryPlanner)));
@@ -46,7 +46,7 @@ assert(!isIndexOnly(db, getWinningPlan(explain.queryPlanner)));
// Project a not-indexed field that exists in the collection. The plan should not be covered.
resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1});
-assert.docEq(resultDoc, {b: {c: 1, e: 1}, c: 1});
+assert.docEq({b: {c: 1, e: 1}, c: 1}, resultDoc);
explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner");
assert(isIxscan(db, getWinningPlan(explain.queryPlanner)));
assert(!isIndexOnly(db, getWinningPlan(explain.queryPlanner)));
@@ -54,14 +54,14 @@ assert(!isIndexOnly(db, getWinningPlan(explain.queryPlanner)));
// Project a not-indexed field that does not exist in the collection. The plan should not be
// covered.
resultDoc = coll.findOne({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1});
-assert.docEq(resultDoc, {b: {c: 1}, c: 1});
+assert.docEq({b: {c: 1}, c: 1}, resultDoc);
explain = coll.find({a: 1}, {_id: 0, "b.c": 1, "b.z": 1, c: 1}).explain("queryPlanner");
assert(isIxscan(db, getWinningPlan(explain.queryPlanner)));
assert(!isIndexOnly(db, getWinningPlan(explain.queryPlanner)));
// Verify that the correct projection is computed with an idhack query.
resultDoc = coll.findOne({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1});
-assert.docEq(resultDoc, {b: {c: 1, e: 1}, c: 1});
+assert.docEq({b: {c: 1, e: 1}, c: 1}, resultDoc);
explain = coll.find({_id: 1}, {_id: 0, "b.c": 1, "b.e": 1, c: 1}).explain("queryPlanner");
assert(isIdhack(db, getWinningPlan(explain.queryPlanner)), explain);
diff --git a/jstests/core/projection_semantics.js b/jstests/core/projection_semantics.js
index 3c6d71a976e..cf71f951755 100644
--- a/jstests/core/projection_semantics.js
+++ b/jstests/core/projection_semantics.js
@@ -41,8 +41,8 @@ function testInputOutput({input, projection, expectedOutput, interestingIndexes
}
assert.commandWorked(coll.insert(input));
assert.docEq(
- coll.find({_id: input._id}, projection).limit(1).hint({$natural: 1}).toArray()[0],
expectedOutput,
+ coll.find({_id: input._id}, projection).limit(1).hint({$natural: 1}).toArray()[0],
() =>
tojson(coll.find({_id: input._id}, projection).limit(1).hint({$natural: 1}).explain()));
@@ -50,16 +50,16 @@ function testInputOutput({input, projection, expectedOutput, interestingIndexes
assert.commandWorkedOrFailedWithCode(coll.createIndex(indexSpec),
ErrorCodes.IndexAlreadyExists);
assert.docEq(
- coll.find({_id: input._id}, projection).hint(indexSpec).toArray()[0],
expectedOutput,
+ coll.find({_id: input._id}, projection).hint(indexSpec).toArray()[0],
() => tojson(
coll.find({_id: input._id}, projection).hint(indexSpec).explain("executionStats")));
}
assert.docEq(
+ expectedOutput,
coll.aggregate([{$match: {_id: input._id}}, {$project: projection}], {hint: {$natural: 1}})
- .toArray()[0],
- expectedOutput);
+ .toArray()[0]);
}
// The basics: what happens when I include a top-level field?
diff --git a/jstests/core/push_sort.js b/jstests/core/push_sort.js
index 5b599e04ec8..2d74a3909f8 100644
--- a/jstests/core/push_sort.js
+++ b/jstests/core/push_sort.js
@@ -63,7 +63,7 @@ var res = t.update({_id: 8}, {$push: {x: {$sort: {a: -1}}}});
// Test that when given a document with a $sort field that matches the form of a plain document
// (instead of a $sort modifier document), $push will add that field to the specified array.
assert.commandWorked(res);
-assert.docEq(t.findOne({_id: 8}), {_id: 8, x: [{a: 1}, {a: 2}, {$sort: {a: -1}}]});
+assert.docEq({_id: 8, x: [{a: 1}, {a: 2}, {$sort: {a: -1}}]}, t.findOne({_id: 8}));
t.save({_id: 100, x: [{a: 1}]});
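The comment above contrasts $sort as a $push modifier with $sort as a literal field. A sketch of the modifier form, using a hypothetical collection:

const pushColl = db.push_sort_example; // hypothetical name
pushColl.drop();
assert.commandWorked(pushColl.insert({_id: 1, x: [{a: 2}]}));
// With $each present, $sort acts as a modifier that orders the array after the push;
// without $each (as in the hunk above), {$sort: ...} is pushed as a literal element.
assert.commandWorked(pushColl.update(
    {_id: 1}, {$push: {x: {$each: [{a: 1}], $sort: {a: 1}}}}));
assert.docEq({_id: 1, x: [{a: 1}, {a: 2}]}, pushColl.findOne({_id: 1}));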
diff --git a/jstests/core/text_covered_matching.js b/jstests/core/text_covered_matching.js
index 49cab673e3d..df4fced499a 100644
--- a/jstests/core/text_covered_matching.js
+++ b/jstests/core/text_covered_matching.js
@@ -52,7 +52,7 @@ assert.eq(explainResult.executionStats.nReturned,
let filteringStage = getPlanStages(explainResult, "IXSCAN")[0];
assert.hasFields(
filteringStage, ["filter"], "No filter found on IXSCAN: " + tojson(filteringStage));
-assert.docEq(filteringStage.filter, {"b": {"$eq": 1}}, "Incorrect filter on IXSCAN.");
+assert.docEq({"b": {"$eq": 1}}, filteringStage.filter, "Incorrect filter on IXSCAN.");
// When we include the text score in the projection, we use a TEXT_OR in our query plan, which
// changes how filtering is done. We should get the same result, however.
@@ -73,7 +73,7 @@ assert.eq(explainResult.executionStats.nReturned,
filteringStage = getPlanStages(explainResult, "TEXT_OR")[0];
assert.hasFields(
filteringStage, ["filter"], "No filter found on TEXT_OR: " + tojson(filteringStage));
-assert.docEq(filteringStage.filter, {"b": {"$eq": 1}}, "Incorrect filter on TEXT_OR.");
+assert.docEq({"b": {"$eq": 1}}, filteringStage.filter, "Incorrect filter on TEXT_OR.");
// When we search more than one term, we perform filtering in the OR stage rather than the
// underlying IXSCANs, but we should get an equivalent result.
@@ -91,7 +91,7 @@ assert.eq(explainResult.executionStats.nReturned,
"Unexpected number of results returned: " + tojson(explainResult));
filteringStage = getPlanStages(explainResult, "OR")[0];
assert.hasFields(filteringStage, ["filter"], "No filter found on OR: " + tojson(filteringStage));
-assert.docEq(filteringStage.filter, {"b": {"$eq": 1}}, "Incorrect filter on OR.");
+assert.docEq({"b": {"$eq": 1}}, filteringStage.filter, "Incorrect filter on OR.");
//
// Test the query {$text: {$search: "hello"}, c: 1} with and without the 'textScore' in the
diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort.js b/jstests/core/timeseries/bucket_unpacking_with_sort.js
index 97256a2d457..13248964ec8 100644
--- a/jstests/core/timeseries/bucket_unpacking_with_sort.js
+++ b/jstests/core/timeseries/bucket_unpacking_with_sort.js
@@ -207,7 +207,7 @@ const runRewritesTest = (sortSpec,
const ogExplainFull = testColl.explain().aggregate(ogPipeline, options);
// Assert correct
- assert.docEq(optResults, ogResults);
+ assert.docEq(ogResults, optResults);
// Make sure we're not testing trivial / empty queries.
assert.gt(ogResults.length, 0, 'Expected the queries in this test to have nonempty results');
@@ -242,7 +242,7 @@ const runRewritesTest = (sortSpec,
let foundMatch = findFirstMatch(optExplain);
if (!precise) {
assert.docEq(
- foundMatch, bucketSpanMatch, 'Expected an extra $match to check the bucket span');
+ bucketSpanMatch, foundMatch, 'Expected an extra $match to check the bucket span');
} else {
        // (We don't have an 'assert.notDocEq' helper, but docEq is 'eq' + 'sortDoc'.)
assert.neq(sortDoc(foundMatch),
diff --git a/jstests/core/timeseries/timeseries_bucket_manual_removal.js b/jstests/core/timeseries/timeseries_bucket_manual_removal.js
index f704344c1bd..698aed1104f 100644
--- a/jstests/core/timeseries/timeseries_bucket_manual_removal.js
+++ b/jstests/core/timeseries/timeseries_bucket_manual_removal.js
@@ -48,23 +48,23 @@ TimeseriesTest.run((insert) => {
];
assert.commandWorked(insert(coll, docs1));
- assert.docEq(coll.find().toArray(), docs1);
+ assert.docEq(docs1, coll.find().toArray());
let buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 1, 'Expected one bucket but found ' + tojson(buckets));
const bucketId = buckets[0]._id;
assert.commandWorked(bucketsColl.remove({_id: bucketId}));
- assert.docEq(coll.find().toArray(), []);
+ assert.docEq([], coll.find().toArray());
buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 0, 'Expected no buckets but found ' + tojson(buckets));
assert.commandWorked(bucketsColl.remove({_id: bucketId}));
- assert.docEq(coll.find().toArray(), []);
+ assert.docEq([], coll.find().toArray());
buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 0, 'Expected no buckets but found ' + tojson(buckets));
assert.commandWorked(coll.insert(docs2, {ordered: false}));
- assert.docEq(coll.find().toArray(), docs2);
+ assert.docEq(docs2, coll.find().toArray());
buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 1, 'Expected one bucket but found ' + tojson(buckets));
assert.neq(buckets[0]._id, bucketId);
diff --git a/jstests/core/timeseries/timeseries_collation.js b/jstests/core/timeseries/timeseries_collation.js
index ee39304c96f..76ef7fca54a 100644
--- a/jstests/core/timeseries/timeseries_collation.js
+++ b/jstests/core/timeseries/timeseries_collation.js
@@ -66,7 +66,7 @@ TimeseriesTest.run((insert) => {
const results = coll.find().sort({_id: 1}).toArray();
assert.eq(docs.length, results.length);
for (let i = 0; i < results.length; i++) {
- assert.docEq(results[i], docs[i]);
+ assert.docEq(docs[i], results[i]);
}
// Now let's check that min and max appropriately ignore collation for field names, but not
diff --git a/jstests/core/timeseries/timeseries_delete_hint.js b/jstests/core/timeseries/timeseries_delete_hint.js
index 1f02a1ea0d7..c28a5f96d01 100644
--- a/jstests/core/timeseries/timeseries_delete_hint.js
+++ b/jstests/core/timeseries/timeseries_delete_hint.js
@@ -73,7 +73,7 @@ const validateDeleteIndex = (docsToInsert,
: assert.commandWorked(
testDB.runCommand({delete: coll.getName(), deletes: deleteQuery}));
assert.eq(res["n"], expectedNRemoved);
- assert.docEq(coll.find({}, {_id: 0}).toArray(), expectedRemainingDocs);
+ assert.docEq(expectedRemainingDocs, coll.find({}, {_id: 0}).toArray());
assert(coll.drop());
},
docsToInsert,
diff --git a/jstests/core/timeseries/timeseries_filter_extended_range.js b/jstests/core/timeseries/timeseries_filter_extended_range.js
index 86dc112dbe0..9294dd44300 100644
--- a/jstests/core/timeseries/timeseries_filter_extended_range.js
+++ b/jstests/core/timeseries/timeseries_filter_extended_range.js
@@ -54,14 +54,14 @@ function runTest(underflow, overflow, query, results) {
// avoid BUS and index selection, so we sort after gathering the results.
const aggActuals = tsColl.aggregate(pipeline).toArray();
aggActuals.sort(cmpTimeFields);
- assert.docEq(aggActuals, results);
+ assert.docEq(results, aggActuals);
// Verify the equivalent find command. We again don't want to go through a plan that
// encourages a sort order to avoid BUS and index selection, so we sort after gathering the
// results.
let findActuals = tsColl.find(query, {_id: 0, [timeFieldName]: 1}).toArray();
findActuals.sort(cmpTimeFields);
- assert.docEq(findActuals, results);
+ assert.docEq(results, findActuals);
}
runTest(false,
diff --git a/jstests/core/timeseries/timeseries_find.js b/jstests/core/timeseries/timeseries_find.js
index 5b37ff20c9d..9d1356b2d3f 100644
--- a/jstests/core/timeseries/timeseries_find.js
+++ b/jstests/core/timeseries/timeseries_find.js
@@ -41,7 +41,7 @@ function runTest(docs, query, results) {
docs.forEach(d => tsColl.insert(Object.assign({[timeFieldName]: new Date("2021-01-01")}, d)));
    // Check that the query returns exactly the expected results.
- assert.docEq(tsColl.aggregate(pipeline).toArray(), results);
+ assert.docEq(results, tsColl.aggregate(pipeline).toArray());
// Ensure $type operator was not used.
const explain = tsColl.explain().aggregate(pipeline);
diff --git a/jstests/core/timeseries/timeseries_geonear_measurements.js b/jstests/core/timeseries/timeseries_geonear_measurements.js
index c973aeee94f..a79b7ed9809 100644
--- a/jstests/core/timeseries/timeseries_geonear_measurements.js
+++ b/jstests/core/timeseries/timeseries_geonear_measurements.js
@@ -79,9 +79,7 @@ function runFlatExamples(coll, isTimeseries) {
{$project: {_id: 0, loc: 1, distance: {$floor: "$distance"}}},
{$limit: 1},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 0], distance: 180},
- ]);
+ assert.docEq([{loc: [0, 0], distance: 180}], coll.aggregate(pipeline).toArray());
// For the rest of the examples, query from [0, 0] because it makes distances more convenient.
@@ -96,18 +94,20 @@ function runFlatExamples(coll, isTimeseries) {
},
{$project: {_id: 0, loc: 1, distance: 1}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 0], distance: 0},
- {loc: [0, 1], distance: 1},
- {loc: [0, 2], distance: 2},
- {loc: [0, 3], distance: 3},
- {loc: [0, 4], distance: 4},
- {loc: [0, 5], distance: 5},
- {loc: [0, 6], distance: 6},
- {loc: [0, 7], distance: 7},
- {loc: [0, 8], distance: 8},
- {loc: [0, 9], distance: 9},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 0], distance: 0},
+ {loc: [0, 1], distance: 1},
+ {loc: [0, 2], distance: 2},
+ {loc: [0, 3], distance: 3},
+ {loc: [0, 4], distance: 4},
+ {loc: [0, 5], distance: 5},
+ {loc: [0, 6], distance: 6},
+ {loc: [0, 7], distance: 7},
+ {loc: [0, 8], distance: 8},
+ {loc: [0, 9], distance: 9},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
    // Since we don't support '2d' indexes on time-series metrics, and '2dsphere' indexes can't
// answer flat queries, we always expect a collscan for timeseries.
@@ -129,13 +129,15 @@ function runFlatExamples(coll, isTimeseries) {
{$limit: 5},
{$project: {_id: 0, loc: 1, distance: 1}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 0], distance: 0},
- {loc: [0, 1], distance: 1},
- {loc: [0, 2], distance: 2},
- {loc: [0, 3], distance: 3},
- {loc: [0, 4], distance: 4},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 0], distance: 0},
+ {loc: [0, 1], distance: 1},
+ {loc: [0, 2], distance: 2},
+ {loc: [0, 3], distance: 3},
+ {loc: [0, 4], distance: 4},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
assert(aggPlanHasStage(plan, 'COLLSCAN'), plan);
@@ -155,15 +157,17 @@ function runFlatExamples(coll, isTimeseries) {
},
{$project: {_id: 0, loc: 1, distance: 1}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 0], distance: 0},
- {loc: [0, 1], distance: 1},
- {loc: [0, 2], distance: 2},
- {loc: [0, 3], distance: 3},
- {loc: [0, 4], distance: 4},
- {loc: [0, 5], distance: 5},
- {loc: [0, 6], distance: 6},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 0], distance: 0},
+ {loc: [0, 1], distance: 1},
+ {loc: [0, 2], distance: 2},
+ {loc: [0, 3], distance: 3},
+ {loc: [0, 4], distance: 4},
+ {loc: [0, 5], distance: 5},
+ {loc: [0, 6], distance: 6},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
assert(aggPlanHasStage(plan, 'COLLSCAN'), plan);
@@ -183,15 +187,17 @@ function runFlatExamples(coll, isTimeseries) {
},
{$project: {_id: 0, loc: 1, distance: 1}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 3], distance: 3},
- {loc: [0, 4], distance: 4},
- {loc: [0, 5], distance: 5},
- {loc: [0, 6], distance: 6},
- {loc: [0, 7], distance: 7},
- {loc: [0, 8], distance: 8},
- {loc: [0, 9], distance: 9},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 3], distance: 3},
+ {loc: [0, 4], distance: 4},
+ {loc: [0, 5], distance: 5},
+ {loc: [0, 6], distance: 6},
+ {loc: [0, 7], distance: 7},
+ {loc: [0, 8], distance: 8},
+ {loc: [0, 9], distance: 9},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
assert(aggPlanHasStage(plan, 'COLLSCAN'), plan);
@@ -212,12 +218,14 @@ function runFlatExamples(coll, isTimeseries) {
},
{$project: {_id: 0, loc: 1, distance: 1}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 3], distance: 3},
- {loc: [0, 4], distance: 4},
- {loc: [0, 5], distance: 5},
- {loc: [0, 6], distance: 6},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 3], distance: 3},
+ {loc: [0, 4], distance: 4},
+ {loc: [0, 5], distance: 5},
+ {loc: [0, 6], distance: 6},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
assert(aggPlanHasStage(plan, 'COLLSCAN'), plan);
@@ -239,12 +247,14 @@ function runFlatExamples(coll, isTimeseries) {
},
{$project: {_id: 0, loc: 1, distance: 1}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 3], distance: 30},
- {loc: [0, 4], distance: 40},
- {loc: [0, 5], distance: 50},
- {loc: [0, 6], distance: 60},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 3], distance: 30},
+ {loc: [0, 4], distance: 40},
+ {loc: [0, 5], distance: 50},
+ {loc: [0, 6], distance: 60},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
assert(aggPlanHasStage(plan, 'COLLSCAN'), plan);
@@ -305,18 +315,20 @@ function runSphereExamples(coll, isTimeseries, has2dsphereIndex, scaleResult, qu
},
{$project: {_id: 0, loc: 1, distance: {$floor: {$multiply: [scaleResult, "$distance"]}}}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 9], distance: Math.floor(degreesToMeters(180 - 9))},
- {loc: [0, 8], distance: Math.floor(degreesToMeters(180 - 8))},
- {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
- {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
- {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
- {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
- {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
- {loc: [0, 2], distance: Math.floor(degreesToMeters(180 - 2))},
- {loc: [0, 1], distance: Math.floor(degreesToMeters(180 - 1))},
- {loc: [0, 0], distance: Math.floor(degreesToMeters(180 - 0))},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 9], distance: Math.floor(degreesToMeters(180 - 9))},
+ {loc: [0, 8], distance: Math.floor(degreesToMeters(180 - 8))},
+ {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
+ {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
+ {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
+ {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
+ {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
+ {loc: [0, 2], distance: Math.floor(degreesToMeters(180 - 2))},
+ {loc: [0, 1], distance: Math.floor(degreesToMeters(180 - 1))},
+ {loc: [0, 0], distance: Math.floor(degreesToMeters(180 - 0))},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
// Without a maxDistance we have to unpack every bucket and sort the events.
@@ -344,13 +356,15 @@ function runSphereExamples(coll, isTimeseries, has2dsphereIndex, scaleResult, qu
{$limit: 5},
{$project: {_id: 0, loc: 1, distance: {$floor: {$multiply: [scaleResult, "$distance"]}}}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 9], distance: Math.floor(degreesToMeters(180 - 9))},
- {loc: [0, 8], distance: Math.floor(degreesToMeters(180 - 8))},
- {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
- {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
- {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 9], distance: Math.floor(degreesToMeters(180 - 9))},
+ {loc: [0, 8], distance: Math.floor(degreesToMeters(180 - 8))},
+ {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
+ {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
+ {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
// Without a maxDistance we have to unpack every bucket and sort the events.
@@ -376,15 +390,17 @@ function runSphereExamples(coll, isTimeseries, has2dsphereIndex, scaleResult, qu
},
{$project: {_id: 0, loc: 1, distance: {$floor: {$multiply: [scaleResult, "$distance"]}}}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 9], distance: Math.floor(degreesToMeters(180 - 9))},
- {loc: [0, 8], distance: Math.floor(degreesToMeters(180 - 8))},
- {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
- {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
- {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
- {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
- {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 9], distance: Math.floor(degreesToMeters(180 - 9))},
+ {loc: [0, 8], distance: Math.floor(degreesToMeters(180 - 8))},
+ {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
+ {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
+ {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
+ {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
+ {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
// With maxDistance we can generate a $geoWithin predicate, which can use an index when
@@ -409,16 +425,18 @@ function runSphereExamples(coll, isTimeseries, has2dsphereIndex, scaleResult, qu
},
{$project: {_id: 0, loc: 1, distance: {$floor: {$multiply: [scaleResult, "$distance"]}}}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
- {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
- {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
- {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
- {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
- {loc: [0, 2], distance: Math.floor(degreesToMeters(180 - 2))},
- {loc: [0, 1], distance: Math.floor(degreesToMeters(180 - 1))},
- {loc: [0, 0], distance: Math.floor(degreesToMeters(180 - 0))},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
+ {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
+ {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
+ {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
+ {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
+ {loc: [0, 2], distance: Math.floor(degreesToMeters(180 - 2))},
+ {loc: [0, 1], distance: Math.floor(degreesToMeters(180 - 1))},
+ {loc: [0, 0], distance: Math.floor(degreesToMeters(180 - 0))},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
if (has2dsphereIndex) {
@@ -442,13 +460,15 @@ function runSphereExamples(coll, isTimeseries, has2dsphereIndex, scaleResult, qu
},
{$project: {_id: 0, loc: 1, distance: {$floor: {$multiply: [scaleResult, "$distance"]}}}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
- {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
- {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
- {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
- {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 7], distance: Math.floor(degreesToMeters(180 - 7))},
+ {loc: [0, 6], distance: Math.floor(degreesToMeters(180 - 6))},
+ {loc: [0, 5], distance: Math.floor(degreesToMeters(180 - 5))},
+ {loc: [0, 4], distance: Math.floor(degreesToMeters(180 - 4))},
+ {loc: [0, 3], distance: Math.floor(degreesToMeters(180 - 3))},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
if (has2dsphereIndex) {
@@ -473,13 +493,15 @@ function runSphereExamples(coll, isTimeseries, has2dsphereIndex, scaleResult, qu
},
{$project: {_id: 0, loc: 1, distance: {$floor: {$multiply: [scaleResult, "$distance"]}}}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), [
- {loc: [0, 7], distance: Math.floor(10 * degreesToMeters(180 - 7))},
- {loc: [0, 6], distance: Math.floor(10 * degreesToMeters(180 - 6))},
- {loc: [0, 5], distance: Math.floor(10 * degreesToMeters(180 - 5))},
- {loc: [0, 4], distance: Math.floor(10 * degreesToMeters(180 - 4))},
- {loc: [0, 3], distance: Math.floor(10 * degreesToMeters(180 - 3))},
- ]);
+ assert.docEq(
+ [
+ {loc: [0, 7], distance: Math.floor(10 * degreesToMeters(180 - 7))},
+ {loc: [0, 6], distance: Math.floor(10 * degreesToMeters(180 - 6))},
+ {loc: [0, 5], distance: Math.floor(10 * degreesToMeters(180 - 5))},
+ {loc: [0, 4], distance: Math.floor(10 * degreesToMeters(180 - 4))},
+ {loc: [0, 3], distance: Math.floor(10 * degreesToMeters(180 - 3))},
+ ],
+ coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
if (has2dsphereIndex) {
@@ -520,7 +542,7 @@ function runSphereExamples(coll, isTimeseries, has2dsphereIndex, scaleResult, qu
},
{$match: {no_such_field: 456}},
];
- assert.docEq(coll.aggregate(pipeline).toArray(), []);
+ assert.docEq([], coll.aggregate(pipeline).toArray());
plan = coll.explain().aggregate(pipeline);
if (isTimeseries) {
if (has2dsphereIndex) {
diff --git a/jstests/core/timeseries/timeseries_groupby_reorder.js b/jstests/core/timeseries/timeseries_groupby_reorder.js
index 29d07f8b4fa..0f6ee2aef6d 100644
--- a/jstests/core/timeseries/timeseries_groupby_reorder.js
+++ b/jstests/core/timeseries/timeseries_groupby_reorder.js
@@ -31,13 +31,14 @@ if (!isMongos(db)) {
{$group: {_id: '$meta', accmin: {$min: '$b'}, accmax: {$max: '$c'}}}
]);
- assert.docEq(res.stages[1], {
+ assert.docEq({
"$group":
{_id: "$meta", accmin: {"$min": "$control.min.b"}, accmax: {"$max": "$control.max.c"}}
- });
+ },
+ res.stages[1]);
}
const res = coll.aggregate([{$group: {_id: '$meta', accmin: {$min: '$b'}, accmax: {$max: '$c'}}}])
.toArray();
-assert.docEq(res, [{"_id": null, "accmin": 1, "accmax": 3}]);
+assert.docEq([{"_id": null, "accmin": 1, "accmax": 3}], res);
})();
diff --git a/jstests/core/timeseries/timeseries_hint.js b/jstests/core/timeseries/timeseries_hint.js
index 684005541c4..72a3d698a54 100644
--- a/jstests/core/timeseries/timeseries_hint.js
+++ b/jstests/core/timeseries/timeseries_hint.js
@@ -38,7 +38,7 @@ assert.commandWorked(coll.insert(docsAsc));
function runTest({command, expectedResult, expectedDirection}) {
const result = assert.commandWorked(db.runCommand(command));
- assert.docEq(result.cursor.firstBatch, expectedResult);
+ assert.docEq(expectedResult, result.cursor.firstBatch);
const plan = db.runCommand({explain: command});
const scan = getAggPlanStage(plan, 'COLLSCAN');
diff --git a/jstests/core/timeseries/timeseries_index_partial.js b/jstests/core/timeseries/timeseries_index_partial.js
index 7d97209173e..ec7a390c579 100644
--- a/jstests/core/timeseries/timeseries_index_partial.js
+++ b/jstests/core/timeseries/timeseries_index_partial.js
@@ -266,18 +266,21 @@ assert.sameMembers(buckets.getIndexes(), extraBucketIndexes.concat([
// Queries on the collection use the collection's collation by default.
assert.docEq(
- coll.find({}, {_id: 0, [metaField + '.x']: 1}).sort({[metaField + '.x']: 1}).toArray(), [
+ [
{[metaField]: {x: "500"}},
{[metaField]: {x: "500"}},
{[metaField]: {x: "1000"}},
{[metaField]: {x: "1000"}},
- ]);
- assert.docEq(coll.find({}, {_id: 0, a: 1}).sort({a: 1}).toArray(), [
- {a: "3"},
- {a: "3"},
- {a: "120"},
- {a: "120"},
- ]);
+ ],
+ coll.find({}, {_id: 0, [metaField + '.x']: 1}).sort({[metaField + '.x']: 1}).toArray());
+ assert.docEq(
+ [
+ {a: "3"},
+ {a: "3"},
+ {a: "120"},
+ {a: "120"},
+ ],
+ coll.find({}, {_id: 0, a: 1}).sort({a: 1}).toArray());
// Specifying a collation and partialFilterExpression together fails, even if the collation
// matches the collection's default collation.
@@ -300,8 +303,8 @@ assert.sameMembers(buckets.getIndexes(), extraBucketIndexes.concat([
{a: 1}, {name: "a_lt_25_default", partialFilterExpression: {a: {$lt: "25"}}}));
// Verify that the index contains what we expect.
- assert.docEq(coll.find({}, {_id: 0, a: 1}).hint("a_lt_25_default").toArray(),
- [{a: "3"}, {a: "3"}]);
+ assert.docEq([{a: "3"}, {a: "3"}],
+ coll.find({}, {_id: 0, a: 1}).hint("a_lt_25_default").toArray());
// Verify that the index is used when possible.
function checkPlanAndResult({predicate, collation, stageName, indexName, expectedResults}) {
@@ -318,7 +321,7 @@ assert.sameMembers(buckets.getIndexes(), extraBucketIndexes.concat([
}
const results = cur.toArray();
- assert.docEq(results, expectedResults);
+ assert.docEq(expectedResults, results);
}
// a < "25" can use the index, since the collations match.
diff --git a/jstests/core/timeseries/timeseries_index_stats.js b/jstests/core/timeseries/timeseries_index_stats.js
index 7cd7b27e766..5ac1b4c10d4 100644
--- a/jstests/core/timeseries/timeseries_index_stats.js
+++ b/jstests/core/timeseries/timeseries_index_stats.js
@@ -71,12 +71,12 @@ TimeseriesTest.run((insert) => {
const stat = indexStatsDocs[i];
assert(indexKeys.hasOwnProperty(stat.name),
'$indexStats returned unknown index: ' + stat.name + ': ' + tojson(indexStatsDocs));
- assert.docEq(indexKeys[stat.name],
- stat.key,
+ assert.docEq(stat.key,
+ indexKeys[stat.name],
'$indexStats returned unexpected top-level key for index: ' + stat.name +
': ' + tojson(indexStatsDocs));
- assert.docEq(indexKeys[stat.name],
- stat.spec.key,
+ assert.docEq(stat.spec.key,
+ indexKeys[stat.name],
'$indexStats returned unexpected nested key in spec for index: ' + stat.name +
': ' + tojson(indexStatsDocs));
}
diff --git a/jstests/core/timeseries/timeseries_insert_after_delete.js b/jstests/core/timeseries/timeseries_insert_after_delete.js
index f43a7b245da..ca5874820e8 100644
--- a/jstests/core/timeseries/timeseries_insert_after_delete.js
+++ b/jstests/core/timeseries/timeseries_insert_after_delete.js
@@ -45,7 +45,7 @@ TimeseriesTest.run((insert) => {
1);
assert.commandWorked(insert(coll, [objB]));
const docs = coll.find({}, {_id: 0}).toArray();
- assert.docEq(docs, [objB]);
+ assert.docEq([objB], docs);
assert(coll.drop());
});
})();
diff --git a/jstests/core/timeseries/timeseries_insert_after_update.js b/jstests/core/timeseries/timeseries_insert_after_update.js
index 68a79d16371..193cdc9d6c9 100644
--- a/jstests/core/timeseries/timeseries_insert_after_update.js
+++ b/jstests/core/timeseries/timeseries_insert_after_update.js
@@ -68,7 +68,7 @@ TimeseriesTest.run((insert) => {
const expectedNumBucketsReopened = stats.timeseries['numBucketsReopened'] + 1;
assert.commandWorked(insert(coll, docs.slice(1)));
- assert.docEq(coll.find({}, {_id: 0}).sort({[timeFieldName]: 1}).toArray(), docs);
+ assert.docEq(docs, coll.find({}, {_id: 0}).sort({[timeFieldName]: 1}).toArray());
if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB)) {
assert.eq(bucketsColl.find().itcount(), 2, bucketsColl.find().toArray());
diff --git a/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js b/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js
index 5f2f4104d4b..31c8b4f80b7 100644
--- a/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js
+++ b/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js
@@ -93,7 +93,7 @@ results = coll.aggregate([
])
.toArray();
assert.eq(results.length, 1, results);
-assert.docEq(results[0].a, {b: {type: "Point", coordinates: [0, 0]}});
+assert.docEq({b: {type: "Point", coordinates: [0, 0]}}, results[0].a);
// Test a scenario where $geoWithin does implicit array traversal.
coll.drop();
@@ -118,11 +118,13 @@ results = coll.aggregate([
])
.toArray();
assert.eq(results.length, 1, results);
-assert.docEq(results[0].a, [
- 12345,
- {type: "Point", coordinates: [180, 0]},
- {"1": {type: "Point", coordinates: [0, 0]}},
-]);
+assert.docEq(
+ [
+ 12345,
+ {type: "Point", coordinates: [180, 0]},
+ {"1": {type: "Point", coordinates: [0, 0]}},
+ ],
+ results[0].a);
pipeline = [{
$match: {
diff --git a/jstests/core/timeseries/timeseries_list_collections.js b/jstests/core/timeseries/timeseries_list_collections.js
index 235c2115256..318f50b8f3a 100644
--- a/jstests/core/timeseries/timeseries_list_collections.js
+++ b/jstests/core/timeseries/timeseries_list_collections.js
@@ -70,8 +70,8 @@ const testOptions = function(options) {
assert(collections.find(entry => entry.name === 'system.views'));
assert(collections.find(entry => entry.name === 'system.buckets.' + coll.getName()));
assert.docEq(
- collections.find(entry => entry.name === coll.getName()),
- {name: coll.getName(), type: 'timeseries', options: options, info: {readOnly: false}});
+ {name: coll.getName(), type: 'timeseries', options: options, info: {readOnly: false}},
+ collections.find(entry => entry.name === coll.getName()));
};
testOptions({timeseries: {timeField: timeFieldName}});
diff --git a/jstests/core/timeseries/timeseries_match_pushdown.js b/jstests/core/timeseries/timeseries_match_pushdown.js
index 8fcdd1dc6ed..1b45895b46e 100644
--- a/jstests/core/timeseries/timeseries_match_pushdown.js
+++ b/jstests/core/timeseries/timeseries_match_pushdown.js
@@ -55,10 +55,10 @@ const runTest = function({pipeline, eventFilter, wholeBucketFilter, expectedDocs
unpackStages.length,
"Should only have a single $_internalUnpackBucket stage: " + tojson(explain));
const unpackStage = unpackStages[0].$_internalUnpackBucket;
- assert.docEq(unpackStage.eventFilter, eventFilter, "Incorrect eventFilter: " + tojson(explain));
+ assert.docEq(eventFilter, unpackStage.eventFilter, "Incorrect eventFilter: " + tojson(explain));
if (wholeBucketFilter) {
- assert.docEq(unpackStage.wholeBucketFilter,
- wholeBucketFilter,
+ assert.docEq(wholeBucketFilter,
+ unpackStage.wholeBucketFilter,
"Incorrect wholeBucketFilter: " + tojson(explain));
} else {
assert(!unpackStage.wholeBucketFilter, "Incorrect wholeBucketFilter: " + tojson(explain));
@@ -69,7 +69,7 @@ const runTest = function({pipeline, eventFilter, wholeBucketFilter, expectedDocs
docs.forEach((doc, i) => {
        // No need to check the document _id, since the time field is already unique.
delete doc._id;
- assert.docEq(doc, expectedDocs[i], "Incorrect docs: " + tojson(docs));
+ assert.docEq(expectedDocs[i], doc, "Incorrect docs: " + tojson(docs));
});
};
diff --git a/jstests/core/timeseries/timeseries_match_pushdown_with_project.js b/jstests/core/timeseries/timeseries_match_pushdown_with_project.js
index 1c4e768d98a..bec8998f9d4 100644
--- a/jstests/core/timeseries/timeseries_match_pushdown_with_project.js
+++ b/jstests/core/timeseries/timeseries_match_pushdown_with_project.js
@@ -58,7 +58,7 @@ const runTest = function({pipeline, behaviour, expectedDocs}) {
const docs = coll.aggregate([...pipeline, {$sort: {a: 1, b: 1, _id: 1}}]).toArray();
assert.eq(docs.length, expectedDocs.length, "Incorrect docs: " + tojson(docs));
docs.forEach((doc, i) => {
- assert.docEq(doc, expectedDocs[i], "Incorrect docs: " + tojson(docs));
+ assert.docEq(expectedDocs[i], doc, "Incorrect docs: " + tojson(docs));
});
};
diff --git a/jstests/core/timeseries/timeseries_out_of_order.js b/jstests/core/timeseries/timeseries_out_of_order.js
index 0a7f0afa365..ff842393773 100644
--- a/jstests/core/timeseries/timeseries_out_of_order.js
+++ b/jstests/core/timeseries/timeseries_out_of_order.js
@@ -37,7 +37,7 @@ TimeseriesTest.run((insert) => {
assert.contains(bucketsColl.getName(), db.getCollectionNames());
assert.commandWorked(insert(coll, docs));
- assert.docEq(coll.find().sort({_id: 1}).toArray(), docs);
+ assert.docEq(docs, coll.find().sort({_id: 1}).toArray());
const buckets = bucketsColl.find().sort({_id: 1}).toArray();
jsTestLog('Checking buckets:' + tojson(buckets));
diff --git a/jstests/core/timeseries/timeseries_project.js b/jstests/core/timeseries/timeseries_project.js
index 1352501d221..85093904aba 100644
--- a/jstests/core/timeseries/timeseries_project.js
+++ b/jstests/core/timeseries/timeseries_project.js
@@ -28,59 +28,59 @@ let result =
{a: 1, b: "$meta", c: {$multiply: [2, "$meta"]}, d: {$multiply: [2, "$meta"]}}
}])
.toArray();
-assert.docEq(result, [{_id: 0, a: {b: 1}, b: 4, c: 8, d: 8}]);
+assert.docEq([{_id: 0, a: {b: 1}, b: 4, c: 8, d: 8}], result);
// Same as above, but keep the rest of the document.
result = coll.aggregate([{$set: {b: "$meta"}}]).toArray();
-assert.docEq(result, [{_id: 0, time: docDate, meta: 4, a: {b: 1}, b: 4, c: [{}, {}]}]);
+assert.docEq([{_id: 0, time: docDate, meta: 4, a: {b: 1}, b: 4, c: [{}, {}]}], result);
// Check that nested meta project is not overwritten by the unpacked value.
result = coll.aggregate([{$project: {"a.b": "$meta"}}]).toArray();
-assert.docEq(result, [{_id: 0, a: {b: 4}}]);
+assert.docEq([{_id: 0, a: {b: 4}}], result);
// Check that meta project pushed down writes to each value in an array.
result = coll.aggregate([{$project: {"c.a": "$meta"}}]).toArray();
-assert.docEq(result, [{_id: 0, c: [{a: 4}, {a: 4}]}]);
+assert.docEq([{_id: 0, c: [{a: 4}, {a: 4}]}], result);
// Replace meta field with unpacked field.
result = coll.aggregate([{$project: {"meta": "$b"}}]).toArray();
-assert.docEq(result, [{_id: 0, meta: 3}]);
+assert.docEq([{_id: 0, meta: 3}], result);
// Replace meta field with time field.
result = coll.aggregate([{$project: {"meta": "$time"}}]).toArray();
-assert.docEq(result, [{_id: 0, meta: docDate}]);
+assert.docEq([{_id: 0, meta: docDate}], result);
// Replace meta field with constant.
result = coll.aggregate([{$project: {"meta": {$const: 5}}}]).toArray();
-assert.docEq(result, [{_id: 0, meta: 5}]);
+assert.docEq([{_id: 0, meta: 5}], result);
// Make sure the time field can be overwritten by the meta field correctly.
result = coll.aggregate([{$set: {time: "$meta"}}]).toArray();
-assert.docEq(result, [{_id: 0, time: 4, meta: 4, a: {b: 1}, b: 3, c: [{}, {}]}]);
+assert.docEq([{_id: 0, time: 4, meta: 4, a: {b: 1}, b: 3, c: [{}, {}]}], result);
// Check that the time field can be overwritten by an unpacked field correctly.
result = coll.aggregate([{$set: {time: "$b"}}]).toArray();
-assert.docEq(result, [{_id: 0, time: 3, meta: 4, a: {b: 1}, b: 3, c: [{}, {}]}]);
+assert.docEq([{_id: 0, time: 3, meta: 4, a: {b: 1}, b: 3, c: [{}, {}]}], result);
// Make sure the time field can be overwritten by a constant correctly.
result = coll.aggregate([{$project: {time: {$const: 5}}}]).toArray();
-assert.docEq(result, [{_id: 0, time: 5}]);
+assert.docEq([{_id: 0, time: 5}], result);
// Test that a pushed down meta field projection can correctly be excluded.
result = coll.aggregate([{$set: {b: "$meta"}}, {$unset: "a"}]).toArray();
-assert.docEq(result, [{_id: 0, time: docDate, meta: 4, b: 4, c: [{}, {}]}]);
+assert.docEq([{_id: 0, time: docDate, meta: 4, b: 4, c: [{}, {}]}], result);
// Exclude behavior for time field.
result = coll.aggregate([{$set: {b: "$time"}}, {$unset: "a"}]).toArray();
-assert.docEq(result, [{_id: 0, time: docDate, meta: 4, b: docDate, c: [{}, {}]}]);
+assert.docEq([{_id: 0, time: docDate, meta: 4, b: docDate, c: [{}, {}]}], result);
// Exclude behavior for consecutive projects.
result = coll.aggregate([{$set: {b: "$meta"}}, {$unset: "meta"}]).toArray();
-assert.docEq(result, [{_id: 0, time: docDate, a: {b: 1}, b: 4, c: [{}, {}]}]);
+assert.docEq([{_id: 0, time: docDate, a: {b: 1}, b: 4, c: [{}, {}]}], result);
// Test that an exclude does not overwrite meta field pushdown.
result = coll.aggregate([{$unset: "b"}, {$set: {b: "$meta"}}]).toArray();
-assert.docEq(result, [{_id: 0, time: docDate, meta: 4, a: {b: 1}, b: 4, c: [{}, {}]}]);
+assert.docEq([{_id: 0, time: docDate, meta: 4, a: {b: 1}, b: 4, c: [{}, {}]}], result);
// Test that a field reference in a projection refers to the stage's input document
// rather than another field with the same name in the projection.
@@ -106,27 +106,27 @@ assert.commandWorked(regColl.insert(doc));
let pipeline = [{$project: {_id: 0, a: "$x", b: "$a"}}];
let tsDoc = tsColl.aggregate(pipeline).toArray();
let regDoc = regColl.aggregate(pipeline).toArray();
-assert.docEq(tsDoc, regDoc);
+assert.docEq(regDoc, tsDoc);
pipeline = [{$project: {_id: 0, obj: "$x", b: {$add: ["$obj.a", 1]}}}];
tsDoc = tsColl.aggregate(pipeline).toArray();
regDoc = regColl.aggregate(pipeline).toArray();
-assert.docEq(tsDoc, regDoc);
+assert.docEq(regDoc, tsDoc);
// Test $addFields.
pipeline = [{$addFields: {a: "$x", b: "$a"}}, {$project: {_id: 0}}];
tsDoc = tsColl.aggregate(pipeline).toArray();
regDoc = regColl.aggregate(pipeline).toArray();
-assert.docEq(tsDoc, regDoc);
+assert.docEq(regDoc, tsDoc);
pipeline = [{$addFields: {obj: "$x", b: {$add: ["$obj.a", 1]}}}, {$project: {_id: 0}}];
tsDoc = tsColl.aggregate(pipeline).toArray();
regDoc = regColl.aggregate(pipeline).toArray();
-assert.docEq(tsDoc, regDoc);
+assert.docEq(regDoc, tsDoc);
pipeline = [{$project: {a: 1, _id: 0}}, {$project: {newMeta: "$x"}}];
tsDoc = tsColl.aggregate(pipeline).toArray();
regDoc = regColl.aggregate(pipeline).toArray();
-assert.docEq(tsDoc, regDoc);
+assert.docEq(regDoc, tsDoc);
})();
})();
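The tail of this file repeats one pattern: run the same pipeline against a time-series collection and a regular collection holding the same documents, and require identical output. A hypothetical helper capturing it, with the regular collection's output treated as expected per the new argument order:

// Sketch only; assumes 'regColl' and 'tsColl' contain the same documents.
function assertSameResults(regColl, tsColl, pipeline) {
    assert.docEq(regColl.aggregate(pipeline).toArray(),
                 tsColl.aggregate(pipeline).toArray());
}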
diff --git a/jstests/core/txns/aggregation_in_transaction.js b/jstests/core/txns/aggregation_in_transaction.js
index ec1c62133bb..76195d0caa0 100644
--- a/jstests/core/txns/aggregation_in_transaction.js
+++ b/jstests/core/txns/aggregation_in_transaction.js
@@ -84,7 +84,7 @@ withTxnAndAutoRetryOnMongos(session, () => {
as: "lookup",
}
});
- assert.docEq(cursor.next(), lookupDoc);
+ assert.docEq(lookupDoc, cursor.next());
assert(!cursor.hasNext());
jsTestLog("Testing $graphLookup within a transaction.");
@@ -98,7 +98,7 @@ withTxnAndAutoRetryOnMongos(session, () => {
as: "lookup"
}
});
- assert.docEq(cursor.next(), lookupDoc);
+ assert.docEq(lookupDoc, cursor.next());
assert(!cursor.hasNext());
}
diff --git a/jstests/core/txns/timeseries/timeseries_reads_in_txns.js b/jstests/core/txns/timeseries/timeseries_reads_in_txns.js
index 8b1f8237018..ce5c57330b3 100644
--- a/jstests/core/txns/timeseries/timeseries_reads_in_txns.js
+++ b/jstests/core/txns/timeseries/timeseries_reads_in_txns.js
@@ -55,12 +55,12 @@ assert.commandWorked(sessionRegularColl.insert(doc4));
// The last insert should be visible in this session.
doc4 = sessionRegularColl.findOne({_id: 1, x: 1});
assert.neq(null, doc4);
-assert.docEq(doc4, doc1);
+assert.docEq(doc1, doc4);
session.commitTransaction();
// Verify that after a commit the update persists.
let doc5 = regularColl.findOne({_id: 1, x: 1});
assert.neq(null, doc5);
-assert.docEq(doc5, doc4);
+assert.docEq(doc4, doc5);
})();
diff --git a/jstests/core/update3.js b/jstests/core/update3.js
index 4974f8c022b..5a61b8bcfc9 100644
--- a/jstests/core/update3.js
+++ b/jstests/core/update3.js
@@ -30,4 +30,4 @@ assert.eq(0, f.findOne()._id, "D");
f.drop();
f.save({_id: 1, a: 1});
f.update({}, {$unset: {"a": 1, "b.c": 1}});
-assert.docEq(f.findOne(), {_id: 1}, "E");
\ No newline at end of file
+assert.docEq({_id: 1}, f.findOne(), "E");
\ No newline at end of file
diff --git a/jstests/core/update_min_max_examples.js b/jstests/core/update_min_max_examples.js
index b127b4c3db5..bc5a7302e79 100644
--- a/jstests/core/update_min_max_examples.js
+++ b/jstests/core/update_min_max_examples.js
@@ -59,7 +59,7 @@ coll.insert(insertdoc);
res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}});
assert.commandWorked(res);
insertdoc.y[1].a = 7;
-assert.docEq(coll.findOne({_id: 7}), insertdoc);
+assert.docEq(insertdoc, coll.findOne({_id: 7}));
// $min with positional operator
insertdoc = {
@@ -70,5 +70,5 @@ coll.insert(insertdoc);
res = coll.update({_id: 8, "y.a": 6}, {$min: {"y.$.a": 5}});
assert.commandWorked(res);
insertdoc.y[1].a = 5;
-assert.docEq(coll.findOne({_id: 8}), insertdoc);
+assert.docEq(insertdoc, coll.findOne({_id: 8}));
}());
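A self-contained sketch of the positional $max update exercised above, with a hypothetical collection: the matched array element is raised only when the new value is greater.

const mm = db.min_max_example; // hypothetical name
mm.drop();
assert.commandWorked(mm.insert({_id: 7, y: [{a: 2}, {a: 6}]}));
// The positional $ targets the element matched by "y.a": 6; 7 > 6, so it is raised.
assert.commandWorked(mm.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}}));
assert.docEq({_id: 7, y: [{a: 2}, {a: 7}]}, mm.findOne({_id: 7}));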
diff --git a/jstests/core/update_pipeline_shell_helpers.js b/jstests/core/update_pipeline_shell_helpers.js
index 3c867c8b965..0ac1b35f4ce 100644
--- a/jstests/core/update_pipeline_shell_helpers.js
+++ b/jstests/core/update_pipeline_shell_helpers.js
@@ -80,10 +80,10 @@ const expectedFindOneAndUpdatePostImage =
Object.merge(expectedFindAndModifyPostImage, {findOneAndUpdate: true});
const findAndModifyPostImage =
testColl.findAndModify({query: {_id: 1}, update: [{$set: {findAndModify: true}}], new: true});
-assert.docEq(findAndModifyPostImage, expectedFindAndModifyPostImage);
+assert.docEq(expectedFindAndModifyPostImage, findAndModifyPostImage);
const findOneAndUpdatePostImage = testColl.findOneAndUpdate(
{_id: 1}, [{$set: {findOneAndUpdate: true}}], {returnNewDocument: true});
-assert.docEq(findOneAndUpdatePostImage, expectedFindOneAndUpdatePostImage);
+assert.docEq(expectedFindOneAndUpdatePostImage, findOneAndUpdatePostImage);
//
// Explain for updates that use an _id lookup query.
diff --git a/jstests/core/upsert_and.js b/jstests/core/upsert_and.js
index 8c13f9b63d4..1e45cbe8dc2 100644
--- a/jstests/core/upsert_and.js
+++ b/jstests/core/upsert_and.js
@@ -10,34 +10,34 @@ coll.drop();
res = coll.update({_id: 1, $and: [{c: 1}, {d: 1}], a: 12}, {$inc: {y: 1}}, true);
assert.commandWorked(res);
-assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1, a: 12, y: 1});
+assert.docEq({_id: 1, c: 1, d: 1, a: 12, y: 1}, coll.findOne());
coll.remove({});
res = coll.update({$and: [{c: 1}, {d: 1}]}, {$setOnInsert: {_id: 1}}, true);
assert.commandWorked(res);
-assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
+assert.docEq({_id: 1, c: 1, d: 1}, coll.findOne());
coll.remove({});
res = coll.update({$and: [{c: 1}, {d: 1}, {$or: [{x: 1}]}]}, {$setOnInsert: {_id: 1}}, true);
assert.commandWorked(res);
-assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1, x: 1});
+assert.docEq({_id: 1, c: 1, d: 1, x: 1}, coll.findOne());
coll.remove({});
res = coll.update({$and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
assert.commandWorked(res);
-assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
+assert.docEq({_id: 1, c: 1, d: 1}, coll.findOne());
coll.remove({});
res = coll.update(
{r: {$gt: 3}, $and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
assert.commandWorked(res);
-assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
+assert.docEq({_id: 1, c: 1, d: 1}, coll.findOne());
coll.remove({});
res = coll.update(
{r: /s/, $and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
assert.commandWorked(res);
-assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
+assert.docEq({_id: 1, c: 1, d: 1}, coll.findOne());
coll.remove({});
res = coll.update({c: 2, $and: [{c: 1}, {d: 1}]}, {$setOnInsert: {_id: 1}}, true);
diff --git a/jstests/core/upsert_fields.js b/jstests/core/upsert_fields.js
index c8944424bf8..310bace4907 100644
--- a/jstests/core/upsert_fields.js
+++ b/jstests/core/upsert_fields.js
@@ -226,4 +226,4 @@ assert.eq(1, upsertedId({_id: 1, x: [1, {x: 1}], "x.x": 1}, {}));
// make sure query doesn't error when creating doc for insert, since it's missing the rest of the
// dbref fields. SERVER-14024
// Fails in 2.6.1->3
-assert.docEq(tojson(DBRef("a", 1)), upsertedXVal({"x.$id": 1}, {$set: {x: DBRef("a", 1)}}));
+assert.docEq(DBRef("a", 1), upsertedXVal({"x.$id": 1}, {$set: {x: DBRef("a", 1)}}));
diff --git a/jstests/core/verify_update_mods.js b/jstests/core/verify_update_mods.js
index 0dc59281ade..4161c0f5b86 100644
--- a/jstests/core/verify_update_mods.js
+++ b/jstests/core/verify_update_mods.js
@@ -30,7 +30,7 @@ function executeUpdateTestCase(testCase) {
if (testCase.expectedErrorCode == undefined) {
// Verify that the command succeeded and collection's contents match the expected results.
assert.commandWorked(result);
- assert.docEq(coll.find({}).sort({_id: 1}).toArray(), testCase.expectedResults);
+ assert.docEq(testCase.expectedResults, coll.find({}).sort({_id: 1}).toArray());
} else {
assert.commandFailedWithCode(result, testCase.expectedErrorCode);
}
diff --git a/jstests/core/wildcard_and_text_indexes.js b/jstests/core/wildcard_and_text_indexes.js
index 1b8f960915a..a3c2d9b9b88 100644
--- a/jstests/core/wildcard_and_text_indexes.js
+++ b/jstests/core/wildcard_and_text_indexes.js
@@ -26,7 +26,7 @@ function assertWildcardQuery(query, expectedPath) {
const ixScans = getPlanStages(getWinningPlan(explainOutput.queryPlanner), "IXSCAN");
// Verify that the winning plan uses the $** index with the expected path.
assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1});
+ assert.docEq({"$_path": 1, [expectedPath]: 1}, ixScans[0].keyPattern);
// Verify that the results obtained from the $** index are identical to a COLLSCAN.
assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray());
}
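The wildcard-index hunks in this and the following files all assert the same shape: the winning plan's IXSCAN key pattern must pair the internal $_path key with the queried path. Because the expected document is built with an ES6 computed property name, it moves cleanly into the first (expected) slot. A sketch of the pattern, with expectedPath standing in for whatever dotted path is under test:

    const expectedPath = "a.b";
    // The computed key evaluates to {"$_path": 1, "a.b": 1}.
    assert.docEq({"$_path": 1, [expectedPath]: 1}, ixScans[0].keyPattern);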
diff --git a/jstests/core/wildcard_index_basic_index_bounds.js b/jstests/core/wildcard_index_basic_index_bounds.js
index 88e5b7eed52..94a13d9e8ed 100644
--- a/jstests/core/wildcard_index_basic_index_bounds.js
+++ b/jstests/core/wildcard_index_basic_index_bounds.js
@@ -136,8 +136,8 @@ function runWildcardIndexTest(keyPattern, pathProjection, expectedPaths) {
// Verify that the winning plan uses the $** index with the expected bounds.
assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.docEq(ixScans[0].keyPattern, {$_path: 1, [path]: 1});
- assert.docEq(ixScans[0].indexBounds, expectedBounds);
+ assert.docEq({$_path: 1, [path]: 1}, ixScans[0].keyPattern);
+ assert.docEq(expectedBounds, ixScans[0].indexBounds);
// Verify that the results obtained from the $** index are identical to a COLLSCAN.
// We must explicitly hint the wildcard index, because we also sort on {_id: 1} to
diff --git a/jstests/core/wildcard_index_multikey.js b/jstests/core/wildcard_index_multikey.js
index 04ce2d28500..3263ce4a44e 100644
--- a/jstests/core/wildcard_index_multikey.js
+++ b/jstests/core/wildcard_index_multikey.js
@@ -108,7 +108,7 @@ function assertWildcardQuery(query, expectedPath, explainStats = {}) {
// Verify that the winning plan uses the $** index with the expected path.
const ixScans = getPlanStages(getWinningPlan(explainOutput.queryPlanner), "IXSCAN");
assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll));
- assert.docEq(ixScans[0].keyPattern, {"$_path": 1, [expectedPath]: 1});
+ assert.docEq({"$_path": 1, [expectedPath]: 1}, ixScans[0].keyPattern);
// Verify that the results obtained from the $** index are identical to a COLLSCAN.
assertArrayEq(coll.find(query).toArray(), coll.find(query).hint({$natural: 1}).toArray());
}
diff --git a/jstests/core/wildcard_index_nonblocking_sort.js b/jstests/core/wildcard_index_nonblocking_sort.js
index 628bfac3a63..c0cada14004 100644
--- a/jstests/core/wildcard_index_nonblocking_sort.js
+++ b/jstests/core/wildcard_index_nonblocking_sort.js
@@ -42,7 +42,7 @@ function checkQueryUsesSortType(query, sort, projection, isBlocking) {
assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll), explain);
const sortKey = Object.keys(sort)[0];
- assert.docEq(ixScans[0].keyPattern, {$_path: 1, [sortKey]: 1});
+ assert.docEq({$_path: 1, [sortKey]: 1}, ixScans[0].keyPattern);
}
}
diff --git a/jstests/core/wildcard_index_type.js b/jstests/core/wildcard_index_type.js
index 7c0143a9773..9225b97c34f 100644
--- a/jstests/core/wildcard_index_type.js
+++ b/jstests/core/wildcard_index_type.js
@@ -43,7 +43,7 @@ function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match, expectedB
// Expected bounds were used.
if (expectedBounds !== undefined) {
- ixScans.forEach((ixScan) => assert.docEq(ixScan.indexBounds, expectedBounds));
+ ixScans.forEach((ixScan) => assert.docEq(expectedBounds, ixScan.indexBounds));
}
}
diff --git a/jstests/fle2/libs/encrypted_client_util.js b/jstests/fle2/libs/encrypted_client_util.js
index c080b22d6a7..0104fb5145d 100644
--- a/jstests/fle2/libs/encrypted_client_util.js
+++ b/jstests/fle2/libs/encrypted_client_util.js
@@ -293,7 +293,7 @@ var EncryptedClient = class {
let onDiskDocs = coll.find({}, {[kSafeContentField]: 0}).sort({_id: 1}).toArray();
- assert.docEq(onDiskDocs, docs);
+ assert.docEq(docs, onDiskDocs);
}
assertStateCollectionsAfterCompact(collName, ecocExists) {
diff --git a/jstests/libs/aggregation_pipeline_utils.js b/jstests/libs/aggregation_pipeline_utils.js
index e584ded3d1c..06f786d1924 100644
--- a/jstests/libs/aggregation_pipeline_utils.js
+++ b/jstests/libs/aggregation_pipeline_utils.js
@@ -22,7 +22,7 @@ function executeAggregationTestCase(collection, testCase) {
assert(testCase.expectedErrorCode === undefined,
`Expected an exception with code ${testCase.expectedErrorCode}`);
}
- assert.docEq(actualResults, testCase.expectedResults);
+ assert.docEq(testCase.expectedResults, actualResults);
} catch (error) {
if (testCase.expectedErrorCode === undefined) {
throw error;
diff --git a/jstests/libs/change_stream_util.js b/jstests/libs/change_stream_util.js
index 40c8ee7b6bb..20af14f28d3 100644
--- a/jstests/libs/change_stream_util.js
+++ b/jstests/libs/change_stream_util.js
@@ -563,13 +563,13 @@ function assertChangeStreamPreAndPostImagesCollectionOptionIsAbsent(db, collName
assert(!collectionInfos[0].options.hasOwnProperty("changeStreamPreAndPostImages"));
}
-function getPreImagesCollection(db) {
- return db.getSiblingDB(kPreImagesCollectionDatabase).getCollection(kPreImagesCollectionName);
+function getPreImagesCollection(connection) {
+ return connection.getDB(kPreImagesCollectionDatabase).getCollection(kPreImagesCollectionName);
}
// Returns the pre-images written while performing the write operations.
function preImagesForOps(db, writeOps) {
- const preImagesColl = getPreImagesCollection(db);
+ const preImagesColl = getPreImagesCollection(db.getMongo());
const preImagesCollSortSpec = {"_id.ts": 1, "_id.applyOpsIndex": 1};
// Determine the id of the last pre-image document written to be able to determine the pre-image
@@ -601,7 +601,7 @@ function preImagesForOps(db, writeOps) {
* _id.applyOpsIndex ascending.
*/
function getPreImages(connection) {
- return connection.getDB(kPreImagesCollectionDatabase)[kPreImagesCollectionName]
+ return getPreImagesCollection(connection)
.find()
.sort({"_id.ts": 1, "_id.applyOpsIndex": 1})
.allowDiskUse()
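Beyond the argument swap, this hunk reworks getPreImagesCollection() to accept a connection rather than a database handle, so both preImagesForOps() and getPreImages() can share it. Callers that hold a DB handle derive the connection instead; a sketch of the adjusted call site (testDB is an assumed handle, as in the callers later in this patch):

    // Before: getPreImagesCollection(testDB)
    // After: derive the connection from the DB handle.
    const preImagesColl = getPreImagesCollection(testDB.getMongo());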
diff --git a/jstests/libs/clustered_collections/clustered_collection_util.js b/jstests/libs/clustered_collections/clustered_collection_util.js
index ad6c0111c8a..baefc679461 100644
--- a/jstests/libs/clustered_collections/clustered_collection_util.js
+++ b/jstests/libs/clustered_collections/clustered_collection_util.js
@@ -70,7 +70,7 @@ var ClusteredCollectionUtil = class {
assert.commandWorked(db.runCommand({listCollections: 1, filter: {name: collName}}));
const listCollsOptions = listColls.cursor.firstBatch[0].options;
assert(listCollsOptions.clusteredIndex);
- assert.docEq(listCollsOptions.clusteredIndex, fullCreateOptions.clusteredIndex);
+ assert.docEq(fullCreateOptions.clusteredIndex, listCollsOptions.clusteredIndex);
}
// The clusteredIndex should appear in listIndexes with additional "clustered" field.
@@ -79,7 +79,7 @@ var ClusteredCollectionUtil = class {
const listIndexes = assert.commandWorked(db[collName].runCommand("listIndexes"));
const expectedListIndexesOutput =
Object.extend({clustered: true}, fullCreateOptions.clusteredIndex);
- assert.docEq(listIndexes.cursor.firstBatch[0], expectedListIndexesOutput);
+ assert.docEq(expectedListIndexesOutput, listIndexes.cursor.firstBatch[0]);
}
static testBasicClusteredCollection(db, collName, clusterKey) {
diff --git a/jstests/libs/command_line/test_parsed_options.js b/jstests/libs/command_line/test_parsed_options.js
index 7972bb6e22d..d2955462825 100644
--- a/jstests/libs/command_line/test_parsed_options.js
+++ b/jstests/libs/command_line/test_parsed_options.js
@@ -114,7 +114,7 @@ function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
delete getCmdLineOptsResult.parsed.setParameter.backtraceLogFile;
// Make sure the options are equal to what we expect
- assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+ assert.docEq(expectedResult.parsed, getCmdLineOptsResult.parsed);
// Cleanup
mongod.getDB("admin").logout();
@@ -206,7 +206,7 @@ function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
}
// Make sure the options are equal to what we expect
- assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+ assert.docEq(expectedResult.parsed, getCmdLineOptsResult.parsed);
}
// Tests that the passed configuration will not run a new mongod instances. Mainly used to test
diff --git a/jstests/libs/database_versioning.js b/jstests/libs/database_versioning.js
index 3d0d107fe5e..5cb066a1206 100644
--- a/jstests/libs/database_versioning.js
+++ b/jstests/libs/database_versioning.js
@@ -3,8 +3,8 @@
function checkInMemoryDatabaseVersion(conn, dbName, expectedVersion) {
const res = conn.adminCommand({getDatabaseVersion: dbName});
assert.commandWorked(res);
- assert.docEq(res.dbVersion,
- expectedVersion,
+ assert.docEq(expectedVersion,
+ res.dbVersion,
conn + " did not have expected in-memory database version for " + dbName);
}
diff --git a/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js b/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js
index b5a0f70b740..3b9bbf90cab 100644
--- a/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js
+++ b/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js
@@ -66,14 +66,14 @@ function runTest(docs, query, results, path, bounds) {
bucketColl = db.getCollection('system.buckets.' + tsColl.getName());
// Confirm expected results.
- assert.docEq(tsColl.aggregate(pipeline).toArray(), results);
+ assert.docEq(results, tsColl.aggregate(pipeline).toArray());
const buckets = bucketColl.aggregate(controlPipeline).toArray();
// Check that we only have one bucket.
assert.eq(buckets.length, 1);
// Check that the bounds are what we expect.
- assert.docEq(buckets[0].value, bounds);
+ assert.docEq(bounds, buckets[0].value);
}
rst.stopSet();
diff --git a/jstests/noPassthrough/change_stream_failover.js b/jstests/noPassthrough/change_stream_failover.js
index 92c3eea9518..c777f9c33c4 100644
--- a/jstests/noPassthrough/change_stream_failover.js
+++ b/jstests/noPassthrough/change_stream_failover.js
@@ -37,7 +37,7 @@ for (let key of Object.keys(ChangeStreamWatchMode)) {
assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
const firstChange = cst.getOneChange(changeStream);
- assert.docEq(firstChange.fullDocument, {_id: 0});
+ assert.docEq({_id: 0}, firstChange.fullDocument);
// Make the primary step down
assert.commandWorked(primaryDB.adminCommand({replSetStepDown: 30}));
diff --git a/jstests/noPassthrough/change_stream_resume_before_add_shard.js b/jstests/noPassthrough/change_stream_resume_before_add_shard.js
index 7a8a78fbbcb..62dc65a6f2b 100644
--- a/jstests/noPassthrough/change_stream_resume_before_add_shard.js
+++ b/jstests/noPassthrough/change_stream_resume_before_add_shard.js
@@ -43,7 +43,7 @@ function addShardToCluster(shardName) {
// is used by change streams as a sentinel to indicate that no writes have occurred on the
// replica set before this point.
const firstOplogEntry = replTest.getPrimary().getCollection("local.oplog.rs").findOne();
- assert.docEq(firstOplogEntry.o, {msg: "initiating set"});
+ assert.docEq({msg: "initiating set"}, firstOplogEntry.o);
assert.eq(firstOplogEntry.op, "n");
return replTest;
@@ -56,7 +56,7 @@ function assertCanResumeFromEachEvent(eventList) {
const resumedStream = coll.watch([], {resumeAfter: eventList[i]._id});
for (let j = i + 1; j < eventList.length; ++j) {
assert.soon(() => resumedStream.hasNext());
- assert.docEq(resumedStream.next(), eventList[j]);
+ assert.docEq(eventList[j], resumedStream.next());
}
resumedStream.close();
}
diff --git a/jstests/noPassthrough/change_streams_require_majority_read_concern.js b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
index 54af9e9806f..c7b99a8df1a 100644
--- a/jstests/noPassthrough/change_streams_require_majority_read_concern.js
+++ b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
@@ -79,7 +79,7 @@ rst.awaitLastOpCommitted();
// Verify that the expected doc is returned because it has been committed.
let doc = cst.getOneChange(cursor);
-assert.docEq(doc.operationType, "insert");
-assert.docEq(doc.fullDocument, {_id: 2});
+assert.docEq("insert", doc.operationType);
+assert.docEq({_id: 2}, doc.fullDocument);
rst.stopSet();
}());
diff --git a/jstests/noPassthrough/change_streams_update_lookup_collation.js b/jstests/noPassthrough/change_streams_update_lookup_collation.js
index df322d41413..5bc3ffdc83c 100644
--- a/jstests/noPassthrough/change_streams_update_lookup_collation.js
+++ b/jstests/noPassthrough/change_streams_update_lookup_collation.js
@@ -52,10 +52,10 @@ const idIndexUsagesBeforeIteration = numIdIndexUsages();
// Both cursors should produce a document describing this update, since the "x" value of the
// first document will match both filters.
assert.soon(() => changeStreamDefaultCollation.hasNext());
-assert.docEq(changeStreamDefaultCollation.next().fullDocument,
- {_id: "abc", x: "abc", updated: true});
+assert.docEq({_id: "abc", x: "abc", updated: true},
+ changeStreamDefaultCollation.next().fullDocument);
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 1);
-assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abc", x: "abc", updated: true});
+assert.docEq({_id: "abc", x: "abc", updated: true}, strengthOneChangeStream.next().fullDocument);
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 2);
assert.commandWorked(coll.update({_id: "abç"}, {$set: {updated: true}}));
@@ -63,10 +63,10 @@ assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 3);
// Again, both cursors should produce a document describing this update.
assert.soon(() => changeStreamDefaultCollation.hasNext());
-assert.docEq(changeStreamDefaultCollation.next().fullDocument,
- {_id: "abç", x: "ABC", updated: true});
+assert.docEq({_id: "abç", x: "ABC", updated: true},
+ changeStreamDefaultCollation.next().fullDocument);
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 4);
-assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abç", x: "ABC", updated: true});
+assert.docEq({_id: "abç", x: "ABC", updated: true}, strengthOneChangeStream.next().fullDocument);
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 5);
assert.commandWorked(coll.update({_id: "Ã¥bC"}, {$set: {updated: true}}));
@@ -77,7 +77,7 @@ assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 6);
// subsequent $match stage will reject the document because it does not consider "AbÇ" equal to
// "abc". Only the strengthOneChangeStream will output the final document.
assert.soon(() => strengthOneChangeStream.hasNext());
-assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "åbC", x: "AbÇ", updated: true});
+assert.docEq({_id: "åbC", x: "AbÇ", updated: true}, strengthOneChangeStream.next().fullDocument);
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 7);
assert(!changeStreamDefaultCollation.hasNext());
assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 8);
diff --git a/jstests/noPassthrough/count_helper_read_preference.js b/jstests/noPassthrough/count_helper_read_preference.js
index 17e702c2076..ed4571306a1 100644
--- a/jstests/noPassthrough/count_helper_read_preference.js
+++ b/jstests/noPassthrough/count_helper_read_preference.js
@@ -34,7 +34,7 @@ db.foo.count();
// Check that there is no readPref on the command document.
assert.eq(commandsRan.length, 1);
-assert.docEq(commandsRan[0].cmd, {count: "foo", query: {}});
+assert.docEq({count: "foo", query: {}}, commandsRan[0].cmd);
commandsRan = [];
@@ -44,5 +44,5 @@ db.foo.count();
// Check that we have correctly attached the read preference to the command.
assert.eq(commandsRan.length, 1);
-assert.docEq(commandsRan[0].cmd, {count: "foo", query: {}, $readPreference: {mode: "secondary"}});
+assert.docEq({count: "foo", query: {}, $readPreference: {mode: "secondary"}}, commandsRan[0].cmd);
})();
diff --git a/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js b/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
index f35a75e7abb..6313762bf3d 100644
--- a/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
+++ b/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
@@ -28,8 +28,8 @@ for (let testCursor of [coll.find({}).sort({_id: 1}).batchSize(2),
}));
// Consume the documents from the first batch, leaving the cursor open.
- assert.docEq(testCursor.next(), {_id: 0});
- assert.docEq(testCursor.next(), {_id: 1});
+ assert.docEq({_id: 0}, testCursor.next());
+ assert.docEq({_id: 1}, testCursor.next());
assert.eq(testCursor.objsLeftInBatch(), 0);
// Issue a getMore and confirm that the failpoint throws the expected exception.
diff --git a/jstests/noPassthrough/getParameterWithDetails.js b/jstests/noPassthrough/getParameterWithDetails.js
index 331073d0d1c..a4039a1a450 100644
--- a/jstests/noPassthrough/getParameterWithDetails.js
+++ b/jstests/noPassthrough/getParameterWithDetails.js
@@ -33,7 +33,7 @@
assert.eq(resultsPlain[parameter["name"]], expectedValue, plainErrMsg);
assert.docEq(
- resultsWithDetail[parameter["name"]], expectedDetailedResultObj, detailErrMsg);
+ expectedDetailedResultObj, resultsWithDetail[parameter["name"]], detailErrMsg);
});
}
diff --git a/jstests/noPassthrough/lookup_metrics.js b/jstests/noPassthrough/lookup_metrics.js
index 810fb81f812..44082f99ac2 100644
--- a/jstests/noPassthrough/lookup_metrics.js
+++ b/jstests/noPassthrough/lookup_metrics.js
@@ -75,7 +75,7 @@ function generateExpectedCounters(joinStrategy = lookupStrategy.nonSbe, spillToD
// Compare the values of the lookup counters to an object that represents the expected values.
function compareLookupCounters(expectedCounters) {
let counters = db.serverStatus().metrics.query.lookup;
- assert.docEq(counters, expectedCounters);
+ assert.docEq(expectedCounters, counters);
}
// Run a lookup pipeline that does not get pushed down to SBE because it's querying against a view.
diff --git a/jstests/noPassthrough/lookup_pushdown.js b/jstests/noPassthrough/lookup_pushdown.js
index c644649a2df..8c6496d10f1 100644
--- a/jstests/noPassthrough/lookup_pushdown.js
+++ b/jstests/noPassthrough/lookup_pushdown.js
@@ -54,10 +54,7 @@ function verifyEqLookupNodeStrategy(
if (strategy === "IndexedLoopJoin") {
assert(indexKeyPattern,
"expected indexKeyPattern should be set for IndexedLoopJoin algorithm");
- assert.docEq(eqLookupNode.indexKeyPattern,
- indexKeyPattern,
- "expected IndexedLoopJoin node to have index " + tojson(indexKeyPattern) +
- ", got plan " + tojson(eqLookupNode));
+ assert.docEq(indexKeyPattern, eqLookupNode.indexKeyPattern);
}
}
@@ -968,7 +965,7 @@ MongoRunner.stopMongod(conn);
session.startTransaction({readConcern: {level: "snapshot"}});
function verifySingleDoc(cursor) {
- assert.docEq(cursor.next(), {_id: 0, a: 0, out: [{_id: 0, b: 0}]});
+ assert.docEq({_id: 0, a: 0, out: [{_id: 0, b: 0}]}, cursor.next());
assert(!cursor.hasNext());
}
diff --git a/jstests/noPassthrough/oplog_document_key.js b/jstests/noPassthrough/oplog_document_key.js
index cb94a93cf7d..f4b01b0ad40 100644
--- a/jstests/noPassthrough/oplog_document_key.js
+++ b/jstests/noPassthrough/oplog_document_key.js
@@ -70,18 +70,18 @@ const testWriteOplogDocumentKey = ({sharded, inTransaction}) => {
const [insertOplog, replaceOplog, updateOplog, deleteOplog] = oplogs;
const docKey = sharded ? docKeys.sharded : docKeys.unsharded;
assert.eq(insertOplog.op, 'i', insertOplog);
- assert.docEq(insertOplog.o, doc0, insertOplog);
- assert.docEq(insertOplog.o2, docKey, insertOplog);
+ assert.docEq(doc0, insertOplog.o, insertOplog);
+ assert.docEq(docKey, insertOplog.o2, insertOplog);
assert.eq(replaceOplog.op, 'u', replaceOplog);
- assert.docEq(replaceOplog.o, doc1, replaceOplog);
- assert.docEq(replaceOplog.o2, docKey, replaceOplog);
+ assert.docEq(doc1, replaceOplog.o, replaceOplog);
+ assert.docEq(docKey, replaceOplog.o2, replaceOplog);
assert.eq(updateOplog.op, 'u', updateOplog);
- assert.docEq(updateOplog.o2, docKey, updateOplog);
+ assert.docEq(docKey, updateOplog.o2, updateOplog);
assert.eq(deleteOplog.op, 'd', deleteOplog);
- assert.docEq(deleteOplog.o, docKey, deleteOplog);
+ assert.docEq(docKey, deleteOplog.o, deleteOplog);
performWrites(function largeInsert(coll) {
const largeDoc = {_id: 'x'.repeat(16 * 1024 * 1024), a: 0};
diff --git a/jstests/noPassthrough/out_merge_majority_read.js b/jstests/noPassthrough/out_merge_majority_read.js
index 3c56851e6b6..3b177293b7c 100644
--- a/jstests/noPassthrough/out_merge_majority_read.js
+++ b/jstests/noPassthrough/out_merge_majority_read.js
@@ -74,7 +74,7 @@ function runTests(sourceColl, mongodConnection) {
res = targetColl.find().sort({_id: 1});
// Only a single document is visible ($merge did not see the second insert).
- assert.docEq(res.next(), {_id: 1, state: 'merge'});
+ assert.docEq({_id: 1, state: 'merge'}, res.next());
assert(res.isExhausted());
// The same $merge but with whenMatched set to "replace".
@@ -100,9 +100,9 @@ function runTests(sourceColl, mongodConnection) {
res = targetReplaceDocsColl.find().sort({_id: 1});
// The first document must overwrite the update that the read portion of $merge did not see.
- assert.docEq(res.next(), {_id: 1, state: 'merge'});
+ assert.docEq({_id: 1, state: 'merge'}, res.next());
// The second document is the result of the independent insert that $merge did not see.
- assert.docEq(res.next(), {_id: 2, state: 'before'});
+ assert.docEq({_id: 2, state: 'before'}, res.next());
assert(res.isExhausted());
assert.commandWorked(targetColl.remove({}));
@@ -155,7 +155,7 @@ function runTests(sourceColl, mongodConnection) {
res = targetColl.find().sort({_id: 1});
// Only a single document is visible ($merge did not see the second insert).
- assert.docEq(res.next(), {_id: 2, state: 'merge'});
+ assert.docEq({_id: 2, state: 'merge'}, res.next());
assert(res.isExhausted());
}
diff --git a/jstests/noPassthrough/query_engine_stats.js b/jstests/noPassthrough/query_engine_stats.js
index 161946da2c6..bf0cef1f043 100644
--- a/jstests/noPassthrough/query_engine_stats.js
+++ b/jstests/noPassthrough/query_engine_stats.js
@@ -110,7 +110,7 @@ function generateExpectedCounters(queryFramework) {
// values.
function compareQueryEngineCounters(expectedCounters) {
let counters = db.serverStatus().metrics.query.queryFramework;
- assert.docEq(counters, expectedCounters);
+ assert.docEq(expectedCounters, counters);
}
// Start with SBE off.
diff --git a/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js b/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js
index dfe1ee7878b..ffc50d6f554 100644
--- a/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js
+++ b/jstests/noPassthrough/shardsvr_global_index_crud_bulk.js
@@ -632,13 +632,13 @@ assert.commandFailedWithCode(adminDB.runCommand({_shardsvrWriteGlobalIndexKeys:
delete oplogEntryPlain.wall;
assert.docEq(oplogEntryBulk, oplogEntryPlain);
assert.eq(oplogEntryBulk["o"]["applyOps"][0]["op"], "xi");
- assert.docEq(oplogEntryBulk["o"]["applyOps"][0]["o"]["ik"], {myKey: "insertAndRemove"});
- assert.docEq(oplogEntryBulk["o"]["applyOps"][0]["o"]["dk"],
- {shardKey: "insert", _id: "andRemove"});
+ assert.docEq({myKey: "insertAndRemove"}, oplogEntryBulk["o"]["applyOps"][0]["o"]["ik"]);
+ assert.docEq({shardKey: "insert", _id: "andRemove"},
+ oplogEntryBulk["o"]["applyOps"][0]["o"]["dk"]);
assert.eq(oplogEntryBulk["o"]["applyOps"][1]["op"], "xd");
- assert.docEq(oplogEntryBulk["o"]["applyOps"][1]["o"]["ik"], {myKey: "insertAndRemove"});
- assert.docEq(oplogEntryBulk["o"]["applyOps"][1]["o"]["dk"],
- {shardKey: "insert", _id: "andRemove"});
+ assert.docEq({myKey: "insertAndRemove"}, oplogEntryBulk["o"]["applyOps"][1]["o"]["ik"]);
+ assert.docEq({shardKey: "insert", _id: "andRemove"},
+ oplogEntryBulk["o"]["applyOps"][1]["o"]["dk"]);
}
session.endSession();
diff --git a/jstests/noPassthrough/shell_uses_correct_read_concern.js b/jstests/noPassthrough/shell_uses_correct_read_concern.js
index f3005c0f346..956612c310c 100644
--- a/jstests/noPassthrough/shell_uses_correct_read_concern.js
+++ b/jstests/noPassthrough/shell_uses_correct_read_concern.js
@@ -48,7 +48,7 @@ session.startTransaction({readConcern: {level: "snapshot"}});
assert.eq(coll.runCommand({"find": coll.getName()}).cursor.firstBatch.length, 1);
assert.eq(coll.runCommand({"find": coll.getName()}).cursor.firstBatch.length, 1);
assert.eq(coll.find({"_id": 0}).itcount(), 1);
-assert.docEq(coll.findOne({"_id": 0}), testDoc);
+assert.docEq(testDoc, coll.findOne({"_id": 0}));
assert.commandWorked(session.commitTransaction_forTesting());
assert.eq(getSnapshotRCCount(), 4);
diff --git a/jstests/noPassthrough/timeseries_direct_remove.js b/jstests/noPassthrough/timeseries_direct_remove.js
index 09d659c756c..6eec9cd69a6 100644
--- a/jstests/noPassthrough/timeseries_direct_remove.js
+++ b/jstests/noPassthrough/timeseries_direct_remove.js
@@ -37,7 +37,7 @@ assert.commandWorked(
assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
assert.commandWorked(coll.insert(docs[0]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 1));
+assert.docEq(docs.slice(0, 1), coll.find().sort({_id: 1}).toArray());
let buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
@@ -51,7 +51,7 @@ buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 0);
assert.commandWorked(coll.insert(docs[1]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(1, 2));
+assert.docEq(docs.slice(1, 2), coll.find().sort({_id: 1}).toArray());
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
@@ -72,7 +72,7 @@ assert.eq(removeResult.nRemoved, 1);
fpInsert.off();
awaitInsert();
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(2, 3));
+assert.docEq(docs.slice(2, 3), coll.find().sort({_id: 1}).toArray());
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
diff --git a/jstests/noPassthrough/timeseries_direct_remove_conflict.js b/jstests/noPassthrough/timeseries_direct_remove_conflict.js
index 20876bde2b4..4fa642eaeca 100644
--- a/jstests/noPassthrough/timeseries_direct_remove_conflict.js
+++ b/jstests/noPassthrough/timeseries_direct_remove_conflict.js
@@ -38,7 +38,7 @@ assert.commandWorked(
assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
assert.commandWorked(coll.insert(docs[0]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 1));
+assert.docEq(docs.slice(0, 1), coll.find().sort({_id: 1}).toArray());
let buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
@@ -69,7 +69,7 @@ awaitInsert();
// The expected ordering is that the insert finished, then the remove deleted the bucket document,
// so there should be no documents left.
-assert.docEq(coll.find().sort({_id: 1}).toArray().length, 0);
+assert.docEq(0, coll.find().sort({_id: 1}).toArray().length);
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 0);
@@ -77,7 +77,7 @@ assert.eq(buckets.length, 0);
// Now another insert should generate a new bucket.
assert.commandWorked(coll.insert(docs[2]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(2, 3));
+assert.docEq(docs.slice(2, 3), coll.find().sort({_id: 1}).toArray());
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
diff --git a/jstests/noPassthrough/timeseries_direct_remove_reopen.js b/jstests/noPassthrough/timeseries_direct_remove_reopen.js
index e8aea3e228a..dbbd7898181 100644
--- a/jstests/noPassthrough/timeseries_direct_remove_reopen.js
+++ b/jstests/noPassthrough/timeseries_direct_remove_reopen.js
@@ -40,7 +40,7 @@ assert.commandWorked(testDB.createCollection(
assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
assert.commandWorked(coll.insert(docs[0]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 1));
+assert.docEq(docs.slice(0, 1), coll.find().sort({_id: 1}).toArray());
let buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
@@ -82,7 +82,7 @@ awaitInsert();
// The expected ordering is that the remove finishes, then the insert opens a new bucket.
-assert.docEq(coll.find().sort({_id: 1}).toArray().length, 1);
+assert.docEq(1, coll.find().sort({_id: 1}).toArray().length);
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
diff --git a/jstests/noPassthrough/timeseries_direct_update.js b/jstests/noPassthrough/timeseries_direct_update.js
index 4399efcf3ce..e2e51ea8134 100644
--- a/jstests/noPassthrough/timeseries_direct_update.js
+++ b/jstests/noPassthrough/timeseries_direct_update.js
@@ -37,7 +37,7 @@ assert.commandWorked(
assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
assert.commandWorked(coll.insert(docs[0]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 1));
+assert.docEq(docs.slice(0, 1), coll.find().sort({_id: 1}).toArray());
let buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
@@ -57,7 +57,7 @@ assert.eq(buckets[0].control.max[timeFieldName], times[0]);
assert(buckets[0].control.closed);
assert.commandWorked(coll.insert(docs[1]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 2));
+assert.docEq(docs.slice(0, 2), coll.find().sort({_id: 1}).toArray());
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 2);
@@ -81,7 +81,7 @@ assert.eq(updateResult.nModified, 1);
fpInsert.off();
awaitInsert();
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 3));
+assert.docEq(docs.slice(0, 3), coll.find().sort({_id: 1}).toArray());
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 3);
diff --git a/jstests/noPassthrough/timeseries_direct_update_conflict.js b/jstests/noPassthrough/timeseries_direct_update_conflict.js
index 8bd00d174d8..89bfb424626 100644
--- a/jstests/noPassthrough/timeseries_direct_update_conflict.js
+++ b/jstests/noPassthrough/timeseries_direct_update_conflict.js
@@ -38,7 +38,7 @@ assert.commandWorked(
assert.contains(bucketsColl.getName(), testDB.getCollectionNames());
assert.commandWorked(coll.insert(docs[0]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 1));
+assert.docEq(docs.slice(0, 1), coll.find().sort({_id: 1}).toArray());
let buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
@@ -75,7 +75,7 @@ awaitInsert();
// The expected ordering is that the insert finished, then the update overwrote the bucket document,
// so there should be one document, and a closed flag.
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 1));
+assert.docEq(docs.slice(0, 1), coll.find().sort({_id: 1}).toArray());
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
@@ -86,7 +86,7 @@ assert(buckets[0].control.closed);
// Now another insert should generate a new bucket.
assert.commandWorked(coll.insert(docs[2]));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), [docs[0], docs[2]]);
+assert.docEq([docs[0], docs[2]], coll.find().sort({_id: 1}).toArray());
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 2);
diff --git a/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js b/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js
index b35226b63e6..44c922a6b79 100644
--- a/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js
+++ b/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js
@@ -71,7 +71,7 @@ for (let i = 0; i < numColls; i++) {
const checkColl = function(num, numBuckets) {
jsTestLog('Checking collection ' + num);
- assert.docEq(coll(num).find().sort({_id: 1}).toArray(), docs);
+ assert.docEq(docs, coll(num).find().sort({_id: 1}).toArray());
const buckets = bucketsColl(num).find().toArray();
assert.eq(buckets.length,
numBuckets,
diff --git a/jstests/noPassthrough/timeseries_insert_after_failed_insert.js b/jstests/noPassthrough/timeseries_insert_after_failed_insert.js
index 6ef97fe30ff..c719b7863a3 100644
--- a/jstests/noPassthrough/timeseries_insert_after_failed_insert.js
+++ b/jstests/noPassthrough/timeseries_insert_after_failed_insert.js
@@ -45,7 +45,7 @@ const runTest = function(ordered) {
assert.commandWorked(coll.insert(docs[1], {ordered: ordered}));
// There should not be any leftover state from the failed insert.
- assert.docEq(coll.find().toArray(), [docs[1]]);
+ assert.docEq([docs[1]], coll.find().toArray());
const buckets = bucketsColl.find().sort({['control.min.' + timeFieldName]: 1}).toArray();
jsTestLog('Checking buckets: ' + tojson(buckets));
assert.eq(buckets.length, 1);
diff --git a/jstests/noPassthrough/timeseries_insert_invalid_timefield.js b/jstests/noPassthrough/timeseries_insert_invalid_timefield.js
index e77678ddb14..9fc0ed6e334 100644
--- a/jstests/noPassthrough/timeseries_insert_invalid_timefield.js
+++ b/jstests/noPassthrough/timeseries_insert_invalid_timefield.js
@@ -38,13 +38,13 @@ const goodDocs = [
];
assert.commandWorked(coll.insert(goodDocs[0]));
assert.eq(1, coll.count());
-assert.docEq(coll.find().toArray(), [goodDocs[0]]);
+assert.docEq([goodDocs[0]], coll.find().toArray());
// now make sure we reject if timeField is missing or isn't a valid BSON datetime
let mixedDocs = [{meta: "B", data: true}, goodDocs[1], {time: "invalid", meta: "B", data: false}];
assert.commandFailedWithCode(coll.insert(mixedDocs, {ordered: false}), ErrorCodes.BadValue);
assert.eq(coll.count(), 2);
-assert.docEq(coll.find().toArray(), goodDocs);
+assert.docEq(goodDocs, coll.find().toArray());
assert.eq(null, coll.findOne({meta: mixedDocs[0].meta}));
assert.eq(null, coll.findOne({meta: mixedDocs[2].meta}));
diff --git a/jstests/noPassthrough/timeseries_insert_ordered_false.js b/jstests/noPassthrough/timeseries_insert_ordered_false.js
index 7ae7da7f95e..5434415c2a0 100644
--- a/jstests/noPassthrough/timeseries_insert_ordered_false.js
+++ b/jstests/noPassthrough/timeseries_insert_ordered_false.js
@@ -57,13 +57,13 @@ function runTest(conn, failPointConn, shardColl) {
docs.length - resWithCannotContinue.nInserted - 1);
for (let i = 0; i < resWithCannotContinue.getWriteErrors().length; i++) {
assert.eq(resWithCannotContinue.getWriteErrors()[i].index, i);
- assert.docEq(resWithCannotContinue.getWriteErrors()[i].getOperation(), docs[i + 1]);
+ assert.docEq(docs[i + 1], resWithCannotContinue.getWriteErrors()[i].getOperation());
}
//
// Test with failPoint which can allow subsequent write operations of the batch.
//
- assert.docEq(coll.find().sort({_id: 1}).toArray(), []);
+ assert.docEq([], coll.find().sort({_id: 1}).toArray());
assert.eq(bucketsColl.count(),
0,
'Expected zero buckets but found: ' + tojson(bucketsColl.find().toArray()));
@@ -82,10 +82,10 @@ function runTest(conn, failPointConn, shardColl) {
assert.eq(res.getWriteErrors().length, docs.length - res.nInserted - 1);
for (let i = 0; i < res.getWriteErrors().length; i++) {
assert.eq(res.getWriteErrors()[i].index, i);
- assert.docEq(res.getWriteErrors()[i].getOperation(), docs[i + 1]);
+ assert.docEq(docs[i + 1], res.getWriteErrors()[i].getOperation());
}
- assert.docEq(coll.find().sort({_id: 1}).toArray(), [docs[0], docs[3], docs[4]]);
+ assert.docEq([docs[0], docs[3], docs[4]], coll.find().sort({_id: 1}).toArray());
assert.eq(bucketsColl.count(),
2,
'Expected two buckets but found: ' + tojson(bucketsColl.find().toArray()));
@@ -94,7 +94,7 @@ function runTest(conn, failPointConn, shardColl) {
// The documents should go into two new buckets due to the failed insert on the existing bucket.
assert.commandWorked(coll.insert(docs.slice(1, 3), {ordered: false}));
- assert.docEq(coll.find().sort({_id: 1}).toArray(), docs);
+ assert.docEq(docs, coll.find().sort({_id: 1}).toArray());
// If we allow bucket reopening, we will save out on opening another bucket.
const expectedBucketCount =
(TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB)) ? 2 : 3;
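For unordered inserts, each write error records its index and the original operation, which these assertions now treat as the actual value to compare against the expected source document. A sketch of the loop under the new convention (docs is the assumed source array; in this test the first document succeeds, so failed operations start at docs[1]):

    const res = coll.insert(docs, {ordered: false});
    for (let i = 0; i < res.getWriteErrors().length; i++) {
        // The failed operation must match the document we tried to insert.
        assert.docEq(docs[i + 1], res.getWriteErrors()[i].getOperation());
    }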
diff --git a/jstests/noPassthrough/timeseries_insert_ordered_true.js b/jstests/noPassthrough/timeseries_insert_ordered_true.js
index f8b8a9104d2..cbb2f04a78c 100644
--- a/jstests/noPassthrough/timeseries_insert_ordered_true.js
+++ b/jstests/noPassthrough/timeseries_insert_ordered_true.js
@@ -40,11 +40,11 @@ jsTestLog('Checking insert result: ' + tojson(res));
assert.eq(res.nInserted, 1);
assert.eq(res.getWriteErrors().length, 1);
assert.eq(res.getWriteErrors()[0].index, 1);
-assert.docEq(res.getWriteErrors()[0].getOperation(), docs[3]);
+assert.docEq(docs[3], res.getWriteErrors()[0].getOperation());
// The document that successfully inserted should go into a new bucket due to the failed insert on
// the existing bucket.
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 3));
+assert.docEq(docs.slice(0, 3), coll.find().sort({_id: 1}).toArray());
// If we allow bucket reopening, we will save out on opening another bucket.
let expectedBucketCount = (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB)) ? 2 : 3;
assert.eq(bucketsColl.count(),
@@ -57,7 +57,7 @@ fp2.off();
// The documents should go into two new buckets due to the failed insert on the existing bucket.
assert.commandWorked(coll.insert(docs.slice(3), {ordered: true}));
-assert.docEq(coll.find().sort({_id: 1}).toArray(), docs);
+assert.docEq(docs, coll.find().sort({_id: 1}).toArray());
// If we allow bucket reopening, we will save out on opening new buckets. Resulting in one bucket
// per unique meta field.
expectedBucketCount = (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB)) ? 3 : 5;
diff --git a/jstests/noPassthrough/timeseries_insert_rollback.js b/jstests/noPassthrough/timeseries_insert_rollback.js
index 54451ea72c9..460bc95cfe8 100644
--- a/jstests/noPassthrough/timeseries_insert_rollback.js
+++ b/jstests/noPassthrough/timeseries_insert_rollback.js
@@ -53,7 +53,7 @@ rollbackTest.transitionToSteadyStateOperations();
assert.commandWorked(coll.insert(docs[2], {ordered: true}));
assert.commandWorked(coll.insert(docs[3], {ordered: false}));
-assert.docEq(coll.find().toArray(), docs.slice(2));
+assert.docEq(docs.slice(2), coll.find().toArray());
const buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 2, 'Expected two buckets but found: ' + tojson(buckets));
diff --git a/jstests/noPassthrough/timeseries_write_concern.js b/jstests/noPassthrough/timeseries_write_concern.js
index 5d6e9de6749..1b08b283edc 100644
--- a/jstests/noPassthrough/timeseries_write_concern.js
+++ b/jstests/noPassthrough/timeseries_write_concern.js
@@ -69,7 +69,7 @@ assert.eq(
restartReplicationOnSecondaries(replTest);
awaitInsert();
-assert.docEq(coll.find().toArray(), docs);
+assert.docEq(docs, coll.find().toArray());
const buckets = bucketsColl.find().toArray();
assert.eq(buckets.length, 1, 'Expected one bucket but found: ' + tojson(buckets));
const serverStatus = assert.commandWorked(testDB.serverStatus()).bucketCatalog;
diff --git a/jstests/noPassthrough/update_post_image_validation.js b/jstests/noPassthrough/update_post_image_validation.js
index ba2e6ecf6ab..228d449f828 100644
--- a/jstests/noPassthrough/update_post_image_validation.js
+++ b/jstests/noPassthrough/update_post_image_validation.js
@@ -14,13 +14,13 @@ const testDB = conn.getDB("test");
assert.commandWorked(testDB.coll.insert({_id: 0, a: []}));
assert.commandWorked(
testDB.coll.update({_id: 0}, {$set: {"a.1": 0, "a.0": {$ref: "coll", $db: "test"}}}));
-assert.docEq(testDB.coll.findOne({_id: 0}), {_id: 0, a: [{$ref: "coll", $db: "test"}, 0]});
+assert.docEq({_id: 0, a: [{$ref: "coll", $db: "test"}, 0]}, testDB.coll.findOne({_id: 0}));
// Test validation of modified array elements that are accessed using a string that is
// numerically equivalent to their fieldname. The modified element is valid.
assert.commandWorked(testDB.coll.insert({_id: 1, a: [0]}));
assert.commandWorked(testDB.coll.update({_id: 1}, {$set: {"a.00": {$ref: "coll", $db: "test"}}}));
-assert.docEq(testDB.coll.findOne({_id: 1}), {_id: 1, a: [{$ref: "coll", $db: "test"}]});
+assert.docEq({_id: 1, a: [{$ref: "coll", $db: "test"}]}, testDB.coll.findOne({_id: 1}));
MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/write_change_stream_pit_preimage_in_transaction.js b/jstests/noPassthrough/write_change_stream_pit_preimage_in_transaction.js
index 3801e371b0d..1b237ea9fb8 100644
--- a/jstests/noPassthrough/write_change_stream_pit_preimage_in_transaction.js
+++ b/jstests/noPassthrough/write_change_stream_pit_preimage_in_transaction.js
@@ -120,7 +120,7 @@ function assertDocumentInsertedAtTimestamp(commitTimestamp, insertedDocumentId)
// collection 'coll' was written at timestamp 'commitTimestamp'.
function assertDocumentPreImageWrittenAtTimestamp(commitTimestamp, modifiedDocumentId) {
const beforeCommitTimestamp = getPreviousTimestampValue(commitTimestamp);
- const preImagesCollection = getPreImagesCollection(testDB);
+ const preImagesCollection = getPreImagesCollection(testDB.getMongo());
assert.eq(0,
preImagesCollection.find({"preImage._id": modifiedDocumentId})
.readConcern("snapshot", beforeCommitTimestamp)
diff --git a/jstests/noPassthroughWithMongod/default_read_pref.js b/jstests/noPassthroughWithMongod/default_read_pref.js
index 6ff888c1c8c..b64a92ae4cf 100644
--- a/jstests/noPassthroughWithMongod/default_read_pref.js
+++ b/jstests/noPassthroughWithMongod/default_read_pref.js
@@ -44,7 +44,7 @@ try {
db.runReadCommand({ping: 1});
assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd, {ping: 1}, "The command should not have been wrapped.");
+ assert.docEq({ping: 1}, commandsRan[0].cmd, "The command should not have been wrapped.");
assert.eq(
commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
diff --git a/jstests/noPassthroughWithMongod/log_component_helpers.js b/jstests/noPassthroughWithMongod/log_component_helpers.js
index e609bbc936c..79851dad42f 100644
--- a/jstests/noPassthroughWithMongod/log_component_helpers.js
+++ b/jstests/noPassthroughWithMongod/log_component_helpers.js
@@ -11,11 +11,11 @@ var originalSettings =
// getLogComponents
var components1 = mongo.getLogComponents();
-assert.docEq(components1, originalSettings);
+assert.docEq(originalSettings, components1);
// getLogComponents via db
var components2 = db.getLogComponents();
-assert.docEq(components2, originalSettings);
+assert.docEq(originalSettings, components2);
// setLogLevel - default component
mongo.setLogLevel(2);
diff --git a/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js b/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js
index dfa70c375b1..9804c41cea4 100644
--- a/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js
+++ b/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js
@@ -62,10 +62,10 @@ let testMixedTypeQuerying = () => {
// Compare the results.
try {
assert.docEq(
- // Is timeseries.
- db.test.find({[path]: {[compare]: val}}, {_id: 0}).toArray(),
// Isn't timeseries.
- db.control.find({[path]: {[compare]: val}}, {_id: 0}).toArray());
+ db.control.find({[path]: {[compare]: val}}, {_id: 0}).toArray(),
+ // Is timeseries.
+ db.test.find({[path]: {[compare]: val}}, {_id: 0}).toArray());
return true;
} catch (e) {
printjson(
diff --git a/jstests/readonly/aggregate.js b/jstests/readonly/aggregate.js
index 21fc050b4f8..4bd20c4c2ec 100644
--- a/jstests/readonly/aggregate.js
+++ b/jstests/readonly/aggregate.js
@@ -73,8 +73,8 @@ runReadOnlyTest(function() {
{$limit: 2},
];
- assert.docEq(readableCollection.aggregate(mostAwardsPipeline).toArray(),
- [{_id: "Spotlight", count: 3}, {_id: "The Revenant", count: 3}]);
+ assert.docEq([{_id: "Spotlight", count: 3}, {_id: "The Revenant", count: 3}],
+ readableCollection.aggregate(mostAwardsPipeline).toArray());
}
};
}());
diff --git a/jstests/readonly/catalog_ops.js b/jstests/readonly/catalog_ops.js
index 023cf765d46..f3f87fee2ca 100644
--- a/jstests/readonly/catalog_ops.js
+++ b/jstests/readonly/catalog_ops.js
@@ -61,7 +61,7 @@ runReadOnlyTest(function() {
});
var expectedIndexes = [{_id: 1}].concat(this.indexSpecs);
- assert.docEq(actualIndexes, expectedIndexes);
+ assert.docEq(expectedIndexes, actualIndexes);
// Check that createIndexes fails.
assert.commandFailed(
diff --git a/jstests/replsets/abort_in_progress_transactions_on_step_up.js b/jstests/replsets/abort_in_progress_transactions_on_step_up.js
index 73fd930a08b..d6d5ae1a829 100644
--- a/jstests/replsets/abort_in_progress_transactions_on_step_up.js
+++ b/jstests/replsets/abort_in_progress_transactions_on_step_up.js
@@ -144,8 +144,8 @@ const secondDoc = {
};
assert.commandWorked(newSession.getDatabase(dbName).getCollection(collName).insert(secondDoc));
assert.commandWorked(newSession.commitTransaction_forTesting());
-assert.docEq(testDB.getCollection(collName).find().toArray(), [secondDoc]);
-assert.docEq(newTestDB.getCollection(collName).find().toArray(), [secondDoc]);
+assert.docEq([secondDoc], testDB.getCollection(collName).find().toArray());
+assert.docEq([secondDoc], newTestDB.getCollection(collName).find().toArray());
replTest.stopSet();
})();
diff --git a/jstests/replsets/change_stream_pit_pre_images.js b/jstests/replsets/change_stream_pit_pre_images.js
index a09adb23398..d6710943055 100644
--- a/jstests/replsets/change_stream_pit_pre_images.js
+++ b/jstests/replsets/change_stream_pit_pre_images.js
@@ -33,9 +33,17 @@ replTest.initiate();
// Asserts that documents in the pre-images collection on the primary node are the same as on a
// secondary node.
function assertPreImagesCollectionOnPrimaryMatchesSecondary() {
- assert.docEq(getPreImages(replTest.getPrimary()),
- getPreImages(replTest.getSecondary()),
- "pre-images collection content differs");
+ function detailedError() {
+ return "pre-images collection on primary " + tojson(getPreImages(replTest.getPrimary())) +
+ " does not match pre-images collection on secondary " +
+ tojson(getPreImages(replTest.getSecondary()));
+ }
+ const preImagesCollOnPrimary = getPreImagesCollection(replTest.getPrimary());
+ const preImagesCollOnSecondary = getPreImagesCollection(replTest.getSecondary());
+ assert.eq(preImagesCollOnPrimary.count(), preImagesCollOnSecondary.count(), detailedError);
+ assert.eq(preImagesCollOnPrimary.hashAllDocs(),
+ preImagesCollOnSecondary.hashAllDocs(),
+ detailedError);
}
for (const [collectionName, collectionOptions] of [
@@ -57,17 +65,17 @@ for (const [collectionName, collectionOptions] of [
assert.commandWorked(coll.insert({_id: 5, v: 1}));
// Issue "findAndModify" command to return a document version before update.
- assert.docEq(coll.findAndModify({query: {_id: 5}, update: {$inc: {v: 1}}, new: false}),
- {_id: 5, v: 1});
+ assert.docEq({_id: 5, v: 1},
+ coll.findAndModify({query: {_id: 5}, update: {$inc: {v: 1}}, new: false}));
// Issue "findAndModify" command to return a document version after update.
- assert.docEq(coll.findAndModify({query: {_id: 5}, update: {$inc: {v: 1}}, new: true}),
- {_id: 5, v: 3});
+ assert.docEq({_id: 5, v: 3},
+ coll.findAndModify({query: {_id: 5}, update: {$inc: {v: 1}}, new: true}));
// Issue "findAndModify" command to return a document version before deletion.
assert.docEq(
- coll.findAndModify({query: {_id: 5}, new: false, remove: true, writeConcern: {w: 2}}),
- {_id: 5, v: 3});
+ {_id: 5, v: 3},
+ coll.findAndModify({query: {_id: 5}, new: false, remove: true, writeConcern: {w: 2}}));
}
function issueWriteCommandsInTransaction(testDB) {
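This hunk replaces a single docEq over the full pre-images contents with a count check plus a document-hash comparison, presumably to keep failure output manageable when the collections are large. Note that assert.eq() accepts a function as its message argument, so the tojson() dumps inside detailedError() are only built if the assertion actually fails; the arrow-function messages later in this patch rely on the same behavior. A sketch of the lazy-message pattern (collA and collB are illustrative handles):

    // The diagnostic string is only constructed on failure.
    assert.eq(collA.count(),
              collB.count(),
              () => "contents differ: " + tojson(collA.find().toArray()));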
diff --git a/jstests/replsets/dbhash_system_collections.js b/jstests/replsets/dbhash_system_collections.js
index 3eb08e02fb9..e681ebc1e7c 100644
--- a/jstests/replsets/dbhash_system_collections.js
+++ b/jstests/replsets/dbhash_system_collections.js
@@ -38,11 +38,11 @@ function checkDbHash(mongo) {
var res = testDB.runCommand('dbhash');
assert.commandWorked(res);
- assert.docEq(Object.keys(res.collections), replicatedSystemCollections, tojson(res));
+ assert.docEq(replicatedSystemCollections, Object.keys(res.collections), tojson(res));
res = adminDB.runCommand('dbhash');
assert.commandWorked(res);
- assert.docEq(Object.keys(res.collections), replicatedAdminSystemCollections, tojson(res));
+ assert.docEq(replicatedAdminSystemCollections, Object.keys(res.collections), tojson(res));
return res.md5;
}
diff --git a/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js b/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
index 4c330de1133..95ec8d68f19 100644
--- a/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
+++ b/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
@@ -47,7 +47,7 @@ assert.commandWorked(
sessionDb.runCommand({insert: "foo", documents: [{_id: 3}], txnNumber: NumberLong(1)}));
// Assert documents inserted.
-assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 1}, {_id: 3}]);
+assert.docEq([{_id: 1}, {_id: 3}], sessionColl.find().sort({_id: 1}).toArray());
replSet.stopSet();
}());
diff --git a/jstests/replsets/initial_sync_commit_prepared_transaction.js b/jstests/replsets/initial_sync_commit_prepared_transaction.js
index a9ce04f469d..f9ea4477926 100644
--- a/jstests/replsets/initial_sync_commit_prepared_transaction.js
+++ b/jstests/replsets/initial_sync_commit_prepared_transaction.js
@@ -105,7 +105,7 @@ jsTestLog("Initial sync completed");
// Make sure the transaction committed properly and is reflected after the initial sync.
let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
-assert.docEq(res, {_id: 2}, res);
+assert.docEq({_id: 2}, res);
// Step up the secondary after initial sync is done and make sure we can successfully run
// another transaction.
@@ -120,7 +120,7 @@ assert.commandWorked(sessionColl2.insert({_id: 4}));
let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
res = newPrimary.getDB(dbName).getCollection(collName).findOne({_id: 4});
-assert.docEq(res, {_id: 4}, res);
+assert.docEq({_id: 4}, res);
replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
index 597e1cefc4b..4b2e1d53188 100644
--- a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
+++ b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
@@ -152,7 +152,7 @@ assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
// Make sure the first transaction committed properly and is reflected after the initial sync.
let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
-assert.docEq(res, {_id: 2}, res);
+assert.docEq({_id: 2}, res);
jsTestLog("Stepping up the secondary");
diff --git a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
index 176309bfa86..164664a9a76 100644
--- a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
+++ b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
@@ -111,7 +111,7 @@ assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
// Make sure the first transaction committed properly and is reflected after the initial sync.
let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
-assert.docEq(res, {_id: 2}, res);
+assert.docEq({_id: 2}, res);
jsTestLog("Aborting the second transaction");
diff --git a/jstests/replsets/initial_sync_oplog_hole.js b/jstests/replsets/initial_sync_oplog_hole.js
index 44d666f7a3a..777ef461433 100644
--- a/jstests/replsets/initial_sync_oplog_hole.js
+++ b/jstests/replsets/initial_sync_oplog_hole.js
@@ -87,11 +87,11 @@ joinHungWrite();
jsTestLog("Checking that primary has all data items.");
// Make sure the primary collection has all three data items.
-assert.docEq(primaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
+assert.docEq([{"_id": "a"}, {"_id": "b"}, {"_id": "c"}], primaryColl.find().toArray());
jsTestLog("Checking that secondary has all data items.");
replTest.awaitReplication();
-assert.docEq(secondaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
+assert.docEq([{"_id": "a"}, {"_id": "b"}, {"_id": "c"}], secondaryColl.find().toArray());
replTest.stopSet();
})();
diff --git a/jstests/replsets/no_progress_updates_during_initial_sync.js b/jstests/replsets/no_progress_updates_during_initial_sync.js
index ae9b3557554..cc4d9a51c93 100644
--- a/jstests/replsets/no_progress_updates_during_initial_sync.js
+++ b/jstests/replsets/no_progress_updates_during_initial_sync.js
@@ -117,11 +117,11 @@ assert.neq(nullWallTime, secondaryOpTimes.optimeDurableDate, () => tojson(second
// ...the primary thinks they're still null as they were null in the heartbeat responses.
const primaryStatusRes = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
const secondaryOpTimesAsSeenByPrimary = primaryStatusRes.members[2];
-assert.docEq(secondaryOpTimesAsSeenByPrimary.optime,
- nullOpTime,
+assert.docEq(nullOpTime,
+ secondaryOpTimesAsSeenByPrimary.optime,
() => tojson(secondaryOpTimesAsSeenByPrimary));
-assert.docEq(secondaryOpTimesAsSeenByPrimary.optimeDurable,
- nullOpTime,
+assert.docEq(nullOpTime,
+ secondaryOpTimesAsSeenByPrimary.optimeDurable,
() => tojson(secondaryOpTimesAsSeenByPrimary));
assert.eq(nullWallTime,
secondaryOpTimesAsSeenByPrimary.optimeDate,
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
index 99018d864c5..e41ea9ca1f4 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
@@ -151,7 +151,7 @@ jsTestLog("Checking that the first transaction is properly prepared");
// Make sure that we can't read changes to the document from the first prepared transaction
// after initial sync.
-assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1});
+assert.docEq({_id: 1}, secondaryColl.findOne({_id: 1}));
jsTestLog("Committing the first transaction");
@@ -160,13 +160,13 @@ replTest.awaitReplication();
// Make sure that we can see the data from a committed transaction on the secondary if it was
// applied during secondary oplog application.
-assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+assert.docEq({_id: 1, a: 1}, secondaryColl.findOne({_id: 1}));
jsTestLog("Checking that the fourth transaction is properly prepared");
// Make sure that we can't read changes to the document from the first prepared transaction
// after initial sync.
-assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4});
+assert.docEq({_id: 4}, secondaryColl.findOne({_id: 4}));
jsTestLog("Committing the fourth transaction");
@@ -175,7 +175,7 @@ replTest.awaitReplication();
// Make sure that we can see the data from a committed transaction on the secondary if it was
// applied during secondary oplog application.
-assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4, a: 1});
+assert.docEq({_id: 4, a: 1}, secondaryColl.findOne({_id: 4}));
jsTestLog("Stepping up the secondary");
@@ -232,7 +232,7 @@ session2.startTransaction();
assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
-assert.docEq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
+assert.docEq({_id: 2, a: 3}, testColl.findOne({_id: 2}));
// Force the third session to use the same lsid and txnNumber as from before the restart. This
// ensures that we're working with the same session and transaction.
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js
index 0cd745addc7..fba44289095 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js
@@ -99,7 +99,7 @@ jsTestLog("Checking that the first transaction is properly prepared");
// Make sure that we can't read changes to the document from the first prepared transaction
// after initial sync.
-assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1});
+assert.docEq({_id: 1}, secondaryColl.findOne({_id: 1}));
jsTestLog("Committing the transaction");
@@ -108,7 +108,7 @@ replTest.awaitReplication();
// Make sure that we can see the data from a committed transaction on the secondary if it was
// applied during secondary oplog application.
-assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+assert.docEq({_id: 1, a: 1}, secondaryColl.findOne({_id: 1}));
replTest.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
index 84b65d831a4..423c59307d2 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
@@ -129,7 +129,7 @@ replTest.awaitReplication();
awaitIndexBuild();
// Make sure that we can see the data from the committed transaction on the secondary.
-assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 2});
+assert.docEq({_id: 1, a: 2}, secondaryColl.findOne({_id: 1}));
replTest.stopSet();
})();
diff --git a/jstests/replsets/rollback_files_no_prepare_conflict.js b/jstests/replsets/rollback_files_no_prepare_conflict.js
index 40cc954b068..25d0aa3f8de 100644
--- a/jstests/replsets/rollback_files_no_prepare_conflict.js
+++ b/jstests/replsets/rollback_files_no_prepare_conflict.js
@@ -50,7 +50,7 @@ jsTestLog("Verify that the document is in the same state as it was at the common
primary = rollbackTest.getPrimary();
testDB = primary.getDB(dbName);
testColl = testDB.getCollection(collName);
-assert.docEq(testColl.findOne({_id: 42}), {_id: 42, a: "one"});
+assert.docEq({_id: 42, a: "one"}, testColl.findOne({_id: 42}));
rollbackTest.stop();
})();
diff --git a/jstests/replsets/step_down_on_secondary.js b/jstests/replsets/step_down_on_secondary.js
index 98ff70506a4..c74263f7e34 100644
--- a/jstests/replsets/step_down_on_secondary.js
+++ b/jstests/replsets/step_down_on_secondary.js
@@ -128,9 +128,9 @@ assert.eq(replMetrics.stateTransition.lastStateTransition, "stepDown");
assert.eq(replMetrics.stateTransition.userOperationsKilled, 1, replMetrics);
jsTestLog("Check nodes have correct data");
-assert.docEq(newPrimary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
+assert.docEq([{_id: 0, b: 1}], newPrimary.getDB(dbName)[collName].find({_id: 0}).toArray());
rst.awaitReplication();
-assert.docEq(primary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
+assert.docEq([{_id: 0, b: 1}], primary.getDB(dbName)[collName].find({_id: 0}).toArray());
rst.stopSet();
})();
diff --git a/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js b/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js
index 7a97b75c6c0..b6039175c44 100644
--- a/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js
+++ b/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js
@@ -114,7 +114,7 @@ hangDuringCollectionClone.off();
// verify the sync source is still the donor's old primary.
TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts));
assert.eq(recipientColl.find().itcount(), docs1.length + docs2.length);
-assert.docEq(recipientColl.find().sort({_id: 1}).toArray(), docs1.concat(docs2));
+assert.docEq(docs1.concat(docs2), recipientColl.find().sort({_id: 1}).toArray());
verifySyncSource(recipientPrimary, migrationId, donorPrimary.host);
tenantMigrationTest.stop();
diff --git a/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js b/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js
index 13eef15e0a9..035cbacd900 100644
--- a/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js
+++ b/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js
@@ -100,7 +100,7 @@ const tenantMigrationFailoverTest = function(isTimeSeries, createCollFn, docs) {
// Check that recipient has cloned all documents in the collection.
recipientColl = newRecipientPrimary.getDB(dbName).getCollection(collName);
assert.eq(docs.length, recipientColl.find().itcount());
- assert.docEq(recipientColl.find().sort({_id: 1}).toArray(), docs);
+ assert.docEq(docs, recipientColl.find().sort({_id: 1}).toArray());
TenantMigrationUtil.checkTenantDBHashes({
donorRst: tenantMigrationTest.getDonorRst(),
recipientRst: tenantMigrationTest.getRecipientRst(),
diff --git a/jstests/replsets/txn_override_unittests.js b/jstests/replsets/txn_override_unittests.js
index d50caea8fd0..d61666a24d1 100644
--- a/jstests/replsets/txn_override_unittests.js
+++ b/jstests/replsets/txn_override_unittests.js
@@ -365,10 +365,10 @@ const retryOnNetworkErrorTests = [
let obj2 = {_id: 2, x: 5};
assert.commandWorked(coll1.insert(obj1));
assert.commandWorked(coll1.insert(obj2));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
+ assert.docEq([{_id: 1, x: 5}, {_id: 2, x: 5}], coll1.find().toArray());
obj1.x = 7;
assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+ assert.docEq([{_id: 1, x: 5}, {_id: 2, x: 8}], coll1.find().toArray());
}
},
{
@@ -1398,13 +1398,13 @@ const txnOverridePlusRetryOnNetworkErrorTests = [
let obj2 = {_id: 2, x: 5};
assert.commandWorked(coll1.insert(obj1));
assert.commandWorked(coll1.insert(obj2));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
+ assert.docEq([{_id: 1, x: 5}, {_id: 2, x: 5}], coll1.find().toArray());
obj1.x = 7;
assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+ assert.docEq([{_id: 1, x: 5}, {_id: 2, x: 8}], coll1.find().toArray());
endCurrentTransactionIfOpen();
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+ assert.docEq([{_id: 1, x: 5}, {_id: 2, x: 8}], coll1.find().toArray());
}
},
{
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index 686d1d1085f..de93e2154ef 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -52,7 +52,7 @@ assert.commandFailedWithCode(coll.update(value, Object.merge(value, {i: [3, 4]})
// Multi-update the value with other fields (won't work, but no error)
value = coll.findOne({i: 1});
assert.commandWorked(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
-assert.docEq(coll.findOne({i: 1}, {_id: 0}), {i: 1, j: 2});
+assert.docEq({i: 1, j: 2}, coll.findOne({i: 1}, {_id: 0}));
// Query the value with other fields (won't work, but no error)
value = coll.findOne({i: 1});
diff --git a/jstests/sharding/change_stream_against_shard_mongod.js b/jstests/sharding/change_stream_against_shard_mongod.js
index 4aadb68a3ab..e66571caeb5 100644
--- a/jstests/sharding/change_stream_against_shard_mongod.js
+++ b/jstests/sharding/change_stream_against_shard_mongod.js
@@ -42,7 +42,7 @@ for (let event of expectedEvents) {
assert.soon(() => csCursor.hasNext());
const nextDoc = csCursor.next();
assert.eq(nextDoc.operationType, event.op);
- assert.docEq(nextDoc.fullDocument, event.doc);
+ assert.docEq(event.doc, nextDoc.fullDocument);
}
st.stop();
diff --git a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
index 7102cfc0078..2b49882d622 100644
--- a/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
+++ b/jstests/sharding/change_stream_enforce_max_time_ms_on_mongos.js
@@ -157,7 +157,7 @@ startTime = (new Date()).getTime();
const csResult = assert.commandWorked(mongosDB.runCommand(
{getMore: csCursorId, collection: mongosColl.getName(), maxTimeMS: thirtyMins}));
assert.lte((new Date()).getTime() - startTime, fiveMins);
-assert.docEq(csResult.cursor.nextBatch[0].fullDocument, {_id: -1});
+assert.docEq({_id: -1}, csResult.cursor.nextBatch[0].fullDocument);
// Open a change stream with the default maxTimeMS. Then verify that if the client starts
// issuing getMores with a subsecond maxTimeMS, that mongos eventually schedules getMores on the
diff --git a/jstests/sharding/change_stream_error_label.js b/jstests/sharding/change_stream_error_label.js
index fdfbf784c7b..7c72b3475db 100644
--- a/jstests/sharding/change_stream_error_label.js
+++ b/jstests/sharding/change_stream_error_label.js
@@ -94,8 +94,8 @@ assert.soon(() => {
// Issue a "find" query to retrieve the first few documents, leaving the cursor open.
const findCursor = coll.find({}).sort({_id: 1}).batchSize(2);
-assert.docEq(findCursor.next(), {_id: -10});
-assert.docEq(findCursor.next(), {_id: -9});
+assert.docEq({_id: -10}, findCursor.next());
+assert.docEq({_id: -9}, findCursor.next());
assert.eq(findCursor.objsLeftInBatch(), 0);
// Open a non-$changeStream agg cursor. Set the batchSize to 0, since otherwise the aggregation will
diff --git a/jstests/sharding/change_stream_lookup_single_shard_cluster.js b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
index 3d8117c619e..e962ee4a92a 100644
--- a/jstests/sharding/change_stream_lookup_single_shard_cluster.js
+++ b/jstests/sharding/change_stream_lookup_single_shard_cluster.js
@@ -38,10 +38,10 @@ mongosColl.update({_id: 1}, {$set: {updated: true}});
// Verify that the document is successfully retrieved from the single-collection and whole-db
// change streams.
assert.soon(() => stream.hasNext());
-assert.docEq(stream.next().fullDocument, {_id: 1, updated: true});
+assert.docEq({_id: 1, updated: true}, stream.next().fullDocument);
assert.soon(() => wholeDbStream.hasNext());
-assert.docEq(wholeDbStream.next().fullDocument, {_id: 1, updated: true});
+assert.docEq({_id: 1, updated: true}, wholeDbStream.next().fullDocument);
stream.close();
wholeDbStream.close();
diff --git a/jstests/sharding/change_stream_no_shards.js b/jstests/sharding/change_stream_no_shards.js
index b6c7c7c9f61..901473e221f 100644
--- a/jstests/sharding/change_stream_no_shards.js
+++ b/jstests/sharding/change_stream_no_shards.js
@@ -12,27 +12,27 @@ const testDB = st.s.getDB("test");
// cursor response.
let csCmdRes = assert.commandWorked(
testDB.runCommand({aggregate: "testing", pipeline: [{$changeStream: {}}], cursor: {}}));
-assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.docEq([], csCmdRes.cursor.firstBatch);
assert.eq(csCmdRes.cursor.id, 0);
// Test that attempting to open a whole-db stream results in an empty, closed cursor response.
csCmdRes = assert.commandWorked(
testDB.runCommand({aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}));
-assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.docEq([], csCmdRes.cursor.firstBatch);
assert.eq(csCmdRes.cursor.id, 0);
// Test that attempting to open a cluster-wide stream results in an empty, closed cursor
// response.
csCmdRes = assert.commandWorked(adminDB.runCommand(
{aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}], cursor: {}}));
-assert.docEq(csCmdRes.cursor.firstBatch, []);
+assert.docEq([], csCmdRes.cursor.firstBatch);
assert.eq(csCmdRes.cursor.id, 0);
// Test that a regular, non-$changeStream aggregation also results in an empty cursor when no
// shards are present.
const nonCsCmdRes = assert.commandWorked(
testDB.runCommand({aggregate: "testing", pipeline: [{$match: {}}], cursor: {}}));
-assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
+assert.docEq([], nonCsCmdRes.cursor.firstBatch);
assert.eq(nonCsCmdRes.cursor.id, 0);
st.stop();
diff --git a/jstests/sharding/change_stream_resume_from_different_mongos.js b/jstests/sharding/change_stream_resume_from_different_mongos.js
index 27fca91e6b7..897272a7fe2 100644
--- a/jstests/sharding/change_stream_resume_from_different_mongos.js
+++ b/jstests/sharding/change_stream_resume_from_different_mongos.js
@@ -59,7 +59,7 @@ for (let key of Object.keys(ChangeStreamWatchMode)) {
const docsFoundInOrder = [firstChange];
for (let i = 0; i < nDocs - 1; i++) {
const change = cst.getOneChange(changeStream);
- assert.docEq(change.ns, {db: s0DB.getName(), coll: coll.getName()});
+ assert.docEq({db: s0DB.getName(), coll: coll.getName()}, change.ns);
assert.eq(change.operationType, "insert");
docsFoundInOrder.push(change);
diff --git a/jstests/sharding/change_stream_resume_shard_key_change.js b/jstests/sharding/change_stream_resume_shard_key_change.js
index 945af3e2337..c0aaf7dc889 100644
--- a/jstests/sharding/change_stream_resume_shard_key_change.js
+++ b/jstests/sharding/change_stream_resume_shard_key_change.js
@@ -72,8 +72,8 @@ const verifyChanges = (changeStream, startingIndex) => {
}
return changes.length === docs.length - startingIndex;
});
- assert.docEq(changes.map(x => x.fullDocument), docs.slice(startingIndex));
- assert.docEq(changes.map(x => x.documentKey), docKeys.slice(startingIndex));
+ assert.docEq(docs.slice(startingIndex), changes.map(x => x.fullDocument));
+ assert.docEq(docKeys.slice(startingIndex), changes.map(x => x.documentKey));
return changes;
};
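A note on array-valued call sites such as verifyChanges() above: per the documentation added to docEq at the bottom of this patch, field order inside objects is disregarded, but element order within arrays is still significant (an assumption consistent with BSON array semantics), which is why the sort({_id: 1}) / slice(startingIndex) pairing above is load-bearing. A hypothetical illustration:

    assert.docEq([{a: 1, b: 2}], [{b: 2, a: 1}]);      // passes: object field order ignored
    assert.docEq([{a: 1}, {a: 2}], [{a: 2}, {a: 1}]);  // throws: array element order respected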
diff --git a/jstests/sharding/change_stream_shard_failover.js b/jstests/sharding/change_stream_shard_failover.js
index eadc821dec9..f9dcdec6ded 100644
--- a/jstests/sharding/change_stream_shard_failover.js
+++ b/jstests/sharding/change_stream_shard_failover.js
@@ -82,7 +82,7 @@ for (let key of Object.keys(ChangeStreamWatchMode)) {
const docsFoundInOrder = [firstChange];
for (let i = 0; i < nDocs - 1; i++) {
const change = cst.getOneChange(changeStream);
- assert.docEq(change.ns, {db: sDB.getName(), coll: coll.getName()});
+ assert.docEq({db: sDB.getName(), coll: coll.getName()}, change.ns);
assert.eq(change.operationType, "insert");
docsFoundInOrder.push(change);
diff --git a/jstests/sharding/change_stream_update_lookup_collation.js b/jstests/sharding/change_stream_update_lookup_collation.js
index d47f3862edd..b545b195f78 100644
--- a/jstests/sharding/change_stream_update_lookup_collation.js
+++ b/jstests/sharding/change_stream_update_lookup_collation.js
@@ -99,7 +99,7 @@ for (let nextDocKey of [{shardKey: "abc", _id: "abc_1"}, {shardKey: "ABC", _id:
let next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey, nextDocKey, tojson(next));
- assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 1}));
+ assert.docEq(Object.merge(nextDocKey, {updatedCount: 1}), next.fullDocument);
}
assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
@@ -145,7 +145,7 @@ for (let nextDocKey of [{shardKey: "ABC", _id: "abc_1"}, {shardKey: "abc", _id:
let next = strengthOneChangeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey, nextDocKey, tojson(next));
- assert.docEq(next.fullDocument, Object.merge(nextDocKey, {updatedCount: 2}));
+ assert.docEq(Object.merge(nextDocKey, {updatedCount: 2}), next.fullDocument);
}
assert.eq(numIdIndexUsages(st.rs0.getPrimary()), idIndexUsagesPreIteration.shard0 + 1);
assert.eq(numIdIndexUsages(st.rs1.getPrimary()), idIndexUsagesPreIteration.shard1 + 1);
diff --git a/jstests/sharding/change_stream_update_lookup_read_concern.js b/jstests/sharding/change_stream_update_lookup_read_concern.js
index 95e5bcd9f58..50368a33cf4 100644
--- a/jstests/sharding/change_stream_update_lookup_read_concern.js
+++ b/jstests/sharding/change_stream_update_lookup_read_concern.js
@@ -101,7 +101,7 @@ assert.commandWorked(mongosColl.update({_id: 1}, {$set: {updatedCount: 1}}));
assert.soon(() => changeStream.hasNext());
let latestChange = changeStream.next();
assert.eq(latestChange.operationType, "update");
-assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 1});
+assert.docEq({_id: 1, updatedCount: 1}, latestChange.fullDocument);
// Test that the change stream itself goes to the secondary. There might be more than one if we
// needed multiple getMores to retrieve the changes.
@@ -208,7 +208,7 @@ const joinResumeReplicationShell =
assert.soon(() => changeStream.hasNext());
latestChange = changeStream.next();
assert.eq(latestChange.operationType, "update");
-assert.docEq(latestChange.fullDocument, {_id: 1, updatedCount: 2});
+assert.docEq({_id: 1, updatedCount: 2}, latestChange.fullDocument);
joinResumeReplicationShell();
// Test that the update lookup goes to the new closest secondary.
diff --git a/jstests/sharding/change_streams/lookup_change_stream_post_image_compound_shard_key.js b/jstests/sharding/change_streams/lookup_change_stream_post_image_compound_shard_key.js
index 97fb61631f3..be654754e3c 100644
--- a/jstests/sharding/change_streams/lookup_change_stream_post_image_compound_shard_key.js
+++ b/jstests/sharding/change_streams/lookup_change_stream_post_image_compound_shard_key.js
@@ -75,8 +75,8 @@ for (let id = 0; id < nDocs; ++id) {
next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
- assert.docEq(next.fullDocument,
- Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 1}));
+ assert.docEq(Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 1}),
+ next.fullDocument);
}
});
@@ -101,8 +101,8 @@ assert.commandWorked(mongosDB.adminCommand({
let next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
- assert.docEq(next.fullDocument,
- Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 2}));
+ assert.docEq(Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 2}),
+ next.fullDocument);
}
});
diff --git a/jstests/sharding/change_streams/lookup_change_stream_post_image_hashed_shard_key.js b/jstests/sharding/change_streams/lookup_change_stream_post_image_hashed_shard_key.js
index bc94cfbfe6e..b6f0a12c595 100644
--- a/jstests/sharding/change_streams/lookup_change_stream_post_image_hashed_shard_key.js
+++ b/jstests/sharding/change_streams/lookup_change_stream_post_image_hashed_shard_key.js
@@ -65,7 +65,7 @@ for (let id = 0; id < nDocs; ++id) {
next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey, {shardKey: id, _id: id});
- assert.docEq(next.fullDocument, {_id: id, shardKey: id, updatedCount: 1});
+ assert.docEq({_id: id, shardKey: id, updatedCount: 1}, next.fullDocument);
}
st.stop();
diff --git a/jstests/sharding/change_streams/lookup_change_stream_post_image_id_shard_key.js b/jstests/sharding/change_streams/lookup_change_stream_post_image_id_shard_key.js
index f577ff932e3..17dbac13b94 100644
--- a/jstests/sharding/change_streams/lookup_change_stream_post_image_id_shard_key.js
+++ b/jstests/sharding/change_streams/lookup_change_stream_post_image_id_shard_key.js
@@ -62,7 +62,7 @@ for (let nextId of [1000, -1000]) {
assert.eq(next.operationType, "update");
// Only the "_id" field is present in next.documentKey because the shard key is the _id.
assert.eq(next.documentKey, {_id: nextId});
- assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 1});
+ assert.docEq({_id: nextId, updatedCount: 1}, next.fullDocument);
}
// Test that the change stream can still see the updated post image, even if a chunk is
@@ -81,7 +81,7 @@ for (let nextId of [1000, -1000]) {
let next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey, {_id: nextId});
- assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 2});
+ assert.docEq({_id: nextId, updatedCount: 2}, next.fullDocument);
}
st.stop();
diff --git a/jstests/sharding/change_streams_delete_in_txn_produces_correct_doc_key.js b/jstests/sharding/change_streams_delete_in_txn_produces_correct_doc_key.js
index 634b8b39d1e..34777bc69fd 100644
--- a/jstests/sharding/change_streams_delete_in_txn_produces_correct_doc_key.js
+++ b/jstests/sharding/change_streams_delete_in_txn_produces_correct_doc_key.js
@@ -66,7 +66,7 @@ function testDeleteInMultiDocTxn({collName, deleteCommand, expectedChanges}) {
collection: coll
});
assert.commandWorked(coll.insert({a: 1, _id: 5}));
- assert.docEq(cst.getOneChange(cursor).documentKey, {a: 1, _id: 5});
+ assert.docEq({a: 1, _id: 5}, cst.getOneChange(cursor).documentKey);
cst.cleanUp();
}
diff --git a/jstests/sharding/change_streams_new_shard_new_database.js b/jstests/sharding/change_streams_new_shard_new_database.js
index f1805e7bb5f..4212e233b48 100644
--- a/jstests/sharding/change_streams_new_shard_new_database.js
+++ b/jstests/sharding/change_streams_new_shard_new_database.js
@@ -41,7 +41,7 @@ function assertAllEventsObserved(changeStream, expectedDocs) {
for (let expectedDoc of expectedDocs) {
assert.soon(() => changeStream.hasNext());
const nextEvent = changeStream.next();
- assert.docEq(nextEvent.fullDocument, expectedDoc, tojson(nextEvent));
+ assert.docEq(expectedDoc, nextEvent.fullDocument, tojson(nextEvent));
}
}
@@ -50,7 +50,7 @@ function assertCollectionDropEventObserved(changeStream, dbName, collectionName)
assert.soon(() => changeStream.hasNext());
const nextEvent = changeStream.next();
assert.eq(nextEvent.operationType, "drop", tojson(nextEvent));
- assert.docEq(nextEvent.ns, {db: dbName, coll: collectionName}, tojson(nextEvent));
+ assert.docEq({db: dbName, coll: collectionName}, nextEvent.ns, tojson(nextEvent));
}
// Open a whole-db change stream on the as yet non-existent database.
diff --git a/jstests/sharding/change_streams_unsharded_update_resume.js b/jstests/sharding/change_streams_unsharded_update_resume.js
index 96a8a0230da..be692f6e758 100644
--- a/jstests/sharding/change_streams_unsharded_update_resume.js
+++ b/jstests/sharding/change_streams_unsharded_update_resume.js
@@ -32,7 +32,7 @@ assert.commandWorked(mongosColl.update({_id: 0}, {$set: {updated: true}}));
assert.soon(() => csCur.hasNext());
let updateEvent = csCur.next();
assert.eq(updateEvent.operationType, "update");
-assert.docEq(updateEvent.fullDocument, {_id: 0, a: -100, updated: true});
+assert.docEq({_id: 0, a: -100, updated: true}, updateEvent.fullDocument);
// Now shard the collection on {a: 1} and move the upper chunk to the other shard.
assert.commandWorked(mongosColl.createIndex({a: 1}));
@@ -44,7 +44,7 @@ csCur = mongosColl.watch([], {resumeAfter: insertEvent._id, fullDocument: "updat
assert.soon(() => csCur.hasNext());
updateEvent = csCur.next();
assert.eq(updateEvent.operationType, "update");
-assert.docEq(updateEvent.fullDocument, {_id: 0, a: -100, updated: true});
+assert.docEq({_id: 0, a: -100, updated: true}, updateEvent.fullDocument);
// Insert a second document with the same _id on the second shard.
assert.commandWorked(mongosColl.insert({_id: 0, a: 100}));
diff --git a/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js b/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js
index 39dd6a7417c..cb830f804e1 100644
--- a/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js
+++ b/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js
@@ -65,14 +65,14 @@ assert.soonNoExcept(
// this alone does not prove that the multi-update actually wrote its shard key into the oplog.
csCursor = mongosColl.watch([], {resumeAfter: resumeToken, fullDocument: "updateLookup"});
assert.soon(() => csCursor.hasNext());
-assert.docEq(csCursor.next().fullDocument, {_id: 0, a: -100, updated: true});
+assert.docEq({_id: 0, a: -100, updated: true}, csCursor.next().fullDocument);
// Now insert a new document with the same _id on the other shard. Update lookup will be able to
// distinguish between the two, proving that they both have full shard keys available.
assert.commandWorked(mongosColl.insert({_id: 0, a: 100}));
csCursor = mongosColl.watch([], {resumeAfter: resumeToken, fullDocument: "updateLookup"});
assert.soon(() => csCursor.hasNext());
-assert.docEq(csCursor.next().fullDocument, {_id: 0, a: -100, updated: true});
+assert.docEq({_id: 0, a: -100, updated: true}, csCursor.next().fullDocument);
st.stop();
})(); \ No newline at end of file
diff --git a/jstests/sharding/merge_requires_unique_index.js b/jstests/sharding/merge_requires_unique_index.js
index 1f0c10bbf2d..1555c130476 100644
--- a/jstests/sharding/merge_requires_unique_index.js
+++ b/jstests/sharding/merge_requires_unique_index.js
@@ -210,9 +210,9 @@ function runOnFieldsTests(targetShardKey, targetSplit) {
on: Object.keys(dottedPathIndexSpec)
}
}]));
- assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
- {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
- {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
+ assert.docEq({newField: {subField: "hi"}, proofOfUpdate: "PROOF"},
+ targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
+ {"newField.subField": 1, proofOfUpdate: 1, _id: 0}));
} else {
assertErrCodeAndErrMsgContains(sourceColl,
[{
@@ -243,9 +243,9 @@ function runOnFieldsTests(targetShardKey, targetSplit) {
}
}
]));
- assert.docEq(targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
- {"newField.subField": 1, proofOfUpdate: 1, _id: 0}),
- {newField: {subField: "hi"}, proofOfUpdate: "PROOF"});
+ assert.docEq({newField: {subField: "hi"}, proofOfUpdate: "PROOF"},
+ targetColl.findOne({"newField.subField": "hi", proofOfUpdate: "PROOF"},
+ {"newField.subField": 1, proofOfUpdate: 1, _id: 0}));
}
}
diff --git a/jstests/sharding/mongos_local_explain.js b/jstests/sharding/mongos_local_explain.js
index d21ee745306..95c8598c8a0 100644
--- a/jstests/sharding/mongos_local_explain.js
+++ b/jstests/sharding/mongos_local_explain.js
@@ -24,7 +24,7 @@ const explainPlan = assert.commandWorked(mongosConn.getDB("admin").runCommand(
// We expect the stages to appear under the 'mongos' heading, for 'splitPipeline' to be
// null, and for the 'mongos.host' field to be the hostname:port of the mongoS itself.
-assert.docEq(explainPlan.mongos.stages, expectedExplainStages);
+assert.docEq(expectedExplainStages, explainPlan.mongos.stages);
assert.eq(explainPlan.mongos.host, mongosConn.name);
assert.isnull(explainPlan.splitPipeline);
diff --git a/jstests/sharding/oplog_document_key.js b/jstests/sharding/oplog_document_key.js
index 294e3043aef..e09888cbbca 100644
--- a/jstests/sharding/oplog_document_key.js
+++ b/jstests/sharding/oplog_document_key.js
@@ -12,14 +12,14 @@ function validateDocumentKeyInOplogForRemove(ns, _id, docKey) {
const deleteEntry = oplog.findOne({ns: ns, op: 'd', 'o._id': _id});
const o = docKey ? {_id: _id, x: docKey} : {_id: _id};
if (deleteEntry) {
- assert.docEq(deleteEntry.o, o);
+ assert.docEq(o, deleteEntry.o);
} else {
// Validate this is a batched delete, which includes the document key.
const elemMatch = docKey ? {'ns': ns, 'op': 'd', 'o._id': _id, 'o.x': docKey}
: {'ns': ns, 'op': 'd', 'o._id': _id};
const applyOpsEntry =
oplog.findOne({ns: 'admin.$cmd', op: 'c', 'o.applyOps': {$elemMatch: elemMatch}});
- assert.docEq(applyOpsEntry.o.applyOps[0].o, o);
+ assert.docEq(o, applyOpsEntry.o.applyOps[0].o);
}
}
diff --git a/jstests/sharding/query/agg_mongos_merge.js b/jstests/sharding/query/agg_mongos_merge.js
index 508b387fedd..21c7b5e9e4f 100644
--- a/jstests/sharding/query/agg_mongos_merge.js
+++ b/jstests/sharding/query/agg_mongos_merge.js
@@ -515,7 +515,7 @@ const metaDataTests = [
{
pipeline: [{$match: {$text: {$search: "txt"}}}, {$sort: {text: 1}}],
verifyNoMetaData: (doc) =>
- assert.docEq([doc.$textScore, doc.$sortKey], [undefined, undefined])
+ assert.docEq([undefined, undefined], [doc.$textScore, doc.$sortKey])
}
];
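The verifyNoMetaData callback above leans on JavaScript returning undefined for absent properties: comparing against [undefined, undefined] asserts that neither $textScore nor $sortKey leaked into the merged result. A sketch with a hypothetical result document:

    const doc = {_id: 1, text: "txt"};  // no meta fields attached
    assert.docEq([undefined, undefined], [doc.$textScore, doc.$sortKey]);  // passes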
diff --git a/jstests/sharding/query/aggregation_currentop.js b/jstests/sharding/query/aggregation_currentop.js
index 9603d9ebcb9..ec74ee2cf5d 100644
--- a/jstests/sharding/query/aggregation_currentop.js
+++ b/jstests/sharding/query/aggregation_currentop.js
@@ -414,16 +414,16 @@ function runCommonTests(conn, curOpSpec) {
let expectedStages = [{$currentOp: {idleConnections: true}}, {$match: {desc: {$eq: "test"}}}];
if (isRemoteShardCurOp) {
- assert.docEq(explainPlan.splitPipeline.shardsPart, expectedStages);
+ assert.docEq(expectedStages, explainPlan.splitPipeline.shardsPart);
for (let i = 0; i < stParams.shards; i++) {
let shardName = st["rs" + i].name;
- assert.docEq(explainPlan.shards[shardName].stages, expectedStages);
+ assert.docEq(expectedStages, explainPlan.shards[shardName].stages);
}
} else if (isLocalMongosCurOp) {
expectedStages[0].$currentOp.localOps = true;
- assert.docEq(explainPlan.mongos.stages, expectedStages);
+ assert.docEq(expectedStages, explainPlan.mongos.stages);
} else {
- assert.docEq(explainPlan.stages, expectedStages);
+ assert.docEq(expectedStages, explainPlan.stages);
}
// Test that a user with the inprog privilege can run getMore on a $currentOp aggregation
diff --git a/jstests/sharding/timeseries_cluster_indexstats.js b/jstests/sharding/timeseries_cluster_indexstats.js
index b3610381e75..32dd3c70752 100644
--- a/jstests/sharding/timeseries_cluster_indexstats.js
+++ b/jstests/sharding/timeseries_cluster_indexstats.js
@@ -67,7 +67,7 @@ function checkIndexStats(coll, keys, sharded) {
: `Index stats 'shard' field should not exist on a non-sharded collection.\n${
tojson(index)}`);
assert.docEq(
- index.key, keys[i], `Index should have key spec ${tojson(keys[i])}.\n${tojson(index)}`);
+ keys[i], index.key, `Index should have key spec ${tojson(keys[i])}.\n${tojson(index)}`);
});
}
diff --git a/jstests/sharding/update_replace_id.js b/jstests/sharding/update_replace_id.js
index 5c80a54840c..0320cadc40e 100644
--- a/jstests/sharding/update_replace_id.js
+++ b/jstests/sharding/update_replace_id.js
@@ -48,8 +48,8 @@ function restartProfiling() {
function setUpData() {
// Write a single document to shard0 and verify that it is present.
mongosColl.insert({_id: -100, a: -100, msg: "not_updated"});
- assert.docEq(shard0DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
+ assert.docEq([{_id: -100, a: -100, msg: "not_updated"}],
+ shard0DB.test.find({_id: -100}).toArray());
// Write a document with the same key directly to shard1. This simulates an orphaned
// document, or the duplicate document which temporarily exists during a chunk migration.
@@ -70,15 +70,15 @@ function runReplacementUpdateTestsForHashedShardKey() {
mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
// Verify that the update did not modify the orphan document.
- assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
+ assert.docEq([{_id: -100, a: -100, msg: "not_updated"}],
+ shard1DB.test.find({_id: -100}).toArray());
assert.eq(writeRes.nMatched, 1);
assert.eq(writeRes.nModified, 1);
// Verify that the update only targeted shard0 and that the resulting document appears as
// expected.
- assert.docEq(mongosColl.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ assert.docEq([{_id: -100, a: -100, msg: "update_extracted_id_from_query"}],
+ mongosColl.find({_id: -100}).toArray());
profilerHasSingleMatchingEntryOrThrow({
profileDB: shard0DB,
filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
@@ -99,10 +99,10 @@ function runReplacementUpdateTestsForHashedShardKey() {
// expected. At this point in the test we expect shard1 to be stale, because it was the
// destination shard for the first moveChunk; we therefore explicitly check the profiler for
// a successful update, i.e. one which did not report a stale config exception.
- assert.docEq(mongosColl.find({_id: 101}).toArray(),
- [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
- assert.docEq(shard1DB.test.find({_id: 101}).toArray(),
- [{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}]);
+ assert.docEq([{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}],
+ mongosColl.find({_id: 101}).toArray());
+ assert.docEq([{_id: 101, a: 101, msg: "upsert_extracted_id_from_query"}],
+ shard1DB.test.find({_id: 101}).toArray());
profilerHasZeroMatchingEntriesOrThrow({
profileDB: shard0DB,
filter: {op: "update", "command.u.msg": "upsert_extracted_id_from_query"}
@@ -128,15 +128,15 @@ function runReplacementUpdateTestsForCompoundShardKey() {
mongosColl.update({_id: -100}, {a: -100, msg: "update_extracted_id_from_query"}));
// Verify that the update did not modify the orphan document.
- assert.docEq(shard1DB.test.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "not_updated"}]);
+ assert.docEq([{_id: -100, a: -100, msg: "not_updated"}],
+ shard1DB.test.find({_id: -100}).toArray());
assert.eq(writeRes.nMatched, 1);
assert.eq(writeRes.nModified, 1);
// Verify that the update only targeted shard0 and that the resulting document appears as
// expected.
- assert.docEq(mongosColl.find({_id: -100}).toArray(),
- [{_id: -100, a: -100, msg: "update_extracted_id_from_query"}]);
+ assert.docEq([{_id: -100, a: -100, msg: "update_extracted_id_from_query"}],
+ mongosColl.find({_id: -100}).toArray());
profilerHasSingleMatchingEntryOrThrow({
profileDB: shard0DB,
filter: {op: "update", "command.u.msg": "update_extracted_id_from_query"}
@@ -153,7 +153,7 @@ function runReplacementUpdateTestsForCompoundShardKey() {
ErrorCodes.ShardKeyNotFound);
// Verify that the failed update did not perform any writes.
- assert.docEq(mongosColl.find({_id: 101}).itcount(), 0);
+ assert.eq(0, mongosColl.find({_id: 101}).itcount());
// Verify that an update whose query contains an exact match on _id but whose replacement
// doc does not contain all other shard key fields will be targeted as if the missing shard
@@ -169,8 +169,8 @@ function runReplacementUpdateTestsForCompoundShardKey() {
assert.commandWorked(
sessionColl.update({_id: -99}, {_id: -99, msg: "update_missing_shard_key_field"}));
- assert.docEq(sessionColl.find({_id: -99}).toArray(),
- [{_id: -99, msg: "update_missing_shard_key_field"}]);
+ assert.docEq([{_id: -99, msg: "update_missing_shard_key_field"}],
+ sessionColl.find({_id: -99}).toArray());
// Verify that an upsert whose query contains an exact match on _id but whose replacement
// document does not contain all other shard key fields will work properly.
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index 73da8316f4e..ea88af06061 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -149,22 +149,22 @@ for (let i = 0; i < 2; i++) {
// Can unset shard key with op style update.
assert.commandWorked(coll.insert({_id: 11, key: 1}));
assert.commandWorked(sessionColl.update({_id: 11, key: 1}, {$unset: {key: 1}}));
- assert.docEq(sessionColl.findOne({_id: 11}), {_id: 11});
+ assert.docEq({_id: 11}, sessionColl.findOne({_id: 11}));
// Can unset shard key with replacement style update.
assert.commandWorked(coll.insert({_id: 12, key: 1}));
assert.commandWorked(sessionColl.update({_id: 12, key: 1}, {_id: 12}));
- assert.docEq(sessionColl.findOne({_id: 12}), {_id: 12});
+ assert.docEq({_id: 12}, sessionColl.findOne({_id: 12}));
// Can unset shard key with pipeline style update.
assert.commandWorked(coll.insert({_id: 13, key: 1}));
assert.commandWorked(sessionColl.update({_id: 13, key: 1}, [{$unset: "key"}, {$set: {x: 1}}]));
- assert.docEq(sessionColl.findOne({_id: 13}), {_id: 13, x: 1});
+ assert.docEq({_id: 13, x: 1}, sessionColl.findOne({_id: 13}));
// Can unset nested fields in the shard key.
assert.commandWorked(coll.insert({_id: 14, key: {a: 1, b: 1}}));
assert.commandWorked(sessionColl.update({_id: 14, key: {a: 1, b: 1}}, {$unset: {"key.a": 1}}));
- assert.docEq(sessionColl.findOne({_id: 14}), {_id: 14, key: {b: 1}});
+ assert.docEq({_id: 14, key: {b: 1}}, sessionColl.findOne({_id: 14}));
}
// Tests for nested shard keys.
@@ -181,18 +181,18 @@ function testNestedShardKeys(collName, keyPattern) {
// Can unset shard key with op style update.
assert.commandWorked(coll.insert({_id: 11, skey: {skey: 1}}));
assert.commandWorked(sessionColl.update({_id: 11, "skey.skey": 1}, {$unset: {skey: 1}}));
- assert.docEq(sessionColl.findOne({_id: 11}), {_id: 11});
+ assert.docEq({_id: 11}, sessionColl.findOne({_id: 11}));
// Can unset shard key with replacement style update.
assert.commandWorked(coll.insert({_id: 12, skey: {skey: 1}}));
assert.commandWorked(sessionColl.update({_id: 12, "skey.skey": 1}, {_id: 12}));
- assert.docEq(sessionColl.findOne({_id: 12}), {_id: 12});
+ assert.docEq({_id: 12}, sessionColl.findOne({_id: 12}));
// Can unset shard key with pipeline style update.
assert.commandWorked(coll.insert({_id: 13, skey: {skey: 1}}));
assert.commandWorked(
sessionColl.update({_id: 13, "skey.skey": 1}, [{$unset: "skey"}, {$set: {x: 1}}]));
- assert.docEq(sessionColl.findOne({_id: 13}), {_id: 13, x: 1});
+ assert.docEq({_id: 13, x: 1}, sessionColl.findOne({_id: 13}));
//
// Verify each field in a nested shard key can be unset.
@@ -201,25 +201,25 @@ function testNestedShardKeys(collName, keyPattern) {
// For op-style.
assert.commandWorked(coll.insert({_id: 14, skey: {skey: 1}}));
assert.commandWorked(sessionColl.update({_id: 14, "skey.skey": 1}, {$unset: {"skey.skey": 1}}));
- assert.docEq(sessionColl.findOne({_id: 14}), {_id: 14, skey: {}});
+ assert.docEq({_id: 14, skey: {}}, sessionColl.findOne({_id: 14}));
assert.commandWorked(sessionColl.update({_id: 14, skey: {}}, {$unset: {skey: 1}}));
- assert.docEq(sessionColl.findOne({_id: 14}), {_id: 14});
+ assert.docEq({_id: 14}, sessionColl.findOne({_id: 14}));
// For replacement style.
assert.commandWorked(coll.insert({_id: 15, skey: {skey: 1}}));
assert.commandWorked(sessionColl.update({_id: 15, "skey.skey": 1}, {skey: 1}));
- assert.docEq(sessionColl.findOne({_id: 15}), {_id: 15, skey: 1});
+ assert.docEq({_id: 15, skey: 1}, sessionColl.findOne({_id: 15}));
assert.commandWorked(sessionColl.update({_id: 15, skey: 1}, {$unset: {skey: 1}}));
- assert.docEq(sessionColl.findOne({_id: 15}), {_id: 15});
+ assert.docEq({_id: 15}, sessionColl.findOne({_id: 15}));
// This can be used to make sure pipeline-based updates generate delta oplog entries.
const largeStr = '*'.repeat(128);
// For pipeline style.
assert.commandWorked(coll.insert({_id: 16, skey: {skey: 1}, largeStr: largeStr}));
assert.commandWorked(sessionColl.update({_id: 16, "skey.skey": 1}, [{$unset: "skey.skey"}]));
- assert.docEq(sessionColl.findOne({_id: 16}), {_id: 16, skey: {}, largeStr: largeStr});
+ assert.docEq({_id: 16, skey: {}, largeStr: largeStr}, sessionColl.findOne({_id: 16}));
assert.commandWorked(sessionColl.update({_id: 16, skey: {}}, [{$unset: "skey"}]));
- assert.docEq(sessionColl.findOne({_id: 16}), {_id: 16, largeStr: largeStr});
+ assert.docEq({_id: 16, largeStr: largeStr}, sessionColl.findOne({_id: 16}));
}
testNestedShardKeys("update_nested", {"skey.skey": 1});
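The update_sharded.js hunks above exercise all three shell update forms against the shard key; for orientation, a hypothetical document {_id: 20, key: 1} could have 'key' removed by any of them:

    coll.update({_id: 20, key: 1}, {$unset: {key: 1}});   // op style
    coll.update({_id: 20, key: 1}, {_id: 20});            // replacement style
    coll.update({_id: 20, key: 1}, [{$unset: "key"}]);    // aggregation-pipeline style

In each case the rewritten assertions then take the form assert.docEq({_id: 20}, coll.findOne({_id: 20})).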
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 424cb9790e2..10d705bc532 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -55,19 +55,19 @@ st.printShardingStatus();
// Upserted replacement update can result in no shard key.
assert.commandWorked(upsertedResult(coll, {x: -1}, {_id: 1}));
-assert.docEq(coll.findOne({}), {_id: 1});
+assert.docEq({_id: 1}, coll.findOne({}));
// Upserted with supplied document can result in no shard key.
assert.commandWorked(upsertSuppliedResult(coll, {x: -1}, {_id: 1}));
-assert.docEq(coll.findOne({}), {_id: 1});
+assert.docEq({_id: 1}, coll.findOne({}));
// Upserted op style update will propagate shard key by default.
assert.commandWorked(upsertedResult(coll, {x: -1}, {$set: {_id: 1}}));
-assert.docEq(coll.findOne({}), {_id: 1, x: -1});
+assert.docEq({_id: 1, x: -1}, coll.findOne({}));
// Upserted op style update can unset propagated shard key.
assert.commandWorked(upsertedResult(coll, {x: -1}, {$set: {_id: 1}, $unset: {x: 1}}));
-assert.docEq(coll.findOne({}), {_id: 1});
+assert.docEq({_id: 1}, coll.findOne({}));
// Updates with upsert must contain the shard key in the query when using $op-style updates.
assert.eq(1, upsertedXVal(coll, {x: 1}, {$set: {a: 1}}));
@@ -122,22 +122,22 @@ st.printShardingStatus();
// Upserted replacement update can result in no shard key with nested shard key.
assert.commandWorked(upsertedResult(coll, {"x.x": -1}, {_id: 1}));
-assert.docEq(coll.findOne({}), {_id: 1});
+assert.docEq({_id: 1}, coll.findOne({}));
// Upserted with supplied document can result in no shard key with nested shard key.
assert.commandWorked(upsertSuppliedResult(coll, {"x.x": -1}, {_id: 1}));
-assert.docEq(coll.findOne({}), {_id: 1});
+assert.docEq({_id: 1}, coll.findOne({}));
// Upserted op style update will propagate shard key by default with nested shard key.
assert.commandWorked(upsertedResult(coll, {"x.x": -1}, {$set: {_id: 1}}));
-assert.docEq(coll.findOne({}), {_id: 1, x: {x: -1}});
+assert.docEq({_id: 1, x: {x: -1}}, coll.findOne({}));
// Upserted op style update can unset propagated shard key fields with nested shard key.
assert.commandWorked(upsertedResult(coll, {"x.x": -1}, {$set: {_id: 1}, $unset: {"x.x": 1}}));
-assert.docEq(coll.findOne({}), {_id: 1, x: {}});
+assert.docEq({_id: 1, x: {}}, coll.findOne({}));
assert.commandWorked(upsertedResult(coll, {"x.x": -1}, {$set: {_id: 1}, $unset: {"x": 1}}));
-assert.docEq(coll.findOne({}), {_id: 1});
+assert.docEq({_id: 1}, coll.findOne({}));
// Nested field extraction with nested shard key
assert.docEq({x: 1}, upsertedXVal(coll, {"x.x": 1}, {$set: {a: 1}}));
@@ -193,13 +193,13 @@ st.printShardingStatus();
// No upsert type can result in a missing shard key for nested _id key.
assert.commandWorked(upsertedResult(coll, {_id: {x: -1}}, {}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}});
+assert.docEq({_id: {x: -1}}, coll.findOne({}));
assert.commandWorked(upsertSuppliedResult(coll, {_id: {x: -1}}, {}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}});
+assert.docEq({_id: {x: -1}}, coll.findOne({}));
assert.commandWorked(upsertedResult(coll, {_id: {x: -1}}, {$set: {y: 1}}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}, y: 1});
+assert.docEq({_id: {x: -1}, y: 1}, coll.findOne({}));
assert.commandFailedWithCode(
upsertedResult(coll, {_id: {x: -1}}, {$set: {y: 1}, $unset: {"_id.x": 1}}),
@@ -207,16 +207,16 @@ assert.commandFailedWithCode(
// All update types can re-state shard key for nested _id key.
assert.commandWorked(upsertedResult(coll, {_id: {x: -1}}, {_id: {x: -1}, y: 1}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}, y: 1});
+assert.docEq({_id: {x: -1}, y: 1}, coll.findOne({}));
assert.commandWorked(upsertSuppliedResult(coll, {_id: {x: -1}}, {_id: {x: -1}, y: 1}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}, y: 1});
+assert.docEq({_id: {x: -1}, y: 1}, coll.findOne({}));
assert.commandWorked(upsertedResult(coll, {_id: {x: -1}}, {$set: {_id: {x: -1}, y: 1}}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}, y: 1});
+assert.docEq({_id: {x: -1}, y: 1}, coll.findOne({}));
assert.commandWorked(upsertedResult(coll, {_id: {x: -1}}, {$set: {"_id.x": -1, y: 1}}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}, y: 1});
+assert.docEq({_id: {x: -1}, y: 1}, coll.findOne({}));
// No upsert type can modify shard key for nested _id key.
assert.commandFailedWithCode(upsertedResult(coll, {_id: {x: -1}}, {_id: {x: -2}}),
@@ -276,13 +276,13 @@ assert.commandFailedWithCode(upsertedResult(coll, {"_id.x": -1}, {_id: {x: -1}})
ErrorCodes.NotExactValueField);
assert.commandWorked(upsertSuppliedResult(coll, {"_id.x": -1}, {_id: {x: -1}}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}});
+assert.docEq({_id: {x: -1}}, coll.findOne({}));
assert.commandFailedWithCode(upsertedResult(coll, {"_id.x": -1}, {$set: {_id: {x: -1}}}),
ErrorCodes.ImmutableField);
assert.commandWorked(upsertedResult(coll, {"_id.x": -1}, {$set: {"_id.x": -1}}));
-assert.docEq(coll.findOne({}), {_id: {x: -1}});
+assert.docEq({_id: {x: -1}}, coll.findOne({}));
st.stop();
})();
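The final file in the patch, src/mongo/shell/assert.js (below), carries the substantive change: _isDocEq drops the sortDoc()/friendlyEqual() round-trip in favor of a strict identity check plus bsonUnorderedFieldsCompare (a shell built-in), and docEq's parameters are renamed to expectedDoc/actualDoc so the failure message can name each side. A rough, hypothetical JavaScript approximation of the comparison semantics, ignoring BSON type fidelity and the 16793600-byte size guard the native comparator enforces:

    // Stand-in for the native bsonUnorderedFieldsCompare(a, b) === 0 check.
    function deepUnorderedEquals(a, b) {
        if (a === b)
            return true;
        if (typeof a !== "object" || a === null || typeof b !== "object" || b === null)
            return false;
        if (Array.isArray(a) !== Array.isArray(b))
            return false;
        if (Array.isArray(a))  // arrays: same length, per-index equality, order preserved
            return a.length === b.length && a.every((el, i) => deepUnorderedEquals(el, b[i]));
        const aKeys = Object.keys(a);
        const bKeys = Object.keys(b);
        return aKeys.length === bKeys.length &&  // objects: same key set, order ignored
            aKeys.every(k => bKeys.includes(k) && deepUnorderedEquals(a[k], b[k]));
    }

    deepUnorderedEquals({a: 1, b: {c: 2}}, {b: {c: 2}, a: 1});  // true: field order ignored
    deepUnorderedEquals([1, 2], [2, 1]);                        // false: array order respected

A failing assert.docEq({_id: 1}, {_id: 2}) now reports "expected document {...} and actual document {...} are not equal" instead of the old symmetric "[a] != [b]" message.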
diff --git a/src/mongo/shell/assert.js b/src/mongo/shell/assert.js
index c42476a39f4..b893ec32616 100644
--- a/src/mongo/shell/assert.js
+++ b/src/mongo/shell/assert.js
@@ -182,32 +182,34 @@ assert = (function() {
};
function _isDocEq(a, b) {
- if (a == b) {
- return true;
- }
-
- var aSorted = sortDoc(a);
- var bSorted = sortDoc(b);
-
- if ((aSorted != null && bSorted != null) && friendlyEqual(aSorted, bSorted)) {
- return true;
- }
-
- return false;
+ return a === b || bsonUnorderedFieldsCompare(a, b) === 0;
}
- assert.docEq = function(a, b, msg) {
+ /**
+ * Throws if the 'actualDoc' object is not equal to the 'expectedDoc' object. The order of
+ * fields (properties) within objects is disregarded.
+ * Throws if an object's BSON representation exceeds 16793600 bytes.
+ */
+ assert.docEq = function(expectedDoc, actualDoc, msg) {
_validateAssertionMessage(msg);
- if (_isDocEq(a, b)) {
+ if (_isDocEq(expectedDoc, actualDoc)) {
return;
}
- doassert(_buildAssertionMessage(
- msg, "[" + tojson(a) + "] != [" + tojson(b) + "] are not equal"));
+ doassert(_buildAssertionMessage(msg,
+ "expected document " + tojson(expectedDoc) +
+ " and actual document " + tojson(actualDoc) +
+ " are not equal"));
};
+ /**
+ * Throws if the elements of the two given sets are not the same. Use only for primitive
+ * (non-object) set element types.
+ */
assert.setEq = function(expectedSet, actualSet, msg) {
+ _validateAssertionMessage(msg);
+
const failAssertion = function() {
doassert(_buildAssertionMessage(msg,
"expected set " + tojson(expectedSet) +