-rw-r--r--  .gitignore                                                                 3
-rw-r--r--  buildscripts/idl/check_stable_api_commands_have_idl_definitions.py        6
-rwxr-xr-x  buildscripts/resmokelib/powercycle/powercycle.py                           6
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/interface.py                     12
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/replicaset.py                    23
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/shard_merge.py                    2
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/shardedcluster.py                42
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/standalone.py                     2
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/talk_directly_to_shardsvrs.py     3
-rw-r--r--  buildscripts/resmokelib/testing/fixtures/tenant_migration.py               2
-rw-r--r--  buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py     4
-rw-r--r--  buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py           8
-rw-r--r--  buildscripts/resmokelib/testing/hooks/shard_merge.py                      22
-rw-r--r--  buildscripts/resmokelib/testing/hooks/simulate_crash.py                    2
-rw-r--r--  buildscripts/resmokelib/testing/hooks/stepdown.py                         10
-rw-r--r--  buildscripts/resmokelib/testing/hooks/tenant_migration.py                 18
-rw-r--r--  etc/pip/components/core.req                                                2
-rw-r--r--  jstests/free_mon/free_mon_register.js                                      4
-rw-r--r--  jstests/free_mon/free_mon_rs_register.js                                  16
19 files changed, 95 insertions, 92 deletions
diff --git a/.gitignore b/.gitignore
index 0d8b983943e..bafd2b62441 100644
--- a/.gitignore
+++ b/.gitignore
@@ -250,3 +250,6 @@ test_success.ninja
# test report generation result
report.json
+
+# suggested resmoke binary location
+dist-test/
diff --git a/buildscripts/idl/check_stable_api_commands_have_idl_definitions.py b/buildscripts/idl/check_stable_api_commands_have_idl_definitions.py
index dc539b01a52..9367e1aa3c2 100644
--- a/buildscripts/idl/check_stable_api_commands_have_idl_definitions.py
+++ b/buildscripts/idl/check_stable_api_commands_have_idl_definitions.py
@@ -35,7 +35,7 @@ import logging
import os
import sys
from tempfile import TemporaryDirectory
-from typing import Dict, List, Set
+from typing import Any, Dict, List, Mapping, Set
from pymongo import MongoClient
@@ -112,8 +112,8 @@ def list_commands_for_api(api_version: str, mongod_or_mongos: str, install_dir:
fixture.await_ready()
try:
- client = MongoClient(fixture.get_driver_connection_url())
- reply = client.admin.command('listCommands')
+ client = MongoClient(fixture.get_driver_connection_url()) # type: MongoClient
+ reply = client.admin.command('listCommands') # type: Mapping[str, Any]
commands = {
name
for name, info in reply['commands'].items() if api_version in info['apiVersions']
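
Note on the annotations above: the pymongo 4 series ships inline type hints, and `Database.command` returns a `Mapping[str, Any]`, which appears to be why the explicit type comments are added here. A minimal standalone sketch of the same listCommands filtering, with an assumed server URI that is not part of this patch:

    from typing import Any, Mapping, Set
    from pymongo import MongoClient

    def commands_in_api(uri: str, api_version: str) -> Set[str]:
        client: MongoClient = MongoClient(uri)  # assumed URI, for illustration only
        # listCommands reports every command and the API versions it belongs to.
        reply: Mapping[str, Any] = client.admin.command("listCommands")
        return {
            name
            for name, info in reply["commands"].items()
            if api_version in info["apiVersions"]
        }

    # Example (assumed local server): commands_in_api("mongodb://localhost:27017", "1")
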
diff --git a/buildscripts/resmokelib/powercycle/powercycle.py b/buildscripts/resmokelib/powercycle/powercycle.py
index 38f7b9413f7..f0f5091864f 100755
--- a/buildscripts/resmokelib/powercycle/powercycle.py
+++ b/buildscripts/resmokelib/powercycle/powercycle.py
@@ -1214,19 +1214,19 @@ def mongo_seed_docs(mongo, db_name, coll_name, num_docs):
random.choice(string.ascii_letters) for _ in range(random.randint(1, max_length)))
LOGGER.info("Seeding DB '%s' collection '%s' with %d documents, %d already exist", db_name,
- coll_name, num_docs, mongo[db_name][coll_name].count())
+ coll_name, num_docs, mongo[db_name][coll_name].estimated_document_count())
random.seed()
base_num = 100000
bulk_num = min(num_docs, 10000)
bulk_loops = num_docs // bulk_num
for _ in range(bulk_loops):
- num_coll_docs = mongo[db_name][coll_name].count()
+ num_coll_docs = mongo[db_name][coll_name].estimated_document_count()
if num_coll_docs >= num_docs:
break
mongo[db_name][coll_name].insert_many(
[{"x": random.randint(0, base_num), "doc": rand_string(1024)} for _ in range(bulk_num)])
LOGGER.info("After seeding there are %d documents in the collection",
- mongo[db_name][coll_name].count())
+ mongo[db_name][coll_name].estimated_document_count())
return 0
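
For context on the three changes above: pymongo 4 removed `Collection.count()`. `estimated_document_count()` is the metadata-based replacement used here for progress logging, while `count_documents(filter)` is the exact alternative when a filter matters. A hedged sketch (connection details are assumed placeholders):

    from pymongo import MongoClient

    coll = MongoClient("mongodb://localhost:27017")["powercycle"]["seed"]

    # Fast count from collection metadata, as used for the seeding log messages.
    approx = coll.estimated_document_count()

    # Exact, filterable count: the other pymongo 4 replacement for count().
    exact = coll.count_documents({"x": {"$gte": 0}})
    print(approx, exact)
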
diff --git a/buildscripts/resmokelib/testing/fixtures/interface.py b/buildscripts/resmokelib/testing/fixtures/interface.py
index 57d22f87ec6..1f9bf738fca 100644
--- a/buildscripts/resmokelib/testing/fixtures/interface.py
+++ b/buildscripts/resmokelib/testing/fixtures/interface.py
@@ -403,13 +403,15 @@ def create_fixture_table(fixture):
return "Fixture status:\n" + table
-def authenticate(client, auth_options=None):
+def build_client(node, auth_options=None, read_preference=pymongo.ReadPreference.PRIMARY):
"""Authenticate client for the 'authenticationDatabase' and return the client."""
if auth_options is not None:
- auth_db = client[auth_options["authenticationDatabase"]]
- auth_db.authenticate(auth_options["username"], password=auth_options["password"],
- mechanism=auth_options["authenticationMechanism"])
- return client
+ return node.mongo_client(
+ username=auth_options["username"], password=auth_options["password"],
+ authSource=auth_options["authenticationDatabase"],
+ authMechanism=auth_options["authenticationMechanism"], read_preference=read_preference)
+ else:
+ return node.mongo_client(read_preference=read_preference)
# Represents a row in a node info table.
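
The rename above is the core of this patch: pymongo 4 removed `Database.authenticate()`, so credentials have to be supplied when the `MongoClient` is constructed, and `build_client` forwards them to each node's `mongo_client()` factory. A minimal sketch of the same idea against pymongo directly (host and credentials are assumed placeholders, not resmoke's API):

    import pymongo

    def build_client(host, auth_options=None,
                     read_preference=pymongo.ReadPreference.PRIMARY):
        # No Database.authenticate() in pymongo 4: pass credentials at construction.
        if auth_options is not None:
            return pymongo.MongoClient(
                host,
                username=auth_options["username"],
                password=auth_options["password"],
                authSource=auth_options["authenticationDatabase"],
                authMechanism=auth_options["authenticationMechanism"],
                read_preference=read_preference)
        return pymongo.MongoClient(host, read_preference=read_preference)

    # client = build_client("localhost:20000",
    #                       {"username": "u", "password": "p",
    #                        "authenticationDatabase": "admin",
    #                        "authenticationMechanism": "SCRAM-SHA-256"})

The callers below (replicaset.py, shardedcluster.py, and the stepdown and migration hooks) are the mechanical follow-up to this signature change.
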
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index 773f092598a..b8d2d4c4439 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -153,8 +153,7 @@ class ReplicaSetFixture(interface.ReplFixture):
members.append(member_config)
repl_config = {"_id": self.replset_name, "protocolVersion": 1}
- client = self.nodes[0].mongo_client()
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(self.nodes[0], self.auth_options)
if client.local.system.replset.count_documents(filter={}):
# Skip initializing the replset if there is an existing configuration.
@@ -283,8 +282,7 @@ class ReplicaSetFixture(interface.ReplFixture):
def await_last_op_committed(self, timeout_secs=None):
"""Wait for the last majority committed op to be visible."""
- primary_client = self.get_primary().mongo_client()
- interface.authenticate(primary_client, self.auth_options)
+ primary_client = interface.build_client(self.get_primary(), self.auth_options)
primary_optime = get_last_optime(primary_client, self.fixturelib)
up_to_date_nodes = set()
@@ -363,8 +361,7 @@ class ReplicaSetFixture(interface.ReplFixture):
# Since this method is called at startup we expect the first node to be primary even when
# self.all_nodes_electable is True.
- primary_client = self.nodes[0].mongo_client()
- interface.authenticate(primary_client, self.auth_options)
+ primary_client = interface.build_client(self.nodes[0], self.auth_options)
# All nodes must be in primary/secondary state prior to this point. Perform a majority
# write to ensure there is a committed operation on the set. The commit point will
@@ -377,8 +374,8 @@ class ReplicaSetFixture(interface.ReplFixture):
for node in self.nodes:
self.logger.info("Waiting for node on port %d to have a stable recovery timestamp.",
node.port)
- client = node.mongo_client(read_preference=pymongo.ReadPreference.SECONDARY)
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(node, self.auth_options,
+ read_preference=pymongo.ReadPreference.SECONDARY)
client_admin = client["admin"]
@@ -435,8 +432,7 @@ class ReplicaSetFixture(interface.ReplFixture):
self.logger.info("Waiting to remove all 'newlyAdded' fields")
primary = self.get_primary()
- client = primary.mongo_client()
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(primary, self.auth_options)
while self._should_await_newly_added_removals_longer(client):
time.sleep(0.1) # Wait a little bit before trying again.
self.logger.info("All 'newlyAdded' fields removed")
@@ -531,8 +527,7 @@ class ReplicaSetFixture(interface.ReplFixture):
try:
if node.port not in clients:
- clients[node.port] = interface.authenticate(node.mongo_client(),
- self.auth_options)
+ clients[node.port] = interface.build_client(node, self.auth_options)
if fn(clients[node.port], node):
return node
@@ -613,7 +608,7 @@ class ReplicaSetFixture(interface.ReplFixture):
return self.nodes[chosen_index]
- primary_client = interface.authenticate(primary.mongo_client(), auth_options)
+ primary_client = interface.build_client(primary, auth_options)
retry_time_secs = self.AWAIT_REPL_TIMEOUT_MINS * 60
retry_start_time = time.time()
@@ -654,7 +649,7 @@ class ReplicaSetFixture(interface.ReplFixture):
self.logger.info(
"Attempting to step up the chosen secondary on port %d of replica set '%s'.",
node.port, self.replset_name)
- client = interface.authenticate(node.mongo_client(), auth_options)
+ client = interface.build_client(node, auth_options)
client.admin.command("replSetStepUp")
return True
except pymongo.errors.OperationFailure:
diff --git a/buildscripts/resmokelib/testing/fixtures/shard_merge.py b/buildscripts/resmokelib/testing/fixtures/shard_merge.py
index 28fea75cb0d..60044f5007e 100644
--- a/buildscripts/resmokelib/testing/fixtures/shard_merge.py
+++ b/buildscripts/resmokelib/testing/fixtures/shard_merge.py
@@ -159,7 +159,7 @@ class ShardMergeFixture(interface.MultiClusterFixture): # pylint: disable=too-m
def _create_shard_merge_donor_and_recipient_roles(self, rs):
"""Create a role for shard merge donor and recipient."""
primary = rs.get_primary()
- primary_client = interface.authenticate(primary.mongo_client(), self.auth_options)
+ primary_client = interface.build_client(primary, self.auth_options)
try:
primary_client.admin.command({
diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
index 50df2abb1bf..dddb01ca8d2 100644
--- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
+++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -113,8 +113,7 @@ class ShardedClusterFixture(interface.Fixture):
def get_shard_ids(self):
"""Get the list of shard ids in the cluster."""
- client = self.mongo_client()
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(self, self.auth_options)
res = client.admin.command("listShards")
return [shard_info["_id"] for shard_info in res["shards"]]
@@ -137,8 +136,7 @@ class ShardedClusterFixture(interface.Fixture):
# Wait for the mongos.
mongos.await_ready()
- client = self.mongo_client()
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(self, self.auth_options)
# Turn off the balancer if it is not meant to be enabled.
if not self.enable_balancer:
@@ -182,36 +180,34 @@ class ShardedClusterFixture(interface.Fixture):
) + ShardedClusterFixture.AWAIT_SHARDING_INITIALIZATION_TIMEOUT_SECS
timeout_occurred = lambda: deadline - time.time() <= 0.0
- mongod_clients = [(mongod.mongo_client(), mongod.port) for shard in self.shards
- for mongod in shard.nodes]
+ for shard in self.shards:
+ for mongod in shard.nodes:
- for client, port in mongod_clients:
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(mongod, self.auth_options)
+ port = mongod.port
- while True:
- # The choice of namespace (local.fooCollection) does not affect the output.
- get_shard_version_result = client.admin.command(
- "getShardVersion", "local.fooCollection", check=False)
- if get_shard_version_result["ok"]:
- break
+ while True:
+ # The choice of namespace (local.fooCollection) does not affect the output.
+ get_shard_version_result = client.admin.command(
+ "getShardVersion", "local.fooCollection", check=False)
+ if get_shard_version_result["ok"]:
+ break
- if timeout_occurred():
- raise self.fixturelib.ServerFailure(
- "mongod on port: {} failed waiting for getShardVersion success after {} seconds"
- .format(port, interface.Fixture.AWAIT_READY_TIMEOUT_SECS))
- time.sleep(0.1)
+ if timeout_occurred():
+ raise self.fixturelib.ServerFailure(
+ "mongod on port: {} failed waiting for getShardVersion success after {} seconds"
+ .format(port, interface.Fixture.AWAIT_READY_TIMEOUT_SECS))
+ time.sleep(0.1)
def stop_balancer(self, timeout_ms=60000):
"""Stop the balancer."""
- client = self.mongo_client()
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(self, self.auth_options)
client.admin.command({"balancerStop": 1}, maxTimeMS=timeout_ms)
self.logger.info("Stopped the balancer")
def start_balancer(self, timeout_ms=60000):
"""Start the balancer."""
- client = self.mongo_client()
- interface.authenticate(client, self.auth_options)
+ client = interface.build_client(self, self.auth_options)
client.admin.command({"balancerStart": 1}, maxTimeMS=timeout_ms)
self.logger.info("Started the balancer")
diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py
index 71d439ca6af..31eece3668d 100644
--- a/buildscripts/resmokelib/testing/fixtures/standalone.py
+++ b/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -181,7 +181,7 @@ class MongoDFixture(interface.Fixture):
def get_driver_connection_url(self):
"""Return the driver connection URL."""
- return "mongodb://" + self.get_internal_connection_string()
+ return "mongodb://" + self.get_internal_connection_string() + "/?directConnection=true"
# The below parameters define the default 'logComponentVerbosity' object passed to mongod processes
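
The `directConnection=true` suffix matters because pymongo 4 performs server discovery by default: a URL naming a single replica-set member can otherwise be treated as a seed for the whole set. Pinning the connection keeps each per-node fixture URL pointed at exactly that node. A hedged example (the port is an assumed placeholder):

    from pymongo import MongoClient

    # Talk only to the named node, even if it is a replica-set member.
    node_client = MongoClient("mongodb://localhost:20001/?directConnection=true")

    # 'hello' is the modern handshake command; its reply reports the node's role.
    print(node_client.admin.command("hello")["isWritablePrimary"])
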
diff --git a/buildscripts/resmokelib/testing/fixtures/talk_directly_to_shardsvrs.py b/buildscripts/resmokelib/testing/fixtures/talk_directly_to_shardsvrs.py
index 87ee370d5af..387b20f4214 100644
--- a/buildscripts/resmokelib/testing/fixtures/talk_directly_to_shardsvrs.py
+++ b/buildscripts/resmokelib/testing/fixtures/talk_directly_to_shardsvrs.py
@@ -138,8 +138,7 @@ class TalkDirectlyToShardsvrsFixture(interface.MultiClusterFixture):
self.logger.info("Adding %s as a shard...", connection_string)
config_primary = self.configsvr.get_primary()
- config_primary_client = interface.authenticate(config_primary.mongo_client(),
- self.auth_options)
+ config_primary_client = interface.build_client(config_primary, self.auth_options)
try:
config_primary_client.admin.command(
diff --git a/buildscripts/resmokelib/testing/fixtures/tenant_migration.py b/buildscripts/resmokelib/testing/fixtures/tenant_migration.py
index 1518bdb792e..3a45424a66a 100644
--- a/buildscripts/resmokelib/testing/fixtures/tenant_migration.py
+++ b/buildscripts/resmokelib/testing/fixtures/tenant_migration.py
@@ -158,7 +158,7 @@ class TenantMigrationFixture(interface.MultiClusterFixture):
def _create_tenant_migration_donor_and_recipient_roles(self, rs):
"""Create a role for tenant migration donor and recipient."""
primary = rs.get_primary()
- primary_client = interface.authenticate(primary.mongo_client(), self.auth_options)
+ primary_client = interface.build_client(primary, self.auth_options)
try:
primary_client.admin.command({
diff --git a/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py b/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py
index 2ad144e5ec8..e391f8e5664 100644
--- a/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py
+++ b/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py
@@ -59,7 +59,7 @@ class CleanupConcurrencyWorkloadsTestCase(interface.DynamicTestCase):
"""Execute drop databases hook."""
same_db_name = None
client = self._hook.fixture.mongo_client()
- db_names = client.database_names()
+ db_names = client.list_database_names()
exclude_dbs = copy.copy(self._hook.exclude_dbs)
if self._hook.same_db_name:
@@ -84,7 +84,7 @@ class CleanupConcurrencyWorkloadsTestCase(interface.DynamicTestCase):
if self._hook.same_collection_name and same_db_name:
self.logger.info("Dropping all collections in db %s except for %s", same_db_name,
self._hook.same_collection_name)
- colls = client[same_db_name].collection_names()
+ colls = client[same_db_name].list_collection_names()
for coll in [coll for coll in colls if coll != self._hook.same_collection_name]:
self.logger.info("Dropping db %s collection %s", same_db_name, coll)
try:
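
The two renames above are straight API substitutions: pymongo 4 removed `MongoClient.database_names()` and `Database.collection_names()` in favor of the list_* forms. A small sketch of the replacement pattern (connection string assumed):

    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017")  # assumed test deployment

    # pymongo 4 replacements for the removed database_names()/collection_names().
    for db_name in client.list_database_names():
        for coll_name in client[db_name].list_collection_names():
            print(db_name, coll_name)
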
diff --git a/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py b/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py
index 09050c94929..91b0cdcd705 100644
--- a/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py
+++ b/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py
@@ -488,14 +488,14 @@ class _InitialSyncThread(threading.Thread):
client = mongos_fixture.mongo_client()
except pymongo.errors.AutoReconnect:
pass
- for db in client.database_names():
+ for db in client.list_database_names():
self.logger.info("Waiting for mongos %s to retarget db: %s", mongos_conn_str, db)
start_time = time.time()
while True:
try:
- coll_names = client[db].collection_names()
+ coll_names = client[db].list_collection_names()
break
- except pymongo.errors.NotMasterError:
+ except pymongo.errors.NotPrimaryError:
pass
retarget_time = time.time() - start_time
if retarget_time >= 60:
@@ -508,7 +508,7 @@ class _InitialSyncThread(threading.Thread):
try:
client[db].command({"collStats": coll})
break
- except pymongo.errors.NotMasterError:
+ except pymongo.errors.NotPrimaryError:
pass
retarget_time = time.time() - start_time
if retarget_time >= 60:
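
Alongside the list_* renames, pymongo 4 renamed `NotMasterError` to `NotPrimaryError`, so the retry loops that deliberately swallow failover errors are updated throughout the hooks. A hedged sketch of that retry pattern, with an assumed helper name:

    import time
    import pymongo

    def run_with_retry(client, db_name, command, timeout_secs=60):
        # Keep retrying across failovers; NotPrimaryError is pymongo 4's name for
        # the old NotMasterError (AutoReconnect is unchanged).
        deadline = time.time() + timeout_secs
        while True:
            try:
                return client[db_name].command(command)
            except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
                if time.time() >= deadline:
                    raise
                time.sleep(0.1)
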
diff --git a/buildscripts/resmokelib/testing/hooks/shard_merge.py b/buildscripts/resmokelib/testing/hooks/shard_merge.py
index 6518040e567..9b6216b2cdc 100644
--- a/buildscripts/resmokelib/testing/hooks/shard_merge.py
+++ b/buildscripts/resmokelib/testing/hooks/shard_merge.py
@@ -379,7 +379,7 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
self.logger, donor_rs_index, recipient_rs_index)
def _create_client(self, node):
- return fixture_interface.authenticate(node.mongo_client(), self._auth_options)
+ return fixture_interface.build_client(node, self._auth_options)
def _check_tenant_migration_dbhash(self, migration_opts):
# Set the donor connection string, recipient connection string, and migration uuid string
@@ -461,7 +461,7 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
bson.SON([("configureFailPoint",
"abortTenantMigrationBeforeLeavingBlockingState"), ("mode", "off")]))
return
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
self.logger.info(
"Retrying connection to donor primary in order to disable abort failpoint for shard merge."
)
@@ -514,7 +514,7 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
res = donor_primary_client.admin.command(
cmd_obj,
bson.codec_options.CodecOptions(uuid_representation=bson.binary.UUID_SUBTYPE))
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
donor_primary = migration_opts.get_donor_primary()
self.logger.info(
"Retrying shard merge '%s' against donor primary on port %d of" +
@@ -562,7 +562,7 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
cmd_obj,
bson.codec_options.CodecOptions(uuid_representation=bson.binary.UUID_SUBTYPE))
return
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
donor_primary = migration_opts.get_donor_primary()
self.logger.info(
"Retrying forgetting shard merge '%s' against donor primary on port %d of " +
@@ -606,8 +606,8 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
})
if res["n"] == 0:
break
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
- # Ignore NotMasterErrors because it's possible to fail with
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
+ # Ignore NotPrimaryErrors because it's possible to fail with
# InterruptedDueToReplStateChange if the donor primary steps down or shuts
# down during the garbage collection check.
self.logger.info(
@@ -634,8 +634,8 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
})
if res["n"] == 0:
break
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
- # Ignore NotMasterErrors because it's possible to fail with
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
+ # Ignore NotPrimaryErrors because it's possible to fail with
# InterruptedDueToReplStateChange if the recipient primary steps down or
# shuts down during the garbage collection check.
self.logger.info(
@@ -669,7 +669,7 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
{"_id": bson.Binary(migration_opts.migration_id.bytes, 4)})
if doc is not None:
return
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
donor_primary = migration_opts.get_donor_primary()
self.logger.info(
"Retrying waiting for donor primary on port '%d' of replica set '%s' for " +
@@ -704,7 +704,7 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
return
# We retry on all write concern errors because we assume the only reason waiting for
# write concern should fail is because of a failover.
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError,
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError,
pymongo.errors.WriteConcernError) as err:
primary = get_primary(rs, self.logger)
self.logger.info(
@@ -731,7 +731,7 @@ class _ShardMergeThread(threading.Thread): # pylint: disable=too-many-instance-
server_status = client.admin.command({"serverStatus": 1})
pending_drop_idents = server_status["storageEngine"]["dropPendingIdents"]
break
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError,
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError,
pymongo.errors.WriteConcernError) as err:
self.logger.info(
"Retrying getting dropPendingIdents against primary on port %d after error %s.",
diff --git a/buildscripts/resmokelib/testing/hooks/simulate_crash.py b/buildscripts/resmokelib/testing/hooks/simulate_crash.py
index a0b82a1e98c..c03db61bba4 100644
--- a/buildscripts/resmokelib/testing/hooks/simulate_crash.py
+++ b/buildscripts/resmokelib/testing/hooks/simulate_crash.py
@@ -21,7 +21,7 @@ from buildscripts.resmokelib.testing.hooks import bghook
def validate(mdb, logger, acceptable_err_codes):
"""Return true if all collections are valid."""
- for db in mdb.database_names():
+ for db in mdb.list_database_names():
for coll in mdb.get_database(db).list_collection_names():
res = mdb.get_database(db).command({"validate": coll}, check=False)
diff --git a/buildscripts/resmokelib/testing/hooks/stepdown.py b/buildscripts/resmokelib/testing/hooks/stepdown.py
index 04595e82a4f..e88b3048498 100644
--- a/buildscripts/resmokelib/testing/hooks/stepdown.py
+++ b/buildscripts/resmokelib/testing/hooks/stepdown.py
@@ -303,7 +303,7 @@ class _StepdownThread(threading.Thread):
fixture.get_primary()
def _create_client(self, node):
- return fixture_interface.authenticate(node.mongo_client(), self._auth_options)
+ return fixture_interface.build_client(node, self._auth_options)
def _step_down_all(self):
for rs_fixture in self._rs_fixtures:
@@ -414,14 +414,14 @@ class _StepdownThread(threading.Thread):
client = self._create_client(mongos_fixture)
except pymongo.errors.AutoReconnect:
pass
- for db in client.database_names():
+ for db in client.list_database_names():
self.logger.info("Waiting for mongos %s to retarget db: %s", mongos_conn_str, db)
start_time = time.time()
while True:
try:
- coll_names = client[db].collection_names()
+ coll_names = client[db].list_collection_names()
break
- except pymongo.errors.NotMasterError:
+ except pymongo.errors.NotPrimaryError:
pass
retarget_time = time.time() - start_time
if retarget_time >= 60:
@@ -434,7 +434,7 @@ class _StepdownThread(threading.Thread):
try:
client[db].command({"collStats": coll})
break
- except pymongo.errors.NotMasterError:
+ except pymongo.errors.NotPrimaryError:
pass
retarget_time = time.time() - start_time
if retarget_time >= 60:
diff --git a/buildscripts/resmokelib/testing/hooks/tenant_migration.py b/buildscripts/resmokelib/testing/hooks/tenant_migration.py
index 0ec5d69897b..19d527418dc 100644
--- a/buildscripts/resmokelib/testing/hooks/tenant_migration.py
+++ b/buildscripts/resmokelib/testing/hooks/tenant_migration.py
@@ -372,7 +372,7 @@ class _TenantMigrationThread(threading.Thread):
self.logger)
def _create_client(self, node):
- return fixture_interface.authenticate(node.mongo_client(), self._auth_options)
+ return fixture_interface.build_client(node, self._auth_options)
def _check_tenant_migration_dbhash(self, migration_opts):
# Set the donor connection string, recipient connection string, and migration uuid string
@@ -474,7 +474,7 @@ class _TenantMigrationThread(threading.Thread):
res = donor_primary_client.admin.command(
cmd_obj,
bson.codec_options.CodecOptions(uuid_representation=bson.binary.UUID_SUBTYPE))
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
donor_primary = migration_opts.get_donor_primary()
self.logger.info(
"Retrying tenant migration '%s' against donor primary on port %d of replica " +
@@ -522,7 +522,7 @@ class _TenantMigrationThread(threading.Thread):
cmd_obj,
bson.codec_options.CodecOptions(uuid_representation=bson.binary.UUID_SUBTYPE))
return
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
donor_primary = migration_opts.get_donor_primary()
self.logger.info(
"Retrying forgetting tenant migration '%s' against donor primary on port %d of "
@@ -566,8 +566,8 @@ class _TenantMigrationThread(threading.Thread):
})
if res["n"] == 0:
break
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
- # Ignore NotMasterErrors because it's possible to fail with
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
+ # Ignore NotPrimaryErrors because it's possible to fail with
# InterruptedDueToReplStateChange if the donor primary steps down or shuts
# down during the garbage collection check.
self.logger.info(
@@ -594,8 +594,8 @@ class _TenantMigrationThread(threading.Thread):
})
if res["n"] == 0:
break
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
- # Ignore NotMasterErrors because it's possible to fail with
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
+ # Ignore NotPrimaryErrors because it's possible to fail with
# InterruptedDueToReplStateChange if the recipient primary steps down or
# shuts down during the garbage collection check.
self.logger.info(
@@ -629,7 +629,7 @@ class _TenantMigrationThread(threading.Thread):
{"_id": bson.Binary(migration_opts.migration_id.bytes, 4)})
if doc is not None:
return
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError):
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError):
donor_primary = migration_opts.get_donor_primary()
self.logger.info(
"Retrying waiting for donor primary on port '%d' of replica set '%s' for " +
@@ -664,7 +664,7 @@ class _TenantMigrationThread(threading.Thread):
return
# We retry on all write concern errors because we assume the only reason waiting for
# write concern should fail is because of a failover.
- except (pymongo.errors.AutoReconnect, pymongo.errors.NotMasterError,
+ except (pymongo.errors.AutoReconnect, pymongo.errors.NotPrimaryError,
pymongo.errors.WriteConcernError) as err:
primary = get_primary(rs, self.logger)
self.logger.info(
diff --git a/etc/pip/components/core.req b/etc/pip/components/core.req
index 0c1692e10fd..8222e5fcd8f 100644
--- a/etc/pip/components/core.req
+++ b/etc/pip/components/core.req
@@ -1,6 +1,6 @@
# Core (we need these for most buildscripts)
psutil <= 5.8.0
-pymongo >= 3.9, < 4.0
+pymongo == 4.3.3
PyYAML >= 3.0.0, <= 6.0.0
types-PyYAML ~= 6.0.5
requests >= 2.0.0, <= 2.26.0
diff --git a/jstests/free_mon/free_mon_register.js b/jstests/free_mon/free_mon_register.js
index 19bb2e59244..60d342ee8d6 100644
--- a/jstests/free_mon/free_mon_register.js
+++ b/jstests/free_mon/free_mon_register.js
@@ -32,7 +32,7 @@ const last_register = mock_web.query("last_register");
print(tojson(last_register));
assert.eq(last_register.version, 2);
-assert.gt(new Date().setTime(last_register.localTime["$date"]), localTime);
+assert.gt(new Date(last_register.localTime["$date"]), localTime);
assert.eq(last_register.payload.buildInfo.bits, 64);
assert.eq(last_register.payload.buildInfo.ok, 1);
assert.eq(last_register.payload.storageEngine.readOnly, false);
@@ -45,7 +45,7 @@ const last_metrics = mock_web.query("last_metrics");
print(tojson(last_metrics));
assert.eq(last_metrics.version, 2);
-assert.gt(new Date().setTime(last_metrics.localTime["$date"]), localTime);
+assert.gt(new Date(last_metrics.localTime["$date"]), localTime);
MongoRunner.stopMongod(conn);
diff --git a/jstests/free_mon/free_mon_rs_register.js b/jstests/free_mon/free_mon_rs_register.js
index 1c312d5fb4c..6af4f58d269 100644
--- a/jstests/free_mon/free_mon_rs_register.js
+++ b/jstests/free_mon/free_mon_rs_register.js
@@ -44,15 +44,23 @@ assert.gte(last_register.payload.replSetGetConfig.config.version, 2);
function isUUID(val) {
// Mock webserver gives us back unpacked BinData/UUID in the form:
- // { '$uuid': '0123456789abcdef0123456789abcdef' }.
+ //"$binary" : {"base64" : "2gzkSY3bTlu/k3bXfpPUKg==", "subType" : "04"}
if ((typeof val) !== 'object') {
return false;
}
- const uuid = val['$uuid'];
- if ((typeof uuid) !== 'string') {
+ const binary = val['$binary'];
+ const subType = binary['subType'];
+ const base64 = binary['base64'];
+
+    // Subtype "04" is the BSON binary subtype that identifies a UUID.
+ // https://www.mongodb.com/docs/manual/reference/bson-types/#binary-data
+ if (subType !== '04') {
return false;
}
- return uuid.match(/^[0-9a-fA-F]{32}$/) !== null;
+
+ // Validate base64
+ return base64.match('^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$') !==
+ null;
}
assert.eq(isUUID(last_register.payload.uuid['local.oplog.rs']), true);
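
The reworked isUUID check mirrors how Extended JSON v2 renders a UUID: a `$binary` document with a base64 payload and subtype "04". A rough Python equivalent of the same validation, for reference only (the helper name and the strict 16-byte check are assumptions, not part of the jstest):

    import base64

    def is_uuid_extjson(val):
        # Extended JSON v2 form: {"$binary": {"base64": "...", "subType": "04"}}.
        if not isinstance(val, dict):
            return False
        binary = val.get("$binary", {})
        if binary.get("subType") != "04":  # subtype 4 is the BSON UUID type
            return False
        try:
            # A UUID payload decodes to exactly 16 bytes of valid base64.
            return len(base64.b64decode(binary.get("base64", ""), validate=True)) == 16
        except (ValueError, TypeError):
            return False
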