-rw-r--r--  buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml |   1
-rw-r--r--  jstests/core/views/views_all_commands.js                                           |   1
-rw-r--r--  jstests/sharding/track_unsharded_collections_check_shard_version.js                | 483
-rw-r--r--  src/mongo/db/commands/find_and_modify.cpp                                          |  15
-rw-r--r--  src/mongo/db/commands/mr.cpp                                                       |   3
-rw-r--r--  src/mongo/db/db_raii.cpp                                                           |  17
-rw-r--r--  src/mongo/db/ops/write_ops_exec.cpp                                                |  12
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp                                               |   3
-rw-r--r--  src/mongo/db/pipeline/process_interface_shardsvr.cpp                               |   5
-rw-r--r--  src/mongo/db/query/get_executor.cpp                                                |   3
-rw-r--r--  src/mongo/db/query/stage_builder.cpp                                               |   3
-rw-r--r--  src/mongo/db/s/collection_metadata_filtering_test.cpp                              |   6
-rw-r--r--  src/mongo/db/s/collection_sharding_state.cpp                                       |  35
-rw-r--r--  src/mongo/db/s/collection_sharding_state.h                                         |   8
-rw-r--r--  src/mongo/db/s/get_shard_version_command.cpp                                       |   3
-rw-r--r--  src/mongo/db/s/op_observer_sharding_impl.cpp                                       |   8
-rw-r--r--  src/mongo/db/s/operation_sharding_state.cpp                                        |   8
-rw-r--r--  src/mongo/db/s/operation_sharding_state.h                                          |   5
18 files changed, 566 insertions, 53 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
index bdd60ea3e53..828d6d3dc4c 100644
--- a/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_last_stable_mongos_and_mixed_shards.yml
@@ -149,6 +149,7 @@ selector:
- jstests/sharding/database_and_shard_versioning_all_commands.js
- jstests/sharding/shard1.js
- jstests/sharding/track_unsharded_collections_create_collection.js
+ - jstests/sharding/track_unsharded_collections_check_shard_version.js
- jstests/sharding/banned_txn_databases_sharded.js
# Enable if SERVER-41813 is backported or 4.4 becomes last-stable
- jstests/sharding/invalid_system_views_sharded_collection.js
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index ed6f137d6d3..6fba75008b3 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -292,7 +292,6 @@ let viewsCommandTests = {
getShardVersion: {
command: {getShardVersion: "test.view"},
isAdminCommand: true,
- expectFailure: true,
skipSharded: true, // mongos is tested in views/views_sharded.js
},
getnonce: {skip: isUnrelated},
diff --git a/jstests/sharding/track_unsharded_collections_check_shard_version.js b/jstests/sharding/track_unsharded_collections_check_shard_version.js
new file mode 100644
index 00000000000..d6c34fb5311
--- /dev/null
+++ b/jstests/sharding/track_unsharded_collections_check_shard_version.js
@@ -0,0 +1,483 @@
+/**
+ * Tests the FCV 4.4 checkShardVersion protocol for each command. Under this protocol, when the
+ * shard does not have a shardVersion cached for a namespace:
+ * - if the namespace is a collection, the shard returns StaleShardVersion
+ * - if the namespace is a view, the shard executes the request
+ * -- the request will either return a view definition or CommandNotSupportedOnView
+ * - if the namespace does not exist, the shard executes the request
+ * -- the request will either return empty results or NamespaceNotFound
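+ *
+ * In the collection case, the router refreshes and retries on StaleShardVersion, so once the
+ * request completes the shard is expected to have the collection's shardVersion cached.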
+ */
+
+// TODO (PM-1051): The UUID consistency hook should not check shards' caches once shards have an
+// "UNKNOWN" state for collection filtering metadata.
+TestData.skipCheckingCatalogCacheConsistencyWithShardingCatalog = true;
+
+(function() {
+'use strict';
+
+load('jstests/sharding/libs/last_stable_mongos_commands.js');
+load('jstests/sharding/libs/track_unsharded_collections_helpers.js');
+
+const dbName = "test";
+let collName, ns;
+
+function validateTestCase(testCase) {
+ // Check that only expected test case fields are present.
+ for (let key of Object.keys(testCase)) {
+ assert(
+ [
+ "skip",
+ "whenNamespaceDoesNotExistFailsWith",
+ "implicitlyCreatesCollection",
+ "whenNamespaceIsViewFailsWith",
+ "doesNotCheckShardVersion",
+ "doesNotSendShardVersionIfTracked",
+ "command"
+ ].includes(key),
+ "Found unexpected field " + key + " in test case " + tojson(testCase));
+ }
+
+ assert(!!testCase.skip !== !!testCase.command,
+        "Must specify exactly one of 'skip' or 'command' for test case " + tojson(testCase));
+
+ if (testCase.skip) {
+ for (let key of Object.keys(testCase)) {
+ assert(
+ key === "skip" || key === "conditional",
+ "if a test case specifies 'skip', it must not specify any other fields besides 'conditional': " +
+ key + ": " + tojson(testCase));
+ }
+ return;
+ }
+}
+
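+// Reads the shard's cached shardVersion for 'ns' via the getShardVersion command. The 'global'
+// field is either a version (e.g. Timestamp(1, 0)) or the string "UNKNOWN" when the shard has
+// no filtering metadata cached for the namespace.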
+function expectShardsCachedShardVersionToBe(shardConn, ns, expectedShardVersion) {
+ const shardVersion = assert.commandWorked(shardConn.adminCommand({getShardVersion: ns})).global;
+ assert.eq(expectedShardVersion, shardVersion);
+}
+
+let testCases = {
+ _hashBSONElement: {skip: "executes locally on mongos (not sent to any remote node)"},
+ _isSelf: {skip: "executes locally on mongos (not sent to any remote node)"},
+ _mergeAuthzCollections: {skip: "always targets the config server"},
+ abortTransaction: {skip: "unversioned and uses special targeting rules"},
+ addShard: {skip: "not on a user database"},
+ addShardToZone: {skip: "not on a user database"},
+ aggregate: {
+ command: collName => {
+ return {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}};
+ },
+ },
+ authenticate: {skip: "does not forward command to primary shard"},
+ availableQueryOptions: {skip: "executes locally on mongos (not sent to any remote node)"},
+ balancerStart: {skip: "not on a user database"},
+ balancerStatus: {skip: "not on a user database"},
+ balancerStop: {skip: "not on a user database"},
+ buildInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
+ clearLog: {skip: "executes locally on mongos (not sent to any remote node)"},
+ collMod: {
+ whenNamespaceDoesNotExistFailsWith: ErrorCodes.NamespaceNotFound,
+ doesNotSendShardVersionIfTracked: true,
+ command: collName => {
+ return {collMod: collName};
+ },
+ },
+ collStats: {
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {collStats: collName};
+ },
+ },
+ commitTransaction: {skip: "unversioned and uses special targeting rules"},
+ compact: {skip: "not allowed through mongos"},
+ configureFailPoint: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connPoolStats: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connPoolSync: {skip: "executes locally on mongos (not sent to any remote node)"},
+ connectionStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
+ convertToCapped: {skip: "will be removed in 4.4"},
+ count: {
+ command: collName => {
+ return {count: collName, query: {x: 1}};
+ },
+ },
+ create: {skip: "always targets the config server"},
+ createIndexes: {
+ implicitlyCreatesCollection: true,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ doesNotCheckShardVersion: true,
+ doesNotSendShardVersionIfTracked: true,
+ command: collName => {
+ return {createIndexes: collName, indexes: [{key: {a: 1}, name: "index"}]};
+ },
+ },
+ createRole: {skip: "always targets the config server"},
+ createUser: {skip: "always targets the config server"},
+ currentOp: {skip: "not on a user database"},
+ dataSize: {
+ skip: "TODO (SERVER-42638): fails due to special checks on mongos if chunk manager exists",
+ },
+ dbStats: {skip: "always broadcast to all shards"},
+ delete: {
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {delete: collName, deletes: [{q: {_id: 1}, limit: 1}]};
+ },
+ },
+ distinct: {
+ command: collName => {
+ return {distinct: collName, key: "x"};
+ },
+ },
+ drop: {skip: "always targets the config server"},
+ dropAllRolesFromDatabase: {skip: "always targets the config server"},
+ dropAllUsersFromDatabase: {skip: "always targets the config server"},
+ dropConnections: {skip: "not on a user database"},
+ dropDatabase: {skip: "drops the database from the cluster, changing the UUID"},
+ dropIndexes: {
+ whenNamespaceDoesNotExistFailsWith: ErrorCodes.NamespaceNotFound,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ doesNotSendShardVersionIfTracked: true,
+ command: collName => {
+ return {dropIndexes: collName, index: "*"};
+ },
+ },
+ dropRole: {skip: "always targets the config server"},
+ dropUser: {skip: "always targets the config server"},
+ echo: {skip: "does not forward command to primary shard"},
+ enableSharding: {skip: "does not forward command to primary shard"},
+ endSessions: {skip: "goes through the cluster write path"},
+ explain: {skip: "TODO SERVER-31226"},
+ features: {skip: "executes locally on mongos (not sent to any remote node)"},
+ filemd5: {
+ doesNotCheckShardVersion: true,
+ command: collName => {
+ return {filemd5: ObjectId(), root: collName};
+ },
+ },
+ find: {
+ command: collName => {
+ return {find: collName, filter: {x: 1}};
+ },
+ },
+ findAndModify: {
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {findAndModify: collName, query: {_id: 0}, remove: true};
+ },
+ },
+ flushRouterConfig: {skip: "executes locally on mongos (not sent to any remote node)"},
+ fsync: {skip: "broadcast to all shards"},
+ getCmdLineOpts: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getDiagnosticData: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getLastError: {skip: "does not forward command to primary shard"},
+ getLog: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getMore: {skip: "requires a previously established cursor"},
+ getParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getShardMap: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getShardVersion: {skip: "executes locally on mongos (not sent to any remote node)"},
+ getnonce: {skip: "not on a user database"},
+ grantPrivilegesToRole: {skip: "always targets the config server"},
+ grantRolesToRole: {skip: "always targets the config server"},
+ grantRolesToUser: {skip: "always targets the config server"},
+ hostInfo: {skip: "executes locally on mongos (not sent to any remote node)"},
+ insert: {
+ implicitlyCreatesCollection: true,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {insert: collName, documents: [{_id: 1}]};
+ },
+ },
+ invalidateUserCache: {skip: "executes locally on mongos (not sent to any remote node)"},
+ isdbgrid: {skip: "executes locally on mongos (not sent to any remote node)"},
+ isMaster: {skip: "executes locally on mongos (not sent to any remote node)"},
+ killCursors: {skip: "requires a previously established cursor"},
+ killAllSessions: {skip: "always broadcast to all hosts in the cluster"},
+ killAllSessionsByPattern: {skip: "always broadcast to all hosts in the cluster"},
+ killOp: {skip: "does not forward command to primary shard"},
+ killSessions: {skip: "always broadcast to all hosts in the cluster"},
+ listCollections: {skip: "not a command on a namespace"},
+ listCommands: {skip: "executes locally on mongos (not sent to any remote node)"},
+ listDatabases: {skip: "does not forward command to primary shard"},
+ listIndexes: {
+ whenNamespaceDoesNotExistFailsWith: ErrorCodes.NamespaceNotFound,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ doesNotCheckShardVersion: true,
+ command: collName => {
+ return {listIndexes: collName};
+ },
+ },
+ listShards: {skip: "does not forward command to primary shard"},
+ logApplicationMessage: {skip: "not on a user database", conditional: true},
+ logRotate: {skip: "executes locally on mongos (not sent to any remote node)"},
+ logout: {skip: "not on a user database"},
+ mapReduce: {
+ // Uses connection versioning.
+ whenNamespaceDoesNotExistFailsWith: ErrorCodes.NamespaceNotFound,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {
+ mapReduce: collName,
+ map: function() {
+ emit(this.x, 1);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {inline: 1}
+ };
+ },
+ },
+ mergeChunks: {skip: "does not forward command to primary shard"},
+ moveChunk: {skip: "does not forward command to primary shard"},
+ movePrimary: {skip: "reads primary shard from sharding catalog with readConcern: local"},
+ multicast: {skip: "does not forward command to primary shard"},
+ netstat: {skip: "executes locally on mongos (not sent to any remote node)"},
+ ping: {skip: "executes locally on mongos (not sent to any remote node)"},
+ planCacheClear: {
+ // Uses connection versioning.
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {planCacheClear: collName};
+ },
+ },
+ planCacheClearFilters: {
+ // Uses connection versioning.
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {planCacheClearFilters: collName};
+ },
+ },
+ planCacheListFilters: {
+ // Uses connection versioning.
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {planCacheListFilters: collName};
+ },
+ },
+ planCacheListPlans: {
+ // Uses connection versioning.
+ whenNamespaceDoesNotExistFailsWith: ErrorCodes.BadValue,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {planCacheListPlans: collName, query: {_id: "A"}};
+ },
+ },
+ planCacheListQueryShapes: {
+ // Uses connection versioning.
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {planCacheListQueryShapes: collName};
+ },
+ },
+ planCacheSetFilter: {
+ // Uses connection versioning.
+ whenNamespaceDoesNotExistFailsWith: ErrorCodes.BadValue,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {planCacheSetFilter: collName, query: {_id: "A"}, indexes: [{_id: 1}]};
+ },
+ },
+ profile: {skip: "not supported in mongos"},
+ reapLogicalSessionCacheNow: {skip: "is a no-op on mongos"},
+ refineCollectionShardKey: {skip: "not on a user database"},
+ refreshLogicalSessionCacheNow: {skip: "goes through the cluster write path"},
+ refreshSessions: {skip: "executes locally on mongos (not sent to any remote node)"},
+ refreshSessionsInternal:
+ {skip: "executes locally on mongos (not sent to any remote node)", conditional: true},
+ removeShard: {skip: "not on a user database"},
+ removeShardFromZone: {skip: "not on a user database"},
+ renameCollection: {skip: "always targets the config server"},
+ replSetGetStatus: {skip: "not supported in mongos"},
+ resetError: {skip: "not on a user database"},
+ restartCatalog: {skip: "not on a user database"},
+ revokePrivilegesFromRole: {skip: "always targets the config server"},
+ revokeRolesFromRole: {skip: "always targets the config server"},
+ revokeRolesFromUser: {skip: "always targets the config server"},
+ rolesInfo: {skip: "always targets the config server"},
+ saslContinue: {skip: "not on a user database"},
+ saslStart: {skip: "not on a user database"},
+ serverStatus: {skip: "executes locally on mongos (not sent to any remote node)"},
+ setIndexCommitQuorum: {skip: "TODO: define test coverage for setIndexCommitQuorum"},
+ setFeatureCompatibilityVersion: {skip: "not on a user database"},
+ setFreeMonitoring:
+ {skip: "explicitly fails for mongos, primary mongod only", conditional: true},
+ setParameter: {skip: "executes locally on mongos (not sent to any remote node)"},
+ shardCollection: {skip: "does not forward command to primary shard"},
+ shardConnPoolStats: {skip: "does not forward command to primary shard"},
+ shutdown: {skip: "does not forward command to primary shard"},
+ split: {skip: "does not forward command to primary shard"},
+ splitVector: {skip: "does not forward command to primary shard"},
+ startRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
+ startSession: {skip: "executes locally on mongos (not sent to any remote node)"},
+ stopRecordingTraffic: {skip: "executes locally on mongos (not sent to any remote node)"},
+ update: {
+ implicitlyCreatesCollection: true,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ command: collName => {
+ return {
+ update: collName,
+ updates: [{q: {_id: 2}, u: {_id: 2}, upsert: true, multi: false}]
+ };
+ },
+ },
+ updateRole: {skip: "always targets the config server"},
+ updateUser: {skip: "always targets the config server"},
+ updateZoneKeyRange: {skip: "not on a user database"},
+ usersInfo: {skip: "always targets the config server"},
+ validate: {
+ whenNamespaceDoesNotExistFailsWith: ErrorCodes.NamespaceNotFound,
+ whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
+ doesNotCheckShardVersion: true,
+ command: collName => {
+ return {validate: collName};
+ },
+ },
+ whatsmyuri: {skip: "executes locally on mongos (not sent to any remote node)"},
+};
+
+commandsRemovedFromMongosIn44.forEach(function(cmd) {
+ testCases[cmd] = {skip: "must define test coverage for 4.0 backwards compatibility"};
+});
+
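+// Start a cluster in which the config server tracks unsharded collections (the
+// writeUnshardedCollectionsToShardingCatalog failpoint) and the shard runs the FCV 4.4
+// checkShardVersion protocol (the useFCV44CheckShardVersionProtocol failpoint).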
+const st = new ShardingTest({
+ shards: 1,
+ config: 1,
+ other: {
+ configOptions: {
+ setParameter:
+ {"failpoint.writeUnshardedCollectionsToShardingCatalog": "{mode: 'alwaysOn'}"},
+ },
+ shardOptions: {
+ setParameter: {"failpoint.useFCV44CheckShardVersionProtocol": "{mode: 'alwaysOn'}"},
+ },
+ },
+});
+
+assert.commandWorked(st.s.getDB(dbName).runCommand({create: "underlying_collection_for_views"}));
+
+let res = st.s.adminCommand({listCommands: 1});
+assert.commandWorked(res);
+
+for (let command of Object.keys(res.commands)) {
+ let testCase = testCases[command];
+ assert(testCase !== undefined, "coverage failure: must define a test case for " + command);
+ if (!testCases[command].validated) {
+ validateTestCase(testCase);
+ testCases[command].validated = true;
+ }
+
+ if (testCase.skip) {
+ print("skipping " + command + ": " + testCase.skip);
+ continue;
+ }
+
+ //
+ // Test command when namespace does not exist.
+ //
+
+ [collName, ns] = getNewNs(dbName);
+ jsTest.log(`Testing ${command} when namespace does not exist; ns: ${ns}`);
+
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+
+ if (testCase.whenNamespaceDoesNotExistFailsWith) {
+ assert.commandFailedWithCode(st.s.getDB(dbName).runCommand(testCase.command(collName)),
+ testCase.whenNamespaceDoesNotExistFailsWith);
+ } else {
+ assert.commandWorked(st.s.getDB(dbName).runCommand(testCase.command(collName)));
+ }
+
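+ // If the command implicitly created the collection and performed a shardVersion check, the
+ // shard will have refreshed and cached the now-tracked collection's version; otherwise its
+ // cache for the new namespace remains UNKNOWN.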
+ if (testCase.implicitlyCreatesCollection &&
+ !(testCase.doesNotCheckShardVersion || testCase.doesNotSendShardVersionIfTracked)) {
+ expectShardsCachedShardVersionToBe(st.shard0, ns, Timestamp(1, 0));
+ } else {
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+ }
+
+ //
+ // Test command when namespace is a view.
+ //
+
+ [collName, ns] = getNewNs(dbName);
+ jsTest.log(`Testing ${command} when namespace is a view; ns: ${ns}`);
+
+ assert.commandWorked(st.s.getDB(dbName).runCommand({
+ create: collName,
+ viewOn: "underlying_collection_for_views",
+ pipeline: [{$project: {_id: 1}}]
+ }));
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+
+ if (testCase.whenNamespaceIsViewFailsWith) {
+ assert.commandFailedWithCode(st.s.getDB(dbName).runCommand(testCase.command(collName)),
+ testCase.whenNamespaceIsViewFailsWith);
+ } else {
+ assert.commandWorked(st.s.getDB(dbName).runCommand(testCase.command(collName)));
+ }
+
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+
+ //
+ // Test command when namespace is an unsharded collection and mongos sends UNSHARDED version.
+ //
+
+ [collName, ns] = getNewNs(dbName);
+ jsTest.log(`Testing ${command} when namespace is an unsharded collection and mongos sends ` +
+            `UNSHARDED version; ns: ${ns}`);
+
+ assert.commandWorked(st.s.getDB(dbName).runCommand({create: collName}));
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+
+ assert.commandWorked(st.s.getDB(dbName).runCommand(testCase.command(collName)));
+
+ if (testCase.doesNotCheckShardVersion) {
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+ } else {
+ expectShardsCachedShardVersionToBe(st.shard0, ns, Timestamp(1, 0));
+ }
+
+ //
+ // Test command when namespace is an unsharded collection and mongos sends a real version.
+ //
+
+ [collName, ns] = getNewNs(dbName);
+ jsTest.log(`Testing ${command} when namespace is an unsharded collection and mongos sends ` +
+            `a real version; ns: ${ns}`);
+
+ assert.commandWorked(st.s.getDB(dbName).runCommand({create: collName}));
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+
+ // Flushing the router's config ensures the router will load the db and all collections, so the
+ // router will send a real version on the first request.
+ assert.commandWorked(st.s.adminCommand({flushRouterConfig: dbName}));
+ assert.commandWorked(st.s.getDB(dbName).runCommand(testCase.command(collName)));
+
+ if (testCase.doesNotCheckShardVersion || testCase.doesNotSendShardVersionIfTracked) {
+ expectShardsCachedShardVersionToBe(st.shard0, ns, "UNKNOWN");
+ } else {
+ expectShardsCachedShardVersionToBe(st.shard0, ns, Timestamp(1, 0));
+ }
+}
+
+// After iterating through all the existing commands, ensure there were no additional test cases
+// that did not correspond to any mongos command.
+for (let key of Object.keys(testCases)) {
+ // Commands added to mongos in 4.4 have real test cases so that they are exercised in the
+ // regular suites, but those test cases are never validated in the last-stable suite (where
+ // the commands do not exist on the mongos), so skip them here to avoid failing the assertion
+ // below. Commands removed from mongos in 4.4 have "skip" test cases so that they are defined
+ // for the last-stable suite (where the commands still exist on the mongos), but they are
+ // never validated in the regular suites, so skip them as well.
+ if (commandsAddedToMongosIn44.includes(key) || commandsRemovedFromMongosIn44.includes(key)) {
+ continue;
+ }
+ assert(testCases[key].validated || testCases[key].conditional,
+ "you defined a test case for a command '" + key +
+ "' that does not exist on mongos: " + tojson(testCases[key]));
+}
+
+st.stop();
+})();
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index b12ccc69717..6c8fde275fb 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -175,7 +175,7 @@ void appendCommandResponse(const PlanExecutor* exec,
}
}
-void assertCanWrite(OperationContext* opCtx, const NamespaceString& nsString) {
+void assertCanWrite(OperationContext* opCtx, const NamespaceString& nsString, bool isCollection) {
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while running findAndModify command on collection "
<< nsString.ns(),
@@ -183,7 +183,7 @@ void assertCanWrite(OperationContext* opCtx, const NamespaceString& nsString) {
// Check for shard version match
auto css = CollectionShardingState::get(opCtx, nsString);
- css->checkShardVersionOrThrow(opCtx);
+ css->checkShardVersionOrThrow(opCtx, isCollection);
}
void recordStatsForTopCommand(OperationContext* opCtx) {
@@ -276,7 +276,7 @@ public:
autoColl.getDb());
auto css = CollectionShardingState::get(opCtx, nsString);
- css->checkShardVersionOrThrow(opCtx);
+ css->checkShardVersionOrThrow(opCtx, autoColl.getCollection());
Collection* const collection = autoColl.getCollection();
const auto exec =
@@ -301,7 +301,7 @@ public:
autoColl.getDb());
auto css = CollectionShardingState::get(opCtx, nsString);
- css->checkShardVersionOrThrow(opCtx);
+ css->checkShardVersionOrThrow(opCtx, autoColl.getCollection());
Collection* const collection = autoColl.getCollection();
const auto exec =
@@ -389,7 +389,7 @@ public:
CurOp::get(opCtx)->enter_inlock(nsString.ns().c_str(), dbProfilingLevel);
}
- assertCanWrite(opCtx, nsString);
+ assertCanWrite(opCtx, nsString, autoColl.getCollection());
Collection* const collection = autoColl.getCollection();
checkIfTransactionOnCappedColl(collection, inTransaction);
@@ -455,7 +455,7 @@ public:
CurOp::get(opCtx)->enter_inlock(nsString.ns().c_str(), dbProfilingLevel);
}
- assertCanWrite(opCtx, nsString);
+ assertCanWrite(opCtx, nsString, autoColl->getCollection());
Collection* collection = autoColl->getCollection();
@@ -472,7 +472,8 @@ public:
autoColl.reset();
autoDb.emplace(opCtx, dbName, MODE_X);
- assertCanWrite(opCtx, nsString);
+ assertCanWrite(
+ opCtx, nsString, autoDb->getDb()->getCollection(opCtx, nsString));
collection = autoDb->getDb()->getCollection(opCtx, nsString);
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 805ea654158..e67f97700b9 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1413,7 +1413,8 @@ bool runMapReduce(OperationContext* opCtx,
const auto metadata = [&] {
AutoGetCollectionForReadCommand autoColl(opCtx, config.nss);
- return CollectionShardingState::get(opCtx, config.nss)->getOrphansFilter(opCtx);
+ return CollectionShardingState::get(opCtx, config.nss)
+ ->getOrphansFilter(opCtx, autoColl.getCollection());
}();
bool shouldHaveData = false;
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index c851fcfcc9c..854b02fbdf6 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -316,13 +316,13 @@ AutoGetCollectionForReadCommand::AutoGetCollectionForReadCommand(
_autoCollForRead.getDb() ? _autoCollForRead.getDb()->getProfilingLevel()
: kDoNotChangeProfilingLevel,
deadline) {
- if (!_autoCollForRead.getView()) {
- // Perform the check early so the query planner would be able to extract the correct
- // shard key. Also make sure that version is compatible if query planner decides to
- // use an empty plan.
- auto css = CollectionShardingState::get(opCtx, _autoCollForRead.getNss());
- css->checkShardVersionOrThrow(opCtx);
- }
+
+ // Perform the check early so the query planner would be able to extract the correct
+ // shard key. Also make sure that version is compatible if query planner decides to
+ // use an empty plan.
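+ // A namespace can be a view or a collection but not both, so a non-null getCollection()
+ // indicates that the namespace is an actual collection.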
+ invariant(!_autoCollForRead.getView() || !_autoCollForRead.getCollection());
+ auto css = CollectionShardingState::get(opCtx, _autoCollForRead.getNss());
+ css->checkShardVersionOrThrow(opCtx, _autoCollForRead.getCollection());
}
OldClientContext::OldClientContext(OperationContext* opCtx, const std::string& ns, bool doVersion)
@@ -344,7 +344,8 @@ OldClientContext::OldClientContext(OperationContext* opCtx, const std::string& n
break;
default:
CollectionShardingState::get(_opCtx, NamespaceString(ns))
- ->checkShardVersionOrThrow(_opCtx);
+ ->checkShardVersionOrThrow(_opCtx,
+ _db->getCollection(opCtx, NamespaceString(ns)));
break;
}
}
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 6dd5bca5b4c..f5b9791cf00 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -193,12 +193,12 @@ private:
repl::OpTime _opTimeAtLastOpStart;
};
-void assertCanWrite_inlock(OperationContext* opCtx, const NamespaceString& ns) {
+void assertCanWrite_inlock(OperationContext* opCtx, const NamespaceString& ns, bool isCollection) {
uassert(ErrorCodes::PrimarySteppedDown,
str::stream() << "Not primary while writing to " << ns.ns(),
repl::ReplicationCoordinator::get(opCtx->getServiceContext())
->canAcceptWritesFor(opCtx, ns));
- CollectionShardingState::get(opCtx, ns)->checkShardVersionOrThrow(opCtx);
+ CollectionShardingState::get(opCtx, ns)->checkShardVersionOrThrow(opCtx, isCollection);
}
void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
@@ -213,7 +213,7 @@ void makeCollection(OperationContext* opCtx, const NamespaceString& ns) {
AutoGetOrCreateDb db(opCtx, ns.db(), MODE_IX);
Lock::CollectionLock collLock(opCtx, ns, MODE_X);
- assertCanWrite_inlock(opCtx, ns);
+ assertCanWrite_inlock(opCtx, ns, db.getDb()->getCollection(opCtx, ns));
if (!db.getDb()->getCollection(opCtx, ns)) { // someone else may have beat us to it.
uassertStatusOK(userAllowedCreateNS(ns.db(), ns.coll()));
WriteUnitOfWork wuow(opCtx);
@@ -390,7 +390,7 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
}
curOp.raiseDbProfileLevel(collection->getDb()->getProfilingLevel());
- assertCanWrite_inlock(opCtx, wholeOp.getNamespace());
+ assertCanWrite_inlock(opCtx, wholeOp.getNamespace(), collection->getCollection());
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangWithLockDuringBatchInsert, opCtx, "hangWithLockDuringBatchInsert");
@@ -635,7 +635,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx,
curOp.raiseDbProfileLevel(collection->getDb()->getProfilingLevel());
}
- assertCanWrite_inlock(opCtx, ns);
+ assertCanWrite_inlock(opCtx, ns, collection->getCollection());
auto exec = uassertStatusOK(
getExecutorUpdate(opCtx, &curOp.debug(), collection->getCollection(), &parsedUpdate));
@@ -871,7 +871,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx,
curOp.raiseDbProfileLevel(collection.getDb()->getProfilingLevel());
}
- assertCanWrite_inlock(opCtx, ns);
+ assertCanWrite_inlock(opCtx, ns, collection.getCollection());
CurOpFailpointHelpers::waitWhileFailPointEnabled(
&hangWithLockDuringBatchRemove, opCtx, "hangWithLockDuringBatchRemove");
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 3efb9b5813f..bd7a5cf7739 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -127,7 +127,8 @@ StatusWith<unique_ptr<PlanExecutor, PlanExecutor::Deleter>> createRandomCursorEx
// If the incoming operation is sharded, use the CSS to infer the filtering metadata for the
// collection, otherwise treat it as unsharded
- auto shardMetadata = CollectionShardingState::get(opCtx, coll->ns())->getOrphansFilter(opCtx);
+ auto shardMetadata =
+ CollectionShardingState::get(opCtx, coll->ns())->getOrphansFilter(opCtx, coll);
// Because 'numRecords' includes orphan documents, our initial decision to optimize the $sample
// cursor may have been mistaken. For sharded collections, build a TRIAL plan that will switch
diff --git a/src/mongo/db/pipeline/process_interface_shardsvr.cpp b/src/mongo/db/pipeline/process_interface_shardsvr.cpp
index 74508c4a10e..b9b69d4e9d9 100644
--- a/src/mongo/db/pipeline/process_interface_shardsvr.cpp
+++ b/src/mongo/db/pipeline/process_interface_shardsvr.cpp
@@ -206,8 +206,9 @@ unique_ptr<Pipeline, PipelineDeleter> MongoInterfaceShardServer::attachCursorSou
std::unique_ptr<ShardFilterer> MongoInterfaceShardServer::getShardFilterer(
const boost::intrusive_ptr<ExpressionContext>& expCtx) const {
- auto shardingMetadata =
- CollectionShardingState::get(expCtx->opCtx, expCtx->ns)->getOrphansFilter(expCtx->opCtx);
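+ // The ExpressionContext carries the collection's UUID only when the aggregation namespace is
+ // an actual collection, so its presence tells us whether to run the collection version check.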
+ const bool aggNsIsCollection = expCtx->uuid != boost::none;
+ auto shardingMetadata = CollectionShardingState::get(expCtx->opCtx, expCtx->ns)
+ ->getOrphansFilter(expCtx->opCtx, aggNsIsCollection);
return std::make_unique<ShardFiltererImpl>(std::move(shardingMetadata));
}
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 7e8c0d1e9b4..804baeb123d 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -382,7 +382,8 @@ StatusWith<PrepareExecutionResult> prepareExecution(OperationContext* opCtx,
if (plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) {
root = std::make_unique<ShardFilterStage>(
opCtx,
- CollectionShardingState::get(opCtx, canonicalQuery->nss())->getOrphansFilter(opCtx),
+ CollectionShardingState::get(opCtx, canonicalQuery->nss())
+ ->getOrphansFilter(opCtx, collection),
ws,
root.release());
}
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 382e3652354..508690ae278 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -312,7 +312,8 @@ PlanStage* buildStages(OperationContext* opCtx,
}
auto css = CollectionShardingState::get(opCtx, collection->ns());
- return new ShardFilterStage(opCtx, css->getOrphansFilter(opCtx), ws, childStage);
+ return new ShardFilterStage(
+ opCtx, css->getOrphansFilter(opCtx, collection), ws, childStage);
}
case STAGE_DISTINCT_SCAN: {
const DistinctNode* dn = static_cast<const DistinctNode*>(root);
diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp
index 34ff588020f..da000652d40 100644
--- a/src/mongo/db/s/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp
@@ -139,7 +139,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInTheFuture) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
auto* const css = CollectionShardingState::get(operationContext(), kNss);
- testFn(css->getOrphansFilter(operationContext()));
+ testFn(css->getOrphansFilter(operationContext(), true /* isCollection */));
}
{
@@ -169,7 +169,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsInThePast) {
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
auto* const css = CollectionShardingState::get(operationContext(), kNss);
- testFn(css->getOrphansFilter(operationContext()));
+ testFn(css->getOrphansFilter(operationContext(), true /* isCollection */));
}
{
@@ -207,7 +207,7 @@ TEST_F(CollectionMetadataFilteringTest, FilterDocumentsTooFarInThePastThrowsStal
AutoGetCollection autoColl(operationContext(), kNss, MODE_IS);
auto* const css = CollectionShardingState::get(operationContext(), kNss);
- testFn(css->getOrphansFilter(operationContext()));
+ testFn(css->getOrphansFilter(operationContext(), true /* isCollection */));
}
{
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index feb519090e3..02b5406f625 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -37,10 +37,14 @@
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/s/stale_exception.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/string_map.h"
namespace mongo {
+
+MONGO_FAIL_POINT_DEFINE(useFCV44CheckShardVersionProtocol);
+
namespace {
class CollectionShardingStateMap {
@@ -159,9 +163,10 @@ void CollectionShardingState::report(OperationContext* opCtx, BSONObjBuilder* bu
collectionsMap->report(opCtx, builder);
}
-ScopedCollectionMetadata CollectionShardingState::getOrphansFilter(OperationContext* opCtx) {
+ScopedCollectionMetadata CollectionShardingState::getOrphansFilter(OperationContext* opCtx,
+ bool isCollection) {
const auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime();
- auto optMetadata = _getMetadataWithVersionCheckAt(opCtx, atClusterTime);
+ auto optMetadata = _getMetadataWithVersionCheckAt(opCtx, atClusterTime, isCollection);
if (!optMetadata)
return {kUnshardedCollection};
@@ -194,12 +199,14 @@ boost::optional<ChunkVersion> CollectionShardingState::getCurrentShardVersionIfK
return metadata->getCollVersion();
}
-void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx) {
- (void)_getMetadataWithVersionCheckAt(opCtx, boost::none);
+void CollectionShardingState::checkShardVersionOrThrow(OperationContext* opCtx, bool isCollection) {
+ (void)_getMetadataWithVersionCheckAt(opCtx, boost::none, isCollection);
}
boost::optional<ScopedCollectionMetadata> CollectionShardingState::_getMetadataWithVersionCheckAt(
- OperationContext* opCtx, const boost::optional<mongo::LogicalTime>& atClusterTime) {
+ OperationContext* opCtx,
+ const boost::optional<mongo::LogicalTime>& atClusterTime,
+ bool isCollection) {
const auto optReceivedShardVersion = getOperationReceivedVersion(opCtx, _nss);
if (!optReceivedShardVersion)
@@ -218,8 +225,22 @@ boost::optional<ScopedCollectionMetadata> CollectionShardingState::_getMetadataW
auto metadata = _getMetadata(atClusterTime);
auto wantedShardVersion = ChunkVersion::UNSHARDED();
- if (metadata && (*metadata)->isSharded()) {
- wantedShardVersion = (*metadata)->getShardVersion();
+
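+ // FCV 4.4 checkShardVersion protocol: if the namespace is an existing collection but this
+ // shard has no shardVersion cached for it, throw StaleConfig instead of assuming the
+ // namespace is unsharded. Views and non-existent namespaces keep the UNSHARDED wanted
+ // version and let the request proceed.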
+ if (MONGO_FAIL_POINT(useFCV44CheckShardVersionProtocol)) {
+ LOG(0) << "Received shardVersion: " << receivedShardVersion << " for " << _nss.ns();
+ if (isCollection) {
+ LOG(0) << "Namespace " << _nss.ns() << " is collection, "
+ << (metadata ? "have shardVersion cached" : "don't know shardVersion");
+ uassert(StaleConfigInfo(_nss, receivedShardVersion, wantedShardVersion),
+ "don't know shardVersion",
+ metadata);
+ wantedShardVersion = (*metadata)->getShardVersion();
+ }
+ LOG(0) << "Wanted shardVersion: " << wantedShardVersion << " for " << _nss.ns();
+ } else {
+ if (metadata && (*metadata)->isSharded()) {
+ wantedShardVersion = (*metadata)->getShardVersion();
+ }
}
auto criticalSectionSignal = [&] {
diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h
index 906c366b8fb..cc246dacdab 100644
--- a/src/mongo/db/s/collection_sharding_state.h
+++ b/src/mongo/db/s/collection_sharding_state.h
@@ -97,7 +97,7 @@ public:
*
* The returned object is safe to access even after the collection lock has been dropped.
*/
- ScopedCollectionMetadata getOrphansFilter(OperationContext* opCtx);
+ ScopedCollectionMetadata getOrphansFilter(OperationContext* opCtx, bool isCollection);
/**
* See the comments for 'getOrphansFilter' above for more information on this method.
@@ -120,7 +120,7 @@ public:
* version of the collection and if not, throws StaleConfigException populated with the received
* and wanted versions.
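+ * 'isCollection' must indicate whether the namespace is an actual collection, since under
+ * the FCV 4.4 protocol an unknown shardVersion for an existing collection fails the check.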
*/
- void checkShardVersionOrThrow(OperationContext* opCtx);
+ void checkShardVersionOrThrow(OperationContext* opCtx, bool isCollection);
/**
* Methods to control the collection's critical section. Methods listed below must be called
@@ -161,7 +161,9 @@ private:
* atClusterTime if specified.
*/
boost::optional<ScopedCollectionMetadata> _getMetadataWithVersionCheckAt(
- OperationContext* opCtx, const boost::optional<mongo::LogicalTime>& atClusterTime);
+ OperationContext* opCtx,
+ const boost::optional<mongo::LogicalTime>& atClusterTime,
+ bool isCollection);
// Object-wide ResourceMutex to protect changes to the CollectionShardingRuntime or objects
// held within. Use only the CollectionShardingRuntimeLock to lock this mutex.
diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp
index 87ea9653eb1..4dcc90be124 100644
--- a/src/mongo/db/s/get_shard_version_command.cpp
+++ b/src/mongo/db/s/get_shard_version_command.cpp
@@ -106,7 +106,8 @@ public:
result.appendTimestamp("mine", 0);
}
- AutoGetCollection autoColl(opCtx, nss, MODE_IS);
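+ // Permit views so that getShardVersion can be run on a view namespace and report the
+ // shard's cached version (if any) rather than failing to acquire the namespace.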
+ AutoGetCollection autoColl(
+ opCtx, nss, MODE_IS, AutoGetCollection::ViewMode::kViewsPermitted);
auto* const css = CollectionShardingRuntime::get(opCtx, nss);
const auto optMetadata = css->getCurrentMetadataIfKnown();
diff --git a/src/mongo/db/s/op_observer_sharding_impl.cpp b/src/mongo/db/s/op_observer_sharding_impl.cpp
index 606d8ae7dfc..a76bbd1bcb2 100644
--- a/src/mongo/db/s/op_observer_sharding_impl.cpp
+++ b/src/mongo/db/s/op_observer_sharding_impl.cpp
@@ -55,7 +55,7 @@ void assertIntersectingChunkHasNotMoved(OperationContext* opCtx,
if (!repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime())
return;
- const auto metadata = csr->getOrphansFilter(opCtx);
+ const auto metadata = csr->getOrphansFilter(opCtx, true /* isCollection */);
if (!metadata->isSharded())
return;
@@ -105,7 +105,7 @@ void OpObserverShardingImpl::shardObserveInsertOp(OperationContext* opCtx,
return;
}
- csr->checkShardVersionOrThrow(opCtx);
+ csr->checkShardVersionOrThrow(opCtx, true /* isCollection */);
if (inMultiDocumentTransaction) {
assertIntersectingChunkHasNotMoved(opCtx, csr, insertedDoc);
@@ -127,7 +127,7 @@ void OpObserverShardingImpl::shardObserveUpdateOp(OperationContext* opCtx,
const repl::OpTime& prePostImageOpTime,
const bool inMultiDocumentTransaction) {
auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
- csr->checkShardVersionOrThrow(opCtx);
+ csr->checkShardVersionOrThrow(opCtx, true /* isCollection */);
if (inMultiDocumentTransaction) {
assertIntersectingChunkHasNotMoved(opCtx, csr, postImageDoc);
@@ -148,7 +148,7 @@ void OpObserverShardingImpl::shardObserveDeleteOp(OperationContext* opCtx,
const repl::OpTime& preImageOpTime,
const bool inMultiDocumentTransaction) {
auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
- csr->checkShardVersionOrThrow(opCtx);
+ csr->checkShardVersionOrThrow(opCtx, true /* isCollection */);
if (inMultiDocumentTransaction) {
assertIntersectingChunkHasNotMoved(opCtx, csr, documentKey);
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index 3284b5e825c..0a726714fd6 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -110,7 +110,8 @@ bool OperationShardingState::hasShardVersion() const {
return _globalUnshardedShardVersion || !_shardVersions.empty();
}
-ChunkVersion OperationShardingState::getShardVersion(const NamespaceString& nss) const {
+boost::optional<ChunkVersion> OperationShardingState::getShardVersion(
+ const NamespaceString& nss) const {
if (_globalUnshardedShardVersion) {
return ChunkVersion::UNSHARDED();
}
@@ -119,9 +120,8 @@ ChunkVersion OperationShardingState::getShardVersion(const NamespaceString& nss)
if (it != _shardVersions.end()) {
return it->second;
}
- // If the client did not send a shardVersion for the requested namespace, assume the client
- // expected the namespace to be unsharded.
- return ChunkVersion::UNSHARDED();
+
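+ // The client did not send a shardVersion for this namespace; under the FCV 4.4 protocol
+ // callers may no longer assume it is unsharded.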
+ return boost::none;
}
bool OperationShardingState::hasDbVersion() const {
diff --git a/src/mongo/db/s/operation_sharding_state.h b/src/mongo/db/s/operation_sharding_state.h
index c24b451e274..d99227e0254 100644
--- a/src/mongo/db/s/operation_sharding_state.h
+++ b/src/mongo/db/s/operation_sharding_state.h
@@ -108,10 +108,9 @@ public:
* operation. Documents in chunks which did not belong on this shard at this shard version
* will be filtered out.
*
- * Returns ChunkVersion::UNSHARDED() if this operation has no shard version information
- * for the requested namespace.
+ * Returns ChunkVersion::UNSHARDED() if setGlobalUnshardedShardVersion has been called, and
+ * boost::none if the client did not send a shardVersion for the requested namespace.
*/
- ChunkVersion getShardVersion(const NamespaceString& nss) const;
+ boost::optional<ChunkVersion> getShardVersion(const NamespaceString& nss) const;
/**
* Returns true if the client sent a databaseVersion for any namespace.