summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--jstests/auth/curop_auth_info.js30
-rw-r--r--jstests/core/txns/commands_in_txns_read_concern.js39
-rw-r--r--jstests/core/txns/create_indexes.js31
-rw-r--r--jstests/core/txns/create_indexes_parallel.js52
-rw-r--r--jstests/sharding/create_existing_indexes_prepared_transactions.js103
-rw-r--r--jstests/sharding/query/comment_field_sharded.js8
-rw-r--r--jstests/sharding/stale_mongos_and_restarted_shards_agree_on_shard_version.js23
-rw-r--r--src/mongo/db/s/op_observer_sharding_impl.cpp14
8 files changed, 142 insertions, 158 deletions
diff --git a/jstests/auth/curop_auth_info.js b/jstests/auth/curop_auth_info.js
index 2bb329b1eee..5e855b799e5 100644
--- a/jstests/auth/curop_auth_info.js
+++ b/jstests/auth/curop_auth_info.js
@@ -10,22 +10,20 @@ const runTest = function(conn, failPointConn) {
assert.commandWorked(db.runCommand({createUser: "testuser", pwd: "pwd", roles: []}));
db.grantRolesToUser("testuser", [{role: "readWrite", db: "test"}]);
- const queryFn = function() {
- assert.eq(db.getSiblingDB("admin").auth("testuser", "pwd"), 1);
- let testDB = db.getSiblingDB("test");
- testDB.test.insert({});
- assert.eq(testDB.test.find({}).comment("curop_auth_info.js query").itcount(), 1);
- };
+ assert.commandWorked(db.getSiblingDB("test").test.insert({}));
jsTestLog("blocking finds and starting parallel shell to create op");
assert.commandWorked(failPointConn.getDB("admin").runCommand(
{configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
- let finderWait = startParallelShell(queryFn, conn.port);
- let myOp;
+ let finderWait = startParallelShell(function() {
+ assert.eq(db.getSiblingDB("admin").auth("testuser", "pwd"), 1);
+ let testDB = db.getSiblingDB("test");
+ assert.eq(testDB.test.find({}).comment("curop_auth_info.js query").itcount(), 1);
+ }, conn.port);
+ let myOp;
assert.soon(function() {
- const curOpResults = db.runCommand({currentOp: 1});
- assert.commandWorked(curOpResults);
+ const curOpResults = assert.commandWorked(db.runCommand({currentOp: 1}));
print(tojson(curOpResults));
const myOps = curOpResults["inprog"].filter((op) => {
return (op["command"]["comment"] == "curop_auth_info.js query");
@@ -63,15 +61,7 @@ const m = MongoRunner.runMongod();
runTest(m, m);
MongoRunner.stopMongod(m);
-const st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- keyFile: 'jstests/libs/key1',
- other: {
- shardAsReplicaSet: false,
- }
-});
-runTest(st.s0, st.d0);
+const st = new ShardingTest({shards: 1, mongos: 1, config: 1, keyFile: 'jstests/libs/key1'});
+runTest(st.s0, st.shard0);
st.stop();
})();
diff --git a/jstests/core/txns/commands_in_txns_read_concern.js b/jstests/core/txns/commands_in_txns_read_concern.js
index d873d91cebb..59e3d1d1c10 100644
--- a/jstests/core/txns/commands_in_txns_read_concern.js
+++ b/jstests/core/txns/commands_in_txns_read_concern.js
@@ -11,6 +11,7 @@
(function() {
"use strict";
+load("jstests/libs/auto_retry_transaction_in_sharding.js");
load("jstests/libs/create_collection_txn_helpers.js");
load("jstests/libs/create_index_txn_helpers.js");
@@ -26,7 +27,9 @@ otherColl.drop({writeConcern: {w: "majority"}});
jsTest.log("Testing createCollection in a transaction with local readConcern");
session.startTransaction({readConcern: {level: "local"}, writeConcern: {w: "majority"}});
-createCollAndCRUDInTxn(sessionDB, collName, "insert", true /*explicitCreate*/);
+retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createCollAndCRUDInTxn(sessionDB, collName, "insert", true /*explicitCreate*/);
+}, {readConcern: {level: "local"}, writeConcern: {w: "majority"}});
assert.commandWorked(session.commitTransaction_forTesting());
assert.eq(sessionColl.find({}).itcount(), 1);
@@ -34,7 +37,10 @@ sessionColl.drop({writeConcern: {w: "majority"}});
jsTest.log("Testing createIndexes in a transaction with local readConcern");
session.startTransaction({readConcern: {level: "local"}, writeConcern: {w: "majority"}});
-createIndexAndCRUDInTxn(sessionDB, collName, false /*explicitCollCreate*/, false /*multikeyIndex*/);
+retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(
+ sessionDB, collName, false /*explicitCollCreate*/, false /*multikeyIndex*/);
+}, {readConcern: {level: "local"}, writeConcern: {w: "majority"}});
assert.commandWorked(session.commitTransaction_forTesting());
assert.eq(sessionColl.find({}).itcount(), 1);
assert.eq(sessionColl.getIndexes().length, 2);
@@ -47,8 +53,10 @@ assert.eq(otherColl.find({}).itcount(), 1);
jsTest.log("Testing createCollection in a transaction with local readConcern, with other " +
"operations preceeding it");
session.startTransaction({readConcern: {level: "local"}, writeConcern: {w: "majority"}});
-assert.eq(otherColl.find({a: 1}).itcount(), 1);
-createCollAndCRUDInTxn(sessionDB, collName, "insert", true /*explicitCreate*/);
+retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ assert.eq(otherColl.find({a: 1}).itcount(), 1);
+ createCollAndCRUDInTxn(sessionDB, collName, "insert", true /*explicitCreate*/);
+}, {readConcern: {level: "local"}, writeConcern: {w: "majority"}});
assert.commandWorked(session.commitTransaction_forTesting());
assert.eq(sessionColl.find({}).itcount(), 1);
@@ -60,8 +68,11 @@ assert.eq(otherColl.find({}).itcount(), 1);
jsTest.log("Testing createIndexes in a transaction with local readConcern, with other " +
"operations preceeding it");
session.startTransaction({readConcern: {level: "local"}, writeConcern: {w: "majority"}});
-assert.eq(otherColl.find({a: 1}).itcount(), 1);
-createIndexAndCRUDInTxn(sessionDB, collName, false /*explicitCollCreate*/, false /*multikeyIndex*/);
+retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ assert.eq(otherColl.find({a: 1}).itcount(), 1);
+ createIndexAndCRUDInTxn(
+ sessionDB, collName, false /*explicitCollCreate*/, false /*multikeyIndex*/);
+}, {readConcern: {level: "local"}, writeConcern: {w: "majority"}});
assert.commandWorked(session.commitTransaction_forTesting());
assert.eq(sessionColl.find({}).itcount(), 1);
assert.eq(sessionColl.getIndexes().length, 2);
@@ -73,14 +84,14 @@ assert.eq(otherColl.find({}).itcount(), 1);
jsTest.log("Testing createCollection in a transaction with non-local readConcern (SHOULD FAIL)");
session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
-let res = sessionDB.createCollection(collName);
-assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(sessionDB.createCollection(collName), ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
jsTest.log("Testing createIndexes in a transaction with non-local readConcern (SHOULD FAIL)");
session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
-res = sessionColl.runCommand({createIndexes: collName, indexes: [indexSpecs]});
-assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ sessionColl.runCommand({createIndexes: collName, indexes: [indexSpecs]}),
+ ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
otherColl.drop({writeConcern: {w: "majority"}});
@@ -91,8 +102,7 @@ jsTest.log("Testing createCollection in a transaction with non-local readConcern
"operations preceeding it (SHOULD FAIL)");
session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
assert.eq(otherColl.find({a: 1}).itcount(), 1);
-res = sessionDB.createCollection(collName);
-assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(sessionDB.createCollection(collName), ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
otherColl.drop({writeConcern: {w: "majority"}});
@@ -103,8 +113,9 @@ jsTest.log("Testing createIndexes in a transaction with non-local readConcern, w
"operations preceeding it (SHOULD FAIL)");
session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
assert.eq(otherColl.find({a: 1}).itcount(), 1);
-res = sessionColl.runCommand({createIndexes: collName, indexes: [indexSpecs]});
-assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(
+ sessionColl.runCommand({createIndexes: collName, indexes: [indexSpecs]}),
+ ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
session.endSession();
diff --git a/jstests/core/txns/create_indexes.js b/jstests/core/txns/create_indexes.js
index e160a51b650..bdb88ecc13a 100644
--- a/jstests/core/txns/create_indexes.js
+++ b/jstests/core/txns/create_indexes.js
@@ -9,6 +9,7 @@
(function() {
"use strict";
+load("jstests/libs/auto_retry_transaction_in_sharding.js");
load("jstests/libs/create_index_txn_helpers.js");
let doCreateIndexesTest = function(explicitCollectionCreate, multikeyIndex) {
@@ -23,18 +24,18 @@ let doCreateIndexesTest = function(explicitCollectionCreate, multikeyIndex) {
secondSessionColl.drop({writeConcern: {w: "majority"}});
jsTest.log("Testing createIndexes in a transaction");
- session.startTransaction({writeConcern: {w: "majority"}});
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
- session.commitTransaction();
+ withTxnAndAutoRetryOnMongos(session, function() {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
assert.eq(sessionColl.find({}).itcount(), 1);
assert.eq(sessionColl.getIndexes().length, 2);
sessionColl.drop({writeConcern: {w: "majority"}});
jsTest.log("Testing multiple createIndexess in a transaction");
- session.startTransaction({writeConcern: {w: "majority"}});
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
- createIndexAndCRUDInTxn(sessionDB, secondCollName, explicitCollectionCreate, multikeyIndex);
- session.commitTransaction();
+ withTxnAndAutoRetryOnMongos(session, function() {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ createIndexAndCRUDInTxn(sessionDB, secondCollName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
assert.eq(sessionColl.find({}).itcount(), 1);
assert.eq(secondSessionColl.find({}).itcount(), 1);
assert.eq(sessionColl.getIndexes().length, 2);
@@ -45,16 +46,20 @@ let doCreateIndexesTest = function(explicitCollectionCreate, multikeyIndex) {
jsTest.log("Testing createIndexes in a transaction that aborts");
session.startTransaction({writeConcern: {w: "majority"}});
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
- assert.commandWorked(session.abortTransaction_forTesting());
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
+ session.abortTransaction();
assert.eq(sessionColl.find({}).itcount(), 0);
assert.eq(sessionColl.getIndexes().length, 0);
jsTest.log("Testing multiple createIndexes in a transaction that aborts");
session.startTransaction({writeConcern: {w: "majority"}});
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
- createIndexAndCRUDInTxn(sessionDB, secondCollName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ createIndexAndCRUDInTxn(sessionDB, secondCollName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
session.abortTransaction();
assert.eq(sessionColl.find({}).itcount(), 0);
assert.eq(sessionColl.getIndexes().length, 0);
@@ -66,7 +71,9 @@ let doCreateIndexesTest = function(explicitCollectionCreate, multikeyIndex) {
jsTest.log(
"Testing createIndexes with conflicting index specs in a transaction that aborts (SHOULD FAIL)");
session.startTransaction({writeConcern: {w: "majority"}});
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
assert.commandFailedWithCode(
sessionColl.runCommand({createIndexes: collName, indexes: [conflictingIndexSpecs]}),
ErrorCodes.IndexKeySpecsConflict);
diff --git a/jstests/core/txns/create_indexes_parallel.js b/jstests/core/txns/create_indexes_parallel.js
index f2e93bad3d3..61a77e4a1ff 100644
--- a/jstests/core/txns/create_indexes_parallel.js
+++ b/jstests/core/txns/create_indexes_parallel.js
@@ -8,6 +8,7 @@
(function() {
"use strict";
+load("jstests/libs/auto_retry_transaction_in_sharding.js");
load("jstests/libs/create_index_txn_helpers.js");
let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyIndex) {
@@ -27,11 +28,12 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
distinctSessionColl.drop({writeConcern: {w: "majority"}});
jsTest.log("Testing duplicate sequential createIndexes, both succeed");
-
session.startTransaction({writeConcern: {w: "majority"}}); // txn 1
secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
jsTest.log("Committing transaction 1");
session.commitTransaction();
assert.eq(sessionColl.find({}).itcount(), 1);
@@ -50,7 +52,9 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
session.startTransaction({writeConcern: {w: "majority"}}); // txn 1
secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2
- createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => {
+ createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
jsTest.log("Committing transaction 2");
secondSession.commitTransaction();
assert.eq(secondSessionColl.find({}).itcount(), 1);
@@ -73,8 +77,14 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
session.startTransaction({writeConcern: {w: "majority"}}); // txn 1
secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2
- createIndexAndCRUDInTxn(sessionDB, distinctCollName, explicitCollectionCreate, multikeyIndex);
- createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(
+ sessionDB, distinctCollName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
+
+ retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => {
+ createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
jsTest.log("Committing transaction 2");
secondSession.commitTransaction();
assert.eq(secondSessionColl.find({}).itcount(), 1);
@@ -98,12 +108,15 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
jsTest.log(
"Testing duplicate createIndexes in parallel, both attempt to commit, second to commit fails");
-
secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2
- createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => {
+ createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
session.startTransaction({writeConcern: {w: "majority"}}); // txn 1
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
jsTest.log("Committing transaction 2");
secondSession.commitTransaction();
@@ -120,7 +133,9 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
jsTest.log("Testing createIndexes inside txn and createCollection on conflicting collection " +
"in parallel.");
session.startTransaction({writeConcern: {w: "majority"}}); // txn 1
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
assert.commandWorked(secondSessionDB.createCollection(collName));
assert.commandWorked(secondSessionDB.getCollection(collName).insert({a: 1}));
@@ -132,12 +147,15 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
assert.commandWorked(sessionDB.dropDatabase());
jsTest.log("Testing duplicate createIndexes which implicitly create a database in parallel" +
", both attempt to commit, second to commit fails");
-
secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2
- createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => {
+ createIndexAndCRUDInTxn(secondSessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
session.startTransaction({writeConcern: {w: "majority"}}); // txn 1
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
jsTest.log("Committing transaction 2");
secondSession.commitTransaction();
@@ -153,11 +171,15 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd
jsTest.log("Testing distinct createIndexes in parallel, both successfully commit.");
session.startTransaction({writeConcern: {w: "majority"}}); // txn 1
- createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => {
+ createIndexAndCRUDInTxn(sessionDB, collName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
secondSession.startTransaction({writeConcern: {w: "majority"}}); // txn 2
- createIndexAndCRUDInTxn(
- secondSessionDB, distinctCollName, explicitCollectionCreate, multikeyIndex);
+ retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => {
+ createIndexAndCRUDInTxn(
+ secondSessionDB, distinctCollName, explicitCollectionCreate, multikeyIndex);
+ }, {writeConcern: {w: "majority"}});
session.commitTransaction();
secondSession.commitTransaction();
diff --git a/jstests/sharding/create_existing_indexes_prepared_transactions.js b/jstests/sharding/create_existing_indexes_prepared_transactions.js
index 7dda83c7158..1b5b512f059 100644
--- a/jstests/sharding/create_existing_indexes_prepared_transactions.js
+++ b/jstests/sharding/create_existing_indexes_prepared_transactions.js
@@ -12,101 +12,72 @@
(function() {
"use strict";
-function expectChunks(st, ns, chunks) {
- for (let i = 0; i < chunks.length; i++) {
- assert.eq(chunks[i],
- st.s.getDB("config").chunks.count({ns: ns, shard: st["shard" + i].shardName}),
- "unexpected number of chunks on shard " + i);
- }
-}
-
-const dbName = "test";
-const dbNameShard2 = "testOther";
+const dbName = "TestDB";
const collName = "foo";
const ns = dbName + '.' + collName;
const st = new ShardingTest({
- shards: 3,
+ shards: 2,
mongos: 1,
});
-// Set up one sharded collection with 2 chunks, both on the primary shard.
-
-assert.commandWorked(
- st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
-assert.commandWorked(
- st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
-
-assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, st.shard0.shardName);
+// Set up one sharded collection with 2 chunks distributed across 2 shards
+assert.commandWorked(st.enableSharding(dbName, st.shard0.shardName));
assert.commandWorked(st.s.getDB(dbName).runCommand({
createIndexes: collName,
indexes: [{key: {a: 1}, name: "a_1"}],
writeConcern: {w: "majority"}
}));
-// Set up another collection with a different shard (shard 2) as its primary shard.
-assert.commandWorked(
- st.s.getDB(dbNameShard2)[collName].insert({_id: 4}, {writeConcern: {w: "majority"}}));
-st.ensurePrimaryShard(dbNameShard2, st.shard2.shardName);
-
-const session = st.s.getDB(dbName).getMongo().startSession({causalConsistency: false});
-
-let sessionDB = session.getDatabase(dbName);
-let sessionColl = sessionDB[collName];
-let sessionDBShard2 = session.getDatabase(dbNameShard2);
-let sessionCollShard2 = sessionDBShard2[collName];
-
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
-
-expectChunks(st, ns, [2, 0, 0]);
-
-st.stopBalancer();
-
-// Ensure collection `ns` has chunks distributed across two shards.
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 5}, to: st.shard1.shardName}));
-expectChunks(st, ns, [1, 1, 0]);
-// Ensure no stale version errors occur.
-let doc = st.s.getDB(dbName).getCollection(collName).findOne({_id: 5});
-assert.eq(doc._id, 5);
-let doc2 = st.s.getDB(dbNameShard2).getCollection(collName).findOne({_id: 4});
-assert.eq(doc2._id, 4);
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: -5}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ st.s.getDB(dbName)[collName].insert({_id: 5}, {writeConcern: {w: "majority"}}));
-jsTest.log("Testing createIndexes on an existing index in a transaction");
-session.startTransaction({writeConcern: {w: "majority"}});
+const session = st.s.getDB(dbName).getMongo().startSession({causalConsistency: false});
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB[collName];
-assert.commandWorked(
- sessionColl.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
-// Perform cross-shard writes to execute prepare path.
-assert.commandWorked(sessionColl.insert({n: 2}));
-assert.commandWorked(sessionCollShard2.insert({m: 1}));
-assert.eq(sessionCollShard2.findOne({m: 1}).m, 1);
-assert.eq(sessionColl.findOne({n: 2}).n, 2);
+{
+ jsTest.log("Testing createIndexes on an existing index in a transaction");
+ session.startTransaction({writeConcern: {w: "majority"}});
-session.commitTransaction();
+ assert.commandWorked(
+ sessionColl.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
+ // Perform cross-shard writes to execute prepare path.
+ assert.commandWorked(sessionColl.insert({_id: -1, m: -1}));
+ assert.commandWorked(sessionColl.insert({_id: +1, m: +1}));
+ assert.eq(-1, sessionColl.findOne({m: -1})._id);
+ assert.eq(+1, sessionColl.findOne({m: +1})._id);
-jsTest.log("Testing createIndexes on an existing index in a transaction when not all shards are" +
- " aware of that index (should abort)");
+ session.commitTransaction();
+}
+{
+ jsTest.log(
+ "Testing createIndexes on an existing index in a transaction when not all shards are" +
+ " aware of that index (should abort)");
-// Simulate a scenario where one shard with chunks for a collection is unaware of one of the
-// collection's indexes.
-st.shard1.getDB(dbName).getCollection(collName).dropIndexes("a_1");
+ // Simulate a scenario where one shard with chunks for a collection is unaware of one of the
+ // collection's indexes
+ assert.commandWorked(st.shard1.getDB(dbName).getCollection(collName).dropIndexes("a_1"));
-session.startTransaction({writeConcern: {w: "majority"}});
+ session.startTransaction({writeConcern: {w: "majority"}});
-assert.commandFailedWithCode(
- sessionColl.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}),
- ErrorCodes.OperationNotSupportedInTransaction);
+ assert.commandFailedWithCode(
+ sessionColl.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}),
+ ErrorCodes.OperationNotSupportedInTransaction);
-assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+}
// Resolve index inconsistency to pass consistency checks.
st.shard1.getDB(dbName).getCollection(collName).runCommand(
{createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]});
-st.startBalancer();
-
st.stop();
})();
diff --git a/jstests/sharding/query/comment_field_sharded.js b/jstests/sharding/query/comment_field_sharded.js
index 462adb7a7b9..2a8753bac80 100644
--- a/jstests/sharding/query/comment_field_sharded.js
+++ b/jstests/sharding/query/comment_field_sharded.js
@@ -110,18 +110,10 @@ function runCommentParamTest({
// error upon sending an agg request, causing it to retry the agg command from the top and
// resulting in more profiler entries than what is expected.
assert.commandWorked(st.rs0.getPrimary().getDB(testDB.getName()).adminCommand({
- _flushRoutingTableCacheUpdates: coll.getFullName(),
- syncFromConfig: true
- }));
- assert.commandWorked(st.rs0.getPrimary().getDB(testDB.getName()).adminCommand({
_flushDatabaseCacheUpdates: testDB.getName(),
syncFromConfig: true
}));
assert.commandWorked(st.rs1.getPrimary().getDB(testDB.getName()).adminCommand({
- _flushRoutingTableCacheUpdates: coll.getFullName(),
- syncFromConfig: true
- }));
- assert.commandWorked(st.rs1.getPrimary().getDB(testDB.getName()).adminCommand({
_flushDatabaseCacheUpdates: testDB.getName(),
syncFromConfig: true
}));
diff --git a/jstests/sharding/stale_mongos_and_restarted_shards_agree_on_shard_version.js b/jstests/sharding/stale_mongos_and_restarted_shards_agree_on_shard_version.js
index 988317d1be6..283eac9ff48 100644
--- a/jstests/sharding/stale_mongos_and_restarted_shards_agree_on_shard_version.js
+++ b/jstests/sharding/stale_mongos_and_restarted_shards_agree_on_shard_version.js
@@ -19,7 +19,7 @@ const shard0Name = st.shard0.shardName;
const shard1Name = st.shard1.shardName;
const kDatabaseName = 'TestDB';
-st.enableSharding(kDatabaseName);
+st.enableSharding(kDatabaseName, st.shard1.shardName);
// Creates and shard collName with 2 chunks, one per shard. Only the router referenced by st.s0
// knows that collName is sharded, and all the shards are restarted so they don't have the
@@ -61,21 +61,18 @@ const staleMongoS = st.s1;
insertBulkOp.insert({Key: 1});
insertBulkOp.execute();
- // TODO (SERVER-32198): After SERVER-32198 is fixed and backported change neq to eq
- assert.neq(4, freshMongoS.getDB(kDatabaseName).TestInsertColl.find().itcount());
- assert.neq(4, staleMongoS.getDB(kDatabaseName).TestInsertColl.find().itcount());
+ assert.eq(4, freshMongoS.getDB(kDatabaseName).TestInsertColl.find().itcount());
+ assert.eq(4, staleMongoS.getDB(kDatabaseName).TestInsertColl.find().itcount());
}
{
jsTest.log('Testing: Multi-update with sharded collection unknown on a stale mongos');
setupCollectionForTest('TestUpdateColl');
- var updateBulkOp = staleMongoS.getDB(kDatabaseName).TestUpdateColl.initializeUnorderedBulkOp();
- updateBulkOp.find({}).update({$inc: {inc: 1}});
- updateBulkOp.execute();
+ assert.commandWorked(staleMongoS.getDB(kDatabaseName)
+ .TestUpdateColl.update({}, {$inc: {inc: 1}}, {multi: true}));
var s0Doc = freshMongoS.getDB(kDatabaseName).TestUpdateColl.findOne({Key: -1});
- // TODO (SERVER-32198): After SERVER-32198 is fixed and backported change neq to eq
- assert.neq(1, s0Doc.inc);
+ assert.eq(1, s0Doc.inc);
var s1Doc = freshMongoS.getDB(kDatabaseName).TestUpdateColl.findOne({Key: 0});
assert.eq(1, s1Doc.inc);
}
@@ -83,12 +80,10 @@ const staleMongoS = st.s1;
jsTest.log('Testing: Multi-remove with sharded collection unknown on a stale mongos');
setupCollectionForTest('TestRemoveColl');
- var removeBulkOp = staleMongoS.getDB(kDatabaseName).TestRemoveColl.initializeUnorderedBulkOp();
- removeBulkOp.find({}).remove({});
- removeBulkOp.execute();
+ assert.commandWorked(
+ staleMongoS.getDB(kDatabaseName).TestRemoveColl.remove({}, {justOne: false}));
- // TODO (SERVER-32198): After SERVER-32198 is fixed and backported change neq to eq
- assert.neq(0, freshMongoS.getDB(kDatabaseName).TestRemoveColl.find().itcount());
+ assert.eq(0, freshMongoS.getDB(kDatabaseName).TestRemoveColl.find().itcount());
}
{
jsTest.log('Testing: Find with sharded collection unknown on a stale mongos');
diff --git a/src/mongo/db/s/op_observer_sharding_impl.cpp b/src/mongo/db/s/op_observer_sharding_impl.cpp
index c6eb5abab8e..8ba84469ff8 100644
--- a/src/mongo/db/s/op_observer_sharding_impl.cpp
+++ b/src/mongo/db/s/op_observer_sharding_impl.cpp
@@ -103,15 +103,11 @@ void OpObserverShardingImpl::shardObserveInsertOp(OperationContext* opCtx,
const repl::OpTime& opTime,
const bool fromMigrate,
const bool inMultiDocumentTransaction) {
- auto* const csr = (nss == NamespaceString::kSessionTransactionsTableNamespace || fromMigrate)
- ? nullptr
- : CollectionShardingRuntime::get(opCtx, nss);
-
- if (!csr) {
+ if (nss == NamespaceString::kSessionTransactionsTableNamespace || fromMigrate)
return;
- }
- csr->checkShardVersionOrThrow_DEPRECATED(opCtx);
+ auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
+ csr->checkShardVersionOrThrow(opCtx);
if (inMultiDocumentTransaction) {
assertIntersectingChunkHasNotMoved(opCtx, csr, insertedDoc);
@@ -133,7 +129,7 @@ void OpObserverShardingImpl::shardObserveUpdateOp(OperationContext* opCtx,
const repl::OpTime& prePostImageOpTime,
const bool inMultiDocumentTransaction) {
auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
- csr->checkShardVersionOrThrow_DEPRECATED(opCtx);
+ csr->checkShardVersionOrThrow(opCtx);
if (inMultiDocumentTransaction) {
assertIntersectingChunkHasNotMoved(opCtx, csr, postImageDoc);
@@ -154,7 +150,7 @@ void OpObserverShardingImpl::shardObserveDeleteOp(OperationContext* opCtx,
const repl::OpTime& preImageOpTime,
const bool inMultiDocumentTransaction) {
auto* const csr = CollectionShardingRuntime::get(opCtx, nss);
- csr->checkShardVersionOrThrow_DEPRECATED(opCtx);
+ csr->checkShardVersionOrThrow(opCtx);
if (inMultiDocumentTransaction) {
assertIntersectingChunkHasNotMoved(opCtx, csr, documentKey);