author    Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2021-02-11 11:28:13 -0500
committer Evergreen Agent <no-reply@evergreen.mongodb.com>   2021-02-21 11:29:04 +0000
commit    d01febfefb28d3aab5305eb1dbcd6d047139c654 (patch)
tree      de0f45a2c56eb9d5dfb8751837aaba6766872d25
parent    5199abc5b9113e310a79d9ec29a5ac6b77ad5682 (diff)
download  mongo-d01febfefb28d3aab5305eb1dbcd6d047139c654.tar.gz
SERVER-52812 Unify the implicit createDatabase/enableSharding flow
This change makes both enableSharding and the implicit createDatabase from the routers go through the same _configsvrCreateDatabase command. That command is now solely responsible for creating a database (or ensuring it already exists) with the specified name, an optional primary shard and an optional enableSharding field.
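For context, a minimal shell-level sketch of the unified flow (shard and database names are illustrative; assumes a running sharded cluster):

    // Explicit flow: enableSharding on a router, optionally with a primary shard.
    // The router now implements this via _configsvrCreateDatabase on the config server.
    db.adminCommand({enableSharding: 'db0', primaryShard: 'shard0'});

    // Implicit flow: the first write to a non-existent database makes the router
    // issue the same _configsvrCreateDatabase, just without the enableSharding field.
    db.getSiblingDB('db1').foo.insert({x: 1});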
-rw-r--r--buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml2
-rw-r--r--jstests/sharding/enable_sharding.js106
-rw-r--r--jstests/sharding/enable_sharding_basic.js58
-rw-r--r--jstests/sharding/enable_sharding_with_primary.js29
-rw-r--r--jstests/sharding/shard_collection_basic.js12
-rw-r--r--jstests/sharding/shard_collection_config_db.js (renamed from jstests/sharding/shard_config_db_collections.js)21
-rw-r--r--jstests/sharding/shard_collection_existing_zones.js2
-rw-r--r--jstests/sharding/shard_collection_verify_initial_chunks.js2
-rw-r--r--src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp6
-rw-r--r--src/mongo/db/s/SConscript2
-rw-r--r--src/mongo/db/s/balancer/balancer.cpp1
-rw-r--r--src/mongo/db/s/balancer/migration_manager.h1
-rw-r--r--src/mongo/db/s/config/config_server_test_fixture.h4
-rw-r--r--src/mongo/db/s/config/configsvr_create_database_command.cpp39
-rw-r--r--src/mongo/db/s/config/configsvr_enable_sharding_command.cpp63
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.cpp4
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager.h38
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp219
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp156
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp209
-rw-r--r--src/mongo/db/s/create_collection_coordinator.cpp6
-rw-r--r--src/mongo/db/s/drop_collection_legacy.cpp2
-rw-r--r--src/mongo/db/s/resharding/resharding_coordinator_test.cpp1
-rw-r--r--src/mongo/db/s/sharding_ddl_util.cpp1
-rw-r--r--src/mongo/db/s/sharding_ddl_util_test.cpp1
-rw-r--r--src/mongo/s/SConscript10
-rw-r--r--src/mongo/s/catalog/sharding_catalog_client.h6
-rw-r--r--src/mongo/s/catalog_cache.cpp1
-rw-r--r--src/mongo/s/chunk_manager_targeter.cpp (renamed from src/mongo/s/write_ops/chunk_manager_targeter.cpp)6
-rw-r--r--src/mongo/s/chunk_manager_targeter.h (renamed from src/mongo/s/write_ops/chunk_manager_targeter.h)0
-rw-r--r--src/mongo/s/chunk_manager_targeter_test.cpp (renamed from src/mongo/s/write_ops/chunk_manager_targeter_test.cpp)2
-rw-r--r--src/mongo/s/cluster_commands_helpers.cpp32
-rw-r--r--src/mongo/s/cluster_commands_helpers.h6
-rw-r--r--src/mongo/s/cluster_ddl.cpp79
-rw-r--r--src/mongo/s/cluster_ddl.h46
-rw-r--r--src/mongo/s/cluster_write.cpp (renamed from src/mongo/s/write_ops/cluster_write.cpp)16
-rw-r--r--src/mongo/s/cluster_write.h (renamed from src/mongo/s/write_ops/cluster_write.h)28
-rw-r--r--src/mongo/s/commands/cluster_create_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_create_indexes_cmd.cpp3
-rw-r--r--src/mongo/s/commands/cluster_enable_sharding_cmd.cpp42
-rw-r--r--src/mongo/s/commands/cluster_find_and_modify_cmd.cpp7
-rw-r--r--src/mongo/s/commands/cluster_write_cmd.cpp8
-rw-r--r--src/mongo/s/commands/document_shard_key_update_util.cpp6
-rw-r--r--src/mongo/s/database_version.h26
-rw-r--r--src/mongo/s/mock_ns_targeter.cpp (renamed from src/mongo/s/write_ops/mock_ns_targeter.cpp)2
-rw-r--r--src/mongo/s/mock_ns_targeter.h (renamed from src/mongo/s/write_ops/mock_ns_targeter.h)0
-rw-r--r--src/mongo/s/request_types/create_database.idl45
-rw-r--r--src/mongo/s/request_types/sharded_ddl_commands.idl42
-rw-r--r--src/mongo/s/sessions_collection_sharded.cpp7
-rw-r--r--src/mongo/s/sharding_router_test_fixture.h2
-rw-r--r--src/mongo/s/write_ops/SConscript1
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp2
-rw-r--r--src/mongo/s/write_ops/batch_write_op_test.cpp2
-rw-r--r--src/mongo/s/write_ops/write_op_test.cpp2
54 files changed, 501 insertions, 917 deletions
diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
index fa0130bfdb9..a0d8bab65d5 100644
--- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
+++ b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml
@@ -157,7 +157,7 @@ selector:
# listCollections is not retryable
- jstests/sharding/sessions_collection_auto_healing.js
# shardCollection is not retryable
- - jstests/sharding/shard_config_db_collections.js
+ - jstests/sharding/shard_collection_config_db.js
# creates collection, does movePrimary, then shards the collection and moves a chunk to the old
# primary (SERVER-31909)
- jstests/sharding/mongos_validate_writes.js
diff --git a/jstests/sharding/enable_sharding.js b/jstests/sharding/enable_sharding.js
new file mode 100644
index 00000000000..0fe510083c6
--- /dev/null
+++ b/jstests/sharding/enable_sharding.js
@@ -0,0 +1,106 @@
+//
+// Basic tests for enableSharding command.
+//
+
+(function() {
+'use strict';
+
+var st = new ShardingTest({mongos: 2, shards: 2});
+
+jsTest.log('enableSharding can run only against the admin database');
+{
+ assert.commandFailedWithCode(st.s0.getDB('test').runCommand({enableSharding: 'db'}),
+ ErrorCodes.Unauthorized);
+}
+
+jsTest.log('Cannot shard system databases except for the config db');
+{
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'config'}));
+ assert.commandFailed(st.s0.adminCommand({enableSharding: 'local'}));
+ assert.commandFailed(st.s0.adminCommand({enableSharding: 'admin'}));
+}
+
+jsTest.log('Cannot shard a db whose name differs only in case');
+{
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
+ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+ assert.commandFailedWithCode(st.s0.adminCommand({enableSharding: 'DB'}),
+ ErrorCodes.DatabaseDifferCase);
+}
+
+jsTest.log('Cannot shard invalid db name');
+{
+ assert.commandFailed(st.s0.adminCommand({enableSharding: 'a.b'}));
+ assert.commandFailed(st.s0.adminCommand({enableSharding: ''}));
+}
+
+jsTest.log('Attempting to shard an already sharded database returns success');
+{
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
+ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+}
+
+jsTest.log('Verify config.databases metadata');
+{
+ assert.commandWorked(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
+ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
+ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
+}
+
+jsTest.log('Sharding a collection before enableSharding is called fails');
+{
+ assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+ assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+
+ assert.commandWorked(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
+ assert.commandWorked(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
+}
+
+jsTest.log('Calling enableSharding on one mongos and shardCollection through another must work');
+{
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+ assert.commandWorked(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
+}
+
+jsTest.log('Cannot enable sharding on a database using a wrong shard name');
+{
+ assert.commandFailed(st.s0.adminCommand(
+ {enableSharding: 'db2', primaryShard: st.shard1.shardName + '_nonexistent_name_postfix'}));
+}
+
+jsTest.log('Enabling sharding on a database with a valid shard name must work');
+{
+ assert.commandWorked(
+ st.s0.adminCommand({enableSharding: 'db_on_shard0', primaryShard: st.shard0.shardName}));
+ assert.commandWorked(
+ st.s0.adminCommand({enableSharding: 'db_on_shard1', primaryShard: st.shard1.shardName}));
+ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db_on_shard0'}).primary,
+ st.shard0.shardName);
+ assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db_on_shard1'}).primary,
+ st.shard1.shardName);
+}
+
+jsTest.log(
+ 'Enabling sharding on a database already created with the correct primary shard name must work');
+{
+ assert.commandWorked(
+ st.s0.adminCommand({enableSharding: 'db_on_shard0', primaryShard: st.shard0.shardName}));
+ assert.commandWorked(
+ st.s0.adminCommand({enableSharding: 'db_on_shard1', primaryShard: st.shard1.shardName}));
+}
+
+jsTest.log(
+ 'Cannot enable sharding of a database already created with a different primary shard name');
+{
+ assert.commandFailedWithCode(
+ st.s0.adminCommand({enableSharding: 'db_on_shard0', primaryShard: st.shard1.shardName}),
+ ErrorCodes.NamespaceExists);
+ assert.commandFailedWithCode(
+ st.s0.adminCommand({enableSharding: 'db_on_shard1', primaryShard: st.shard0.shardName}),
+ ErrorCodes.NamespaceExists);
+}
+
+st.stop();
+})();
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
deleted file mode 100644
index 046b4f6e520..00000000000
--- a/jstests/sharding/enable_sharding_basic.js
+++ /dev/null
@@ -1,58 +0,0 @@
-//
-// Basic tests for enableSharding command.
-//
-
-(function() {
-'use strict';
-
-var st = new ShardingTest({mongos: 2, shards: 2});
-
-// enableSharding can run only on mongos.
-assert.commandFailedWithCode(st.rs0.getPrimary().getDB('admin').runCommand({enableSharding: 'db'}),
- ErrorCodes.CommandNotFound);
-
-// enableSharding can run only against the admin database.
-assert.commandFailedWithCode(st.s0.getDB('test').runCommand({enableSharding: 'db'}),
- ErrorCodes.Unauthorized);
-
-// Can't shard 'local' database.
-assert.commandFailed(st.s0.adminCommand({enableSharding: 'local'}));
-
-// Can't shard 'admin' database.
-assert.commandFailed(st.s0.adminCommand({enableSharding: 'admin'}));
-
-// Can't shard db with the name that just differ on case.
-assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
-assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
-
-assert.commandFailedWithCode(st.s0.adminCommand({enableSharding: 'DB'}),
- ErrorCodes.DatabaseDifferCase);
-
-// Can't shard invalid db name.
-assert.commandFailed(st.s0.adminCommand({enableSharding: 'a.b'}));
-assert.commandFailed(st.s0.adminCommand({enableSharding: ''}));
-
-// Attempting to shard already sharded database returns success.
-assert.commandWorked(st.s0.adminCommand({enableSharding: 'db'}));
-assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
-
-// Verify config.databases metadata.
-assert.commandWorked(st.s0.getDB('unsharded').foo.insert({aKey: "aValue"}));
-assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
-assert.commandWorked(st.s0.adminCommand({enableSharding: 'unsharded'}));
-assert.eq(st.s0.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
-
-// Sharding a collection before 'enableSharding' is called fails
-assert.commandFailed(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
-assert.commandFailed(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
-
-assert.commandWorked(st.s0.getDB('TestDB').TestColl.insert({_id: 0}));
-assert.commandWorked(st.s1.getDB('TestDB').TestColl.insert({_id: 1}));
-
-// Calling 'enableSharding' on one mongos and 'shardCollection' through another must work
-assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
-assert.commandWorked(st.s1.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
-assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {_id: 1}}));
-
-st.stop();
-})();
diff --git a/jstests/sharding/enable_sharding_with_primary.js b/jstests/sharding/enable_sharding_with_primary.js
deleted file mode 100644
index 1c42b07ac54..00000000000
--- a/jstests/sharding/enable_sharding_with_primary.js
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Enable sharding with custom primary shard tests
-//
-
-(function() {
-'use strict';
-
-var st = new ShardingTest({mongos: 1, shards: 2});
-
-// Can't enable sharding on a database using a wrong shard name
-assert.commandFailed(st.s0.adminCommand(
- {enableSharding: 'db2', primaryShard: st.shard1.shardName + '_unenxisting_name_postfix'}));
-
-// Enabling sharding on a database with a valid shard name must work
-assert.commandWorked(
- st.s0.adminCommand({enableSharding: 'db2', primaryShard: st.shard1.shardName}));
-assert.eq(st.s0.getDB('config').databases.findOne({_id: 'db2'}).primary, st.shard1.shardName);
-
-// Enable sharding on a database already created with the correct primary shard name must work
-assert.commandWorked(
- st.s0.adminCommand({enableSharding: 'db2', primaryShard: st.shard1.shardName}));
-
-// Can't enable sharding of a database already created with a different primary shard name
-assert.commandFailedWithCode(
- st.s0.adminCommand({enableSharding: 'db2', primaryShard: st.shard0.shardName}),
- ErrorCodes.NamespaceExists);
-
-st.stop();
-})(); \ No newline at end of file
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index 8341a84ca87..741f1469f60 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -1,15 +1,13 @@
-//
-// Basic tests for shardCollection.
-//
-
-load("jstests/sharding/libs/find_chunks_util.js");
-
(function() {
'use strict';
-var st = new ShardingTest({mongos: 1, shards: 2});
+load("jstests/sharding/libs/find_chunks_util.js");
+
+var st = new ShardingTest({shards: 2});
var kDbName = 'db';
var mongos = st.s0;
+var config = st.s0.getDB('config');
+var admin = st.s0.getDB('admin');
function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
diff --git a/jstests/sharding/shard_config_db_collections.js b/jstests/sharding/shard_collection_config_db.js
index 8f8f324957b..e4787dc13a0 100644
--- a/jstests/sharding/shard_config_db_collections.js
+++ b/jstests/sharding/shard_collection_config_db.js
@@ -1,12 +1,12 @@
(function() {
'use strict';
-// Database-level tests
-{
- var st = new ShardingTest({shards: 2});
- var config = st.s.getDB('config');
- var admin = st.s.getDB('admin');
+var st = new ShardingTest({shards: 2});
+var config = st.s.getDB('config');
+var admin = st.s.getDB('admin');
+jsTest.log('Cannot movePrimary on the config database');
+{
// At first, there should not be an entry for config
assert.eq(0, config.databases.count({"_id": "config"}));
@@ -19,24 +19,19 @@
// Test that you cannot set the primary shard for config (not even to 'config')
assert.commandFailed(admin.runCommand({movePrimary: 'config', to: st.shard0.shardName}));
assert.commandFailed(admin.runCommand({movePrimary: 'config', to: 'config'}));
-
- st.stop();
}
-// Test that only system.sessions may be sharded.
+jsTest.log('Only system.sessions may be sharded');
{
- var st = new ShardingTest({shards: 2});
- var admin = st.s.getDB('admin');
-
assert.commandWorked(
admin.runCommand({shardCollection: "config.system.sessions", key: {_id: 1}}));
assert.eq(0, st.s.getDB('config').chunks.count({"shard": "config"}));
assert.commandFailed(admin.runCommand({shardCollection: "config.anythingelse", key: {_id: 1}}));
-
- st.stop();
}
+st.stop();
+
// Cannot shard things in config without shards.
{
var st = new ShardingTest({shards: 0});
diff --git a/jstests/sharding/shard_collection_existing_zones.js b/jstests/sharding/shard_collection_existing_zones.js
index cc52b416bc2..b0c5b2a31a1 100644
--- a/jstests/sharding/shard_collection_existing_zones.js
+++ b/jstests/sharding/shard_collection_existing_zones.js
@@ -5,7 +5,7 @@
load("jstests/sharding/libs/find_chunks_util.js");
-var st = new ShardingTest({mongos: 1, shards: 3});
+var st = new ShardingTest({shards: 3});
var kDbName = 'test';
var kCollName = 'foo';
var ns = kDbName + '.' + kCollName;
diff --git a/jstests/sharding/shard_collection_verify_initial_chunks.js b/jstests/sharding/shard_collection_verify_initial_chunks.js
index 38e3d4d67a6..77d752ba164 100644
--- a/jstests/sharding/shard_collection_verify_initial_chunks.js
+++ b/jstests/sharding/shard_collection_verify_initial_chunks.js
@@ -5,7 +5,7 @@
(function() {
'use strict';
-let st = new ShardingTest({mongos: 1, shards: 2});
+let st = new ShardingTest({shards: 2});
let mongos = st.s0;
let config = mongos.getDB("config");
diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
index 80d0ce29f03..d211bbccb38 100644
--- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp
@@ -48,10 +48,10 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/cluster_write.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/document_source_merge_cursors.h"
#include "mongo/s/stale_shard_version_helpers.h"
-#include "mongo/s/write_ops/cluster_write.h"
namespace mongo {
@@ -119,7 +119,7 @@ Status ShardServerProcessInterface::insert(const boost::intrusive_ptr<Expression
insertCommand.setWriteConcern(wc.toBSON());
- ClusterWriter::write(expCtx->opCtx, insertCommand, &stats, &response, targetEpoch);
+ cluster::write(expCtx->opCtx, insertCommand, &stats, &response, targetEpoch);
return response.toStatus();
}
@@ -139,7 +139,7 @@ StatusWith<MongoProcessInterface::UpdateResult> ShardServerProcessInterface::upd
updateCommand.setWriteConcern(wc.toBSON());
- ClusterWriter::write(expCtx->opCtx, updateCommand, &stats, &response, targetEpoch);
+ cluster::write(expCtx->opCtx, updateCommand, &stats, &response, targetEpoch);
if (auto status = response.toStatus(); status != Status::OK()) {
return status;
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 2c77e6e8155..7943e866bea 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -548,9 +548,7 @@ env.CppUnitTest(
'config/sharding_catalog_manager_clear_jumbo_flag_test.cpp',
'config/sharding_catalog_manager_commit_chunk_migration_test.cpp',
'config/sharding_catalog_manager_config_initialization_test.cpp',
- 'config/sharding_catalog_manager_create_database_test.cpp',
'config/sharding_catalog_manager_drop_coll_test.cpp',
- 'config/sharding_catalog_manager_enable_sharding_test.cpp',
'config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp',
'config/sharding_catalog_manager_merge_chunks_test.cpp',
'config/sharding_catalog_manager_remove_shard_from_zone_test.cpp',
diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp
index f4c8a85c3e1..13c676ec04a 100644
--- a/src/mongo/db/s/balancer/balancer.cpp
+++ b/src/mongo/db/s/balancer/balancer.cpp
@@ -46,6 +46,7 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h"
#include "mongo/db/s/balancer/cluster_statistics_impl.h"
+#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_logging.h"
#include "mongo/logv2/log.h"
#include "mongo/s/balancer_configuration.h"
diff --git a/src/mongo/db/s/balancer/migration_manager.h b/src/mongo/db/s/balancer/migration_manager.h
index ce02f008fab..631e6057c2c 100644
--- a/src/mongo/db/s/balancer/migration_manager.h
+++ b/src/mongo/db/s/balancer/migration_manager.h
@@ -37,7 +37,6 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/balancer/balancer_policy.h"
#include "mongo/db/s/balancer/type_migration.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/executor/task_executor.h"
#include "mongo/platform/mutex.h"
diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h
index 30c59329287..e19c528485a 100644
--- a/src/mongo/db/s/config/config_server_test_fixture.h
+++ b/src/mongo/db/s/config/config_server_test_fixture.h
@@ -31,6 +31,7 @@
#include "mongo/db/catalog_raii.h"
#include "mongo/db/s/sharding_mongod_test_fixture.h"
+#include "mongo/s/catalog/type_shard.h"
namespace mongo {
@@ -40,9 +41,6 @@ struct ChunkVersion;
class KeysCollectionDocument;
class NamespaceString;
class Shard;
-class ShardId;
-class ShardRegistry;
-class ShardType;
/**
* Provides config-specific functionality in addition to the mock storage engine and mock network
diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp
index d875dd50482..b34651b73ca 100644
--- a/src/mongo/db/s/config/configsvr_create_database_command.cpp
+++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp
@@ -42,29 +42,23 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/s/dist_lock_manager.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
-#include "mongo/s/request_types/create_database_gen.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
namespace {
-/**
- * Internal sharding command run on config servers to create a database.
- * Call with { _configsvrCreateDatabase: <string dbName> }
- */
class ConfigSvrCreateDatabaseCommand final : public TypedCommand<ConfigSvrCreateDatabaseCommand> {
public:
using Request = ConfigsvrCreateDatabase;
+ using Response = ConfigsvrCreateDatabaseResponse;
class Invocation final : public InvocationBase {
public:
using InvocationBase::InvocationBase;
- void typedRun(OperationContext* opCtx) {
+ Response typedRun(OperationContext* opCtx) {
uassert(ErrorCodes::IllegalOperation,
"_configsvrCreateDatabase can only be run on config servers",
serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
@@ -80,18 +74,21 @@ public:
auto dbname = request().getCommandParameter();
- uassert(ErrorCodes::InvalidNamespace,
- str::stream() << "invalid db name specified: " << dbname,
- NamespaceString::validDBName(dbname));
-
- // Make sure to force update of any stale metadata
- ON_BLOCK_EXIT(
- [opCtx, dbname] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbname); });
-
- auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, dbname, "createDatabase", DistLockManager::kDefaultLockTimeout));
-
- ShardingCatalogManager::get(opCtx)->createDatabase(opCtx, dbname, ShardId());
+ if (request().getEnableSharding()) {
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "Enable sharding can only be set to `true`",
+ *request().getEnableSharding());
+ }
+
+ auto dbt = ShardingCatalogManager::get(opCtx)->createDatabase(
+ opCtx,
+ dbname,
+ request().getPrimaryShardId()
+ ? boost::optional<ShardId>(request().getPrimaryShardId()->toString())
+ : boost::optional<ShardId>(),
+ request().getEnableSharding().value_or(false));
+
+ return {dbt.getVersion()};
}
private:
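Judging by the accessors used above (getCommandParameter, getPrimaryShardId, getEnableSharding) and the response built from dbt.getVersion(), the internal command's wire shape is roughly the following; the field spellings are assumptions here, with the authoritative definition living in sharded_ddl_commands.idl:

    // Hypothetical direct invocation on a config server primary (sketch only):
    configPrimary.adminCommand({
        _configsvrCreateDatabase: 'db0',  // database name (the command parameter)
        primaryShardId: 'shard0',         // optional primary shard
        enableSharding: true,             // optional; rejected unless set to true
        writeConcern: {w: 'majority'}
    });
    // The response carries the version of the created (or pre-existing) database.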
diff --git a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
index 7ebcb2e0d14..6a20e63a6a0 100644
--- a/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
+++ b/src/mongo/db/s/config/configsvr_enable_sharding_command.cpp
@@ -37,15 +37,10 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
-#include "mongo/db/dbdirectclient.h"
#include "mongo/db/field_parser.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/ops/write_ops.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/s/dist_lock_manager.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog_cache.h"
#include "mongo/s/grid.h"
#include "mongo/util/scopeguard.h"
@@ -113,58 +108,12 @@ public:
const std::string dbname = parseNs("", cmdObj);
auto shardElem = cmdObj[kShardNameField];
- ShardId shardId = shardElem.ok() ? ShardId(shardElem.String()) : ShardId();
-
- // If assigned, check that the shardId is valid
- uassert(ErrorCodes::BadValue,
- str::stream() << "invalid shard name: " << shardId,
- !shardElem.ok() || shardId.isValid());
-
- uassert(
- ErrorCodes::InvalidNamespace,
- str::stream() << "invalid db name specified: " << dbname,
- NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow));
-
- if (dbname == NamespaceString::kAdminDb || dbname == NamespaceString::kLocalDb) {
- uasserted(ErrorCodes::InvalidOptions,
- str::stream() << "can't shard " + dbname + " database");
- }
-
- // Make sure to force update of any stale metadata
- ON_BLOCK_EXIT([opCtx, dbname] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbname); });
-
- // For an existing database, the enableSharding operation is just adding the {sharded: true}
- // field to config.database. First do an optimistic attempt to add it and if the write
- // succeeds do not go through the createDatabase flow.
- DBDirectClient client(opCtx);
- auto response = UpdateOp::parseResponse([&] {
- write_ops::Update updateOp(DatabaseType::ConfigNS);
- updateOp.setUpdates({[&] {
- BSONObjBuilder queryFilterBuilder;
- queryFilterBuilder.append(DatabaseType::name.name(), dbname);
- if (shardId.isValid())
- queryFilterBuilder.append(DatabaseType::primary.name(), shardId.toString());
- auto updateModification = write_ops::UpdateModification(
- write_ops::UpdateModification::parseFromClassicUpdate(
- BSON("$set" << BSON(DatabaseType::sharded(true)))));
- write_ops::UpdateOpEntry updateEntry(queryFilterBuilder.obj(), updateModification);
- updateEntry.setMulti(false);
- updateEntry.setUpsert(false);
- return updateEntry;
- }()});
-
- auto response = client.runCommand(updateOp.serialize({}));
- return response->getCommandReply();
- }());
-
- // If an entry for the database was found it can be assumed that it was either updated or
- // already had 'sharded' enabled, so we can assume success
- if (response.getN() != 1) {
- auto dbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
- opCtx, dbname, "enableSharding", DistLockManager::kDefaultLockTimeout));
-
- ShardingCatalogManager::get(opCtx)->enableSharding(opCtx, dbname, shardId);
- }
+ ShardingCatalogManager::get(opCtx)->createDatabase(
+ opCtx,
+ dbname,
+ shardElem.ok() ? boost::optional<ShardId>(shardElem.String())
+ : boost::optional<ShardId>(),
+ true);
audit::logEnableSharding(Client::getCurrent(), dbname);
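With this hunk, _configsvrEnableSharding reduces to a createDatabase call with enableSharding=true, making "enable sharding" and "create database" the same config server operation; in shell terms (a sketch):

    // These two now exercise the same createDatabase code path on the config server:
    sh.enableSharding('db0');                  // shell helper for the command below
    db.adminCommand({enableSharding: 'db0'});  // -> createDatabase(..., enableSharding=true)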
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index ac993136494..a35d3fd56ff 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -312,9 +312,9 @@ ShardingCatalogManager::ShardingCatalogManager(
ServiceContext* serviceContext, std::unique_ptr<executor::TaskExecutor> addShardExecutor)
: _serviceContext(serviceContext),
_executorForAddShard(std::move(addShardExecutor)),
- _kZoneOpLock("zoneOpLock"),
+ _kShardMembershipLock("shardMembershipLock"),
_kChunkOpLock("chunkOpLock"),
- _kShardMembershipLock("shardMembershipLock") {
+ _kZoneOpLock("zoneOpLock") {
startup();
}
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h
index c907c044af5..9de828a0b46 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.h
+++ b/src/mongo/db/s/config/sharding_catalog_manager.h
@@ -306,24 +306,14 @@ public:
//
/**
- * Checks if a database with the same name already exists, and if not, selects a primary shard
- * for the database and creates a new entry for it in config.databases.
- *
- * Returns the database entry.
- *
- * Throws DatabaseDifferCase if the database already exists with a different case.
+ * Checks whether a database with the given name, optional primary shard and enableSharding
+ * state already exists, and if not, creates one matching these prerequisites. If a matching
+ * database already exists, returns its entry; otherwise throws NamespaceExists.
*/
DatabaseType createDatabase(OperationContext* opCtx,
StringData dbName,
- const ShardId& primaryShard);
-
- /**
- * Creates the database if it does not exist, then marks its entry in config.databases as
- * sharding-enabled.
- *
- * Throws DatabaseDifferCase if the database already exists with a different case.
- */
- void enableSharding(OperationContext* opCtx, StringData dbName, const ShardId& primaryShard);
+ const boost::optional<ShardId>& optPrimaryShard,
+ bool enableSharding);
/**
* Updates metadata in config.databases collection to show the given primary database on its
@@ -610,13 +600,10 @@ private:
// _kZoneOpLock
/**
- * Lock for shard zoning operations. This should be acquired when doing any operations that
- * can affect the config.tags collection or the tags field of the config.shards collection.
- * No other locks should be held when locking this. If an operation needs to take database
- * locks (for example to write to a local collection) those locks should be taken after
- * taking this.
+ * Lock that guards changes to the set of shards in the cluster (ie addShard and removeShard
+ * requests).
*/
- Lock::ResourceMutex _kZoneOpLock;
+ Lock::ResourceMutex _kShardMembershipLock;
/**
* Lock for chunk split/merge/move operations. This should be acquired when doing split/merge/
@@ -628,10 +615,13 @@ private:
Lock::ResourceMutex _kChunkOpLock;
/**
- * Lock that guards changes to the set of shards in the cluster (ie addShard and removeShard
- * requests).
+ * Lock for shard zoning operations. This should be acquired when doing any operations that
+ * can affect the config.tags collection or the tags field of the config.shards collection.
+ * No other locks should be held when locking this. If an operation needs to take database
+ * locks (for example to write to a local collection) those locks should be taken after
+ * taking this.
*/
- Lock::ResourceMutex _kShardMembershipLock;
+ Lock::ResourceMutex _kZoneOpLock;
};
} // namespace mongo
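The reworked createDatabase contract above is idempotent when all prerequisites match and fails otherwise; a sketch of the observable behavior, assuming a ShardingTest st with two shards (this mirrors the assertions in the new enable_sharding.js):

    assert.commandWorked(
        st.s0.adminCommand({enableSharding: 'db0', primaryShard: st.shard0.shardName}));
    // Same name and same primary shard: idempotent success.
    assert.commandWorked(
        st.s0.adminCommand({enableSharding: 'db0', primaryShard: st.shard0.shardName}));
    // Same name but a different primary shard: rejected.
    assert.commandFailedWithCode(
        st.s0.adminCommand({enableSharding: 'db0', primaryShard: st.shard1.shardName}),
        ErrorCodes.NamespaceExists);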
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
deleted file mode 100644
index 2d0c3ea0b36..00000000000
--- a/src/mongo/db/s/config/sharding_catalog_manager_create_database_test.cpp
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/client/remote_command_targeter_factory_mock.h"
-#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/query/query_request_helper.h"
-#include "mongo/db/repl/read_concern_args.h"
-#include "mongo/db/s/config/config_server_test_fixture.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/db/s/dist_lock_catalog_replset.h"
-#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/rpc/metadata/tracking_metadata.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/util/time_support.h"
-
-namespace mongo {
-namespace {
-
-using executor::RemoteCommandRequest;
-using unittest::assertGet;
-
-using CreateDatabaseTest = ConfigServerTestFixture;
-
-TEST_F(CreateDatabaseTest, createDatabaseSuccessWithoutCustomPrimary) {
- const std::string dbname = "db1";
-
- const std::vector<ShardType> shards{{"shard0000", "ShardHost0:27017"},
- {"shard0001", "ShardHost1:27017"},
- {"shard0002", "ShardHost2:27017"}};
- setupShards(shards);
-
- for (const auto& shard : shards) {
- targeterFactory()->addTargeterToReturn(ConnectionString(HostAndPort{shard.getHost()}), [&] {
- auto targeter = std::make_unique<RemoteCommandTargeterMock>();
- targeter->setFindHostReturnValue(HostAndPort{shard.getHost()});
- return targeter;
- }());
- }
-
- // Prime the shard registry with information about the existing shards
- shardRegistry()->reload(operationContext());
-
- const std::vector<int> shardSizes{10, 1, 100};
- auto getShardSize = [&](const std::string& shardHost) {
- for (std::vector<ShardType>::size_type i = 0; i < shards.size(); ++i) {
- if (shardHost == shards[i].getHost()) {
- return shardSizes[i];
- }
- }
- FAIL(str::stream() << "Unexpected shard's host: " << shardHost);
- MONGO_UNREACHABLE;
- };
-
- auto listDatabasesResponse = [&](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
- ASSERT_EQUALS("admin", request.dbname);
- ASSERT_EQUALS("listDatabases", std::string(request.cmdObj.firstElement().fieldName()));
- ASSERT_FALSE(request.cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName));
-
- ASSERT_BSONOBJ_EQ(
- ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(),
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
-
- const auto shardSize = getShardSize(request.target.toString());
- return BSON("ok" << 1 << "totalSize" << shardSize);
- };
-
- auto future = launchAsync([this, dbname] {
- ThreadClient tc("Test", getServiceContext());
- auto opCtx = tc->makeOperationContext();
- ShardingCatalogManager::get(opCtx.get())->createDatabase(opCtx.get(), dbname, ShardId());
- });
-
- // Expect one listDatabase request for each shard
- onCommand(listDatabasesResponse);
- onCommand(listDatabasesResponse);
- onCommand(listDatabasesResponse);
-
- // Return OK for _flushDatabaseCacheUpdates
- onCommand([&](const RemoteCommandRequest& request) {
- std::string cmdName = request.cmdObj.firstElement().fieldName();
- ASSERT_EQUALS("_flushDatabaseCacheUpdates", cmdName);
-
- return BSON("ok" << 1);
- });
-
- future.default_timed_get();
-}
-
-TEST_F(CreateDatabaseTest, createDatabaseSuccessWithCustomPrimary) {
- const ShardId primaryShardName("shard0002");
- const std::string dbname = "dbWithCustomPrimary1";
-
- const std::vector<ShardType> shards{{"shard0000", "ShardHost0:27017"},
- {"shard0001", "ShardHost1:27017"},
- {"shard0002", "ShardHost2:27017"}};
- setupShards(shards);
-
- for (const auto& shard : shards) {
- targeterFactory()->addTargeterToReturn(ConnectionString(HostAndPort{shard.getHost()}), [&] {
- auto targeter = std::make_unique<RemoteCommandTargeterMock>();
- targeter->setFindHostReturnValue(HostAndPort{shard.getHost()});
- return targeter;
- }());
- }
-
- // Prime the shard registry with information about the existing shards
- shardRegistry()->reload(operationContext());
-
- auto future = launchAsync([this, dbname, primaryShardName] {
- ThreadClient tc("Test", getServiceContext());
- auto opCtx = tc->makeOperationContext();
- ShardingCatalogManager::get(opCtx.get())
- ->createDatabase(opCtx.get(), dbname, primaryShardName);
- });
-
- // Return OK for _flushDatabaseCacheUpdates
- onCommand([&](const RemoteCommandRequest& request) {
- std::string cmdName = request.cmdObj.firstElement().fieldName();
- ASSERT_EQUALS("_flushDatabaseCacheUpdates", cmdName);
-
- return BSON("ok" << 1);
- });
-
- future.default_timed_get();
-
- auto databaseDoc = assertGet(findOneOnConfigCollection(
- operationContext(), DatabaseType::ConfigNS, BSON("_id" << dbname)));
-
- DatabaseType foundDatabase = assertGet(DatabaseType::fromBSON(databaseDoc));
-
- ASSERT_EQUALS(primaryShardName, foundDatabase.getPrimary());
-}
-
-TEST_F(CreateDatabaseTest, createDatabaseDBExists) {
- const std::string dbname = "db3";
- const ShardType shard{"shard0", "shard0:12345"};
- setupShards({shard});
- setupDatabase(dbname, shard.getName(), false);
-
- targeterFactory()->addTargeterToReturn(ConnectionString(HostAndPort{shard.getHost()}), [&] {
- auto targeter = std::make_unique<RemoteCommandTargeterMock>();
- targeter->setFindHostReturnValue(HostAndPort{shard.getHost()});
- return targeter;
- }());
-
- // Prime the shard registry with information about the existing shard
- shardRegistry()->reload(operationContext());
-
- auto future = launchAsync([this, dbname] {
- ThreadClient tc("Test", getServiceContext());
- auto opCtx = tc->makeOperationContext();
- ShardingCatalogManager::get(opCtx.get())->createDatabase(opCtx.get(), dbname, ShardId());
- });
-
- // Return OK for _flushDatabaseCacheUpdates
- onCommand([&](const RemoteCommandRequest& request) {
- std::string cmdName = request.cmdObj.firstElement().fieldName();
- ASSERT_EQUALS("_flushDatabaseCacheUpdates", cmdName);
-
- return BSON("ok" << 1);
- });
-
- future.default_timed_get();
-}
-
-TEST_F(CreateDatabaseTest, createDatabaseDBExistsDifferentCase) {
- const std::string dbname = "db4";
-
- setupShards({{"shard0", "shard0:12345"}});
- setupDatabase("DB4", ShardId("shard0"), false);
-
- ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
- ->createDatabase(operationContext(), dbname, ShardId()),
- AssertionException,
- ErrorCodes::DatabaseDifferCase);
-}
-
-TEST_F(CreateDatabaseTest, createDatabaseNoShards) {
- const std::string dbname = "db5";
- ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
- ->createDatabase(operationContext(), dbname, ShardId()),
- AssertionException,
- ErrorCodes::ShardNotFound);
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index ff2ba0d90fc..67c8c0103f5 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -35,8 +35,11 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/commands/feature_compatibility_version.h"
+#include "mongo/db/dbdirectclient.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/ops/write_ops.h"
#include "mongo/db/repl/repl_client_info.h"
+#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/server_options.h"
#include "mongo/db/vector_clock.h"
#include "mongo/db/write_concern.h"
@@ -44,17 +47,13 @@
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard.h"
-#include "mongo/s/database_version.h"
#include "mongo/s/grid.h"
-#include "mongo/s/shard_id.h"
#include "mongo/s/shard_util.h"
#include "mongo/s/sharded_collections_ddl_parameters_gen.h"
namespace mongo {
namespace {
-const ReadPreferenceSetting kConfigReadSelector(ReadPreference::Nearest, TagSet{});
-
/**
* Selects an optimal shard on which to place a newly created database from the set of available
* shards. Will return ShardNotFound if shard could not be found.
@@ -92,40 +91,84 @@ ShardId selectShardForNewDatabase(OperationContext* opCtx, ShardRegistry* shardR
DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
StringData dbName,
- const ShardId& primaryShard) {
- invariant(nsIsDbOnly(dbName));
-
- // The admin and config databases should never be explicitly created. They "just exist",
- // i.e. getDatabase will always return an entry for them.
- if (dbName == NamespaceString::kAdminDb || dbName == NamespaceString::kConfigDb) {
- uasserted(ErrorCodes::InvalidOptions,
- str::stream() << "cannot manually create database '" << dbName << "'");
+ const boost::optional<ShardId>& optPrimaryShard,
+ bool enableSharding) {
+ if (dbName == NamespaceString::kConfigDb) {
+ return DatabaseType(
+ dbName.toString(), ShardId::kConfigServerId, true, DatabaseVersion::makeFixed());
}
+ uassert(ErrorCodes::InvalidOptions,
+ str::stream() << "Cannot manually create or shard database '" << dbName << "'",
+ dbName != NamespaceString::kAdminDb && dbName != NamespaceString::kLocalDb);
+
+ uassert(ErrorCodes::InvalidNamespace,
+ str::stream() << "Invalid db name specified: " << dbName,
+ NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow));
+
+ // Make sure to force update of any stale metadata
+ ON_BLOCK_EXIT([&] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbName); });
+
+ DBDirectClient client(opCtx);
+
+ boost::optional<DistLockManager::ScopedDistLock> dbDistLock;
+
+ // First perform an optimistic attempt to write the 'sharded' field to the database entry,
+ // in case this is the only thing that is missing. If that doesn't succeed, go through the
+ // expensive createDatabase flow.
+ while (true) {
+ auto response = client.findAndModify([&] {
+ write_ops::FindAndModifyCommand findAndModify(DatabaseType::ConfigNS);
+ findAndModify.setQuery([&] {
+ BSONObjBuilder queryFilterBuilder;
+ queryFilterBuilder.append(DatabaseType::name.name(), dbName);
+ if (optPrimaryShard) {
+ uassert(ErrorCodes::BadValue,
+ str::stream() << "invalid shard name: " << *optPrimaryShard,
+ optPrimaryShard->isValid());
+ queryFilterBuilder.append(DatabaseType::primary.name(),
+ optPrimaryShard->toString());
+ }
+ return queryFilterBuilder.obj();
+ }());
+ findAndModify.setUpdate(write_ops::UpdateModification::parseFromClassicUpdate(
+ BSON("$set" << BSON(DatabaseType::sharded(enableSharding)))));
+ findAndModify.setUpsert(false);
+ findAndModify.setNew(true);
+ return findAndModify;
+ }());
+
+ if (response.getLastErrorObject().getNumDocs()) {
+ uassert(528120, "Missing value in the response", response.getValue());
+ return uassertStatusOK(DatabaseType::fromBSON(*response.getValue()));
+ }
+
+ if (dbDistLock) {
+ break;
+ }
+
+ // Retry the optimistic write once more, this time holding the dist lock, in order to avoid
+ // taking the expensive path on concurrent createDatabase operations
+ dbDistLock.emplace(uassertStatusOK(DistLockManager::get(opCtx)->lock(
+ opCtx, dbName, "createDatabase", DistLockManager::kDefaultLockTimeout)));
+ }
+
+ // Expensive createDatabase code path
const auto catalogClient = Grid::get(opCtx)->catalogClient();
const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient());
// Check if a database already exists with the same name (case sensitive), and if so, return the
// existing entry.
-
BSONObjBuilder queryBuilder;
queryBuilder.appendRegex(DatabaseType::name(),
(std::string) "^" + pcrecpp::RE::QuoteMeta(dbName.toString()) + "$",
"i");
- auto docs = uassertStatusOK(catalogClient->_exhaustiveFindOnConfig(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- repl::ReadConcernLevel::kLocalReadConcern,
- DatabaseType::ConfigNS,
- queryBuilder.obj(),
- BSONObj(),
- 1))
- .value;
-
+ auto dbDoc = client.findOne(DatabaseType::ConfigNS.ns(), {queryBuilder.obj()});
auto const [primaryShardPtr, database] = [&] {
- if (!docs.empty()) {
- auto actualDb = uassertStatusOK(DatabaseType::fromBSON(docs.front()));
+ if (!dbDoc.isEmpty()) {
+ auto actualDb = uassertStatusOK(DatabaseType::fromBSON(dbDoc));
uassert(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
@@ -135,23 +178,17 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
uassert(
ErrorCodes::NamespaceExists,
- str::stream() << "database already created on a primary which is different from: "
- << primaryShard,
- !primaryShard.isValid() || actualDb.getPrimary() == primaryShard);
+ str::stream() << "database already created on a primary which is different from "
+ << *optPrimaryShard,
+ !optPrimaryShard || *optPrimaryShard == actualDb.getPrimary());
// We did a local read of the database entry above and found that the database already
// exists. However, the data may not be majority committed (a previous createDatabase
// attempt may have failed with a writeConcern error).
// Since the current Client doesn't know the opTime of the last write to the database
// entry, make it wait for the last opTime in the system when we wait for writeConcern.
- auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient());
replClient.setLastOpToSystemLastOpTime(opCtx);
- WriteConcernResult unusedResult;
- uassertStatusOK(waitForWriteConcern(opCtx,
- replClient.getLastOp(),
- ShardingCatalogClient::kMajorityWriteConcern,
- &unusedResult));
return std::make_pair(
uassertStatusOK(shardRegistry->getShard(opCtx, actualDb.getPrimary())), actualDb);
} else {
@@ -159,8 +196,8 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
// catalog.
auto const shardPtr = uassertStatusOK(shardRegistry->getShard(
opCtx,
- primaryShard.isValid() ? primaryShard
- : selectShardForNewDatabase(opCtx, shardRegistry)));
+ optPrimaryShard ? *optPrimaryShard
+ : selectShardForNewDatabase(opCtx, shardRegistry)));
boost::optional<Timestamp> clusterTime;
if (feature_flags::gShardingFullDDLSupportTimestampedVersion.isEnabled(
@@ -172,7 +209,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
// Pick a primary shard for the new database.
DatabaseType db(dbName.toString(),
shardPtr->getId(),
- false,
+ enableSharding,
DatabaseVersion(UUID::gen(), clusterTime));
LOGV2(21938,
@@ -192,6 +229,12 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
}
}();
+ WriteConcernResult unusedResult;
+ uassertStatusOK(waitForWriteConcern(opCtx,
+ replClient.getLastOp(),
+ ShardingCatalogClient::kMajorityWriteConcern,
+ &unusedResult));
+
// Note, making the primary shard refresh its databaseVersion here is not required for
// correctness, since either:
// 1) This is the first time this database is being created. The primary shard will not have a
@@ -216,45 +259,6 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
return database;
}
-void ShardingCatalogManager::enableSharding(OperationContext* opCtx,
- StringData dbName,
- const ShardId& primaryShard) {
- // Sharding is enabled automatically on the config db.
- if (dbName == NamespaceString::kConfigDb) {
- return;
- }
-
- // Creates the database if it doesn't exist and returns the new database entry, else returns the
- // existing database entry.
- auto dbType = createDatabase(opCtx, dbName, primaryShard);
- dbType.setSharded(true);
-
- // We must wait for the database entry to be majority committed, because it's possible that
- // reading from the majority snapshot has been set on the RecoveryUnit due to an earlier read,
- // such as overtaking a distlock or loading the ShardRegistry.
- WriteConcernResult unusedResult;
- uassertStatusOK(
- waitForWriteConcern(opCtx,
- repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(),
- WriteConcernOptions(WriteConcernOptions::kMajority,
- WriteConcernOptions::SyncMode::UNSET,
- Milliseconds{30000}),
- &unusedResult));
-
- LOGV2(21939,
- "Persisted sharding enabled for database {db}",
- "Persisted sharding enabled for database",
- "db"_attr = dbName);
-
- uassertStatusOK(Grid::get(opCtx)->catalogClient()->updateConfigDocument(
- opCtx,
- DatabaseType::ConfigNS,
- BSON(DatabaseType::name(dbName.toString())),
- BSON("$set" << BSON(DatabaseType::sharded(true))),
- false,
- ShardingCatalogClient::kLocalWriteConcern));
-}
-
Status ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
const StringData dbname,
const ShardId& toShard) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
deleted file mode 100644
index ae41929f63e..00000000000
--- a/src/mongo/db/s/config/sharding_catalog_manager_enable_sharding_test.cpp
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/client/remote_command_targeter_factory_mock.h"
-#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/query/query_request_helper.h"
-#include "mongo/db/repl/read_concern_args.h"
-#include "mongo/db/s/config/config_server_test_fixture.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
-#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/rpc/metadata/tracking_metadata.h"
-#include "mongo/s/catalog/type_database.h"
-#include "mongo/s/catalog/type_shard.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/stdx/future.h"
-#include "mongo/util/time_support.h"
-
-namespace mongo {
-namespace {
-
-using executor::RemoteCommandRequest;
-
-using EnableShardingTest = ConfigServerTestFixture;
-
-TEST_F(EnableShardingTest, noDBExists) {
- ShardType shard;
- shard.setName("shard0");
- shard.setHost("shard0:12");
-
- setupShards({shard});
-
- targeterFactory()->addTargeterToReturn(ConnectionString(HostAndPort{shard.getHost()}), [&] {
- auto targeter = std::make_unique<RemoteCommandTargeterMock>();
- targeter->setFindHostReturnValue(HostAndPort{shard.getHost()});
- return targeter;
- }());
-
- auto future = launchAsync([&] {
- ThreadClient tc("Test", getGlobalServiceContext());
- auto opCtx = cc().makeOperationContext();
- ShardingCatalogManager::get(opCtx.get())->enableSharding(opCtx.get(), "db1", ShardId());
- });
-
- // list databases for checking shard size.
- onCommand([](const RemoteCommandRequest& request) {
- ASSERT_EQ(HostAndPort("shard0:12"), request.target);
- ASSERT_EQ("admin", request.dbname);
- ASSERT_BSONOBJ_EQ(BSON("listDatabases" << 1 << "maxTimeMS" << 600000), request.cmdObj);
-
- ASSERT_BSONOBJ_EQ(
- ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(),
- rpc::TrackingMetadata::removeTrackingData(request.metadata));
-
- return fromjson(R"({
- databases: [],
- totalSize: 1,
- ok: 1
- })");
- });
-
- // Return OK for _flushDatabaseCacheUpdates
- onCommand([&](const RemoteCommandRequest& request) {
- std::string cmdName = request.cmdObj.firstElement().fieldName();
- ASSERT_EQUALS("_flushDatabaseCacheUpdates", cmdName);
-
- return BSON("ok" << 1);
- });
-
- future.default_timed_get();
-}
-
-TEST_F(EnableShardingTest, dbExistsWithDifferentCase) {
- ShardType shard;
- shard.setName("shard0");
- shard.setHost("shard0:12");
- setupShards({shard});
- setupDatabase("Db3", shard.getName(), false);
- ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
- ->enableSharding(operationContext(), "db3", ShardId()),
- AssertionException,
- ErrorCodes::DatabaseDifferCase);
-}
-
-TEST_F(EnableShardingTest, dbExists) {
- const std::string dbname = "db4";
- ShardType shard{"shard0", "shard0:12"};
- setupShards({shard});
- setupDatabase(dbname, shard.getName(), false);
-
- targeterFactory()->addTargeterToReturn(ConnectionString(HostAndPort{shard.getHost()}), [&] {
- auto targeter = std::make_unique<RemoteCommandTargeterMock>();
- targeter->setFindHostReturnValue(HostAndPort{shard.getHost()});
- return targeter;
- }());
-
- // Prime the shard registry with information about the existing shard
- shardRegistry()->reload(operationContext());
-
- auto future = launchAsync([this, dbname] {
- ThreadClient tc("Test", getServiceContext());
- auto opCtx = tc->makeOperationContext();
- ShardingCatalogManager::get(opCtx.get())->enableSharding(opCtx.get(), dbname, ShardId());
- });
-
- // Return OK for _flushDatabaseCacheUpdates
- onCommand([&](const RemoteCommandRequest& request) {
- std::string cmdName = request.cmdObj.firstElement().fieldName();
- ASSERT_EQUALS("_flushDatabaseCacheUpdates", cmdName);
-
- return BSON("ok" << 1);
- });
-
- future.default_timed_get();
-}
-
-TEST_F(EnableShardingTest, succeedsWhenTheDatabaseIsAlreadySharded) {
- const std::string dbname = "db5";
- ShardType shard{"shard0", "shard0:12"};
- setupShards({shard});
- setupDatabase(dbname, shard.getName(), true);
-
- targeterFactory()->addTargeterToReturn(ConnectionString(HostAndPort{shard.getHost()}), [&] {
- auto targeter = std::make_unique<RemoteCommandTargeterMock>();
- targeter->setFindHostReturnValue(HostAndPort{shard.getHost()});
- return targeter;
- }());
-
- // Prime the shard registry with information about the existing shard
- shardRegistry()->reload(operationContext());
-
- auto future = launchAsync([this, dbname] {
- ThreadClient tc("Test", getServiceContext());
- auto opCtx = tc->makeOperationContext();
- ShardingCatalogManager::get(opCtx.get())->enableSharding(opCtx.get(), dbname, ShardId());
- });
-
- // Return OK for _flushDatabaseCacheUpdates
- onCommand([&](const RemoteCommandRequest& request) {
- std::string cmdName = request.cmdObj.firstElement().fieldName();
- ASSERT_EQUALS("_flushDatabaseCacheUpdates", cmdName);
-
- return BSON("ok" << 1);
- });
-
- future.default_timed_get();
-}
-
-TEST_F(EnableShardingTest, dbExistsInvalidFormat) {
- ShardType shard;
- shard.setName("shard0");
- shard.setHost("shard0:12");
- setupShards({shard});
-
- // Set up database with bad type for primary field.
- ASSERT_OK(
- catalogClient()->insertConfigDocument(operationContext(),
- DatabaseType::ConfigNS,
- BSON("_id"
- << "db6"
- << "primary" << 12 << "partitioned" << false),
- ShardingCatalogClient::kMajorityWriteConcern));
-
- ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
- ->enableSharding(operationContext(), "db6", ShardId()),
- AssertionException,
- ErrorCodes::TypeMismatch);
-}
-
-TEST_F(EnableShardingTest, noDBExistsNoShards) {
- ASSERT_THROWS_CODE(ShardingCatalogManager::get(operationContext())
- ->enableSharding(operationContext(), "db7", ShardId()),
- AssertionException,
- ErrorCodes::ShardNotFound);
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 704e6d0a113..f18faea2e9a 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -44,10 +44,10 @@
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/cluster_write.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/shard_collection_gen.h"
#include "mongo/s/sharded_collections_ddl_parameters_gen.h"
-#include "mongo/s/write_ops/cluster_write.h"
namespace mongo {
namespace {
@@ -274,7 +274,7 @@ void upsertChunks(OperationContext* opCtx, std::vector<ChunkType>& chunks) {
updateRequest.setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
- ClusterWriter::write(opCtx, updateRequest, &stats, &response);
+ cluster::write(opCtx, updateRequest, &stats, &response);
uassertStatusOK(response.toStatus());
}
@@ -296,7 +296,7 @@ void updateCatalogEntry(OperationContext* opCtx, const NamespaceString& nss, Col
updateRequest.setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern.toBSON());
try {
- ClusterWriter::write(opCtx, updateRequest, &stats, &response);
+ cluster::write(opCtx, updateRequest, &stats, &response);
uassertStatusOK(response.toStatus());
} catch (const DBException&) {
// If an error happens when contacting the config server, we don't know if the update
diff --git a/src/mongo/db/s/drop_collection_legacy.cpp b/src/mongo/db/s/drop_collection_legacy.cpp
index dad6be97dc6..61d91590628 100644
--- a/src/mongo/db/s/drop_collection_legacy.cpp
+++ b/src/mongo/db/s/drop_collection_legacy.cpp
@@ -33,13 +33,13 @@
#include "mongo/db/s/drop_collection_legacy.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/sharding_logging.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/catalog_cache.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/set_shard_version_request.h"
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index 08741817814..f86d51a40c2 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -39,7 +39,6 @@
#include "mongo/db/repl/storage_interface_impl.h"
#include "mongo/db/repl/storage_interface_mock.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/resharding/resharding_coordinator_service.h"
#include "mongo/db/s/resharding_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 47d5d8139bc..715d8cda56d 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -36,7 +36,6 @@
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/s/collection_sharding_runtime.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/logv2/log.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index a92a39c080c..ab2bb5565d6 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -35,7 +35,6 @@
#include "mongo/db/logical_session_cache_noop.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/s/config/config_server_test_fixture.h"
-#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/transaction_coordinator_service.h"
#include "mongo/logv2/log.h"
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 7a9d1685e92..fa4ba54a71b 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -29,7 +29,9 @@ env.SConscript(
env.Library(
target='sharding_api',
source=[
- 'write_ops/cluster_write.cpp',
+ 'chunk_manager_targeter.cpp',
+ 'cluster_ddl.cpp',
+ 'cluster_write.cpp',
],
LIBDEPS=[
'query/cluster_query',
@@ -154,7 +156,6 @@ env.Library(
'request_types/clone_catalog_data.idl',
'request_types/clone_collection_options_from_primary_shard.idl',
'request_types/commit_chunk_migration_request_type.cpp',
- 'request_types/create_database.idl',
'request_types/ensure_chunk_version_is_greater_than.idl',
'request_types/flush_database_cache_updates.idl',
'request_types/flush_routing_table_cache_updates.idl',
@@ -584,6 +585,7 @@ env.CppUnitTest(
'catalog/type_tags_test.cpp',
'chunk_manager_index_bounds_test.cpp',
'chunk_manager_query_test.cpp',
+ 'chunk_manager_targeter_test.cpp',
'chunk_map_test.cpp',
'chunk_test.cpp',
'chunk_version_test.cpp',
@@ -594,6 +596,7 @@ env.CppUnitTest(
'comparable_chunk_version_test.cpp',
'comparable_database_version_test.cpp',
'hedge_options_util_test.cpp',
+ 'mock_ns_targeter.cpp',
'mongos_topology_coordinator_test.cpp',
'request_types/add_shard_request_test.cpp',
'request_types/add_shard_to_zone_request_test.cpp',
@@ -617,8 +620,6 @@ env.CppUnitTest(
'write_ops/batch_write_op_test.cpp',
'write_ops/batched_command_request_test.cpp',
'write_ops/batched_command_response_test.cpp',
- 'write_ops/chunk_manager_targeter_test.cpp',
- 'write_ops/mock_ns_targeter.cpp',
'write_ops/write_op_test.cpp',
],
LIBDEPS=[
@@ -635,6 +636,7 @@ env.CppUnitTest(
'coreshard',
'mongos_topology_coordinator',
'sessions_collection_sharded',
+ 'sharding_api',
'sharding_router_test_fixture',
'sharding_task_executor',
'vector_clock_mongos',
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index d2d7fde1165..184441c6f9b 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -37,6 +37,7 @@
#include "mongo/db/keys_collection_document_gen.h"
#include "mongo/db/repl/optime_with.h"
#include "mongo/db/write_concern_options.h"
+#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
namespace mongo {
@@ -57,8 +58,6 @@ class NamespaceString;
class OperationContext;
class ShardingCatalogManager;
class ShardKeyPattern;
-class ShardRegistry;
-class ShardType;
class Status;
template <typename T>
class StatusWith;
@@ -84,9 +83,6 @@ class ShardingCatalogClient {
ShardingCatalogClient(const ShardingCatalogClient&) = delete;
ShardingCatalogClient& operator=(const ShardingCatalogClient&) = delete;
- // Allows ShardingCatalogManager to access _exhaustiveFindOnConfig
- friend class ShardingCatalogManager;
-
public:
// Constant to use for configuration data majority writes
static const WriteConcernOptions kMajorityWriteConcern;
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 28d4c3a470e..3b473c8f335 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -151,7 +151,6 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionRoutingInfoAt(
try {
const auto swDbInfo = getDatabase(opCtx, nss.db(), allowLocks);
-
if (!swDbInfo.isOK()) {
if (swDbInfo == ErrorCodes::NamespaceNotFound) {
LOGV2_FOR_CATALOG_REFRESH(
diff --git a/src/mongo/s/write_ops/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index a6770cc03c7..ef9c3d9616c 100644
--- a/src/mongo/s/write_ops/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -31,6 +31,8 @@
#include "mongo/platform/basic.h"
+#include "mongo/s/chunk_manager_targeter.h"
+
#include "mongo/base/counter.h"
#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/curop.h"
@@ -45,10 +47,10 @@
#include "mongo/logv2/log.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/cluster_ddl.h"
#include "mongo/s/database_version.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/s/write_ops/chunk_manager_targeter.h"
#include "mongo/util/intrusive_counter.h"
#include "mongo/util/str.h"
#include "signal.h"
@@ -336,7 +338,7 @@ ChunkManagerTargeter::ChunkManagerTargeter(OperationContext* opCtx,
}
void ChunkManagerTargeter::_init(OperationContext* opCtx) {
- createShardDatabase(opCtx, _nss.db());
+ cluster::createDatabase(opCtx, _nss.db());
_cm = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, _nss));
if (_targetEpoch) {
diff --git a/src/mongo/s/write_ops/chunk_manager_targeter.h b/src/mongo/s/chunk_manager_targeter.h
index caaa8884399..caaa8884399 100644
--- a/src/mongo/s/write_ops/chunk_manager_targeter.h
+++ b/src/mongo/s/chunk_manager_targeter.h
diff --git a/src/mongo/s/write_ops/chunk_manager_targeter_test.cpp b/src/mongo/s/chunk_manager_targeter_test.cpp
index b11aa4b1a0f..2ef0a29cbfb 100644
--- a/src/mongo/s/write_ops/chunk_manager_targeter_test.cpp
+++ b/src/mongo/s/chunk_manager_targeter_test.cpp
@@ -33,10 +33,10 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/service_context_test_fixture.h"
#include "mongo/s/catalog_cache_test_fixture.h"
+#include "mongo/s/chunk_manager_targeter.h"
#include "mongo/s/session_catalog_router.h"
#include "mongo/s/transaction_router.h"
#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/s/write_ops/chunk_manager_targeter.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp
index 1430c0487bd..9de77c56b22 100644
--- a/src/mongo/s/cluster_commands_helpers.cpp
+++ b/src/mongo/s/cluster_commands_helpers.cpp
@@ -54,7 +54,7 @@
#include "mongo/s/database_version.h"
#include "mongo/s/grid.h"
#include "mongo/s/multi_statement_transaction_requests_sender.h"
-#include "mongo/s/request_types/create_database_gen.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
#include "mongo/s/shard_id.h"
#include "mongo/s/stale_exception.h"
#include "mongo/s/transaction_router.h"
@@ -708,36 +708,6 @@ bool appendEmptyResultSet(OperationContext* opCtx,
return true;
}
-void createShardDatabase(OperationContext* opCtx, StringData dbName) {
- auto dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
-
- if (dbStatus == ErrorCodes::NamespaceNotFound) {
- ConfigsvrCreateDatabase configCreateDatabaseRequest(dbName.toString());
- configCreateDatabaseRequest.setDbName(NamespaceString::kAdminDb);
-
- auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
-
- auto createDbResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
- opCtx,
- ReadPreferenceSetting(ReadPreference::PrimaryOnly),
- "admin",
- CommandHelpers::appendMajorityWriteConcern(configCreateDatabaseRequest.toBSON({})),
- Shard::RetryPolicy::kIdempotent));
-
- uassertStatusOK(createDbResponse.writeConcernStatus);
-
- if (createDbResponse.commandStatus != ErrorCodes::NamespaceExists) {
- uassertStatusOKWithContext(createDbResponse.commandStatus,
- str::stream()
- << "Database " << dbName << " could not be created");
- }
-
- dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
- }
-
- uassertStatusOKWithContext(dbStatus, str::stream() << "Database " << dbName << " not found");
-}
-
std::set<ShardId> getTargetedShardsForQuery(boost::intrusive_ptr<ExpressionContext> expCtx,
const ChunkManager& cm,
const BSONObj& query,
diff --git a/src/mongo/s/cluster_commands_helpers.h b/src/mongo/s/cluster_commands_helpers.h
index a02adfb94ff..56dc915f7ef 100644
--- a/src/mongo/s/cluster_commands_helpers.h
+++ b/src/mongo/s/cluster_commands_helpers.h
@@ -329,12 +329,6 @@ bool appendEmptyResultSet(OperationContext* opCtx,
const std::string& ns);
/**
- * If the specified database exists already, loads it in the cache (if not already there).
- * Otherwise, if it does not exist, this call will implicitly create it as non-sharded.
- */
-void createShardDatabase(OperationContext* opCtx, StringData dbName);
-
-/**
* Returns the shards that would be targeted for the given query according to the given routing
* info.
*/
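
Caller-side, this removal is mechanical: every former user of createShardDatabase switches to the new cluster namespace entry point introduced below. A one-line migration sketch, using only names that appear in this diff:

    // Before: free helper declared in mongo/s/cluster_commands_helpers.h
    createShardDatabase(opCtx, nss.db());

    // After: same implicit-creation behavior, declared in mongo/s/cluster_ddl.h
    cluster::createDatabase(opCtx, nss.db());
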
diff --git a/src/mongo/s/cluster_ddl.cpp b/src/mongo/s/cluster_ddl.cpp
new file mode 100644
index 00000000000..30e5b85ecf6
--- /dev/null
+++ b/src/mongo/s/cluster_ddl.cpp
@@ -0,0 +1,79 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/s/cluster_ddl.h"
+
+#include "mongo/s/grid.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
+
+namespace mongo {
+namespace cluster {
+
+CachedDatabaseInfo createDatabase(OperationContext* opCtx,
+ StringData dbName,
+ boost::optional<ShardId> suggestedPrimaryId) {
+ auto catalogCache = Grid::get(opCtx)->catalogCache();
+
+ auto dbStatus = catalogCache->getDatabase(opCtx, dbName);
+
+ if (dbStatus == ErrorCodes::NamespaceNotFound) {
+ ConfigsvrCreateDatabase request(dbName.toString());
+ request.setDbName(NamespaceString::kAdminDb);
+ if (suggestedPrimaryId)
+ request.setPrimaryShardId(StringData(suggestedPrimaryId->toString()));
+
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto response = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting(ReadPreference::PrimaryOnly),
+ "admin",
+ CommandHelpers::appendMajorityWriteConcern(request.toBSON({})),
+ Shard::RetryPolicy::kIdempotent));
+ uassertStatusOK(response.writeConcernStatus);
+ uassertStatusOKWithContext(response.commandStatus,
+ str::stream()
+ << "Database " << dbName << " could not be created");
+
+ auto createDbResponse = ConfigsvrCreateDatabaseResponse::parse(
+ IDLParserErrorContext("configsvrCreateDatabaseResponse"), response.response);
+ catalogCache->onStaleDatabaseVersion(
+ dbName, DatabaseVersion(createDbResponse.getDatabaseVersion()));
+
+ dbStatus = catalogCache->getDatabase(opCtx, dbName);
+ }
+
+ return uassertStatusOK(std::move(dbStatus));
+}
+
+} // namespace cluster
+} // namespace mongo
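
A hedged usage sketch of the new entry point (the function and namespace names come from this diff; the wrapper function and the namespace `db1.coll1` are hypothetical):

    #include "mongo/db/namespace_string.h"
    #include "mongo/s/cluster_ddl.h"

    // Ensures the database exists (implicitly creating it as non-sharded if
    // necessary) and returns the cached routing info for it.
    void ensureDatabaseExample(OperationContext* opCtx) {
        const NamespaceString kNss("db1.coll1");  // hypothetical namespace

        // Without a suggested primary, the config server selects one.
        CachedDatabaseInfo dbInfo = cluster::createDatabase(opCtx, kNss.db());

        // With a suggested primary shard (the optional second argument).
        dbInfo = cluster::createDatabase(opCtx, kNss.db(), ShardId("shard0"));
    }
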
diff --git a/src/mongo/s/cluster_ddl.h b/src/mongo/s/cluster_ddl.h
new file mode 100644
index 00000000000..aa1ca192b54
--- /dev/null
+++ b/src/mongo/s/cluster_ddl.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2021-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include "mongo/s/catalog_cache.h"
+
+namespace mongo {
+namespace cluster {
+
+/**
+ * Creates the database `dbName` (or ensures that it has already been created). If
+ * `suggestedPrimaryId` is provided, it is used as the primary shard for the new database.
+ */
+CachedDatabaseInfo createDatabase(OperationContext* opCtx,
+ StringData dbName,
+ boost::optional<ShardId> suggestedPrimaryId = boost::none);
+
+} // namespace cluster
+} // namespace mongo
diff --git a/src/mongo/s/write_ops/cluster_write.cpp b/src/mongo/s/cluster_write.cpp
index 5b8f6639ebb..f5e51142f72 100644
--- a/src/mongo/s/write_ops/cluster_write.cpp
+++ b/src/mongo/s/cluster_write.cpp
@@ -33,19 +33,20 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/write_ops/cluster_write.h"
+#include "mongo/s/cluster_write.h"
#include "mongo/db/lasterror.h"
+#include "mongo/s/chunk_manager_targeter.h"
#include "mongo/s/grid.h"
-#include "mongo/s/write_ops/chunk_manager_targeter.h"
namespace mongo {
+namespace cluster {
-void ClusterWriter::write(OperationContext* opCtx,
- const BatchedCommandRequest& request,
- BatchWriteExecStats* stats,
- BatchedCommandResponse* response,
- boost::optional<OID> targetEpoch) {
+void write(OperationContext* opCtx,
+ const BatchedCommandRequest& request,
+ BatchWriteExecStats* stats,
+ BatchedCommandResponse* response,
+ boost::optional<OID> targetEpoch) {
LastError::Disabled disableLastError(&LastError::get(opCtx->getClient()));
ChunkManagerTargeter targeter(opCtx, request.getNS(), targetEpoch);
@@ -59,4 +60,5 @@ void ClusterWriter::write(OperationContext* opCtx,
4817401, 2, {logv2::LogComponent::kShardMigrationPerf}, "Finished batch write");
}
+} // namespace cluster
} // namespace mongo
diff --git a/src/mongo/s/write_ops/cluster_write.h b/src/mongo/s/cluster_write.h
index 0b2979afae3..66333aefe9b 100644
--- a/src/mongo/s/write_ops/cluster_write.h
+++ b/src/mongo/s/cluster_write.h
@@ -29,26 +29,20 @@
#pragma once
-#include <string>
-
#include "mongo/s/write_ops/batch_write_exec.h"
namespace mongo {
+namespace cluster {
-class BSONObj;
-class OperationContext;
-
-class ClusterWriter {
-public:
- /**
- * If 'targetEpoch' is set, throws a 'StaleEpoch' error if the targeted namespace is found to no
- * longer have the epoch given by 'targetEpoch'.
- */
- static void write(OperationContext* opCtx,
- const BatchedCommandRequest& request,
- BatchWriteExecStats* stats,
- BatchedCommandResponse* response,
- boost::optional<OID> targetEpoch = boost::none);
-};
+/**
+ * If 'targetEpoch' is set, throws a 'StaleEpoch' error if the targeted namespace is found to no
+ * longer have the epoch given by 'targetEpoch'.
+ */
+void write(OperationContext* opCtx,
+ const BatchedCommandRequest& request,
+ BatchWriteExecStats* stats,
+ BatchedCommandResponse* response,
+ boost::optional<OID> targetEpoch = boost::none);
+} // namespace cluster
} // namespace mongo
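
The former ClusterWriter::write static becomes a free function with an unchanged signature. A minimal call sketch, assuming the write_ops::Insert construction pattern used by document_shard_key_update_util.cpp elsewhere in this diff:

    #include "mongo/s/cluster_write.h"
    #include "mongo/s/write_ops/batched_command_request.h"
    #include "mongo/s/write_ops/batched_command_response.h"

    // Routes a single-document batched insert through the new free function.
    void insertExample(OperationContext* opCtx, const NamespaceString& nss) {
        write_ops::Insert insertOp(nss);
        insertOp.setDocuments({BSON("_id" << 1 << "x" << 1)});
        BatchedCommandRequest request(std::move(insertOp));

        BatchedCommandResponse response;
        BatchWriteExecStats stats;
        cluster::write(opCtx, request, &stats, &response);  // targetEpoch defaults to none
        uassertStatusOK(response.toStatus());
    }
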
diff --git a/src/mongo/s/commands/cluster_create_cmd.cpp b/src/mongo/s/commands/cluster_create_cmd.cpp
index 8999dd01aba..a82a85ad02b 100644
--- a/src/mongo/s/commands/cluster_create_cmd.cpp
+++ b/src/mongo/s/commands/cluster_create_cmd.cpp
@@ -36,9 +36,9 @@
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/cluster_ddl.h"
#include "mongo/s/grid.h"
-
namespace mongo {
namespace {
@@ -114,7 +114,7 @@ public:
CreateCommandReply typedRun(OperationContext* opCtx) final {
auto cmd = request();
auto dbName = cmd.getDbName();
- createShardDatabase(opCtx, dbName);
+ cluster::createDatabase(opCtx, dbName);
uassert(ErrorCodes::InvalidOptions,
"specify size:<n> when capped is true",
diff --git a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
index c569774a706..408fff5148f 100644
--- a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
+++ b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp
@@ -37,6 +37,7 @@
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/cluster_ddl.h"
#include "mongo/s/grid.h"
namespace mongo {
@@ -85,7 +86,7 @@ public:
"namespace"_attr = nss,
"command"_attr = redact(cmdObj));
- createShardDatabase(opCtx, dbName);
+ cluster::createDatabase(opCtx, dbName);
auto routingInfo =
uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss));
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index 9feea9a8a8e..65d52e254e1 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -37,8 +37,9 @@
#include "mongo/db/commands.h"
#include "mongo/db/field_parser.h"
#include "mongo/s/catalog_cache.h"
-#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/cluster_ddl.h"
#include "mongo/s/grid.h"
+#include "mongo/s/request_types/sharded_ddl_commands_gen.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
@@ -59,7 +60,6 @@ public:
bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
- static constexpr StringData kShardNameField = "primaryShard"_sd;
std::string help() const override {
return "Enable sharding for a database. Optionally allows the caller to specify the shard "
@@ -89,35 +89,37 @@ public:
const BSONObj& cmdObj,
std::string& errmsg,
BSONObjBuilder& result) override {
+ const std::string dbName = parseNs("", cmdObj);
- const std::string db = parseNs("", cmdObj);
+ auto catalogCache = Grid::get(opCtx)->catalogCache();
+ ON_BLOCK_EXIT([opCtx, dbName] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbName); });
+ constexpr StringData kShardNameField = "primaryShard"_sd;
auto shardElem = cmdObj[kShardNameField];
- std::string shardId = shardElem.ok() ? shardElem.String() : "";
- // Invalidate the routing table cache entry for this database so that we reload the
- // collection the next time it's accessed, even if we receive a failure, e.g. NetworkError.
- auto guard =
- makeGuard([opCtx, db] { Grid::get(opCtx)->catalogCache()->purgeDatabase(db); });
-
-
- BSONObjBuilder remoteCmdObj;
- remoteCmdObj.append("_configsvrEnableSharding", db);
- if (shardElem.ok()) {
- remoteCmdObj.append(kShardNameField, shardId);
- }
+ ConfigsvrCreateDatabase request(dbName);
+ request.setDbName(NamespaceString::kAdminDb);
+ request.setEnableSharding(true);
+ if (shardElem.ok())
+ request.setPrimaryShardId(StringData(shardElem.String()));
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto cmdResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
+ auto response = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
opCtx,
ReadPreferenceSetting(ReadPreference::PrimaryOnly),
"admin",
- CommandHelpers::appendMajorityWriteConcern(
- CommandHelpers::appendGenericCommandArgs(cmdObj, remoteCmdObj.obj()),
- opCtx->getWriteConcern()),
+ CommandHelpers::appendMajorityWriteConcern(request.toBSON({})),
Shard::RetryPolicy::kIdempotent));
+ uassertStatusOKWithContext(response.commandStatus,
+ str::stream()
+ << "Database " << dbName << " could not be created");
+ uassertStatusOK(response.writeConcernStatus);
+
+ auto createDbResponse = ConfigsvrCreateDatabaseResponse::parse(
+ IDLParserErrorContext("configsvrCreateDatabaseResponse"), response.response);
+ catalogCache->onStaleDatabaseVersion(
+ dbName, DatabaseVersion(createDbResponse.getDatabaseVersion()));
- CommandHelpers::filterCommandReplyForPassthrough(cmdResponse.response, &result);
return true;
}
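
With this rewrite, mongos no longer sends a dedicated _configsvrEnableSharding; enableSharding is expressed as _configsvrCreateDatabase with the enableSharding bit set. Roughly, for { enableSharding: "db5", primaryShard: "shard0" } the config server now receives something like the following (a hand-written approximation of request.toBSON({}) plus the majority write concern; field values are illustrative):

    BSONObj cmd = BSON("_configsvrCreateDatabase"
                       << "db5"
                       << "primaryShardId"
                       << "shard0"
                       << "enableSharding" << true
                       << "writeConcern" << BSON("w" << "majority"));
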
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index 2da5a5d859a..df3ff3bf894 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -46,6 +46,7 @@
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/cluster_ddl.h"
#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/document_shard_key_update_util.h"
#include "mongo/s/commands/strategy.h"
@@ -307,9 +308,9 @@ public:
// Collect metrics.
_updateMetrics.collectMetrics(cmdObj);
- // findAndModify should only be creating database if upsert is true, but this would require
- // that the parsing be pulled into this function.
- createShardDatabase(opCtx, nss.db());
+ // Technically, findAndModify should only be creating the database if upsert is true, but
+ // this would require that the parsing be pulled into this function.
+ cluster::createDatabase(opCtx, nss.db());
// Append mongoS' runtime constants to the command object before forwarding it to the shard.
auto cmdObjForShard = appendLegacyRuntimeConstantsToCommandObject(opCtx, cmdObj);
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index ddf09e6b5df..432a0476318 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -46,10 +46,12 @@
#include "mongo/executor/task_executor_pool.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/get_status_from_command_result.h"
+#include "mongo/s/chunk_manager_targeter.h"
#include "mongo/s/client/num_hosts_targeted_metrics.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/cluster_last_error_info.h"
+#include "mongo/s/cluster_write.h"
#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/document_shard_key_update_util.h"
#include "mongo/s/grid.h"
@@ -59,8 +61,6 @@
#include "mongo/s/would_change_owning_shard_exception.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/s/write_ops/chunk_manager_targeter.h"
-#include "mongo/s/write_ops/cluster_write.h"
#include "mongo/util/timer.h"
namespace mongo {
@@ -224,7 +224,7 @@ bool handleWouldChangeOwningShardError(OperationContext* opCtx,
documentShardKeyUpdateUtil::startTransactionForShardKeyUpdate(opCtx);
// Clear the error details from the response object before sending the write again
response->unsetErrDetails();
- ClusterWriter::write(opCtx, *request, &stats, response);
+ cluster::write(opCtx, *request, &stats, response);
wouldChangeOwningShardErrorInfo =
getWouldChangeOwningShardErrorInfo(opCtx, *request, response, !isRetryableWrite);
if (!wouldChangeOwningShardErrorInfo)
@@ -468,7 +468,7 @@ private:
batchedRequest.unsetWriteConcern();
}
- ClusterWriter::write(opCtx, batchedRequest, &stats, &response);
+ cluster::write(opCtx, batchedRequest, &stats, &response);
bool updatedShardKey = false;
if (_batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Update) {
diff --git a/src/mongo/s/commands/document_shard_key_update_util.cpp b/src/mongo/s/commands/document_shard_key_update_util.cpp
index 9611de97138..164007fe36f 100644
--- a/src/mongo/s/commands/document_shard_key_update_util.cpp
+++ b/src/mongo/s/commands/document_shard_key_update_util.cpp
@@ -34,10 +34,10 @@
#include "mongo/base/status_with.h"
#include "mongo/db/namespace_string.h"
#include "mongo/logv2/log.h"
+#include "mongo/s/cluster_write.h"
#include "mongo/s/would_change_owning_shard_exception.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/s/write_ops/cluster_write.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/str.h"
@@ -63,7 +63,7 @@ bool executeOperationsAsPartOfShardKeyUpdate(OperationContext* opCtx,
BatchedCommandResponse deleteResponse;
BatchWriteExecStats deleteStats;
- ClusterWriter::write(opCtx, deleteRequest, &deleteStats, &deleteResponse);
+ cluster::write(opCtx, deleteRequest, &deleteStats, &deleteResponse);
uassertStatusOK(deleteResponse.toStatus());
// If shouldUpsert is true, this means the original command specified {upsert: true} and did not
// match any docs, so we should not match any when doing this delete. If shouldUpsert is false
@@ -87,7 +87,7 @@ bool executeOperationsAsPartOfShardKeyUpdate(OperationContext* opCtx,
BatchedCommandResponse insertResponse;
BatchWriteExecStats insertStats;
- ClusterWriter::write(opCtx, insertRequest, &insertStats, &insertResponse);
+ cluster::write(opCtx, insertRequest, &insertStats, &insertResponse);
uassertStatusOK(insertResponse.toStatus());
uassert(ErrorCodes::NamespaceNotFound,
"Document not successfully inserted while changing shard key for namespace " +
diff --git a/src/mongo/s/database_version.h b/src/mongo/s/database_version.h
index a31bab383ec..1385db40313 100644
--- a/src/mongo/s/database_version.h
+++ b/src/mongo/s/database_version.h
@@ -43,28 +43,16 @@ namespace mongo {
* Once uuids are gone, relational operators should be implemented in this class.
*
*/
-class DatabaseVersion : private DatabaseVersionBase {
+class DatabaseVersion : public DatabaseVersionBase {
public:
- // Make field names accessible
- using DatabaseVersionBase::kTimestampFieldName;
-
- // Make getters and setters accessible
- using DatabaseVersionBase::getLastMod;
- using DatabaseVersionBase::getTimestamp;
- using DatabaseVersionBase::serialize;
- using DatabaseVersionBase::toBSON;
-
- // It returns a new DatabaseVersion marked as fixed. A fixed database version is used to
- // distinguish databases that do not have entries in the sharding catalog, such as 'config' and
- // 'admin'
- static DatabaseVersion makeFixed();
-
DatabaseVersion() = default;
explicit DatabaseVersion(const BSONObj& obj) {
DatabaseVersionBase::parseProtected(IDLParserErrorContext("DatabaseVersion"), obj);
}
+ explicit DatabaseVersion(const DatabaseVersionBase& dbv) : DatabaseVersionBase(dbv) {}
+
/**
* Constructor of a DatabaseVersion based on epochs
*/
@@ -80,6 +68,12 @@ public:
setTimestamp(timestamp);
}
+ // Returns a new hardcoded DatabaseVersion value, which is used to distinguish databases that do
+ // not have entries in the sharding catalog, namely 'config' and 'admin'.
+ static DatabaseVersion makeFixed();
+
+ // Returns a new DatabaseVersion with just the lastMod incremented. This indicates that the
+ // database changed primary, as opposed to being dropped and recreated.
DatabaseVersion makeUpdated() const;
/**
@@ -97,7 +91,7 @@ public:
return getLastMod() == 0;
}
- mongo::UUID getUuid() const {
+ UUID getUuid() const {
return *DatabaseVersionBase::getUuid();
}
};
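
A short sketch of the version semantics documented above, assuming the epoch-based constructor referenced in the surrounding context takes a UUID:

    DatabaseVersion fixed = DatabaseVersion::makeFixed();
    invariant(fixed.isFixed());  // lastMod == 0 marks catalog-less dbs ('config', 'admin')

    DatabaseVersion v1(UUID::gen());          // assumed epoch-based constructor
    DatabaseVersion v2 = v1.makeUpdated();    // movePrimary-style bump
    invariant(v2.getUuid() == v1.getUuid());  // same incarnation, not drop/recreate
    invariant(v2.getLastMod() == v1.getLastMod() + 1);
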
diff --git a/src/mongo/s/write_ops/mock_ns_targeter.cpp b/src/mongo/s/mock_ns_targeter.cpp
index 8a677d15dc4..640f2974239 100644
--- a/src/mongo/s/write_ops/mock_ns_targeter.cpp
+++ b/src/mongo/s/mock_ns_targeter.cpp
@@ -29,7 +29,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/s/write_ops/mock_ns_targeter.h"
+#include "mongo/s/mock_ns_targeter.h"
namespace mongo {
namespace {
diff --git a/src/mongo/s/write_ops/mock_ns_targeter.h b/src/mongo/s/mock_ns_targeter.h
index 4cde0765dd1..4cde0765dd1 100644
--- a/src/mongo/s/write_ops/mock_ns_targeter.h
+++ b/src/mongo/s/mock_ns_targeter.h
diff --git a/src/mongo/s/request_types/create_database.idl b/src/mongo/s/request_types/create_database.idl
deleted file mode 100644
index 50d0753916c..00000000000
--- a/src/mongo/s/request_types/create_database.idl
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (C) 2018-present MongoDB, Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the Server Side Public License, version 1,
-# as published by MongoDB, Inc.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# Server Side Public License for more details.
-#
-# You should have received a copy of the Server Side Public License
-# along with this program. If not, see
-# <http://www.mongodb.com/licensing/server-side-public-license>.
-#
-# As a special exception, the copyright holders give permission to link the
-# code of portions of this program with the OpenSSL library under certain
-# conditions as described in each individual source file and distribute
-# linked combinations including the program with the OpenSSL library. You
-# must comply with the Server Side Public License in all respects for
-# all of the code used other than as permitted herein. If you modify file(s)
-# with this exception, you may extend this exception to your version of the
-# file(s), but you are not obligated to do so. If you do not wish to do so,
-# delete this exception statement from your version. If you delete this
-# exception statement from all source files in the program, then also delete
-# it in the license file.
-#
-
-# createDatabase IDL File
-
-global:
- cpp_namespace: "mongo"
-
-imports:
- - "mongo/idl/basic_types.idl"
-
-commands:
- _configsvrCreateDatabase:
- command_name: _configsvrCreateDatabase
- cpp_name : ConfigsvrCreateDatabase
- description: "The internal createDatabase command on the config server"
- strict: false
- namespace: type
- api_version: ""
- type: string
diff --git a/src/mongo/s/request_types/sharded_ddl_commands.idl b/src/mongo/s/request_types/sharded_ddl_commands.idl
index 7e198edbcc3..bebedb8853f 100644
--- a/src/mongo/s/request_types/sharded_ddl_commands.idl
+++ b/src/mongo/s/request_types/sharded_ddl_commands.idl
@@ -30,14 +30,24 @@ global:
cpp_namespace: "mongo"
imports:
- - "mongo/db/drop_database.idl"
- "mongo/db/commands/rename_collection.idl"
+ - "mongo/db/drop_database.idl"
- "mongo/db/keypattern.idl"
- "mongo/idl/basic_types.idl"
- "mongo/s/chunk_version.idl"
- "mongo/s/database_version.idl"
structs:
+
+ ConfigsvrCreateDatabaseResponse:
+ description: "Response for the create database command"
+ strict: false
+ fields:
+ databaseVersion:
+ type: DatabaseVersionBase
+ description: "The version of the newly-created or already existing database"
+ optional: false
+
RenameCollectionResponse:
description: "Response for the rename collection command"
strict: false
@@ -94,7 +104,7 @@ commands:
type: object
description: "The collation to use for the shard key index."
optional: true
-
+
_shardsvrCreateCollectionParticipant:
command_name: _shardsvrCreateCollectionParticipant
cpp_name: ShardsvrCreateCollectionParticipant
@@ -116,7 +126,6 @@ commands:
idIndex:
type: object
description: "Id index."
-
_shardsvrDropDatabase:
description: "Internal command sent to the primary shard of a database to drop it."
@@ -124,7 +133,7 @@ commands:
namespace: ignored
api_version: ""
cpp_name: ShardsvrDropDatabase
- # The reply can completely removed once 5.0 became last lts
+ # The reply can be completely removed once 5.0 becomes last-lts
reply_type: DropDatabaseReply
_shardsvrDropDatabaseParticipant:
@@ -246,11 +255,34 @@ commands:
_shardsvrRefineCollectionShardKey:
description: "Parser for the _shardsvrRefineCollectionShardKey command"
command_name: _shardsvrRefineCollectionShardKey
+ cpp_name: ShardsvrRefineCollectionShardKey
namespace: concatenate_with_db
api_version: ""
+ strict: false
fields:
newShardKey:
type: KeyPattern
description: "The index specification document to use as the new shard key."
optional: false
- cpp_name: ShardsvrRefineCollectionShardKey
+
+ _configsvrCreateDatabase:
+ description: "The internal createDatabase command on the config server"
+ api_version: ""
+ command_name: _configsvrCreateDatabase
+ cpp_name: ConfigsvrCreateDatabase
+ namespace: type
+ type: string
+ reply_type: ConfigsvrCreateDatabaseResponse
+ strict: false
+ fields:
+ primaryShardId:
+ type: string
+ description: "If set, indicates to the system which shard should be used as the
+ primary for the database (if not already created). Otherwise, the
+ system will use the balancer in order to select a primary."
+ optional: true
+ enableSharding:
+ type: bool
+ description: "May only be set to 'true'. If set, indicates to the config server that
+ it must turn on the 'enableSharding' bit for that database."
+ optional: true
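
For completeness, a sketch of the IDL-generated C++ surface this definition yields; every call below mirrors one made elsewhere in this diff, and `responseObj` stands in for the config server's reply:

    #include "mongo/s/request_types/sharded_ddl_commands_gen.h"

    DatabaseVersion createDatabaseRoundTrip(const BSONObj& responseObj) {
        ConfigsvrCreateDatabase request("db1");        // command value is the db name
        request.setDbName(NamespaceString::kAdminDb);  // command targets 'admin'
        request.setPrimaryShardId(StringData("shard0"));
        request.setEnableSharding(true);               // may only be set to 'true'

        auto parsed = ConfigsvrCreateDatabaseResponse::parse(
            IDLParserErrorContext("configsvrCreateDatabaseResponse"), responseObj);
        return DatabaseVersion(parsed.getDatabaseVersion());
    }
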
diff --git a/src/mongo/s/sessions_collection_sharded.cpp b/src/mongo/s/sessions_collection_sharded.cpp
index c986fabc535..8fc8183fbd5 100644
--- a/src/mongo/s/sessions_collection_sharded.cpp
+++ b/src/mongo/s/sessions_collection_sharded.cpp
@@ -42,16 +42,15 @@
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/cluster_write.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_find.h"
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/s/write_ops/cluster_write.h"
#include "mongo/util/assert_util.h"
namespace mongo {
-
namespace {
BSONObj lsidQuery(const LogicalSessionId& lsid) {
@@ -135,7 +134,7 @@ void SessionsCollectionSharded::refreshSessions(OperationContext* opCtx,
BatchedCommandResponse response;
BatchWriteExecStats stats;
- ClusterWriter::write(opCtx, request, &stats, &response);
+ cluster::write(opCtx, request, &stats, &response);
uassertStatusOK(response.toStatus());
};
@@ -154,7 +153,7 @@ void SessionsCollectionSharded::removeRecords(OperationContext* opCtx,
BatchedCommandResponse response;
BatchWriteExecStats stats;
- ClusterWriter::write(opCtx, request, &stats, &response);
+ cluster::write(opCtx, request, &stats, &response);
uassertStatusOK(response.toStatus());
};
diff --git a/src/mongo/s/sharding_router_test_fixture.h b/src/mongo/s/sharding_router_test_fixture.h
index 2758c3b58af..ff0cdf0ccc6 100644
--- a/src/mongo/s/sharding_router_test_fixture.h
+++ b/src/mongo/s/sharding_router_test_fixture.h
@@ -29,6 +29,7 @@
#pragma once
+#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/sharding_test_fixture_common.h"
namespace mongo {
@@ -38,7 +39,6 @@ class ShardingCatalogClient;
struct ChunkVersion;
class CollectionType;
class ShardRegistry;
-class ShardType;
namespace transport {
class TransportLayerMock;
diff --git a/src/mongo/s/write_ops/SConscript b/src/mongo/s/write_ops/SConscript
index a38386bb15d..f834c0167cd 100644
--- a/src/mongo/s/write_ops/SConscript
+++ b/src/mongo/s/write_ops/SConscript
@@ -30,7 +30,6 @@ env.Library(
source=[
'batch_write_exec.cpp',
'batch_write_op.cpp',
- 'chunk_manager_targeter.cpp',
'write_op.cpp',
],
LIBDEPS=[
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 951d9837069..db148b18228 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -37,6 +37,7 @@
#include "mongo/db/vector_clock.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/mock_ns_targeter.h"
#include "mongo/s/session_catalog_router.h"
#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/stale_exception.h"
@@ -44,7 +45,6 @@
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h"
-#include "mongo/s/write_ops/mock_ns_targeter.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index db4bb7a05ba..3e52a01db1f 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -30,12 +30,12 @@
#include "mongo/platform/basic.h"
#include "mongo/base/owned_pointer_map.h"
+#include "mongo/s/mock_ns_targeter.h"
#include "mongo/s/session_catalog_router.h"
#include "mongo/s/sharding_router_test_fixture.h"
#include "mongo/s/transaction_router.h"
#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/s/write_ops/mock_ns_targeter.h"
#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index b262e0f4544..acbbe528b8d 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -31,10 +31,10 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/service_context_test_fixture.h"
+#include "mongo/s/mock_ns_targeter.h"
#include "mongo/s/session_catalog_router.h"
#include "mongo/s/transaction_router.h"
#include "mongo/s/write_ops/batched_command_request.h"
-#include "mongo/s/write_ops/mock_ns_targeter.h"
#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/s/write_ops/write_op.h"
#include "mongo/unittest/unittest.h"