author     Enrico Golfieri <enrico.golfieri@mongodb.com>    2022-06-22 08:41:11 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com> 2022-06-22 09:30:27 +0000
commit     ad2b9ae23ad19f871f2033dc338c96ad9aa8d161 (patch)
tree       ba9f2630e6a06aa55e9ea210dfca76164c9f536d
parent     b987618f40153ceaeeed328fffb07991f8c6a7cf (diff)
SERVER-65645 IDL-ify all commands on the chunk migration path
-rw-r--r--  jstests/auth/lib/commands_lib.js                                     |  25
-rw-r--r--  jstests/concurrency/fsm_workload_helpers/chunks.js                   |   9
-rw-r--r--  jstests/core/views/views_all_commands.js                             |   2
-rw-r--r--  jstests/sharding/compound_hashed_shard_key_sharding_cmds.js          |   6
-rw-r--r--  src/mongo/db/s/SConscript                                            |   1
-rw-r--r--  src/mongo/db/s/commit_chunk_migration.idl                            |  85
-rw-r--r--  src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp   | 109
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp                          |  22
-rw-r--r--  src/mongo/s/SConscript                                               |   2
-rw-r--r--  src/mongo/s/commands/cluster_commands.idl                            |  49
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp                      | 251
-rw-r--r--  src/mongo/s/request_types/commit_chunk_migration_request_test.cpp    |  93
-rw-r--r--  src/mongo/s/request_types/commit_chunk_migration_request_type.cpp    | 172
-rw-r--r--  src/mongo/s/request_types/commit_chunk_migration_request_type.h      | 109
-rw-r--r--  src/mongo/shell/shardingtest.js                                      |   9
15 files changed, 378 insertions(+), 566 deletions(-)
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index d4941a18b1f..f67108737f0 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -3028,8 +3028,27 @@ var authCommandsLib = {
]
},
{
- testname: "_configsvrCommitChunkMigration",
- command: {_configsvrCommitChunkMigration: "x.y"},
+ testname: "_configsvrCommitChunkMigration",
+ command: {
+ _configsvrCommitChunkMigration: "db.fooHashed",
+ fromShard: "move_chunk_basic-rs0",
+ toShard: "move_chunk_basic-rs1",
+ migratedChunk: {
+ lastmod: {
+ e: new ObjectId('62b052ac7f5653479a67a54f'),
+ t: new Timestamp(1655722668, 22),
+ v: new Timestamp(1, 0)
+ },
+ min: {_id: MinKey},
+ max: {_id: -4611686018427387902}
+ },
+ fromShardCollectionVersion: {
+ e: new ObjectId('62b052ac7f5653479a67a54f'),
+ t: new Timestamp(1655722668, 22),
+ v: new Timestamp(1, 3)
+ },
+ validAfter: new Timestamp(1655722670, 6)
+ },
skipSharded: true,
expectFail: true,
testcases: [
@@ -5010,7 +5029,7 @@ var authCommandsLib = {
},
{
testname: "s_moveChunk",
- command: {moveChunk: "test.x"},
+ command: {moveChunk: "test.x", find:{}, to:"a"},
skipUnlessSharded: true,
testcases: [
{
diff --git a/jstests/concurrency/fsm_workload_helpers/chunks.js b/jstests/concurrency/fsm_workload_helpers/chunks.js
index caa84a6c38c..2c71eda6a87 100644
--- a/jstests/concurrency/fsm_workload_helpers/chunks.js
+++ b/jstests/concurrency/fsm_workload_helpers/chunks.js
@@ -70,13 +70,16 @@ var ChunkHelper = (function() {
moveChunk: db[collName].getFullName(),
bounds: bounds,
to: toShard,
- _waitForDelete: waitForDelete
};
+ if (waitForDelete != null) {
+ cmd._waitForDelete = waitForDelete;
+ }
+
// Using _secondaryThrottle adds coverage for additional waits for write concern on the
// recipient during cloning.
- if (secondaryThrottle) {
- cmd._secondaryThrottle = true;
+ if (secondaryThrottle != null) {
+ cmd._secondaryThrottle = secondaryThrottle;
cmd.writeConcern = {w: "majority"}; // _secondaryThrottle requires a write concern.
}
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 3aadf9a5171..d0a15afe8f6 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -495,7 +495,7 @@ let viewsCommandTests = {
expectedErrorCode: ErrorCodes.NamespaceNotSharded,
},
moveChunk: {
- command: {moveChunk: "test.view"},
+ command: {moveChunk: "test.view", find: {}, to: "a"},
skipStandalone: true,
isAdminCommand: true,
expectFailure: true,
diff --git a/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js b/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
index 4be26f1c18d..09885c4b3d7 100644
--- a/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
+++ b/jstests/sharding/compound_hashed_shard_key_sharding_cmds.js
@@ -133,9 +133,11 @@ function testMoveChunk(shardKey) {
// Error if either of the bounds is not a valid shard key.
assert.commandFailedWithCode(
- st.s0.adminCommand({moveChunk: ns, bounds: [NaN, aChunk.max], to: shard1}), 10065);
+ st.s0.adminCommand({moveChunk: ns, bounds: [NaN, aChunk.max], to: shard1}),
+ ErrorCodes.TypeMismatch);
assert.commandFailedWithCode(
- st.s0.adminCommand({moveChunk: ns, bounds: [aChunk.min, NaN], to: shard1}), 10065);
+ st.s0.adminCommand({moveChunk: ns, bounds: [aChunk.min, NaN], to: shard1}),
+ ErrorCodes.TypeMismatch);
assert.commandWorked(
st.s0.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript
index 072247f2de5..96f4e84813a 100644
--- a/src/mongo/db/s/SConscript
+++ b/src/mongo/db/s/SConscript
@@ -55,6 +55,7 @@ env.Library(
'collection_critical_section_document.idl',
'collection_sharding_runtime.cpp',
'collection_sharding_state_factory_shard.cpp',
+ 'commit_chunk_migration.idl',
'config_server_op_observer.cpp',
'global_index_metrics.cpp',
'metadata_manager.cpp',
diff --git a/src/mongo/db/s/commit_chunk_migration.idl b/src/mongo/db/s/commit_chunk_migration.idl
new file mode 100644
index 00000000000..6484623cd5c
--- /dev/null
+++ b/src/mongo/db/s/commit_chunk_migration.idl
@@ -0,0 +1,85 @@
+
+ # Copyright (C) 2019-present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+
+global:
+ cpp_namespace: "mongo"
+
+imports:
+ - "mongo/idl/basic_types.idl"
+ - "mongo/s/sharding_types.idl"
+ - "mongo/s/chunk_version.idl"
+
+structs:
+ ConfigSvrCommitChunkMigrationResponse:
+ description: "Response of the _configsvrCommitChunkMigration command."
+ strict: false
+ fields:
+ shardVersion:
+ type: ChunkVersion
+ description: "Collection version at the end of the migration."
+
+ MigratedChunkType:
+ description: "ChunkType describing a migrated chunk"
+ strict: false
+ fields:
+ lastmod : ChunkVersion
+ min: object
+ max: object
+
+commands:
+ _configsvrCommitChunkMigration:
+ command_name: _configsvrCommitChunkMigration
+ cpp_name: CommitChunkMigrationRequest
+ description: "internal _configsvrCommitChunkMigration command for config server"
+ namespace: type
+ api_version: ""
+ type: namespacestring
+ strict: false
+ reply_type: ConfigSvrCommitChunkMigrationResponse
+ fields:
+ fromShard:
+ type: shard_id
+ description: "from shard name"
+
+ toShard:
+ type: shard_id
+ description: "to shard name"
+
+ migratedChunk:
+ type: MigratedChunkType
+ description: "ChunkType describing a migrated chunk"
+
+ fromShardCollectionVersion:
+ type: ChunkVersion
+ description: "{ shardVersionField: <version> }"
+
+ validAfter:
+ type: timestamp
+ description: "The time after which this chunk is at the new shard" \ No newline at end of file
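
Note: a request matching this new IDL spec is simply the BSON document that the commands_lib.js auth test above constructs. A minimal shell-side sketch follows; all values (shard names, ObjectId/Timestamp contents, key bounds) are illustrative placeholders, not taken from a real migration:

    // _configsvrCommitChunkMigration request shape per commit_chunk_migration.idl.
    // 'lastmod' and 'fromShardCollectionVersion' use the same ChunkVersion layout
    // shown in the commands_lib.js test case earlier in this diff.
    var commitRequest = {
        _configsvrCommitChunkMigration: "db.fooHashed",  // namespace (command parameter)
        fromShard: "shard0000",                          // donor shard id (placeholder)
        toShard: "shard0001",                            // recipient shard id (placeholder)
        migratedChunk: {
            lastmod: {e: ObjectId(), t: Timestamp(1655722668, 22), v: Timestamp(1, 0)},
            min: {_id: MinKey},
            max: {_id: -4611686018427387902}
        },
        fromShardCollectionVersion: {e: ObjectId(), t: Timestamp(1655722668, 22), v: Timestamp(1, 3)},
        validAfter: Timestamp(1655722670, 6)
    };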
diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
index 9dcff9c96d0..a50f499662f 100644
--- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
+++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp
@@ -31,12 +31,14 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/s/chunk_move_write_concern_options.h"
+#include "mongo/db/s/commit_chunk_migration_gen.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -44,7 +46,6 @@
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding
@@ -79,9 +80,23 @@ namespace {
* }
*
*/
-class ConfigSvrCommitChunkMigrationCommand : public BasicCommand {
+
+
+ChunkType toChunkType(const MigratedChunkType& migratedChunk) {
+
+ ChunkType chunk;
+ chunk.setMin(migratedChunk.getMin());
+ chunk.setMax(migratedChunk.getMax());
+ chunk.setVersion(migratedChunk.getLastmod());
+ return chunk;
+}
+
+
+class ConfigSvrCommitChunkMigrationCommand
+ : public TypedCommand<ConfigSvrCommitChunkMigrationCommand> {
public:
- ConfigSvrCommitChunkMigrationCommand() : BasicCommand("_configsvrCommitChunkMigration") {}
+ using Request = CommitChunkMigrationRequest;
+ using Response = ConfigSvrCommitChunkMigrationResponse;
bool skipApiVersionCheck() const override {
// Internal command (server to server).
@@ -100,51 +115,57 @@ public:
return true;
}
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
+ class Invocation : public InvocationBase {
+ public:
+ using InvocationBase::InvocationBase;
+
+ ConfigSvrCommitChunkMigrationResponse typedRun(OperationContext* opCtx) {
+
+ uassert(ErrorCodes::IllegalOperation,
+ "_configsvrClearJumboFlag can only be run on config servers",
+ serverGlobalParams.clusterRole == ClusterRole::ConfigServer);
+
+ // Set the operation context read concern level to local for reads into the config
+ // database.
+ repl::ReadConcernArgs::get(opCtx) =
+ repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
+
+ const NamespaceString nss = ns();
+ auto migratedChunk = toChunkType(request().getMigratedChunk());
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::internal)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
+ StatusWith<BSONObj> chunkVersionResponse =
+ ShardingCatalogManager::get(opCtx)->commitChunkMigration(
+ opCtx,
+ nss,
+ migratedChunk,
+ request().getFromShardCollectionVersion().epoch(),
+ request().getFromShardCollectionVersion().getTimestamp(),
+ request().getFromShard(),
+ request().getToShard(),
+ request().getValidAfter());
+
+ auto chunkVersionObj = uassertStatusOK(chunkVersionResponse);
+
+ return Response{ChunkVersion::parse(chunkVersionObj[ChunkVersion::kShardVersionField])};
}
- return Status::OK();
- }
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return CommandHelpers::parseNsFullyQualified(cmdObj);
- }
+ private:
+ bool supportsWriteConcern() const override {
+ return true;
+ }
- bool run(OperationContext* opCtx,
- const std::string& dbName,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) override {
-
- // Set the operation context read concern level to local for reads into the config database.
- repl::ReadConcernArgs::get(opCtx) =
- repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern);
-
- const NamespaceString nss = NamespaceString(parseNs(dbName, cmdObj));
-
- auto commitRequest =
- uassertStatusOK(CommitChunkMigrationRequest::createFromCommand(nss, cmdObj));
-
- StatusWith<BSONObj> response = ShardingCatalogManager::get(opCtx)->commitChunkMigration(
- opCtx,
- nss,
- commitRequest.getMigratedChunk(),
- commitRequest.getCollectionEpoch(),
- commitRequest.getCollectionTimestamp(),
- commitRequest.getFromShard(),
- commitRequest.getToShard(),
- commitRequest.getValidAfter());
- uassertStatusOK(response.getStatus());
- result.appendElements(response.getValue());
- return true;
- }
+ NamespaceString ns() const override {
+ return request().getCommandParameter();
+ }
+
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(),
+ ActionType::internal));
+ }
+ };
} configsvrCommitChunkMigrationCommand;
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index d82f5c1790c..a0fc3e650ee 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -39,6 +39,7 @@
#include "mongo/db/read_concern.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/auto_split_vector.h"
+#include "mongo/db/s/commit_chunk_migration_gen.h"
#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/migration_coordinator.h"
#include "mongo/db/s/migration_util.h"
@@ -59,7 +60,6 @@
#include "mongo/s/catalog_cache_loader.h"
#include "mongo/s/grid.h"
#include "mongo/s/pm2423_feature_flags_gen.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/duration.h"
#include "mongo/util/elapsed_tracker.h"
@@ -557,20 +557,18 @@ void MigrationSourceManager::commitChunkMetadataOnConfig() {
{
const auto metadata = _getCurrentMetadataAndCheckEpoch();
- ChunkType migratedChunkType;
- migratedChunkType.setMin(*_args.getMin());
- migratedChunkType.setMax(*_args.getMax());
- migratedChunkType.setVersion(*_chunkVersion);
+ auto migratedChunk = MigratedChunkType(*_chunkVersion, *_args.getMin(), *_args.getMax());
const auto currentTime = VectorClock::get(_opCtx)->getTime();
- CommitChunkMigrationRequest::appendAsCommand(&builder,
- nss(),
- _args.getFromShard(),
- _args.getToShard(),
- migratedChunkType,
- metadata.getCollVersion(),
- currentTime.clusterTime().asTimestamp());
+ CommitChunkMigrationRequest request(nss(),
+ _args.getFromShard(),
+ _args.getToShard(),
+ migratedChunk,
+ metadata.getCollVersion(),
+ currentTime.clusterTime().asTimestamp());
+
+ request.serialize({}, &builder);
builder.append(kWriteConcernField, kMajorityWriteConcern.toBSON());
}
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index 002b6e5f804..1998f44dc5e 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -192,7 +192,6 @@ env.Library(
'request_types/balancer_collection_status.idl',
'request_types/cleanup_reshard_collection.idl',
'request_types/clone_catalog_data.idl',
- 'request_types/commit_chunk_migration_request_type.cpp',
'request_types/commit_reshard_collection.idl',
'request_types/configure_collection_balancing.idl',
'request_types/drop_collection_if_uuid_not_matching.idl',
@@ -640,7 +639,6 @@ env.CppUnitTest(
'request_types/add_shard_request_test.cpp',
'request_types/add_shard_to_zone_request_test.cpp',
'request_types/balance_chunk_request_test.cpp',
- 'request_types/commit_chunk_migration_request_test.cpp',
'request_types/merge_chunks_request_test.cpp',
'request_types/migration_secondary_throttle_options_test.cpp',
'request_types/move_chunk_request_test.cpp',
diff --git a/src/mongo/s/commands/cluster_commands.idl b/src/mongo/s/commands/cluster_commands.idl
index 26d5ad8760c..bc5edc8c7a4 100644
--- a/src/mongo/s/commands/cluster_commands.idl
+++ b/src/mongo/s/commands/cluster_commands.idl
@@ -107,3 +107,52 @@ commands:
description: "The shard key value that is within a chunk's boundaries.
Cannot be used on collections with hashed shard keys."
optional: true
+
+ moveChunk:
+ description : "The public moveChunk command on mongos."
+ command_name : moveChunk
+ command_alias: movechunk
+ cpp_name: ClusterMoveChunkRequest
+ strict: false
+ namespace: type
+ api_version: ""
+ type: namespacestring
+ fields:
+ bounds:
+ type: array<object>
+ description: "The bounds of a specific chunk to move. The array must consist of two documents that specify the lower and upper shard key values of a chunk to move. Specify either the bounds field or the find field but not both."
+ optional: true
+ find:
+ type: object
+ description: "An equality match on the shard key that specifies the shard-key value of the chunk to move. Specify either the bounds field or the find field but not both."
+ optional: true
+ to:
+ type: string
+ description: "The name of the destination shard for the chunk."
+
+ forceJumbo:
+ type: bool
+ description: "Specifies whether or not forcing jumbo chunks move"
+ default: false
+
+ writeConcern:
+ type: object_owned
+ description: "A document that expresses the write concern that the _secondaryThrottle will use to wait for secondaries during the chunk migration."
+ default: BSONObj()
+
+ # Secondary throttle can be specified by passing one of the following 2 parameters
+ secondaryThrottle:
+ type: optionalBool
+ description: "Secondary throttle policy to adopt during the migration"
+ _secondaryThrottle:
+ type: optionalBool
+ description: "Secondary throttle policy to adopt during the migration"
+
+ # Wait for delete can be specified with one of the following 2 parameters
+ waitForDelete:
+ type: optionalBool
+ description: "Internal option for testing purposes. The default is false. If set to true, the delete phase of a moveChunk operation blocks."
+ _waitForDelete:
+ type: optionalBool
+ description: "Internal option for testing purposes. The default is false. If set to true, the delete phase of a moveChunk operation blocks."
+
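For orientation, the public command this IDL now describes is invoked from the shell as in the help text of cluster_move_chunk_cmd.cpp below. A rough sketch, with namespace, shard key, and shard name as placeholders:

    // 'find' form: an equality match on the shard key selects the chunk to move.
    db.adminCommand({moveChunk: "test.foo", find: {num: 7}, to: "shard001"});

    // 'bounds' form: explicit lower/upper shard key bounds. 'find' and 'bounds' are
    // mutually exclusive, and per the field list above both the plain and the
    // underscore-prefixed spellings of the throttle/wait options are accepted.
    db.adminCommand({
        moveChunk: "test.foo",
        bounds: [{num: MinKey}, {num: 0}],
        to: "shard001",
        _secondaryThrottle: true,
        writeConcern: {w: "majority"},  // _secondaryThrottle requires a write concern
        _waitForDelete: true
    });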
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index b1149c67c5c..9ab1d5a45ae 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -42,6 +42,7 @@
#include "mongo/s/catalog_cache.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_commands_helpers.h"
+#include "mongo/s/commands/cluster_commands_gen.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/migration_secondary_throttle_options.h"
#include "mongo/s/request_types/move_range_request_gen.h"
@@ -53,20 +54,22 @@
namespace mongo {
namespace {
-class MoveChunkCmd : public ErrmsgCommandDeprecated {
+class MoveChunkCmd final : public TypedCommand<MoveChunkCmd> {
public:
- MoveChunkCmd() : ErrmsgCommandDeprecated("moveChunk", "movechunk") {}
+ MoveChunkCmd()
+ : TypedCommand(ClusterMoveChunkRequest::kCommandName,
+ ClusterMoveChunkRequest::kCommandAlias) {}
+
+ using Request = ClusterMoveChunkRequest;
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kAlways;
}
+
bool adminOnly() const override {
return true;
}
- bool supportsWriteConcern(const BSONObj& cmd) const override {
- return true;
- }
std::string help() const override {
return "Example: move chunk that contains the doc {num : 7} to shard001\n"
@@ -76,148 +79,150 @@ public:
" , to : 'shard001' }\n";
}
- Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const override {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forExactNamespace(NamespaceString(parseNs(dbname, cmdObj))),
- ActionType::moveChunk)) {
- return Status(ErrorCodes::Unauthorized, "Unauthorized");
- }
-
- return Status::OK();
- }
+ class Invocation : public MinimalInvocationBase {
+ public:
+ using MinimalInvocationBase::MinimalInvocationBase;
- std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
- return CommandHelpers::parseNsFullyQualified(cmdObj);
- }
+ private:
+ bool supportsWriteConcern() const override {
+ return true;
+ }
- bool errmsgRun(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- std::string& errmsg,
- BSONObjBuilder& result) override {
- Timer t;
-
- const NamespaceString nss(parseNs(dbname, cmdObj));
-
- const auto cm = uassertStatusOK(
- Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
- nss));
-
- const auto toElt = cmdObj["to"];
- uassert(ErrorCodes::TypeMismatch,
- "'to' must be of type String",
- toElt.type() == BSONType::String);
- const std::string toString = toElt.str();
- if (!toString.size()) {
- errmsg = "you have to specify where you want to move the chunk";
- return false;
+ void doCheckAuthorization(OperationContext* opCtx) const override {
+ uassert(ErrorCodes::Unauthorized,
+ "Unauthorized",
+ AuthorizationSession::get(opCtx->getClient())
+ ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()),
+ ActionType::moveChunk));
}
- const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
- if (!toStatus.isOK()) {
- LOGV2_OPTIONS(22755,
- {logv2::UserAssertAfterLog(ErrorCodes::ShardNotFound)},
- "Could not move chunk in {namespace} to {toShardId} because that shard"
- " does not exist",
- "moveChunk destination shard does not exist",
- "toShardId"_attr = toString,
- "namespace"_attr = nss.ns());
+ NamespaceString ns() const override {
+ return request().getCommandParameter();
}
- const auto to = toStatus.getValue();
- const auto forceJumboElt = cmdObj["forceJumbo"];
- const auto forceJumbo = forceJumboElt && forceJumboElt.Bool();
+ void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* result) {
- BSONObj find = cmdObj.getObjectField("find");
- BSONObj bounds = cmdObj.getObjectField("bounds");
+ Timer t;
+ const auto chunkManager = uassertStatusOK(
+ Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx,
+ ns()));
- // check that only one of the two chunk specification methods is used
- if (find.isEmpty() == bounds.isEmpty()) {
- errmsg = "need to specify either a find query, or both lower and upper bounds.";
- return false;
- }
+ uassert(ErrorCodes::InvalidOptions,
+ "bounds can only have exactly 2 elements",
+ !request().getBounds() || request().getBounds()->size() == 2);
- boost::optional<Chunk> chunk;
+ uassert(ErrorCodes::InvalidOptions,
+ "cannot specify bounds and query at the same time",
+ !(request().getFind() && request().getBounds()));
- if (!find.isEmpty()) {
- // find
- BSONObj shardKey =
- uassertStatusOK(cm.getShardKeyPattern().extractShardKeyFromQuery(opCtx, nss, find));
- if (shardKey.isEmpty()) {
- errmsg = str::stream() << "no shard key found in chunk query " << find;
- return false;
- }
+ uassert(ErrorCodes::InvalidOptions,
+ "need to specify query or bounds",
+ request().getFind() || request().getBounds());
- chunk.emplace(cm.findIntersectingChunkWithSimpleCollation(shardKey));
- } else {
- // bounds
- if (!cm.getShardKeyPattern().isShardKey(bounds[0].Obj()) ||
- !cm.getShardKeyPattern().isShardKey(bounds[1].Obj())) {
- errmsg = str::stream()
- << "shard key bounds "
- << "[" << bounds[0].Obj() << "," << bounds[1].Obj() << ")"
- << " are not valid for shard key pattern " << cm.getShardKeyPattern().toBSON();
- return false;
+
+ std::string destination = request().getTo().toString();
+ const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, destination);
+
+ if (!toStatus.isOK()) {
+ LOGV2_OPTIONS(
+ 22755,
+ {logv2::UserAssertAfterLog(ErrorCodes::ShardNotFound)},
+ "Could not move chunk in {namespace} to {toShardId} because that shard"
+ " does not exist",
+ "moveChunk destination shard does not exist",
+ "toShardId"_attr = destination,
+ "namespace"_attr = ns());
}
- BSONObj minKey = cm.getShardKeyPattern().normalizeShardKey(bounds[0].Obj());
- BSONObj maxKey = cm.getShardKeyPattern().normalizeShardKey(bounds[1].Obj());
- chunk.emplace(cm.findIntersectingChunkWithSimpleCollation(minKey));
+ const auto to = toStatus.getValue();
+
+ auto find = request().getFind();
+ auto bounds = request().getBounds();
- if (chunk->getMin().woCompare(minKey) != 0 || chunk->getMax().woCompare(maxKey) != 0) {
- errmsg = str::stream() << "no chunk found with the shard key bounds "
- << ChunkRange(minKey, maxKey).toString();
- return false;
+
+ boost::optional<Chunk> chunk;
+
+ if (find) {
+ // find
+ BSONObj shardKey = uassertStatusOK(
+ chunkManager.getShardKeyPattern().extractShardKeyFromQuery(opCtx, ns(), *find));
+
+ uassert(656450,
+ str::stream() << "no shard key found in chunk query " << *find,
+ !shardKey.isEmpty());
+
+ chunk.emplace(chunkManager.findIntersectingChunkWithSimpleCollation(shardKey));
+ } else {
+
+ auto minBound = bounds->front();
+ auto maxBound = bounds->back();
+ uassert(656451,
+ str::stream() << "shard key bounds "
+ << "[" << minBound << "," << maxBound << ")"
+ << " are not valid for shard key pattern "
+ << chunkManager.getShardKeyPattern().toBSON(),
+ chunkManager.getShardKeyPattern().isShardKey(minBound) &&
+ chunkManager.getShardKeyPattern().isShardKey(maxBound));
+
+ BSONObj minKey = chunkManager.getShardKeyPattern().normalizeShardKey(minBound);
+ BSONObj maxKey = chunkManager.getShardKeyPattern().normalizeShardKey(maxBound);
+
+ chunk.emplace(chunkManager.findIntersectingChunkWithSimpleCollation(minKey));
+ uassert(656452,
+ str::stream() << "no chunk found with the shard key bounds "
+ << ChunkRange(minKey, maxKey).toString(),
+ chunk->getMin().woCompare(minKey) == 0 &&
+ chunk->getMax().woCompare(maxKey) == 0);
}
- }
- const auto secondaryThrottle =
- uassertStatusOK(MigrationSecondaryThrottleOptions::createFromCommand(cmdObj));
- const bool waitForDelete =
- cmdObj["_waitForDelete"].trueValue() || cmdObj["waitForDelete"].trueValue();
+ MoveRangeRequestBase moveRangeReq;
+ moveRangeReq.setToShard(to->getId());
+ moveRangeReq.setMin(chunk->getMin());
+ moveRangeReq.setMax(chunk->getMax());
+ moveRangeReq.setWaitForDelete(request().getWaitForDelete().value_or(false) ||
+ request().get_waitForDelete().value_or(false));
+
+
+ ConfigsvrMoveRange configsvrRequest(ns());
+ configsvrRequest.setDbName(NamespaceString::kAdminDb);
+ configsvrRequest.setMoveRangeRequestBase(moveRangeReq);
- MoveRangeRequestBase moveRangeReq;
- moveRangeReq.setToShard(to->getId());
- moveRangeReq.setMin(chunk->getMin());
- moveRangeReq.setMax(chunk->getMax());
- moveRangeReq.setWaitForDelete(waitForDelete);
+ const auto secondaryThrottle = uassertStatusOK(
+ MigrationSecondaryThrottleOptions::createFromCommand(request().toBSON({})));
- ConfigsvrMoveRange configsvrRequest(nss);
- configsvrRequest.setDbName(NamespaceString::kAdminDb);
- configsvrRequest.setMoveRangeRequestBase(moveRangeReq);
- configsvrRequest.setForceJumbo(forceJumbo ? ForceJumbo::kForceManual
- : ForceJumbo::kDoNotForce);
- if (secondaryThrottle.getSecondaryThrottle() == MigrationSecondaryThrottleOptions::kOn) {
configsvrRequest.setSecondaryThrottle(secondaryThrottle);
+
+ configsvrRequest.setForceJumbo(request().getForceJumbo() ? ForceJumbo::kForceManual
+ : ForceJumbo::kDoNotForce);
+
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
+ auto commandResponse = configShard->runCommandWithFixedRetryAttempts(
+ opCtx,
+ ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+ NamespaceString::kAdminDb.toString(),
+ CommandHelpers::appendMajorityWriteConcern(configsvrRequest.toBSON({})),
+ Shard::RetryPolicy::kIdempotent);
+ uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(commandResponse)));
+
+ Grid::get(opCtx)
+ ->catalogCache()
+ ->invalidateShardOrEntireCollectionEntryForShardedCollection(
+ ns(), boost::none, chunk->getShardId());
+ Grid::get(opCtx)
+ ->catalogCache()
+ ->invalidateShardOrEntireCollectionEntryForShardedCollection(
+ ns(), boost::none, to->getId());
+
+ BSONObjBuilder resultbson;
+ resultbson.append("millis", t.millis());
+ result->getBodyBuilder().appendElements(resultbson.obj());
}
+ };
- auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
- auto commandResponse = configShard->runCommand(
- opCtx,
- ReadPreferenceSetting{ReadPreference::PrimaryOnly},
- NamespaceString::kAdminDb.toString(),
- CommandHelpers::appendMajorityWriteConcern(configsvrRequest.toBSON({})),
- Shard::RetryPolicy::kIdempotent);
- uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(commandResponse)));
-
- Grid::get(opCtx)
- ->catalogCache()
- ->invalidateShardOrEntireCollectionEntryForShardedCollection(
- nss, boost::none, chunk->getShardId());
- Grid::get(opCtx)
- ->catalogCache()
- ->invalidateShardOrEntireCollectionEntryForShardedCollection(
- nss, boost::none, to->getId());
-
- result.append("millis", t.millis());
- return true;
- }
-} moveChunk;
+} clusterMoveChunk;
} // namespace
} // namespace mongo
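
Because argument validation now happens through uasserts in the typed invocation, malformed requests fail with structured error codes rather than errmsg strings. A hedged test-style sketch, assuming an 'st' ShardingTest fixture with a sharded collection test.x and a placeholder shard name:

    // Omitting both 'find' and 'bounds' trips the "need to specify query or bounds" check.
    assert.commandFailedWithCode(st.s.adminCommand({moveChunk: "test.x", to: "shard0001"}),
                                 ErrorCodes.InvalidOptions);

    // Supplying both trips the "cannot specify bounds and query at the same time" check.
    assert.commandFailedWithCode(
        st.s.adminCommand(
            {moveChunk: "test.x", find: {_id: 0}, bounds: [{_id: MinKey}, {_id: MaxKey}], to: "shard0001"}),
        ErrorCodes.InvalidOptions);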
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp b/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
deleted file mode 100644
index 38254acf008..00000000000
--- a/src/mongo/s/request_types/commit_chunk_migration_request_test.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/bson/bsonmisc.h"
-#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-
-using unittest::assertGet;
-
-namespace {
-
-const auto kNamespaceString = NamespaceString("TestDB", "TestColl");
-
-const auto kShardId0 = ShardId("shard0");
-const auto kShardId1 = ShardId("shard1");
-
-const auto kKey0 = BSON("Key" << -100);
-const auto kKey1 = BSON("Key" << 100);
-const auto kKey2 = BSON("Key" << -50);
-const auto kKey3 = BSON("Key" << 50);
-
-const char kConfigSvrCommitChunkMigration[] = "_configsvrCommitChunkMigration";
-
-TEST(CommitChunkMigrationRequest, WithoutControlChunk) {
- BSONObjBuilder builder;
-
- ChunkType migratedChunk;
- migratedChunk.setCollectionUUID(UUID::gen());
- migratedChunk.setMin(kKey0);
- migratedChunk.setMax(kKey1);
- migratedChunk.setVersion({12, 7, OID::gen(), Timestamp(1, 1)});
-
- ChunkVersion fromShardCollectionVersion(1, 2, OID::gen(), Timestamp(1, 1));
-
- Timestamp validAfter{1};
-
- CommitChunkMigrationRequest::appendAsCommand(&builder,
- kNamespaceString,
- kShardId0,
- kShardId1,
- migratedChunk,
- fromShardCollectionVersion,
- validAfter);
-
- BSONObj cmdObj = builder.obj();
-
- auto request = assertGet(CommitChunkMigrationRequest::createFromCommand(
- NamespaceString(cmdObj[kConfigSvrCommitChunkMigration].String()), cmdObj));
-
- ASSERT_EQ(kNamespaceString, request.getNss());
- ASSERT_EQ(kShardId0, request.getFromShard());
- ASSERT_EQ(kShardId1, request.getToShard());
- ASSERT_BSONOBJ_EQ(kKey0, request.getMigratedChunk().getMin());
- ASSERT_BSONOBJ_EQ(kKey1, request.getMigratedChunk().getMax());
- ASSERT_TRUE(request.getMigratedChunk().isVersionSet() &&
- request.getMigratedChunk().getVersion().isSet() &&
- request.getMigratedChunk().getVersion().epoch().isSet());
- ASSERT_EQ(fromShardCollectionVersion.epoch(), request.getCollectionEpoch());
-}
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp b/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp
deleted file mode 100644
index 15cb397a2af..00000000000
--- a/src/mongo/s/request_types/commit_chunk_migration_request_type.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include "mongo/s/request_types/commit_chunk_migration_request_type.h"
-
-#include "mongo/bson/util/bson_extract.h"
-
-namespace mongo {
-namespace {
-
-const char kConfigSvrCommitChunkMigration[] = "_configsvrCommitChunkMigration";
-const char kFromShard[] = "fromShard";
-const char kToShard[] = "toShard";
-const char kMigratedChunk[] = "migratedChunk";
-const char kFromShardCollectionVersion[] = "fromShardCollectionVersion";
-const char kValidAfter[] = "validAfter";
-
-/**
- * Attempts to parse a (range-only!) ChunkType from "field" in "source".
- */
-StatusWith<ChunkType> extractChunk(const BSONObj& source, StringData field) {
- BSONElement fieldElement;
- auto status = bsonExtractTypedField(source, field, BSONType::Object, &fieldElement);
- if (!status.isOK())
- return status;
-
- const auto fieldObj = fieldElement.Obj();
-
- auto rangeWith = ChunkRange::fromBSON(fieldObj);
- if (!rangeWith.isOK())
- return rangeWith.getStatus();
-
- ChunkVersion version;
- try {
- version = ChunkVersion::parse(fieldObj[ChunkType::lastmod()]);
- uassert(644490, "Version must be set", version.isSet());
- } catch (const DBException& ex) {
- return ex.toStatus();
- }
-
- ChunkType chunk;
- chunk.setMin(rangeWith.getValue().getMin());
- chunk.setMax(rangeWith.getValue().getMax());
- chunk.setVersion(version);
- return chunk;
-}
-
-/**
- * Attempts to parse a ShardId from "field" in "source".
- */
-StatusWith<ShardId> extractShardId(const BSONObj& source, StringData field) {
- std::string stringResult;
-
- auto status = bsonExtractStringField(source, field, &stringResult);
- if (!status.isOK()) {
- return status;
- }
-
- if (stringResult.empty()) {
- return Status(ErrorCodes::UnsupportedFormat,
- "The field '" + field.toString() + "' cannot be empty");
- }
-
- return ShardId(stringResult);
-}
-
-} // namespace
-
-StatusWith<CommitChunkMigrationRequest> CommitChunkMigrationRequest::createFromCommand(
- const NamespaceString& nss, const BSONObj& obj) {
-
- auto migratedChunk = extractChunk(obj, kMigratedChunk);
- if (!migratedChunk.isOK()) {
- return migratedChunk.getStatus();
- }
-
- CommitChunkMigrationRequest request(nss, std::move(migratedChunk.getValue()));
-
- {
- auto fromShard = extractShardId(obj, kFromShard);
- if (!fromShard.isOK()) {
- return fromShard.getStatus();
- }
-
- request._fromShard = std::move(fromShard.getValue());
- }
-
- {
- auto toShard = extractShardId(obj, kToShard);
- if (!toShard.isOK()) {
- return toShard.getStatus();
- }
-
- request._toShard = std::move(toShard.getValue());
- }
-
- try {
- auto fromShardVersion = ChunkVersion::parse(obj[kFromShardCollectionVersion]);
- request._collectionEpoch = fromShardVersion.epoch();
- request._collectionTimestamp = fromShardVersion.getTimestamp();
- } catch (const DBException& ex) {
- return ex.toStatus();
- }
-
- {
- Timestamp validAfter;
- auto status = bsonExtractTimestampField(obj, kValidAfter, &validAfter);
- if (!status.isOK() && status != ErrorCodes::NoSuchKey) {
- return status;
- }
-
- if (status.isOK()) {
- request._validAfter = validAfter;
- } else {
- request._validAfter = boost::none;
- }
- }
-
- return request;
-}
-
-void CommitChunkMigrationRequest::appendAsCommand(BSONObjBuilder* builder,
- const NamespaceString& nss,
- const ShardId& fromShard,
- const ShardId& toShard,
- const ChunkType& migratedChunk,
- const ChunkVersion& fromShardCollectionVersion,
- const Timestamp& validAfter) {
- invariant(builder->asTempObj().isEmpty());
- invariant(nss.isValid());
-
- builder->append(kConfigSvrCommitChunkMigration, nss.ns());
- builder->append(kFromShard, fromShard.toString());
- builder->append(kToShard, toShard.toString());
- {
- BSONObjBuilder migrateChunk(builder->subobjStart(kMigratedChunk));
- migratedChunk.getRange().append(&migrateChunk);
- migratedChunk.getVersion().serializeToBSON(ChunkType::lastmod(), &migrateChunk);
- }
- fromShardCollectionVersion.serializeToBSON(kFromShardCollectionVersion, builder);
- builder->append(kValidAfter, validAfter);
-}
-
-} // namespace mongo
diff --git a/src/mongo/s/request_types/commit_chunk_migration_request_type.h b/src/mongo/s/request_types/commit_chunk_migration_request_type.h
deleted file mode 100644
index 16d5f0ef8ce..00000000000
--- a/src/mongo/s/request_types/commit_chunk_migration_request_type.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Copyright (C) 2018-present MongoDB, Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the Server Side Public License, version 1,
- * as published by MongoDB, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * Server Side Public License for more details.
- *
- * You should have received a copy of the Server Side Public License
- * along with this program. If not, see
- * <http://www.mongodb.com/licensing/server-side-public-license>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the Server Side Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#pragma once
-
-#include <string>
-
-#include "mongo/db/namespace_string.h"
-#include "mongo/s/catalog/type_chunk.h"
-
-namespace mongo {
-
-/**
- * Creates and parses commit chunk migration command BSON objects.
- */
-class CommitChunkMigrationRequest {
-public:
- CommitChunkMigrationRequest(const NamespaceString& nss, const ChunkType& chunk)
- : _nss(nss), _migratedChunk(chunk) {}
-
- /**
- * Parses the input command and produces a request corresponding to its arguments.
- */
- static StatusWith<CommitChunkMigrationRequest> createFromCommand(const NamespaceString& nss,
- const BSONObj& obj);
-
- /**
- * Constructs a commitChunkMigration command with the specified parameters and writes it to
- * the builder, without closing the builder. The builder must be empty, but callers are free
- * to append more fields once the command has been constructed.
- */
- static void appendAsCommand(BSONObjBuilder* builder,
- const NamespaceString& nss,
- const ShardId& fromShard,
- const ShardId& toShard,
- const ChunkType& migratedChunkType,
- const ChunkVersion& fromShardChunkVersion,
- const Timestamp& validAfter);
-
- const NamespaceString& getNss() const {
- return _nss;
- }
- const ShardId& getFromShard() const {
- return _fromShard;
- }
- const ShardId& getToShard() const {
- return _toShard;
- }
- const ChunkType& getMigratedChunk() const {
- return _migratedChunk;
- }
- const OID& getCollectionEpoch() {
- return _collectionEpoch;
- }
- const Timestamp& getCollectionTimestamp() {
- return _collectionTimestamp;
- }
- const boost::optional<Timestamp>& getValidAfter() {
- return _validAfter;
- }
-
-private:
- // The collection for which this request applies.
- NamespaceString _nss;
-
- // The source shard name.
- ShardId _fromShard;
-
- // The recipient shard name.
- ShardId _toShard;
-
- // The chunk being moved.
- ChunkType _migratedChunk;
-
- // Epoch/Timestamp of the collection, matches the ones set in `_migratedChunk`.
- OID _collectionEpoch;
- Timestamp _collectionTimestamp;
-
- // The time of the move
- boost::optional<Timestamp> _validAfter;
-};
-
-} // namespace mongo
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index dab1615f2f9..08ffea17f3c 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -725,8 +725,13 @@ var ShardingTest = function(params) {
var result;
for (var i = 0; i < 5; i++) {
var otherShard = this.getOther(this.getPrimaryShard(dbName)).name;
- result = this.s.adminCommand(
- {movechunk: c, find: move, to: otherShard, _waitForDelete: waitForDelete});
+ let cmd = {movechunk: c, find: move, to: otherShard};
+
+ if (waitForDelete != null) {
+ cmd._waitForDelete = waitForDelete;
+ }
+
+ result = this.s.adminCommand(cmd);
if (result.ok)
break;