Diffstat (limited to 'src/mongo/db/commands/cleanup_orphaned_cmd.cpp')
-rw-r--r--  src/mongo/db/commands/cleanup_orphaned_cmd.cpp | 443
1 file changed, 212 insertions(+), 231 deletions(-)
diff --git a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
index a6ff2b90a6d..50666033aa6 100644
--- a/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/commands/cleanup_orphaned_cmd.cpp
@@ -50,277 +50,258 @@
#include "mongo/util/log.h"
namespace {
- using mongo::WriteConcernOptions;
+using mongo::WriteConcernOptions;
- const int kDefaultWTimeoutMs = 60 * 1000;
- const WriteConcernOptions DefaultWriteConcern(WriteConcernOptions::kMajority,
- WriteConcernOptions::NONE,
- kDefaultWTimeoutMs);
+const int kDefaultWTimeoutMs = 60 * 1000;
+const WriteConcernOptions DefaultWriteConcern(WriteConcernOptions::kMajority,
+ WriteConcernOptions::NONE,
+ kDefaultWTimeoutMs);
}
namespace mongo {
- using std::endl;
- using std::string;
-
- using mongoutils::str::stream;
-
- enum CleanupResult {
- CleanupResult_Done, CleanupResult_Continue, CleanupResult_Error
- };
-
- /**
- * Cleans up one range of orphaned data starting from a range that overlaps or starts at
- * 'startingFromKey'. If empty, startingFromKey is the minimum key of the sharded range.
- *
- * @return CleanupResult_Continue and 'stoppedAtKey' if orphaned range was found and cleaned
- * @return CleanupResult_Done if no orphaned ranges remain
- * @return CleanupResult_Error and 'errMsg' if an error occurred
- *
- * If the collection is not sharded, returns CleanupResult_Done.
- */
- CleanupResult cleanupOrphanedData( OperationContext* txn,
- const NamespaceString& ns,
- const BSONObj& startingFromKeyConst,
- const WriteConcernOptions& secondaryThrottle,
- BSONObj* stoppedAtKey,
- string* errMsg ) {
-
- BSONObj startingFromKey = startingFromKeyConst;
-
- CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns.toString() );
- if ( !metadata || metadata->getKeyPattern().isEmpty() ) {
-
- warning() << "skipping orphaned data cleanup for " << ns.toString()
- << ", collection is not sharded" << endl;
-
- return CleanupResult_Done;
- }
+using std::endl;
+using std::string;
- BSONObj keyPattern = metadata->getKeyPattern();
- if ( !startingFromKey.isEmpty() ) {
- if ( !metadata->isValidKey( startingFromKey ) ) {
+using mongoutils::str::stream;
- *errMsg = stream() << "could not cleanup orphaned data, start key "
- << startingFromKey
- << " does not match shard key pattern " << keyPattern;
+enum CleanupResult { CleanupResult_Done, CleanupResult_Continue, CleanupResult_Error };
- warning() << *errMsg << endl;
- return CleanupResult_Error;
- }
- }
- else {
- startingFromKey = metadata->getMinKey();
- }
-
- KeyRange orphanRange;
- if ( !metadata->getNextOrphanRange( startingFromKey, &orphanRange ) ) {
+/**
+ * Cleans up one range of orphaned data starting from a range that overlaps or starts at
+ * 'startingFromKey'. If empty, startingFromKey is the minimum key of the sharded range.
+ *
+ * @return CleanupResult_Continue and 'stoppedAtKey' if orphaned range was found and cleaned
+ * @return CleanupResult_Done if no orphaned ranges remain
+ * @return CleanupResult_Error and 'errMsg' if an error occurred
+ *
+ * If the collection is not sharded, returns CleanupResult_Done.
+ */
+CleanupResult cleanupOrphanedData(OperationContext* txn,
+ const NamespaceString& ns,
+ const BSONObj& startingFromKeyConst,
+ const WriteConcernOptions& secondaryThrottle,
+ BSONObj* stoppedAtKey,
+ string* errMsg) {
+ BSONObj startingFromKey = startingFromKeyConst;
+
+ CollectionMetadataPtr metadata = shardingState.getCollectionMetadata(ns.toString());
+ if (!metadata || metadata->getKeyPattern().isEmpty()) {
+ warning() << "skipping orphaned data cleanup for " << ns.toString()
+ << ", collection is not sharded" << endl;
+
+ return CleanupResult_Done;
+ }
- LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
- << " starting from " << startingFromKey
- << ", no orphan ranges remain" << endl;
+ BSONObj keyPattern = metadata->getKeyPattern();
+ if (!startingFromKey.isEmpty()) {
+ if (!metadata->isValidKey(startingFromKey)) {
+ *errMsg = stream() << "could not cleanup orphaned data, start key " << startingFromKey
+ << " does not match shard key pattern " << keyPattern;
- return CleanupResult_Done;
- }
- orphanRange.ns = ns;
- *stoppedAtKey = orphanRange.maxKey;
-
- // We're done with this metadata now, no matter what happens
- metadata.reset();
-
- LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
- << " starting from " << startingFromKey
- << ", removing next orphan range"
- << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")"
- << endl;
-
- // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
- // before delete.
- RangeDeleterOptions deleterOptions(orphanRange);
- deleterOptions.writeConcern = secondaryThrottle;
- deleterOptions.onlyRemoveOrphanedDocs = true;
- deleterOptions.fromMigrate = true;
- // Must wait for cursors since there can be existing cursors with an older
- // CollectionMetadata.
- deleterOptions.waitForOpenCursors = true;
- deleterOptions.removeSaverReason = "cleanup-cmd";
-
- if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
warning() << *errMsg << endl;
return CleanupResult_Error;
}
-
- return CleanupResult_Continue;
+ } else {
+ startingFromKey = metadata->getMinKey();
}
- /**
- * Cleanup orphaned data command. Called on a particular namespace, and if the collection
- * is sharded will clean up a single orphaned data range which overlaps or starts after a
- * passed-in 'startingFromKey'. Returns true and a 'stoppedAtKey' (which will start a
- * search for the next orphaned range if the command is called again) or no key if there
- * are no more orphaned ranges in the collection.
- *
- * If the collection is not sharded, returns true but no 'stoppedAtKey'.
- * On failure, returns false and an error message.
- *
- * Calling this command repeatedly until no 'stoppedAtKey' is returned ensures that the
- * full collection range is searched for orphaned documents, but since sharding state may
- * change between calls there is no guarantee that all orphaned documents were found unless
- * the balancer is off.
- *
- * Safe to call with the balancer on.
- *
- * Format:
- *
- * {
- * cleanupOrphaned: <ns>,
- * // optional parameters:
- * startingAtKey: { <shardKeyValue> }, // defaults to lowest value
- * secondaryThrottle: <bool>, // defaults to true
- * // defaults to { w: "majority", wtimeout: 60000 }. Applies to individual writes.
- * writeConcern: { <writeConcern options> }
- * }
- */
- class CleanupOrphanedCommand : public Command {
- public:
- CleanupOrphanedCommand() :
- Command( "cleanupOrphaned" ) {}
-
- virtual bool slaveOk() const { return false; }
- virtual bool adminOnly() const { return true; }
- virtual bool localHostOnlyIfNoAuth( const BSONObj& cmdObj ) { return false; }
-
- virtual Status checkAuthForCommand( ClientBasic* client,
- const std::string& dbname,
- const BSONObj& cmdObj ) {
- if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
- ResourcePattern::forClusterResource(), ActionType::cleanupOrphaned)) {
- return Status(ErrorCodes::Unauthorized,
- "Not authorized for cleanupOrphaned command.");
- }
- return Status::OK();
- }
+ KeyRange orphanRange;
+ if (!metadata->getNextOrphanRange(startingFromKey, &orphanRange)) {
+ LOG(1) << "orphaned data cleanup requested for " << ns.toString() << " starting from "
+ << startingFromKey << ", no orphan ranges remain" << endl;
- virtual bool isWriteCommandForConfigServer() const { return false; }
+ return CleanupResult_Done;
+ }
+ orphanRange.ns = ns;
+ *stoppedAtKey = orphanRange.maxKey;
+
+ // We're done with this metadata now, no matter what happens
+ metadata.reset();
+
+ LOG(1) << "orphaned data cleanup requested for " << ns.toString() << " starting from "
+ << startingFromKey << ", removing next orphan range"
+ << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")" << endl;
+
+ // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
+ // before delete.
+ RangeDeleterOptions deleterOptions(orphanRange);
+ deleterOptions.writeConcern = secondaryThrottle;
+ deleterOptions.onlyRemoveOrphanedDocs = true;
+ deleterOptions.fromMigrate = true;
+ // Must wait for cursors since there can be existing cursors with an older
+ // CollectionMetadata.
+ deleterOptions.waitForOpenCursors = true;
+ deleterOptions.removeSaverReason = "cleanup-cmd";
+
+ if (!getDeleter()->deleteNow(txn, deleterOptions, errMsg)) {
+ warning() << *errMsg << endl;
+ return CleanupResult_Error;
+ }
- // Input
- static BSONField<string> nsField;
- static BSONField<BSONObj> startingFromKeyField;
+ return CleanupResult_Continue;
+}
- // Output
- static BSONField<BSONObj> stoppedAtKeyField;
+/**
+ * Cleanup orphaned data command. Called on a particular namespace, and if the collection
+ * is sharded will clean up a single orphaned data range which overlaps or starts after a
+ * passed-in 'startingFromKey'. Returns true and a 'stoppedAtKey' (which will start a
+ * search for the next orphaned range if the command is called again) or no key if there
+ * are no more orphaned ranges in the collection.
+ *
+ * If the collection is not sharded, returns true but no 'stoppedAtKey'.
+ * On failure, returns false and an error message.
+ *
+ * Calling this command repeatedly until no 'stoppedAtKey' is returned ensures that the
+ * full collection range is searched for orphaned documents, but since sharding state may
+ * change between calls there is no guarantee that all orphaned documents were found unless
+ * the balancer is off.
+ *
+ * Safe to call with the balancer on.
+ *
+ * Format:
+ *
+ * {
+ * cleanupOrphaned: <ns>,
+ * // optional parameters:
+ * startingAtKey: { <shardKeyValue> }, // defaults to lowest value
+ * secondaryThrottle: <bool>, // defaults to true
+ * // defaults to { w: "majority", wtimeout: 60000 }. Applies to individual writes.
+ * writeConcern: { <writeConcern options> }
+ * }
+ */
+class CleanupOrphanedCommand : public Command {
+public:
+ CleanupOrphanedCommand() : Command("cleanupOrphaned") {}
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ return false;
+ }
- bool run( OperationContext* txn,
- string const &db,
- BSONObj &cmdObj,
- int,
- string &errmsg,
- BSONObjBuilder &result) {
+ virtual Status checkAuthForCommand(ClientBasic* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forClusterResource(), ActionType::cleanupOrphaned)) {
+ return Status(ErrorCodes::Unauthorized, "Not authorized for cleanupOrphaned command.");
+ }
+ return Status::OK();
+ }
- string ns;
- if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
- return false;
- }
+ virtual bool isWriteCommandForConfigServer() const {
+ return false;
+ }
- if ( ns == "" ) {
- errmsg = "no collection name specified";
- return false;
- }
+ // Input
+ static BSONField<string> nsField;
+ static BSONField<BSONObj> startingFromKeyField;
+
+ // Output
+ static BSONField<BSONObj> stoppedAtKeyField;
+
+ bool run(OperationContext* txn,
+ string const& db,
+ BSONObj& cmdObj,
+ int,
+ string& errmsg,
+ BSONObjBuilder& result) {
+ string ns;
+ if (!FieldParser::extract(cmdObj, nsField, &ns, &errmsg)) {
+ return false;
+ }
- BSONObj startingFromKey;
- if ( !FieldParser::extract( cmdObj,
- startingFromKeyField,
- &startingFromKey,
- &errmsg ) ) {
- return false;
- }
+ if (ns == "") {
+ errmsg = "no collection name specified";
+ return false;
+ }
- WriteConcernOptions writeConcern;
- Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);
+ BSONObj startingFromKey;
+ if (!FieldParser::extract(cmdObj, startingFromKeyField, &startingFromKey, &errmsg)) {
+ return false;
+ }
- if (!status.isOK()){
- if (status.code() != ErrorCodes::WriteConcernNotDefined) {
- return appendCommandStatus(result, status);
- }
+ WriteConcernOptions writeConcern;
+ Status status = writeConcern.parseSecondaryThrottle(cmdObj, NULL);
- writeConcern = DefaultWriteConcern;
- }
- else {
- repl::ReplicationCoordinator* replCoordinator =
- repl::getGlobalReplicationCoordinator();
- Status status = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);
-
- if (replCoordinator->getReplicationMode() ==
- repl::ReplicationCoordinator::modeMasterSlave &&
- writeConcern.shouldWaitForOtherNodes()) {
- warning() << "cleanupOrphaned cannot check if write concern setting "
- << writeConcern.toBSON()
- << " can be enforced in a master slave configuration";
- }
-
- if (!status.isOK() && status != ErrorCodes::NoReplicationEnabled) {
- return appendCommandStatus(result, status);
- }
+ if (!status.isOK()) {
+ if (status.code() != ErrorCodes::WriteConcernNotDefined) {
+ return appendCommandStatus(result, status);
}
- if (writeConcern.shouldWaitForOtherNodes() &&
- writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
- // Don't allow no timeout.
- writeConcern.wTimeout = kDefaultWTimeoutMs;
+ writeConcern = DefaultWriteConcern;
+ } else {
+ repl::ReplicationCoordinator* replCoordinator = repl::getGlobalReplicationCoordinator();
+ Status status = replCoordinator->checkIfWriteConcernCanBeSatisfied(writeConcern);
+
+ if (replCoordinator->getReplicationMode() ==
+ repl::ReplicationCoordinator::modeMasterSlave &&
+ writeConcern.shouldWaitForOtherNodes()) {
+ warning() << "cleanupOrphaned cannot check if write concern setting "
+ << writeConcern.toBSON()
+ << " can be enforced in a master slave configuration";
}
- if (!shardingState.enabled()) {
- errmsg = str::stream() << "server is not part of a sharded cluster or "
- << "the sharding metadata is not yet initialized.";
- return false;
+ if (!status.isOK() && status != ErrorCodes::NoReplicationEnabled) {
+ return appendCommandStatus(result, status);
}
+ }
- ChunkVersion shardVersion;
- status = shardingState.refreshMetadataNow(txn, ns, &shardVersion);
- if ( !status.isOK() ) {
- if ( status.code() == ErrorCodes::RemoteChangeDetected ) {
- warning() << "Shard version in transition detected while refreshing "
- << "metadata for " << ns << " at version " << shardVersion << endl;
- }
- else {
- errmsg = str::stream() << "failed to refresh shard metadata: "
- << status.reason();
- return false;
- }
- }
+ if (writeConcern.shouldWaitForOtherNodes() &&
+ writeConcern.wTimeout == WriteConcernOptions::kNoTimeout) {
+ // Don't allow no timeout.
+ writeConcern.wTimeout = kDefaultWTimeoutMs;
+ }
- BSONObj stoppedAtKey;
- CleanupResult cleanupResult = cleanupOrphanedData( txn,
- NamespaceString( ns ),
- startingFromKey,
- writeConcern,
- &stoppedAtKey,
- &errmsg );
+ if (!shardingState.enabled()) {
+ errmsg = str::stream() << "server is not part of a sharded cluster or "
+ << "the sharding metadata is not yet initialized.";
+ return false;
+ }
- if ( cleanupResult == CleanupResult_Error ) {
+ ChunkVersion shardVersion;
+ status = shardingState.refreshMetadataNow(txn, ns, &shardVersion);
+ if (!status.isOK()) {
+ if (status.code() == ErrorCodes::RemoteChangeDetected) {
+ warning() << "Shard version in transition detected while refreshing "
+ << "metadata for " << ns << " at version " << shardVersion << endl;
+ } else {
+ errmsg = str::stream() << "failed to refresh shard metadata: " << status.reason();
return false;
}
+ }
- if ( cleanupResult == CleanupResult_Continue ) {
- result.append( stoppedAtKeyField(), stoppedAtKey );
- }
- else {
- dassert( cleanupResult == CleanupResult_Done );
- }
+ BSONObj stoppedAtKey;
+ CleanupResult cleanupResult = cleanupOrphanedData(
+ txn, NamespaceString(ns), startingFromKey, writeConcern, &stoppedAtKey, &errmsg);
- return true;
+ if (cleanupResult == CleanupResult_Error) {
+ return false;
}
- };
- BSONField<string> CleanupOrphanedCommand::nsField( "cleanupOrphaned" );
- BSONField<BSONObj> CleanupOrphanedCommand::startingFromKeyField( "startingFromKey" );
- BSONField<BSONObj> CleanupOrphanedCommand::stoppedAtKeyField( "stoppedAtKey" );
+ if (cleanupResult == CleanupResult_Continue) {
+ result.append(stoppedAtKeyField(), stoppedAtKey);
+ } else {
+ dassert(cleanupResult == CleanupResult_Done);
+ }
- MONGO_INITIALIZER(RegisterCleanupOrphanedCommand)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
- new CleanupOrphanedCommand();
- return Status::OK();
+ return true;
}
+};
+
+BSONField<string> CleanupOrphanedCommand::nsField("cleanupOrphaned");
+BSONField<BSONObj> CleanupOrphanedCommand::startingFromKeyField("startingFromKey");
+BSONField<BSONObj> CleanupOrphanedCommand::stoppedAtKeyField("stoppedAtKey");
-} // namespace mongo
+MONGO_INITIALIZER(RegisterCleanupOrphanedCommand)(InitializerContext* context) {
+ // Leaked intentionally: a Command registers itself when constructed.
+ new CleanupOrphanedCommand();
+ return Status::OK();
+}
+} // namespace mongo
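
The command header comment above spells out the caller-side contract: issue cleanupOrphaned repeatedly, feeding each returned stoppedAtKey back in as startingFromKey, until a reply comes back with no key. Below is a minimal, standalone C++ sketch of that loop. It is not MongoDB server or driver code; runCleanupOnce, cleanupAllOrphanedRanges, and the canned boundary keys are hypothetical stand-ins for the real command round trip.

#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Stand-in for one cleanupOrphaned round trip. A real caller would send
// { cleanupOrphaned: <ns>, startingFromKey: <key> } to the shard primary and
// read the optional "stoppedAtKey" field from the reply; here a canned list of
// range boundaries keeps the sketch self-contained.
std::optional<std::string> runCleanupOnce(const std::string& ns,
                                          const std::optional<std::string>& startingFromKey) {
    static const std::vector<std::string> boundaries = {"{ x: 10 }", "{ x: 20 }", "{ x: 30 }"};
    std::size_t next = 0;
    if (startingFromKey) {
        while (next < boundaries.size() && boundaries[next] <= *startingFromKey) {
            ++next;
        }
    }
    if (next == boundaries.size()) {
        return std::nullopt;  // no stoppedAtKey in the reply: no orphan ranges remain
    }
    std::cout << "cleaned one orphan range of " << ns << ", stoppedAtKey = " << boundaries[next]
              << '\n';
    return boundaries[next];
}

// Drive the command until no stoppedAtKey is returned, re-feeding each key as
// the next startingFromKey, as the command comment prescribes.
void cleanupAllOrphanedRanges(const std::string& ns) {
    std::optional<std::string> nextKey;  // empty => start from the shard key's MinKey
    do {
        nextKey = runCleanupOnce(ns, nextKey);
    } while (nextKey);
}

int main() {
    cleanupAllOrphanedRanges("test.user");
    return 0;
}

As the comment notes, each call only guarantees a single range, so completeness across calls is only assured with the balancer off, even though the loop itself is safe to run while the balancer is on.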