author     Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-01-11 10:59:01 -0500
committer  Kaloian Manassiev <kaloian.manassiev@mongodb.com>  2016-01-11 13:58:55 -0500
commit     8d54bdfba49d0163be875def30e107348177c241 (patch)
tree       ab990338580e8efb1bb1d8a69700ed3baa23c42b /src/mongo/db/dbhelpers.cpp
parent     715a6603f39869afa01079e38990eaa6f227691f (diff)
SERVER-22113 Remove unused sharding-specific getLocsInRange code
Diffstat (limited to 'src/mongo/db/dbhelpers.cpp')
-rw-r--r--  src/mongo/db/dbhelpers.cpp  94
1 file changed, 0 insertions, 94 deletions
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index d1541c2409c..6ea104b6faa 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -465,100 +465,6 @@ long long Helpers::removeRange(OperationContext* txn,
     return numDeleted;
 }
 
-const long long Helpers::kMaxDocsPerChunk(250000);
-
-// Used by migration clone step
-// TODO: Cannot hook up quite yet due to _trackerLocks in shared migration code.
-// TODO: This function is not used outside of tests
-Status Helpers::getLocsInRange(OperationContext* txn,
-                               const KeyRange& range,
-                               long long maxChunkSizeBytes,
-                               set<RecordId>* locs,
-                               long long* numDocs,
-                               long long* estChunkSizeBytes) {
-    const string ns = range.ns;
-    *estChunkSizeBytes = 0;
-    *numDocs = 0;
-
-    AutoGetCollectionForRead ctx(txn, ns);
-
-    Collection* collection = ctx.getCollection();
-    if (!collection) {
-        return Status(ErrorCodes::NamespaceNotFound, ns);
-    }
-
-    // Require single key
-    IndexDescriptor* idx =
-        collection->getIndexCatalog()->findShardKeyPrefixedIndex(txn, range.keyPattern, true);
-
-    if (idx == NULL) {
-        return Status(ErrorCodes::IndexNotFound, range.keyPattern.toString());
-    }
-
-    // use the average object size to estimate how many objects a full chunk would carry
-    // do that while traversing the chunk's range using the sharding index, below
-    // there's a fair amount of slack before we determine a chunk is too large because object
-    // sizes will vary
-    long long avgDocsWhenFull;
-    long long avgDocSizeBytes;
-    const long long totalDocsInNS = collection->numRecords(txn);
-    if (totalDocsInNS > 0) {
-        // TODO: Figure out what's up here
-        avgDocSizeBytes = collection->dataSize(txn) / totalDocsInNS;
-        avgDocsWhenFull = maxChunkSizeBytes / avgDocSizeBytes;
-        avgDocsWhenFull = std::min(kMaxDocsPerChunk + 1, 130 * avgDocsWhenFull / 100 /* slack */);
-    } else {
-        avgDocSizeBytes = 0;
-        avgDocsWhenFull = kMaxDocsPerChunk + 1;
-    }
-
-    // Assume both min and max non-empty, append MinKey's to make them fit chosen index
-    KeyPattern idxKeyPattern(idx->keyPattern());
-    BSONObj min = Helpers::toKeyFormat(idxKeyPattern.extendRangeBound(range.minKey, false));
-    BSONObj max = Helpers::toKeyFormat(idxKeyPattern.extendRangeBound(range.maxKey, false));
-
-
-    // do a full traversal of the chunk and don't stop even if we think it is a large chunk
-    // we want the number of records to better report, in that case
-    bool isLargeChunk = false;
-    long long docCount = 0;
-
-    unique_ptr<PlanExecutor> exec(InternalPlanner::indexScan(txn,
-                                                             collection,
-                                                             idx,
-                                                             min,
-                                                             max,
-                                                             false,  // endKeyInclusive
-                                                             PlanExecutor::YIELD_MANUAL));
-    // we can afford to yield here because any change to the base data that we might miss is
-    // already being queued and will be migrated in the 'transferMods' stage
-    exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
-
-    RecordId loc;
-    PlanExecutor::ExecState state;
-    while (PlanExecutor::ADVANCED == (state = exec->getNext(NULL, &loc))) {
-        if (!isLargeChunk) {
-            locs->insert(loc);
-        }
-
-        if (++docCount > avgDocsWhenFull) {
-            isLargeChunk = true;
-        }
-    }
-
-    *numDocs = docCount;
-    *estChunkSizeBytes = docCount* avgDocSizeBytes;
-
-    if (isLargeChunk) {
-        stringstream ss;
-        ss << estChunkSizeBytes;
-        return Status(ErrorCodes::InvalidLength, ss.str());
-    }
-
-    return Status::OK();
-}
-
-
 void Helpers::emptyCollection(OperationContext* txn, const char* ns) {
     OldClientContext context(txn, ns);
     bool shouldReplicateWrites = txn->writesAreReplicated();
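For reference, the estimation logic the removed getLocsInRange() implemented is small enough to illustrate on its own: derive the average document size from collection stats, compute how many documents would fill a chunk of maxChunkSizeBytes, allow roughly 30% slack, cap the result at kMaxDocsPerChunk (250000), and flag the range as too large once a scan of the key range passes that threshold. The sketch below is a minimal, self-contained restatement of that arithmetic, not MongoDB code; names such as estimateChunkForRange and the sample numbers in main() are illustrative assumptions, and the real function scanned a shard-key index rather than a plain document count.

```cpp
// Minimal sketch (assumed names, not MongoDB code) of the chunk-size
// estimation the removed getLocsInRange() performed.
#include <algorithm>
#include <iostream>
#include <set>

namespace {

const long long kMaxDocsPerChunk = 250000;  // same cap the removed code declared

struct ChunkEstimate {
    long long numDocs = 0;
    long long estSizeBytes = 0;
    bool tooLarge = false;
};

// 'docsInRange' stands in for the index scan over the chunk's key range;
// the record locations it would yield are modelled as sequential ids.
ChunkEstimate estimateChunkForRange(long long docsInRange,
                                    long long collDataSizeBytes,
                                    long long collNumRecords,
                                    long long maxChunkSizeBytes,
                                    std::set<long long>* locs) {
    long long avgDocSizeBytes = 0;
    long long avgDocsWhenFull = kMaxDocsPerChunk + 1;
    if (collNumRecords > 0) {
        avgDocSizeBytes = collDataSizeBytes / collNumRecords;
        avgDocsWhenFull = maxChunkSizeBytes / std::max<long long>(avgDocSizeBytes, 1);
        // ~30% slack before declaring the chunk too large, but never above the cap.
        avgDocsWhenFull = std::min(kMaxDocsPerChunk + 1, 130 * avgDocsWhenFull / 100);
    }

    ChunkEstimate est;
    for (long long id = 0; id < docsInRange; ++id) {
        if (!est.tooLarge) {
            locs->insert(id);  // stop recording locations once the chunk is oversized
        }
        if (++est.numDocs > avgDocsWhenFull) {
            est.tooLarge = true;  // keep counting so the caller still gets a full count
        }
    }
    est.estSizeBytes = est.numDocs * avgDocSizeBytes;
    return est;
}

}  // namespace

int main() {
    std::set<long long> locs;
    // Hypothetical numbers: 10M documents of ~1KB each in the collection,
    // a 64MB max chunk size, and 70k documents inside the requested range.
    ChunkEstimate est = estimateChunkForRange(
        70000, 10LL * 1000 * 1000 * 1024, 10LL * 1000 * 1000, 64 * 1024 * 1024, &locs);
    std::cout << "docs=" << est.numDocs << " estBytes=" << est.estSizeBytes
              << " tooLarge=" << std::boolalpha << est.tooLarge << "\n";
    return 0;
}
```

The slack and the hard document cap exist because the average document size is only an estimate; as the removed code's own comments note, the scan deliberately kept going after the range was judged too large so the caller could still report an accurate document count.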