author     Eric Milkie <milkie@10gen.com>    2017-05-16 08:51:32 -0400
committer  Eric Milkie <milkie@10gen.com>    2017-05-31 14:58:13 -0400
commit     3a3a6def395bab81e7545b15a93bee7799d9efb1 (patch)
tree       7436b62c00f236a8766f750fffc06db4b4059461
parent     9b1be97cca615505df0c14b449837b9cea33563e (diff)
download   mongo-3a3a6def395bab81e7545b15a93bee7799d9efb1.tar.gz
SERVER-29197 initial sync now builds capped indexes correctly
In initial sync, indexes on capped collections were built along with regular indexes, as documents were inserted, using a MultiIndexBlock. That structure requires that no records be deleted before the index builds commit, but the collection cap may delete records prior to commit. Initial sync now builds indexes for a capped collection by creating them on the collection before any records are inserted.
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp | 52
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp      | 31
2 files changed, 61 insertions(+), 22 deletions(-)
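In short, the fix makes initial sync create the indexes on a capped collection while it is still empty and then insert documents through the ordinary insert path. A minimal sketch of that order of operations follows; buildCappedCollectionForInitialSync is a hypothetical helper invented for illustration, and the Collection and IndexCatalog calls mirror the ones in the diffs below and assume this revision's headers, not any stable API.

// Sketch only, not part of the commit: a hypothetical helper showing the order of
// operations initial sync now uses for a capped collection. Identifiers mirror the
// diffs below; signatures may differ in other revisions of the tree.
Status buildCappedCollectionForInitialSync(OperationContext* opCtx,
                                           Collection* collection,
                                           const BSONObj& idIndexSpec,
                                           const std::vector<BSONObj>& secondaryIndexSpecs,
                                           const std::vector<BSONObj>& docs) {
    // 1. Create every index while the collection is still empty, so no
    //    MultiIndexBlock is needed during the document copy.
    {
        WriteUnitOfWork wunit(opCtx);
        if (!idIndexSpec.isEmpty()) {
            auto swIndex = collection->getIndexCatalog()->createIndexOnEmptyCollection(
                opCtx, idIndexSpec);
            if (!swIndex.getStatus().isOK()) {
                return swIndex.getStatus();
            }
        }
        for (auto&& spec : secondaryIndexSpecs) {
            auto swIndex =
                collection->getIndexCatalog()->createIndexOnEmptyCollection(opCtx, spec);
            if (!swIndex.getStatus().isOK()) {
                return swIndex.getStatus();
            }
        }
        wunit.commit();
    }

    // 2. Insert documents through the regular insertDocument path, which keeps the
    //    pre-existing indexes up to date and lets the cap delete old records freely.
    for (auto&& doc : docs) {
        WriteUnitOfWork wunit(opCtx);
        auto status = collection->insertDocument(opCtx, doc, nullptr, false, false);
        if (!status.isOK()) {
            return status;
        }
        wunit.commit();
    }
    return Status::OK();
}

The actual change wires this logic into CollectionBulkLoaderImpl and StorageInterfaceImpl rather than a standalone helper, as the two diffs below show.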
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index 2732376875a..efcaa10231c 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -124,34 +124,46 @@ Status CollectionBulkLoaderImpl::init(Collection* coll,
Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::const_iterator begin,
const std::vector<BSONObj>::const_iterator end) {
int count = 0;
- return _runTaskReleaseResourcesOnFailure(
- [begin, end, &count, this](OperationContext* opCtx) -> Status {
- invariant(opCtx);
- UnreplicatedWritesBlock uwb(opCtx);
+ return _runTaskReleaseResourcesOnFailure([begin, end, &count, this](
+ OperationContext* opCtx) -> Status {
+ invariant(opCtx);
+ UnreplicatedWritesBlock uwb(opCtx);
- for (auto iter = begin; iter != end; ++iter) {
- std::vector<MultiIndexBlock*> indexers;
- if (_idIndexBlock) {
- indexers.push_back(_idIndexBlock.get());
- }
- if (_secondaryIndexesBlock) {
- indexers.push_back(_secondaryIndexesBlock.get());
- }
- MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- WriteUnitOfWork wunit(opCtx);
+ for (auto iter = begin; iter != end; ++iter) {
+ std::vector<MultiIndexBlock*> indexers;
+ if (_idIndexBlock) {
+ indexers.push_back(_idIndexBlock.get());
+ }
+ if (_secondaryIndexesBlock) {
+ indexers.push_back(_secondaryIndexesBlock.get());
+ }
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
+ WriteUnitOfWork wunit(opCtx);
+ if (!indexers.empty()) {
+ // This flavor of insertDocument will not update any pre-existing indexes, only
+ // the indexers passed in.
const auto status = _coll->insertDocument(opCtx, *iter, indexers, false);
if (!status.isOK()) {
return status;
}
- wunit.commit();
+ } else {
+ // For capped collections, we use regular insertDocument, which will update
+ // pre-existing indexes.
+ const auto status = _coll->insertDocument(opCtx, *iter, nullptr, false, false);
+ if (!status.isOK()) {
+ return status;
+ }
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
- _opCtx, "CollectionBulkLoaderImpl::insertDocuments", _nss.ns());
- ++count;
+ wunit.commit();
}
- return Status::OK();
- });
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
+ _opCtx, "CollectionBulkLoaderImpl::insertDocuments", _nss.ns());
+
+ ++count;
+ }
+ return Status::OK();
+ });
}
Status CollectionBulkLoaderImpl::commit() {
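Stripped of the write-conflict retry and commit plumbing, the hunk above amounts to a two-way branch; the sketch below is illustrative only, with the insertDocument signatures inferred from the calls shown above.

// Illustrative sketch, not part of the commit.
if (!indexers.empty()) {
    // Non-capped path: hand the MultiIndexBlock indexers to insertDocument;
    // this flavor does not touch any pre-existing indexes.
    Status status = _coll->insertDocument(opCtx, *iter, indexers, /*enforceQuota=*/false);
} else {
    // Capped path: plain insertDocument, which maintains the indexes that were
    // created on the empty collection and allows the cap to delete old records.
    Status status = _coll->insertDocument(
        opCtx, *iter, /*opDebug=*/nullptr, /*enforceQuota=*/false, /*fromMigrate=*/false);
}

Passing no indexers for a capped collection is what allows the cap to delete records mid-load without invalidating an in-progress MultiIndexBlock.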
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index ae01e86d259..17d1077c559 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -199,12 +199,37 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
collection = db->getDb()->createCollection(opCtx, nss.ns(), options, false);
invariant(collection);
wunit.commit();
+
+ // Build empty capped indexes. Capped indexes cannot be built by the MultiIndexBlock
+ // because the cap might delete documents off the back while we are inserting them into
+ // the front.
+ if (options.capped) {
+ WriteUnitOfWork wunit(opCtx);
+ if (!idIndexSpec.isEmpty()) {
+ auto status = collection->getIndexCatalog()->createIndexOnEmptyCollection(
+ opCtx, idIndexSpec);
+ if (!status.getStatus().isOK()) {
+ return status.getStatus();
+ }
+ }
+ for (auto&& spec : secondaryIndexSpecs) {
+ auto status =
+ collection->getIndexCatalog()->createIndexOnEmptyCollection(opCtx, spec);
+ if (!status.getStatus().isOK()) {
+ return status.getStatus();
+ }
+ }
+ wunit.commit();
+ }
+
+
coll = stdx::make_unique<AutoGetCollection>(opCtx, nss, MODE_IX);
// Move locks into loader, so it now controls their lifetime.
auto loader = stdx::make_unique<CollectionBulkLoaderImpl>(opCtx,
collection,
- idIndexSpec,
+ options.capped ? BSONObj()
+ : idIndexSpec,
std::move(threadPool),
std::move(runner),
std::move(db),
@@ -223,7 +248,9 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
}
invariant(collection);
- status = loaderToReturn->init(collection, secondaryIndexSpecs);
+
+ status = loaderToReturn->init(collection,
+ options.capped ? std::vector<BSONObj>() : secondaryIndexSpecs);
if (!status.isOK()) {
return status;
}