-rw-r--r--  jstests/noPassthrough/indexbg1.js                       | 52
-rw-r--r--  jstests/noPassthrough/indexbg2.js                       | 67
-rw-r--r--  src/mongo/base/error_codes.err                          |  2
-rw-r--r--  src/mongo/db/catalog/collection.cpp                     | 17
-rw-r--r--  src/mongo/db/catalog/collection.h                       |  2
-rw-r--r--  src/mongo/db/catalog/index_create.cpp                   | 12
-rw-r--r--  src/mongo/db/index/btree_based_access_method.cpp        |  7
-rw-r--r--  src/mongo/db/query/internal_plans.h                     |  4
-rw-r--r--  src/mongo/db/query/plan_yield_policy.cpp                |  2
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp      | 17
-rw-r--r--  src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp | 53
11 files changed, 151 insertions(+), 84 deletions(-)
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index 666f80284b6..6300a617e34 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -1,44 +1,43 @@
-// SERVER-13922
-if (0) {
// Test background index creation
-load( "jstests/libs/slow_weekly_util.js" )
+load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( "indexbg1" )
-db = testServer.getDB( "test" );
+var testServer = new SlowWeeklyMongod( "indexbg1" );
+var db = testServer.getDB( "test" );
+var baseName = "jstests_indexbg1";
-parallel = function() {
+var parallel = function() {
return db[ baseName + "_parallelStatus" ];
-}
+};
-resetParallel = function() {
+var resetParallel = function() {
parallel().drop();
-}
+};
-doParallel = function(work) {
+var doParallel = function(work) {
resetParallel();
print("doParallel: " + work);
startMongoProgramNoConnect("mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host);
-}
+};
-doneParallel = function() {
+var doneParallel = function() {
return !!parallel().findOne();
-}
+};
-waitParallel = function() {
+var waitParallel = function() {
assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
-}
+};
-size = 1000 * 1000;
+var size = 200000;
while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ more data
print( "size: " + size );
- baseName = "jstests_indexbg1";
- fullName = "db." + baseName;
- t = db[ baseName ];
+
+ var fullName = "db." + baseName;
+ var t = db[ baseName ];
t.drop();
var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
- for( i = 0; i < size; ++i ) {
+ for( var i = 0; i < size; ++i ) {
bulk.insert({ i: i });
}
assert.writeOK(bulk.execute());
@@ -48,19 +47,19 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
try {
// wait for indexing to start
print("wait for indexing to start");
- assert.soon( function() { return 2 == db.system.indexes.count( {ns:"test."+baseName} ) }, "no index created", 30000, 50 );
+ assert.soon( function() { return 2 == db.system.indexes.count( {ns:"test."+baseName} ); }, "no index created", 30000, 50 );
print("started.");
sleep(1000); // there is a race between when the index build shows up in curop and
// when it first attempts to grab a write lock.
assert.eq( size, t.count() );
assert.eq( 100, t.findOne( {i:100} ).i );
- q = t.find();
+ var q = t.find();
for( i = 0; i < 120; ++i ) { // getmore
q.next();
assert( q.hasNext(), "no next" );
}
- var ex = t.find( {i:100} ).limit(-1).explain("executionStats")
- printjson(ex)
+ var ex = t.find( {i:100} ).limit(-1).explain("executionStats");
+ printjson(ex);
assert( ex.executionStats.totalKeysExamined < 1000 ,
"took too long to find 100: " + tojson( ex ) );
@@ -68,7 +67,7 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
assert.writeOK(t.remove({ i: 40 }, true )); // table scan
assert.writeOK(t.update({ i: 10 }, { i :-10 })); // should scan 10
- id = t.find().hint( {$natural:-1} ).next()._id;
+ var id = t.find().hint( {$natural:-1} ).next()._id;
assert.writeOK(t.update({ _id: id }, { i: -2 } ));
assert.writeOK(t.save({ i: -50 }));
@@ -85,7 +84,7 @@ while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ m
if ( !doneParallel() ) {
throw e;
}
- print("but that's OK")
+ print("but that's OK");
}
print( "going to check if index is done" );
@@ -113,4 +112,3 @@ printjson( gle );
assert( !gle );
testServer.stop();
-} // if(0)
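
The re-enabled test above drives the index build from a parallel shell while issuing reads, getmores, and writes against the collection. The pattern reduces to a few lines — a minimal sketch, assuming the same shell test helper (startMongoProgramNoConnect) and an illustrative collection name "bgtest":

// Seed data, then kick off a background build from a parallel shell so this
// shell stays free to exercise the collection during the build.
var coll = db.bgtest;
coll.drop();
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 100000; ++i) {
    bulk.insert({ i: i });
}
assert.writeOK(bulk.execute());
startMongoProgramNoConnect("mongo", "--eval",
    "db.bgtest.ensureIndex({ i: 1 }, { background: true });",
    db.getMongo().host);
// The index entry shows up in system.indexes as soon as the build starts;
// reads and writes against bgtest remain legal while it runs.
assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test.bgtest" }); },
            "index build did not start", 30000, 50);
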
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index 7e4560703a0..8b5ee185410 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -1,55 +1,53 @@
-// SERVER-13922
-if (0) {
// Test background index creation w/ constraints
-load( "jstests/libs/slow_weekly_util.js" )
+load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( "indexbg2" )
-db = testServer.getDB( "test" );
+var testServer = new SlowWeeklyMongod( "indexbg2" );
+var db = testServer.getDB( "test" );
+var baseName = "jstests_index12";
-parallel = function() {
+var parallel = function() {
return db[ baseName + "_parallelStatus" ];
-}
+};
-resetParallel = function() {
+var resetParallel = function() {
parallel().drop();
-}
+};
-doParallel = function( work ) {
+var doParallel = function( work ) {
resetParallel();
startMongoProgramNoConnect( "mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host );
-}
+};
-doneParallel = function() {
+var doneParallel = function() {
return !!parallel().findOne();
-}
+};
-waitParallel = function() {
+var waitParallel = function() {
assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
-}
+};
-doTest = function(dropDups) {
-
- size = 10000;
+var doTest = function() {
+ "use strict";
+ var size = 10000;
while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
print("size: " + size);
- baseName = "jstests_index12";
- fullName = "db." + baseName;
- t = db[baseName];
+ var fullName = "db." + baseName;
+ var t = db[baseName];
t.drop();
db.eval(function(size) {
- for (i = 0; i < size; ++i) {
+ for (var i = 0; i < size; ++i) {
db.jstests_index12.save({ i: i });
}
},
size);
assert.eq(size, t.count());
- doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true, dropDups:" + dropDups + "} )");
+ doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
try {
// wait for indexing to start
- assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." + baseName }) }, "no index created", 30000, 50);
+ assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test." + baseName }); }, "no index created", 30000, 50);
t.save({ i: 0, n: true });
t.save({ i: size - 1, n: true });
} catch (e) {
@@ -70,22 +68,15 @@ doTest = function(dropDups) {
waitParallel();
- if( dropDups == "true" ) {
- assert.eq(size, t.find().toArray().length, "full query failed");
- assert.eq(size, t.count(), "count failed");
- }
- else {
- /* without dropdups, it could be that there is more than size now but the index failed
- to build - which is valid. we check index isn't there.
- */
- if (t.count() != size)
- assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there");
+ /* it could be that there is more than size now but the index failed
+ to build - which is valid. we check index isn't there.
+ */
+ if (t.count() != size) {
+ assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there");
}
-}
+};
-doTest( "false" );
-doTest( "true" );
+doTest();
testServer.stop();
-} // if (0)
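
With dropDups gone from the test, the interesting race is a unique background build overlapping writes that introduce duplicate keys. A minimal sketch of that scenario, assuming the same test helpers and an illustrative collection name "uniqtest" (the outcome depends on where the scan cursor is when the duplicate lands):

// Seed unique values, then build { i: 1 } as a unique background index.
var coll = db.uniqtest;
coll.drop();
for (var i = 0; i < 10000; ++i) {
    coll.save({ i: i });
}
startMongoProgramNoConnect("mongo", "--eval",
    "db.uniqtest.ensureIndex({ i: 1 }, { background: true, unique: true });",
    db.getMongo().host);
// Duplicating an existing key mid-build is accepted as a write, but it dooms
// the build: once it finishes, only the _id index should remain — the same
// invariant the test above checks when t.count() != size.
coll.save({ i: 0, n: true });
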
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err
index 91a7272c693..566bf6f5429 100644
--- a/src/mongo/base/error_codes.err
+++ b/src/mongo/base/error_codes.err
@@ -83,7 +83,7 @@ error_code("RoleDataInconsistent", 80)
error_code("NoWhereParseContext", 81)
error_code("NoProgressMade", 82)
error_code("RemoteResultsUnavailable", 83)
-error_code("UniqueIndexViolation", 84)
+error_code("DuplicateKeyValue", 84)
error_code("IndexOptionsConflict", 85 )
error_code("IndexKeySpecsConflict", 86 )
error_code("CannotSplit", 87)
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 9b9df921bf1..dcb7697dc02 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -306,6 +306,10 @@ namespace mongo {
/* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
*/
+
+ // At the end of this step, we will have a map of UpdateTickets, one per index, which
+ // represent the index updates needed to be done, based on the changes between objOld and
+ // objNew.
OwnedPointerMap<IndexDescriptor*,UpdateTicket> updateTickets;
IndexCatalog::IndexIterator ii = _indexCatalog.getIndexIterator( txn, true );
while ( ii.more() ) {
@@ -325,7 +329,8 @@ namespace mongo {
}
}
- // this can callback into Collection::recordStoreGoingToMove
+ // This can call back into Collection::recordStoreGoingToMove. If that happens, the old
+ // object is removed from all indexes.
StatusWith<DiskLoc> newLocation = _recordStore->updateRecord( txn,
oldLocation,
objNew.objdata(),
@@ -337,8 +342,12 @@ namespace mongo {
return newLocation;
}
+ // At this point, the old object may or may not still be indexed, depending on if it was
+ // moved.
+
_infoCache.notifyOfWriteOp();
+ // If the object did move, we need to add the new location to all indexes.
if ( newLocation.getValue() != oldLocation ) {
if ( debug ) {
@@ -355,6 +364,8 @@ namespace mongo {
return newLocation;
}
+ // Object did not move. We update each index with each respective UpdateTicket.
+
if ( debug )
debug->keyUpdates = 0;
@@ -391,13 +402,13 @@ namespace mongo {
Status Collection::updateDocumentWithDamages( OperationContext* txn,
const DiskLoc& loc,
const RecordData& oldRec,
- const char* damangeSource,
+ const char* damageSource,
const mutablebson::DamageVector& damages ) {
// Broadcast the mutation so that query results stay correct.
_cursorCache.invalidateDocument(loc, INVALIDATION_MUTATION);
- return _recordStore->updateWithDamages( txn, loc, oldRec, damangeSource, damages );
+ return _recordStore->updateWithDamages( txn, loc, oldRec, damageSource, damages );
}
bool Collection::_enforceQuota( bool userEnforeQuota ) const {
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index fa5a6831be1..0f844e7f3ff 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -202,7 +202,7 @@ namespace mongo {
Status updateDocumentWithDamages( OperationContext* txn,
const DiskLoc& loc,
const RecordData& oldRec,
- const char* damangeSource,
+ const char* damageSource,
const mutablebson::DamageVector& damages );
// -----------
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 6f6146bce14..2f29517d46e 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -199,13 +199,18 @@ namespace mongo {
Timer t;
unsigned long long n = 0;
+
scoped_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(_txn,
_collection->ns().ns(),
_collection));
+ if (_buildInBackground) {
+ exec->setYieldPolicy(PlanExecutor::YIELD_AUTO);
+ }
BSONObj objToIndex;
DiskLoc loc;
- while (PlanExecutor::ADVANCED == exec->getNext(&objToIndex, &loc)) {
+ PlanExecutor::ExecState state;
+ while (PlanExecutor::ADVANCED == (state = exec->getNext(&objToIndex, &loc))) {
{
bool shouldCommitWUnit = true;
WriteUnitOfWork wunit(_txn);
@@ -235,6 +240,11 @@ namespace mongo {
progress->setTotalWhileRunning( _collection->numRecords(_txn) );
}
+ if (state != PlanExecutor::IS_EOF) {
+ uasserted(28550,
+ "Unable to complete index build as the collection is no longer readable");
+ }
+
progress->finished();
Status ret = doneInserting(dupsOut);
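
The new non-EOF check matters because a YIELD_AUTO scan can come back dead — for example, if the collection is dropped while the build is yielded. The user-visible effect, sketched under the same test-harness assumptions (collection name "droptest" is illustrative, and exact timing determines whether the drop wins the race):

// Start a background build on a large collection, then drop the collection
// out from under it; the build should now abort via the 28550 uassert
// instead of continuing on a dead PlanExecutor.
var coll = db.droptest;
coll.drop();
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 500000; ++i) {
    bulk.insert({ i: i });
}
assert.writeOK(bulk.execute());
startMongoProgramNoConnect("mongo", "--eval",
    "printjson(db.droptest.ensureIndex({ i: 1 }, { background: true }));",
    db.getMongo().host);
assert.soon(function() { return 2 == db.system.indexes.count({ ns: "test.droptest" }); });
coll.drop();   // invalidates the yielded collection scan inside the build
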
diff --git a/src/mongo/db/index/btree_based_access_method.cpp b/src/mongo/db/index/btree_based_access_method.cpp
index f8e8ffdd2ed..3a148a37546 100644
--- a/src/mongo/db/index/btree_based_access_method.cpp
+++ b/src/mongo/db/index/btree_based_access_method.cpp
@@ -100,10 +100,11 @@ namespace mongo {
}
}
- if (ErrorCodes::UniqueIndexViolation == status.code()) {
- // We ignore it for some reason in BG indexing.
+ if (ErrorCodes::DuplicateKeyValue == status.code()) {
+ // A document might be indexed multiple times during a background index build
+ // if it moves ahead of the collection scan cursor (e.g. via an update).
if (!_btreeState->isReady(txn)) {
- DEV log() << "info: key already in index during bg indexing (ok)\n";
+ LOG(3) << "key " << *i << " already in index during background indexing (ok)";
continue;
}
}
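
The scenario in the new comment is an mmapv1 artifact: a concurrent update that grows a document can relocate it to a higher DiskLoc, past the scan cursor, so the build encounters it a second time and finds its (key, DiskLoc) pair already in the index. A sketch of a write that can trigger the move (illustrative names; whether the record actually relocates depends on its allocated size):

// While a background build on { i: 1 } is scanning, grow a document so it
// no longer fits in place and must move to a new DiskLoc.
var coll = db.movetest;
coll.save({ i: 1 });
coll.update({ i: 1 }, { i: 1, pad: new Array(4096).join("x") });
// The update maintains the in-progress index at the new location; when the
// scan later reaches the moved record, the (key, DiskLoc) pair is already
// present -- the benign DuplicateKeyValue case logged at LOG(3) above.
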
diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h
index 398c2ccbbb5..88c27b8132b 100644
--- a/src/mongo/db/query/internal_plans.h
+++ b/src/mongo/db/query/internal_plans.h
@@ -74,7 +74,7 @@ namespace mongo {
if (NULL == collection) {
EOFStage* eof = new EOFStage();
PlanExecutor* exec;
- // Takes ownership if 'ws' and 'eof'.
+ // Takes ownership of 'ws' and 'eof'.
Status execStatus = PlanExecutor::make(txn,
ws,
eof,
@@ -85,7 +85,7 @@ namespace mongo {
return exec;
}
- dassert( ns == collection->ns().ns() );
+ invariant( ns == collection->ns().ns() );
CollectionScanParams params;
params.collection = collection;
diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp
index 635f3024f04..7aa624d6ee9 100644
--- a/src/mongo/db/query/plan_yield_policy.cpp
+++ b/src/mongo/db/query/plan_yield_policy.cpp
@@ -62,6 +62,8 @@ namespace mongo {
return true;
}
+ invariant(_planYielding);
+
// No need to yield if the collection is NULL.
if (NULL == _planYielding->collection()) {
return true;
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index c98b95a5965..b18621c3c8d 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -1159,7 +1159,7 @@ namespace mongo {
LocType genericRecordLoc;
genericRecordLoc = recordLoc;
- bool dupsChecked = false;
+ bool dupsCheckedYet = false;
int low = 0;
int high = bucket->n - 1;
@@ -1178,23 +1178,25 @@ namespace mongo {
// there aren't other entries for the key then. as it is very rare that
// we get here, we don't put any coding effort in here to make this
// particularly fast
- if (!dupsChecked) {
+ if (!dupsCheckedYet) {
// This is expensive and we only want to do it once(? -- when would
// it happen twice).
- dupsChecked = true;
+ dupsCheckedYet = true;
if (exists(txn, key)) {
if (wouldCreateDup(txn, key, genericRecordLoc)) {
return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
}
else {
- return Status(ErrorCodes::UniqueIndexViolation, "FIXME");
+ return Status(ErrorCodes::DuplicateKeyValue,
+ "key/value already in index");
}
}
}
}
else {
if (fullKey.recordLoc == recordLoc) {
- return Status(ErrorCodes::UniqueIndexViolation, "FIXME");
+ return Status(ErrorCodes::DuplicateKeyValue,
+ "key/value already in index");
}
else {
return Status(ErrorCodes::DuplicateKey, dupKeyError(key), 11000);
@@ -2261,7 +2263,10 @@ namespace mongo {
txn->recoveryUnit()->writing(&header)->setUsed();
return Status::OK();
}
- return Status(ErrorCodes::UniqueIndexViolation, "FIXME");
+ // The logic in _find() prohibits finding and returning a position if the 'used' bit
+ // in the header is set and dups are disallowed.
+ invariant(dupsAllowed);
+ return Status(ErrorCodes::DuplicateKeyValue, "key/value already in index");
}
DiskLoc childLoc = childLocForPos(bucket, pos);
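
The rename separates the two outcomes cleanly: DuplicateKey (code 11000) is the user-visible unique-constraint violation, raised when a *different* record carries the same key, while DuplicateKeyValue means the identical (key, DiskLoc) pair is already present and is only tolerated internally during background builds. The visible half, as a shell sketch (collection name "duptest" is illustrative):

// A unique index rejects a second record with the same key with E11000.
var coll = db.duptest;
coll.drop();
coll.ensureIndex({ i: 1 }, { unique: true });
assert.writeOK(coll.insert({ i: 1 }));
var res = coll.insert({ i: 1 });   // same key, different record
assert.writeError(res);
assert.eq(11000, res.getWriteError().code);
// DuplicateKeyValue (same key AND same DiskLoc) never reaches the client.
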
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
index 16d773f9a22..2003aaeeb68 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic_test.cpp
@@ -70,9 +70,9 @@ namespace mongo {
ASSERT_EQUALS(nKeys, _helper.btree.fullValidate(&txn, NULL, true, false, 0));
}
- void insert(const BSONObj &key, const DiskLoc dl) {
+ Status insert(const BSONObj &key, const DiskLoc dl, bool dupsAllowed = true) {
OperationContextNoop txn;
- _helper.btree.insert(&txn, key, dl, true);
+ return _helper.btree.insert(&txn, key, dl, dupsAllowed);
}
bool unindex(const BSONObj &key) {
@@ -2027,6 +2027,53 @@ namespace mongo {
}
};
+ template<class OnDiskFormat>
+ class DuplicateKeys : public BtreeLogicTestBase<OnDiskFormat> {
+ public:
+ void run() {
+ OperationContextNoop txn;
+ this->_helper.btree.initAsEmpty(&txn);
+
+ BSONObj key1 = simpleKey('z');
+ ASSERT_OK(this->insert(key1, this->_helper.dummyDiskLoc, true));
+ this->checkValidNumKeys(1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+
+ // Attempt to insert a dup key/value.
+ ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
+ this->insert(key1, this->_helper.dummyDiskLoc, true));
+ this->checkValidNumKeys(1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+
+ // Attempt to insert a dup key/value with dupsAllowed=false.
+ ASSERT_EQUALS(ErrorCodes::DuplicateKeyValue,
+ this->insert(key1, this->_helper.dummyDiskLoc, false));
+ this->checkValidNumKeys(1);
+ this->locate(key1, 0, true, this->_helper.headManager.getHead(&txn), 1);
+
+ // Add another record to produce another diskloc.
+ StatusWith<DiskLoc> s = this->_helper.recordStore.insertRecord(&txn, "a", 1, false);
+
+ ASSERT_TRUE(s.isOK());
+ ASSERT_EQUALS(3, this->_helper.recordStore.numRecords(NULL));
+
+ DiskLoc dummyDiskLoc2 = s.getValue();
+
+ // Attempt to insert a dup key but this time with a different value.
+ ASSERT_EQUALS(ErrorCodes::DuplicateKey, this->insert(key1, dummyDiskLoc2, false));
+ this->checkValidNumKeys(1);
+
+ // Insert a dup key with dupsAllowed=true, should succeed.
+ ASSERT_OK(this->insert(key1, dummyDiskLoc2, true));
+ this->checkValidNumKeys(2);
+
+ // Clean up.
+ this->_helper.recordStore.deleteRecord(&txn, dummyDiskLoc2);
+ ASSERT_EQUALS(2, this->_helper.recordStore.numRecords(NULL));
+ }
+ };
+
+
/* This test requires the entire server to be linked-in and it is better implemented using
the JS framework. Disabling here and will put in jsCore.
@@ -2254,6 +2301,8 @@ namespace mongo {
add< LocateEmptyForward<OnDiskFormat> >();
add< LocateEmptyReverse<OnDiskFormat> >();
+
+ add< DuplicateKeys<OnDiskFormat> >();
}
};