From 0733ab9c06cabbd9d5d07f68d94714bfb0b2e913 Mon Sep 17 00:00:00 2001
From: Mathias Stearn
Date: Mon, 27 Jul 2015 12:11:57 -0400
Subject: SERVER-19559 Ignore KeyTooLarge errors when rolling back failed updates

---
 .../update_26only_server19559.js                 | 47 ++++++++++++++++++++++
 src/mongo/db/catalog/collection.cpp              | 11 ++---
 src/mongo/db/catalog/collection.h                |  3 +-
 src/mongo/db/catalog/index_catalog.cpp           | 11 +++--
 src/mongo/db/catalog/index_catalog.h             |  4 +-
 src/mongo/db/index/btree_based_access_method.cpp | 14 +++++--
 src/mongo/db/index/btree_interface.cpp           |  8 ++++
 src/mongo/db/index/btree_interface.h             |  4 ++
 src/mongo/db/index/index_access_method.h         |  5 ++-
 src/mongo/db/structure/btree/btree.cpp           | 19 ++++++---
 src/mongo/db/structure/btree/btree.h             |  3 ++
 11 files changed, 108 insertions(+), 21 deletions(-)
 create mode 100644 jstests/noPassthroughWithMongod/update_26only_server19559.js

diff --git a/jstests/noPassthroughWithMongod/update_26only_server19559.js b/jstests/noPassthroughWithMongod/update_26only_server19559.js
new file mode 100644
index 00000000000..040c3fb4eb8
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/update_26only_server19559.js
@@ -0,0 +1,47 @@
+(function() {
+var t = db.server19559;
+
+t.drop();
+t.ensureIndex({otherField : 1}); // This must be created before {data:1}.
+t.ensureIndex({data : 1});
+
+function getDiskLoc() {
+    return t.find()._addSpecial('$showDiskLoc', 1).next().$diskLoc;
+}
+
+function assertCountEq(count) {
+    assert.eq(t.find().itcount(), count);
+    assert.eq(t.find().hint({_id:1}).itcount(), count);
+    assert.eq(t.find().hint({otherField:1}).itcount(), count);
+}
+
+// insert document with large value
+db.adminCommand({setParameter:1, failIndexKeyTooLong:false});
+var bigStrX = new Array(1024).join('x');
+var bigStrY = new Array(1024).join('y'); // same size as bigStrX
+assert.writeOK(t.insert({_id : 1, data: bigStrX}));
+assertCountEq(1);
+var origLoc = getDiskLoc();
+db.adminCommand({setParameter:1, failIndexKeyTooLong:true});
+
+// update to document needing move will fail when it tries to index oversized field. Ensure that it
+// fully reverts to the original state.
+var biggerStr = new Array(4096).join('y');
+assert.writeError(t.update({_id:1}, {$set: {data: biggerStr}}));
+assert.eq(getDiskLoc(), origLoc);
+assertCountEq(1);
+
+// non-moving update that will fail
+assert.writeError(t.update({_id:1}, {$set: {otherField:1, data:bigStrY}}));
+assert.eq(getDiskLoc(), origLoc);
+assertCountEq(1);
+
+// moving update that will succeed since not changing indexed fields.
+assert.writeOK(t.update({_id:1}, {$set: {otherField:1, notIndexed:bigStrY}}));
+assert(!friendlyEqual(getDiskLoc(), origLoc));
+assertCountEq(1);
+
+// remove and assert there are no orphan entries.
+assert.writeOK(t.remove({}));
+assertCountEq(0);
+})();
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 27977ed6793..dae649f304f 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -186,7 +186,7 @@ namespace mongo {
                 return StatusWith<DiskLoc>( ret );
         }
 
-        StatusWith<DiskLoc> status = _insertDocument( docToInsert, enforceQuota, preGen );
+        StatusWith<DiskLoc> status = _insertDocument( docToInsert, enforceQuota, preGen, false );
         if ( status.isOK() ) {
             _details->paddingFits();
         }
@@ -217,7 +217,8 @@ namespace mongo {
 
     StatusWith<DiskLoc> Collection::_insertDocument( const BSONObj& docToInsert,
                                                      bool enforceQuota,
-                                                     const PregeneratedKeys* preGen ) {
+                                                     const PregeneratedKeys* preGen,
+                                                     bool ignoreKeyTooLong ) {
 
         // TODO: for now, capped logic lives inside NamespaceDetails, which is hidden
         //       under the RecordStore, this feels broken since that should be a
@@ -236,7 +237,7 @@ namespace mongo {
         _infoCache.notifyOfWriteOp();
 
         try {
-            _indexCatalog.indexRecord( docToInsert, loc.getValue(), preGen );
+            _indexCatalog.indexRecord( docToInsert, loc.getValue(), preGen, ignoreKeyTooLong );
         }
         catch ( AssertionException& e ) {
             if ( _details->isCapped() ) {
@@ -347,7 +348,7 @@ namespace mongo {
             debug->nmoved += 1;
         }
 
-        StatusWith<DiskLoc> loc = _insertDocument( objNew, enforceQuota, NULL );
+        StatusWith<DiskLoc> loc = _insertDocument( objNew, enforceQuota, NULL, true );
 
         if ( loc.isOK() ) {
            // insert successful, now lets deallocate the old location
@@ -356,7 +357,7 @@ namespace mongo {
         }
         else {
            // new doc insert failed, so lets re-index the old document and location
-            _indexCatalog.indexRecord( objOld, oldLocation );
+            _indexCatalog.indexRecord( objOld, oldLocation, NULL, true );
         }
 
         return loc;
diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h
index 7afabb97c70..4b8e23805d8 100644
--- a/src/mongo/db/catalog/collection.h
+++ b/src/mongo/db/catalog/collection.h
@@ -215,7 +215,8 @@ namespace mongo {
          */
        StatusWith<DiskLoc> _insertDocument( const BSONObj& doc,
                                             bool enforceQuota,
-                                            const PregeneratedKeys* preGen );
+                                            const PregeneratedKeys* preGen,
+                                            bool ignoreUniqueIndex );
 
        void _compactExtent(const DiskLoc diskloc, int extentNumber,
                            MultiIndexBlock& indexesToInsertTo,
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 72a9de02b09..9cee0d97585 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -1177,9 +1177,11 @@
     Status IndexCatalog::_indexRecord( IndexCatalogEntry* index,
                                        const BSONObj& obj,
                                        const DiskLoc &loc,
-                                       const PregeneratedKeysOnIndex* prep ) {
+                                       const PregeneratedKeysOnIndex* prep,
+                                       bool ignoreKeyTooLong ) {
         InsertDeleteOptions options;
         options.logIfError = false;
+        options.ignoreKeyTooLong = ignoreKeyTooLong;
 
         bool isUnique =
             index->descriptor()->isIdIndex() ||
@@ -1188,7 +1190,7 @@ namespace mongo {
         options.dupsAllowed = ignoreUniqueIndex( index->descriptor() ) || !isUnique;
 
         int64_t inserted;
-        return index->accessMethod()->insert(obj, loc, options, &inserted, prep );
+        return index->accessMethod()->insert(obj, loc, options, &inserted, prep);
     }
 
     Status IndexCatalog::_unindexRecord( IndexCatalogEntry* index,
@@ -1233,7 +1235,8 @@
 
     void IndexCatalog::indexRecord( const BSONObj& obj,
                                     const DiskLoc &loc,
-                                    const PregeneratedKeys* preGen ) {
+                                    const PregeneratedKeys* preGen,
+                                    bool ignoreKeyTooLong ) {
 
         for ( IndexCatalogEntryContainer::const_iterator i = _entries.begin();
              i != _entries.end();
@@ -1246,7 +1249,7 @@ namespace mongo {
                 perIndex = preGen->get( entry );
 
             try {
-                Status s = _indexRecord( entry, obj, loc, perIndex );
+                Status s = _indexRecord( entry, obj, loc, perIndex, ignoreKeyTooLong );
                 uassert(s.location(), s.reason(), s.isOK() );
             }
             catch ( AssertionException& ae ) {
diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h
index 525ffbe416e..3ee9f467254 100644
--- a/src/mongo/db/catalog/index_catalog.h
+++ b/src/mongo/db/catalog/index_catalog.h
@@ -252,7 +252,7 @@ namespace mongo {
 
        // this throws for now
        void indexRecord( const BSONObj& obj, const DiskLoc &loc,
-                         const PregeneratedKeys* preGen = NULL );
+                         const PregeneratedKeys* preGen, bool ignoreKeyTooLong );
 
        void unindexRecord( const BSONObj& obj, const DiskLoc& loc, bool noWarn );
 
@@ -303,7 +303,7 @@ namespace mongo {
        Status _indexRecord( IndexCatalogEntry* index,
                             const BSONObj& obj,
                             const DiskLoc &loc,
-                            const PregeneratedKeysOnIndex* pregen );
+                            const PregeneratedKeysOnIndex* pregen, bool ignoreKeyTooLong );
 
        Status _unindexRecord( IndexCatalogEntry* index, const BSONObj& obj,
                               const DiskLoc &loc, bool logIfError );
diff --git a/src/mongo/db/index/btree_based_access_method.cpp b/src/mongo/db/index/btree_based_access_method.cpp
index 9b7ed5effd1..a46a69e3d47 100644
--- a/src/mongo/db/index/btree_based_access_method.cpp
+++ b/src/mongo/db/index/btree_based_access_method.cpp
@@ -87,11 +87,17 @@ namespace mongo {
                 ++*numInserted;
             }
             catch (AssertionException& e) {
-                if (10287 == e.getCode() && !_btreeState->isReady()) {
+                const int code = e.getCode();
+                if (10287 == code && !_btreeState->isReady()) {
                     // This is the duplicate key exception. We ignore it for some reason in BG
                     // indexing.
                     DEV log() << "info: key already in index during bg indexing (ok)\n";
                 }
+                else if (options.ignoreKeyTooLong && (code == 17280 || code == 17281)) {
+                    // Behave as-if failIndexKeyTooLong was false and bt_insert silently didn't
+                    // insert the record.
+                    continue;
+                }
                 else if (!options.dupsAllowed) {
                     // Assuming it's a duplicate key exception. Clean up any inserted keys.
                     for (BSONObjSet::const_iterator j = keysToUse->begin(); j != i; ++j) {
@@ -270,8 +276,8 @@ namespace mongo {
             && (KeyPattern::isIdKeyPattern(_descriptor->keyPattern()) || _descriptor->unique())
             && !options.dupsAllowed;
 
-        if (checkForDups) {
-            for (vector<BSONObj*>::iterator i = data->added.begin(); i != data->added.end(); i++) {
+        for (vector<BSONObj*>::iterator i = data->added.begin(); i != data->added.end(); i++) {
+            if (checkForDups) {
                 if (_interface->wouldCreateDup(_btreeState,
                                                _btreeState->head(),
                                                **i, record)) {
@@ -282,6 +288,8 @@ namespace mongo {
                                       **i));
                 }
             }
+
+            _interface->assertIfKeyTooLongAndNotIgnored(_btreeState, _btreeState->head(), **i);
         }
 
         status->_isValid = true;
diff --git a/src/mongo/db/index/btree_interface.cpp b/src/mongo/db/index/btree_interface.cpp
index ce257314ef1..ace3d45ada8 100644
--- a/src/mongo/db/index/btree_interface.cpp
+++ b/src/mongo/db/index/btree_interface.cpp
@@ -104,6 +104,14 @@ namespace mongo {
                                                                     self);
         }
 
+        virtual void assertIfKeyTooLongAndNotIgnored(const IndexCatalogEntry* btreeState,
+                                                     const DiskLoc& thisLoc,
+                                                     const BSONObj& key) const {
+            typename Version::KeyOwned ownedVersion(key);
+            getBucket( btreeState, thisLoc )->assertIfKeyTooLongAndNotIgnored(btreeState,
+                                                                              ownedVersion);
+        }
+
         virtual void customLocate(const IndexCatalogEntry* btreeState,
                                   DiskLoc& locInOut,
                                   int& keyOfs,
diff --git a/src/mongo/db/index/btree_interface.h b/src/mongo/db/index/btree_interface.h
index 1c24015590e..edeb05dacd5 100644
--- a/src/mongo/db/index/btree_interface.h
+++ b/src/mongo/db/index/btree_interface.h
@@ -76,6 +76,10 @@ namespace mongo {
                                     const BSONObj& key,
                                     const DiskLoc& self) const = 0;
 
+        virtual void assertIfKeyTooLongAndNotIgnored(const IndexCatalogEntry* btreeState,
+                                                     const DiskLoc& thisLoc,
+                                                     const BSONObj& key) const = 0;
+
         virtual void customLocate(const IndexCatalogEntry* btreeState,
                                   DiskLoc& locInOut,
                                   int& keyOfs,
diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h
index ac587f7fdb7..4a0bdccc038 100644
--- a/src/mongo/db/index/index_access_method.h
+++ b/src/mongo/db/index/index_access_method.h
@@ -223,13 +223,16 @@ namespace mongo {
      * Flags we can set for inserts and deletes (and updates, which are kind of both).
      */
     struct InsertDeleteOptions {
-        InsertDeleteOptions() : logIfError(false), dupsAllowed(false) { }
+        InsertDeleteOptions() : logIfError(false), dupsAllowed(false), ignoreKeyTooLong(false) { }
 
         // If there's an error, log() it.
         bool logIfError;
 
         // Are duplicate keys allowed in the index?
         bool dupsAllowed;
+
+        // Ignore key too long failures.
+        bool ignoreKeyTooLong;
     };
 
 } // namespace mongo
diff --git a/src/mongo/db/structure/btree/btree.cpp b/src/mongo/db/structure/btree/btree.cpp
index 3e2286fe418..e356d564a8f 100644
--- a/src/mongo/db/structure/btree/btree.cpp
+++ b/src/mongo/db/structure/btree/btree.cpp
@@ -752,6 +752,19 @@ namespace mongo {
         return false;
     }
 
+    template< class V >
+    void BtreeBucket<V>::assertIfKeyTooLongAndNotIgnored(const IndexCatalogEntry* btreeState,
+                                                         const Key& key) const {
+        if ( key.dataSize() <= getKeyMax() )
+            return;
+
+        string msg = str::stream() << "Btree::insert: key too large to index, failing "
+                                   << btreeState->descriptor()->indexNamespace() << ' '
+                                   << key.dataSize() << ' ' << key.toString();
+        problem() << msg << endl;
+        keyTooLongAssert( 17280, msg );
+    }
+
     template< class V >
     string BtreeBucket<V>::dupKeyError( const IndexDescriptor* idx , const Key& key ) {
         stringstream ss;
@@ -1901,11 +1914,7 @@ namespace mongo {
         dassert(toplevel);
         if ( toplevel ) {
             if ( key.dataSize() > getKeyMax() ) {
-                string msg = str::stream() << "Btree::insert: key too large to index, failing "
-                                           << btreeState->descriptor()->indexNamespace() << ' '
-                                           << key.dataSize() << ' ' << key.toString();
-                problem() << msg << endl;
-                keyTooLongAssert( 17280, msg );
+                assertIfKeyTooLongAndNotIgnored(btreeState, key);
                 return 3;
             }
         }
diff --git a/src/mongo/db/structure/btree/btree.h b/src/mongo/db/structure/btree/btree.h
index 8a22e0ebbed..4756640fa7f 100644
--- a/src/mongo/db/structure/btree/btree.h
+++ b/src/mongo/db/structure/btree/btree.h
@@ -688,6 +688,9 @@ namespace mongo {
                             const Key& key,
                             const DiskLoc& self) const;
 
+        void assertIfKeyTooLongAndNotIgnored(const IndexCatalogEntry* btreeState,
+                                             const Key& key) const;
+
         /**
          * Preconditions: none
          * Postconditions: @return a new bucket allocated from pdfile storage
-- 
cgit v1.2.1