| author | Greg Studer <greg@10gen.com> | 2013-06-07 16:11:49 -0400 |
|---|---|---|
| committer | Greg Studer <greg@10gen.com> | 2013-06-17 12:23:32 -0400 |
| commit | 88b7299f433e6b7bcae6df68a021fee91dc21dd3 (patch) | |
| tree | cc2e42d864b79fba22c3369bd22f01f745edf0c4 /src | |
| parent | 432d1c9546340633dd5f0ef07259708a75f4a87e (diff) | |
| download | mongo-88b7299f433e6b7bcae6df68a021fee91dc21dd3.tar.gz | |
SERVER-8598 preserve semantics of exception-on-conn-failure in trySetVersion
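
The behavioral core of the change is in `d_state.cpp`: metadata loading now reports a `Status`, and `trySetVersion` only swallows `RemoteChangeDetected` (the collection may be dropped or dropping), while connectivity and parsing failures are rethrown so callers keep seeing an exception when the config servers cannot be reached. A condensed excerpt of that hunk (full context in the diff below):

```cpp
MetadataLoader mdLoader( configLoc );
CollectionMetadata* newMetadataRaw = new CollectionMetadata();
CollectionMetadataPtr newMetadata( newMetadataRaw );

Status status = mdLoader.makeCollectionMetadata( ns,
                                                 _shardName,
                                                 currMetadata.get(),
                                                 newMetadataRaw );

if ( status.code() == ErrorCodes::RemoteChangeDetected ) {
    // We loaded something unexpected; the collection may be dropped or dropping.
    version = ChunkVersion( 0, OID() );
    warning() << "did not load new metadata for " << causedBy( status.reason() ) << endl;
    return false;
}
else if ( !status.isOK() ) {
    // Throw on connectivity or parsing errors to maintain the old interface.
    uasserted( 16848, status.reason() );
}
```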
Diffstat (limited to 'src')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/mongo/base/error_codes.err | 1 |
| -rw-r--r-- | src/mongo/s/collection_metadata.h | 12 |
| -rw-r--r-- | src/mongo/s/collection_metadata_test.cpp | 232 |
| -rw-r--r-- | src/mongo/s/d_state.cpp | 28 |
| -rw-r--r-- | src/mongo/s/metadata_loader.cpp | 109 |
| -rw-r--r-- | src/mongo/s/metadata_loader.h | 69 |
| -rw-r--r-- | src/mongo/s/metadata_loader_test.cpp | 509 |
7 files changed, 496 insertions, 464 deletions
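
The interface change driving most of the diff: `MetadataLoader::makeCollectionMetadata` no longer returns a freshly allocated `CollectionMetadata*` (NULL on failure, with an optional error string) but instead fills a caller-owned object and returns a `Status`, using the new `RemoteChangeDetected` code added to `error_codes.err`. A condensed before/after sketch assembled from the `metadata_loader.h` hunk:

```cpp
// Before: pointer-or-NULL result, optional error string.
CollectionMetadata* makeCollectionMetadata( const string& ns,
                                            const string& shard,
                                            const CollectionMetadata* oldMetadata,
                                            string* errMsg );

// After: caller-owned output object, Status return distinguishing failure classes:
//   FailedToParse        - error parsing the remote config data (abnormal)
//   HostUnreachable      - error contacting the config servers
//   RemoteChangeDetected - data was modified by another operation (e.g. a drop)
Status makeCollectionMetadata( const string& ns,
                               const string& shard,
                               const CollectionMetadata* oldMetadata,
                               CollectionMetadata* metadata );
```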
diff --git a/src/mongo/base/error_codes.err b/src/mongo/base/error_codes.err index b4d6d71120b..949c042fb63 100644 --- a/src/mongo/base/error_codes.err +++ b/src/mongo/base/error_codes.err @@ -36,5 +36,6 @@ error_code("RolesNotRelated", 32) error_code("PrivilegeNotFound", 33) error_code("CannotBackfillArray", 34) error_code("UserModificationFailed", 35) +error_code("RemoteChangeDetected", 36) error_class("NetworkError", ["HostUnreachable", "HostNotFound"]) diff --git a/src/mongo/s/collection_metadata.h b/src/mongo/s/collection_metadata.h index 5613e10a687..27bc422509f 100644 --- a/src/mongo/s/collection_metadata.h +++ b/src/mongo/s/collection_metadata.h @@ -44,6 +44,12 @@ namespace mongo { class CollectionMetadata { MONGO_DISALLOW_COPYING(CollectionMetadata); public: + + /** + * Use the MetadataLoader to fill the empty metadata from the config server, or use + * clone*() methods to use existing managers to build new ones. + */ + CollectionMetadata(); ~CollectionMetadata(); // @@ -161,12 +167,6 @@ namespace mongo { RangeMap _rangesMap; /** - * Use the MetadataLoader to build new managers using config server data, or the - * clone*() methods to use existing managers to build new ones. - */ - CollectionMetadata(); - - /** * Returns true if this manager was loaded with all necessary information. */ bool isValid() const; diff --git a/src/mongo/s/collection_metadata_test.cpp b/src/mongo/s/collection_metadata_test.cpp index 46f32d9dc6b..839feb0843b 100644 --- a/src/mongo/s/collection_metadata_test.cpp +++ b/src/mongo/s/collection_metadata_test.cpp @@ -47,6 +47,7 @@ namespace { using mongo::ChunkVersion; using mongo::MockConnRegistry; using mongo::MockRemoteDBServer; + using mongo::Status; using std::string; using std::vector; @@ -68,35 +69,39 @@ namespace { ConnectionString configLoc( CONFIG_HOST_PORT ); MetadataLoader loader( configLoc ); - _metadata.reset( loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, NULL ) ); - ASSERT( _metadata.get() != NULL ); + + Status status = loader.makeCollectionMetadata( "test.foo", + "shard0000", + NULL, + &_metadata ); + ASSERT( status.isOK() ); } void tearDown() { MockConnRegistry::get()->clear(); } - CollectionMetadata* getCollMetadata() const { - return _metadata.get(); + const CollectionMetadata& getCollMetadata() const { + return _metadata; } private: scoped_ptr<MockRemoteDBServer> _dummyConfig; - scoped_ptr<CollectionMetadata> _metadata; + CollectionMetadata _metadata; }; TEST_F(NoChunkFixture, BasicBelongsToMe) { - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << MINKEY)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << 10)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << 10)) ); } TEST_F(NoChunkFixture, CompoudKeyBelongsToMe) { - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << 1 << "b" << 2)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << 1 << "b" << 2)) ); } TEST_F(NoChunkFixture, getNextFromEmpty) { ChunkType nextChunk; - ASSERT( getCollMetadata()->getNextChunk( BSONObj(), &nextChunk ) ); + ASSERT( getCollMetadata().getNextChunk( BSONObj(), &nextChunk ) ); } TEST_F(NoChunkFixture, FirstChunkClonePlus) { @@ -106,9 +111,9 @@ namespace { string errMsg; const ChunkVersion version( 99, 0, OID() ); - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->clonePlus( chunk, - version, - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().clonePlus( chunk, + version, + &errMsg ) ); 
ASSERT( errMsg.empty() ); ASSERT_EQUALS( 1u, cloned->getNumChunks() ); @@ -123,11 +128,11 @@ namespace { chunk.setMax( BSON("a" << 20) ); string errMsg; - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->clonePlus( chunk, - ChunkVersion( 0, - 0, - OID() ), - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().clonePlus( chunk, + ChunkVersion( 0, + 0, + OID() ), + &errMsg ) ); ASSERT( cloned == NULL ); ASSERT_FALSE( errMsg.empty() ); @@ -165,52 +170,56 @@ namespace { ConnectionString configLoc( CONFIG_HOST_PORT ); MetadataLoader loader( configLoc ); - _metadata.reset( loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, NULL ) ); - ASSERT( _metadata.get() != NULL ); + + Status status = loader.makeCollectionMetadata( "test.foo", + "shard0000", + NULL, + &_metadata ); + ASSERT( status.isOK() ); } void tearDown() { MockConnRegistry::get()->clear(); } - CollectionMetadata* getCollMetadata() const { - return _metadata.get(); + const CollectionMetadata& getCollMetadata() const { + return _metadata; } private: scoped_ptr<MockRemoteDBServer> _dummyConfig; - scoped_ptr<CollectionMetadata> _metadata; + CollectionMetadata _metadata; }; TEST_F(SingleChunkFixture, BasicBelongsToMe) { - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 10)) ); - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 15)) ); - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 19)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 10)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 15)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 19)) ); } TEST_F(SingleChunkFixture, DoesntBelongsToMe) { - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << 0)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << 9)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << 20)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << 1234)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << MINKEY)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << 0)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << 9)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << 20)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << 1234)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << MAXKEY)) ); } TEST_F(SingleChunkFixture, CompoudKeyBelongsToMe) { - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 15 << "a" << 14)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 15 << "a" << 14)) ); } TEST_F(SingleChunkFixture, getNextFromEmpty) { ChunkType nextChunk; - ASSERT( getCollMetadata()->getNextChunk( BSONObj(), &nextChunk ) ); + ASSERT( getCollMetadata().getNextChunk( BSONObj(), &nextChunk ) ); ASSERT_EQUALS( 0, nextChunk.getMin().woCompare(BSON("a" << 10)) ); ASSERT_EQUALS( 0, nextChunk.getMax().woCompare(BSON("a" << 20)) ); } TEST_F(SingleChunkFixture, GetNextFromLast) { ChunkType nextChunk; - ASSERT( getCollMetadata()->getNextChunk( BSONObj(), &nextChunk ) ); + ASSERT( getCollMetadata().getNextChunk( BSONObj(), &nextChunk ) ); } TEST_F(SingleChunkFixture, LastChunkCloneMinus) { @@ -220,15 +229,15 @@ namespace { string errMsg; const ChunkVersion zeroVersion( 0, 0, OID() ); - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->cloneMinus( chunk, - zeroVersion, - &errMsg ) ); + 
scoped_ptr<CollectionMetadata> cloned( getCollMetadata().cloneMinus( chunk, + zeroVersion, + &errMsg ) ); ASSERT( errMsg.empty() ); ASSERT_EQUALS( 0u, cloned->getNumChunks() ); ASSERT_EQUALS( cloned->getShardVersion().toLong(), zeroVersion.toLong() ); ASSERT_EQUALS( cloned->getCollVersion().toLong(), - getCollMetadata()->getCollVersion().toLong() ); + getCollMetadata().getCollVersion().toLong() ); ASSERT_FALSE( cloned->keyBelongsToMe(BSON("a" << 15)) ); } @@ -239,9 +248,9 @@ namespace { string errMsg; ChunkVersion version( 99, 0, OID() ); - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->cloneMinus( chunk, - version, - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().cloneMinus( chunk, + version, + &errMsg ) ); ASSERT( cloned == NULL ); ASSERT_FALSE( errMsg.empty() ); @@ -279,31 +288,35 @@ namespace { ConnectionString configLoc( CONFIG_HOST_PORT ); MetadataLoader loader( configLoc ); - _metadata.reset( loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, NULL ) ); - ASSERT( _metadata.get() != NULL ); + + Status status = loader.makeCollectionMetadata( "test.foo", + "shard0000", + NULL, + &_metadata ); + ASSERT( status.isOK() ); } void tearDown() { MockConnRegistry::get()->clear(); } - CollectionMetadata* getCollMetadata() const { - return _metadata.get(); + const CollectionMetadata& getCollMetadata() const { + return _metadata; } private: scoped_ptr<MockRemoteDBServer> _dummyConfig; - scoped_ptr<CollectionMetadata> _metadata; + CollectionMetadata _metadata; }; // Note: no tests for single key belongsToMe because they are not allowed // if shard key is compound. TEST_F(SingleChunkMinMaxCompoundKeyFixture, CompoudKeyBelongsToMe) { - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << MINKEY << "b" << MINKEY)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << MAXKEY << "b" << MAXKEY)) ); - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << MINKEY << "b" << 10)) ); - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 10 << "b" << 20)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY << "b" << MINKEY)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << MAXKEY << "b" << MAXKEY)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << MINKEY << "b" << 10)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 10 << "b" << 20)) ); } /** @@ -345,21 +358,25 @@ namespace { ConnectionString configLoc( CONFIG_HOST_PORT ); MetadataLoader loader( configLoc ); - _metadata.reset( loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, NULL ) ); - ASSERT( _metadata.get() != NULL ); + + Status status = loader.makeCollectionMetadata( "test.foo", + "shard0000", + NULL, + &_metadata ); + ASSERT( status.isOK() ); } void tearDown() { MockConnRegistry::get()->clear(); } - CollectionMetadata* getCollMetadata() const { - return _metadata.get(); + const CollectionMetadata& getCollMetadata() const { + return _metadata; } private: scoped_ptr<MockRemoteDBServer> _dummyConfig; - scoped_ptr<CollectionMetadata> _metadata; + CollectionMetadata _metadata; }; TEST_F(TwoChunksWithGapCompoundKeyFixture, ClonePlusBasic) { @@ -369,12 +386,12 @@ namespace { string errMsg; ChunkVersion version( 1, 0, OID() ); - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->clonePlus( chunk, + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().clonePlus( chunk, version, &errMsg ) ); ASSERT( errMsg.empty() ); - ASSERT_EQUALS( 2u, getCollMetadata()->getNumChunks() ); + ASSERT_EQUALS( 2u, getCollMetadata().getNumChunks() ); 
ASSERT_EQUALS( 3u, cloned->getNumChunks() ); // TODO: test maxShardVersion, maxCollVersion @@ -393,14 +410,14 @@ namespace { chunk.setMax( BSON("a" << 25 << "b" << 0) ); string errMsg; - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->clonePlus( chunk, - ChunkVersion( 1, - 0, - OID() ), - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().clonePlus( chunk, + ChunkVersion( 1, + 0, + OID() ), + &errMsg ) ); ASSERT( cloned == NULL ); ASSERT_FALSE( errMsg.empty() ); - ASSERT_EQUALS( 2u, getCollMetadata()->getNumChunks() ); + ASSERT_EQUALS( 2u, getCollMetadata().getNumChunks() ); } TEST_F(TwoChunksWithGapCompoundKeyFixture, CloneMinusBasic) { @@ -410,12 +427,12 @@ namespace { string errMsg; ChunkVersion version( 2, 0, OID() ); - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->cloneMinus( chunk, - version, - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().cloneMinus( chunk, + version, + &errMsg ) ); ASSERT( errMsg.empty() ); - ASSERT_EQUALS( 2u, getCollMetadata()->getNumChunks() ); + ASSERT_EQUALS( 2u, getCollMetadata().getNumChunks() ); ASSERT_EQUALS( 1u, cloned->getNumChunks() ); // TODO: test maxShardVersion, maxCollVersion @@ -433,14 +450,14 @@ namespace { chunk.setMax( BSON("a" << 28 << "b" << 0) ); string errMsg; - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->cloneMinus( chunk, - ChunkVersion( 1, - 0, - OID() ), - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().cloneMinus( chunk, + ChunkVersion( 1, + 0, + OID() ), + &errMsg ) ); ASSERT( cloned == NULL ); ASSERT_FALSE( errMsg.empty() ); - ASSERT_EQUALS( 2u, getCollMetadata()->getNumChunks() ); + ASSERT_EQUALS( 2u, getCollMetadata().getNumChunks() ); } TEST_F(TwoChunksWithGapCompoundKeyFixture, CloneSplitBasic) { @@ -459,16 +476,16 @@ namespace { ChunkVersion version( 1, 99, OID() ); // first chunk 1|99 , second 1|100 string errMsg; - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->cloneSplit( chunk, - splitKeys, - version, - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().cloneSplit( chunk, + splitKeys, + version, + &errMsg ) ); version.incMinor(); /* second chunk 1|100, first split point */ version.incMinor(); /* third chunk 1|101, second split point */ ASSERT_EQUALS( cloned->getShardVersion().toLong(), version.toLong() /* 1|101 */); ASSERT_EQUALS( cloned->getCollVersion().toLong(), version.toLong() ); - ASSERT_EQUALS( getCollMetadata()->getNumChunks(), 2u ); + ASSERT_EQUALS( getCollMetadata().getNumChunks(), 2u ); ASSERT_EQUALS( cloned->getNumChunks(), 4u ); ASSERT( cloned->keyBelongsToMe( min ) ); ASSERT( cloned->keyBelongsToMe( split1 ) ); @@ -485,16 +502,16 @@ namespace { splitKeys.push_back( BSON("a" << 5 << "b" << 0) ); string errMsg; - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->cloneSplit( chunk, - splitKeys, - ChunkVersion( 1, - 0, - OID() ), - &errMsg ) ); + scoped_ptr<CollectionMetadata> cloned( getCollMetadata().cloneSplit( chunk, + splitKeys, + ChunkVersion( 1, + 0, + OID() ), + &errMsg ) ); ASSERT( cloned == NULL ); ASSERT_FALSE( errMsg.empty() ); - ASSERT_EQUALS( 2u, getCollMetadata()->getNumChunks() ); + ASSERT_EQUALS( 2u, getCollMetadata().getNumChunks() ); } TEST_F(TwoChunksWithGapCompoundKeyFixture, CloneSplitBadChunkRange) { @@ -509,16 +526,16 @@ namespace { splitKeys.push_back( BSON("a" << 15 << "b" << 0) ); string errMsg; - scoped_ptr<CollectionMetadata> cloned( getCollMetadata()->cloneSplit( chunk, - splitKeys, - ChunkVersion( 1, - 0, - OID() ), - &errMsg ) ); + 
scoped_ptr<CollectionMetadata> cloned( getCollMetadata().cloneSplit( chunk, + splitKeys, + ChunkVersion( 1, + 0, + OID() ), + &errMsg ) ); ASSERT( cloned == NULL ); ASSERT_FALSE( errMsg.empty() ); - ASSERT_EQUALS( 2u, getCollMetadata()->getNumChunks() ); + ASSERT_EQUALS( 2u, getCollMetadata().getNumChunks() ); } /** @@ -577,55 +594,54 @@ namespace { ConnectionString configLoc( CONFIG_HOST_PORT ); MetadataLoader loader( configLoc ); - string errmsg; - _metadata.reset( loader.makeCollectionMetadata( "test.foo", + Status status = loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, - &errmsg ) ); - ASSERT( _metadata.get() != NULL ); + &_metadata ); + ASSERT( status.isOK() ); } void tearDown() { MockConnRegistry::get()->clear(); } - CollectionMetadata* getCollMetadata() const { - return _metadata.get(); + const CollectionMetadata& getCollMetadata() const { + return _metadata; } private: scoped_ptr<MockRemoteDBServer> _dummyConfig; - scoped_ptr<CollectionMetadata> _metadata; + CollectionMetadata _metadata; }; TEST_F(ThreeChunkWithRangeGapFixture, ShardOwnsDoc) { - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 5)) ); - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 10)) ); - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 30)) ); - ASSERT( getCollMetadata()->keyBelongsToMe(BSON("a" << 40)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 5)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 10)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 30)) ); + ASSERT( getCollMetadata().keyBelongsToMe(BSON("a" << 40)) ); } TEST_F(ThreeChunkWithRangeGapFixture, ShardDoesntOwnDoc) { - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << 25)) ); - ASSERT_FALSE( getCollMetadata()->keyBelongsToMe(BSON("a" << MAXKEY)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << 25)) ); + ASSERT_FALSE( getCollMetadata().keyBelongsToMe(BSON("a" << MAXKEY)) ); } TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromEmpty) { ChunkType nextChunk; - ASSERT_FALSE( getCollMetadata()->getNextChunk( BSONObj(), &nextChunk ) ); + ASSERT_FALSE( getCollMetadata().getNextChunk( BSONObj(), &nextChunk ) ); ASSERT_EQUALS( 0, nextChunk.getMin().woCompare(BSON("a" << MINKEY)) ); ASSERT_EQUALS( 0, nextChunk.getMax().woCompare(BSON("a" << 10)) ); } TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromMiddle) { ChunkType nextChunk; - ASSERT_FALSE( getCollMetadata()->getNextChunk(BSON("a" << 10), &nextChunk) ); + ASSERT_FALSE( getCollMetadata().getNextChunk(BSON("a" << 10), &nextChunk) ); ASSERT_EQUALS( 0, nextChunk.getMin().woCompare(BSON("a" << 30)) ); ASSERT_EQUALS( 0, nextChunk.getMax().woCompare(BSON("a" << MAXKEY)) ); } TEST_F(ThreeChunkWithRangeGapFixture, GetNextFromLast) { ChunkType nextChunk; - ASSERT( getCollMetadata()->getNextChunk(BSON("a" << 30), &nextChunk) ); + ASSERT( getCollMetadata().getNextChunk(BSON("a" << 30), &nextChunk) ); } } // unnamed namespace diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp index 38bcfa6754d..4d5044fb797 100644 --- a/src/mongo/s/d_state.cpp +++ b/src/mongo/s/d_state.cpp @@ -173,7 +173,7 @@ namespace mongo { CollectionMetadataPtr cloned( p->cloneMinus( chunk, version, &errMsg ) ); // Errors reported via assertions here - uassert( 16844, errMsg, NULL != cloned.get() ); + uassert( 16849, errMsg, NULL != cloned.get() ); // TODO: a bit dangerous to have two different zero-version states - no-metadata and // no-version @@ -194,7 +194,7 @@ namespace mongo { CollectionMetadataPtr cloned( it->second->clonePlus( chunk, version, 
&errMsg ) ); // Errors reported via assertions here - uassert( 16845, errMsg, NULL != cloned.get() ); + uassert( 16850, errMsg, NULL != cloned.get() ); _collMetadata[ns] = cloned; } @@ -217,7 +217,7 @@ namespace mongo { CollectionMetadataPtr cloned( it->second->cloneSplit( chunk, splitKeys, version, &errMsg ) ); // Errors reported via assertions here - uassert( 16846, errMsg, NULL != cloned.get() ); + uassert( 16851, errMsg, NULL != cloned.get() ); _collMetadata[ns] = cloned; } @@ -306,17 +306,23 @@ namespace mongo { } MetadataLoader mdLoader( configLoc ); - CollectionMetadataPtr newMetadata( mdLoader.makeCollectionMetadata( ns, - _shardName, - currMetadata.get(), - &errMsg ) ); - - if ( !newMetadata ) { + CollectionMetadata* newMetadataRaw = new CollectionMetadata(); + CollectionMetadataPtr newMetadata( newMetadataRaw ); + Status status = mdLoader.makeCollectionMetadata( ns, + _shardName, + currMetadata.get(), + newMetadataRaw ); + + if ( status.code() == ErrorCodes::RemoteChangeDetected ) { version = ChunkVersion( 0, OID() ); - warning() << errMsg << endl; - // There was an error getting sharded data for this collection, return false + warning() << "did not load new metadata for " << causedBy( status.reason() ) << endl; + // we loaded something unexpected, the collection may be dropped or dropping return false; } + else if ( !status.isOK() ) { + // Throw exception on connectivity or parsing errors to maintain interface + uasserted( 16848, status.reason() ); + } { // NOTE: This lock prevents the ns version from changing while a write operation occurs. diff --git a/src/mongo/s/metadata_loader.cpp b/src/mongo/s/metadata_loader.cpp index 3ebc241507e..a5fda2d3d3e 100644 --- a/src/mongo/s/metadata_loader.cpp +++ b/src/mongo/s/metadata_loader.cpp @@ -78,28 +78,14 @@ namespace mongo { MetadataLoader::~MetadataLoader() { } - CollectionMetadata* // br - MetadataLoader::makeCollectionMetadata( const string& ns, - const string& shard, - const CollectionMetadata* oldMetadata, - string* errMsg ) + Status MetadataLoader::makeCollectionMetadata( const string& ns, + const string& shard, + const CollectionMetadata* oldMetadata, + CollectionMetadata* metadata ) { - // The error message string is optional. - string dummy; - if ( errMsg == NULL ) { - errMsg = &dummy; - } - - auto_ptr<CollectionMetadata> metadata( new CollectionMetadata ); - if ( initCollection( ns, shard, oldMetadata, metadata.get(), errMsg ) ) { - if ( metadata->getNumChunks() > 0 ) { - dassert(metadata->isValid()); - } - - return metadata.release(); - } - - return NULL; + Status status = initCollection( ns, shard, metadata ); + if ( !status.isOK() || metadata->getKeyPattern().isEmpty() ) return status; + return initChunks( ns, shard, oldMetadata, metadata ); } CollectionMetadata* MetadataLoader::makeEmptyCollectionMetadata() { @@ -110,11 +96,9 @@ namespace mongo { return metadata; } - bool MetadataLoader::initCollection( const string& ns, - const string& shard, - const CollectionMetadata* oldMetadata, - CollectionMetadata* metadata, - string* errMsg ) + Status MetadataLoader::initCollection( const string& ns, + const string& shard, + CollectionMetadata* metadata ) { // // Bring collection entry from the config server. 
@@ -128,19 +112,20 @@ namespace mongo { conn.done(); } catch ( const DBException& e ) { - *errMsg = str::stream() << "caught exception accessing the config servers " - << causedBy( e ); + string errMsg = str::stream() << "could not query collection metadata" + << causedBy( e ); // We deliberately do not return conn to the pool, since it was involved // with the error here. - return false; + return Status( ErrorCodes::HostUnreachable, errMsg ); } } CollectionType collDoc; - if ( !collDoc.parseBSON( collObj, errMsg ) || !collDoc.isValid( errMsg ) ) { - return false; + string errMsg; + if ( !collDoc.parseBSON( collObj, &errMsg ) || !collDoc.isValid( &errMsg ) ) { + return Status( ErrorCodes::FailedToParse, errMsg ); } // @@ -150,10 +135,10 @@ namespace mongo { if ( !collDoc.getKeyPattern().isEmpty() ) { metadata->_keyPattern = collDoc.getKeyPattern(); + metadata->_shardVersion = ChunkVersion( 0, 0, collDoc.getEpoch() ); + metadata->_collVersion = ChunkVersion( 0, 0, collDoc.getEpoch() ); - if ( !initChunks( collDoc, ns, shard, oldMetadata, metadata, errMsg ) ) { - return false; - } + return Status::OK(); } else if ( collDoc.getPrimary() == shard ) { @@ -165,37 +150,34 @@ namespace mongo { metadata->_keyPattern = BSONObj(); metadata->_shardVersion = ChunkVersion( 1, 0, collDoc.getEpoch() ); metadata->_collVersion = metadata->_shardVersion; + + return Status::OK(); } else { - *errMsg = str::stream() << "collection " << ns << " does not have a shard key " - << "and primary " << collDoc.getPrimary() - << " does not match this shard " << shard; - return false; + errMsg = str::stream() << "collection " << ns << " does not have a shard key " + << "and primary " << collDoc.getPrimary() + << " does not match this shard " << shard; + return Status( ErrorCodes::RemoteChangeDetected, errMsg ); } - - return true; } - bool MetadataLoader::initChunks( const CollectionType& collDoc, - const string& ns, - const string& shard, - const CollectionMetadata* oldMetadata, - CollectionMetadata* metadata, - string* errMsg ) + Status MetadataLoader::initChunks( const string& ns, + const string& shard, + const CollectionMetadata* oldMetadata, + CollectionMetadata* metadata ) { - map<string, ChunkVersion> versionMap; - metadata->_collVersion = ChunkVersion( 0, 0, collDoc.getEpoch() ); + OID epoch = metadata->getCollVersion().epoch(); // Check to see if we should use the old version or not. if ( oldMetadata ) { ChunkVersion oldVersion = oldMetadata->getShardVersion(); - if ( oldVersion.isSet() && oldVersion.hasCompatibleEpoch( collDoc.getEpoch() ) ) { + if ( oldVersion.isSet() && oldVersion.hasCompatibleEpoch( epoch ) ) { // Our epoch for coll version and shard version should be the same. - verify(oldMetadata->getCollVersion().hasCompatibleEpoch(collDoc.getEpoch())); + verify( oldMetadata->getCollVersion().hasCompatibleEpoch( epoch ) ); versionMap[shard] = oldMetadata->_shardVersion; metadata->_collVersion = oldMetadata->_collVersion; @@ -223,11 +205,11 @@ namespace mongo { differ.configDiffQuery() ); if ( !cursor.get() ) { - // 'errMsg' was filled by the getChunkCursor() call. metadata->_collVersion = ChunkVersion(); metadata->_chunksMap.clear(); conn.done(); - return false; + return Status( ErrorCodes::HostUnreachable, + "problem opening chunk metadata cursor" ); } // Diff tracker should *always* find at least one chunk if this shard owns a chunk. 
@@ -240,44 +222,43 @@ namespace mongo { metadata->_shardVersion = versionMap[shard]; metadata->fillRanges(); conn.done(); - return true; + return Status::OK(); } else if ( diffsApplied == 0 ) { warning() << "no chunks found when reloading " << ns << ", previous version was " << metadata->_collVersion.toString() << endl; - metadata->_collVersion = ChunkVersion(); + metadata->_collVersion = ChunkVersion( 0, 0, OID() ); metadata->_chunksMap.clear(); conn.done(); - return true; + return Status::OK(); } else { // TODO: make this impossible by making sure we don't migrate / split on this // shard during the reload. No chunks were found for the ns. - *errMsg = str::stream() << "invalid chunks found when reloading " << ns - << ", previous version was " - << metadata->_collVersion.toString() - << ", this should be rare"; + string errMsg = str::stream() << "invalid chunks found when reloading " << ns + << ", previous version was " + << metadata->_collVersion.toString() + << ", this should be rare"; warning() << errMsg << endl; - metadata->_collVersion = ChunkVersion(); + metadata->_collVersion = ChunkVersion( 0, 0, OID() ); metadata->_chunksMap.clear(); conn.done(); - return false; + return Status( ErrorCodes::RemoteChangeDetected, errMsg ); } } catch ( const DBException& e ) { - *errMsg = str::stream() << "caught exception accessing the config servers" - << causedBy( e ); + string errMsg = str::stream() << "problem querying chunks metadata" << causedBy( e ); // We deliberately do not return connPtr to the pool, since it was involved // with the error here. - return false; + return Status( ErrorCodes::HostUnreachable, errMsg ); } } diff --git a/src/mongo/s/metadata_loader.h b/src/mongo/s/metadata_loader.h index 0fa4d69f3dc..c7a6ef0248d 100644 --- a/src/mongo/s/metadata_loader.h +++ b/src/mongo/s/metadata_loader.h @@ -18,6 +18,7 @@ #include <string> +#include "mongo/base/status.h" #include "mongo/client/dbclientinterface.h" #include "mongo/db/jsobj.h" @@ -51,18 +52,24 @@ namespace mongo { ~MetadataLoader(); /** - * Returns a new metadata instance representing the chunkset of the collection 'ns' - * (or its entirety, if not sharded) that lives on 'shard'. Optionally, uses an - * 'oldMetadata' for the same 'ns'/'shard'; the contents of 'oldMetadata' can help - * reducing the amount of data read from the config servers. + * Fills a new metadata instance representing the chunkset of the collection 'ns' + * (or its entirety, if not sharded) that lives on 'shard' with data from the config server. + * Optionally, uses an 'oldMetadata' for the same 'ns'/'shard'; the contents of + * 'oldMetadata' can help reducing the amount of data read from the config servers. * - * If the collection's information can't be loaded, returns NULL and fill in 'errMsg' - * with a description, if 'errMsg' was provided. + * OK on success. 
+ * + * Failure return values: + * Abnormal: + * @return FailedToParse if there was an error parsing the remote config data + * Normal: + * @return HostUnreachable if there was an error contacting the config servers + * @return RemoteChangeDetected if the data loaded was modified by another operation */ - CollectionMetadata* makeCollectionMetadata( const string& ns, - const string& shard, - const CollectionMetadata* oldMetadata, - string* errMsg ); + Status makeCollectionMetadata( const string& ns, + const string& shard, + const CollectionMetadata* oldMetadata, + CollectionMetadata* metadata ); /** * Returns a new metadata instance representing an non-sharded, empty collection with @@ -74,31 +81,33 @@ namespace mongo { ConnectionString _configLoc; /** - * Returns true and fills in the internal state of 'metadata' to portray the portion of - * the collection 'ns' that lives in 'shard'. If provided, uses the contents of - * 'oldMetadata' as a base, which allows less data to be brought from the config - * server. If information about the collection can be accessed or is invalid, returns - * false and fills in an error description on '*errMsg', which is mandatory here. + * Returns OK and fills in the internal state of 'metadata' with general collection + * information, not including chunks. + * + * If information about the collection can be accessed or is invalid, returns: + * @return FailedToParse if there was an error parsing the remote config data + * @return HostUnreachable if there was an error contacting the config servers + * @return RemoteChangeDetected if the collection doc loaded is unexpectedly different + * */ - bool initCollection( const string& ns, - const string& shard, - const CollectionMetadata* oldMetadata, - CollectionMetadata* metadata, - string* errMsg ); + Status initCollection( const string& ns, + const string& shard, + CollectionMetadata* metadata ); /** - * Returns true and fills in the chunk state of 'metadata' to portray the chunks of the + * Returns OK and fills in the chunk state of 'metadata' to portray the chunks of the * collection 'ns' that sit in 'shard'. If provided, uses the contents of 'oldMetadata' - * as a base (see description in initCollection above). If information about the - * chunks can be accessed or is invalid, returns false and fills in an error - * description on '*errMsg', which is mandatory here. + * as a base (see description in initCollection above). 
+ * + * If information about the chunks can be accessed or is invalid, returns: + * @return HostUnreachable if there was an error contacting the config servers + * @return RemoteChangeDetected if the chunks loaded are unexpectedly different + * TODO: @return FailedToParse */ - bool initChunks( const CollectionType& collDoc, - const string& ns, - const string& shard, - const CollectionMetadata* oldMetadata, - CollectionMetadata* metadata, - string* errMsg ); + Status initChunks( const string& ns, + const string& shard, + const CollectionMetadata* oldMetadata, + CollectionMetadata* metadata ); }; } // namespace mongo diff --git a/src/mongo/s/metadata_loader_test.cpp b/src/mongo/s/metadata_loader_test.cpp index 9d86ad831ab..2d8724441f9 100644 --- a/src/mongo/s/metadata_loader_test.cpp +++ b/src/mongo/s/metadata_loader_test.cpp @@ -18,6 +18,7 @@ #include <vector> +#include "mongo/base/status.h" #include "mongo/client/connpool.h" #include "mongo/client/dbclientinterface.h" #include "mongo/db/jsobj.h" @@ -41,6 +42,7 @@ namespace { using mongo::CollectionType; using mongo::ConnectionString; using mongo::Date_t; + using mongo::ErrorCodes; using mongo::HostAndPort; using mongo::MAXKEY; using mongo::MINKEY; @@ -49,12 +51,14 @@ namespace { using mongo::MockConnRegistry; using mongo::MockRemoteDBServer; using mongo::ScopedDbConnection; + using mongo::Status; using std::string; using std::vector; const std::string CONFIG_HOST_PORT = "$dummy_config:27017"; // TODO: Test config server down + // TODO: Test read of chunks with new epoch // TODO: Test that you can properly load config using format with deprecated fields? TEST(MetadataLoader, DroppedColl) { @@ -62,8 +66,16 @@ namespace { mongo::ConnectionString::setConnectionHook( MockConnRegistry::get()->getConnStrHook() ); MockConnRegistry::get()->addServer( &dummyConfig ); - dummyConfig.insert( CollectionType::ConfigNS, BSON(CollectionType::ns("test.foo") << - CollectionType::dropped(true)) ); + CollectionType collInfo; + collInfo.setNS( "test.foo"); + collInfo.setUpdatedAt( 0 ); + collInfo.setEpoch( OID() ); + collInfo.setDropped( true ); + + string errMsg; + ASSERT( collInfo.isValid( &errMsg ) ); + + dummyConfig.insert( CollectionType::ConfigNS, collInfo.toBSON() ); dummyConfig.insert( ChunkType::ConfigNS, BSONObj() ); @@ -72,13 +84,13 @@ namespace { MetadataLoader loader( configLoc ); string errmsg; - scoped_ptr<CollectionMetadata> metadata; - metadata.reset( loader.makeCollectionMetadata( "test.foo", // br + CollectionMetadata metadata; + Status status = loader.makeCollectionMetadata( "test.foo", // br "shard0000", NULL, /* no old metadata */ - &errmsg ) ); + &metadata ); - ASSERT_FALSE( errmsg.empty() ); + ASSERT_EQUALS( status.code(), ErrorCodes::RemoteChangeDetected ); MockConnRegistry::get()->clear(); ScopedDbConnection::clearPool(); @@ -115,24 +127,27 @@ namespace { ConnectionString configLoc( confServerStr ); MetadataLoader loader( configLoc ); - CollectionMetadata* metadata = - loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ - NULL /* no need for errMsg */); - ASSERT_TRUE( metadata != NULL ); - ASSERT_EQUALS( 0U, metadata->getNumChunks() ); + CollectionMetadata metadata; + Status status = loader.makeCollectionMetadata( "test.foo", // br + "shard0000", + NULL, /* no old metadata */ + &metadata ); + ASSERT_TRUE( status.isOK() ); + ASSERT_EQUALS( 0U, metadata.getNumChunks() ); } TEST_F(NoChunkFixture, VersionIsZero) { ConnectionString confServerStr( CONFIG_HOST_PORT ); ConnectionString configLoc( confServerStr 
); MetadataLoader loader( configLoc ); - CollectionMetadata* metadata = - loader.makeCollectionMetadata( "test.foo", // br - "shard0000", - NULL, /* no old metadata */ - NULL /* no need for errMsg */); - ASSERT_TRUE( metadata != NULL ); - ASSERT_EQUALS( 0U, metadata->getShardVersion().toLong() ); + CollectionMetadata metadata; + Status status = loader.makeCollectionMetadata( "test.foo", // br + "shard0000", + NULL, /* no old metadata */ + &metadata ); + + ASSERT( status.isOK() ); + ASSERT_EQUALS( 0U, metadata.getShardVersion().toLong() ); } class ConfigServerFixture : public mongo::unittest::Test { @@ -188,54 +203,56 @@ namespace { ConnectionString confServerStr( CONFIG_HOST_PORT ); ConnectionString configLoc( confServerStr ); MetadataLoader loader( configLoc ); - CollectionMetadata* metadata = - loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ - NULL /* no need for errMsg */); - ASSERT_TRUE( metadata != NULL ); - ASSERT_EQUALS( 1U, metadata->getNumChunks() ); + CollectionMetadata metadata; + Status status = loader.makeCollectionMetadata( "test.foo", // br + "shard0000", + NULL, /* no old metadata */ + &metadata ); + ASSERT( status.isOK() ); + ASSERT_EQUALS( 1U, metadata.getNumChunks() ); } TEST_F(ConfigServerFixture, SingleChunkGetNext) { ConnectionString confServerStr( CONFIG_HOST_PORT ); ConnectionString configLoc( confServerStr ); MetadataLoader loader( configLoc ); - CollectionMetadata* metadata = - loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ - NULL /* no need for errMsg */); + CollectionMetadata metadata; + loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ + &metadata ); ChunkType chunkInfo; - ASSERT_TRUE( metadata->getNextChunk(BSON("a" << MINKEY), &chunkInfo) ); + ASSERT_TRUE( metadata.getNextChunk(BSON("a" << MINKEY), &chunkInfo) ); } TEST_F(ConfigServerFixture, SingleChunkGetShardKey) { ConnectionString confServerStr( CONFIG_HOST_PORT ); ConnectionString configLoc( confServerStr ); MetadataLoader loader( configLoc ); - CollectionMetadata* metadata = - loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ - NULL /* no need for errMsg */); - ASSERT_TRUE( metadata->getKeyPattern().equal(BSON("a" << 1)) ); + CollectionMetadata metadata; + loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ + &metadata ); + ASSERT_TRUE( metadata.getKeyPattern().equal(BSON("a" << 1)) ); } TEST_F(ConfigServerFixture, SingleChunkGetMaxCollVersion) { ConnectionString confServerStr( CONFIG_HOST_PORT ); ConnectionString configLoc( confServerStr ); MetadataLoader loader( configLoc ); - CollectionMetadata* metadata = - loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ - NULL /* no need for errMsg */); + CollectionMetadata metadata; + loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ + &metadata ); - ASSERT_TRUE( getMaxCollVersion().isEquivalentTo( metadata->getCollVersion() ) ); + ASSERT_TRUE( getMaxCollVersion().isEquivalentTo( metadata.getCollVersion() ) ); } TEST_F(ConfigServerFixture, SingleChunkGetMaxShardVersion) { ConnectionString confServerStr( CONFIG_HOST_PORT ); ConnectionString configLoc( confServerStr ); MetadataLoader loader( configLoc ); - CollectionMetadata* metadata = - loader.makeCollectionMetadata( "test.foo", "shard0000", NULL, /* no old metadata */ - NULL /* no need for errMsg */); + CollectionMetadata metadata; + loader.makeCollectionMetadata( 
"test.foo", "shard0000", NULL, /* no old metadata */ + &metadata ); - ASSERT_TRUE( getMaxShardVersion().isEquivalentTo( metadata->getShardVersion() ) ); + ASSERT_TRUE( getMaxShardVersion().isEquivalentTo( metadata.getShardVersion() ) ); } TEST_F(ConfigServerFixture, NoChunks) { @@ -244,235 +261,237 @@ namespace { ConnectionString confServerStr( CONFIG_HOST_PORT ); ConnectionString configLoc( confServerStr ); MetadataLoader loader( configLoc ); + CollectionMetadata metadata; + Status status = loader.makeCollectionMetadata( "test.foo", // br + "shard0000", + NULL, /* no old metadata */ + &metadata ); - CollectionMetadata* metadata = loader.makeCollectionMetadata( "test.foo", // br - "shard0000", - NULL, /* no old metadata */ - NULL ); - - ASSERT_TRUE( metadata != NULL ); + ASSERT( status.isOK() ); ChunkVersion versionZero( 0, 0, OID() ); - ASSERT_EQUALS( versionZero.toLong(), metadata->getCollVersion().toLong() ); - ASSERT_EQUALS( versionZero.toLong(), metadata->getShardVersion().toLong() ); - ASSERT_EQUALS( 0U, metadata->getNumChunks() ); + ASSERT_EQUALS( versionZero.toLong(), metadata.getCollVersion().toLong() ); + ASSERT_EQUALS( versionZero.toLong(), metadata.getShardVersion().toLong() ); + ASSERT_EQUALS( 0U, metadata.getNumChunks() ); } #if 0 -// TODO: MockServer functionality does not support selective query - consider -// inserting nothing at all to chunk/collections collection -TEST_F(ConfigServerFixture, EmptyDataForNS) { - ConnectionString confServerStr(CONFIG_HOST_PORT); - ConnectionString configLoc(confServerStr); - MetadataLoader loader(configLoc); - CollectionMetadata* metadata = loader.makeCollectionMetadata("not.sharded", - "shard0000", - NULL, /* no old metadata */ - NULL /* no need for errMsg */); - ASSERT_TRUE(metadata == NULL); -} + // TODO: MockServer functionality does not support selective query - consider + // inserting nothing at all to chunk/collections collection + TEST_F(ConfigServerFixture, EmptyDataForNS) { + ConnectionString confServerStr( CONFIG_HOST_PORT ); + ConnectionString configLoc( confServerStr ); + MetadataLoader loader( configLoc ); + CollectionMetadata metadata; + Status status = loader.makeCollectionMetadata( "not.sharded", // br + "shard0000", + NULL, /* no old metadata */ + &metadata ); + ASSERT( !status.isOK() ); + } #endif #if 0 -// TODO: d_chunk_manager_test has no tests for passing old ShardChunkManager -class TwoChunkFixture : public mongo::unittest::Test { -protected: - void setUp() { - _dummyConfig.reset(new MockRemoteDBServer(CONFIG_HOST_PORT)); - mongo::ConnectionString::setConnectionHook(MockConnRegistry::get()->getConnStrHook()); - MockConnRegistry::get()->addServer(_dummyConfig.get()); - - OID epoch = OID::gen(); - _maxCollVersion = ChunkVersion(1, 0, epoch); - - BSONObj collFoo = BSON(CollectionType::ns("test.foo") << - CollectionType::keyPattern(BSON("a" << 1)) << - CollectionType::unique(false) << - CollectionType::updatedAt(1ULL) << - CollectionType::epoch(epoch)); - _dummyConfig->insert(CollectionType::ConfigNS, collFoo); - - BSONObj fooSingle = BSON(ChunkType::name("test.foo-a_MinKey") << - ChunkType::ns("test.foo") << - ChunkType::min(BSON("a" << MINKEY)) << - ChunkType::max(BSON("a" << MAXKEY)) << - ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << - ChunkType::DEPRECATED_epoch(epoch) << - ChunkType::shard("shard0000")); - _dummyConfig->insert(ChunkType::ConfigNS, fooSingle); - - ConnectionString confServerStr(CONFIG_HOST_PORT); - ConnectionString configLoc(confServerStr); - MetadataLoader loader(configLoc); - 
_oldMetadata = loader.makeCollectionMetadata("not.sharded", - "shard0000", - NULL, /* no old metadata */ - NULL /* no need for errMsg */); - ASSERT(_oldMetadata != NULL); - - // Needs to delete the collection and rebuild because the mock server - // not support updates. - _dummyConfig->remove(CollectionType::ConfigNS, BSONObj()); - _dummyConfig->remove(ChunkType::ConfigNS, BSONObj()); - - OID epoch2 = OID::gen(); - _maxCollVersion = ChunkVersion(2, 0, epoch2); - - BSONObj collFoo = BSON(CollectionType::ns("test.foo") << - CollectionType::keyPattern(BSON("a" << 1)) << - CollectionType::unique(false) << - CollectionType::updatedAt(2ULL) << - CollectionType::epoch(epoch2)); - _dummyConfig->insert(CollectionType::ConfigNS, collFoo); - - BSONObj chunk1 = BSON(ChunkType::name("test.foo-a_MinKey") << - ChunkType::ns("test.foo") << - ChunkType::min(BSON("a" << MINKEY)) << - ChunkType::max(BSON("a" << 100)) << - ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << - ChunkType::DEPRECATED_epoch(epoch2) << - ChunkType::shard("shard0000")); - _dummyConfig->insert(ChunkType::ConfigNS, chunk1); - - BSONObj chunk2 = BSON(ChunkType::name("test.foo-a_100") << - ChunkType::ns("test.foo") << - ChunkType::min(BSON("a" << 100)) << - ChunkType::max(BSON("a" << MAXKEY)) << - ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << - ChunkType::DEPRECATED_epoch(epoch2) << - ChunkType::shard("shard0000")); - _dummyConfig->insert(ChunkType::ConfigNS, chunk2); - } + // TODO: d_chunk_manager_test has no tests for passing old ShardChunkManager + class TwoChunkFixture : public mongo::unittest::Test { + protected: + void setUp() { + _dummyConfig.reset( new MockRemoteDBServer( CONFIG_HOST_PORT ) ); + mongo::ConnectionString::setConnectionHook( MockConnRegistry::get()->getConnStrHook() ); + MockConnRegistry::get()->addServer( _dummyConfig.get() ); - void tearDown() { - MockConnRegistry::get()->removeServer(_dummyConfig->getServerAddress()); - } + OID epoch = OID::gen(); + _maxCollVersion = ChunkVersion( 1, 0, epoch ); - ChunkVersion getCollVersion() const { - return _maxCollVersion; - } + BSONObj collFoo = BSON(CollectionType::ns("test.foo") << + CollectionType::keyPattern(BSON("a" << 1)) << + CollectionType::unique(false) << + CollectionType::updatedAt(1ULL) << + CollectionType::epoch(epoch)); + _dummyConfig->insert( CollectionType::ConfigNS, collFoo ); - const ChunkVersion& getShardVersion(size_t shard) const { - return _maxCollVersion; - } + BSONObj fooSingle = BSON(ChunkType::name("test.foo-a_MinKey") << + ChunkType::ns("test.foo") << + ChunkType::min(BSON("a" << MINKEY)) << + ChunkType::max(BSON("a" << MAXKEY)) << + ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << + ChunkType::DEPRECATED_epoch(epoch) << + ChunkType::shard("shard0000")); + _dummyConfig->insert( ChunkType::ConfigNS, fooSingle ); - const CollectionMetadata* getOldMetadata() const { - return _oldMetadata; - } + ConnectionString confServerStr( CONFIG_HOST_PORT ); + ConnectionString configLoc( confServerStr ); + MetadataLoader loader( configLoc ); + Status status = loader.makeCollectionMetadata( "not.sharded", // br + "shard0000", + NULL, /* no old metadata */ + &_oldMetadata ); + ASSERT( status.isOK() ); + + // Needs to delete the collection and rebuild because the mock server + // not support updates. 
+ _dummyConfig->remove( CollectionType::ConfigNS, BSONObj() ); + _dummyConfig->remove( ChunkType::ConfigNS, BSONObj() ); + + OID epoch2 = OID::gen(); + _maxCollVersion = ChunkVersion( 2, 0, epoch2 ); + + BSONObj collFoo = BSON(CollectionType::ns("test.foo") << + CollectionType::keyPattern(BSON("a" << 1)) << + CollectionType::unique(false) << + CollectionType::updatedAt(2ULL) << + CollectionType::epoch(epoch2)); + _dummyConfig->insert( CollectionType::ConfigNS, collFoo ); -private: - scoped_ptr<MockRemoteDBServer> _dummyConfig; - CollectionMetadata* _oldMetadata; + BSONObj chunk1 = BSON(ChunkType::name("test.foo-a_MinKey") << + ChunkType::ns("test.foo") << + ChunkType::min(BSON("a" << MINKEY)) << + ChunkType::max(BSON("a" << 100)) << + ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << + ChunkType::DEPRECATED_epoch(epoch2) << + ChunkType::shard("shard0000")); + _dummyConfig->insert( ChunkType::ConfigNS, chunk1 ); + + BSONObj chunk2 = BSON(ChunkType::name("test.foo-a_100") << + ChunkType::ns("test.foo") << + ChunkType::min(BSON("a" << 100)) << + ChunkType::max(BSON("a" << MAXKEY)) << + ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << + ChunkType::DEPRECATED_epoch(epoch2) << + ChunkType::shard("shard0000")); + _dummyConfig->insert( ChunkType::ConfigNS, chunk2 ); + } + + void tearDown() { + MockConnRegistry::get()->removeServer( _dummyConfig->getServerAddress() ); + } + + ChunkVersion getCollVersion() const { + return _maxCollVersion; + } - ChunkVersion _maxCollVersion; -}; + const ChunkVersion& getShardVersion( size_t shard ) const { + return _maxCollVersion; + } + + const CollectionMetadata* getOldMetadata() const { + return _oldMetadata; + } + + private: + scoped_ptr<MockRemoteDBServer> _dummyConfig; + CollectionMetadata _oldMetadata; + + ChunkVersion _maxCollVersion; + }; #endif #if 0 -// TODO: MockServer functionality does not support selective query -class ThreeChunkTwoShardFixture : public mongo::unittest::Test { -protected: - void setUp() { - _dummyConfig.reset(new MockRemoteDBServer(CONFIG_HOST_PORT)); - mongo::ConnectionString::setConnectionHook(MockConnRegistry::get()->getConnStrHook()); - MockConnRegistry::get()->addServer(_dummyConfig.get()); - - OID epoch = OID::gen(); - _maxCollVersion = ChunkVersion(1, 0, epoch); - - BSONObj collFoo = BSON(CollectionType::ns("test.foo") << - CollectionType::keyPattern(BSON("a" << 1)) << - CollectionType::unique(false) << - CollectionType::updatedAt(1ULL) << - CollectionType::epoch(epoch)); - _dummyConfig->insert(CollectionType::ConfigNS, collFoo); - - BSONObj fooSingle = BSON(ChunkType::name("test.foo-a_MinKey") << - ChunkType::ns("test.foo") << - ChunkType::min(BSON("a" << MINKEY)) << - ChunkType::max(BSON("a" << MAXKEY)) << - ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << - ChunkType::DEPRECATED_epoch(epoch) << - ChunkType::shard("shard0000")); - _dummyConfig->insert(ChunkType::ConfigNS, fooSingle); - - ConnectionString confServerStr(CONFIG_HOST_PORT); - ConnectionString configLoc(confServerStr); - MetadataLoader loader(configLoc); - CollectionMetadata* metadata = loader.makeCollectionMetadata("not.sharded", - "shard0000", - NULL, /* no old metadata */ - NULL /* no need for errMsg */); - ASSERT(metadata != NULL); - - // Needs to delete the collection and rebuild because the mock server - // not support updates. 
- _dummyConfig->remove(CollectionType::ConfigNS, BSONObj()); - _dummyConfig->remove(ChunkType::ConfigNS, BSONObj()); - - OID epoch2 = OID::gen(); - _maxCollVersion = ChunkVersion(2, 0, epoch2); - _maxShardVersion.push_back(_maxCollVersion); - - BSONObj collFoo = BSON(CollectionType::ns("test.foo") << - CollectionType::keyPattern(BSON("a" << 1)) << - CollectionType::unique(false) << - CollectionType::updatedAt(2ULL) << - CollectionType::epoch(epoch2)); - _dummyConfig->insert(CollectionType::ConfigNS, collFoo); - - BSONObj chunk1 = BSON(ChunkType::name("test.foo-a_MinKey") << - ChunkType::ns("test.foo") << - ChunkType::min(BSON("a" << MINKEY)) << - ChunkType::max(BSON("a" << 10)) << - ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << - ChunkType::DEPRECATED_epoch(epoch2) << - ChunkType::shard("shard0000")); - _dummyConfig->insert(ChunkType::ConfigNS, chunk1); - - OID epoch3 = OID::gen(); - _maxCollVersion = ChunkVersion(2, 0, epoch3); - _maxShardVersion.push_back(_maxCollVersion); - - BSONObj chunk2 = BSON(ChunkType::name("test.foo-a_10") << - ChunkType::ns("test.foo") << - ChunkType::min(BSON("a" << 10)) << - ChunkType::max(BSON("a" << 100)) << - ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << - ChunkType::DEPRECATED_epoch(epoch3) << - ChunkType::shard("shard0001")); - _dummyConfig->insert(ChunkType::ConfigNS, chunk2); - - BSONObj chunk3 = BSON(ChunkType::name("test.foo-a_100") << - ChunkType::ns("test.foo") << - ChunkType::min(BSON("a" << 100)) << - ChunkType::max(BSON("a" << MAXKEY)) << - ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << - ChunkType::DEPRECATED_epoch(epoch3) << - ChunkType::shard("shard0001")); - _dummyConfig->insert(ChunkType::ConfigNS, chunk3); - } + // TODO: MockServer functionality does not support selective query + class ThreeChunkTwoShardFixture : public mongo::unittest::Test { + protected: + void setUp() { + _dummyConfig.reset( new MockRemoteDBServer( CONFIG_HOST_PORT ) ); + mongo::ConnectionString::setConnectionHook( MockConnRegistry::get()->getConnStrHook() ); + MockConnRegistry::get()->addServer( _dummyConfig.get() ); - void tearDown() { - MockConnRegistry::get()->removeServer(_dummyConfig->getServerAddress()); - } + OID epoch = OID::gen(); + _maxCollVersion = ChunkVersion( 1, 0, epoch ); - ChunkVersion getCollVersion() const { - return _maxCollVersion; - } + BSONObj collFoo = BSON(CollectionType::ns("test.foo") << + CollectionType::keyPattern(BSON("a" << 1)) << + CollectionType::unique(false) << + CollectionType::updatedAt(1ULL) << + CollectionType::epoch(epoch)); + _dummyConfig->insert( CollectionType::ConfigNS, collFoo ); - const ChunkVersion& getShardVersion(size_t shard) const { - return _maxShardVersion[shard]; - } + BSONObj fooSingle = BSON(ChunkType::name("test.foo-a_MinKey") << + ChunkType::ns("test.foo") << + ChunkType::min(BSON("a" << MINKEY)) << + ChunkType::max(BSON("a" << MAXKEY)) << + ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << + ChunkType::DEPRECATED_epoch(epoch) << + ChunkType::shard("shard0000")); + _dummyConfig->insert( ChunkType::ConfigNS, fooSingle ); -private: - scoped_ptr<MockRemoteDBServer> _dummyConfig; - CollectionMetadata* _oldMetadata; + ConnectionString confServerStr( CONFIG_HOST_PORT ); + ConnectionString configLoc( confServerStr ); + MetadataLoader loader( configLoc ); + CollectionMetadata metadata; + Status status = loader.makeCollectionMetadata( "not.sharded", // br + "shard0000", + NULL, /* no old metadata */ + &metadata ); + ASSERT( status.isOK() ); + + // Needs to delete the 
collection and rebuild because the mock server + // not support updates. + _dummyConfig->remove( CollectionType::ConfigNS, BSONObj() ); + _dummyConfig->remove( ChunkType::ConfigNS, BSONObj() ); + + OID epoch2 = OID::gen(); + _maxCollVersion = ChunkVersion( 2, 0, epoch2 ); + _maxShardVersion.push_back( _maxCollVersion ); - ChunkVersion _maxCollVersion; - vector<ChunkVersion> _maxShardVersion; -}; + BSONObj collFoo = BSON(CollectionType::ns("test.foo") << + CollectionType::keyPattern(BSON("a" << 1)) << + CollectionType::unique(false) << + CollectionType::updatedAt(2ULL) << + CollectionType::epoch(epoch2)); + _dummyConfig->insert( CollectionType::ConfigNS, collFoo ); + + BSONObj chunk1 = BSON(ChunkType::name("test.foo-a_MinKey") << + ChunkType::ns("test.foo") << + ChunkType::min(BSON("a" << MINKEY)) << + ChunkType::max(BSON("a" << 10)) << + ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << + ChunkType::DEPRECATED_epoch(epoch2) << + ChunkType::shard("shard0000")); + _dummyConfig->insert( ChunkType::ConfigNS, chunk1 ); + + OID epoch3 = OID::gen(); + _maxCollVersion = ChunkVersion( 2, 0, epoch3 ); + _maxShardVersion.push_back( _maxCollVersion ); + + BSONObj chunk2 = BSON(ChunkType::name("test.foo-a_10") << + ChunkType::ns("test.foo") << + ChunkType::min(BSON("a" << 10)) << + ChunkType::max(BSON("a" << 100)) << + ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << + ChunkType::DEPRECATED_epoch(epoch3) << + ChunkType::shard("shard0001")); + _dummyConfig->insert( ChunkType::ConfigNS, chunk2 ); + + BSONObj chunk3 = BSON(ChunkType::name("test.foo-a_100") << + ChunkType::ns("test.foo") << + ChunkType::min(BSON("a" << 100)) << + ChunkType::max(BSON("a" << MAXKEY)) << + ChunkType::DEPRECATED_lastmod(_maxCollVersion.toLong()) << + ChunkType::DEPRECATED_epoch(epoch3) << + ChunkType::shard("shard0001")); + _dummyConfig->insert( ChunkType::ConfigNS, chunk3 ); + } + + void tearDown() { + MockConnRegistry::get()->removeServer( _dummyConfig->getServerAddress() ); + } + + ChunkVersion getCollVersion() const { + return _maxCollVersion; + } + + const ChunkVersion& getShardVersion( size_t shard ) const { + return _maxShardVersion[shard]; + } + + private: + scoped_ptr<MockRemoteDBServer> _dummyConfig; + CollectionMetadata _oldMetadata; + + ChunkVersion _maxCollVersion; + vector<ChunkVersion> _maxShardVersion; + }; #endif } - // unnamed namespace +// unnamed namespace |
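
As a usage note, the reworked tests now hold `CollectionMetadata` by value (its constructor is public again) and assert on the returned `Status` instead of checking for a NULL pointer. A minimal sketch of the fixture pattern repeated throughout `collection_metadata_test.cpp` and `metadata_loader_test.cpp` (mock config server setup elided):

```cpp
ConnectionString configLoc( CONFIG_HOST_PORT );
MetadataLoader loader( configLoc );

CollectionMetadata metadata;   // value member, no scoped_ptr
Status status = loader.makeCollectionMetadata( "test.foo",
                                               "shard0000",
                                               NULL, /* no old metadata */
                                               &metadata );
ASSERT( status.isOK() );
ASSERT_EQUALS( 1U, metadata.getNumChunks() );
```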